import os
import sys
'''
Exchange 3 empty bottles for 1 full bottle of drink.
input: n, the number of empty bottles
output: the total number of bottles that can be obtained
'''
def demo1():
while True:
try:
a = int(input())
if a != 0:
print(a//2)
except:
break
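# A small sanity check for the n // 2 answer above (added sketch, not part of the
# original exercise): simulate the exchange directly, including the usual trick of
# borrowing one extra bottle when exactly two empties remain.
def _bottles_brute_force(n):
    drinks, empties = 0, n
    while empties >= 3:
        drinks += empties // 3
        empties = empties // 3 + empties % 3
    if empties == 2:  # borrow a bottle, drink it, hand the empty back
        drinks += 1
    return drinks
# for n in range(100): assert _bottles_brute_force(n) == n // 2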
######################################
'''
input: n followed by n random integers
output: the integers, deduplicated and sorted
'''
def demo2():
while True:
try:
n,m = int(input()),set()
for i in range(n):
m.add(int(input()))
for i in sorted(m):
print(i)
except:
break
########################################
'''
input: a hexadecimal number
output: its decimal value
'''
def hex2dec():
while True:
try:
print(int(input(),16))
except:
break
#########################################
'''
input: n
output: for an array a[n], repeatedly skip two elements and delete the third;
print the index of the last remaining element (Josephus counting with k = 3)
'''
def shangshu():
while True:
try:
n = int(input())
except:
exit()
r = 0
i = 2
while i <= n:
r = (r+3) %i
i += 1
print(r)
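# A small cross-check of the recurrence above (added sketch, not part of the original
# exercise): simulate removing every third element from a list of indices and compare
# the survivor's index with r = (r + 3) % i.
def _last_index_brute_force(n):
    people, pos = list(range(n)), 0
    while len(people) > 1:
        pos = (pos + 2) % len(people)  # skip two, delete the third
        people.pop(pos)
    return people[0]
# for n in range(1, 50):
#     r = 0
#     for i in range(2, n + 1):
#         r = (r + 3) % i
#     assert _last_index_brute_force(n) == r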
#########################################
'''
Read a string and print the set of characters it contains,
in order of first appearance.
'''
def char():
while True:
try:
res,a = "",input()
for i in a:
if i not in res:
res+=i
print(res)
except:
break
##########################################
'''
sudoku
approach: depth-first search with pruning
'''
def isok(mat,i,j,num):# check whether num can legally be placed at (i, j)
    for row in range(0,9):# check column j
        if mat[row][j] == num:
            return False
    for col in range(0,9):# check row i
        if mat[i][col] == num:
            return False
    ii = i//3
    jj = j//3
    # check the 3*3 box containing (i, j); if num already appears there, the placement is illegal
    for row in range(ii*3,ii*3+3):
        for col in range(jj*3,jj*3+3):
            if mat[row][col] == num:
                return False
    return True
def dfs(mat,i,j):# depth-first search
    if i==9:# all rows processed: the board is solved
        return mat
    if j==9:# end of this row: move on to the next row
        return dfs(mat,i+1,0)
    flag = False# flag: this row still has a cell that needs filling
    for col in range(j,9):# scan the remaining columns of this row; cells holding 0 need filling
        if mat[i][col]==0:
            flag = True
            isChange = False# isChange: whether this cell has been filled
            for num in range(1,10):
                if isok(mat,i,col,num):# try every digit 1-9 that is legal here
                    isChange = True
                    mat[i][col] = num
                    tpp = dfs(mat,i,col+1)# with this cell filled, is the rest of the row solvable?
                    if tpp == None:# no solution downstream: reset the cell to 0 (unfilled)
                        isChange = False
                        mat[i][col] = 0
                        continue# try the next digit
                    else:
                        return tpp
            if isChange==False:# no legal digit fits this cell
                return None
    if flag==False:# every cell in this row is already filled: go to the next row
        return dfs(mat,i+1,0)
def sudoku():
while True:
isCon = True
mat = []
for i in range(9):
line = sys.stdin.readline().strip()
if not line:
isCon = False
break
line =[int(i) for i in line.split(' ')]
mat.append(line)
if isCon ==False:
break
mat = dfs(mat,0,0)
for line in mat:
print(' '.join(str(j) for j in line))
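# A quick self-contained check of the solver above (added sketch, not part of the
# original code): an empty grid is a trivially valid puzzle, so dfs should fill it
# with a legal Sudoku solution.
def _sudoku_smoke_test():
    grid = [[0] * 9 for _ in range(9)]
    solved = dfs(grid, 0, 0)
    assert solved is not None
    for row in solved:
        assert sorted(row) == list(range(1, 10))
    return solved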
#####################################################
if __name__ == '__main__':
char()
######################################################
class SETTING:
server_list = {
"presto": {
"connect_type": "PrestoConnector",
"url": {
"username": "hive"
,"host": ""
,"port": 3600
,"param" : "hive"
,"schema": "default"
,"metastore": "mysql+pymysql://hive:hive@/hive"
},
"table_whitelist": [],
"table_blacklist": [],
},
"hive": {
"connect_type": "HiveSqlaConnector",
"url": {
"username": "yarn"
,"host": ""
,"port": 10000
,"schema": "default"
,"param" : "auth=NONE"
,"metastore": "mysql+pymysql://hive:hive@/hive"
},
"table_whitelist": [],
"table_blacklist": [],
},
}
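# A minimal sketch of how one of these entries might be rendered into a
# SQLAlchemy-style connection URL. This helper is an assumption added for
# illustration only; it is not part of the original settings module.
def build_url(name, scheme=None):
    entry = SETTING.server_list[name]["url"]
    scheme = scheme or name  # e.g. "hive" -> hive://..., "presto" -> presto://...
    url = "{}://{}@{}:{}/{}".format(
        scheme, entry["username"], entry["host"], entry["port"], entry["schema"])
    if entry.get("param"):
        url += "?" + entry["param"]
    return url
# build_url("hive") -> "hive://yarn@:10000/default?auth=NONE"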
######################################################
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import subprocess
import sys
import tempfile
import time
import logging
LOG = logging.getLogger(__name__)
class SSHError(Exception):
pass
class AsyncSSH:
_ready = False
def __init__(self, username=None, hostname=None, key=None, port=22, cb=None):
if cb:
self.cb = cb
self.key = key
self.username = username if username else "root"
self.hostname = hostname
self.port = str(port)
def cb(self, line):
LOG.debug(repr(line))
@asyncio.coroutine
def run(self, command, stdin=None, return_output=False,
strip_output=True, raise_on_error=True, user=None):
if not self._ready:
try:
yield from self.wait()
except Exception:
if raise_on_error:
raise
else:
return -1
if not user:
user = self.username
output = b""
if isinstance(stdin, str):
f = tempfile.TemporaryFile()
f.write(stdin.encode())
f.flush()
f.seek(0)
stdin = f
cmd = ["ssh", "-T", "-o", "StrictHostKeyChecking=no",
"%s@%s" % (user, self.hostname), "-p", self.port]
if self.key:
cmd += ["-i", self.key]
if isinstance(command, str):
cmd += command.split(" ")
else:
cmd += command
LOG.debug("Running '%s'" % cmd)
process = asyncio.create_subprocess_exec(*cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
process = yield from process
try:
while not process.stdout.at_eof():
line = yield from process.stdout.readline()
self.cb(line)
if return_output:
output += line
except asyncio.CancelledError:
LOG.debug("Terminated.")# Killing child process.")
raise
try:
process.terminate()
yield from process.wait()
except:
LOG.exception("Error waiting for child")
raise
yield from process.wait()
if process.returncode and raise_on_error:
LOG.error("Command failed: %s" % line)
msg = "Cmd '%s' failed. Exit code: %d" % (" ".join(cmd),
process.returncode)
raise SSHError(msg)
if return_output:
output = output.decode()
if strip_output:
return output.strip()
return output
LOG.debug("Returning %s" % process.returncode)
return process.returncode
@asyncio.coroutine
def wait(self, timeout=300):
start = time.time()
while 1:
try:
r, w = yield from asyncio.open_connection(self.hostname,
int(self.port))
self._ready = True
w.close()
return
except ConnectionError:
pass
if time.time() - start > timeout:
raise Exception("Timeout waiting for "
"%s:%s" % (self.hostname, self.port))
LOG.debug("Waiting for ssh %s:%s" % (self.hostname, self.port))
yield from asyncio.sleep(1)
@asyncio.coroutine
def scp_get(self, src, dst):
cmd = ["scp", "-B", "-o", "StrictHostKeyChecking no"]
if self.key:
cmd += ["-i", self.key]
cmd += ["-P", self.port]
cmd += ["-r", "%s@%s:%s" % (self.username, self.hostname, src), dst]
LOG.debug("Runnung %s" % " ".join(cmd))
process = asyncio.create_subprocess_exec(*cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
process = yield from process
try:
while not process.stdout.at_eof():
line = yield from process.stdout.read()
LOG.debug("scp: %s" % line)
except asyncio.CancelledError:
process.terminate()
asyncio.async(process.wait(), loop=asyncio.get_event_loop())
raise
return process.returncode
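# A minimal usage sketch (added; not part of the original module), driving AsyncSSH
# with the same pre-async/await coroutine style the class itself uses. The host,
# user and key path below are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    loop = asyncio.get_event_loop()
    ssh = AsyncSSH(username="root", hostname="192.0.2.10", key="/path/to/id_rsa")
    out = loop.run_until_complete(ssh.run("uname -a", return_output=True))
    print(out)
    loop.close()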
######################################################
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2017/8/23 12:54 PM
# @Author : chenyuelong
# @Mail : [email protected]
# @File : read.py
# @Software: PyCharm
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
class read():
    '''
    A single read record from a FASTQ file.
    '''
def __init__(self,*args):
self._readID = args[0]
self._readseq = args[1]
self._readinfo = args[2]
self._readq = args[3]
def main():
pass
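# A minimal parsing sketch (added as an assumption; not part of the original file):
# group a FASTQ stream into 4-line records and wrap each record in the read class above.
def parse_fastq(handle):
    while True:
        lines = [handle.readline().rstrip() for _ in range(4)]
        if not lines[0]:
            break
        yield read(lines[0], lines[1], lines[2], lines[3])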
if __name__ == '__main__':
main()
######################################################
# -*- coding: utf-8 -*-
"""
awsscripter.cli
This module implements awsscripter's CLI, and should not be directly imported.
"""
import os
import warnings
import click
import colorama
import yaml
from awsscripter.cli.init.init import init_group
from awsscripter.cli.audit.audit import audit_group
from awsscripter.cli.stack.stack import stack_group
from awsscripter.cli.monitoring.monitor import monitoring_group
from awsscripter.cli.testcommand.testcommand import testcommand_group
from awsscripter.cli.list.list import list_group
from awsscripter.stack.helpers import setup_logging, catch_exceptions
from awsscripter import __version__
from awsscripter.cli.security.security import security_group
from awsscripter.cli.awsbilling.awsbilling import billing_group
@click.group()
@click.version_option(version=__version__, prog_name="awsscripter")
@click.option("--debug", is_flag=True, help="Turn on debug logging.")
@click.option("--dir", "directory", help="Specify awsscripter directory.")
@click.option(
"--output", type=click.Choice(["yaml", "json"]), default="yaml",
help="The formatting style for command output.")
@click.option("--no-colour", is_flag=True, help="Turn off output colouring.")
@click.option(
"--var", multiple=True, help="A variable to template into config files.")
@click.option(
"--var-file", multiple=True, type=click.File("rb"),
help="A YAML file of variables to template into config files.")
@click.pass_context
@catch_exceptions
def cli(
ctx, debug, directory, no_colour, output, var, var_file
):
"""
awsscripter is a tool to manage your cloud native infrastructure deployments.
"""
logger = setup_logging(debug, no_colour)
colorama.init()
# Enable deprecation warnings
warnings.simplefilter("always", DeprecationWarning)
ctx.obj = {
"user_variables": {},
"output_format": output,
"no_colour": no_colour,
"awsscripter_dir": directory if directory else os.getcwd()
}
if var_file:
for fh in var_file:
parsed = yaml.safe_load(fh.read())
ctx.obj["user_variables"].update(parsed)
# the rest of this block is for debug purposes only
existing_keys = set(ctx.obj["user_variables"].keys())
new_keys = set(parsed.keys())
overloaded_keys = existing_keys & new_keys # intersection
if overloaded_keys:
logger.debug(
"Duplicate variables encountered: {0}. "
"Using values from: {1}."
.format(", ".join(overloaded_keys), fh.name)
)
if var:
# --var options overwrite --var-file options
for variable in var:
variable_key, variable_value = variable.split("=")
if variable_key in ctx.obj["user_variables"]:
logger.debug(
"Duplicate variable encountered: {0}. "
"Using value from --var option."
.format(variable_key)
)
ctx.obj["user_variables"].update({variable_key: variable_value})
cli.add_command(init_group)
cli.add_command(audit_group)
cli.add_command(stack_group)
cli.add_command(monitoring_group)
cli.add_command(testcommand_group)
cli.add_command(security_group)
cli.add_command(list_group)
cli.add_command(billing_group)
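# A minimal smoke-test sketch (added; not part of awsscripter): exercise the CLI group
# through click's test runner instead of a real shell invocation.
if __name__ == "__main__":
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ["--version"])
    print(result.output)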
######################################################
import tfchain.polyfill.encoding.object as jsobj
import tfchain.polyfill.array as jsarr
import tfchain.polyfill.asynchronous as jsasync
import tfchain.polyfill.crypto as jscrypto
import tfchain.client as tfclient
import tfchain.errors as tferrors
from tfchain.chain import NetworkType, Type
from tfchain.balance import WalletBalance, SingleSigWalletBalance, MultiSigWalletBalance
from tfchain.encoding.siabin import SiaBinaryEncoder
from tfchain.types import ConditionTypes, transactions, FulfillmentTypes
from tfchain.types.transactions.Base import TransactionBaseClass
from tfchain.types.transactions.Minting import TransactionV128, TransactionV129, TransactionV130
from tfchain.types.IO import CoinInput
from tfchain.types.CryptoTypes import PublicKey, PublicKeySpecifier
from tfchain.types.PrimitiveTypes import Hash, Currency
from tfchain.types.ConditionTypes import UnlockHash, UnlockHashType, ConditionUnlockHash, ConditionMultiSignature, ConditionCustodyFee
from tfchain.types.FulfillmentTypes import FulfillmentMultiSignature, PublicKeySignaturePair
def assymetric_key_pair_generate(entropy, index):
if not isinstance(entropy, (bytes, bytearray)) and not jsarr.is_uint8_array(entropy):
raise TypeError("entropy is of an invalid type {}".format(type(entropy)))
if not isinstance(index, int):
raise TypeError("index is of an invalid type {}".format(type(index)))
encoder = SiaBinaryEncoder()
encoder.add_array(entropy)
encoder.add_int(index)
entropy = jscrypto.blake2b(encoder.data)
return jscrypto.AssymetricSignKeyPair(entropy)
def public_key_from_assymetric_key_pair(pair):
if not isinstance(pair, jscrypto.AssymetricSignKeyPair):
raise TypeError("pair is of an invalid type {}".format(type(pair)))
return PublicKey(specifier=PublicKeySpecifier.ED25519, hash=pair.key_public)
def unlockhash_from_assymetric_key_pair(pair):
pk = public_key_from_assymetric_key_pair(pair)
return pk.unlockhash
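# A minimal usage sketch of the three helpers above (added; this module normally runs
# through Transcrypt polyfills, so treat it as illustrative only). The 32-byte zero
# entropy below is a placeholder, not a real wallet seed.
def _address_from_entropy(entropy=bytes(32), index=0):
    pair = assymetric_key_pair_generate(entropy, index)
    pk = public_key_from_assymetric_key_pair(pair)
    return pk, unlockhash_from_assymetric_key_pair(pair)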
class TFChainWallet:
"""
Tfchain Wallet object
"""
def __init__(self, network_type, pairs, client):
if not isinstance(network_type, NetworkType):
raise TypeError("network_type is expected to be a tfchain.chain.NetworkType, invalid: {} ({})".format(network_type, type(network_type)))
self._network_type = network_type
if not jsobj.is_js_arr(pairs) or jsarr.is_empty(pairs):
raise TypeError("pairs is expected to be a non-empty list/array of SigningKey pairs, not be of type {}".format(type(pairs)))
self._pairs = pairs
if not isinstance(client, tfclient.TFChainClient):
raise TypeError("client is expected to be a TFChainClient, not be of type {}".format(type(client)))
self._client = client
# store all addresses as well
self._addresses = []
for pair in self._pairs:
uh = unlockhash_from_assymetric_key_pair(pair)
address = uh.__str__()
self._addresses.append(address)
# add sub-apis
self._minter = TFChainMinter(wallet=self)
# self._atomicswap = TFChainAtomicSwap(wallet=self)
# self._threebot = TFChainThreeBot(wallet=self)
# self._erc20 = TFChainERC20(wallet=self)
@property
def addresses(self):
"""
:returns: the addresses owned by this wallet
:rtype: list/array
"""
return self._addresses
@property
def pairs(self):
"""
        :returns: the signing key pairs owned by this wallet
:rtype: list/array
"""
return self._pairs
@property
def client(self):
"""
:returns: the (explorer) tfchain client used by this wallet
        :rtype: tfchain.client.TFChainClient
"""
return self._client
@property
def network_type(self):
"""
        :returns: the (tfchain) network type
        :rtype: tfchain.chain.NetworkType
"""
return self._network_type
@property
def address(self):
"""
:returns: the primary (=first) address owned by this wallet
:rtype: str
"""
return self.addresses[0]
@property
def address_count(self):
"""
        :returns: the number of addresses owned by this wallet
:rtype: int
"""
return len(self.addresses)
@property
def minter(self):
"""
Minter used to update the (Coin) Minter Definition
        as well as to mint new coins, only usable if this wallet
has (co-)ownership over the current (coin) minter definition.
"""
return self._minter
# @property
# def atomicswap(self):
# """
# Atomic Swap API used to create atomic swap contracts as initiator or participator,
    #     as well as to redeem and refund existing unredeemed atomic swap contracts.
# """
# return self._atomicswap
# @property
# def threebot(self):
# """
# ThreeBot API used to register new 3Bots and
# manage existing 3Bot records.
# """
# return self._threebot
# @property
# def erc20(self):
# """
# ERC20 API used to send coins to ERC20 Addresses,
    #     and register TFT addresses that can then be used as ERC20 Withdraw addresses.
# """
# return self._erc20
@property
def addresses_multisig(self):
"""
The multi signature wallet addresses co-owned and linked to this wallet,
as reported by the internal balance reporter.
"""
balance = self.balance
return balance.addresses_multisig
@property
def balance(self):
"""
The balance "sheet" of the wallet.
"""
return self.balance_get()
def balance_get(self, chain_info=None):
"""
The balance "sheet" of the wallet.
"""
aggregator = SingleSigWalletBalanceAggregator(self, chain_info=chain_info)
return aggregator.fetch_and_aggregate()
@property
def transactions(self):
"""
Get all transactions linked to a personal wallet address.
"""
# for each address get all transactions
def generator():
for address in self.addresses:
yield self._unlockhash_get(address)
transactions = set()
def gatherer(result):
if result.transactions:
transactions.update(result.transactions)
p = jsasync.promise_pool_new(generator, cb=gatherer)
# define sort cb that will sort it prior to the final return
def cb():
# sort all transactions
def txn_arr_sort(a, b):
height_a = pow(2, 64) if a.height < 0 else a.height
height_b = pow(2, 64) if b.height < 0 else b.height
if height_a < height_b:
return -1
if height_a > height_b:
return 1
tx_order_a = pow(2, 64) if a.transaction_order < 0 else a.transaction_order
tx_order_b = pow(2, 64) if b.transaction_order < 0 else b.transaction_order
if tx_order_a < tx_order_b:
return -1
if tx_order_a > tx_order_b:
return 1
return 0
return jsarr.sort(transactions, txn_arr_sort, reverse=True)
# return promise chain
return jsasync.chain(p, cb)
# def coins_send(self, recipient, amount, source=None, refund=None, lock=None, data=None):
# """
# Send the specified amount of coins to the given recipient,
# optionally locked. Arbitrary data can be attached as well if desired.
# If the given recipient is a valid ERC20 address, than this will send
# the specified amount to that ERC20 address and no lock or data is allowed to be defined.
# The recipient is one of:
# - None: recipient is the Free-For-All wallet
# - str (or unlockhash): recipient is a personal wallet
# - list: recipient is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
# - tuple (addresses, sigcount): recipient is a sigcount-of-addresscount MultiSig wallet
# - an ERC20 address (str/ERC20Address), amount will be send to this ERC20 address
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insentive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# The lock can be a str, or int:
# - when it is an int it represents either a block height or an epoch timestamp (in seconds)
# - when a str it can be a Jumpscale Datetime (e.g. '12:00:10', '31/10/2012 12:30', ...) or a Jumpscale Duration (e.g. '+ 2h', '+7d12h', ...)
# Returns a TransactionSendResult.
# @param recipient: see explanation above
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# @param lock: optional lock that can be used to lock the sent amount to a specific time or block height, see explation above
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# if ERC20Address.is_valid_value(recipient):
# if lock != None:
# raise ValueError("a lock cannot be applied when sending coins to an ERC20 Address")
# if data != None:
# raise ValueError("data cannot be added to the transaction when sending coins to an ERC20 Address")
# # all good, try to send to the ERC20 address
# return self.erc20.coins_send(address=recipient, amount=amount, source=source, refund=refund)
# amount = Currency(value=amount)
# if amount <= 0:
# raise ValueError("no amount is defined to be sent")
# # define recipient
# recipient = ConditionTypes.from_recipient(recipient, lock=lock)
# # fund amount
# balance = self.balance
# miner_fee = self.network_type.minimum_miner_fee()
# inputs, remainder, suggested_refund = balance.fund(amount+miner_fee, source=source)
# # define the refund condition
# if refund == None: # automatically choose a refund condition if none is given
# if suggested_refund == None:
# refund = ConditionTypes.unlockhash_new(unlockhash=self.address)
# else:
# refund = suggested_refund
# else:
# # use the given refund condition (defined as a recipient)
# refund = ConditionTypes.from_recipient(refund)
# # create transaction
# txn = tftransactions.new()
# # add main coin output
# txn.coin_output_add(value=amount, condition=recipient)
# # add refund coin output if needed
# if remainder > 0:
# txn.coin_output_add(value=remainder, condition=refund)
# # add the miner fee
# txn.miner_fee_add(miner_fee)
# # add the coin inputs
# txn.coin_inputs = inputs
# # if there is data to be added, add it as well
# if data:
# txn.data = data
# # generate the signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # txn should be fulfilled now
# submit = txn.is_fulfilled()
# if submit:
# # submit the transaction
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# # add the id to the coin_output, so we can track it has been spent
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # and return the created/submitted transaction for optional user consumption
# return TransactionSendResult(txn, submit)
def coin_transaction_builder_new(self):
"""
Create a transaction builder that can be used to
add multiple outputs, in a chained manner, and send them all at once.
ERC20 coin outputs are not supported in the Coin Transaction Builder.
"""
return CoinTransactionBuilder(self)
def transaction_sign(self, txn, submit=None, balance=None):
"""
Sign in all places of the transaction where it is still possible,
and on which the wallet has authority to do so.
Returns a TransactionSignResult.
@param txn: transaction to sign, a JSON-encoded txn or already loaded in-memory as a valid Transaction type
"""
# validate and/or normalize txn parameter
if isinstance(txn, (str, dict)):
txn = transactions.from_json(txn)
elif not isinstance(txn, TransactionBaseClass):
raise TypeError("txn value has invalid type {} and cannot be signed".format(type(txn)))
to_submit = submit
balance_is_cached = (balance != None)
def cb(balance):
# check all parentids from the specified coin inputs,
# and set the coin outputs for the ones this wallet knows about
# and that are still unspent
if len(txn.coin_inputs) > 0:
# collect all known outputs
known_outputs = {}
for co in balance.outputs_available:
known_outputs[co.id.__str__()] = co
for co in balance.outputs_unconfirmed_available:
known_outputs[co.id.__str__()] = co
# mark the coin inputs that are known as available outputs by this wallet
for ci in txn.coin_inputs:
parentid = ci.parentid.__str__()
if parentid in known_outputs:
ci.parent_output = known_outputs[parentid]
p = None
# check for specific transaction types, as to
# be able to add whatever content we know we can add
if isinstance(txn, (TransactionV128, TransactionV129)):
def cb(condition):
txn.parent_mint_condition = condition
if not txn.mint_fulfillment_defined():
txn.mint_fulfillment = FulfillmentTypes.from_condition(txn.parent_mint_condition)
# set the parent mint condition
# and define the current fulfillment if it is not defined
p = jsasync.chain(self.client.minter.condition_get(), cb)
def sign_and_such():
# generate the signature requests
sig_requests = txn.signature_requests_new()
if len(sig_requests) == 0:
# possible if the wallet does not own any of the still required signatures,
# or for example because the wallet does not know about the parent outputs of
# the inputs still to be signed
def nop_cb(resolve, reject):
resolve(TransactionSignResult(txn, False, False))
return jsasync.promise_new(nop_cb)
# fulfill the signature requests that we can fulfill
signature_count = 0
for request in sig_requests:
try:
key_pair = self.key_pair_get(request.wallet_address)
pk = public_key_from_assymetric_key_pair(key_pair)
input_hash = request.input_hash_new(public_key=pk)
signature = key_pair.sign(input_hash.value)
request.signature_fulfill(public_key=pk, signature=signature)
signature_count += 1
except KeyError:
pass # this is acceptable due to how we directly try the key_pair_get method
# check if fulfilled, and if so, we'll submit unless the callee does not want that
is_fulfilled = txn.is_fulfilled()
submit = (to_submit and is_fulfilled)
if not submit: # return as-is
def stub_cb(resolve, reject):
resolve(TransactionSignResult(
transaction=txn,
signed=(signature_count>0),
submitted=submit,
))
return jsasync.promise_new(stub_cb)
# submit, and only then return
def id_cb(id):
txn.id = id
if balance_is_cached:
addresses = balance.addresses
# if the balance is cached, also update the balance
for idx, ci in enumerate(txn.coin_inputs):
if ci.parent_output.condition.unlockhash.__str__() in addresses:
balance.output_add(txn, idx, confirmed=False, spent=True)
for idx, co in enumerate(txn.coin_outputs):
if co.condition.unlockhash.__str__() in addresses:
# add the id to the coin_output, so we can track it has been spent
co.id = txn.coin_outputid_new(idx)
balance.output_add(txn, idx, confirmed=False, spent=False)
# return the signed result
return TransactionSignResult(
transaction=txn,
signed=(signature_count>0),
submitted=submit,
)
return jsasync.chain(self._transaction_put(transaction=txn), id_cb)
# sign now, or chain it and sign when possible
if p == None:
return sign_and_such()
return jsasync.chain(p, sign_and_such)
if balance_is_cached:
if not isinstance(balance, WalletBalance):
raise TypeError("balance is of unexpected type: {} ({})".format(balance, type(balance)))
# execute the balance cb directly if a cached balance is given
return cb(balance)
# chain the balance object first
return jsasync.chain(self.balance, cb)
def key_pair_get(self, unlockhash):
"""
Get the private/public key pair for the given unlock hash.
        If the unlock hash is not owned by this wallet, a KeyError exception is raised.
"""
if isinstance(unlockhash, UnlockHash):
unlockhash = unlockhash.__str__()
if not isinstance(unlockhash, str):
raise TypeError("unlockhash cannot be of type {}".format(type(unlockhash)))
if unlockhash[:2] == '00':
return self._pairs[0]
for index, address in enumerate(self.addresses):
if address == unlockhash:
return self._pairs[index]
raise KeyError("wallet does not own unlock hash {}".format(unlockhash))
def _unlockhash_get(self, address):
return self._client.unlockhash_get(address)
def _transaction_put(self, transaction):
return self._client.transaction_put(transaction)
class TFChainMinter():
"""
TFChainMinter contains all Coin Minting logic.
"""
def __init__(self, wallet):
if not isinstance(wallet, TFChainWallet):
raise TypeError("wallet is expected to be a TFChainWallet")
self._wallet = wallet
# def definition_set(self, minter, data=None):
# """
# Redefine the current minter definition.
# Arbitrary data can be attached as well if desired.
# The minter is one of:
# - str (or unlockhash): minter is a personal wallet
# - list: minter is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
# - tuple (addresses, sigcount): minter is a sigcount-of-addresscount MultiSig wallet
# Returns a TransactionSendResult.
# @param minter: see explanation above
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# # create empty Mint Definition Txn, with a newly generated Nonce set already
# txn = tftransactions.mint_definition_new()
# # add the minimum miner fee
# txn.miner_fee_add(self._minium_miner_fee)
# # set the new mint condition
# txn.mint_condition = ConditionTypes.from_recipient(minter)
# # minter definition must be of unlock type 1 or 3
# ut = txn.mint_condition.unlockhash.type
# if ut not in (UnlockHashType.PUBLIC_KEY, UnlockHashType.MULTI_SIG):
# raise ValueError("{} is an invalid unlock hash type and cannot be used for a minter definition".format(ut))
# # optionally set the data
# if data != None:
# txn.data = data
# # get and set the current mint condition
# txn.parent_mint_condition = self._current_mint_condition_get()
# # create a raw fulfillment based on the current mint condition
# txn.mint_fulfillment = FulfillmentTypes.from_condition(txn.parent_mint_condition)
# # get all signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# submit = txn.is_fulfilled()
# if submit:
# txn.id = self._transaction_put(transaction=txn)
# # return the txn, as well as the submit status as a boolean
# return TransactionSendResult(txn, submit)
# def coins_new(self, recipient, amount, lock=None, data=None):
# """
# Create new (amount of) coins and give them to the defined recipient.
# Arbitrary data can be attached as well if desired.
# The recipient is one of:
# - None: recipient is the Free-For-All wallet
# - str (or unlockhash/bytes/bytearray): recipient is a personal wallet
# - list: recipient is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
# - tuple (addresses, sigcount): recipient is a sigcount-of-addresscount MultiSig wallet
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insentive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# The lock can be a str, or int:
# - when it is an int it represents either a block height or an epoch timestamp (in seconds)
# - when a str it can be a Jumpscale Datetime (e.g. '12:00:10', '31/10/2012 12:30', ...) or a Jumpscale Duration (e.g. '+ 2h', '+7d12h', ...)
# Returns a TransactionSendResult.
# @param recipient: see explanation above
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param lock: optional lock that can be used to lock the sent amount to a specific time or block height, see explation above
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# # create empty Mint Definition Txn, with a newly generated Nonce set already
# txn = tftransactions.mint_coin_creation_new()
# # add the minimum miner fee
# txn.miner_fee_add(self._minium_miner_fee)
# balance = self._wallet.balance
# # parse the output
# amount = Currency(value=amount)
# if amount <= 0:
# raise ValueError("no amount is defined to be sent")
# # define recipient
# recipient = ConditionTypes.from_recipient(recipient, lock=lock)
# # and add it is the output
# txn.coin_output_add(value=amount, condition=recipient)
# # optionally set the data
# if data != None:
# txn.data = data
# # get and set the current mint condition
# txn.parent_mint_condition = self._current_mint_condition_get()
# # create a raw fulfillment based on the current mint condition
# txn.mint_fulfillment = FulfillmentTypes.from_condition(txn.parent_mint_condition)
# # get all signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# submit = txn.is_fulfilled()
# if submit:
# txn.id = self._transaction_put(transaction=txn)
# # update balance of wallet
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# # add the id to the coin_output, so we can track it has been spent
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # return the txn, as well as the submit status as a boolean
# return TransactionSendResult(txn, submit)
def coins_burn(self, amount, source=None, refund=None, data=None, balance=None):
"""
Burn the specified amount of coins,
paying miner fees on top of it.
"""
txn = TransactionV130()
miner_fee = self._minium_miner_fee
amount = Currency(amount)
balance_is_cached = (balance != None)
if amount.less_than_or_equal_to(0):
raise ValueError("a strict positive amount is required to be burned")
def balance_cb(balance):
# compute the amount of coin inputs we can accept, and ensure we do not have more
# > 16e3 is the maximum size allowed by rivine-chains
# > 307 is the size in bytes of a txn without arb. data, one miner fee, and no inputs/outputs
# > 169 bytes is required per (coin) input
max_input_count = (16e3 - 307 - len(txn.data)) // 169
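            # worked example (added note): with no arbitrary data attached this allows
            # at most (16000 - 307) // 169 = 92 coin inputs in a single transaction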
# fund amount
inputs, remainder, suggested_refund = balance.fund(amount.plus(miner_fee), source=source, max_input_count=max_input_count)
# define the refund condition
if refund == None: # automatically choose a refund condition if none is given
if suggested_refund == None:
refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
else:
refund = suggested_refund
else:
# use the given refund condition (defined as a recipient)
refund = ConditionTypes.from_recipient(refund)
# add refund coin output if needed
if remainder.greater_than(0):
txn.coin_output_add(value=remainder, condition=refund)
# add the miner fee
txn.miner_fee_add(miner_fee)
# add the coin inputs
txn.coin_inputs = inputs
# add custody fees if the wallet is linked to a goldchain network
if self._wallet._network_type.chain_type() == Type.GOLDCHAIN:
total_custody_fee = Currency()
for ci in txn.coin_inputs:
if not ci.parent_output:
raise Exception("BUG: cannot define the required custody fee if no parent output is linked to coin input {}".format(ci.parentid.__str__()))
total_custody_fee = total_custody_fee.plus(ci.parent_output.custody_fee)
txn.coin_output_add(value=total_custody_fee, condition=ConditionCustodyFee(balance.chain_time))
# if there is data to be added, add it as well
if data != None:
txn.data = data
# generate the signature requests
sig_requests = txn.signature_requests_new()
if len(sig_requests) == 0:
raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# fulfill the signature requests that we can fulfill
for request in sig_requests:
try:
key_pair = self._wallet.key_pair_get(request.wallet_address)
pk = public_key_from_assymetric_key_pair(key_pair)
input_hash = request.input_hash_new(public_key=pk)
signature = key_pair.sign(input_hash.value)
request.signature_fulfill(public_key=pk, signature=signature)
except KeyError:
pass # this is acceptable due to how we directly try the key_pair_get method
# txn should be fulfilled now
submit = txn.is_fulfilled()
if not submit: # return as-is
def stub_cb(resolve, reject):
resolve(TransactionSendResult(txn, submit))
return jsasync.promise_new(stub_cb)
# submit, and only then return
def id_cb(id):
txn.id = id
if balance_is_cached:
addresses = balance.addresses
for idx, ci in enumerate(txn.coin_inputs):
if ci.parent_output.condition.unlockhash.__str__() in addresses:
balance.output_add(txn, idx, confirmed=False, spent=True)
                # return the send result
return TransactionSendResult(txn, submit)
return jsasync.chain(self._wallet._transaction_put(transaction=txn), id_cb)
if balance != None:
if not isinstance(balance, WalletBalance):
raise TypeError("balance is of unexpected type: {} ({})".format(balance, type(balance)))
# if balance is given, execute the balance cb directly
return balance_cb(balance)
        # otherwise fetch the balance first, then run the callback with it
return jsasync.chain(self._wallet.balance, balance_cb)
@property
def _minium_miner_fee(self):
return self._wallet.network_type.minimum_miner_fee()
# from tfchain.types.ConditionTypes import ConditionAtomicSwap, OutputLock, AtomicSwapSecret, AtomicSwapSecretHash
# from tfchain.types.FulfillmentTypes import FulfillmentAtomicSwap
# class TFChainAtomicSwap():
# """
# TFChainAtomicSwap contains all Atomic Swap logic.
# """
# def __init__(self, wallet):
# if not isinstance(wallet, TFChainWallet):
# raise TypeError("wallet is expected to be a TFChainWallet")
# self._wallet = wallet
# def initiate(self, participator, amount, refund_time='+48h', source=None, refund=None, data=None, submit=True):
# """
# Initiate an atomic swap contract, targeted at the specified address,
# with the given amount. By default a 48 hours duration (starting from last block time)
# is used as time until contract can be refunded, but this can be changed.
# The participator is one of:
# - None: participator is the Free-For-All wallet
# - str (or unlockhash): participator is a personal wallet
# - list: participator is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
# - tuple (addresses, sigcount): participator is a sigcount-of-addresscount MultiSig wallet
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insentive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# Returns the AtomicSwapInitiationResult.
# @param participator: see explanation above
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param duration: the duration until the atomic swap contract becomes refundable
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# @param submit: True by default, if False the transaction will not be sent even if possible (e.g. if you want to double check)
# """
# # create a random secret
# secret = AtomicSwapSecret.random()
# secret_hash = AtomicSwapSecretHash.from_secret(secret)
# # create the contract
# result = self._create_contract(
# recipient=participator, amount=amount, refund_time=refund_time,
# source=source, refund=refund, data=data, secret_hash=secret_hash,
# submit=submit)
# # return the contract, transaction, submission status as well as secret
# return AtomicSwapInitiationResult(
# AtomicSwapContract(coinoutput=result.transaction.coin_outputs[0], unspent=True,
# current_timestamp=self._chain_time),
# secret, result.transaction, result.submitted)
# def participate(self, initiator, amount, secret_hash, refund_time='+24h', source=None, refund=None, data=None, submit=True):
# """
# Initiate an atomic swap contract, targeted at the specified address,
# with the given amount. By default a 24 hours duration (starting from last block time)
# is used as time until contract can be refunded, but this can be changed.
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insentive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# Returns the AtomicSwapParticipationResult.
# @param initiator: str (or unlockhash) of a personal wallet
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param secret_hash: the secret hash to be use, the same secret hash as used for the initiation contract
# @param duration: the duration until the atomic swap contract becomes refundable
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (can only be a personal wallet address)
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# @param submit: True by default, if False the transaction will not be sent even if possible (e.g. if you want to double check)
# """
# # normalize secret hash
# secret_hash = AtomicSwapSecretHash(value=secret_hash)
# # create the contract and return the contract, transaction and submission status
# result = self._create_contract(
# recipient=initiator, amount=amount, refund_time=refund_time, source=source,
# refund=refund, data=data, secret_hash=secret_hash, submit=submit)
# return AtomicSwapParticipationResult(
# AtomicSwapContract(coinoutput=result.transaction.coin_outputs[0], unspent=True, current_timestamp=self._chain_time),
# result.transaction, result.submitted)
# def verify(self, outputid, amount=None, secret_hash=None, min_refund_time=None, sender=False, receiver=False, contract=None):
# """
# Verify the status and content of the Atomic Swap Contract linked to the given outputid.
# An exception is returned if the contract does not exist, has already been spent
# or is not valid according to this validation
# Returns the verified contract.
# @param outputid: str or Hash that identifies the coin output to whuich this contract is linked
# @param amount: validate amount if defined, int or str that defines the amount of TFT to set, see explanation above
# @param secret_hash: validate secret hash if defined, str or BinaryData
# @param min_refund_time: validate contract's refund time if defined, 0 if expected to be refundable, else the minimun time expected until it becomes refundable
# @param sender: if True it is expected that this wallet is registered as the sender of this contract
# @param receiver: if True it is expected that this wallet is registered as the receiver of this contract
# @param contract: if contract fetched in a previous call already, one can verify it also by directly passing it to this method
# """
# if contract == None:
# co = None
# spend_txn = None
# # try to fetch the contract
# try:
# # try to fetch the coin output that is expected to contain the secret
# co, _, spend_txn = self._wallet.client.coin_output_get(outputid)
# except tfchain.errors.ExplorerNoContent as exc:
# raise tfchain.errors.AtomicSwapContractNotFound(outputid=outputid) from exc
# # check if the contract hasn't been spent already
# if spend_txn != None:
# # if a spend transaction exists,
# # it means the contract was already spend, and can therefore no longer be redeemed
# raise tfchain.errors.AtomicSwapContractSpent(contract=AtomicSwapContract(
# coinoutput=co, unspent=False, current_timestamp=self._chain_time), transaction=spend_txn)
# # create the unspent contract
# contract = AtomicSwapContract(coinoutput=co, unspent=True, current_timestamp=self._chain_time)
# elif not isinstance(contract, AtomicSwapContract):
# raise TypeError("contract was expected to be an AtomicSwapContract, not to be of type {}".format(type(contract)))
# else:
# # verify the outputid is the same
# if contract.outputid != outputid:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="output identifier is expected to be {}, not {}".format(str(outputid), str(contract.outputid)),
# contract=contract)
# # if amount is given verify it
# if amount != None:
# amount = Currency(value=amount)
# if amount != contract.amount:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="amount is expected to be {}, not {}".format(amount.str(with_unit=True), contract.amount.str(with_unit=True)),
# contract=contract)
# # if secret hash is given verify it
# if secret_hash != None:
# # normalize secret hash
# secret_hash = AtomicSwapSecretHash(value=secret_hash)
# if secret_hash != contract.secret_hash:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="secret_hash is expected to be {}, not {}".format(str(secret_hash), str(contract.secret_hash)),
# contract=contract)
# # if min_refund_time is given verify it
# if min_refund_time != None:
# chain_time = self._chain_time
# if isinstance(min_refund_time, str):
# min_refund_time = OutputLock(value=min_refund_time, current_timestamp=chain_time).value
# elif not isinstance(min_refund_time, int):
# raise TypeError("expected minimum refund time to be an integer or string, not to be of type {}".format(type(min_refund_time)))
# min_duration = max(0, min_refund_time-chain_time)
# chain_time = self._chain_time
# if chain_time >= contract.refund_timestamp:
# contract_duration = 0
# else:
# contract_duration = contract.refund_timestamp - chain_time
# if min_duration <= 0:
# if contract_duration != 0:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="contract cannot be refunded yet while it was expected to be possible already",
# contract=contract)
# elif contract_duration < min_duration:
# if contract_duration == 0:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="contract was expected to be non-refundable for at least {} more, but it can be refunded already since {}".format(
# duration.toString(min_duration), epoch2HRDateTime(contract.refund_timestamp)),
# contract=contract)
# elif contract_duration < min_duration:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="contract was expected to be available for redemption for at least {}, but it is only available for {}".format(
# duration.toString(min_duration), duration.toString(contract_duration)),
# contract=contract)
# # if expected to be authorized to be the sender, verify this
# if sender and contract.sender not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="wallet not registered as sender of this contract", contract=contract)
# # if expected to be authorized to be the receiver, verify this
# if receiver and contract.receiver not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="wallet not registered as receiver of this contract", contract=contract)
# # return the contract for further optional consumption,
# # according to our validations it is valid
# return contract
# def redeem(self, outputid, secret, data=None):
# """
# Redeem an unspent Atomic Swap contract.
# Returns the sent transaction.
# @param outputid: the identifier of the coin output that contains the atomic swap contract
# @param secret: secret, matching the contract's secret hash, used to redeem the contract
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# co = None
# spend_txn = None
# # try to fetch the contract
# try:
# # try to fetch the coin output that is expected to contain the secret
# co, _, spend_txn = self._wallet.client.coin_output_get(outputid)
# except tfchain.errors.ExplorerNoContent as exc:
# raise tfchain.errors.AtomicSwapContractNotFound(outputid=outputid) from exc
# # generate the contract
# contract = AtomicSwapContract(coinoutput=co, unspent=False, current_timestamp=self._chain_time) # either it is spent already or we'll spend it
# # check if the contract hasn't been spent already
# if spend_txn != None:
# # if a spend transaction exists,
# # it means the contract was already spend, and can therefore no longer be redeemed
# raise tfchain.errors.AtomicSwapContractSpent(contract=contract, transaction=spend_txn)
# # verify the defined secret
# if not contract.verify_secret(secret):
# raise tfchain.errors.AtomicSwapInvalidSecret(contract=contract)
# # ensure this wallet is authorized to be the receiver
# if contract.receiver not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapForbidden(message="unauthorized to redeem: wallet does not contain receiver address {}".format(contract.receiver), contract=contract)
# # create the fulfillment
# fulfillment = FulfillmentTypes.atomic_swap_new(secret=secret)
# # create, sign and submit the transaction
# return self._claim_contract(contract=contract, as_sender=False, fulfillment=fulfillment, data=data)
# def refund(self, outputid, data=None):
# """
# Refund an unspent Atomic Swap contract.
# Returns the sent transaction.
# @param outputid: the identifier of the coin output that contains the atomic swap contract
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# co = None
# spend_txn = None
# # try to fetch the contract
# try:
# # try to fetch the coin output that is expected to contain the secret
# co, _, spend_txn = self._wallet.client.coin_output_get(outputid)
# except tfchain.errors.ExplorerNoContent as exc:
# raise tfchain.errors.AtomicSwapContractNotFound(outputid=outputid) from exc
# # generate the contract
# contract = AtomicSwapContract(coinoutput=co, unspent=False, current_timestamp=self._chain_time) # either it is spent already or we'll spend it
# # check if the contract hasn't been spent already
# if spend_txn != None:
# # if a spend transaction exists,
# # it means the contract was already spend, and can therefore no longer be redeemed
# raise tfchain.errors.AtomicSwapContractSpent(contract=contract, transaction=spend_txn)
# # verify the contract can be refunded already
# time = self._chain_time
# if time < contract.refund_timestamp:
# raise tfchain.errors.AtomicSwapForbidden(
# message="unauthorized to refund: contract can only be refunded since {}".format(epoch2HRDateTime(contract.refund_timestamp)),
# contract=contract)
# # ensure this wallet is authorized to be the sender (refunder)
# if contract.sender not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapForbidden(message="unauthorized to refund: wallet does not contain sender address {}".format(contract.sender), contract=contract)
# # create the fulfillment
# fulfillment = FulfillmentTypes.atomic_swap_new()
# # create, sign and submit the transaction
# return self._claim_contract(contract=contract, as_sender=True, fulfillment=fulfillment, data=data)
# def _create_contract(self, recipient, amount, refund_time, source, refund, data, secret_hash, submit):
# """
# Create a new atomic swap contract,
# the logic for both the initiate as well as participate phase.
# """
# # define the amount
# amount = Currency(value=amount)
# if amount <= 0:
# raise ValueError("no amount is defined to be swapped")
# # define the miner fee
# miner_fee = self._minium_miner_fee
# # ensure the amount is bigger than the miner fee,
# # otherwise the contract cannot be redeemed/refunded
# if amount <= miner_fee:
# raise tfchain.errors.AtomicSwapInsufficientAmountError(amount=amount, minimum_miner_fee=miner_fee)
# # define the coin inputs
# balance = self._wallet.balance
# inputs, remainder, suggested_refund = balance.fund(amount+miner_fee, source=source)
# # define the refund
# if refund != None:
# refund = ConditionTypes.from_recipient(refund)
# elif suggested_refund != None:
# refund = ConditionTypes.from_recipient(suggested_refund)
# else:
# refund = ConditionTypes.from_recipient(self._wallet.address)
# # define the sender
# if isinstance(refund, ConditionUnlockHash):
# sender = refund.unlockhash
# else:
# sender = self._wallet.address
# # create and populate the transaction
# txn = tftransactions.new()
# txn.coin_inputs = inputs
# txn.miner_fee_add(self._minium_miner_fee)
# txn.data = data
# # define refund time already, so we can use the chain time as the current time
# if isinstance(refund_time, str):
# chain_time = self._chain_time
# refund_time = OutputLock(value=refund_time, current_timestamp=chain_time).value
# elif not isinstance(refund_time, int):
# raise TypeError("expected refund time to be an integer or string, not to be of type {}".format(type(refund_time)))
# # define the atomic swap contract and add it as a coin output
# asc = ConditionTypes.atomic_swap_new(
# sender=sender, receiver=recipient, hashed_secret=secret_hash, lock_time=refund_time)
# txn.coin_output_add(condition=asc, value=amount)
# # optionally add a refund coin output
# if remainder > 0:
# txn.coin_output_add(condition=refund, value=remainder)
# # get all signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # assign all coin output ID's for atomic swap contracts,
# # as we always care about the contract's output ID and
# # the refund coin output has to be our coin output
# for idx, co in enumerate(txn.coin_outputs):
# co.id = txn.coin_outputid_new(idx)
# # submit if possible
# submit = submit and txn.is_fulfilled()
# if submit:
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# balance.output_add(co, confirmed=False, spent=False)
# # return the txn, as well as the submit status as a boolean
# return TransactionSendResult(txn, submit)
# def _claim_contract(self, contract, as_sender, fulfillment, data):
# """
# claim an unspent atomic swap contract
# """
# # create the contract and fill in the easy content
# txn = tftransactions.new()
# miner_fee = self._minium_miner_fee
# txn.miner_fee_add(miner_fee)
# txn.data = data
# # define the coin input
# txn.coin_input_add(parentid=contract.outputid, fulfillment=fulfillment, parent_output=contract.coin_output)
# # and the coin output
# txn.coin_output_add(
# condition=ConditionTypes.unlockhash_new(contract.sender if as_sender else contract.receiver),
# value=contract.amount-miner_fee)
# # get all signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # submit if possible
# submit = txn.is_fulfilled()
# if not submit:
# raise Exception("BUG: transaction should be fulfilled at ths point, please fix or report as an isuse")
# # assign transactionid
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# balance = self._wallet.balance
# addresses = self._wallet.addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # return the txn
# return txn
# @property
# def _chain_time(self):
# """
# Returns the time according to the chain's network.
# """
# info = self._wallet.client.blockchain_info_get()
# return info.timestamp
# @property
# def _minium_miner_fee(self):
# """
# Returns the minimum miner fee
# """
# return self._wallet.network_type.minimum_miner_fee()
# def _output_get(self, outputid):
# """
# Get the transactions linked to the given outputID.
# @param outputid: id of the output to look up
# """
# return self._wallet.client.output_get(outputid)
# def _transaction_put(self, transaction):
# """
# Submit the transaction to the network using the parent's wallet client.
# Returns the transaction ID.
# """
# return self._wallet.client.transaction_put(transaction=transaction)
# class TFChainThreeBot():
# """
# TFChainThreeBot contains all ThreeBot logic.
# """
# def __init__(self, wallet):
# if not isinstance(wallet, TFChainWallet):
# raise TypeError("wallet is expected to be a TFChainWallet")
# self._wallet = wallet
# def record_new(self, months=1, names=None, addresses=None, key_index=None, source=None, refund=None):
# """
# Create a new 3Bot by creating a new record on the BlockChain,
# by default 1 month rent is already paid for the 3Bot, but up to 24 months can immediately be pre-paid
# against a discount if desired.
# At least one name or one address is required, and up to 5 names and 10 addresses can
# exist for a single 3Bot.
# If no key_index is given a new key pair is generated for the wallet,
# otherwise the key pair on the given index of the wallet is used.
# Returns a TransactionSendResult.
# @param months: amount of months to be prepaid, at least 1 month is required, maximum 24 months is allowed
# @param names: 3Bot Names to add to the 3Bot as aliases (minimum 0, maximum 5)
# @param addresses: Network Addresses used to reach the 3Bot (minimum 0, maximum 10)
# @param key_index: if None is given a new key pair is generated, otherwise the key pair on the defined index is used.
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default it uses the source if it specifies a single address, otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# # create the txn and fill the easiest properties already
# txn = tftransactions.threebot_registration_new()
# txn.number_of_months = months
# if names == None and addresses == None:
# raise ValueError("at least one name or one address is to be given, none is defined")
# txn.names = names
# txn.addresses = addresses
# # get the fees, and fund the transaction
# balance = self._fund_txn(txn, source, refund)
# # if the key_index is not defined, generate a new public key,
# # otherwise use the key_index given
# if key_index == None:
# txn.public_key = self._wallet.public_key_new()
# else:
# if not isinstance(key_index, int):
# raise TypeError("key index is to be of type int, not type {}".format(type(key_index)))
# addresses = self._wallet.addresses
# if key_index < 0 or key_index >= len(addresses):
# raise ValueError("key index {} is OOB, index cannot be negative, and can be maximum {}".format(key_index, len(addresses)-1))
# txn.public_key = self._wallet.key_pair_get(unlockhash=addresses[key_index]).public_key
# # sign, submit, update Balance and return result
# return self._sign_and_submit_txn(txn, balance)
# def record_update(self, identifier, months=0, names_to_add=None, names_to_remove=None, addresses_to_add=None, addresses_to_remove=None, source=None, refund=None):
# """
# Update the record of an existing 3Bot, for which this Wallet is authorized to make such changes.
# Names and addresses can be added and removed. Removal of data is always for free, adding data costs money.
# Extra months can also be paid (up to 24 months in total), as to extend the expiration time further in the future.
# One of months, names_to_add, names_to_remove, addresses_to_add, addresses_to_remove has to be a value other than 0/None.
# Returns a TransactionSendResult.
# @param months: amount of months to be paid and added to the current months, if the 3Bot was inactive, the starting time will be now
# @param names_to_add: 3Bot Names to add to the 3Bot as aliases (minimum 0, maximum 5)
# @param names_to_remove: 3Bot Names to remove from the 3Bot (minimum 0, maximum 5)
# @param addresses_to_add: Network Addresses to add and used to reach the 3Bot (minimum 0, maximum 10)
# @param addresses_to_remove: Network Addresses to remove and used to reach the 3Bot (minimum 0, maximum 10)
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default it uses the source if it specifies a single address, otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# if months < 1 and not reduce((lambda r, v: r or (v != None)), [names_to_add, names_to_remove, addresses_to_add, addresses_to_remove], False):
# raise ValueError("extra months is to be given or one name/address is to be added/removed, none is defined")
# # create the txn and fill the easiest properties already
# txn = tftransactions.threebot_record_update_new()
# txn.botid = identifier
# txn.number_of_months = months
# txn.names_to_add = names_to_add
# txn.names_to_remove = names_to_remove
# txn.addresses_to_add = addresses_to_add
# txn.addresses_to_remove = addresses_to_remove
# # get the 3Bot Public Key
# record = self._wallet.client.threebot.record_get(identifier)
# # set the parent public key
# txn.parent_public_key = record.public_key
# # ensure the 3Bot is either active, or will become active
# if record.expiration <= self._chain_time and months == 0:
# raise tfchain.errors.ThreeBotInactive(identifier, record.expiration)
# # get the fees, and fund the transaction
# balance = self._fund_txn(txn, source, refund)
# # sign, submit, update Balance and return result
# return self._sign_and_submit_txn(txn, balance)
# def name_transfer(self, sender, receiver, names, source=None, refund=None):
# """
# Transfer one or multiple 3Bot names from the sender 3Bot to the receiver 3Bot.
# Both the Sender and Receiver 3Bots have to be active at the time of transfer.
# Returns a TransactionSendResult.
# @param sender: identifier of the existing and active 3Bot sender bot
# @param receiver: identifier of the existing and active 3Bot receiver bot
# @param names: 3Bot Names to transfer (minimum 0, maximum 5)
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default it uses the source if it specifies a single address, otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# # create the txn and fill the easiest properties already
# txn = tftransactions.threebot_name_transfer_new()
# txn.sender_botid = sender
# txn.receiver_botid = receiver
# txn.names = names
# if len(txn.names) == 0:
# raise ValueError("at least one (3Bot) name has to be transfered, but none were defined")
# # keep track of chain time
# chain_time = self._chain_time
# # get and assign the 3Bot's public key for the sender
# record = self._wallet.client.threebot.record_get(sender)
# txn.sender_parent_public_key = record.public_key
# # ensure sender bot is active
# if record.expiration <= chain_time:
# raise tfchain.errors.ThreeBotInactive(sender, record.expiration)
# # get and assign the 3Bot's public key for the receiver
# record = self._wallet.client.threebot.record_get(receiver)
# txn.receiver_parent_public_key = record.public_key
# # ensure receiver bot is active
# if record.expiration <= chain_time:
# raise tfchain.errors.ThreeBotInactive(receiver, record.expiration)
# # get the fees, and fund the transaction
# balance = self._fund_txn(txn, source, refund)
# # sign and update Balance and return result;
# # only if the wallet owns both 3Bot public keys will the Txn
# # already be submitted as well
# return self._sign_and_submit_txn(txn, balance)
# def _fund_txn(self, txn, source, refund):
# """
# common fund/refund/inputs/fees logic for all 3Bot Transactions
# """
# # get the fees, and fund the transaction
# miner_fee = self._minium_miner_fee
# bot_fee = txn.required_bot_fees
# balance = self._wallet.balance
# inputs, remainder, suggested_refund = balance.fund(miner_fee+bot_fee, source=source)
# # add the coin inputs
# txn.coin_inputs = inputs
# # add refund coin output if needed
# if remainder > 0:
# # define the refund condition
# if refund == None: # automatically choose a refund condition if none is given
# if suggested_refund == None:
# refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
# else:
# refund = suggested_refund
# else:
# # use the given refund condition (defined as a recipient)
# refund = ConditionTypes.from_recipient(refund)
# txn.refund_coin_output_set(value=remainder, condition=refund)
# # add the miner fee
# txn.transaction_fee = miner_fee
# # return balance object
# return balance
# def _sign_and_submit_txn(self, txn, balance):
# """
# common sign and submit logic for all 3Bot Transactions
# """
# # generate the signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # txn should be fulfilled now
# submit = txn.is_fulfilled()
# if submit:
# # submit the transaction
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# # add the id to the coin_output, so we can track it has been spent
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # and return the created/submitted transaction for optional user consumption
# return TransactionSendResult(txn, submit)
# @property
# def _minium_miner_fee(self):
# """
# Returns the minimum miner fee
# """
# return self._wallet.network_type.minimum_miner_fee()
# def _transaction_put(self, transaction):
# """
# Submit the transaction to the network using the parent's wallet client.
# Returns the transaction ID.
# """
# return self._wallet.client.transaction_put(transaction=transaction)
# @property
# def _chain_time(self):
# """
# Returns the time according to the chain's network.
# """
# info = self._wallet.client.blockchain_info_get()
# return info.timestamp
# class TFChainERC20():
# """
# TFChainERC20 contains all ERC20 (wallet) logic.
# """
# def __init__(self, wallet):
# if not isinstance(wallet, TFChainWallet):
# raise TypeError("wallet is expected to be a TFChainWallet")
# self._wallet = wallet
# def coins_send(self, address, amount, source=None, refund=None):
# """
# Send the specified amount of coins to the given ERC20 address.
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insensitive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# Returns a TransactionSendResult.
# @param address: str or ERC20Address value to which the money is to be sent
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default it uses the source if it specifies a single address, otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# amount = Currency(value=amount)
# if amount <= 0:
# raise ValueError("no amount is defined to be sent")
# # create transaction
# txn = tftransactions.erc20_convert_new()
# # define the amount and recipient
# txn.address = ERC20Address(value=address)
# txn.value = amount
# # fund the transaction
# balance = self._fund_txn(txn, source, refund, txn.value)
# # sign, submit and return the transaction
# return self._sign_and_submit_txn(txn, balance)
# def address_register(self, value=None, source=None, refund=None):
# """
# Register an existing TFT address of this wallet as an ERC20 Withdraw Address,
# either by specifying the address itself or by specifying the index of the address.
# If no value is defined a new key pair will be generated.
# Returns a TransactionSendResult.
# @param value: index of the TFT address or address itself, the address has to be owned by this wallet
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default it uses the source if it specifies a single address, otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# if value == None:
# public_key = self._wallet.public_key_new()
# elif isinstance(value, (str, UnlockHash)):
# try:
# public_key = self._wallet.key_pair_get(unlockhash=value).public_key
# except KeyError as exc:
# if isinstance(value, str):
# value = UnlockHash.from_json(value)
# raise tfchain.errors.ERC20RegistrationForbidden(address=value) from exc
# elif isinstance(value, int) and not isinstance(value, bool):
# addresses = self._wallet.addresses
# if value < 0 or value >= len(addresses):
# raise ValueError("address index {} is not a valid index for this wallet, has to be in the inclusive range of [0, {}]".format(
# value, len(addresses)-1))
# public_key = self._wallet.key_pair_get(unlockhash=addresses[value]).public_key
# else:
# raise ValueError("value has to be a str, UnlockHash or int, cannot identify an address using value {} (type: {})".format(value, type(value)))
# # create transaction
# txn = tftransactions.erc20_address_registration_new()
# # define the public key
# txn.public_key = public_key
# # fund the transaction
# balance = self._fund_txn(txn, source, refund, txn.registration_fee)
# # sign, submit and return the transaction
# return self._sign_and_submit_txn(txn, balance)
# def address_get(self, value=None):
# """
# Get the registration info of an existing TFT address of this wallet as an ERC20 Withdraw Address,
# either by specifying the address itself or by specifying the index of the address.
# If no value is defined the first wallet address will be used.
# Returns an ERC20AddressInfo named tuple.
# @param value: index of the TFT address or address itself, the address has to be owned by this wallet
# """
# if value == None:
# public_key = self._wallet.key_pair_get(unlockhash=self._wallet.address).public_key
# elif isinstance(value, (str, UnlockHash)):
# try:
# public_key = self._wallet.key_pair_get(unlockhash=value).public_key
# except KeyError as exc:
# if isinstance(value, str):
# value = UnlockHash.from_json(value)
# raise tfchain.errors.AddressNotInWallet(address=value) from exc
# elif isinstance(value, int) and not isinstance(value, bool):
# addresses = self._wallet.addresses
# if value < 0 or value >= len(addresses):
# raise ValueError("address index {} is not a valid index for this wallet, has to be in the inclusive range of [0, {}]".format(
# value, len(addresses)-1))
# public_key = self._wallet.key_pair_get(unlockhash=addresses[value]).public_key
# else:
# raise ValueError("value has to be a str, UnlockHash or int, cannot identify an address using value {} (type: {})".format(value, type(value)))
# # look up the wallet address and return it
# return self._wallet.client.erc20.address_get(unlockhash=public_key.unlockhash)
# def addresses_get(self):
# """
# Get the information for all registered ERC20 withdraw addresses.
# Can return an empty list if no addresses of this wallet were registered as an ERC20 withdraw address.
# Returns a list of ERC20AddressInfo named tuples.
# """
# results = []
# # scan for some new keys first, to ensure we get all addresses
# self._wallet._key_scan()
# # get the ERC20 info for all addresses that are registered as ERC20 withdraw addresses, if any
# for address in self._wallet.addresses:
# try:
# info = self._wallet.client.erc20.address_get(address)
# results.append(info)
# except tfchain.errors.ExplorerNoContent:
# pass
# # return all found info, if anything
# return results
# def _fund_txn(self, txn, source, refund, amount):
# """
# common fund/refund/inputs/fees logic for all ERC20 Transactions
# """
# # get the fees, and fund the transaction
# miner_fee = self._minium_miner_fee
# balance = self._wallet.balance
# inputs, remainder, suggested_refund = balance.fund(miner_fee+amount, source=source)
# # add the coin inputs
# txn.coin_inputs = inputs
# # add refund coin output if needed
# if remainder > 0:
# # define the refund condition
# if refund == None: # automatically choose a refund condition if none is given
# if suggested_refund == None:
# refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
# else:
# refund = suggested_refund
# else:
# # use the given refund condition (defined as a recipient)
# refund = ConditionTypes.from_recipient(refund)
# txn.refund_coin_output_set(value=remainder, condition=refund)
# # add the miner fee
# txn.transaction_fee = miner_fee
# # return balance object
# return balance
# def _sign_and_submit_txn(self, txn, balance):
# """
# common sign and submit logic for all ERC20 Transactions
# """
# # generate the signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # txn should be fulfilled now
# submit = txn.is_fulfilled()
# if submit:
# # submit the transaction
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# # add the id to the coin_output, so we can track it has been spent
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # and return the created/submitted transaction for optional user consumption
# return TransactionSendResult(txn, submit)
# @property
# def _minium_miner_fee(self):
# """
# Returns the minimum miner fee
# """
# return self._wallet.network_type.minimum_miner_fee()
# def _transaction_put(self, transaction):
# """
# Submit the transaction to the network using the parent's wallet client.
# Returns the transaction ID.
# """
# return self._wallet.client.transaction_put(transaction=transaction)
class TransactionSendResult():
"""
TransactionSendResult is a named tuple,
used as the result for a generic transaction send call.
"""
def __init__(self, transaction, submitted):
self._transaction = transaction
self._submitted = submitted
@property
def transaction(self):
return self._transaction
@property
def submitted(self):
return self._submitted
class TransactionSignResult:
"""
TransactionSignResult is a named tuple,
used as the result for a transaction sign call.
"""
def __init__(self, transaction, signed, submitted):
self._transaction = transaction
self._signed = signed
self._submitted = submitted
@property
def transaction(self):
return self._transaction
@property
def signed(self):
return self._signed
@property
def submitted(self):
return self._submitted
# class AtomicSwapInitiationResult():
# """
# AtomicSwapInitiationResult is a named tuple,
# used as the result for an atomic swap initiation call.
# """
# def __init__(self, contract, secret, transaction, submitted):
# self._contract = contract
# self._secret = secret
# self._transaction = transaction
# self._submitted = submitted
# @property
# def contract(self):
# return self._contract
# @property
# def secret(self):
# return self._secret
# @property
# def transaction(self):
# return self._transaction
# @property
# def submitted(self):
# return self._submitted
# class AtomicSwapParticipationResult():
# """
# AtomicSwapParticipationResult is a named tuple,
# used as the result for an atomic swap participation call.
# """
# def __init__(self, contract, transaction, submitted):
# self._contract = contract
# self._transaction = transaction
# self._submitted = submitted
# @property
# def contract(self):
# return self._contract
# @property
# def transaction(self):
# return self._transaction
# @property
# def submitted(self):
# return self._submitted
class SingleSigWalletBalanceAggregator:
"""
State class to serve as the red line throughout
the chained promise-based balance gathering for a (regular/personal) wallet,
which can involve the merging of results of multiple (single-sig) addresses.
"""
def __init__(self, wallet, chain_info=None):
if not isinstance(wallet, TFChainWallet):
raise TypeError("expected wallet to be of type TFChainWallet, not: {} ({})".format(wallet, type(wallet)))
self._wallet = wallet
self._balance = SingleSigWalletBalance()
self._info = chain_info
if self._info != None and not isinstance(self._info, tfclient.ExplorerBlockchainInfo):
raise TypeError("info has to be an ExplorerBlockchainInfo object, invalid: {} ({})".format(self._info, type(self._info)))
def fetch_and_aggregate(self):
if self._info != None:
return jsasync.chain(
self._personal_pool_chain_get(),
self._balance_get,
)
return jsasync.chain(
self._wallet._client.blockchain_info_get(),
self._collect_chain_info,
self._personal_pool_chain_get,
self._balance_get,
)
def _collect_chain_info(self, info):
self._info = info
def _personal_pool_chain_get(self):
return jsasync.promise_pool_new(
self._personal_address_generator,
cb=self._collect_personal_balance,
)
def _personal_address_generator(self):
for address in self._wallet.addresses:
yield self._wallet._unlockhash_get(address)
def _collect_personal_balance(self, result):
balance = result.balance(info=self._info)
self._balance = self._balance.balance_add(balance)
def _balance_get(self):
return self._balance
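# Note (not part of the original module): typical use is
#   SingleSigWalletBalanceAggregator(wallet).fetch_and_aggregate()
# which returns a promise (see jsasync.chain) resolving to the merged SingleSigWalletBalance.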
class CoinTransactionBuilder():
def __init__(self, wallet):
self._txn = transactions.new()
self._txn_send = False
self._wallet = wallet
@property
def transaction(self):
return self._txn
def output_add(self, recipient, amount, lock=None):
"""
Add an output to the transaction, returning the transaction
itself to allow for chaining.
The recipient is one of:
- None: recipient is the Free-For-All wallet
- str (or unlockhash): recipient is a personal wallet
- list: recipient is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
- tuple (addresses, sigcount): recipient is a sigcount-of-addresscount MultiSig wallet
- an ERC20 address (str/ERC20Address), amount will be send to this ERC20 address
The amount can be a str or an int:
- when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
- when defining as a str you can use the following space-stripped and case-insensitive formats:
- '123456789': same as when defining the amount as an int
- '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
- '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
- '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
@param recipient: see explanation above
@param amount: int or str that defines the amount of TFT to set, see explanation above
@param lock: optional lock that can be used to lock the sent amount to a specific time or block height, see explanation above
"""
if self._txn_send:
raise RuntimeError("coin transaction builder is already consumed")
amount = Currency(value=amount)
if amount.less_than_or_equal_to(0):
raise ValueError("no amount is defined to be sent")
recipient = ConditionTypes.from_recipient(recipient, lock=lock)
self._txn.coin_output_add(value=amount, condition=recipient)
return self
def send(self, source=None, refund=None, data=None, balance=None, merge=False, merge_min_co_count=None):
if self._txn_send:
raise RuntimeError("coin transaction builder is already consumed")
txn = self._txn
self._txn_send = True
balance_is_cached = (balance != None)
def balance_cb(balance):
if not merge: # regular fund logic
# fund amount
amount = Currency.sum(*[co.value for co in txn.coin_outputs])
miner_fee = self._wallet.network_type.minimum_miner_fee()
# compute the amount of coin inputs we can accept, and ensure we do not have more
# > 16e3 is the maximum size allowed by rivine-chains
# > 307 is the size in bytes of a txn without arb. data, one miner fee, and no inputs/outputs
# > 51 bytes is required per (coin) output
# > 169 bytes is required per (coin) input
extra_bytes_count = 0
if len(txn.coin_outputs) > 0 and txn.coin_outputs[0].condition.ctype == 3:
extra_bytes_count = 17 # add 17 bytes for lock time condition
max_input_count = (16e3 - 307 - (51 * len(txn.coin_outputs)) - len(txn.data) - extra_bytes_count) // 169
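# e.g. with two coin outputs, no arbitrary data and no lock time condition this gives
# (16000 - 307 - 2*51) // 169 = 92 coin inputs (presumably the origin of the hardcoded 92 in the merge branch below)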
# fund the txn
inputs, remainder, suggested_refund = balance.fund(amount.plus(miner_fee), source=source, max_input_count=max_input_count)
# if there is data to be added, add it as well
if data != None:
txn.data = data
# check that we do not have more than the max amount of coin inputs
if len(inputs) > max_input_count:
raise tferrors.InsufficientFunds(
"insufficient big funds funds in this wallet: {} coin inputs overflow the allowed {} inputs".format(
len(inputs), max_input_count))
else: # merge logic
# gather all outputs
all_outputs = []
for co in balance.outputs_available:
all_outputs.append(co)
if len(all_outputs) < 92:
for co in balance.outputs_unconfirmed_available:
all_outputs.append(co)
all_outputs.sort(key=lambda co: float(co.value.str()))
# select outputs to use (low ones)
output_count = min(len(all_outputs), 92) # 92 is a hardcoded constant of allowed coin outputs
if not output_count or (merge_min_co_count and output_count < min(92, merge_min_co_count)):
# early return in case not enough outputs to merge
def stub_cb(resolve, reject):
resolve(TransactionSendResult(txn, False))
return jsasync.promise_new(stub_cb)
used_outputs = all_outputs[:output_count]
# select the inputs from these inputs
inputs = [CoinInput.from_coin_output(co) for co in used_outputs]
remainder = Currency()
suggested_refund = None
# select and create the output for these inputs
miner_fee = self._wallet.network_type.minimum_miner_fee()
txn.coin_output_add(
Currency.sum(*[co.value for co in used_outputs]).minus(miner_fee),
used_outputs[output_count-1].condition, # use the address with the highest value
)
# add refund coin output if needed
if remainder.greater_than(0):
# define the refund condition
if refund == None: # automatically choose a refund condition if none is given
if suggested_refund == None:
refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
else:
refund = suggested_refund
else:
# use the given refund condition (defined as a recipient)
refund = ConditionTypes.from_recipient(refund)
txn.coin_output_add(value=remainder, condition=refund)
# add the miner fee
txn.miner_fee_add(miner_fee)
# add the coin inputs
txn.coin_inputs = inputs
# add custody fees if the wallet is linked to a goldchain network
if self._wallet.network_type.chain_type() == Type.GOLDCHAIN:
total_custody_fee = Currency()
for ci in txn.coin_inputs:
if not ci.parent_output:
raise Exception("BUG: cannot define the required custody fee if no parent output is linked to coin input {}".format(ci.parentid.__str__()))
total_custody_fee = total_custody_fee.plus(ci.parent_output.custody_fee)
txn.coin_output_add(value=total_custody_fee, condition=ConditionCustodyFee(balance.chain_time))
# generate the signature requests
sig_requests = txn.signature_requests_new()
if len(sig_requests) == 0:
raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# fulfill the signature requests that we can fulfill
for request in sig_requests:
try:
key_pair = self._wallet.key_pair_get(request.wallet_address)
pk = public_key_from_assymetric_key_pair(key_pair)
input_hash = request.input_hash_new(public_key=pk)
signature = key_pair.sign(input_hash.value)
request.signature_fulfill(public_key=pk, signature=signature)
except KeyError:
pass # this is acceptable due to how we directly try the key_pair_get method
# txn should be fulfilled now
submit = txn.is_fulfilled()
if not submit: # return as-is
def stub_cb(resolve, reject):
resolve(TransactionSendResult(txn, submit))
return jsasync.promise_new(stub_cb)
# submit, and only then return
def id_cb(id):
txn.id = id
if balance_is_cached:
addresses = balance.addresses
for idx, ci in enumerate(txn.coin_inputs):
if ci.parent_output.condition.unlockhash.__str__() in addresses:
balance.output_add(txn, idx, confirmed=False, spent=True)
for idx, co in enumerate(txn.coin_outputs):
if co.condition.unlockhash.__str__() in addresses:
# add the id to the coin_output, so we can track it has been spent
co.id = txn.coin_outputid_new(idx)
balance.output_add(txn, idx, confirmed=False, spent=False)
# return the send result
return TransactionSendResult(txn, submit)
return jsasync.chain(self._wallet._transaction_put(transaction=txn), id_cb)
if balance != None:
if not isinstance(balance, WalletBalance):
raise TypeError("balance is of unexpected type: {} ({})".format(balance, type(balance)))
# if balance is given, execute the balance cb directly
return balance_cb(balance)
# otherwise fetch the balance first and then run the callback
return jsasync.chain(self._wallet.balance, balance_cb)
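# Illustrative usage sketch (not part of the original module; `wallet` is a hypothetical
# TFChainWallet instance and the recipient address is a placeholder):
#   builder = CoinTransactionBuilder(wallet)
#   promise = builder.output_add('01abc...', '2.5 TFT').send()
#   # `promise` resolves to a TransactionSendResult once the txn is funded, signed and,
#   # when fulfilled, submitted.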
|
python
|
import configparser
from fast_arrow import Client, OptionOrder
print("----- running {}".format(__file__))
config = configparser.ConfigParser()
config.read('config.debug.ini')
#
# initialize fast_arrow client and authenticate
#
client = Client(
username = config['account']['username'],
password = config['account']['password'])
client.authenticate()
#
# fetch option orders
#
option_orders_all = OptionOrder.all(client)
#
# in case you have lots, only use first 25
# (unroll process fetches contract data for each leg)
#
option_orders = option_orders_all[0:25]
#
# unroll option orders ... ie, break each option leg into its own row
# this is helpful when doing detailed P/L reporting
#
option_orders_unrolled = OptionOrder.unroll_option_legs(client, option_orders)
#
# let's print out the results
#
print(option_orders_unrolled[0].keys())
|
python
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import time
from operator import itemgetter
import jsonschema
from flask import request
from wtforms.fields import BooleanField, FloatField, HiddenField, IntegerField, SelectField, StringField, TextAreaField
from wtforms.fields.html5 import DecimalField, EmailField
from wtforms.validators import DataRequired, Email, InputRequired, NumberRange, Optional, ValidationError
from wtforms.widgets.html5 import NumberInput
from indico.core import signals
from indico.core.config import config
from indico.modules.designer import PageLayout, PageOrientation, PageSize, TemplateType
from indico.modules.designer.util import get_default_template_on_category, get_inherited_templates
from indico.modules.events.features.util import is_feature_enabled
from indico.modules.events.payment import payment_settings
from indico.modules.events.registration.models.forms import ModificationMode
from indico.modules.events.registration.models.invitations import RegistrationInvitation
from indico.modules.events.registration.models.registrations import Registration
from indico.util.i18n import _
from indico.util.placeholders import get_missing_placeholders, render_placeholder_info
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import EmailListField, FileField, IndicoDateTimeField, IndicoEnumSelectField, JSONField
from indico.web.forms.fields.principals import PrincipalListField
from indico.web.forms.fields.simple import HiddenFieldList, IndicoEmailRecipientsField
from indico.web.forms.validators import HiddenUnless, IndicoEmail, LinkedDateTime
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
def _check_if_payment_required(form, field):
if not field.data:
return
if not is_feature_enabled(form.event, 'payment'):
raise ValidationError(_('You have to enable the payment feature in order to set a registration fee.'))
class RegistrationFormForm(IndicoForm):
_price_fields = ('currency', 'base_price')
_registrant_notification_fields = ('notification_sender_address',
'message_pending', 'message_unpaid', 'message_complete')
_manager_notification_fields = ('manager_notifications_enabled', 'manager_notification_recipients')
_special_fields = _price_fields + _registrant_notification_fields + _manager_notification_fields
title = StringField(_("Title"), [DataRequired()], description=_("The title of the registration form"))
introduction = TextAreaField(_("Introduction"),
description=_("Introduction to be displayed when filling out the registration form"))
contact_info = StringField(_("Contact info"),
description=_("How registrants can get in touch with somebody for extra information"))
moderation_enabled = BooleanField(_("Moderated"), widget=SwitchWidget(),
description=_("If enabled, registrations require manager approval"))
require_login = BooleanField(_("Only logged-in users"), widget=SwitchWidget(),
description=_("Users must be logged in to register"))
require_user = BooleanField(_("Registrant must have account"), widget=SwitchWidget(),
description=_("Registrations emails must be associated with an Indico account"))
limit_registrations = BooleanField(_("Limit registrations"), widget=SwitchWidget(),
description=_("Whether there is a limit of registrations"))
registration_limit = IntegerField(_("Capacity"), [HiddenUnless('limit_registrations'), DataRequired(),
NumberRange(min=1)],
description=_("Maximum number of registrations"))
modification_mode = IndicoEnumSelectField(_("Modification allowed"), enum=ModificationMode,
description=_("Will users be able to modify their data? When?"))
publish_registrations_enabled = BooleanField(_('Publish registrations'), widget=SwitchWidget(),
description=_("Registrations from this form will be displayed in the "
"event page"))
publish_registration_count = BooleanField(_("Publish number of registrations"), widget=SwitchWidget(),
description=_("Number of registered participants will be displayed in "
"the event page"))
publish_checkin_enabled = BooleanField(_('Publish check-in status'), widget=SwitchWidget(),
description=_("Check-in status will be shown publicly on the event page"))
base_price = DecimalField(_('Registration fee'), [NumberRange(min=0, max=999999.99), Optional(),
_check_if_payment_required], filters=[lambda x: x if x is not None else 0],
widget=NumberInput(step='0.01'),
description=_("A fixed fee all users have to pay when registering."))
currency = SelectField(_('Currency'), [DataRequired()], description=_('The currency for new registrations'))
notification_sender_address = StringField(_('Notification sender address'), [IndicoEmail()],
filters=[lambda x: (x or None)])
message_pending = TextAreaField(_("Message for pending registrations"),
description=_("Text included in emails sent to pending registrations"))
message_unpaid = TextAreaField(_("Message for unpaid registrations"),
description=_("Text included in emails sent to unpaid registrations"))
message_complete = TextAreaField(_("Message for complete registrations"),
description=_("Text included in emails sent to complete registrations"))
manager_notifications_enabled = BooleanField(_('Enabled'), widget=SwitchWidget(),
description=_("Enable notifications to managers about registrations"))
manager_notification_recipients = EmailListField(_('List of recipients'),
[HiddenUnless('manager_notifications_enabled',
preserve_data=True), DataRequired()],
description=_("Email addresses that will receive notifications"))
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super(RegistrationFormForm, self).__init__(*args, **kwargs)
self._set_currencies()
self.notification_sender_address.description = _('Email address set as the sender of all '
'notifications sent to users. If empty, '
'then {0} is used.'.format(config.NO_REPLY_EMAIL))
def _set_currencies(self):
currencies = [(c['code'], '{0[code]} ({0[name]})'.format(c)) for c in payment_settings.get('currencies')]
self.currency.choices = sorted(currencies, key=lambda x: x[1].lower())
class RegistrationFormScheduleForm(IndicoForm):
start_dt = IndicoDateTimeField(_("Start"), [Optional()], default_time=time(0, 0),
description=_("Moment when registrations will be open"))
end_dt = IndicoDateTimeField(_("End"), [Optional(), LinkedDateTime('start_dt')], default_time=time(23, 59),
description=_("Moment when registrations will be closed"))
modification_end_dt = IndicoDateTimeField(_("Modification deadline"), [Optional(), LinkedDateTime('end_dt')],
default_time=time(23, 59),
description=_("Deadline until which registration information can be "
"modified (defaults to the end date if empty)"))
def __init__(self, *args, **kwargs):
regform = kwargs.pop('regform')
self.timezone = regform.event.timezone
super(RegistrationFormScheduleForm, self).__init__(*args, **kwargs)
class InvitationFormBase(IndicoForm):
_invitation_fields = ('skip_moderation',)
_email_fields = ('email_from', 'email_subject', 'email_body')
email_from = SelectField(_('From'), [DataRequired()])
email_subject = StringField(_("Email subject"), [DataRequired()])
email_body = TextAreaField(_("Email body"), [DataRequired()], widget=CKEditorWidget(simple=True))
skip_moderation = BooleanField(_("Skip moderation"), widget=SwitchWidget(),
description=_("If enabled, the user's registration will be approved automatically."))
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
event = self.regform.event
super(InvitationFormBase, self).__init__(*args, **kwargs)
if not self.regform.moderation_enabled:
del self.skip_moderation
self.email_from.choices = event.get_allowed_sender_emails().items()
self.email_body.description = render_placeholder_info('registration-invitation-email', invitation=None)
def validate_email_body(self, field):
missing = get_missing_placeholders('registration-invitation-email', field.data, invitation=None)
if missing:
raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
class InvitationFormNew(InvitationFormBase):
_invitation_fields = ('first_name', 'last_name', 'email', 'affiliation') + InvitationFormBase._invitation_fields
first_name = StringField(_('First name'), [DataRequired()],
description=_("The first name of the user you are inviting."))
last_name = StringField(_('Last name'), [DataRequired()],
description=_("The last name of the user you are inviting."))
email = EmailField(_('Email'), [DataRequired(), Email()], filters=[lambda x: x.lower() if x else x],
description=_("The invitation will be sent to this address."))
affiliation = StringField(_('Affiliation'),
description=_("The affiliation of the user you are inviting."))
@generated_data
def users(self):
return [{'first_name': self.first_name.data,
'last_name': self.last_name.data,
'email': self.email.data,
'affiliation': self.affiliation.data}]
def validate_email(self, field):
if RegistrationInvitation.find(email=field.data).with_parent(self.regform).count():
raise ValidationError(_("There is already an invitation with this email address."))
if Registration.find(email=field.data, is_active=True).with_parent(self.regform).count():
raise ValidationError(_("There is already a registration with this email address."))
class InvitationFormExisting(InvitationFormBase):
_invitation_fields = ('users_field',) + InvitationFormBase._invitation_fields
users_field = PrincipalListField(_('Users'), [DataRequired()], allow_external_users=True,
description=_("Select the users to invite."))
@generated_data
def users(self):
return [{'first_name': x['firstName'],
'last_name': x['familyName'],
'email': x['email'].lower(),
'affiliation': x['affiliation']}
for x in self.users_field.data]
def validate_users_field(self, field):
emails = {x['email'].lower() for x in field.data}
# invitations
existing = {x.email for x in self.regform.invitations} & emails
if existing:
raise ValidationError(_("There are already invitations for the following email addresses: {emails}")
.format(emails=', '.join(sorted(existing))))
# registrations
existing = {x.email for x in self.regform.registrations if x.is_active} & emails
if existing:
raise ValidationError(_("There are already registrations with the following email addresses: {emails}")
.format(emails=', '.join(sorted(existing))))
class EmailRegistrantsForm(IndicoForm):
from_address = SelectField(_("From"), [DataRequired()])
cc_addresses = EmailListField(_("CC"),
description=_("Beware, addresses in this field will receive one mail per "
"registrant."))
subject = StringField(_("Subject"), [DataRequired()])
body = TextAreaField(_("Email body"), [DataRequired()], widget=CKEditorWidget(simple=True))
recipients = IndicoEmailRecipientsField(_('Recipients'))
copy_for_sender = BooleanField(_('Send copy to me'), widget=SwitchWidget(),
description=_('Send copy of each email to my mailbox'))
attach_ticket = BooleanField(_('Attach ticket'), widget=SwitchWidget(),
description=_('Attach tickets to emails'))
registration_id = HiddenFieldList()
submitted = HiddenField()
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
event = self.regform.event
super(EmailRegistrantsForm, self).__init__(*args, **kwargs)
self.from_address.choices = event.get_allowed_sender_emails().items()
self.body.description = render_placeholder_info('registration-email', regform=self.regform, registration=None)
def validate_body(self, field):
missing = get_missing_placeholders('registration-email', field.data, regform=self.regform, registration=None)
if missing:
raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
def is_submitted(self):
return super(EmailRegistrantsForm, self).is_submitted() and 'submitted' in request.form
class TicketsForm(IndicoForm):
tickets_enabled = BooleanField(_('Enable Tickets'), widget=SwitchWidget(),
description=_('Create tickets for registrations using this registration form.'))
ticket_on_email = BooleanField(_('Send with an e-mail'), [HiddenUnless('tickets_enabled',
preserve_data=True)],
widget=SwitchWidget(),
description=_('Attach PDF ticket to the email sent to a user after completing '
'their registration.'))
ticket_on_event_page = BooleanField(_('Download from event homepage'), [HiddenUnless('tickets_enabled',
preserve_data=True)],
widget=SwitchWidget(),
description=_('Allow users to download their ticket from the '
'conference homepage.'))
ticket_on_summary_page = BooleanField(_('Download from summary page'), [HiddenUnless('tickets_enabled',
preserve_data=True)],
widget=SwitchWidget(),
description=_('Allow users to download their ticket from the registration '
'summary page.'))
ticket_template_id = SelectField(_('Ticket template'), [HiddenUnless('tickets_enabled', preserve_data=True),
Optional()], coerce=int)
def __init__(self, *args, **kwargs):
event = kwargs.pop('event')
super(TicketsForm, self).__init__(*args, **kwargs)
default_tpl = get_default_template_on_category(event.category)
all_templates = set(event.designer_templates) | get_inherited_templates(event)
badge_templates = [(tpl.id, tpl.title) for tpl in all_templates
if tpl.type == TemplateType.badge and tpl != default_tpl]
# Show the default template first
badge_templates.insert(0, (default_tpl.id, '{} ({})'.format(default_tpl.title, _('Default category template'))))
self.ticket_template_id.choices = badge_templates
class ParticipantsDisplayForm(IndicoForm):
"""Form to customize the display of the participant list."""
json = JSONField()
def validate_json(self, field):
schema = {
'type': 'object',
'properties': {
'merge_forms': {'type': 'boolean'},
'participant_list_forms': {
'type': 'array',
'items': {'type': 'integer'}
},
'participant_list_columns': {
'type': 'array',
'items': {'type': 'string'}
}
}
}
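# illustrative payload accepted by this schema:
#   {"merge_forms": true, "participant_list_forms": [1, 2], "participant_list_columns": ["name", "email"]}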
try:
jsonschema.validate(field.data, schema)
except jsonschema.ValidationError as exc:
raise ValidationError(exc.message)
class ParticipantsDisplayFormColumnsForm(IndicoForm):
"""Form to customize the columns for a particular registration form on the participant list."""
json = JSONField()
def validate_json(self, field):
schema = {
'type': 'object',
'properties': {
'columns': {
'type': 'array',
'items': {'type': 'integer'}
}
}
}
try:
jsonschema.validate(field.data, schema)
except jsonschema.ValidationError as exc:
raise ValidationError(exc.message)
class RegistrationManagersForm(IndicoForm):
"""Form to manage users with privileges to modify registration-related items"""
managers = PrincipalListField(_('Registration managers'), allow_groups=True, allow_emails=True,
allow_external_users=True,
description=_('List of users allowed to modify registrations'),
event=lambda form: form.event)
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super(RegistrationManagersForm, self).__init__(*args, **kwargs)
class CreateMultipleRegistrationsForm(IndicoForm):
"""Form to create multiple registrations of Indico users at the same time."""
user_principals = PrincipalListField(_("Indico users"), [DataRequired()])
notify_users = BooleanField(_("Send e-mail notifications"),
default=True,
description=_("Notify the users about the registration."),
widget=SwitchWidget())
def __init__(self, *args, **kwargs):
self._regform = kwargs.pop('regform')
open_add_user_dialog = kwargs.pop('open_add_user_dialog', False)
super(CreateMultipleRegistrationsForm, self).__init__(*args, **kwargs)
self.user_principals.open_immediately = open_add_user_dialog
def validate_user_principals(self, field):
for user in field.data:
if user.registrations.filter_by(registration_form=self._regform, is_deleted=False).one_or_none():
raise ValidationError(_("A registration for {} already exists.").format(user.full_name))
class BadgeSettingsForm(IndicoForm):
template = SelectField(_('Template'))
save_values = BooleanField(_("Save values for next time"), widget=SwitchWidget(),
description=_("Save these values in the event settings"))
dashed_border = BooleanField(_("Dashed border around each badge"), widget=SwitchWidget(),
description=_("Display a dashed border around each badge"))
page_size = IndicoEnumSelectField(_('Page size'), enum=PageSize, sorted=True)
page_orientation = IndicoEnumSelectField(_('Page orientation'), enum=PageOrientation)
page_layout = IndicoEnumSelectField(_('Page layout'), enum=PageLayout,
description=_('The single sided (foldable) option is only available if the '
'template orientation is the same as the page orientation and '
'its width is exactly half of the page width'))
top_margin = FloatField(_('Top margin'), [InputRequired()])
left_margin = FloatField(_('Left margin'), [InputRequired()])
right_margin = FloatField(_('Right margin'), [InputRequired()])
bottom_margin = FloatField(_('Bottom margin'), [InputRequired()])
margin_columns = FloatField(_('Margin between columns'), [InputRequired()])
margin_rows = FloatField(_('Margin between rows'), [InputRequired()])
submitted = HiddenField()
def __init__(self, event, **kwargs):
all_templates = set(event.designer_templates) | get_inherited_templates(event)
badge_templates = [tpl for tpl in all_templates if tpl.type.name == 'badge']
signals.event.filter_selectable_badges.send(type(self), badge_templates=badge_templates)
tickets = kwargs.pop('tickets')
super(BadgeSettingsForm, self).__init__(**kwargs)
self.template.choices = sorted(((unicode(tpl.id), tpl.title)
for tpl in badge_templates
if tpl.is_ticket == tickets),
key=itemgetter(1))
def is_submitted(self):
return super(BadgeSettingsForm, self).is_submitted() and 'submitted' in request.form
class ImportRegistrationsForm(IndicoForm):
source_file = FileField(_("Source File"), [DataRequired()], accepted_file_types='.csv')
skip_moderation = BooleanField(_("Skip Moderation"), widget=SwitchWidget(), default=True,
description=_("If enabled, the registration will be immediately accepted"))
notify_users = BooleanField(_("E-mail users"), widget=SwitchWidget(),
description=_("Whether the imported users should receive an e-mail notification"))
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
super(ImportRegistrationsForm, self).__init__(*args, **kwargs)
if not self.regform.moderation_enabled:
del self.skip_moderation
|
python
|
#!/usr/bin/env python
# import necessary modules
from lxml import html
from lxml import etree
import requests
# top-level domain
parent_domain = 'http://trevecca.smartcatalogiq.com'
# parent page showing the programs of study
parent_page_url = parent_domain + '/en/2015-2016/University-Catalog/Programs-of-Study'
parent_page = requests.get(parent_page_url)
parent_tree = html.fromstring(parent_page.content.replace(' ',''))
# get list of program names and program URLs
program_names = parent_tree.xpath('//div[@id="sc-program-links"]//a//text()')
program_links = parent_tree.xpath('//div[@id="sc-program-links"]//a//@href')
# print opening of web page
print '<!DOCTYPE html>'
print '<html class="no-js" lang="en-US" itemscope itemtype="http://schema.org/CollegeOrUniversity">'
# loop over programs
for program_idx in range(len(program_names)):
# skip display of Four-Year plans
if program_names[program_idx].find("Four-Year")<0 and program_names[program_idx].find("Four Year")<0 and program_names[program_idx].find("Five-Year")<0 and program_names[program_idx].find("Program of Study")<0:
program_page_url = parent_domain + program_links[program_idx]
program_page = requests.get(program_page_url)
program_tree = html.fromstring(program_page.content.replace(' ',''))
program_div = program_tree.xpath('//div[@id="main"]')
if program_idx==0:
print '<head>'
print '<title>' + parent_domain + '</title>'
# css style links
program_style_links = program_tree.xpath('//head/link')
for program_style_link in program_style_links:
print(etree.tostring(program_style_link, pretty_print=True).replace('href="/', 'href="' + parent_domain + '/'))
# add page breaks before H1 (program names)
print '<style type="text/css">'
print '@media print {'
print ' h1:not([name=first]){page-break-before: always;}'
print '}'
print '@media screen {'
print ' h1:not([name=first]){page-break-before: always;}'
print '}'
print '</style>'
print '</head>'
print '<body>'
print(etree.tostring(program_div[0],pretty_print=True).replace('href="/', 'href="' + parent_domain + '/').replace('<h1>', '<h1 name="first">', 1))
else:
print(etree.tostring(program_div[0],pretty_print=True).replace('href="/', 'href="' + parent_domain + '/'))
# uncomment for quicker debug
#if program_idx>10:
# break
# print closing of body and page
print '</body>'
print '</html>'
|
python
|
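# Reads five integers from stdin: (x1, y1), (x2, y2) and x3, with y3 reusing y2.
# Since the segment (x2, y2)-(x3, y3) is horizontal, |x3 - x2| is the base length and
# |y1 - y2| the height, so the area is base * height / 2.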
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
x3 = int(input())
y3 = y2
a = abs(x3 - x2)
h = abs(y1 - y2)
s = a * h / 2
print(s)
|
python
|
#!/usr/bin/python
import serial
import sys
import time
import string
from serial import SerialException
import RPi.GPIO as gpio
class SerialExpander:
def __init__(self, port='/dev/ttyS0', baud=9600, timeout=0, **kwargs):
self.__port = port
self.__baud = baud
self.ser = serial.Serial(self.__port, self.__baud, timeout=timeout)
# Dictionary for expander addresses as accessed through GPIO
self.expanderAddr = {'P1': [0,0,0],
'P2': [0,0,1],
'P3': [0,1,0],
'P4': [0,1,1],
'P5': [1,0,0],
'P6': [1,0,1],
'P7': [1,1,0],
'P8': [1,1,1]
}
# Configuration for Lutra Airboats
self.config()
def config(self):
"""
Configuration specific for Atlas sensors in LSA's Lutra Airboats.
Namely sets GPIO ports, disable acknowledge (ACK, i.e., OK) messages for sensors
and turns off continuous sensor readings
"""
# Setup for GPIO
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
self.gpioPins = [24, 23, 18] # [S3, S2, S1]
for pin in self.gpioPins:
gpio.setup(pin, gpio.OUT)
# Turn off ACK messages and continuous reading for all devices
for port in self.expanderAddr:
self.ser.flush()
self.send_cmd("*OK,0", port) # Disable OK messages
time.sleep(0.01) # Wait 10 ms before next instruction
self.ser.flush()
self.send_cmd("C,0", port) # Disable continuous reading mode
time.sleep(0.01) # Wait 10 ms before next instruction
# Return to default port "0,0,0" (or "P1")
self.select_SE_port("P1")
def select_SE_port(self, port):
"""
Selects Serial Expander port based on address table in self.expanderAddr dictionary
"""
for i, pin in enumerate(self.gpioPins):
gpio.output(pin, self.expanderAddr[port][i])
def change_port(self, port):
self.__port = port
self.connect()
self.config()
def change_baud(self, baud):
self.__baud = baud
self.connect()
self.config()
def connect(self):
self.disconnect()
while True:
if self.ser.isOpen():
return True
self.ser.open()
def disconnect(self):
while True:
if not self.ser.isOpen():
return True
self.ser.close()
def read_line(self):
"""
taken from the ftdi library and modified to
use the ezo line separator "\r"
"""
lsl = len('\r')
line_buffer = []
while True:
next_char = self.ser.read(1)
if next_char == '':
break
line_buffer.append(next_char)
if (len(line_buffer) >= lsl and
line_buffer[-lsl:] == list('\r')):
break
return ''.join(line_buffer)
def read_lines(self):
"""
also taken from ftdi lib to work with modified readline function
"""
lines = []
try:
while True:
line = self.read_line()
if not line:
break
self.ser.flushInput()
lines.append(line)
return lines
except SerialException as e:
print( "Error, ", e)
return None
def send_cmd(self, cmd, port):
"""
Send command to the Atlas Sensor.
Before sending, add Carriage Return at the end of the command.
:param port:
:param cmd:
:return:
"""
self.select_SE_port(port)
buf = cmd + "\r" # add carriage return
try:
self.ser.write(buf.encode('utf-8'))
return True
except SerialException as e:
print ("Error, ", e)
return None
def get_data(self, port):
"""
Gets a single reading from sensor in selected port
"""
# Clear previous data
self.ser.flush()
# Send request for data
self.send_cmd("R", port)
time.sleep(1)
lines = self.read_lines()
return lines[0]
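# Minimal usage sketch (not part of the original module): assumes a Raspberry Pi with the
# serial expander wired to BCM pins 24/23/18 and an Atlas EZO sensor on expander port "P1".
if __name__ == '__main__':
    expander = SerialExpander(port='/dev/ttyS0', baud=9600)
    try:
        # request a single reading from the sensor connected to expander port P1
        print(expander.get_data("P1"))
    finally:
        expander.disconnect()
        gpio.cleanup()  # release the GPIO pins claimed in config()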
|
python
|
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
content = f.read()
setup(
name='Wechatbot',
version='0.0.1',
description='Wechatbot project',
long_description=readme,
install_requires=['itchat==1.3.10', 'requests==2.19.1'],
author='Kevin Zhao',
author_email='[email protected]',
url='https://github.com/zhaomy2000/flyingshui/wechatbot',
license=content,
packages=find_packages()
)
|
python
|
TLS_VERSIONING = "1.0.23"
TLS_DATE = "12 March 2019"
|
python
|
"""
This module deals with the definition of all the database models needed for the application
"""
from app import db, app
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from math import cos, sin, atan2, sqrt, radians, degrees
class Lens(db.Model):
""" Represent a lens """
__tablename__ = 'lenses'
id = db.Column(db.Integer, primary_key=True)
display_name = db.Column(db.String(256))
focal_range = db.Column(db.String(64))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def __repr__(self):
        return 'Lens: %r' % (self.display_name)
class BetaCode(db.Model):
""" Table containing the beta codes """
__tablename__ = 'betacodes'
id = db.Column(db.Integer, primary_key=True)
code = db.Column(db.String(16))
def __repr__(self):
return 'Code: %r' % (self.code)
class User(db.Model):
""" Represents a user of the service """
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True)
password = db.Column(db.String(128))
fullname = db.Column(db.String(256))
email = db.Column(db.String(256))
guides = db.relationship('Guide', backref='owner', lazy='dynamic')
lenses = db.relationship('Lens', backref='owner', lazy='dynamic')
def __repr__(self):
return 'User: %r' % (self.username)
def hash_password(self, password):
""" Encrypts the given password before saving it in the entry """
self.password = pwd_context.encrypt(password)
def verify_password(self, password):
""" Validate the given password against the DB one """
return pwd_context.verify(password, self.password)
def generate_auth_token(self):
""" Generate a JWT token for this account """
token = Serializer(
app.config['API_SECRET_KEY'],
expires_in=app.config['JWT_TOKEN_EXPIRATION']
)
return token.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
""" Check that the token received is still valid """
        # In case the token is so malformed that it is None
if not token:
raise BadSignatureToken
gen_token = Serializer(app.config['API_SECRET_KEY'])
try:
data = gen_token.loads(token)
except SignatureExpired:
raise ExpiredToken() # valid token, but expired
except BadSignature:
raise BadSignatureToken() # invalid token
user = User.query.get(data['id'])
return user
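# A minimal sketch (illustrative only, not part of the models) of the token
# round trip implemented above:
#   user = User.query.first()
#   token = user.generate_auth_token()            # signed, time-limited token
#   assert User.verify_auth_token(token).id == user.id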
class ExpiredToken(Exception):
""" Exception raised when jwt token is expired """
pass
class BadSignatureToken(Exception):
""" Exception raised when jwt token is invalid """
pass
""" Link for many-to-many relationship between photos and guides """
photo_guide = db.Table(
'photo_guide',
db.Column('guide_id', db.Integer, db.ForeignKey('guides.id')),
db.Column('photo_id', db.Integer, db.ForeignKey('photos.id'))
)
class Guide(db.Model):
""" Represents a travel guide """
__tablename__ = 'guides'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(256))
creation = db.Column(db.DateTime, default=db.func.now())
last_edited = db.Column(db.DateTime, default=db.func.now())
visibility = db.Column(db.SmallInteger, default=0)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
photos = db.relationship('Photo', backref='guides', lazy='dynamic', secondary=photo_guide)
number_photo = 0
def __repr__(self):
return 'Guide: %r' % (self.title)
@staticmethod
def getFeaturedLocation(guide):
""" Return the featured image """
photos = guide.photos.all()
x = 0
y = 0
z = 0
size = 0
for photo in photos:
if photo.latitude:
lat = radians(float(photo.latitude))
lon = radians(float(photo.longitude))
x += cos(lat) * cos(lon)
y += cos(lat) * sin(lon)
z += sin(lat)
size+=1
        if size == 0:
return None
x = float(x / size)
y = float(y / size)
z = float(z / size)
return {
'latitude': degrees(atan2(z, sqrt(x * x + y * y))),
'longitude': degrees(atan2(y, x))
}
# return atan2(z, sqrt(x * x + y * y)), atan2(y, x)
# for photo in photos:
# if photo.latitude:
# return {
# 'latitude': photo.latitude,
# 'longitude': photo.longitude
# }
# return None
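    # Worked example of the averaging above (values rounded, for illustration
    # only): two photos at (lat, lon) = (0, 0) and (0, 90) give unit vectors
    # (1, 0, 0) and (0, 1, 0); their mean is (0.5, 0.5, 0), which converts back
    # to latitude 0 and longitude 45 -- the midpoint along the equator.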
@staticmethod
def getFeaturedImage(guide):
""" Return the featured image """
if guide.photos.first():
return guide.photos.first().url
return None
@staticmethod
def getNumberPhoto(guide):
""" Return the featured image """
return len(guide.photos.all())
class Photo(db.Model):
"""
Represent a photo stored in an external service (flickr/500px)
Photo are linked in a many to many relationship to the guides
"""
__tablename__ = 'photos'
id = db.Column(db.Integer, primary_key=True)
origin = db.Column(db.Enum('Flickr', '500px', name='service_origin'))
title = db.Column(db.Text())
author = db.Column(db.String(256))
flickr_id = db.Column(db.String(16))
url = db.Column(db.Text())
latitude = db.Column(db.String(16))
longitude = db.Column(db.String(16))
lensFocal = db.Column(db.String(16))
flash_fired = db.Column(db.SmallInteger, default=0)
exposure = db.Column(db.String(16))
def __repr__(self):
return 'Photo: %r' % (self.id)
|
python
|
from WConio2 import textcolor, clrscr, getch, setcursortype
import ctypes
ctypes.windll.kernel32.SetConsoleTitleW("n Numbers HCF")
def hcf(n):
a, b, r = n[0], n[1], 0
for x in range(0, len(n) - 1):
while a != 0:
r = b % a
b = a
a = r
a = b
if x + 2 == len(n): break
b = n[x + 2]
return a
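# Worked example (illustrative): hcf([12, 18, 24]) reduces the list pairwise
# with the Euclidean algorithm -- gcd(12, 18) = 6, then gcd(6, 24) = 6 -- and
# returns 6.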
def main():
textcolor(11)
clrscr()
print("\t\tAfter Entering Enough Numbers Enter last Value as 0 to Find HCF\n\n")
num, x, p, ch = [], 0, [], '\0'
textcolor(10)
while True:
try:
x = float(input("Enter a Number="))
except:
break
if not x:
break
p.append(str(int(x)) if x == int(x) else str(x))
x = abs(x)
while x != int(x):
x *= 10
num.append(int(x))
if len(p) == 1 or not len(p):
return
setcursortype(0)
clrscr()
textcolor(12)
print("\n\nHCF of These Entered Numbers:")
textcolor(15)
print(' , '.join(p))
textcolor(14)
print("Is =", hcf(num))
textcolor(15)
print('\nPress Enter Key to Continue')
while ch != '\r':
ch = getch()[1]
return
main()
|
python
|
# Copyright (c) 2019 AT&T Intellectual Property.
# Copyright (c) 2018-2019 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This source code is part of the near-RT RIC (RAN Intelligent Controller)
# platform project (RICP).
#
"""The module provides implementation of Shared Data Layer (SDL) database backend interface."""
import contextlib
import threading
from typing import (Callable, Dict, Set, List, Optional, Tuple, Union)
import zlib
import redis
from redis import Redis
from redis.sentinel import Sentinel
from redis.lock import Lock
from redis._compat import nativestr
from redis import exceptions as redis_exceptions
from ricsdl.configuration import _Configuration
from ricsdl.exceptions import (
RejectedByBackend,
NotConnected,
BackendError
)
from .dbbackend_abc import DbBackendAbc
from .dbbackend_abc import DbBackendLockAbc
@contextlib.contextmanager
def _map_to_sdl_exception():
"""Translates known redis exceptions into SDL exceptions."""
try:
yield
except(redis_exceptions.ResponseError) as exc:
raise RejectedByBackend("SDL backend rejected the request: {}".
format(str(exc))) from exc
except(redis_exceptions.ConnectionError, redis_exceptions.TimeoutError) as exc:
raise NotConnected("SDL not connected to backend: {}".
format(str(exc))) from exc
except(redis_exceptions.RedisError) as exc:
raise BackendError("SDL backend failed to process the request: {}".
format(str(exc))) from exc
class PubSub(redis.client.PubSub):
def __init__(self, event_separator, connection_pool, ignore_subscribe_messages=False):
super().__init__(connection_pool, shard_hint=None, ignore_subscribe_messages=ignore_subscribe_messages)
self.event_separator = event_separator
def handle_message(self, response, ignore_subscribe_messages=False):
"""
Parses a pub/sub message. If the channel or pattern was subscribed to
with a message handler, the handler is invoked instead of a parsed
message being returned.
Adapted from: https://github.com/andymccurdy/redis-py/blob/master/redis/client.py
"""
message_type = nativestr(response[0])
if message_type == 'pmessage':
message = {
'type': message_type,
'pattern': response[1],
'channel': response[2],
'data': response[3]
}
elif message_type == 'pong':
message = {
'type': message_type,
'pattern': None,
'channel': None,
'data': response[1]
}
else:
message = {
'type': message_type,
'pattern': None,
'channel': response[1],
'data': response[2]
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
if message_type == 'punsubscribe':
pattern = response[1]
if pattern in self.pending_unsubscribe_patterns:
self.pending_unsubscribe_patterns.remove(pattern)
self.patterns.pop(pattern, None)
else:
channel = response[1]
if channel in self.pending_unsubscribe_channels:
self.pending_unsubscribe_channels.remove(channel)
self.channels.pop(channel, None)
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
if message_type == 'pmessage':
handler = self.patterns.get(message['pattern'], None)
else:
handler = self.channels.get(message['channel'], None)
if handler:
# Need to send only channel and notification instead of raw
# message
message_channel = self._strip_ns_from_bin_key('', message['channel'])
message_data = message['data'].decode('utf-8')
messages = message_data.split(self.event_separator)
handler(message_channel, messages)
return message_channel, messages
elif message_type != 'pong':
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
return None
return message
@classmethod
def _strip_ns_from_bin_key(cls, ns: str, nskey: bytes) -> str:
try:
redis_key = nskey.decode('utf-8')
except UnicodeDecodeError as exc:
msg = u'Namespace %s key conversion to string failed: %s' % (ns, str(exc))
raise RejectedByBackend(msg)
nskey = redis_key.split(',', 1)
if len(nskey) != 2:
msg = u'Namespace %s key:%s has no namespace prefix' % (ns, redis_key)
raise RejectedByBackend(msg)
return nskey[1]
class RedisBackend(DbBackendAbc):
"""
A class providing an implementation of database backend of Shared Data Layer (SDL), when
backend database solution is Redis.
Args:
configuration (_Configuration): SDL configuration, containing credentials to connect to
Redis database backend.
"""
def __init__(self, configuration: _Configuration) -> None:
super().__init__()
self.next_client_event = 0
self.event_separator = configuration.get_event_separator()
self.clients = list()
with _map_to_sdl_exception():
self.clients = self.__create_redis_clients(configuration)
def __del__(self):
self.close()
def __str__(self):
out = {"DB type": "Redis"}
for i, r in enumerate(self.clients):
out["Redis client[" + str(i) + "]"] = str(r)
return str(out)
def is_connected(self):
is_connected = True
with _map_to_sdl_exception():
for c in self.clients:
if not c.redis_client.ping():
is_connected = False
break
return is_connected
def close(self):
for c in self.clients:
c.redis_client.close()
def set(self, ns: str, data_map: Dict[str, bytes]) -> None:
db_data_map = self.__add_data_map_ns_prefix(ns, data_map)
with _map_to_sdl_exception():
self.__getClient(ns).mset(db_data_map)
def set_if(self, ns: str, key: str, old_data: bytes, new_data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command('SETIE', db_key, new_data, old_data)
def set_if_not_exists(self, ns: str, key: str, data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
with _map_to_sdl_exception():
return self.__getClient(ns).setnx(db_key, data)
def get(self, ns: str, keys: List[str]) -> Dict[str, bytes]:
ret = dict()
db_keys = self.__add_keys_ns_prefix(ns, keys)
with _map_to_sdl_exception():
values = self.__getClient(ns).mget(db_keys)
for idx, val in enumerate(values):
# return only key values, which has a value
if val is not None:
ret[keys[idx]] = val
return ret
def find_keys(self, ns: str, key_pattern: str) -> List[str]:
db_key_pattern = self.__add_key_ns_prefix(ns, key_pattern)
with _map_to_sdl_exception():
ret = self.__getClient(ns).keys(db_key_pattern)
return self.__strip_ns_from_bin_keys(ns, ret)
def find_and_get(self, ns: str, key_pattern: str) -> Dict[str, bytes]:
# todo: replace below implementation with redis 'NGET' module
ret = dict() # type: Dict[str, bytes]
with _map_to_sdl_exception():
matched_keys = self.find_keys(ns, key_pattern)
if matched_keys:
ret = self.get(ns, matched_keys)
return ret
def remove(self, ns: str, keys: List[str]) -> None:
db_keys = self.__add_keys_ns_prefix(ns, keys)
with _map_to_sdl_exception():
self.__getClient(ns).delete(*db_keys)
def remove_if(self, ns: str, key: str, data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command('DELIE', db_key, data)
def add_member(self, ns: str, group: str, members: Set[bytes]) -> None:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
self.__getClient(ns).sadd(db_key, *members)
def remove_member(self, ns: str, group: str, members: Set[bytes]) -> None:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
self.__getClient(ns).srem(db_key, *members)
def remove_group(self, ns: str, group: str) -> None:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
self.__getClient(ns).delete(db_key)
def get_members(self, ns: str, group: str) -> Set[bytes]:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
return self.__getClient(ns).smembers(db_key)
def is_member(self, ns: str, group: str, member: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
return self.__getClient(ns).sismember(db_key, member)
def group_size(self, ns: str, group: str) -> int:
db_key = self.__add_key_ns_prefix(ns, group)
with _map_to_sdl_exception():
return self.__getClient(ns).scard(db_key)
def set_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
data_map: Dict[str, bytes]) -> None:
db_data_map = self.__add_data_map_ns_prefix(ns, data_map)
channels_and_events_prepared = []
total_events = 0
channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command(
"MSETMPUB",
len(db_data_map),
total_events,
*[val for data in db_data_map.items() for val in data],
*channels_and_events_prepared,
)
def set_if_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]], key: str,
old_data: bytes, new_data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
channels_and_events_prepared = []
channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
ret = self.__getClient(ns).execute_command("SETIEMPUB", db_key, new_data, old_data,
*channels_and_events_prepared)
return ret == b"OK"
def set_if_not_exists_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
key: str, data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
ret = self.__getClient(ns).execute_command("SETNXMPUB", db_key, data,
*channels_and_events_prepared)
return ret == b"OK"
def remove_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
keys: List[str]) -> None:
db_keys = self.__add_keys_ns_prefix(ns, keys)
channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command(
"DELMPUB",
len(db_keys),
total_events,
*db_keys,
*channels_and_events_prepared,
)
def remove_if_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]], key: str,
data: bytes) -> bool:
db_key = self.__add_key_ns_prefix(ns, key)
channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
ret = self.__getClient(ns).execute_command("DELIEMPUB", db_key, data,
*channels_and_events_prepared)
return bool(ret)
def remove_all_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]]) -> None:
keys = self.__getClient(ns).keys(self.__add_key_ns_prefix(ns, "*"))
channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
with _map_to_sdl_exception():
return self.__getClient(ns).execute_command(
"DELMPUB",
len(keys),
total_events,
*keys,
*channels_and_events_prepared,
)
def subscribe_channel(self, ns: str, cb: Callable[[str, List[str]], None],
channels: List[str]) -> None:
channels = self.__add_keys_ns_prefix(ns, channels)
for channel in channels:
with _map_to_sdl_exception():
redis_ctx = self.__getClientConn(ns)
redis_ctx.redis_pubsub.subscribe(**{channel: cb})
if not redis_ctx.pubsub_thread.is_alive() and redis_ctx.run_in_thread:
redis_ctx.pubsub_thread = redis_ctx.redis_pubsub.run_in_thread(sleep_time=0.001,
daemon=True)
def unsubscribe_channel(self, ns: str, channels: List[str]) -> None:
channels = self.__add_keys_ns_prefix(ns, channels)
for channel in channels:
with _map_to_sdl_exception():
self.__getClientConn(ns).redis_pubsub.unsubscribe(channel)
def start_event_listener(self) -> None:
redis_ctxs = self.__getClientConns()
for redis_ctx in redis_ctxs:
if redis_ctx.pubsub_thread.is_alive():
raise RejectedByBackend("Event loop already started")
if redis_ctx.redis_pubsub.subscribed and len(redis_ctx.redis_client.pubsub_channels()) > 0:
redis_ctx.pubsub_thread = redis_ctx.redis_pubsub.run_in_thread(sleep_time=0.001, daemon=True)
redis_ctx.run_in_thread = True
def handle_events(self) -> Optional[Tuple[str, List[str]]]:
if self.next_client_event >= len(self.clients):
self.next_client_event = 0
redis_ctx = self.clients[self.next_client_event]
self.next_client_event += 1
if redis_ctx.pubsub_thread.is_alive() or redis_ctx.run_in_thread:
raise RejectedByBackend("Event loop already started")
try:
return redis_ctx.redis_pubsub.get_message(ignore_subscribe_messages=True)
except RuntimeError:
return None
def __create_redis_clients(self, config):
clients = list()
cfg_params = config.get_params()
if cfg_params.db_cluster_addr_list is None:
clients.append(self.__create_legacy_redis_client(cfg_params))
else:
for addr in cfg_params.db_cluster_addr_list.split(","):
client = self.__create_redis_client(cfg_params, addr)
clients.append(client)
return clients
def __create_legacy_redis_client(self, cfg_params):
return self.__create_redis_client(cfg_params, cfg_params.db_host)
def __create_redis_client(self, cfg_params, addr):
new_sentinel = None
new_redis = None
if cfg_params.db_sentinel_port is None:
new_redis = Redis(host=addr, port=cfg_params.db_port, db=0, max_connections=20)
else:
sentinel_node = (addr, cfg_params.db_sentinel_port)
master_name = cfg_params.db_sentinel_master_name
new_sentinel = Sentinel([sentinel_node])
new_redis = new_sentinel.master_for(master_name)
new_redis.set_response_callback('SETIE', lambda r: r and nativestr(r) == 'OK' or False)
new_redis.set_response_callback('DELIE', lambda r: r and int(r) == 1 or False)
redis_pubsub = PubSub(self.event_separator, new_redis.connection_pool, ignore_subscribe_messages=True)
pubsub_thread = threading.Thread(target=None)
run_in_thread = False
return _RedisConn(new_redis, redis_pubsub, pubsub_thread, run_in_thread)
def __getClientConns(self):
return self.clients
def __getClientConn(self, ns):
clients_cnt = len(self.clients)
client_id = self.__get_hash(ns) % clients_cnt
return self.clients[client_id]
def __getClient(self, ns):
clients_cnt = len(self.clients)
client_id = 0
if clients_cnt > 1:
client_id = self.__get_hash(ns) % clients_cnt
return self.clients[client_id].redis_client
@classmethod
def __get_hash(cls, str):
return zlib.crc32(str.encode())
@classmethod
def __add_key_ns_prefix(cls, ns: str, key: str):
return '{' + ns + '},' + key
@classmethod
def __add_keys_ns_prefix(cls, ns: str, keylist: List[str]) -> List[str]:
ret_nskeys = []
for k in keylist:
ret_nskeys.append('{' + ns + '},' + k)
return ret_nskeys
@classmethod
def __add_data_map_ns_prefix(cls, ns: str, data_dict: Dict[str, bytes]) -> Dict[str, bytes]:
ret_nsdict = {}
for key, val in data_dict.items():
ret_nsdict['{' + ns + '},' + key] = val
return ret_nsdict
@classmethod
def __strip_ns_from_bin_keys(cls, ns: str, nskeylist: List[bytes]) -> List[str]:
ret_keys = []
for k in nskeylist:
try:
redis_key = k.decode("utf-8")
except UnicodeDecodeError as exc:
msg = u'Namespace %s key conversion to string failed: %s' % (ns, str(exc))
raise RejectedByBackend(msg)
nskey = redis_key.split(',', 1)
if len(nskey) != 2:
msg = u'Namespace %s key:%s has no namespace prefix' % (ns, redis_key)
raise RejectedByBackend(msg)
ret_keys.append(nskey[1])
return ret_keys
def _prepare_channels(self, ns: str,
channels_and_events: Dict[str, List[str]]) -> Tuple[List, int]:
channels_and_events_prepared = []
for channel, events in channels_and_events.items():
one_channel_join_events = None
for event in events:
if one_channel_join_events is None:
channels_and_events_prepared.append(self.__add_key_ns_prefix(ns, channel))
one_channel_join_events = event
else:
one_channel_join_events = one_channel_join_events + self.event_separator + event
channels_and_events_prepared.append(one_channel_join_events)
pairs_cnt = int(len(channels_and_events_prepared) / 2)
return channels_and_events_prepared, pairs_cnt
def get_redis_connection(self, ns: str):
"""Return existing Redis database connection valid for the namespace."""
return self.__getClient(ns)
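# A minimal sketch (illustrative only; the separator string below is an
# assumption, the real one comes from the SDL configuration) of how
# _prepare_channels() flattens the channel/event mapping for the *MPUB
# commands. With an event separator of "&&" and namespace "ns",
#   {'ch1': ['ev1', 'ev2'], 'ch2': ['ev3']}
# becomes
#   ['{ns},ch1', 'ev1&&ev2', '{ns},ch2', 'ev3']   # pairs_cnt = 2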
class _RedisConn:
"""
Internal class container to hold redis client connection
"""
def __init__(self, redis_client, pubsub, pubsub_thread, run_in_thread):
self.redis_client = redis_client
self.redis_pubsub = pubsub
self.pubsub_thread = pubsub_thread
self.run_in_thread = run_in_thread
def __str__(self):
return str(
{
"Client": repr(self.redis_client),
"Subscrions": self.redis_pubsub.subscribed,
"PubSub thread": repr(self.pubsub_thread),
"Run in thread": self.run_in_thread,
}
)
class RedisBackendLock(DbBackendLockAbc):
"""
A class providing an implementation of database backend lock of Shared Data Layer (SDL), when
backend database solution is Redis.
Args:
ns (str): Namespace under which this lock is targeted.
name (str): Lock name, identifies the lock key in a Redis database backend.
expiration (int, float): Lock expiration time after which the lock is removed if it hasn't
been released earlier by a 'release' method.
redis_backend (RedisBackend): Database backend object containing connection to Redis
database.
"""
lua_get_validity_time = None
# KEYS[1] - lock name
# ARGS[1] - token
# return < 0 in case of failure, otherwise return lock validity time in milliseconds.
LUA_GET_VALIDITY_TIME_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token then
return -10
end
if token ~= ARGV[1] then
return -11
end
return redis.call('pttl', KEYS[1])
"""
def __init__(self, ns: str, name: str, expiration: Union[int, float],
redis_backend: RedisBackend) -> None:
super().__init__(ns, name)
self.__redis = redis_backend.get_redis_connection(ns)
with _map_to_sdl_exception():
redis_lockname = '{' + ns + '},' + self._lock_name
self.__redis_lock = Lock(redis=self.__redis, name=redis_lockname, timeout=expiration)
self._register_scripts()
def __str__(self):
return str(
{
"lock DB type": "Redis",
"lock namespace": self._ns,
"lock name": self._lock_name,
"lock status": self._lock_status_to_string()
}
)
def acquire(self, retry_interval: Union[int, float] = 0.1,
retry_timeout: Union[int, float] = 10) -> bool:
succeeded = False
self.__redis_lock.sleep = retry_interval
with _map_to_sdl_exception():
succeeded = self.__redis_lock.acquire(blocking_timeout=retry_timeout)
return succeeded
def release(self) -> None:
with _map_to_sdl_exception():
self.__redis_lock.release()
def refresh(self) -> None:
with _map_to_sdl_exception():
self.__redis_lock.reacquire()
def get_validity_time(self) -> Union[int, float]:
validity = 0
if self.__redis_lock.local.token is None:
msg = u'Cannot get validity time of an unlocked lock %s' % self._lock_name
raise RejectedByBackend(msg)
with _map_to_sdl_exception():
validity = self.lua_get_validity_time(keys=[self.__redis_lock.name],
args=[self.__redis_lock.local.token],
client=self.__redis)
if validity < 0:
msg = (u'Getting validity time of a lock %s failed with error code: %d'
% (self._lock_name, validity))
raise RejectedByBackend(msg)
ftime = validity / 1000.0
if ftime.is_integer():
return int(ftime)
return ftime
def _register_scripts(self):
cls = self.__class__
client = self.__redis
if cls.lua_get_validity_time is None:
cls.lua_get_validity_time = client.register_script(cls.LUA_GET_VALIDITY_TIME_SCRIPT)
def _lock_status_to_string(self) -> str:
try:
if self.__redis_lock.locked():
if self.__redis_lock.owned():
return 'locked'
return 'locked by someone else'
return 'unlocked'
except(redis_exceptions.RedisError) as exc:
return f'Error: {str(exc)}'
|
python
|
# Copyright 2014 Facebook, Inc.
# Modified by Vivek Menon
from facebookads.adobjects.adaccount import AdAccount
from facebookads.adobjects.campaign import Campaign
from facebookads.adobjects.adset import AdSet
from facebookads.adobjects.adcreative import AdCreative
from facebookads.adobjects.ad import Ad
from facebookads.adobjects.adpreview import AdPreview
from facebookads.api import FacebookAdsApi
# Access Details
access_token = ''
ad_account_id = ''
app_secret = ''
page_id = ''
FacebookAdsApi.init(access_token=access_token)
campaign = AdAccount(ad_account_id).create_campaign(
    fields=[],
params={
'name': 'My Campaign',
'buying_type': 'AUCTION',
'objective': 'PAGE_LIKES',
'status': 'PAUSED',
},
)
print('campaign', campaign)
campaign_id = campaign.get_id()
print('campaign_id:', campaign_id, '\n')
fields = [
]
params = {
'name': 'My AdSet',
'optimization_goal': 'PAGE_LIKES',
'billing_event': 'IMPRESSIONS',
'bid_amount': '20',
'promoted_object': {'page_id': page_id},
'daily_budget': '1000',
'campaign_id': campaign_id,
'targeting': {'geo_locations': {'countries': ['US']}},
'status': 'PAUSED',
}
ad_set = AdAccount(ad_account_id).create_ad_set(
fields=fields,
params=params,
)
print('ad_set', ad_set)
ad_set_id = ad_set.get_id()
print('ad_set_id:', ad_set_id, '\n')
fields = [
]
params = {
'name': 'My Creative',
'object_id': page_id,
'title': 'My Page Like Ad',
'body': 'Like My Page',
'image_url': 'http://www.facebookmarketingdevelopers.com/static/images/resource_1.jpg',
}
creative = AdAccount(ad_account_id).create_ad_creative(
fields=fields,
params=params,
)
print('creative', creative)
creative_id = creative.get_id()
print('creative_id:', creative_id, '\n')
fields = [
]
params = {
'name': 'My Ad',
'adset_id': ad_set_id,
'creative': {'creative_id': creative_id},
'status': 'PAUSED',
}
ad = AdAccount(ad_account_id).create_ad(
fields=fields,
params=params,
)
print('ad', ad)
ad_id = ad.get_id()
print('ad_id:', ad_id, '\n')
fields = [
]
params = {
'ad_format': 'DESKTOP_FEED_STANDARD',
}
print(Ad(ad_id).get_previews(
    fields=fields,
    params=params,
))
|
python
|
def solution(s):
word_dict = {}
for element in s.lower():
word_dict[element] = word_dict.get(element, 0) + 1
if word_dict.get('p', 0) == word_dict.get('y', 0):
return True
return False
if __name__ == '__main__':
s = 'pPoooyY'
print(solution(s))
"""
def solution(s):
return s.lower().count('p') == s.lower().count('y')
"""
|
python
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
import http.client
import mimetypes
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, cint
from frappe.utils.background_jobs import enqueue
from frappe import msgprint
from frappe.model.document import Document
import datetime
from frappe.utils import cint, flt, cstr, now
from datetime import date, datetime
from fuzzywuzzy import process
from fuzzywuzzy import fuzz
class SMSApi(Document):
pass
@frappe.whitelist()
def send_message(payload_to_send):
msgprint(payload_to_send)
#payload_to_use = json.loads(payload_to_send)
msgparameters = []
msgparameters.append(payload_to_send)
conn = http.client.HTTPSConnection("api.onfonmedia.co.ke")
payload ={}
payload["SenderId"] ="MTRH"
payload["MessageParameters"] = msgparameters
"""[
{
"Number":number,
"Text":message,
}
]"""
payload["ApiKey"] = "69pJq6iTBSwfAaoL4BU7yHi361dGLkqQ1MJYHQF/lJI="
payload["ClientId"] ="8055c2c9-489b-4440-b761-a0cc27d1e119"
msgprint(payload)
headers ={}
    headers['Content-Type'] = 'application/json'
    headers['AccessKey'] = 'FKINNX9pwrBDzGHxgQ2EB97pXMz6vVgd'
headers['Cookie']= 'AWSALBTG=cWN78VX7OjvsWtCKpI8+ZTJuLfqNCOqRtmN6tRa4u47kdC/G4k7L3TdKrzftl6ni4LspFPErGdwg/iDlloajVm0LoGWChohiR07jljLMz/a8tduH+oHvptQVo1DgCplIyjCC+SyvnUjS2vrFiLN5E+OvP9KwWIjvmHjRiNJZSVJ4MageyKQ=; AWSALBTGCORS=cWN78VX7OjvsWtCKpI8+ZTJuLfqNCOqRtmN6tRa4u47kdC/G4k7L3TdKrzftl6ni4LspFPErGdwg/iDlloajVm0LoGWChohiR07jljLMz/a8tduH+oHvptQVo1DgCplIyjCC+SyvnUjS2vrFiLN5E+OvP9KwWIjvmHjRiNJZSVJ4MageyKQ='
conn.request("POST", "/v1/sms/SendBulkSMS", payload, headers)
res = conn.getresponse()
data = res.read()
#print(data.decode("utf-8"))
frappe.response["payload"] = payload
frappe.response["response"] =data
@frappe.whitelist()
def duplicate_checker(item_code):
item = frappe.db.get_value("Item",{"item_code":item_code},"item_name")
items = frappe.db.get_list('Item',
filters={
'disabled': "0",
'item_code':["NOT LIKE",item_code] #EXCLUDE THIS PARTICULAR ITEM
},
fields=['item_name','item_code','item_group'],
as_list=False
)
itemsarray =[]
itemdict={}
for row in items:
ratio = fuzz.token_sort_ratio(item,str(row.item_name))
itemcode = row.item_code
itemname =row.item_name
itemgroup =row.item_group
if ratio > 80:
itemdict["item_code"] = itemcode
itemdict["item_name"] = itemname
itemdict["item_category"] = itemgroup
itemdict["ratio"] = ratio
itemsarray.append(itemdict)
itemdict ={}
#payload = process.extract(item, itemsarray)
frappe.response["potential_duplicates"]=itemsarray
frappe.response["iteminquestion"] = item
return itemsarray
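# Illustrative note (not part of the API above): fuzz.token_sort_ratio sorts
# the words in both strings before comparing, so reordered names still match,
# e.g.
#   fuzz.token_sort_ratio("Paracetamol 500mg Tablets", "Tablets Paracetamol 500mg")  # -> 100
# which is why a ratio above 80 is treated as a potential duplicate above.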
@frappe.whitelist()
def canceldocuments(payload):
#payload_to_use = json.loads(payload)
items = frappe.db.get_list('Item',
filters={
'item_code':["NOT IN", ["ITM000299", "760000","ITM000173"]] #760000
},
fields=['name'],
as_list=False
)
myarr=[]
payload_to_use =[]
for item in items:
payload_to_use.append(str(item.name))
for lisitem in payload_to_use:
item_code = lisitem
#myarr.append(lisitem)
#frappe.db.set_value("Item",item_code,"disabled","1")
frappe.delete_doc("Item",item_code)
"""awards = frappe.db.get_list('Tender Quotation Award',
filters={
'docstatus': "1",
'item_code':item_code
},
fields=['name'],
as_list=False
)
for award in awards:
docname = award.name
frappe.db.set_value("Tender Quotation Award",docname,"docstatus","2")
frappe.delete_doc("Tender Quotation Award",docname)
#frappe.delete_doc("Item",item_code)"""
frappe.response["items"]=myarr
|
python
|
import warnings
from datetime import datetime
import pytest
import pandas as pd
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, custom_errors, create, conversion, conversion_rules
from mssql_dataframe.core.write import insert, _exceptions
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
self.insert = insert.insert(self.connection, autoadjust_sql_objects=True)
self.insert_meta = insert.insert(self.connection, include_metadata_timestamps=True, autoadjust_sql_objects=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
def test_insert_autoadjust_errors(sql):
table_name = "##test_insert_autoadjust_errors"
# create table with column for each conversion rule
columns = conversion_rules.rules['sql_type'].to_numpy()
columns = {'_'+x:x for x in columns}
sql.create.table(table_name, columns=columns)
# create dataframes for each conversion rule that should fail insert
boolean = [3]
exact_numeric = ['a', '2-1', 1.1, datetime.now()]
approximate_numeric = ['a', '2-1',datetime.now()]
date_time = ['a', 1, 1.1]
character_string = [1, datetime.now()]
dataframe = [
pd.DataFrame({'_bit': boolean}),
pd.DataFrame({'_tinyint': exact_numeric}),
pd.DataFrame({'_smallint': exact_numeric}),
pd.DataFrame({'_int': exact_numeric}),
pd.DataFrame({'_bigint': exact_numeric}),
pd.DataFrame({'_float': approximate_numeric}),
pd.DataFrame({'_time': date_time}),
pd.DataFrame({'_date': date_time}),
pd.DataFrame({'_datetime2': date_time}),
pd.DataFrame({'_varchar': character_string}),
pd.DataFrame({'_nvarchar': character_string}),
]
    # ensure all conversion rules are being tested
assert pd.Series(columns.keys()).isin([x.columns[0] for x in dataframe]).all()
for df in dataframe:
# check each row to infer to base pandas type
for row in df.index:
with pytest.raises(custom_errors.DataframeColumnInvalidValue):
sql.insert.insert(table_name, df.loc[[row]].infer_objects())
def test_insert_create_table(sql):
table_name = "##test_insert_create_table"
dataframe = pd.DataFrame(
{"ColumnA": [1, 2, 3], "ColumnB": ["06/22/2021", "06-22-2021", "2021-06-22"]}
)
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.insert_meta.insert(
table_name, dataframe=dataframe
)
assert len(warn) == 3
assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
assert "Creating table " + table_name in str(warn[0].message)
assert "Created table: " + table_name in str(warn[1].message)
assert "Creating column _time_insert in table " + table_name in str(
warn[2].message
)
schema,_ = conversion.get_schema(sql.connection, table_name)
result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
expected = pd.DataFrame(
{
"ColumnA": pd.Series([1, 2, 3], dtype="UInt8"),
"ColumnB": pd.Series(
[pd.Timestamp(year=2021, month=6, day=22)] * 3,
dtype="datetime64[ns]",
),
}
).set_index(keys="ColumnA")
assert result[expected.columns].equals(expected)
assert all(result["_time_insert"].notna())
def test_insert_create_table_indexpk(sql):
table_name = "##test_insert_create_table_indexpk"
dataframe = pd.DataFrame(
{"ColumnA": [1, 2, 3], "ColumnB": ["06/22/2021", "06-22-2021", "2021-06-22"]},
index=pd.Series([1,2,3], name='indexpk')
)
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.insert.insert(
table_name, dataframe=dataframe
)
assert len(warn) == 2
assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
assert "Creating table " + table_name in str(warn[0].message)
assert "Created table: " + table_name in str(warn[1].message)
schema,_ = conversion.get_schema(sql.connection, table_name)
assert schema.index[schema['pk_seq'].notna()].equals(pd.Index(['indexpk']))
def test_insert_add_column(sql):
table_name = "##test_insert_add_column"
sql.create.table(table_name, columns={"ColumnA": "TINYINT"})
dataframe = pd.DataFrame({"ColumnA": [1], "ColumnB": [2], "ColumnC": ["zzz"]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.insert_meta.insert(table_name, dataframe=dataframe)
assert len(warn) == 3
assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
assert (
str(warn[0].message)
== f"Creating column _time_insert in table {table_name} with data type DATETIME2."
)
assert (
str(warn[1].message)
== f"Creating column ColumnB in table {table_name} with data type tinyint."
)
assert (
str(warn[2].message)
== f"Creating column ColumnC in table {table_name} with data type varchar(3)."
)
schema,_ = conversion.get_schema(sql.connection, table_name)
result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
assert all(result["_time_insert"].notna())
def test_insert_alter_column_unchanged(sql):
table_name = "##test_insert_alter_column_unchanged"
sql.create.table(
table_name,
columns={"ColumnA": "TINYINT", "ColumnB": "VARCHAR(1)", "ColumnC": "TINYINT"},
)
dataframe = pd.DataFrame({"ColumnA": [1], "ColumnB": ["a"], "ColumnC": [1]})
failure = custom_errors.SQLInsufficientColumnSize(
"manually testing expection for ColumnB, ColumnC", ["ColumnB", "ColumnC"]
)
with pytest.raises(custom_errors.SQLRecastColumnUnchanged):
_exceptions.handle(
failure,
table_name,
dataframe,
updating_table=False,
autoadjust_sql_objects=sql.insert.autoadjust_sql_objects,
modifier=sql.insert._modify,
creator=sql.insert._create,
)
def test_insert_alter_column(sql):
table_name = "##test_insert_alter_column"
sql.create.table(
table_name,
columns={"ColumnA": "TINYINT", "ColumnB": "VARCHAR(1)", "ColumnC": "TINYINT"},
)
dataframe = pd.DataFrame({"ColumnA": [1], "ColumnB": ["aaa"], "ColumnC": [100000]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.insert_meta.insert(table_name, dataframe=dataframe)
assert len(warn) == 3
assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
assert (
str(warn[0].message)
== f"Creating column _time_insert in table {table_name} with data type DATETIME2."
)
assert (
str(warn[1].message)
== f"Altering column ColumnB in table {table_name} to data type varchar(3) with is_nullable=True."
)
assert (
str(warn[2].message)
== f"Altering column ColumnC in table {table_name} to data type int with is_nullable=True."
)
schema,_ = conversion.get_schema(sql.connection, table_name)
result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
assert all(result["_time_insert"].notna())
_, dtypes = conversion.sql_spec(schema, dataframe)
assert dtypes == {
"ColumnA": "tinyint",
"ColumnB": "varchar(3)",
"ColumnC": "int",
"_time_insert": "datetime2",
}
def test_insert_alter_primary_key(sql):
# inital insert
table_name = "##test_insert_alter_primary_key"
dataframe = pd.DataFrame(
{
"ColumnA": [0, 1, 2, 3],
"ColumnB": [0, 1, 2, 3],
"ColumnC": ["a", "b", "c", "d"],
}
).set_index(keys=["ColumnA", "ColumnB"])
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(table_name, dataframe, primary_key="index")
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema,_ = conversion.get_schema(sql.connection, table_name)
_, dtypes = conversion.sql_spec(schema, dataframe)
assert dtypes == {
"ColumnA": "tinyint",
"ColumnB": "tinyint",
"ColumnC": "varchar(1)",
}
assert schema.at["ColumnA", "pk_seq"] == 1
assert schema.at["ColumnB", "pk_seq"] == 2
assert pd.isna(schema.at["ColumnC", "pk_seq"])
# insert that alters primary key
new = pd.DataFrame(
{
"ColumnA": [256, 257, 258, 259],
"ColumnB": [4, 5, 6, 7],
"ColumnC": ["e", "f", "g", "h"],
}
).set_index(keys=["ColumnA", "ColumnB"])
with warnings.catch_warnings(record=True) as warn:
new = sql.insert.insert(table_name, new)
assert len(warn) == 1
assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
assert (
str(warn[0].message)
== "Altering column ColumnA in table ##test_insert_alter_primary_key to data type smallint with is_nullable=False."
)
schema,_ = conversion.get_schema(sql.connection, table_name)
result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
assert result.equals(dataframe.append(new))
_, dtypes = conversion.sql_spec(schema, new)
assert dtypes == {
"ColumnA": "smallint",
"ColumnB": "tinyint",
"ColumnC": "varchar(1)",
}
assert schema.at["ColumnA", "pk_seq"] == 1
assert schema.at["ColumnB", "pk_seq"] == 2
assert pd.isna(schema.at["ColumnC", "pk_seq"])
def test_insert_add_and_alter_column(sql):
table_name = "##test_insert_add_and_alter_column"
dataframe = pd.DataFrame({"ColumnA": [0, 1, 2, 3], "ColumnB": [0, 1, 2, 3]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe, primary_key="index")
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
new = pd.DataFrame({
'ColumnA': [4,5,6,7],
'ColumnB': [256, 257, 258, 259],
'ColumnC': [0, 1, 2, 3]
}, index=[4,5,6,7])
new.index.name = '_index'
with warnings.catch_warnings(record=True) as warn:
new = sql.insert_meta.insert(table_name, new)
assert len(warn) == 2
assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
assert (
str(warn[0].message)
== f"Creating column ColumnC in table {table_name} with data type tinyint."
)
assert (
str(warn[1].message)
== f"Altering column ColumnB in table {table_name} to data type smallint with is_nullable=False."
)
schema,_ = conversion.get_schema(sql.connection, table_name)
result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
assert result[new.columns].equals(dataframe.append(new))
assert all(result["_time_insert"].notna())
_, dtypes = conversion.sql_spec(schema, dataframe)
assert dtypes == {
"_index": "tinyint",
"ColumnA": "tinyint",
"ColumnB": "smallint",
"_time_insert": "datetime2",
"ColumnC": "tinyint",
}
|
python
|
# Server must be restarted after creating new tags file
from django import template
register = template.Library()
@register.inclusion_tag('oauth/tags/user_avatar.html')
def get_user_avatar_tag(user):
    '''Return the user's picture as an img tag'''
return {'user': user}
|
python
|
# -*- coding:Utf-8 -*-
from gi.repository import Gtk, GObject, GdkPixbuf
from crudel import Crudel
import glob
class PicsouDiapo(Gtk.Window):
""" Affichage d'une image dans une Gtk.Window """
def __init__(self, crud, args):
Gtk.Window.__init__(self, title=args)
self.crud = crud
self.args = args
self.width = self.args["width"] if self.args.get("width") is not None else 800
        self.height = self.args["height"] if self.args.get("height") is not None else 600
        self.nb_cols = self.args["nb_cols"] if self.args.get("nb_cols") is not None else 3
        self.set_size_request(self.width, self.height)
self.directory = "{}/{}".format(self.crud.get_application_prop("data_directory"),self.args["directory"])
self.set_title("Historique des cours")
self.create_widget()
def create_widget(self):
""" Construction des diapos """
vbox = Gtk.VBox()
hbox = None
self.nb_cols = 2
icol = 0
files = sorted([f for f in glob.glob(self.directory + "/**/*.png", recursive=True)])
for file_path in files:
# print(file_path)
if icol == 0:
hbox = Gtk.HBox()
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(file_path, self.width//self.nb_cols - 4, -1, True)
image = Gtk.Image.new_from_pixbuf(pixbuf)
hbox.pack_start(image, False, False, 0)
icol+=1
if icol >= self.nb_cols:
vbox.pack_start(hbox, False, False, 0)
icol = 0
hbox = None
if hbox is not None:
vbox.pack_start(hbox, False, False, 0)
scroll_window = Gtk.ScrolledWindow()
scroll_window.set_policy(Gtk.PolicyType.ALWAYS, Gtk.PolicyType.ALWAYS)
scroll_window.add_with_viewport(vbox)
self.add(scroll_window)
self.show_all()
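# A minimal usage sketch (illustrative; the `crud` object and the argument
# values are placeholders for whatever the caller provides):
#   win = PicsouDiapo(crud, {"directory": "charts", "width": 1024, "height": 768})
#   Gtk.main()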
|
python
|
"""
======================
Geographic Projections
======================
This shows 4 possible geographic projections. Cartopy_ supports more
projections.
.. _Cartopy: http://scitools.org.uk/cartopy
"""
import matplotlib.pyplot as plt
###############################################################################
plt.figure()
plt.subplot(projection="aitoff")
plt.title("Aitoff")
plt.grid(True)
###############################################################################
plt.figure()
plt.subplot(projection="hammer")
plt.title("Hammer")
plt.grid(True)
###############################################################################
plt.figure()
plt.subplot(projection="lambert")
plt.title("Lambert")
plt.grid(True)
###############################################################################
plt.figure()
plt.subplot(projection="mollweide")
plt.title("Mollweide")
plt.grid(True)
plt.show()
|
python
|
"""
Wrappers around the Google API's.
"""
import os
import json
from datetime import (
datetime,
timedelta,
)
from collections import namedtuple
try:
# this is only an issue with Python 2.7 and if the
# Google-API packages were not installed with msl-io
from enum import Enum
except ImportError:
Enum = object
# having the Google-API packages are optional
try:
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials
from googleapiclient.http import (
MediaFileUpload,
MediaIoBaseDownload,
DEFAULT_CHUNK_SIZE,
)
HAS_GOOGLE_API = True
except ImportError:
DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024
HAS_GOOGLE_API = False
from .constants import (
HOME_DIR,
IS_PYTHON2,
)
def _authenticate(token, client_secrets_file, scopes):
"""Authenticate with a Google API.
Parameters
----------
token : :class:`str`
The path to a token file. If it does not exist then it will be created.
client_secrets_file : :class:`str`
The "client secrets" file to use to generate the OAuth credentials.
scopes : :class:`list` of :class:`str`
The list of scopes to enable.
Returns
-------
:class:`google.oauth2.credentials.Credentials`
The OAuth 2.0 credentials for the user.
"""
if not HAS_GOOGLE_API:
raise RuntimeError(
'You must install the Google-API packages, run\n'
' pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib'
)
credentials = None
# load the token from an environment variable if it exists
# ignore the '.json' extension
token_env_name = os.path.basename(token)[:-5].replace('-', '_').upper()
if token_env_name in os.environ:
info = json.loads(os.environ[token_env_name])
credentials = Credentials.from_authorized_user_info(info, scopes=scopes)
# load the cached token file if it exists
if not credentials and os.path.isfile(token):
credentials = Credentials.from_authorized_user_file(token, scopes=scopes)
# if there are no (valid) credentials available then let the user log in
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
try:
credentials.refresh(Request())
except RefreshError as err:
if os.path.isfile(token) and not os.getenv('MSL_IO_RUNNING_TESTS'):
message = '{}: {}\nDo you want to delete the token file and re-authenticate ' \
'(y/[n])? '.format(err.__class__.__name__, err.args[0])
if IS_PYTHON2:
yes_no = raw_input(message)
else:
yes_no = input(message)
if yes_no.lower().startswith('y'):
os.remove(token)
return _authenticate(token, client_secrets_file, scopes)
raise
else:
if not client_secrets_file:
raise OSError('You must specify the path to a "client secrets" file as the credentials')
flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, scopes)
credentials = flow.run_local_server(port=0)
# save the credentials for the next run
if token_env_name in os.environ:
os.environ[token_env_name] = credentials.to_json()
else:
# make sure that all parent directories exist before creating the file
dirname = os.path.dirname(token)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
with open(token, mode='w') as fp:
fp.write(credentials.to_json())
return credentials
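# Worked example of the environment-variable lookup above (illustrative): a
# token path ending in 'token-drive-readonly.json' is checked against the
# environment variable 'TOKEN_DRIVE_READONLY' before the cached file is read.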
class GoogleAPI(object):
def __init__(self, service, version, credentials, scopes, is_read_only, is_corporate_account):
"""Base class for all Google API's."""
testing = 'testing-' if os.getenv('MSL_IO_RUNNING_TESTS') else ''
corporate = '-corporate' if is_corporate_account else ''
readonly = '-readonly' if is_read_only else ''
filename = '{}token-{}{}{}.json'.format(testing, service, corporate, readonly)
token = os.path.join(HOME_DIR, filename)
oauth = _authenticate(token, credentials, scopes)
self._service = build(service, version, credentials=oauth)
@property
def service(self):
"""The Resource object with methods for interacting with the API service."""
return self._service
class GDrive(GoogleAPI):
MIME_TYPE_FOLDER = 'application/vnd.google-apps.folder'
ROOT_NAMES = ['Google Drive', 'My Drive', 'Shared drives']
def __init__(self, credentials=None, is_read_only=True, is_corporate_account=True, scopes=None):
"""Interact with a user's Google Drive.
.. attention::
You must follow the instructions in the prerequisites section for setting up the
`Drive API <https://developers.google.com/drive/api/v3/quickstart/python#prerequisites>`_
before you can use this class. It is also useful to be aware of the
`refresh token expiration <https://developers.google.com/identity/protocols/oauth2#expiration>`_
policy.
Parameters
----------
credentials : :class:`str`, optional
The path to the "client secrets" credential file. This file only
needs to be specified the first time that you interact with a
user's Google Drive or if you delete the token file that was
created when you previously authenticated using the credentials.
is_read_only : :class:`bool`, optional
Whether to interact with a user's Google Drive in read-only mode.
is_corporate_account : :class:`bool`, optional
Whether you want to interact with a user's Google Drive via a
corporate Google account or a personal Google account.
scopes : :class:`list` of :class:`str`, optional
The list of scopes to enable for the Google API. See
`Drive scopes <https://developers.google.com/identity/protocols/oauth2/scopes#drive>`_
for more details. If not specified then default scopes are chosen
based on the value of `is_read_only`.
"""
if not scopes:
if is_read_only:
scopes = [
'https://www.googleapis.com/auth/drive.readonly',
'https://www.googleapis.com/auth/drive.metadata.readonly'
]
else:
scopes = [
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/drive.metadata',
]
super(GDrive, self).__init__(
'drive', 'v3', credentials, scopes, is_read_only, is_corporate_account
)
self._files = self._service.files()
self._drives = self._service.drives()
@staticmethod
def _folder_hierarchy(folder):
# create a list of sub-folder names in the folder hierarchy
f = folder
names = []
while True:
f, name = os.path.split(f)
if not name or name in GDrive.ROOT_NAMES:
break
names.append(name)
return names[::-1]
def folder_id(self, folder, parent_id=None):
"""Get the ID of a Google Drive folder.
Parameters
----------
folder : :class:`str`
The path to a Google Drive file.
parent_id : :class:`str`, optional
The ID of the parent folder that the value of `folder` is relative to.
If not specified then `folder` is relative to the "root" folder.
Returns
-------
:class:`str`
The folder ID.
"""
# find the ID of the folder
folder_id = parent_id or 'root'
names = GDrive._folder_hierarchy(folder)
for name in names:
q = '"{}" in parents and name="{}" and trashed=false and mimeType="{}"'.format(
folder_id, name, GDrive.MIME_TYPE_FOLDER
)
request = self._files.list(q=q, fields='files(id,name)')
response = request.execute()
files = response['files']
if not files:
raise OSError('Not a valid Google Drive folder {!r}'.format(folder))
if len(files) > 1:
raise OSError('Multiple folder matches -- {}'.format(files))
first = files[0]
assert name == first['name'], '{!r} != {!r}'.format(name, first['name'])
folder_id = first['id']
return folder_id
def file_id(self, file, mime_type=None, folder_id=None):
"""Get the ID of a Google Drive file.
Parameters
----------
file : :class:`str`
The path to a Google Drive file.
mime_type : :class:`str`, optional
The mime type to use to filter the results.
folder_id : :class:`str`, optional
The ID of the folder that the value of `file` is relative to.
If not specified then `file` is relative to the "root" folder.
Returns
-------
:class:`str`
The file ID.
"""
folders, name = os.path.split(file)
folder_id = self.folder_id(folders, parent_id=folder_id)
q = '"{}" in parents and name="{}" and trashed=false'.format(folder_id, name)
if not mime_type:
q += ' and mimeType!="{}"'.format(GDrive.MIME_TYPE_FOLDER)
else:
q += ' and mimeType="{}"'.format(mime_type)
request = self._files.list(q=q, fields='files(id,name,mimeType)')
response = request.execute()
files = response['files']
if not files:
raise OSError('Not a valid Google Drive file {!r}'.format(file))
if len(files) > 1:
mime_types = '\n '.join(f['mimeType'] for f in files)
raise OSError('Multiple file matches. Filter by mime type:\n ' + mime_types)
first = files[0]
assert name == first['name'], '{!r} != {!r}'.format(name, first['name'])
return first['id']
def is_file(self, file, mime_type=None, folder_id=None):
"""Check if a file exists.
Parameters
----------
file : :class:`str`
The path to a Google Drive file.
mime_type : :class:`str`, optional
The mime type to use to filter the results.
folder_id : :class:`str`, optional
The ID of the folder that the value of `file` is relative to.
If not specified then `file` is relative to the "root" folder.
Returns
-------
:class:`bool`
Whether the file exists.
"""
try:
self.file_id(file, mime_type=mime_type, folder_id=folder_id)
except OSError as err:
return str(err).startswith('Multiple file matches')
else:
return True
def is_folder(self, folder, parent_id=None):
"""Check if a folder exists.
Parameters
----------
folder : :class:`str`
The path to a Google Drive folder.
parent_id : :class:`str`, optional
The ID of the parent folder that the value of `folder` is relative to.
If not specified then `folder` is relative to the "root" folder.
Returns
-------
:class:`bool`
Whether the folder exists.
"""
try:
self.folder_id(folder, parent_id=parent_id)
except OSError:
return False
else:
return True
def create_folder(self, folder, parent_id=None):
"""Create a folder.
Makes all intermediate-level folders needed to contain the leaf directory.
Parameters
----------
folder : :class:`str`
The folder(s) to create, for example, 'folder1' or 'folder1/folder2/folder3'.
parent_id : :class:`str`, optional
The ID of the parent folder that the value of `folder` is relative to.
If not specified then `folder` is relative to the "root" folder.
Returns
-------
:class:`str`
The ID of the last (right most) folder that was created.
"""
names = GDrive._folder_hierarchy(folder)
response = {'id': parent_id or 'root'}
for name in names:
request = self._files.create(
body={
'name': name,
'mimeType': GDrive.MIME_TYPE_FOLDER,
'parents': [response['id']],
},
fields='id',
supportsAllDrives=True, # ability to create in shared drives
)
response = request.execute()
return response['id']
def delete(self, file_or_folder_id):
"""Delete a file or a folder.
Parameters
----------
file_or_folder_id : :class:`str`
The ID of the file or folder to delete.
"""
self._files.delete(fileId=file_or_folder_id).execute()
def empty_trash(self):
"""Permanently delete all of the user's trashed files."""
self._files.emptyTrash().execute()
def upload(self, file, folder_id=None, mime_type=None, resumable=False, chunk_size=DEFAULT_CHUNK_SIZE):
"""Upload a file.
Parameters
----------
file : :class:`str`
The file to upload.
folder_id : :class:`str`, optional
The ID of the folder to upload the file to.
If not specified then uploads to the "root" folder.
mime_type : :class:`str`, optional
The mime type to use for the file's metadata. If not specified
then a value will be guessed from the file extension.
resumable : :class:`bool`
Whether the upload can be resumed.
chunk_size : :class:`int`
The file will be uploaded in chunks of this many bytes. Only used
if `resumable` is :data:`True`. Pass in a value of -1 if the file
is to be uploaded in a single chunk. Note that Google App Engine
has a 5MB limit on request size, so you should never set
`chunk_size` to be >5MB or to -1 (if the file size is >5MB).
Returns
-------
:class:`str`
The ID of the file that was uploaded.
"""
parent_id = folder_id or 'root'
filename = os.path.basename(file)
body = {'name': filename, 'parents': [parent_id]}
if mime_type:
body['mimeType'] = mime_type
request = self._files.create(
body=body,
media_body=MediaFileUpload(
file,
mimetype=mime_type,
chunksize=chunk_size,
resumable=resumable
),
fields='id',
supportsAllDrives=True, # ability to upload to shared drives
)
response = request.execute()
return response['id']
def download(self, file_id, save_as=None, num_retries=0, chunk_size=DEFAULT_CHUNK_SIZE, callback=None):
"""Download a file.
Parameters
----------
file_id : :class:`str`
The ID of the file to download.
save_as : :term:`path-like <path-like object>` or :term:`file-like <file object>`, optional
The location to save the file to.
Default is in the current working directory.
num_retries : :class:`int`, optional
The number of times to retry the download.
If zero (default) then attempt the request only once.
chunk_size : :class:`int`, optional
The file will be downloaded in chunks of this many bytes.
callback
The callback to call after each chunk of the file is downloaded.
The `callback` accepts one positional argument, for example::
def handler(file):
print(file.progress(), file.total_size, file.resumable_progress)
drive.download('0Bwab3C2ejYSdM190b2psXy1C50P', callback=handler)
"""
if hasattr(save_as, 'write'):
fh = save_as
else:
if not save_as:
request = self._files.get(fileId=file_id, fields='name')
response = request.execute()
save_as = response['name']
fh = open(save_as, mode='wb')
request = self._files.get_media(fileId=file_id)
downloader = MediaIoBaseDownload(fh, request, chunksize=chunk_size)
done = False
while not done:
status, done = downloader.next_chunk(num_retries=num_retries)
if callback:
callback(status)
if fh is not save_as: # then close the file that was opened
fh.close()
def path(self, file_or_folder_id):
"""Convert an ID to a path.
Parameters
----------
file_or_folder_id : :class:`str`
The ID of a file or folder.
Returns
-------
:class:`str`
The corresponding path of the ID.
"""
names = []
while True:
request = self._files.get(fileId=file_or_folder_id, fields='name,parents')
response = request.execute()
names.append(response['name'])
parents = response.get('parents', [])
if not parents:
break
if len(parents) > 1:
raise OSError('Multiple parents exist. This case has not been handled yet. Contact developers.')
file_or_folder_id = response['parents'][0]
return '/'.join(names[::-1])
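# --- Usage sketch (illustrative, not part of the original class) ---
# Shows the typical create-folder / upload / resolve-path round trip using
# only the methods defined above. ``drive`` is assumed to be an already
# authenticated GDrive instance; the folder and file names are placeholders.
def _gdrive_usage_sketch(drive):
    folder_id = drive.create_folder('reports/2024')  # intermediate folders are created as needed
    file_id = drive.upload('summary.pdf', folder_id=folder_id, resumable=True)
    return drive.path(file_id)  # e.g. 'reports/2024/summary.pdf'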
class GValueOption(Enum):
"""Determines how values should be returned."""
FORMATTED = 'FORMATTED_VALUE'
"""Values will be calculated and formatted in the reply according to the
cell's formatting. Formatting is based on the spreadsheet's locale, not
the requesting user's locale. For example, if A1 is 1.23 and A2 is =A1
and formatted as currency, then A2 would return "$1.23"."""
UNFORMATTED = 'UNFORMATTED_VALUE'
"""Values will be calculated, but not formatted in the reply.
For example, if A1 is 1.23 and A2 is =A1 and formatted as currency, then
A2 would return the number 1.23."""
FORMULA = 'FORMULA'
"""Values will not be calculated. The reply will include the formulas.
For example, if A1 is 1.23 and A2 is =A1 and formatted as currency,
then A2 would return "=A1"."""
class GDateTimeOption(Enum):
"""Determines how dates should be returned."""
SERIAL_NUMBER = 'SERIAL_NUMBER'
"""Instructs date, time, datetime, and duration fields to be output as
doubles in "serial number" format, as popularized by Lotus 1-2-3. The
whole number portion of the value (left of the decimal) counts the days
since December 30th 1899. The fractional portion (right of the decimal)
counts the time as a fraction of the day. For example, January 1st 1900
at noon would be 2.5, 2 because it's 2 days after December 30th 1899,
and .5 because noon is half a day. February 1st 1900 at 3pm would be
33.625. This correctly treats the year 1900 as not a leap year."""
FORMATTED_STRING = 'FORMATTED_STRING'
"""Instructs date, time, datetime, and duration fields to be output as
strings in their given number format (which is dependent on the
spreadsheet locale)."""
class GCellType(Enum):
"""The data type of a spreadsheet cell."""
BOOLEAN = 'BOOLEAN'
CURRENCY = 'CURRENCY'
DATE = 'DATE'
DATE_TIME = 'DATE_TIME'
EMPTY = 'EMPTY'
ERROR = 'ERROR'
NUMBER = 'NUMBER'
PERCENT = 'PERCENT'
SCIENTIFIC = 'SCIENTIFIC'
STRING = 'STRING'
TIME = 'TIME'
UNKNOWN = 'UNKNOWN'
GCell = namedtuple('GCell', ('value', 'type', 'formatted'))
"""The information about a Google Sheets cell.
.. attribute:: value
The value of the cell.
.. attribute:: type
:class:`GCellType`: The data type of `value`.
.. attribute:: formatted
:class:`str`: The formatted value (i.e., how the `value` is displayed in the cell).
"""
class GSheets(GoogleAPI):
MIME_TYPE = 'application/vnd.google-apps.spreadsheet'
SERIAL_NUMBER_ORIGIN = datetime(1899, 12, 30)
def __init__(self, credentials=None, is_read_only=True, is_corporate_account=True, scopes=None):
"""Interact with a user's Google Sheets.
.. attention::
You must follow the instructions in the prerequisites section for setting up the
`Sheets API <https://developers.google.com/sheets/api/quickstart/python#prerequisites>`_
before you can use this class. It is also useful to be aware of the
`refresh token expiration <https://developers.google.com/identity/protocols/oauth2#expiration>`_
policy.
Parameters
----------
credentials : :class:`str`, optional
The path to the "client secrets" credential file. This file only
needs to be specified the first time that you interact with a
user's Google Sheets or if you delete the token file that was
created when you previously authenticated using the credentials.
is_read_only : :class:`bool`, optional
Whether to interact with a user's Google Sheets in read-only mode.
is_corporate_account : :class:`bool`, optional
Whether you want to interact with a user's Google Sheets via a
corporate Google account or a personal Google account.
scopes : :class:`list` of :class:`str`, optional
The list of scopes to enable for the Google API. See
`Sheets scopes <https://developers.google.com/identity/protocols/oauth2/scopes#sheets>`_
for more details. If not specified then default scopes are chosen
based on the value of `is_read_only`.
"""
if not scopes:
if is_read_only:
scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly']
else:
scopes = ['https://www.googleapis.com/auth/spreadsheets']
super(GSheets, self).__init__(
'sheets', 'v4', credentials, scopes, is_read_only, is_corporate_account
)
self._spreadsheets = self._service.spreadsheets()
def sheet_names(self, spreadsheet_id):
"""Get the names of all sheets in a spreadsheet.
Parameters
----------
spreadsheet_id : :class:`str`
The ID of a Google Sheets file.
Returns
-------
:class:`tuple` of :class:`str`
The names of all sheets.
"""
request = self._spreadsheets.get(spreadsheetId=spreadsheet_id)
response = request.execute()
return tuple(r['properties']['title'] for r in response['sheets'])
def values(self,
spreadsheet_id,
sheet=None,
cells=None,
row_major=True,
value_option=GValueOption.FORMATTED,
datetime_option=GDateTimeOption.SERIAL_NUMBER
):
"""Return a range of values from a spreadsheet.
Parameters
----------
spreadsheet_id : :class:`str`
The ID of a Google Sheets file.
sheet : :class:`str`, optional
The name of a sheet in the spreadsheet. If not specified and
only one sheet exists in the spreadsheet then automatically
determines the sheet name.
cells : :class:`str`, optional
The A1 notation or R1C1 notation of the range to retrieve values
from. If not specified then returns all values that are in `sheet`.
row_major : :class:`bool`, optional
Whether to return the values in row-major or column-major order.
value_option : :class:`str` or :class:`GValueOption`, optional
How values should be represented in the output. If a string
then it must be equal to one of the values in :class:`GValueOption`.
datetime_option : :class:`str` or :class:`GDateTimeOption`, optional
How dates, times, and durations should be represented in the
output. If a string then it must be equal to one of the values
in :class:`GDateTimeOption`. This argument is ignored if
`value_option` is :attr:`GValueOption.FORMATTED`.
Returns
-------
:class:`list`
The values from the sheet.
"""
if not sheet:
names = self.sheet_names(spreadsheet_id)
if len(names) != 1:
sheets = ', '.join(repr(n) for n in names)
raise ValueError('You must specify a sheet name: ' + sheets)
range_ = names[0]
else:
range_ = sheet
if cells:
range_ += '!{}'.format(cells)
if hasattr(value_option, 'value'):
value_option = value_option.value
if hasattr(datetime_option, 'value'):
datetime_option = datetime_option.value
request = self._spreadsheets.values().get(
spreadsheetId=spreadsheet_id,
range=range_,
majorDimension='ROWS' if row_major else 'COLUMNS',
valueRenderOption=value_option,
dateTimeRenderOption=datetime_option
)
response = request.execute()
return response.get('values', [])
def cells(self, spreadsheet_id, ranges=None):
"""Return cells from a spreadsheet.
Parameters
----------
spreadsheet_id : :class:`str`
The ID of a Google Sheets file.
ranges : :class:`str` or :class:`list` of :class:`str`, optional
The ranges to retrieve from the spreadsheet. Examples:
* ``'Sheet1'`` :math:`\\rightarrow` return all cells from
the sheet named Sheet1
* ``'Sheet1!A1:H5'`` :math:`\\rightarrow` return cells A1:H5
from the sheet named Sheet1
* ``['Sheet1!A1:H5', 'Data', 'Devices!B4:B9']`` :math:`\\rightarrow`
return cells A1:H5 from the sheet named Sheet1, all cells from the
sheet named Data and cells B4:B9 from the sheet named Devices
If not specified then return all cells from all sheets.
Returns
-------
:class:`dict`
The cells from the spreadsheet. The keys are the names of the
sheets and the values are a :class:`list` of :class:`GCell`
objects for the specified range of each sheet.
"""
request = self._spreadsheets.get(
spreadsheetId=spreadsheet_id, includeGridData=True, ranges=ranges
)
response = request.execute()
cells = {}
for sheet in response['sheets']:
data = []
for item in sheet['data']:
for row in item.get('rowData', []):
row_data = []
for col in row.get('values', []):
effective_value = col.get('effectiveValue', None)
formatted = col.get('formattedValue', '')
if effective_value is None:
value = None
typ = GCellType.EMPTY
elif 'numberValue' in effective_value:
value = effective_value['numberValue']
t = col.get('effectiveFormat', {}).get('numberFormat', {}).get('type', 'NUMBER')
typ = GCellType(t)
elif 'stringValue' in effective_value:
value = effective_value['stringValue']
typ = GCellType.STRING
elif 'boolValue' in effective_value:
value = effective_value['boolValue']
typ = GCellType.BOOLEAN
elif 'errorValue' in effective_value:
msg = effective_value['errorValue']['message']
value = '{} ({})'.format(col['formattedValue'], msg)
typ = GCellType.ERROR
else:
value = formatted
typ = GCellType.UNKNOWN
row_data.append(GCell(value=value, type=typ, formatted=formatted))
data.append(row_data)
cells[sheet['properties']['title']] = data
return cells
@staticmethod
def to_datetime(value):
"""Convert a "serial number" date into a :class:`datetime.datetime`.
Parameters
----------
value : :class:`float`
A date in the "serial number" format.
Returns
-------
:class:`datetime.datetime`
The date converted.
"""
days = int(value)
seconds = (value - days) * 86400 # 60 * 60 * 24
return GSheets.SERIAL_NUMBER_ORIGIN + timedelta(days=days, seconds=seconds)
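# --- Worked example (illustrative, not part of the original class) ---
# Reproduces the serial-number examples from the GDateTimeOption docstring:
# 2.5 is noon on January 1st 1900 and 33.625 is 3pm on February 1st 1900.
def _serial_number_examples():
    assert GSheets.to_datetime(2.5) == datetime(1900, 1, 1, 12, 0)
    assert GSheets.to_datetime(33.625) == datetime(1900, 2, 1, 15, 0)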
|
python
|
#!/bin/python
#NOTE: modified from original to be more module friendly (PS)
#Original source: https://github.com/jczaplew/postgis2geojson/blob/master/postgis2geojson.py
import argparse
import datetime
import decimal
import json
import subprocess
import psycopg2
#defaults for use as a module, possibly modified by the module user
global argsd
argsd = {"geometry": "geometry", "pretty": False, "topojson": False}
# Serialize Decimal values as floats (json.dumps cannot handle Decimal)
# http://stackoverflow.com/questions/16957275/python-to-json-serialization-fails-on-decimal
def check_for_decimals(obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
raise TypeError
#Main entry point for module use.
#NOTE: does not handle records with null geometry gracefully (exception in json.loads: TypeError: expected string or buffer)
def getData(conn, query, params=None):
"""NOTE: expects one field in query to be "ST_AsGeoJSON(foo) as geometry"."""
# Create a cursor for executing queries
with conn.cursor() as cur:
#print "query: " + query
# Execute the query
try:
if (params):
cur.execute(query, params)
else:
cur.execute(query)
except Exception as exc:
print "Unable to execute query. Error was {0}".format(str(exc))
raise exc
# Retrieve the results of the query
rows = cur.fetchall()
# Get the column names returned
colnames = [desc[0] for desc in cur.description]
# Find the index of the column that holds the geometry
geomIndex = colnames.index('geometry')
feature_collection = {'type': 'FeatureCollection', 'features': []}
# For each row returned...
for row in rows:
feature = {
'type': 'Feature',
'geometry': json.loads(row[geomIndex]),
'properties': {},
}
for index, colname in enumerate(colnames):
if colname != 'geometry':
if isinstance(row[index], datetime.datetime):
# datetimes are not JSON.dumpable, manually stringify these.
value = str(row[index])
else:
value = row[index]
feature['properties'][colname] = value
feature_collection['features'].append(feature)
indent = 2 if argsd["pretty"] is True else None
jsonified = json.dumps(feature_collection, indent=indent, default=check_for_decimals)
cur.close()
return jsonified
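# --- Usage sketch (illustrative, not part of the original script) ---
# getData() expects the query to alias the GeoJSON-encoded geometry column as
# "geometry". The connection parameters, table and column names below are
# placeholders, not values taken from this script.
def _getdata_usage_sketch():
    conn = psycopg2.connect(host="localhost", dbname="gis", user="gis", password="gis")
    try:
        return getData(
            conn,
            "SELECT name, ST_AsGeoJSON(geom) AS geometry FROM parks WHERE city = %s",
            params=("Sydney",),
        )
    finally:
        conn.close()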
|
python
|
"""Package initialization procedures.
The cli package provides components to build and execute the CLI.
"""
|
python
|
import re
# noinspection PyShadowingBuiltins
def all(_path):
return True
def path_contains(*subs):
def func(path):
return any(map(path.__contains__, subs))
return func
def contains_regex(pattern):
def func(path):
with open(path) as f:
code = f.read()
return bool(re.search(pattern, code))
return func
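# --- Usage sketch (illustrative, not part of the original module) ---
# Each factory above returns a predicate that takes a path, so the filters can
# be mixed freely when selecting files. The example patterns are placeholders.
def _filter_usage_sketch(paths):
    in_tests = path_contains("tests/", "test_")
    has_todo = contains_regex(r"#\s*TODO")  # opens each path, so the files must exist
    return [p for p in paths if in_tests(p) or has_todo(p)]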
|
python
|
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# get color transfer function/color map for 'DICOMImage'
dICOMImageLUT = GetColorTransferFunction('DICOMImage')
# Rescale transfer function
dICOMImageLUT.RescaleTransferFunction(200.0, 7622.0)
# get opacity transfer function/opacity map for 'DICOMImage'
dICOMImagePWF = GetOpacityTransferFunction('DICOMImage')
# Rescale transfer function
dICOMImagePWF.RescaleTransferFunction(200.0, 7622.0)
#### uncomment the following to render all views
RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
|
python
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.cache import cache
from .models import *
@receiver(post_save, sender=Email)
def question__delete_caches_on_create(sender, instance, created, **kwargs):
cache_name = "get_sent_emails_user_id_" + str(1)
if(cache_name in cache):
cache.delete(cache_name)
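# --- Illustrative counterpart (not part of the original module) ---
# The signal handler above invalidates the cached "sent emails" list whenever
# an Email is saved. A read path using the same cache key could look roughly
# like this; the queryset and timeout are assumptions, not taken from this app.
def get_sent_emails_cached():
    cache_name = "get_sent_emails_user_id_" + str(1)
    emails = cache.get(cache_name)
    if emails is None:
        emails = list(Email.objects.all())  # assumed queryset
        cache.set(cache_name, emails, timeout=300)
    return emails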
|
python
|
from flask import Blueprint, session, redirect, render_template, request, flash, url_for, abort
from models import PageDetails, Database,Authentication, General
from functools import wraps
from validator_collection import *
admin_remove = Blueprint("admin_remove", __name__)
@admin_remove.route("/Admin/Remove", methods=["POST", "GET"])
@admin_remove.route("/Admin/remove", methods=["POST", "GET"])
@admin_remove.route("/Admin/Delete", methods=["POST", "GET"])
@admin_remove.route("/Admin/delete", methods=["POST", "GET"])
def admin_remove_index():
return render_template("admin/admin_remove_options.html")
@admin_remove.route("/Admin/Remove/Day-Part", methods=["POST", "GET"])
def remove_day_options_admin():
""" The Remove day options Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
remove_ones = []
day_parts = [
"Name",
"Cover",
"Description",
"Text",
"To_Do",
"Quotes",
"Musics",
"Musics_Description",
"Ted_Video",
"Ted_Video_Description",
"Animation_Link",
"Animation_Description",
"Movie_Links",
"Movie_Text",
"Movie_Cover",
"Podcast",
"Podcast_Description",
]
for part in day_parts:
if (part) == request.form.get(part):
remove_ones.append(part)
message = Database().delete_parts_of_day_of_course_data_in_db(
slug=request.form.get("slug"),
day_of_course=request.form.get("day"),
remove_parts_names=remove_ones,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_options_admin"))
Parts = PageDetails().all_day_parts_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_day_options.html",
Parts=Parts,
Courses=PageDetails(session).all_courses_page_info_html(),
)
@admin_remove.route("/Admin/Remove/Course", methods=["POST", "GET"])
def remove_course_admin():
""" The Remove day options Page as an admin. """
if request.method == "POST":
def form_handler(request):
remove_ones = []
course_names = []
for course in PageDetails().all_courses_page_info_html():
course_names.append(course["Slug"])
for part in course_names:
if (part) == request.form.get(part):
remove_ones.append(part)
for course in remove_ones:
Database().delete_courses_data_from_db(course)
General().remove_file_to_trash("static/assets/images/blog/{slug}/".format(slug=course))
return True
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_course_admin"))
Parts = PageDetails().all_course_data_slug_name_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_courses.html",
Parts=Parts,
)
@admin_remove.route("/Admin/Remove/Day", methods=["POST", "GET"])
def remove_full_day_admin():
""" The Remove Full Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().delete_day_of_course_data_to_db(
slug=request.form.get("slug"),
day=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_full_day_admin"))
Parts = PageDetails().all_day_parts_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_day.html",
Parts=Parts,
Courses=PageDetails(session).all_courses_page_info_html(),
)
@admin_remove.route("/Admin/Remove/Day/Essential", methods=["POST", "GET"])
def remove_day_essential_main_data_admin():
""" The Remove Essential Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_essential_main_data_admin"))
return render_template(
"admin/admin_remove_day_essential.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Text", methods=["POST", "GET"])
def remove_day_text_data_admin():
""" The Remove Main Text Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_text_data_admin"))
return render_template(
"admin/admin_remove_day_text.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Todo", methods=["POST", "GET"])
def remove_day_todo_data_admin():
""" The Remove Todo-Excersices Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_todo_data_admin"))
return render_template(
"admin/admin_remove_day_todo.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Quotes", methods=["POST", "GET"])
def remove_day_quotes_data_admin():
""" The Remove Quotes Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_quotes_data_admin"))
return render_template(
"admin/admin_remove_day_quotes.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Music", methods=["POST", "GET"])
def remove_day_music_data_admin():
""" The Remove Music Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_music_data_admin"))
return render_template(
"admin/admin_remove_day_music.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Ted", methods=["POST", "GET"])
def remove_day_ted_data_admin():
""" The Remove TED video Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_ted_data_admin"))
return render_template(
"admin/admin_remove_day_ted.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Animation", methods=["POST", "GET"])
def remove_day_animation_data_admin():
""" The Remove short Animation film Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_animation_data_admin"))
return render_template(
"admin/admin_remove_day_animation.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Podcast", methods=["POST", "GET"])
def remove_day_podcast_data_admin():
""" The Remove podcast Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_podcast_data_admin"))
return render_template(
"admin/admin_remove_day_podcast.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Movie", methods=["POST", "GET"])
def remove_day_movie_data_admin():
""" The Remove Movie Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_movie_data_admin"))
return render_template(
"admin/admin_remove_day_movie.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Post/<slug_post>", methods=["POST", "GET"])
def remove_post_blog_admin(slug_post):
""" The remove a Post for blog Page as an admin. """
post = Database().get_blog_post_data_from_db(slug_post)
if post is False:
abort(404)
if request.method == "POST":
def form_handler(request):
if request.form.get("confirmation") == "True":
message = Database().delete_post_blog_data_from_db(slug_post)
else:
message = {"Result": False, "Message": "حذف تایید نشده است."}
General().remove_file_to_trash("static/assets/images/blog/{slug}/".format(slug=slug_post))
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_edit.post_blog_options_admin"))
return render_template(
"admin/admin_remove_post.html",
post=post
)
@admin_remove.route("/Admin/Remove/Music", methods=["POST", "GET"])
def remove_music_admin():
""" The Remove Music as an admin. """
if request.method == "POST":
def form_handler(request):
name = request.form.get("music_name")
remove_ones = []
music_names = []
for music_name in Database().get_all_musics_data_from_db():
music_names.append(music_name["Music_Name"])
for part in music_names:
if (part) == request.form.get(part):
remove_ones.append(part)
for music_name in remove_ones:
Database().delete_music_data_from_db(music_name)
message = True
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_music_admin"))
Parts = PageDetails().all_music_data_name_creator_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_musics.html",
Parts=Parts,
)
@admin_remove.route("/Admin/Remove/Tool", methods=["POST", "GET"])
@admin_remove.route("/Admin/Remove/tool", methods=["POST", "GET"])
def remove_tool_admin():
""" The Remove tool as an admin. """
if request.method == "POST":
def form_handler(request):
name = request.form.get("tool_name")
remove_ones = []
tool_names = []
for tool_name in Database().get_all_tools_data_db():
tool_names.append(tool_name["Slug"])
for part in tool_names:
if part == request.form.get(part):
remove_ones.append(part)
for tool_slug in remove_ones:
Database().delete_tool_data_from_db(tool_slug)
message = True
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_tool_admin"))
Parts = PageDetails().all_tools_data_name_slug_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_tool.html",
Parts=Parts,
)
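# --- Wiring sketch (illustrative, not part of the original module) ---
# The admin_remove blueprint defined above is registered on the Flask
# application elsewhere in the project; this helper only illustrates that
# step, and the application object is assumed to be created by the caller.
def _register_admin_remove(app):
    app.register_blueprint(admin_remove)
    return app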
|
python
|
#
# Test Netatmo class
#
import logging
import Netatmo
def main():
logging.basicConfig(level=logging.DEBUG)
netatmo = Netatmo.Netatmo("PyAtmo.conf")
home=netatmo.getHomesData()
netatmo.getHomeStatus()
if __name__ == "__main__":
main()
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
import warnings
from typing import Any, Dict, Generator, List, Optional, Tuple, Union
from kubernetes import client, config, watch
from kubernetes.config import ConfigException
from airflow.compat.functools import cached_property
from airflow.kubernetes.kube_client import _disable_verify_ssl, _enable_tcp_keepalive
try:
import airflow.utils.yaml as yaml
except ImportError:
import yaml # type: ignore[no-redef]
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
def _load_body_to_dict(body):
try:
body_dict = yaml.safe_load(body)
except yaml.YAMLError as e:
raise AirflowException(f"Exception when loading resource definition: {e}\n")
return body_dict
class KubernetesHook(BaseHook):
"""
Creates Kubernetes API connection.
- use in cluster configuration by using ``extra__kubernetes__in_cluster`` in connection
- use custom config by providing path to the file using ``extra__kubernetes__kube_config_path``
- use custom configuration by providing content of kubeconfig file via
``extra__kubernetes__kube_config`` in connection
- use default config by providing no extras
This hook checks for configuration options in the above order. Once an option is present it will
use this configuration.
.. seealso::
For more information about Kubernetes connection:
:doc:`/connections/kubernetes`
:param conn_id: The :ref:`kubernetes connection <howto/connection:kubernetes>`
to Kubernetes cluster.
:param client_configuration: Optional dictionary of client configuration params.
Passed on to kubernetes client.
:param cluster_context: Optionally specify a context to use (e.g. if you have multiple
contexts in your kubeconfig).
:param config_file: Path to kubeconfig file.
:param in_cluster: Set to ``True`` if running from within a kubernetes cluster.
:param disable_verify_ssl: Set to ``True`` if SSL verification should be disabled.
:param disable_tcp_keepalive: Set to ``True`` if you want to disable keepalive logic.
"""
conn_name_attr = 'kubernetes_conn_id'
default_conn_name = 'kubernetes_default'
conn_type = 'kubernetes'
hook_name = 'Kubernetes Cluster Connection'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, StringField
return {
"extra__kubernetes__in_cluster": BooleanField(lazy_gettext('In cluster configuration')),
"extra__kubernetes__kube_config_path": StringField(
lazy_gettext('Kube config path'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__kube_config": StringField(
lazy_gettext('Kube config (JSON format)'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__namespace": StringField(
lazy_gettext('Namespace'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__cluster_context": StringField(
lazy_gettext('Cluster context'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__disable_verify_ssl": BooleanField(lazy_gettext('Disable SSL')),
"extra__kubernetes__disable_tcp_keepalive": BooleanField(lazy_gettext('Disable TCP keepalive')),
}
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['host', 'schema', 'login', 'password', 'port', 'extra'],
"relabeling": {},
}
def __init__(
self,
conn_id: Optional[str] = default_conn_name,
client_configuration: Optional[client.Configuration] = None,
cluster_context: Optional[str] = None,
config_file: Optional[str] = None,
in_cluster: Optional[bool] = None,
disable_verify_ssl: Optional[bool] = None,
disable_tcp_keepalive: Optional[bool] = None,
) -> None:
super().__init__()
self.conn_id = conn_id
self.client_configuration = client_configuration
self.cluster_context = cluster_context
self.config_file = config_file
self.in_cluster = in_cluster
self.disable_verify_ssl = disable_verify_ssl
self.disable_tcp_keepalive = disable_tcp_keepalive
# these params used for transition in KPO to K8s hook
# for a deprecation period we will continue to consider k8s settings from airflow.cfg
self._deprecated_core_disable_tcp_keepalive: Optional[bool] = None
self._deprecated_core_disable_verify_ssl: Optional[bool] = None
self._deprecated_core_in_cluster: Optional[bool] = None
self._deprecated_core_cluster_context: Optional[str] = None
self._deprecated_core_config_file: Optional[str] = None
@staticmethod
def _coalesce_param(*params):
for param in params:
if param is not None:
return param
@cached_property
def conn_extras(self):
if self.conn_id:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
else:
extras = {}
return extras
def _get_field(self, field_name):
if field_name.startswith('extra_'):
raise ValueError(
f"Got prefixed name {field_name}; please remove the 'extra__kubernetes__' prefix "
f"when using this method."
)
if field_name in self.conn_extras:
return self.conn_extras[field_name] or None
prefixed_name = f"extra__kubernetes__{field_name}"
return self.conn_extras.get(prefixed_name) or None
@staticmethod
def _deprecation_warning_core_param(deprecation_warnings):
settings_list_str = ''.join([f"\n\t{k}={v!r}" for k, v in deprecation_warnings])
warnings.warn(
f"\nApplying core Airflow settings from section [kubernetes] with the following keys:"
f"{settings_list_str}\n"
"In a future release, KubernetesPodOperator will no longer consider core\n"
"Airflow settings; define an Airflow connection instead.",
DeprecationWarning,
)
def get_conn(self) -> Any:
"""Returns kubernetes api session for use with requests"""
in_cluster = self._coalesce_param(
self.in_cluster, self.conn_extras.get("extra__kubernetes__in_cluster") or None
)
cluster_context = self._coalesce_param(
self.cluster_context, self.conn_extras.get("extra__kubernetes__cluster_context") or None
)
kubeconfig_path = self._coalesce_param(
self.config_file, self.conn_extras.get("extra__kubernetes__kube_config_path") or None
)
kubeconfig = self.conn_extras.get("extra__kubernetes__kube_config") or None
num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
if num_selected_configuration > 1:
raise AirflowException(
"Invalid connection configuration. Options kube_config_path, "
"kube_config, in_cluster are mutually exclusive. "
"You can only use one option at a time."
)
disable_verify_ssl = self._coalesce_param(
self.disable_verify_ssl, _get_bool(self._get_field("disable_verify_ssl"))
)
disable_tcp_keepalive = self._coalesce_param(
self.disable_tcp_keepalive, _get_bool(self._get_field("disable_tcp_keepalive"))
)
# BEGIN apply settings from core kubernetes configuration
# this section should be removed in next major release
deprecation_warnings: List[Tuple[str, Any]] = []
if disable_verify_ssl is None and self._deprecated_core_disable_verify_ssl is True:
deprecation_warnings.append(('verify_ssl', False))
disable_verify_ssl = self._deprecated_core_disable_verify_ssl
# by default, hook will try in_cluster first. so we only need to
# apply core airflow config and alert when False and in_cluster not otherwise set.
if in_cluster is None and self._deprecated_core_in_cluster is False:
deprecation_warnings.append(('in_cluster', self._deprecated_core_in_cluster))
in_cluster = self._deprecated_core_in_cluster
if not cluster_context and self._deprecated_core_cluster_context:
deprecation_warnings.append(('cluster_context', self._deprecated_core_cluster_context))
cluster_context = self._deprecated_core_cluster_context
if not kubeconfig_path and self._deprecated_core_config_file:
deprecation_warnings.append(('config_file', self._deprecated_core_config_file))
kubeconfig_path = self._deprecated_core_config_file
if disable_tcp_keepalive is None and self._deprecated_core_disable_tcp_keepalive is True:
deprecation_warnings.append(('enable_tcp_keepalive', False))
disable_tcp_keepalive = True
if deprecation_warnings:
self._deprecation_warning_core_param(deprecation_warnings)
# END apply settings from core kubernetes configuration
if disable_verify_ssl is True:
_disable_verify_ssl()
if disable_tcp_keepalive is not True:
_enable_tcp_keepalive()
if in_cluster:
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
return client.ApiClient()
if kubeconfig_path is not None:
self.log.debug("loading kube_config from: %s", kubeconfig_path)
config.load_kube_config(
config_file=kubeconfig_path,
client_configuration=self.client_configuration,
context=cluster_context,
)
return client.ApiClient()
if kubeconfig is not None:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(kubeconfig.encode())
temp_config.flush()
config.load_kube_config(
config_file=temp_config.name,
client_configuration=self.client_configuration,
context=cluster_context,
)
return client.ApiClient()
return self._get_default_client(cluster_context=cluster_context)
def _get_default_client(self, *, cluster_context=None):
# if we get here, then no configuration has been supplied
# we should try in_cluster since that's most likely
# but failing that just load assuming a kubeconfig file
# in the default location
try:
config.load_incluster_config(client_configuration=self.client_configuration)
except ConfigException:
self.log.debug("loading kube_config from: default file")
config.load_kube_config(
client_configuration=self.client_configuration,
context=cluster_context,
)
return client.ApiClient()
@cached_property
def api_client(self) -> Any:
"""Cached Kubernetes API client"""
return self.get_conn()
@cached_property
def core_v1_client(self):
return client.CoreV1Api(api_client=self.api_client)
def create_custom_object(
self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
):
"""
Creates custom resource definition object in Kubernetes
:param group: api group
:param version: api version
:param plural: api plural
:param body: crd object definition
:param namespace: kubernetes namespace
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
if isinstance(body, str):
body_dict = _load_body_to_dict(body)
else:
body_dict = body
try:
api.delete_namespaced_custom_object(
group=group,
version=version,
namespace=namespace,
plural=plural,
name=body_dict["metadata"]["name"],
)
self.log.warning("Deleted SparkApplication with the same name.")
except client.rest.ApiException:
self.log.info("SparkApp %s not found.", body_dict['metadata']['name'])
try:
response = api.create_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, body=body_dict
)
self.log.debug("Response: %s", response)
return response
except client.rest.ApiException as e:
raise AirflowException(f"Exception when calling -> create_custom_object: {e}\n")
def get_custom_object(
self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
):
"""
Get custom resource definition object from Kubernetes
:param group: api group
:param version: api version
:param plural: api plural
:param name: crd object name
:param namespace: kubernetes namespace
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
)
return response
except client.rest.ApiException as e:
raise AirflowException(f"Exception when calling -> get_custom_object: {e}\n")
def get_namespace(self) -> Optional[str]:
"""Returns the namespace that defined in the connection"""
if self.conn_id:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
namespace = extras.get("extra__kubernetes__namespace", "default")
return namespace
return None
def get_pod_log_stream(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
) -> Tuple[watch.Watch, Generator[str, None, None]]:
"""
Retrieves a log stream for a container in a kubernetes pod.
:param pod_name: pod name
:param container: container name
:param namespace: kubernetes namespace
"""
api = client.CoreV1Api(self.api_client)
watcher = watch.Watch()
return (
watcher,
watcher.stream(
api.read_namespaced_pod_log,
name=pod_name,
container=container,
namespace=namespace if namespace else self.get_namespace(),
),
)
def get_pod_logs(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
):
"""
Retrieves a container's log from the specified pod.
:param pod_name: pod name
:param container: container name
:param namespace: kubernetes namespace
"""
api = client.CoreV1Api(self.api_client)
return api.read_namespaced_pod_log(
name=pod_name,
container=container,
_preload_content=False,
namespace=namespace if namespace else self.get_namespace(),
)
def _get_bool(val) -> Optional[bool]:
"""
Converts val to bool if can be done with certainty.
If we cannot infer intention we return None.
"""
if isinstance(val, bool):
return val
elif isinstance(val, str):
if val.strip().lower() == 'true':
return True
elif val.strip().lower() == 'false':
return False
return None
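# --- Usage sketch (illustrative, not part of the original module) ---
# Drives the hook end to end: the connection id and namespace fallback are
# placeholders. ``core_v1_client`` is the cached CoreV1Api wrapper built on
# top of get_conn() above.
def _kubernetes_hook_usage_sketch():
    hook = KubernetesHook(conn_id="kubernetes_default")
    namespace = hook.get_namespace() or "default"
    pods = hook.core_v1_client.list_namespaced_pod(namespace=namespace)
    return [pod.metadata.name for pod in pods.items]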
|
python
|
import json
import random
from flask import Flask, request
from flask_restplus import Resource, Api, Namespace
import os
from datetime import date, datetime
from faker import Faker
import security
fake = Faker('en_AU')
## load bsb data into memory
with open('./resources/bsbs.json') as json_file:
bsbs = json.load(json_file)
api = Namespace('account', description='Account Namespace')
## comment
def Generate_Account():
account = random.choice(bsbs)
bankAccount = {}
bankAccount['institutionName'] = account['bsb']['content']['bsbDetails'][
'institutionName'].title()
bankAccount['institutionCode'] = account['bsb']['content']['bsbDetails'][
'financialInstitutionCode']
bankAccount['bankStateBranchCode'] = account['bsb']['content'][
'bsbDetails']['bankStateBranchCode']
bankAccount['branchName'] = account['bsb']['content']['activity'][0][
'role'][0]['party'][0]['name'][0]['fullName']
bankAccount['streetAddress'] = account['bsb']['content']['activity'][0][
'role'][0]['party'][0]['address'][0]['streetAddress'].title()
try:
bankAccount['postcode'] = account['bsb']['content']['activity'][0][
'role'][0]['party'][0]['address'][0]['postcode']
except:
bankAccount['postcode'] = '2000'
try:
bankAccount['suburb'] = account['bsb']['content']['activity'][0][
'role'][0]['party'][0]['address'][0]['suburb'].title()
except:
bankAccount['suburb'] = 'Sydney'
bankAccount['state'] = account['bsb']['content']['activity'][0]['role'][0][
'party'][0]['address'][0]['state']
bankAccount['accountNumber'] = fake.numerify(text="##-###-####")
return bankAccount
@api.route('/random')
class GenerateName(Resource):
@security.token_required
def get(self):
account = Generate_Account()
return account
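# --- Wiring sketch (illustrative, not part of the original module) ---
# Shows roughly how this namespace is typically mounted on a Flask app with
# flask_restplus; the application layout below is an assumption, not taken
# from this file.
def _create_app_sketch():
    app = Flask(__name__)
    rest_api = Api(app, title="Synthetic data API")
    rest_api.add_namespace(api, path="/account")
    return app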
|
python
|
import os
import pathlib
import re
from collections import defaultdict
from functools import lru_cache
import stanza
import torch
from loguru import logger
from nlgeval import NLGEval
from transformers import AutoModelForCausalLM, AutoTokenizer
current_dir = pathlib.Path(__file__).parent.absolute()
def step_len(func):
def wrapper(*args, **kwargs):
self = args[0]
self.len += 1
return func(*args, **kwargs)
return wrapper
class Scorer:
def __init__(self, preprocess=True, metrics_to_omit=["CIDEr", "METEOR"]):
self.preprocess = preprocess
self.nlgeval = NLGEval(
no_glove=True, no_skipthoughts=True, metrics_to_omit=metrics_to_omit
)
self.score = defaultdict(lambda: 0.0)
self.len = 0
if self.preprocess:
self.nlp = stanza.Pipeline(
lang="en", processors="tokenize", tokenize_no_ssplit=True, verbose=False
)
#
self.stop_words_sign = (
open(os.path.join(current_dir, "stopwords-sign.txt"), "r", encoding="utf-8")
.read()
.split()
)
self.stop_words_sign_rule = "|".join(
[re.escape(sign) for sign in self.stop_words_sign]
)
@lru_cache(maxsize=200)
def _preprocess(self, raw_sentence):
result = self.nlp(raw_sentence.replace("\n\n", ""))
tokens = []
try:
for token in result.sentences[0].tokens:
tokens.append(token.text.lower())
tokenize_sentence = " ".join(tokens)
tokenize_sentence = re.sub(self.stop_words_sign_rule, "", tokenize_sentence)
except Exception as e:
logger.warning(e)
logger.warning(
f'preprocess fail, return "" raw_sentence:{raw_sentence} result:{result}'
)
return ""
return tokenize_sentence
def clean(self):
self.score = defaultdict(lambda: 0.0)
self.len = 0
@step_len
def add(*args, **kwargs):
raise NotImplementedError
def compute(
self, save_report_dir=None, save_file_name="score.txt", return_score=True
):
#
out_score = {}
if save_report_dir is not None:
os.makedirs(save_report_dir, exist_ok=True)
save_score_report_path = os.path.join(save_report_dir, save_file_name)
score_f = open(save_score_report_path, "w", encoding="utf-8")
for score_key in self.score.keys():
_score = self.score[score_key] / self.len
out_score[score_key] = _score
if save_report_dir is not None:
score_f.write("%s\t%3.5f\n" % (score_key, _score))
if return_score:
return out_score
class SimilarityScorer(Scorer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@step_len
def add(self, hyp, refs):
refs = refs[:]
if self.preprocess:
hyp = self._preprocess(hyp)
refs = [self._preprocess(ref) for ref in refs]
_score = self.nlgeval.compute_individual_metrics(hyp=hyp, ref=refs)
for score_key in _score.keys():
self.score[score_key] += _score[score_key]
class CoverageScorer(Scorer):
def __init__(self, preprocess=True):
super().__init__(preprocess=preprocess)
self.stop_words_en = open(
os.path.join(current_dir, "stopwords-en.txt"), "r", encoding="utf-8"
)
self.stop_words_sign = open(
os.path.join(current_dir, "stopwords-sign.txt"), "r", encoding="utf-8"
)
self.stop_words = (
self.stop_words_en.read().split() + self.stop_words_sign.read().split()
)
# some signs are used to split the context into sentences; remove them from `stopwords-sign`
self.stop_words_sign = (
open(os.path.join(current_dir, "stopwords-sign.txt"), "r", encoding="utf-8")
.read()
.split()
)
self.stop_words_sign.remove(",")
self.stop_words_sign.remove(".")
self.stop_words_sign.remove("!")
self.stop_words_sign.remove("?")
self.stop_words_sign_rule = "|".join(
[re.escape(sign) for sign in self.stop_words_sign]
)
def _compute_coverage_score(self, sents: list, article: str):
sent = " ".join(sents)
sent_list = re.split(r",|\.|\!|\?", sent)
for sent in sent_list[:]:
if sent == "":
sent_list.remove(sent)
# get sents keywords
keyword_list = []
for sent in sent_list[:]:
sent = sent.lower()
word_list = sent.split()
for word in word_list:
if word not in self.stop_words:
keyword_list.append(word)
# process article into words and compute coverage
article_sent_list = re.split(r",|\.|\!|\?", article)
count_article_sent = len(article_sent_list)
if count_article_sent == 0:
return 0.0
count_coverage = 0
for article_sent in article_sent_list:
article_sent = article_sent.lower().split()
for keyword in keyword_list:
if keyword in article_sent:
count_coverage += 1
break
return count_coverage / count_article_sent
@step_len
def add(self, sents: list, article: str):
sents = sents[:]
if self.preprocess:
sents = [self._preprocess(sent) for sent in sents]
article = self._preprocess(article)
coverage_score = self._compute_coverage_score(sents, article)
self.score["keyword_coverage"] += coverage_score
class PPLScorer(Scorer):
def __init__(self, model, tokenizer, stride=512, max_length=512, use_sppl=False):
self.model = model
self.tokenizer = tokenizer
self.stride = stride
self.max_length = max_length
self.use_sppl = use_sppl
#
self.score = defaultdict(lambda: 0.0)
self.len = 0
@step_len
def add(self, sentence):
if self.use_sppl:
self.score["ppl"] += self._compute_scaled_ppl(sentence)
else:
self.score["ppl"] += self._compute_ppl(sentence)
def _compute_scaled_ppl(self, sentence, alpha=0.2):
# https://www.desmos.com/calculator/scqyyq0ody
avg_ll = self._compute_avg_log_likelihood(sentence)
return torch.exp(-avg_ll * alpha).item()
def _compute_ppl(self, sentence):
# https://huggingface.co/transformers/perplexity.html
avg_ll = self._compute_avg_log_likelihood(sentence)
return torch.exp(avg_ll).item()
@lru_cache(maxsize=200)
def _compute_avg_log_likelihood(self, sentence):
stride = self.stride
max_length = self.max_length
encodings = self.tokenizer(sentence, return_tensors="pt")
model = self.model
lls = []
for i in range(0, encodings.input_ids.size(1), stride):
begin_loc = max(i + stride - max_length, 0)
end_loc = min(i + stride, encodings.input_ids.size(1))
trg_len = end_loc - i # may be different from stride on last loop
input_ids = encodings.input_ids[:, begin_loc:end_loc].to(self.model.device)
target_ids = input_ids.clone()
target_ids[:, :-trg_len] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
log_likelihood = outputs[0] * trg_len
lls.append(log_likelihood)
return torch.stack(lls).sum() / end_loc
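# --- Usage sketch (illustrative, not part of the original module) ---
# Shows how the scorers above are typically combined. The model checkpoint
# and the example sentences are placeholders, not values from this module.
def _scorer_usage_sketch():
    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumed checkpoint
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    ppl_scorer = PPLScorer(model, tokenizer)
    ppl_scorer.add("The quick brown fox jumps over the lazy dog.")
    similarity_scorer = SimilarityScorer()
    similarity_scorer.add(hyp="a cat sat on the mat", refs=["the cat is on the mat"])
    return ppl_scorer.compute(), similarity_scorer.compute()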
|
python
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
from datetime import datetime
from datetime import timezone
from typing import List
from typing import Tuple
from django.db import transaction
from vulnerabilities import models
from vulnerabilities.importer import PackageURL
from vulnerabilities.improver import Inference
from vulnerabilities.models import Advisory
logger = logging.getLogger(__name__)
class ImproveRunner:
"""
ImproveRunner is responsible for populating the database with any
consumable data. It does so in its ``run`` method by invoking the given
improver and parsing the returned Inferences into proper database fields
"""
def __init__(self, improver_class):
self.improver_class = improver_class
def run(self) -> None:
improver = self.improver_class()
logger.info(f"Running improver: {improver.qualified_name}")
for advisory in improver.interesting_advisories:
inferences = improver.get_inferences(advisory_data=advisory.to_advisory_data())
process_inferences(
inferences=inferences, advisory=advisory, improver_name=improver.qualified_name
)
logger.info("Finished improving using %s.", self.improver_class.qualified_name)
@transaction.atomic
def process_inferences(inferences: List[Inference], advisory: Advisory, improver_name: str):
"""
An atomic transaction that updates both the Advisory (e.g. date_improved)
and processes the given inferences to create or update corresponding
database fields.
This avoids failing the entire improver when only a single inference is
erroneous. Also, the atomic transaction for every advisory and its
inferences makes sure that date_improved of advisory is consistent.
"""
if not inferences:
logger.warn(f"Nothing to improve. Source: {improver_name} Advisory id: {advisory.id}")
return
logger.info(f"Improving advisory id: {advisory.id}")
for inference in inferences:
vuln = get_or_create_vulnerability_and_aliases(
inference.vulnerability_id, inference.aliases, inference.summary
)
if not vuln:
logger.warn(f"Unable to get vulnerability for inference: {inference!r}")
continue
for ref in inference.references:
reference, _ = models.VulnerabilityReference.objects.get_or_create(
reference_id=ref.reference_id, url=ref.url
)
models.VulnerabilityRelatedReference.objects.update_or_create(
reference=reference, vulnerability=vuln
)
for severity in ref.severities:
_vs, updated = models.VulnerabilitySeverity.objects.update_or_create(
scoring_system=severity.system.identifier,
reference=reference,
defaults={"value": str(severity.value)},
)
if updated:
logger.info(f"Severity updated for reference {ref!r} to {severity.value!r}")
if inference.affected_purls:
for pkg in inference.affected_purls:
vulnerable_package, _ = _get_or_create_package(pkg)
models.PackageRelatedVulnerability(
vulnerability=vuln,
package=vulnerable_package,
created_by=improver_name,
confidence=inference.confidence,
fix=False,
).update_or_create()
if inference.fixed_purl:
fixed_package, _ = _get_or_create_package(inference.fixed_purl)
models.PackageRelatedVulnerability(
vulnerability=vuln,
package=fixed_package,
created_by=improver_name,
confidence=inference.confidence,
fix=True,
).update_or_create()
advisory.date_improved = datetime.now(timezone.utc)
advisory.save()
def _get_or_create_package(p: PackageURL) -> Tuple[models.Package, bool]:
query_kwargs = {}
# TODO: this should be revisited as this should best be a model or manager method... and possibly streamlined
query_kwargs = dict(
type=p.type or "",
namespace=p.namespace or "",
name=p.name or "",
version=p.version or "",
qualifiers=p.qualifiers or {},
subpath=p.subpath or "",
)
return models.Package.objects.get_or_create(**query_kwargs)
def _package_url_to_package(purl: PackageURL) -> models.Package:
    # FIXME: this likely creates a package from a purl
p = models.Package()
p.set_package_url(purl)
return p
def get_or_create_vulnerability_and_aliases(vulnerability_id, alias_names, summary):
"""
    Get or create a vulnerability and aliases such that all existing and new
aliases point to the same vulnerability
"""
existing_vulns = set()
alias_names = set(alias_names)
new_alias_names = set()
for alias_name in alias_names:
try:
alias = models.Alias.objects.get(alias=alias_name)
existing_vulns.add(alias.vulnerability)
except models.Alias.DoesNotExist:
new_alias_names.add(alias_name)
# If given set of aliases point to different vulnerabilities in the
# database, request is malformed
# TODO: It is possible that all those vulnerabilities are actually
# the same at data level, figure out a way to merge them
if len(existing_vulns) > 1:
        logger.warning(
f"Given aliases {alias_names} already exist and do not point "
f"to a single vulnerability. Cannot improve. Skipped."
)
return
existing_alias_vuln = existing_vulns.pop() if existing_vulns else None
if (
existing_alias_vuln
and vulnerability_id
and existing_alias_vuln.vulnerability_id != vulnerability_id
):
        logger.warning(
            f"Given aliases {alias_names!r} already exist and point to existing "
f"vulnerability {existing_alias_vuln}. Unable to create Vulnerability "
f"with vulnerability_id {vulnerability_id}. Skipped"
)
return
if existing_alias_vuln:
vulnerability = existing_alias_vuln
elif vulnerability_id:
try:
vulnerability = models.Vulnerability.objects.get(vulnerability_id=vulnerability_id)
except models.Vulnerability.DoesNotExist:
            logger.warning(
f"Given vulnerability_id: {vulnerability_id} does not exist in the database"
)
return
else:
vulnerability = models.Vulnerability(summary=summary)
vulnerability.save()
if summary and summary != vulnerability.summary:
        logger.warning(
f"Inconsistent summary for {vulnerability!r}. "
f"Existing: {vulnerability.summary}, provided: {summary}"
)
for alias_name in new_alias_names:
alias = models.Alias(alias=alias_name, vulnerability=vulnerability)
alias.save()
logger.info(f"New alias for {vulnerability!r}: {alias_name}")
return vulnerability
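# A minimal usage sketch (the improver class name below is illustrative):
#     ImproveRunner(MyImprover).run()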
|
python
|
__all__ = ("MDRenderer", "LOGGER", "RenderTreeNode", "DEFAULT_RENDERER_FUNCS")
import logging
from types import MappingProxyType
from typing import Any, Mapping, MutableMapping, Sequence
from markdown_it.common.normalize_url import unescape_string
from markdown_it.token import Token
from mdformat.renderer._default_renderers import DEFAULT_RENDERER_FUNCS
from mdformat.renderer._tree import SyntaxTreeNode
from mdformat.renderer.typing import RendererFunc
LOGGER = logging.getLogger(__name__)
class MDRenderer:
"""Markdown renderer.
A renderer class that outputs formatted Markdown. Compatible with
`markdown_it.MarkdownIt`.
"""
__output__ = "md"
def __init__(self, parser: Any = None):
"""__init__ must have `parser` parameter for markdown-it-py
compatibility."""
def render(
self,
tokens: Sequence[Token],
options: Mapping[str, Any],
env: MutableMapping,
*,
finalize: bool = True,
) -> str:
"""Takes token stream and generates Markdown.
Args:
tokens: A sequence of block tokens to render
options: Params of parser instance
env: Additional data from parsed input
finalize: write references and add trailing newline
"""
tree = RenderTreeNode(tokens)
return self.render_tree(tree, options, env, finalize=finalize)
def render_tree(
self,
tree: "RenderTreeNode",
options: Mapping[str, Any],
env: MutableMapping,
*,
finalize: bool = True,
) -> str:
        # Update the default renderer functions with any renderer functions
        # defined by plugins.
updated_renderers = {}
for plugin in options.get("parser_extension", []):
for token_name, renderer_func in plugin.RENDERER_FUNCS.items():
if token_name in updated_renderers:
LOGGER.warning(
"Plugin conflict. More than one plugin defined a renderer"
f' for "{token_name}" token or token pair.'
)
else:
updated_renderers[token_name] = renderer_func
renderer_map = MappingProxyType({**DEFAULT_RENDERER_FUNCS, **updated_renderers})
text = tree.render(renderer_map, options, env)
if finalize:
if env.get("used_refs"):
text += "\n\n"
text += self._write_references(env)
text += "\n"
return text
@staticmethod
def _write_references(env: MutableMapping) -> str:
ref_list = []
for label in sorted(env.get("used_refs", [])):
ref = env["references"][label]
destination = ref["href"] if ref["href"] else "<>"
destination = unescape_string(destination)
item = f"[{label.lower()}]: {destination}"
title = ref["title"]
if title:
title = title.replace('"', '\\"')
item += f' "{title}"'
ref_list.append(item)
return "\n".join(ref_list)
class RenderTreeNode(SyntaxTreeNode):
def render(
self,
renderer_funcs: Mapping[str, RendererFunc],
options: Mapping[str, Any],
env: MutableMapping,
) -> str:
renderer_func = renderer_funcs[self.type]
return renderer_func(self, renderer_funcs, options, env)
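# A minimal usage sketch, assuming markdown-it-py's `renderer_cls` hook:
#
#     from markdown_it import MarkdownIt
#     mdit = MarkdownIt(renderer_cls=MDRenderer)
#     formatted = mdit.render("# Title\n\nSome *text*.")
#
# mdformat wires in its own options and plugins, so treat this as an
# illustration rather than the canonical entry point.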
|
python
|
import numpy as np
from utils import env_paths as paths
from base import Train
import time
class TrainModel(Train):
def __init__(self, model, output_freq=1, pickle_f_custom_freq=None,
f_custom_eval=None):
super(TrainModel, self).__init__(model, pickle_f_custom_freq, f_custom_eval)
self.output_freq = output_freq
def train_model(self, f_train, train_args, f_test, test_args, f_validate, validation_args,
n_train_batches=600, n_valid_batches=1, n_test_batches=1, n_epochs=100, anneal=None):
self.write_to_logger("### MODEL PARAMS ###")
self.write_to_logger(self.model.model_info())
self.write_to_logger("### TRAINING PARAMS ###")
self.write_to_logger(
"Train -> %s: %s" % (";".join(train_args['inputs'].keys()), str(train_args['inputs'].values())))
self.write_to_logger(
"Test -> %s: %s" % (";".join(test_args['inputs'].keys()), str(test_args['inputs'].values())))
if anneal is not None:
for t in anneal:
key, freq, rate, min_val = t
self.write_to_logger(
"Anneal %s %0.4f after %i epochs with minimum value %f." % (key, rate, int(freq), min_val))
self.write_to_logger("### TRAINING MODEL ###")
if self.custom_eval_func is not None:
self.custom_eval_func(self.model, paths.get_custom_eval_path(0, self.model.root_path))
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch += 1
start_time = time.time()
train_outputs = []
            for i in range(n_train_batches):
train_output = f_train(i, *train_args['inputs'].values())
train_outputs.append(train_output)
self.eval_train[epoch] = np.mean(np.array(train_outputs), axis=0)
self.model.after_epoch()
end_time = time.time() - start_time
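            # Annealing (the block below): every `freq` epochs the chosen input
            # value is multiplied by `rate`, and it is clamped at `min_val`.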
if anneal is not None:
for t in anneal:
key, freq, rate, min_val = t
new_val = train_args['inputs'][key] * rate
if new_val < min_val:
train_args['inputs'][key] = min_val
elif epoch % freq == 0:
train_args['inputs'][key] = new_val
if epoch % self.output_freq == 0:
if n_test_batches == 1:
self.eval_test[epoch] = f_test(*test_args['inputs'].values())
else:
test_outputs = []
                    for i in range(n_test_batches):
test_output = f_test(i, *test_args['inputs'].values())
test_outputs.append(test_output)
self.eval_test[epoch] = np.mean(np.array(test_outputs), axis=0)
if f_validate is not None:
if n_valid_batches == 1:
self.eval_validation[epoch] = f_validate(*validation_args['inputs'].values())
else:
valid_outputs = []
                        for i in range(n_valid_batches):
valid_output = f_validate(i, *validation_args['inputs'].values())
valid_outputs.append(valid_output)
self.eval_validation[epoch] = np.mean(np.array(valid_outputs), axis=0)
else:
self.eval_validation[epoch] = [0.] * len(validation_args['outputs'].keys())
# Formatting the output string from the generic and the user-defined values.
output_str = "epoch=%0" + str(len(str(n_epochs))) + "i; time=%0.2f;"
output_str %= (epoch, end_time)
def concatenate_output_str(out_str, d):
for k, v in zip(d.keys(), d.values()):
out_str += " %s=%s;" % (k, v)
return out_str
output_str = concatenate_output_str(output_str, train_args['outputs'])
output_str = concatenate_output_str(output_str, test_args['outputs'])
output_str = concatenate_output_str(output_str, validation_args['outputs'])
outputs = [float(o) for o in self.eval_train[epoch]]
outputs += [float(o) for o in self.eval_test[epoch]]
outputs += [float(o) for o in self.eval_validation[epoch]]
output_str %= tuple(outputs)
self.write_to_logger(output_str)
if self.pickle_f_custom_freq is not None and epoch % self.pickle_f_custom_freq == 0:
if self.custom_eval_func is not None:
self.custom_eval_func(self.model, paths.get_custom_eval_path(epoch, self.model.root_path))
self.plot_eval(self.eval_train, train_args['outputs'].keys(), "_train")
self.plot_eval(self.eval_test, test_args['outputs'].keys(), "_test")
self.plot_eval(self.eval_validation, validation_args['outputs'].keys(), "_validation")
self.dump_dicts()
self.model.dump_model()
if self.pickle_f_custom_freq is not None:
self.model.dump_model()
|
python
|
from aiogram import types
from aiogram.dispatcher.filters.builtin import Command
from aiogram.dispatcher import FSMContext
from middlewares.states.all_states import download_sticker_state
import os
from loader import dp
@dp.message_handler(text="/cancel", state=download_sticker_state)
async def cancel(message: types.Message, state: FSMContext):
await message.answer("✅ Функция остановлена!\n\nВведите новую команду /commands")
await state.finish()
@dp.message_handler(Command("download_sticker"), state=None)
async def get_sticker_id(message: types.Message):
await message.answer('''Вы зашли в функцию по загрузке стикеров.\n
Скиньте стикер боту!\n
❗️ Всё, что вы будете сюда скидывать автоматически будут обрабатываться в этой функции.
❗️ Если вам нужно её остановить, то введите /cancel''')
await download_sticker_state.step1.set()
@dp.message_handler(content_types="sticker", state=download_sticker_state.step1)
async def get_sticker_id_send(message: types.Message):
    if message.sticker.is_animated:
        await message.answer("❗️ Загрузка анимированных стикеров не работает!")
    else:
stickerpack_name = message.sticker.set_name
file_id = message.sticker.file_unique_id
await message.sticker.download(f"./handlers/download_sticker/temp/{stickerpack_name} - @{file_id}.png")
await message.reply_document(types.InputFile(f"./handlers/download_sticker/temp/{stickerpack_name} - @{file_id}.png"))
os.remove(f"./handlers/download_sticker/temp/{stickerpack_name} - @{file_id}.png")
|
python
|
import csv
import argparse
import enum
import sys
from normalizer import normalizer
from common import common
def RepresentsFloat(val):
try:
float(val)
return True
except ValueError:
return False
if __name__ == "__main__":
ft_type = enum.Enum("ft_type", ("train", "valid"))
parser = argparse.ArgumentParser(
description='Hazır veri setinde yer alan yorumları FastText kütüphanesi kullanımı için etiketleyerek kaydeder.')
parser.add_argument('--ft_type', type=str, nargs='?',
choices=tuple(t.name for t in ft_type),
default=ft_type.train.name,
help='FastText öğrenmesi esnasında kullanım tipini giriniz')
parser.add_argument('--start_line', type=int, nargs='?', const=2,
default=2,
help='Okumaya başlanacak satır sayısını giriniz')
args = parser.parse_args()
inputs = []
with open(f"dataset/sentiment_data.csv", 'r') as f:
reader = csv.reader(f)
        for _ in range(args.start_line - 1):
            next(reader, None)
for row in reader:
rate_is_convertable = RepresentsFloat(row[1])
if rate_is_convertable:
label = "__label__"
comment = normalizer.normalize(row[2])
rate = float(row[1])
if rate == 0:
label += "negative"
elif rate == 1:
label += "positive"
else:
label += "notr"
inputs.append(f"{label} {comment}")
common.colored_print(
common.bcolors.WARNING, f"All items {len(inputs)} are labeled. {args.ft_type} file creation is starting...")
with open(f'dataset/comments.{args.ft_type}', 'a') as f:
for item in inputs:
f.write(f"{item}\n")
common.colored_print(common.bcolors.OKBLUE, "=== COMPLETED ===")
|
python
|
import os
import sys
import json
import tweepy
import requests
import pandas as pd
from defipulse import DefiPulse
from coingecko import CoinGecko
from subprocess import call
# Twitter API credentials are read from environment variables
consumer_key = os.environ.get('TWITTER_CONSUMER_KEY', 'ap-northeast-1')
consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET', 'ap-northeast-1')
access_token = os.environ.get('TWITTER_ACCESS_TOKEN', 'ap-northeast-1')
access_secret = os.environ.get('TWITTER_ACCESS_SECRET', 'ap-northeast-1')
def rates(token):
obj = DefiPulse()
rates = obj.getRates(token)
names = ['Maker', 'Compound', 'Aave']
tweet = "Current DeFi Rates for {} #DeFi #Ethereum\n".format(token)
for name in names:
tweet = tweet + "{0}, lend: {1}%, borrow: {2}%\n".format(name, rates['rates'][name]['lend']['rate'], rates['rates'][name]['borrow']['rate'])
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if tweet is not None:
print(tweet)
api.update_status(tweet)
else:
print('empty tweet')
except Exception as e:
print(e)
def prices():
obj = DefiPulse()
projects, names = obj.getProjects()
# print(' '.join([project['name'] for project in projects]))
tweet = "Current DeFi Top3 in TVL/USD #DeFi #Ethereum\n"
for project in projects[:3]:
tweet = tweet + "Name: {0}, tvlUSD: {1}, USD 1day relative {2}%\n".format(project['name'], project['value']['tvl']['USD']['value'], project['value']['tvl']['USD']['relative_1d'])
# tweet = 'Name: {0}, tvlUSD: {1}, USD 1day relative {2}%, tvlETH: {3}, ETH 1day relative {4}%'.format(project['name'], project['value']['tvl']['USD']['value'], project['value']['tvl']['USD']['relative_1d'], project['value']['tvl']['ETH']['value'], project['value']['tvl']['ETH']['relative_1d'])
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if tweet is not None:
print(tweet)
api.update_status(tweet)
else:
print('empty tweet')
except Exception as e:
print(e)
def tvl(coin, vs_currency, days, period='1w'):
obj = DefiPulse()
tvls = obj.getTVL(period)
obj1 = CoinGecko()
df = obj1.getCoinVolume(coin, vs_currency, days)
path1 = obj.drawTVLinUSD(tvls, df)
path2 = obj.drawTVLinETH(tvls, df)
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path1):
api.update_with_media(filename=path1, status='Eth trading volume and Total Value Locked in USD #DeFi #Ethereum')
else:
print('empty tweet')
if os.path.exists(path2):
api.update_with_media(filename=path2, status='Eth trading volume and Total Value Locked in ETH #DeFi #Ethereum')
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def tokenprices(coins, vs_currency, days):
obj = CoinGecko()
df = pd.DataFrame()
for coin in coins:
y = obj.getCoinData(coin, vs_currency, days)
df = pd.concat([df, y[['Close']]], axis=1, sort=True, join='outer')
return df
def draws(period='1w'):
tokens = ['Uniswap', 'Maker', 'Aave', 'Compound', 'Synthetix']
obj = DefiPulse()
path = obj.drawPercent(tokens, period)
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path):
api.update_with_media(filename=path, status='Weekly Total Value Lock change in DefiPulse #DeFi #Ethereum')
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def debts():
obj = DefiPulse()
path = obj.drawDebt()
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path):
api.update_with_media(filename=path, status='Weekly Outstanding Debt USD in DefiPulse #DeFi #Ethereum')
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def tweet_with_image(path, tweet):
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path):
api.update_with_media(filename=path, status=tweet)
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def lambda_handler(event, context):
if event['operation'] == 'rates':
token = event['token']
rates(token)
elif event['operation'] == 'prices':
prices()
elif event['operation'] == 'tvl':
token = 'ethereum'
tvl(token, 'usd', '7')
elif event['operation'] == 'draws':
draws()
elif event['operation'] == 'debts':
debts()
elif event['operation'] == 'govtokens':
coins = ['bitcoin', 'ethereum', 'maker', 'uniswap', 'compound-governance-token', 'havven', 'aave', 'yearn-finance']
tickers = ['BTC', 'ETH', 'MKR', 'UNI', 'COMP', 'SNX', 'AAVE', 'YFI']
df = tokenprices(coins, 'usd', '7')
df.columns = tickers
df.dropna(how='any', inplace=True)
df_t = df.copy()
df_t /= df.loc[df.index[0]]
obj = CoinGecko()
path = obj.draw(df_t, 'Weekly_Governance_Token_Price_Change')
tweet = 'Weekly Governance Token Price Change #DeFi #Ethereum'
tweet_with_image(path, tweet)
elif event['operation'] == 'corrtokens':
coins = ['bitcoin', 'ethereum', 'maker', 'uniswap', 'compound-governance-token', 'havven', 'aave', 'yearn-finance']
tickers = ['BTC', 'ETH', 'MKR', 'UNI', 'COMP', 'SNX', 'AAVE', 'YFI']
df = tokenprices(coins, 'usd', '7')
df.columns = tickers
df.dropna(how='any', inplace=True)
df_c = df.copy()
df_corr = pd.DataFrame()
for t in tickers:
df_c['pct_' + t] = df_c.loc[:, t].pct_change(1).fillna(df_c[t].pct_change(1).median())
df_c['rol_' + t] = df_c.loc[:, 'pct_' + t].rolling(7).sum().fillna(df_c['pct_' + t].rolling(7).sum().median())
pd.concat([df_corr, df_c['rol_' + t]], axis=1, sort=True, join='outer')
df_corr = df_c.loc[:, df_c.columns.str.contains('rol')]
df_corr.columns = tickers
obj = CoinGecko()
path = obj.draw(df_corr[7:], 'Rolling_7-days_change_of_DeFi_and_Crypto')
tweet = 'Rolling 7 days change of DeFi and Crypto(%) #DeFi #Ethereum'
tweet_with_image(path, tweet)
# call lambda_handler
if __name__ == "__main__":
lambda_handler(json.loads(sys.argv[1]), {})
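    # Example invocation (script name is whatever this file is saved as):
    #     python <this_script>.py '{"operation": "prices"}'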
|
python
|
#
# Copyright (c) 2021 Incisive Technology Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib
import pytest
from hikaru import HikaruDocumentBase, set_default_release
from hikaru.model.rel_1_16.versions import versions
def beginning():
set_default_release('rel_1_16')
def ending():
pass
@pytest.fixture(scope='module', autouse=True)
def setup():
beginning()
yield 1
ending()
test_classes = []
for version in versions:
mod = importlib.import_module(f".{version}", f"hikaru.model.rel_1_16.{version}")
for c in vars(mod).values():
if (type(c) is type and issubclass(c, HikaruDocumentBase) and
c is not HikaruDocumentBase):
test_classes.append(c)
@pytest.mark.parametrize('cls', test_classes)
def test_docclass(cls):
assert hasattr(cls, 'apiVersion'), f"Class {cls.__name__} doesn't have apiVersion"
assert cls.apiVersion, f"Class {cls.__name__} has no value for apiVersion"
assert hasattr(cls, 'kind'), f"Class {cls.__name__} doesn't have kind"
assert cls.kind, f"Class {cls.__name__} has no value for kind"
if __name__ == "__main__":
beginning()
try:
for cls in test_classes:
test_docclass(cls)
finally:
ending()
|
python
|
"""Remote apps
=============
"""
|
python
|
import os
import sqlite3
'''
THING I THINK I'M MISSING
The completed date shouldn't be a boolean because it can be overdue.
It was an integer before so it could be set to the date in which it was marked completed.
This should be changed back.
'''
class Equipment:
def __init__(self, pk=-1, name=''):
self.pk = pk
self.name = name
class MaintenanceItem:
def __init__(self, pk=-1, name='', numdays=-1, equipment=None):
self.pk = pk
self.name = name
self.numdays = numdays
self.equipment = equipment
class MaintenanceDate:
def __init__(self, pk=-1, startdate=0, completedate=0, iscomplete=False, maintenanceitem=None):
self.pk = pk
self.startdate = startdate
self.completedate = completedate
self.iscomplete = iscomplete
self.maintenanceitem = maintenanceitem
class DBManager:
def __init__(self, name):
exists = False
if os.path.isfile(name):
exists = True
self.conn = sqlite3.connect(name)
self.cursor = self.conn.cursor()
if not exists:
self.setup_database()
def setup_database(self):
with open('db_setup.sql', 'r') as f:
db_instructions = f.read()
self.conn.executescript(db_instructions)
# Equipment
def get_all_equipment(self):
self.cursor.execute('''
SELECT
pk,
name
FROM
equipment
ORDER BY
name DESC''') # this is not a mistake... blame treeview
db_eq = self.cursor.fetchall()
equipment_list = []
for eq in db_eq:
equipment_list.append(Equipment(eq[0], eq[1]))
return equipment_list
def get_equipment(self, name):
self.cursor.execute("SELECT pk, name FROM equipment WHERE name=?", (name,))
db_eq = self.cursor.fetchone()
        if db_eq is None:
return None
return Equipment(db_eq[0], db_eq[1])
def insert_equipment(self, name):
try:
self.cursor.execute("INSERT INTO equipment(name) VALUES(?);", (name,))
self.conn.commit()
except sqlite3.IntegrityError:
return None
return self.get_equipment(name)
    # Maintenance Items
def get_all_maintenance_items(self, equipment):
self.cursor.execute('''
SELECT
pk,
name,
numdays,
equipmentid
FROM
maintenanceitem
WHERE
equipmentid=?''',
(equipment.pk,))
db_mi = self.cursor.fetchall()
mi_list = []
for mi in db_mi:
mi_list.append(MaintenanceItem(mi[0], mi[1], mi[2], equipment))
return mi_list
def insert_maintenance_item(self, maintenance_name, numdays, equipment):
self.cursor.execute('''
INSERT INTO
maintenanceitem(
name,
numdays,
equipmentid
)
VALUES(?,?,?);''',
(maintenance_name, numdays, equipment.pk))
self.conn.commit()
pk = self.get_maintenance_item_pk(maintenance_name, numdays, equipment.pk)
        if pk is None:
return None
return MaintenanceItem(pk, maintenance_name, numdays, equipment)
def get_maintenance_item_pk(self, name, numdays, equip_pk):
self.cursor.execute( '''
SELECT
pk
FROM
maintenanceitem
WHERE
name=?
AND
numdays=?
AND
equipmentid=?''',
(name, numdays, equip_pk,))
op = self.cursor.fetchone()
        if op is None:
return None
return op[0]
def get_all_maintenance_dates(self, m_item: MaintenanceItem):
self.cursor.execute('''
SELECT
pk,
startdate,
completedate,
iscomplete
FROM
maintenancedate
WHERE
maintenanceid=?
ORDER BY
startdate DESC''',
(m_item.pk,))
db_md = self.cursor.fetchall()
md_list = []
for md in db_md:
print('yee haw')
print(md)
print(md[1])
md_list.append(MaintenanceDate(md[0], md[1], md[2], md[3], m_item))
return md_list
def insert_maintenance_date(self, m_item: MaintenanceItem, startdate):
print(startdate)
self.cursor.execute('''
INSERT INTO
maintenancedate(
startdate,
iscomplete,
maintenanceid
)
VALUES(?,?,?);''',
(startdate, False, m_item.pk))
self.conn.commit()
pass
# pk = self.get_maintenance_item_pk(maintenance_name, numdays, equipment.pk)
# if pk == None:
# return None
# return MaintenanceItem(pk, maintenance_name, numdays, equipment)
def get_maintenance_date_pk(self, startdate, numdays):
pass
def set_completed(self, m_date: MaintenanceDate, completed: bool):
self.cursor.execute('''
UPDATE
maintenancedate
SET
                iscomplete = ?
WHERE
pk = ?''',
(completed, m_date.pk,))
self.conn.commit()
def close(self):
self.conn.close()
# BE CAREFUL MY DUDE
def drop_all(self):
self.cursor.execute("DROP TABLE IF EXISTS equipment")
self.cursor.execute("DROP TABLE IF EXISTS maintenanceitem")
self.cursor.execute("DROP TABLE IF EXISTS maintenancedate")
|
python
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
from collections import OrderedDict
import six
class ScholarshipItem(Item):
University = Field()
Program = Field()
Degree = Field()
Duration = Field()
Instruction_Language = Field()
Tuition_Fee_RMB = Field()
Starting_Date = Field()
Application_Deadline = Field()
pass
|
python
|
import boto3
import csv
profile = "default"
def get_instance(instance_name):
ec2 = boto3.resource('ec2')
return ec2.instances.filter(Filters=[{'Name': 'tag:Name', 'Values': [instance_name]}])
boto3.setup_default_session(profile_name=profile)
ec2 = boto3.client('ec2')
ec2_list = []
sg_name_dict = {}
# dict all {'sg_name': 'sg_id'} on aws
response = ec2.describe_security_groups()
for sg in response['SecurityGroups']:
sg_name = sg['GroupName']
    sg_id = sg.get('GroupId', "")
sg_name_dict[sg_name] = sg_id
# read csv file
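# Expected layout (inferred from the loop below): the first row is a header,
# column 0 holds the instance Name tag, and the remaining columns hold the
# names of the security groups to attach.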
with open('input.csv', 'r') as input_file:
reader = csv.reader(input_file)
rows = [row for row in reader]
for server_list in rows[1:]:
instance_name = server_list[0]
each_instance = get_instance(instance_name)
all_sg_ids = []
sg_list = []
for sg in server_list[1:]:
if sg:
sg_list.append(sg)
all_sg_ids.append(sg_name_dict.get(sg, ''))
# modify sg
try:
for i in each_instance:
i.modify_attribute(Groups=all_sg_ids)
print(instance_name + ' replace security group successfully: ' + ', '.join(sg_list))
except:
print(instance_name + ': unable to replace security group. The security group you typed does not exist')
|
python
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import struct
from uuid import uuid4
from xos.exceptions import *
from backupoperation_decl import *
class BackupOperation(BackupOperation_decl):
class Meta:
proxy = True
def save(self, *args, **kwargs):
if not self.uuid:
self.uuid = uuid4()
super(BackupOperation, self).save(*args, **kwargs)
|
python
|
from ..resources.resource import Resource
import requests
from requests.auth import HTTPBasicAuth
class Suppliers(Resource):
def __init__(self):
super().__init__("suppliers")
def delete(self, id):
raise NotImplementedError("Not possible to post a warehouse")
|
python
|
def tree(x):
print("\n".join([f"{'*'*(2* n + 1):^{2*x+1}}" for n in range(x)]))
def trunk(n):
for i in range(n):
for j in range(n-1):
print(' ', end=' ')
print('***')
tree(1)
trunk(3)
|
python
|
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import messagebox
import algoritmo
import aicSpider
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def switch_frame(self, frame_class):
"""Destroi frame atual e cria o novo."""
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.grid()
# First window class
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
master.title("Projeto AIC 3")
        #### Center the window on the screen
larguraDaJanela = 300
alturaDaJanela = 200
larguraDaTela = master.winfo_screenwidth()
alturaDaTela = master.winfo_screenheight()
Xjanela = (larguraDaTela/2) - (larguraDaJanela/2)
Yjanela = (alturaDaTela/2) - (alturaDaJanela/2)
master.geometry("%dx%d+%d+%d" % (larguraDaJanela, alturaDaJanela, Xjanela,Yjanela))
############################
        # Variables used to lay out the widget positions in the window automatically
j = 0
i = 0
        ########## Memory size input
textoTamMemoria = tk.Label(self, text="Tamanho da memória: ")
textoTamMemoria.grid(column=i, row=j,padx=5)
i += 1
entradaTamMemoria = tk.Entry(self,width=10)
entradaTamMemoria.grid(column=i, row=j,pady=25)
j += 1
        ############## Heuristic options
i = 0
textoOpcaoHeuristica = tk.Label(self, text="Heurística:")
textoOpcaoHeuristica.grid(column=i, row=j)
i += 1
#Menu "bolinha"
##Varíavel que recebe opção escolhida
selected = tk.IntVar()
optionFirst = tk.Radiobutton(self,text='First Fit', value=1,variable = selected)
optionFirst.grid(column=i, row=j)
j+=1
optionBest = tk.Radiobutton(self,text='Best Fit', value=2,variable = selected)
optionBest.grid(column=i, row=j)
j+=1
optionWorst = tk.Radiobutton(self,text='Worst Fit', value=3,variable = selected)
optionWorst.grid(column=i, row=j)
j+=1
        ## Algorithm start-up
def botaoPressionado(PageTwo):
global modo
modo = selected.get()
global tamMemoria
tamMemoria = int(entradaTamMemoria.get())
if (tamMemoria < 200):
messagebox.showinfo('Erro!', 'O valor de memória deve ser maior que 200.')
return
if (tamMemoria > 1024):
messagebox.showinfo('Erro!', 'O valor de memória deve ser menor que 1024.')
return
master.switch_frame(PageTwo)
        # Button that starts the algorithm
botaoInicio = tk.Button(self, text="Iniciar", command=lambda: botaoPressionado(PageTwo))
botaoInicio.grid(column=i, row=j,pady=15)
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
master.title("Memória Tempo-Real")
        ### Center the window on the screen
larguraDaJanela = 1000
alturaDaJanela = 300
larguraDaTela = master.winfo_screenwidth()
alturaDaTela = master.winfo_screenheight()
Xjanela = (larguraDaTela/2) - (larguraDaJanela/2)
Yjanela = (alturaDaTela/2) - (alturaDaJanela/2)
master.geometry("%dx%d+%d+%d" % (larguraDaJanela, alturaDaJanela, Xjanela,Yjanela))
##########################
        # Function that derives a color from the process position and size
def geraCor(posicaoProcesso, tamProcesso):
codigoCor = '#'+str(abs(tamProcesso - posicaoProcesso))
for i in range(len(codigoCor),7):
codigoCor = codigoCor + "f"
            # Returns a six-digit hex color code in the form "#??????"
return codigoCor
#Criação do "canvas" -> Ambiente de desenho
canvas = tk.Canvas(self, width=larguraDaJanela, height=100)
        # Create the lines that make up the memory bar
def criaLinhas():
lista = []
i, x, y, y1 = 0, 20, 1, 80
while (x < (larguraDaJanela-20)):
lista.append(canvas.create_line(x, y, x, y1, fill="#a0a0a0"))
x += 1
global numeroDeLinhasMemoria
numeroDeLinhasMemoria = len(lista)
return lista
        #### Legend ########################
if(modo == 1):
textoHeuristica = tk.Label(self, text="First Fit",font = "Helvetica 16 bold")
textoHeuristica.grid(column=1,row=0,padx=5)
if(modo == 2):
textoHeuristica = tk.Label(self, text="Best Fit",font = "Helvetica 16 bold")
textoHeuristica.grid(column=1,row=0,padx=5)
if(modo == 3):
textoHeuristica = tk.Label(self, text="Worst Fit",font = "Helvetica 16 bold")
textoHeuristica.grid(column=1,row=0,padx=5)
textoLivre = tk.Label(self, text="(Cinza) Memória livre",font = "Helvetica 16 bold")
textoLivre.grid(column=0, row=2,padx=5, pady=10)
textoOcupada = tk.Label(self, text="(Azul) Memória Ocupada", font= "Helvetica 16 bold")
textoOcupada.grid(column=2, row=2,padx=5, pady=10)
######################################
        ## Fill the memory view with the inserted process
        ## Needs the process size, the memory size and the position in memory
def preencheMemoria(posicaoProcesso, tamProcesso):
if(tamProcesso != 0):
                # Compute how many lines must be filled
taxaDeLinhas = tamProcesso/tamMemoria
numeroDeLinhas = taxaDeLinhas * len(listaDeLinhas)
numeroDeLinhas = (int(numeroDeLinhas))+2
                # Find out how much memory each drawn line represents
pesoLinha = tamMemoria/len(listaDeLinhas)
                # Find out where in memory the painting of the lines must start
posicaoMemoria = posicaoProcesso/pesoLinha
posicaoMemoria = (int(posicaoMemoria))+2
                # Paint from point range("X", y) up to point range(x, "Y")
cor = geraCor(posicaoProcesso, tamProcesso)
for i in range(posicaoMemoria,(posicaoMemoria+numeroDeLinhas)):
canvas.itemconfig(i, fill=cor)
if(posicaoProcesso == 0):
for i in range(0,10):
canvas.itemconfig(i, fill=cor)
# print("Tamanho da Memoria: " + str(tamMemoria))
# print("Tamanho do processo: " + str(tamProcesso))
# print("Peso da Linha: " + str(pesoLinha))
# print("Posição do processo: " + str(posicaoProcesso))
# print("Numero de Linhas total: " + str(len(listaDeLinhas)))
# print("Posição na memória: " + str(posicaoMemoria))
# print("Numero de Linhas pra pintar: " + str(numeroDeLinhas)
def limpaMemoria():
for i in range(0,numeroDeLinhasMemoria):
canvas.itemconfig(i, fill='#a0a0a0')
        ##### Control buttons ######
        # Shared handler for the buttons
def pressionado(listaDeEstados, botao):
global momento
if(botao == "proximo"):
if (momento < len(listaDeEstados)-1):
momento += 1
if((botao == "anterior") and (momento != 0)):
if (momento > 0):
momento -= 1
if(botao == "inicio"):
momento = 0
            # Gets the list for the current moment: [clock, [startPosition, processSize], [pos2, size2]]
estadoAtual = listaDeEstados[momento]
            # Assign each value to its respective variable
            ## Get and display the current clock
clock = estadoAtual[0]
textoClock = tk.Label(self, text="Clock "+str(clock),font = "Helvetica 16 bold")
textoClock.grid(column=2,row=0,padx=5)
i = 1
limpaMemoria()
while (i <= len(estadoAtual)-1):
posicaoProcesso = estadoAtual[i][0]
tamProcesso = estadoAtual[i][1]
                # Fill the memory view with the given data
preencheMemoria(posicaoProcesso, tamProcesso)
i += 1
#Função Executada quando é pressionado o Botao "Proximo"
def pressionadoInicio(listaDeEstados):
botao = "proximo"
pressionado(listaDeEstados,botao)
def pressionadoProximo(listaDeEstados):
botao = "proximo"
pressionado(listaDeEstados,botao)
#Função Executada quando é pressionado o Botao "Anterior"
def pressionadoAnterior(listaDeEstados):
botao = "anterior"
pressionado(listaDeEstados,botao)
global listaDeLinhas
listaDeLinhas = criaLinhas()
        # Build the list with every process entry and exit moment
global matrizGeral
matrizGeral = algoritmo.main(tamMemoria)
listaDeEstados = matrizGeral[modo-1][0]
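        # Prepend an empty state (clock 0, no process) so the display starts blank.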
listaDeEstados.insert(0,[0,[0,0]])
        # Moment (current clock)
global momento
momento = 0
        # Button declarations (lambda needed to pass parameters)
botaoInicio = tk.Button(self, text="Inicio",command=lambda: pressionado(listaDeEstados,"inicio"))
botaoProximo = tk.Button(self, text="Proximo",command=lambda: pressionado(listaDeEstados, "proximo"))
botaoAnterior = tk.Button(self, text="Anterior",command=lambda: pressionado(listaDeEstados, "anterior"))
botaoInicio.grid(column=1,row=3,pady=10)
botaoProximo.grid(column=2,row=3,pady=10)
botaoAnterior.grid(column=0,row=3,pady=10)
        # Handler for the button that opens the matplotlib charts
def pressionadoGrafico(PageThree):
            # Option 1: open a tkinter window that shows the matplotlib image
#master.switch_frame(PageThree)
            # Option 2: call matplotlib directly
self.master.destroy()
aicSpider.main(matrizGeral)
botaoGraficos = tk.Button(self, text="Ir Para Gráficos",command=lambda: pressionadoGrafico(PageThree))
botaoGraficos.grid(column=1,row=4,pady=5)
        # Canvas position
canvas.grid(columnspan=3,row=1)
# Attempt at a third tkinter window
class PageThree(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
master.title("Gráfico de resultados")
        ### Center the window on the screen
larguraDaJanela = 300
alturaDaJanela = 300
larguraDaTela = master.winfo_screenwidth()
alturaDaTela = master.winfo_screenheight()
Xjanela = (larguraDaTela/2) - (larguraDaJanela/2)
Yjanela = (alturaDaTela/2) - (alturaDaJanela/2)
master.geometry("%dx%d+%d+%d" % (larguraDaJanela, alturaDaJanela, Xjanela,Yjanela))
##########################
aicSpider.main()
#img = aicSpider.main()
label = tk.Label(self, text="This is page 2")
label.grid(pady=10)
button = tk.Button(self, text="Go to the start page",
command=lambda: controller.show_frame("StartPage"))
button.grid()
if __name__ == "__main__":
app = SampleApp()
app.mainloop()
# Top menu
# menu = Menu(self)
# new_item = Menu(menu)
# new_item.add_command(label='New')
# new_item.add_separator()
# new_item.add_command(label='Edit')
# menu.add_cascade(label='File', menu=new_item)
# self.config(menu=menu)
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("following", views.following, name="following"),
path("post-message", views.postmessage, name="postmessage"),
path("like/<int:id>", views.like, name="like"),
path("profile/<str:username>", views.profile, name="profile"),
path("follow/<int:id>", views.follow, name="follow"),
path("editpost/<int:id>", views.editpost, name="editpost")
]
|
python
|
import argparse
import os
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# setup
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--save-interval', type=int, default=10, metavar='N',
help='how many batches to wait before checkpointing')
parser.add_argument('--resume', action='store_true', default=False,
help='resume training from checkpoint')
args = parser.parse_args()
use_cuda = torch.cuda.is_available() and not args.no_cuda
device = torch.device('cuda' if use_cuda else 'cpu')
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
# data
data_path = os.path.join(os.path.expanduser('~'), '.torch', 'datasets', 'mnist')
train_data = datasets.MNIST(data_path, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
test_data = datasets.MNIST(data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
train_loader = DataLoader(train_data, batch_size=args.batch_size,
shuffle=True, num_workers=4, pin_memory=True)
test_loader = DataLoader(test_data, batch_size=args.batch_size,
num_workers=4, pin_memory=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Net().to(device)
optimiser = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
if args.resume:
model.load_state_dict(torch.load('model.pth'))
optimiser.load_state_dict(torch.load('optimiser.pth'))
# training
model.train()
train_losses = []
for i, (data, target) in enumerate(train_loader):
data = data.to(device=device, non_blocking=True)
target = target.to(device=device, non_blocking=True)
optimiser.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
train_losses.append(loss.item())
optimiser.step()
if i % 10 == 0:
print(i, loss.item())
torch.save(model.state_dict(), 'model.pth')
torch.save(optimiser.state_dict(), 'optimiser.pth')
torch.save(train_losses, 'train_losses.pth')
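# model.pth and optimiser.pth are the checkpoints that --resume loads at startup.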
# testing
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for data, target in test_loader:
data = data.to(device=device, non_blocking=True)
target = target.to(device=device, non_blocking=True)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_data)
acc = correct / len(test_data)
print(acc, test_loss)
|
python
|
"""Using a class as a decorator demo.
Count the number of times a function is called.
"""
class CallCount:
def __init__(self, function):
self.f = function
self.count = 0
def __call__(self, *args, **kwargs):
self.count = 1
return self.f(*args, **kwargs)
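    # Note: `count` below is a property whose setter accumulates rather than
    # assigns: the initial `self.count = 0` in __init__ hits AttributeError and
    # creates `_count = 0`, and every `self.count = 1` in __call__ adds 1 to it.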
@property
def count(self):
return self._count
@count.setter
def count(self, value):
try:
self._count += value
except AttributeError:
self._count = 0
@CallCount
def hello(name):
print(f"Hello {name}")
if __name__ == "__main__":
hello("Ana")
hello("Annabelle")
hello("Miguel")
hello("Tony")
print(f"The function hello has been called {hello.count} times.")
|
python
|
# Copyright (c) Open-MMLab. All rights reserved.
from mmcv.runner.hooks.hook import HOOKS, Hook
@HOOKS.register_module()
class AlternateTrainingHook(Hook):
# def before_train_iter(self, runner):
def before_train_epoch(self, runner):
runner.model.module.neck.epoch_num = runner._epoch
# if runner._iter < 2000:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# for param_0 in runner.model.module.neck.attention_block_0.parameters():
# param_0.requires_grad = True
# for param_1 in runner.model.module.neck.attention_block_1.parameters():
# param_1.requires_grad = True
# for param_2 in runner.model.module.neck.attention_block_2.parameters():
# param_2.requires_grad = True
# for param_3 in runner.model.module.neck.attention_block_3.parameters():
# param_3.requires_grad = True
# if runner._iter > 2000:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# for param_0 in runner.model.module.neck.attention_block_0.parameters():
# param_0.requires_grad = False
# for param_1 in runner.model.module.neck.attention_block_1.parameters():
# param_1.requires_grad = False
# for param_2 in runner.model.module.neck.attention_block_2.parameters():
# param_2.requires_grad = False
# for param_3 in runner.model.module.neck.attention_block_3.parameters():
# param_3.requires_grad = False
# runner.model.module.neck.attention_block_0.requires_grad = True
# runner.model.module.neck.attention_block_1.requires_grad = True
# runner.model.module.neck.attention_block_2.requires_grad = True
# runner.model.module.neck.attention_block_3.requires_grad = True
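        # Alternating scheme: on even epochs the neck body is trainable while its
        # attention blocks are frozen; on odd epochs the roles are swapped.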
if runner._epoch % 2 == 0:
for param in runner.model.module.neck.parameters():
param.requires_grad = True
for param_0 in runner.model.module.neck.attention_block_0.parameters():
param_0.requires_grad = False
for param_1 in runner.model.module.neck.attention_block_1.parameters():
param_1.requires_grad = False
for param_2 in runner.model.module.neck.attention_block_2.parameters():
param_2.requires_grad = False
for param_3 in runner.model.module.neck.attention_block_3.parameters():
param_3.requires_grad = False
else:
for param in runner.model.module.neck.parameters():
param.requires_grad = False
for param_0 in runner.model.module.neck.attention_block_0.parameters():
param_0.requires_grad = True
for param_1 in runner.model.module.neck.attention_block_1.parameters():
param_1.requires_grad = True
for param_2 in runner.model.module.neck.attention_block_2.parameters():
param_2.requires_grad = True
for param_3 in runner.model.module.neck.attention_block_3.parameters():
param_3.requires_grad = True
# elif runner._iter == 100:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# runner.model.module.neck.attention_block_0.requires_grad = True
# runner.model.module.neck.attention_block_1.requires_grad = True
# runner.model.module.neck.attention_block_2.requires_grad = True
# runner.model.module.neck.attention_block_3.requires_grad = True
#
# elif runner._iter == 150:
# # if runner._epoch % 2 == 0:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = True
# runner.model.module.neck.attention_block_0.requires_grad = False
# runner.model.module.neck.attention_block_1.requires_grad = False
# runner.model.module.neck.attention_block_2.requires_grad = False
# runner.model.module.neck.attention_block_3.requires_grad = False
#
# elif runner._iter == 200:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# runner.model.module.neck.attention_block_0.requires_grad = True
# runner.model.module.neck.attention_block_1.requires_grad = True
# runner.model.module.neck.attention_block_2.requires_grad = True
# runner.model.module.neck.attention_block_3.requires_grad = True
# elif runner._iter == 250:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = True
# runner.model.module.neck.attention_block_0.requires_grad = False
# runner.model.module.neck.attention_block_1.requires_grad = False
# runner.model.module.neck.attention_block_2.requires_grad = False
# runner.model.module.neck.attention_block_3.requires_grad = False
|
python
|
import os
import logging
from flask import Flask
from slack import WebClient
from slackeventsapi import SlackEventAdapter
# Initialize a Flask app to host the events adapter
app = Flask(__name__)
# Create an events adapter and register it to an endpoint in the slack app for event ingestion.
slack_events_adapter = SlackEventAdapter(os.environ.get("SLACK_EVENTS_TOKEN"), "/slack/events", app)
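# Note: SLACK_EVENTS_TOKEN above is passed where SlackEventAdapter expects the
# app's signing secret, and SLACK_TOKEN (used in send_message below) is the bot
# token for the Web API client; both must be set in the environment.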
# When a 'message' event is detected by the events adapter, forward that payload
# to this function.
@slack_events_adapter.on("message")
def message(payload):
"""Parse the message event, and if the activation string is in the text,
send a reply
"""
# Get the event data from the payload
event = payload.get("event", {})
# Get the text from the event that came through
text = event.get("text")
# Check and see if the activation phrase was in the text of the message.
if "hi" == text.lower():
# Since the activation phrase was met
# get the channel ID that the event was executed on
channel_id = event.get("channel")
# Execute the send message
return send_message(channel_id, "Hello!")
if "how are you" in text.lower():
channel_id = event.get("channel")
return send_message(channel_id, "Hello!")
def send_message(channel, text):
# Initialize a Web API client
slack_web_client = WebClient(token=os.environ.get("SLACK_TOKEN"))
# message payload
message = {
"channel": channel,
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": (text),
},
},
],
}
# Post the onboarding message in Slack
slack_web_client.chat_postMessage(**message)
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# Run our app on our externally facing IP address on port 3000 instead of
# running it on localhost, which is traditional for development.
app.run(host='0.0.0.0', port=3000)
|
python
|
import os
import sys
import glob
import argparse
import numpy as np
from PIL import Image
from Utility import *
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.models import Model, load_model
from keras.layers import Dense, GlobalAveragePooling2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import adam, sgd
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import image
from sklearn.metrics import roc_curve, auc
FC_SIZE = 1024
NB_VGG_LAYERS_TO_FREEZE = 20
IM_WIDTH, IM_HEIGHT = 256, 256
def setup_to_transfer_learn(model, base_model):
for layer in base_model.layers:
layer.trainable = False
model.compile(optimizer=adam(), loss='binary_crossentropy', metrics=['accuracy'])
def add_new_last_layer(base_model, nb_classes):
x = base_model.output
x = BatchNormalization()(x)
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu', kernel_initializer='he_normal')(x)
predictions = Dense(nb_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
return model
def setup_to_finetune(model):
for layer in model.layers[:NB_VGG_LAYERS_TO_FREEZE]:
layer.trainable = False
for layer in model.layers[NB_VGG_LAYERS_TO_FREEZE:]:
layer.trainable = True
model.compile(optimizer=sgd(lr=1e-5, momentum=0.9), loss='binary_crossentropy', metrics=['accuracy'])
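# Two-phase schedule used in train() below: the transfer-learning phase trains
# only the new head with Adam while the VGG16 base stays frozen, then the
# fine-tuning phase unfreezes layers past NB_VGG_LAYERS_TO_FREEZE and continues
# with a small SGD learning rate.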
def train(args):
nb_train_samples = get_nb_files(args.train_dir)
nb_classes = len(glob.glob(args.train_dir + "/*"))
nb_val_samples = get_nb_files(args.val_dir)
batch_size = int(args.batch_size)
# MARK :- prepare train data generator
train_datagen = ImageDataGenerator(
preprocessing_function=None,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
featurewise_center=args.featurewise_center
)
# MARK :- fit train data generator for featurewise_center
if args.featurewise_center:
train_x, _ = get_data(args.train_dir, tar_size=(IM_WIDTH, IM_HEIGHT, 3))
        train_datagen.fit(train_x / 255)
# MARK :- prepare valid data generator
valid_datagen = ImageDataGenerator(
preprocessing_function=None,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
featurewise_center=args.featurewise_center
)
# MARK :- fit valid data generator for featurewise_center
if args.featurewise_center:
valid_x, _ = get_data(args.val_dir, tar_size=(IM_WIDTH, IM_HEIGHT, 3))
        valid_datagen.fit(valid_x / 255)
# MARK :- prepare train and valid generators
train_generator = train_datagen.flow_from_directory(
args.train_dir,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
class_mode='categorical'
)
validation_generator = valid_datagen.flow_from_directory(
args.val_dir,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
class_mode='categorical'
)
# MARK :- prepare base model
base_model = VGG16(
weights='imagenet',
include_top=False, classes=2,
input_shape=(IM_WIDTH, IM_HEIGHT, 3)
)
# MARK :- setup model to transfer learning
model = add_new_last_layer(base_model, nb_classes)
setup_to_transfer_learn(model, base_model)
# MARK :- prepare VA and VL checkpoints for transfer learning
best_tl_va = ModelCheckpoint(
'Models/tl_va_' + args.output_model_file,
monitor='val_acc',
mode='max',
verbose=1,
save_best_only=True
)
best_tl_vl = ModelCheckpoint(
'Models/tl_vl_' + args.output_model_file,
monitor='val_loss',
mode='min',
verbose=1,
save_best_only=True
)
# MARK :- fit model with transfer learning
history_tl = model.fit_generator(
train_generator,
steps_per_epoch=int(round(nb_train_samples / batch_size)),
epochs=int(args.tl_epoch),
validation_data=validation_generator,
validation_steps=int(round(nb_val_samples / batch_size)),
class_weight='auto',
callbacks=[best_tl_va, best_tl_vl]
)
plot_training(history_tl, 'tl_history.png')
# MARK :- load best transfer learning model and setup it
model = load_model(filepath=args.tl_model)
setup_to_finetune(model)
# MARK :- prepare VA and VL checkpoints for fine tuning
best_ft_va = ModelCheckpoint(
'Models/ft_va_' + args.output_model_file,
monitor='val_acc',
mode='max',
verbose=1,
save_best_only=True
)
best_ft_vl = ModelCheckpoint(
'Models/ft_vl_' + args.output_model_file,
monitor='val_loss',
mode='min',
verbose=1,
save_best_only=True
)
# MARK :- fit model with fine tuning
history_ft = model.fit_generator(
train_generator,
steps_per_epoch=int(round(nb_train_samples / batch_size)),
epochs=int(args.ft_epoch),
validation_data=validation_generator,
validation_steps=int(round(nb_val_samples / batch_size)),
class_weight='auto',
callbacks=[best_ft_va, best_ft_vl]
)
plot_training(history_ft, 'ft_history.png')
if __name__ == "__main__":
a = argparse.ArgumentParser()
a.add_argument("--train_dir", default="Dataset/Train")
a.add_argument("--val_dir", default="Dataset/Validation")
a.add_argument("--test_dir", default="Dataset/Test")
a.add_argument("--tl_epoch", default=15)
a.add_argument("--ft_epoch", default=5)
a.add_argument("--batch_size", default=30)
a.add_argument("--output_model_file", default="vgg16.h5")
a.add_argument("--image", help="path to image")
a.add_argument("--ft_model", default="Models/ft_vl_vgg16.h5")
a.add_argument("--tl_model", default="Models/tl_vl_vgg16.h5")
a.add_argument("--featurewise_center", default=False)
a.add_argument("--plot_roc_auc", default=False)
args = a.parse_args()
if args.image is not None:
model = load_model(filepath=args.ft_model)
img = resize_image(Image.open(args.image), (IM_WIDTH, IM_HEIGHT))
x = np.expand_dims(image.img_to_array(img), axis=0)
# x = preprocess_input(x)
preds = model.predict(x)
plot_preds(Image.open(args.image), preds[0])
sys.exit(1)
if args.plot_roc_auc:
model = load_model(filepath=args.ft_model)
test_x, test_y = get_data(args.val_dir, tar_size=(IM_WIDTH, IM_HEIGHT, 3))
pred_test_y = model.predict(test_x).ravel()
fpr, tpr, thresholds = roc_curve(test_y.ravel(), pred_test_y)
auc_score = auc(fpr, tpr)
plot_auc_roc(tpr, fpr, auc_score, "auc_roc_ft_vgg16")
sys.exit(1)
if args.train_dir is None or args.val_dir is None:
a.print_help()
sys.exit(1)
if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
print("directories do not exist")
sys.exit(1)
train(args)
|
python
|
from __future__ import absolute_import
from .base import Model
from .base import DifferentiableModel
class ModelWrapper(Model):
"""Base class for models that wrap other models.
This base class can be used to implement model wrappers
that turn models into new models, for example by preprocessing
the input or modifying the gradient.
Parameters
----------
model : :class:`Model`
The model that is wrapped.
"""
def __init__(self, model):
super(ModelWrapper, self).__init__(
bounds=model.bounds(),
channel_axis=model.channel_axis())
self.wrapped_model = model
def __enter__(self):
assert self.wrapped_model.__enter__() == self.wrapped_model
return self
def __exit__(self, exc_type, exc_value, traceback):
return self.wrapped_model.__exit__(exc_type, exc_value, traceback)
def batch_predictions(self, images):
return self.wrapped_model.batch_predictions(images)
def predictions(self, image):
return self.wrapped_model.predictions(image)
def num_classes(self):
return self.wrapped_model.num_classes()
class DifferentiableModelWrapper(ModelWrapper):
"""Base class for models that wrap other models and provide
gradient methods.
This base class can be used to implement model wrappers
that turn models into new models, for example by preprocessing
the input or modifying the gradient.
Parameters
----------
model : :class:`Model`
The model that is wrapped.
"""
def predictions_and_gradient(self, image, label):
return self.wrapped_model.predictions_and_gradient(image, label)
def gradient(self, image, label):
return self.wrapped_model.gradient(image, label)
def backward(self, gradient, image):
return self.wrapped_model.backward(gradient, image)
class ModelWithoutGradients(ModelWrapper):
"""Turns a model into a model without gradients.
"""
pass
class ModelWithEstimatedGradients(DifferentiableModelWrapper):
"""Turns a model into a model with gradients estimated
by the given gradient estimator.
Parameters
----------
model : :class:`Model`
The model that is wrapped.
gradient_estimator : `callable`
        Callable taking four arguments (pred_fn, image, label, bounds) and
returning the estimated gradients. pred_fn will be the
batch_predictions method of the wrapped model.
"""
def __init__(self, model, gradient_estimator):
super(ModelWithEstimatedGradients, self).__init__(
model=model)
assert callable(gradient_estimator)
self._gradient_estimator = gradient_estimator
def predictions_and_gradient(self, image, label):
predictions = self.predictions(image)
gradient = self.gradient(image, label)
return predictions, gradient
def gradient(self, image, label):
pred_fn = self.batch_predictions
bounds = self.bounds()
return self._gradient_estimator(pred_fn, image, label, bounds)
def backward(self, gradient, image):
raise NotImplementedError
class CompositeModel(DifferentiableModel):
    """Combines predictions of a (black-box) model with a weighted
    combination of gradients from one or more (substitute) models.
Parameters
----------
forward_model : :class:`Model`
The model that should be fooled and will be used for predictions.
    backward_models : list of :class:`Model`
        The models that provide the gradients.
    weights : list of float
        Weights applied to the corresponding backward models' gradients.
"""
def __init__(self, forward_model, backward_models, weights):
bounds = forward_model.bounds()
for backward_model in backward_models:
assert bounds == backward_model.bounds()
channel_axis = forward_model.channel_axis()
for backward_model in backward_models:
assert channel_axis == backward_model.channel_axis()
num_classes = forward_model.num_classes()
for backward_model in backward_models:
assert num_classes == backward_model.num_classes()
super(CompositeModel, self).__init__(
bounds=bounds,
channel_axis=channel_axis)
self.forward_model = forward_model
self.backward_models = backward_models
self._num_classes = num_classes
self.weights = weights
def num_classes(self):
return self._num_classes
def batch_predictions(self, images):
return self.forward_model.batch_predictions(images)
def predictions_and_gradient(self, image, label):
predictions = self.forward_model.predictions(image)
gradient = 0
for i in range(len(self.backward_models)):
gradient += self.weights[i] * self.backward_models[i].gradient(image, label)
# for backward_model in self.backward_models:
# gradient += backward_model.gradient(image, label)
# gradient /= len(self.backward_models)
return predictions, gradient
def gradient(self, image, label):
gradient = 0
for i in range(len(self.backward_models)):
gradient += self.weights[i] * self.backward_models[i].gradient(image, label)
# gradient /= len(self.backward_models)
return gradient
def __enter__(self):
assert self.forward_model.__enter__() == self.forward_model
for backward_model in self.backward_models:
assert backward_model.__enter__() == backward_model
return self
def __exit__(self, exc_type, exc_value, traceback):
r1 = self.forward_model.__exit__(exc_type, exc_value, traceback)
rb = []
for backward_model in self.backward_models:
rb.append(backward_model.__exit__(exc_type, exc_value, traceback))
bNone = True
for r in rb:
if r is not None:
bNone = False
break
if r1 is None and bNone:
return None
return (r1,) + tuple(rb) # pragma: no cover
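# Usage sketch (illustrative, not part of the module): `black_box`,
# `substitute_a` and `substitute_b` stand for Model instances created
# elsewhere; their gradients are mixed 50/50.
#
#     composite = CompositeModel(
#         forward_model=black_box,
#         backward_models=[substitute_a, substitute_b],
#         weights=[0.5, 0.5])
#     with composite:
#         preds, grad = composite.predictions_and_gradient(image, label)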
|
python
|
from jinja2 import Template
import bot_logger
import lang
import models
def help_user(msg):
user = models.User(msg.author.name)
if user.is_registered():
msg.reply(Template(lang.message_help + lang.message_footer).render(
username=msg.author.name, address=user.address))
else:
bot_logger.logger.info('user %s not registered (command : help) ' % msg.author.name)
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
|
python
|
#!/usr/bin/env python
import sys
import random
import itertools
import ast
def nbackseq(n, length, words):
"""Generate n-back balanced sequences
:param n: int
How many characters (including the current one) to look back
to assure no duplicates
:param length: int
The total length of the sequence to produce
:param words: list
A list of words to be used to generate sequences
NOTE: must input words parameter as a literal e.g., '[1, 2]' with the quotes!!
:return: list
A list of solutions where each solution is a list of words of length 'length'
"""
solutions = []
solution_attempts = []
while len(solution_attempts) < len(list(itertools.permutations(words, length))):
solution = random.sample(words, length)
if solution not in solution_attempts:
            # sequences shorter than n are rejected (cf. test_nbackseq); otherwise
            # only full n-length windows are checked, since a trailing window
            # shorter than n can never hold n distinct items
            good = length >= n
            for index in range(len(solution) - n + 1):
                subseq = solution[index: index + n]
                if len(set(subseq)) != n:
                    good = False
                    break
if good:
solutions.append(solution)
solution_attempts.append(solution)
return solutions
def test_nbackseq():
assert nbackseq(2, 1, [1, 2]) == []
assert nbackseq(1, 1, ['a']) == [['a']]
assert nbackseq(2, 2, [1, 2]) == [[1, 2], [2, 1]]
if __name__ == '__main__':
n = int(sys.argv[1])
length = int(sys.argv[2])
try:
words = ast.literal_eval(sys.argv[3])
except:
raise ValueError("'words' parameter needs to be a literal (e.g. '[1, 2]' with the quotes!")
solutions = nbackseq(n, length, words)
print(solutions)
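# Example run (illustrative): `python nbackseq.py 2 3 '[1, 2, 3]'` prints every
# length-3 ordering of [1, 2, 3] in which no item repeats within any window of
# 2 consecutive positions.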
|
python
|
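# Groups the words of `sir` by their last two letters (a crude rhyme check),
# then keeps in `final` only the groups containing at least two words.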
sir = "mere pere droguri mofturi CamIoane"
rime = {}
for i in range(len(sir)):
if (i == 0 or sir[i - 1] == " "):
k = i
if (sir[i] == " "):
if (rime.get(sir[i - 2:i]) == None):
rime[sir[i - 2:i]] = [sir[k:i]]
else:
rime[sir[i - 2:i]].append(sir[k:i])
if (i == len(sir) - 1):
if (rime.get(sir[i - 1:i + 1]) == None):
rime[sir[i - 1:i + 1]] = [sir[k:i + 1]]
else:
rime[sir[i - 1:i + 1]].append(sir[k:i + 1])
final = {}
print(rime)
for j, v in rime.items():
if (len(rime[j]) >= 2):
final[j] = v
print(final)
|
python
|
import os
import sys
import cherrypy
import ConfigParser
import urllib
import urllib2
import simplejson as json
import webtools
import time
import datetime
import random
import pprint
from pyechonest import song as song_api, config
config.TRACE_API_CALLS=True
config.ECHO_NEST_API_KEY='EHY4JJEGIOFA1RCJP'
import collections
import hashlib
catalog='paulify'
rcatalog='id:' + catalog
class Server(object):
def __init__(self, config):
self.production_mode = config.getboolean('settings', 'production')
self.cache_dir = '/lab/mir/data/cache'
self.total = 0;
self.cached = 0;
def search(self, q='', special='', sid='', artist='', title='', callback='', _=''):
if callback:
cherrypy.response.headers['Content-Type']= 'text/javascript'
else:
cherrypy.response.headers['Content-Type']= 'application/json'
print 'total', self.total, 'cached', self.cached, q, callback
response = {}
if len(special) > 0:
results = self.read_from_cache(special)
if results:
results = callback + "(" + results + ")"
return results
else:
response['status'] = 'not_found'
return to_json(response, callback)
elif len(sid) > 0:
result = song_api.Song(sid, buckets=[rcatalog, 'tracks', 'audio_summary'], limit=True, results=1)
results = [result]
elif len(artist) > 0:
results = song_api.search(artist=artist, title=title,\
buckets=[rcatalog, 'tracks', 'audio_summary'], limit=True, results=1)
else:
results = song_api.search(combined=q, \
buckets=[rcatalog, 'tracks', 'audio_summary'], limit=True, results=1)
if len(results) > 0:
track = results[0].get_tracks(catalog)[0]
id = track['id']
results = self.read_from_cache(id)
if results:
print 'cache hit'
else:
print 'cache miss'
response['status'] = 'ok'
t = self.get_track(id)
response['track'] = t
results = to_json(response, None)
self.write_to_cache(id, results)
results = callback + "(" + results + ")"
return results
else:
response['status'] = 'not_found'
return to_json(response, callback)
search.exposed = True
def get_track(self, id):
track = {}
rtrack = fetch_track(id)
pprint.pprint(rtrack)
track['id'] = rtrack['id']
track['artist'] = rtrack['artist']
track['title'] = rtrack['title']
track['audio'] = rtrack['audio']
track['summary'] = rtrack['audio_summary']
track['analysis'] = self.get_analysis(rtrack)
return track
def get_analysis(self, rtrack):
f = urllib.urlopen(rtrack['audio_summary']['analysis_url'])
js = f.read()
f.close()
return json.loads(js)
def read_from_cache(self, id):
full_path = os.path.join(self.cache_dir, id)
if os.path.exists(full_path):
with open(full_path) as f:
return f.read()
else:
return None;
def write_to_cache(self, id, results):
full_path = os.path.join(self.cache_dir, id)
with open(full_path, 'w') as f:
f.write(results)
def fetch_track(trid):
url = 'http://developer.echonest.com/api/v4/track/profile?api_key=N6E4NIOVYMTHNDM8J&format=json&bucket=audio_summary&id=' + trid
f = urllib.urlopen(url)
js = f.read()
print 'json', js
f.close()
response = json.loads(js)
return response['response']['track']
def to_json(dict, callback=None):
results = json.dumps(dict, sort_keys=True, indent = 4)
if callback:
results = callback + "(" + results + ")"
return results
if __name__ == '__main__':
urllib2.install_opener(urllib2.build_opener())
conf_path = os.path.abspath('web.conf')
print 'reading config from', conf_path
cherrypy.config.update(conf_path)
config = ConfigParser.ConfigParser()
config.read(conf_path)
production_mode = config.getboolean('settings', 'production')
current_dir = os.path.dirname(os.path.abspath(__file__))
# Set up site-wide config first so we get a log if errors occur.
if production_mode:
print "Starting in production mode"
cherrypy.config.update({'environment': 'production',
'log.error_file': 'simdemo.log',
'log.screen': True})
else:
print "Starting in development mode"
cherrypy.config.update({'noenvironment': 'production',
'log.error_file': 'site.log',
'log.screen': True})
conf = webtools.get_export_map_for_directory("static")
cherrypy.quickstart(Server(config), '/SongServer', config=conf)
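# Illustrative request once the server is running (endpoint and parameters as
# defined in Server.search above):
#   GET /SongServer/search?q=<artist and title>&callback=cb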
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ppretredit.ui'
#
# Created: Mon Jan 11 21:22:20 2010
# by: PyQt4 UI code generator 4.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_TrainerEditDlg(object):
def setupUi(self, TrainerEditDlg):
TrainerEditDlg.setObjectName("TrainerEditDlg")
TrainerEditDlg.resize(799, 603)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("PPRE.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
TrainerEditDlg.setWindowIcon(icon)
self.chooseTrainer = QtGui.QComboBox(TrainerEditDlg)
self.chooseTrainer.setGeometry(QtCore.QRect(150, 5, 181, 27))
self.chooseTrainer.setObjectName("chooseTrainer")
self.curtrLabel = QtGui.QLabel(TrainerEditDlg)
self.curtrLabel.setGeometry(QtCore.QRect(20, 5, 121, 20))
self.curtrLabel.setObjectName("curtrLabel")
self.maintab = QtGui.QTabWidget(TrainerEditDlg)
self.maintab.setGeometry(QtCore.QRect(30, 55, 731, 541))
self.maintab.setTabPosition(QtGui.QTabWidget.North)
self.maintab.setObjectName("maintab")
self.tab0 = QtGui.QWidget()
self.tab0.setObjectName("tab0")
self.gridLayoutWidget = QtGui.QWidget(self.tab0)
self.gridLayoutWidget.setGeometry(QtCore.QRect(5, 5, 711, 492))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setObjectName("gridLayout")
self.label = QtGui.QLabel(self.gridLayoutWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.class_2 = QtGui.QComboBox(self.gridLayoutWidget)
self.class_2.setObjectName("class_2")
self.gridLayout.addWidget(self.class_2, 3, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 2, 0, 1, 1)
self.label_6 = QtGui.QLabel(self.gridLayoutWidget)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 4, 0, 1, 1)
self.label_7 = QtGui.QLabel(self.gridLayoutWidget)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.gridLayoutWidget)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 6, 0, 1, 1)
self.label_9 = QtGui.QLabel(self.gridLayoutWidget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 7, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.gridLayoutWidget)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 8, 0, 1, 1)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.item1 = QtGui.QComboBox(self.gridLayoutWidget)
self.item1.setObjectName("item1")
self.gridLayout_2.addWidget(self.item1, 0, 0, 1, 1)
self.item3 = QtGui.QComboBox(self.gridLayoutWidget)
self.item3.setObjectName("item3")
self.gridLayout_2.addWidget(self.item3, 1, 0, 1, 1)
self.item2 = QtGui.QComboBox(self.gridLayoutWidget)
self.item2.setObjectName("item2")
self.gridLayout_2.addWidget(self.item2, 0, 1, 1, 1)
self.item4 = QtGui.QComboBox(self.gridLayoutWidget)
self.item4.setObjectName("item4")
self.gridLayout_2.addWidget(self.item4, 1, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_2, 4, 1, 1, 1)
self.label_11 = QtGui.QLabel(self.gridLayoutWidget)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 9, 0, 1, 1)
self.doubleBool = QtGui.QCheckBox(self.gridLayoutWidget)
self.doubleBool.setObjectName("doubleBool")
self.gridLayout.addWidget(self.doubleBool, 9, 1, 1, 1)
self.label_12 = QtGui.QLabel(self.gridLayoutWidget)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 10, 0, 1, 1)
self.label_13 = QtGui.QLabel(self.gridLayoutWidget)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 11, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.gridLayoutWidget)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 12, 0, 1, 1)
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_19 = QtGui.QLabel(self.gridLayoutWidget)
self.label_19.setObjectName("label_19")
self.gridLayout_4.addWidget(self.label_19, 0, 0, 1, 1)
self.label_20 = QtGui.QLabel(self.gridLayoutWidget)
self.label_20.setObjectName("label_20")
self.gridLayout_4.addWidget(self.label_20, 1, 0, 1, 1)
self.itemBool = QtGui.QCheckBox(self.gridLayoutWidget)
self.itemBool.setObjectName("itemBool")
self.gridLayout_4.addWidget(self.itemBool, 0, 1, 1, 1)
self.attackBool = QtGui.QCheckBox(self.gridLayoutWidget)
self.attackBool.setObjectName("attackBool")
self.gridLayout_4.addWidget(self.attackBool, 1, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_4, 2, 1, 1, 1)
self.pokenum = QtGui.QSpinBox(self.gridLayoutWidget)
self.pokenum.setMaximum(6)
self.pokenum.setObjectName("pokenum")
self.gridLayout.addWidget(self.pokenum, 1, 1, 1, 1)
self.uc = QtGui.QSpinBox(self.gridLayoutWidget)
self.uc.setObjectName("uc")
self.gridLayout.addWidget(self.uc, 5, 1, 1, 1)
self.ud = QtGui.QSpinBox(self.gridLayoutWidget)
self.ud.setObjectName("ud")
self.gridLayout.addWidget(self.ud, 6, 1, 1, 1)
self.ue = QtGui.QSpinBox(self.gridLayoutWidget)
self.ue.setObjectName("ue")
self.gridLayout.addWidget(self.ue, 7, 1, 1, 1)
self.uf = QtGui.QSpinBox(self.gridLayoutWidget)
self.uf.setObjectName("uf")
self.gridLayout.addWidget(self.uf, 8, 1, 1, 1)
self.u11 = QtGui.QSpinBox(self.gridLayoutWidget)
self.u11.setObjectName("u11")
self.gridLayout.addWidget(self.u11, 10, 1, 1, 1)
self.u12 = QtGui.QSpinBox(self.gridLayoutWidget)
self.u12.setObjectName("u12")
self.gridLayout.addWidget(self.u12, 11, 1, 1, 1)
self.u13 = QtGui.QSpinBox(self.gridLayoutWidget)
self.u13.setObjectName("u13")
self.gridLayout.addWidget(self.u13, 12, 1, 1, 1)
self.trname = QtGui.QLineEdit(self.gridLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.trname.sizePolicy().hasHeightForWidth())
self.trname.setSizePolicy(sizePolicy)
self.trname.setObjectName("trname")
self.gridLayout.addWidget(self.trname, 0, 1, 1, 1)
self.maintab.addTab(self.tab0, "")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.textEdit = QtGui.QTextEdit(self.tab)
self.textEdit.setGeometry(QtCore.QRect(50, 50, 600, 75))
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtGui.QTextEdit(self.tab)
self.textEdit_2.setGeometry(QtCore.QRect(50, 175, 600, 75))
self.textEdit_2.setObjectName("textEdit_2")
self.textEdit_3 = QtGui.QTextEdit(self.tab)
self.textEdit_3.setGeometry(QtCore.QRect(50, 300, 600, 75))
self.textEdit_3.setObjectName("textEdit_3")
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(55, 20, 166, 18))
self.label_2.setObjectName("label_2")
self.label_21 = QtGui.QLabel(self.tab)
self.label_21.setGeometry(QtCore.QRect(55, 145, 211, 18))
self.label_21.setObjectName("label_21")
self.label_22 = QtGui.QLabel(self.tab)
self.label_22.setGeometry(QtCore.QRect(55, 270, 231, 18))
self.label_22.setObjectName("label_22")
self.maintab.addTab(self.tab, "")
self.tab1 = QtGui.QWidget()
self.tab1.setObjectName("tab1")
self.gridLayoutWidget_3 = QtGui.QWidget(self.tab1)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(50, 50, 536, 236))
self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
self.gridLayout_3 = QtGui.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_3.setObjectName("gridLayout_3")
self.spec1 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.spec1.setObjectName("spec1")
self.gridLayout_3.addWidget(self.spec1, 0, 1, 1, 1)
self.label_15 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 0, 0, 1, 1)
self.label_16 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_16.setObjectName("label_16")
self.gridLayout_3.addWidget(self.label_16, 1, 0, 1, 1)
self.pokelvl1 = QtGui.QSpinBox(self.gridLayoutWidget_3)
self.pokelvl1.setObjectName("pokelvl1")
self.gridLayout_3.addWidget(self.pokelvl1, 1, 1, 1, 1)
self.label_17 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 2, 0, 1, 1)
self.pokeItem1 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.pokeItem1.setObjectName("pokeItem1")
self.gridLayout_3.addWidget(self.pokeItem1, 2, 1, 1, 1)
self.label_18 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_18.setObjectName("label_18")
self.gridLayout_3.addWidget(self.label_18, 3, 0, 1, 1)
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.attack1_1 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_1.setObjectName("attack1_1")
self.gridLayout_5.addWidget(self.attack1_1, 0, 0, 1, 1)
self.attack1_2 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_2.setObjectName("attack1_2")
self.gridLayout_5.addWidget(self.attack1_2, 0, 1, 1, 1)
self.attack1_3 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_3.setObjectName("attack1_3")
self.gridLayout_5.addWidget(self.attack1_3, 1, 0, 1, 1)
self.attack1_4 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_4.setObjectName("attack1_4")
self.gridLayout_5.addWidget(self.attack1_4, 1, 1, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_5, 3, 1, 1, 1)
self.maintab.addTab(self.tab1, "")
self.saveButton = QtGui.QPushButton(TrainerEditDlg)
self.saveButton.setGeometry(QtCore.QRect(410, 5, 126, 28))
self.saveButton.setObjectName("saveButton")
self.retranslateUi(TrainerEditDlg)
self.maintab.setCurrentIndex(0)
QtCore.QObject.connect(self.saveButton, QtCore.SIGNAL("pressed()"), TrainerEditDlg.saveTrainer)
QtCore.QMetaObject.connectSlotsByName(TrainerEditDlg)
def retranslateUi(self, TrainerEditDlg):
TrainerEditDlg.setWindowTitle(QtGui.QApplication.translate("TrainerEditDlg", "PPRE: Pokemon Edit", None, QtGui.QApplication.UnicodeUTF8))
self.curtrLabel.setText(QtGui.QApplication.translate("TrainerEditDlg", "Current Trainer", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("TrainerEditDlg", "Trainer Name", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("TrainerEditDlg", "Number of Pokemon", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("TrainerEditDlg", "Trainer Class", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("TrainerEditDlg", "Trainer Type", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("TrainerEditDlg", "Items", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Ch", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Dh", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Eh", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Fh", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("TrainerEditDlg", "Double Battle", None, QtGui.QApplication.UnicodeUTF8))
self.doubleBool.setText(QtGui.QApplication.translate("TrainerEditDlg", "True", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setText(QtGui.QApplication.translate("TrainerEditDlg", "11h", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("TrainerEditDlg", "12h", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("TrainerEditDlg", "13h", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setText(QtGui.QApplication.translate("TrainerEditDlg", "Pokemon have items", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setText(QtGui.QApplication.translate("TrainerEditDlg", "Pokemon have different attacks", None, QtGui.QApplication.UnicodeUTF8))
self.itemBool.setText(QtGui.QApplication.translate("TrainerEditDlg", "True", None, QtGui.QApplication.UnicodeUTF8))
self.attackBool.setText(QtGui.QApplication.translate("TrainerEditDlg", "True", None, QtGui.QApplication.UnicodeUTF8))
self.maintab.setTabText(self.maintab.indexOf(self.tab0), QtGui.QApplication.translate("TrainerEditDlg", "Main", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("TrainerEditDlg", "Introduction Text", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setText(QtGui.QApplication.translate("TrainerEditDlg", "Defeated Text", None, QtGui.QApplication.UnicodeUTF8))
self.label_22.setText(QtGui.QApplication.translate("TrainerEditDlg", "Won Text", None, QtGui.QApplication.UnicodeUTF8))
self.maintab.setTabText(self.maintab.indexOf(self.tab), QtGui.QApplication.translate("TrainerEditDlg", "Quotes", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setText(QtGui.QApplication.translate("TrainerEditDlg", "Pokemon", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setText(QtGui.QApplication.translate("TrainerEditDlg", "Level", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setText(QtGui.QApplication.translate("TrainerEditDlg", "Item", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setText(QtGui.QApplication.translate("TrainerEditDlg", "Extra Attacks", None, QtGui.QApplication.UnicodeUTF8))
self.maintab.setTabText(self.maintab.indexOf(self.tab1), QtGui.QApplication.translate("TrainerEditDlg", "Data", None, QtGui.QApplication.UnicodeUTF8))
self.saveButton.setText(QtGui.QApplication.translate("TrainerEditDlg", "Save", None, QtGui.QApplication.UnicodeUTF8))
|
python
|
import datetime as dt
import smtplib
import random
import pandas
import os
PLACEHOLDER = "[NAME]"
MY_EMAIL = "[email protected]"
MY_PASSWORD = "my_password"
LETTER_TO_SEND = ""
now = dt.datetime.now()
is_day = now.day
is_month = now.month
data = pandas.read_csv("./birthdays.csv")
birthdays = data.to_dict(orient="records")
is_birthday = [person for person in birthdays if person['month'] == is_month and person['day'] == is_day]
if is_birthday:
    name = is_birthday[0]["name"]
    email = is_birthday[0]["email"]
file = random.choice(os.listdir("./letter_templates/"))
with open(f"./letter_templates/{file}") as letter:
new_letter = letter.read()
LETTER_TO_SEND = new_letter.replace(PLACEHOLDER, name)
with smtplib.SMTP("smtp.gmail.com") as connection:
connection.starttls()
connection.login(user=MY_EMAIL, password=MY_PASSWORD)
connection.sendmail(
from_addr=MY_EMAIL,
to_addrs=email,
msg=f"Subject:Happy Birthday {name}\n\n{LETTER_TO_SEND}"
)
|
python
|
# coding=utf-8
import json
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
authenticator = IAMAuthenticator('your_api_key')
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator)
language_translator.set_service_url('https://gateway.watsonplatform.net/language-translator/api')
## Translate
translation = language_translator.translate(
text='Hello', model_id='en-es').get_result()
print(json.dumps(translation, indent=2, ensure_ascii=False))
# List identifiable languages
# languages = language_translator.list_identifiable_languages().get_result()
# print(json.dumps(languages, indent=2))
# # Identify
# language = language_translator.identify(
# 'Language translator translates text from one language to another').get_result()
# print(json.dumps(language, indent=2))
# # List models
# models = language_translator.list_models(
# source='en').get_result()
# print(json.dumps(models, indent=2))
# # Create model
# with open('glossary.tmx', 'rb') as glossary:
# response = language_translator.create_model(
# base_model_id='en-es',
# name='custom-english-to-spanish',
# forced_glossary=glossary).get_result()
# print(json.dumps(response, indent=2))
# # Delete model
# response = language_translator.delete_model(model_id='<YOUR MODEL ID>').get_result()
# print(json.dumps(response, indent=2))
# # Get model details
# model = language_translator.get_model(model_id='<YOUR MODEL ID>').get_result()
# print(json.dumps(model, indent=2))
#### Document Translation ####
# List Documents
result = language_translator.list_documents().get_result()
print(json.dumps(result, indent=2))
# Translate Document
with open('en.pdf', 'rb') as file:
result = language_translator.translate_document(
file=file,
file_content_type='application/pdf',
filename='en.pdf',
model_id='en-fr').get_result()
print(json.dumps(result, indent=2))
# Document Status
result = language_translator.get_document_status(
document_id='{document id}').get_result()
print(json.dumps(result, indent=2))
# Translated Document
with open('translated.pdf', 'wb') as f:
result = language_translator.get_translated_document(
document_id='{document id}',
accept='application/pdf').get_result()
f.write(result.content)
# Delete Document
language_translator.delete_document(document_id='{document id}')
|
python
|
# TODO: support axis=k to create multiple factors for each row/col
# TODO: knapsack (how to pass costs? must check broadcast/shape)
class Logic(object):
def __init__(self, variables):
self._variables = variables
# TODO: deal with negated
def _construct(self, fg, variables):
return [fg.create_factor_logic(self.factor_type, variables)], []
class Xor(Logic):
factor_type = "XOR"
class Or(Logic):
factor_type = "OR"
class AtMostOne(Logic):
factor_type = "ATMOSTONE"
class Imply(Logic):
factor_type = "IMPLY"
class XorOut(Logic):
factor_type = "XOROUT"
class OrOut(Logic):
factor_type = "OROUT"
class AndOut(Logic):
factor_type = "ANDOUT"
class Budget(object):
def __init__(self, variables, budget):
self._variables = variables
self.budget = budget
# TODO: deal with negated
def _construct(self, fg, pvars):
return [fg.create_factor_budget(pvars, self.budget)], []
class Pair(object):
# TODO: possible to have it be faster?
def __init__(self, vars_i, vars_j, additionals):
self._variables = vars_i, vars_j
self._additionals = additionals
def _construct(self, fg, pvars):
vars_i, vars_j = pvars
n = len(vars_i)
adds = self._additionals
factors = [
fg.create_factor_pair([
vars_i[k],
vars_j[k]],
adds[k])
for k in range(n)
]
add_tensors = [adds[k] for k in range(n)]
return factors, add_tensors
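# Usage sketch (illustrative; `fg` and the variable handles v1..v3 come from
# the surrounding factor-graph library and are assumed here):
#
#     budget = Budget([v1, v2, v3], budget=2)
#     factors, additionals = budget._construct(fg, [v1, v2, v3])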
|
python
|
import sys
import os
import numpy as np
import shutil
from common import PostProcess, update_metrics_in_report_json
from common import read_limits, check_limits_and_add_to_report_json
#from common import VirtualVehicleMakeMetrics as VVM
def main():
print "in main....."
sampleRate = 0.10
startAnalysisTime = 50
f = open('../rawdata.csv', 'w')
mat_file_name = sys.argv[1]
print "Mat file name is "+mat_file_name
if not os.path.exists(mat_file_name):
print "Given result file does not exist: ",mat_file_name
raise IOError('Given result file does not exist: {0}'.format(sys.argv[1]))
else:
print "line 10....",os.getcwd()
dstpath = os.path.join(os.getcwd(), 'matfiles')
print "dstPath is ",dstpath
if not os.path.isdir(dstpath):
os.makedirs(dstpath)
numFiles = len(os.listdir(dstpath))
dstname = '_' + str(numFiles) + mat_file_name
print "dstname ",dstname
#shutil.copyfile(mat_file_name, os.path.join(dstpath, dstname))
print "line30"
print "Line 24: Opened "+mat_file_name
## First limit part
#limit_dict, filter = read_limits()
print "done limits"
filter = []
## End of first limit part
## Post processing part
#--------accelerations-----------------------------------------------
#---------------------------------------------------------------------------------
# Processing
#---------------------------------------------------------------------------------
# loads results with the filtered out variables (and 'time' which is default)
filter = []
pp = PostProcess(mat_file_name, filter)
vars_available = pp.get_names()
dumpList = []
print vars_available[1]
for vv in vars_available:
if vv.find("current_") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("voltage_") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("angle_") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("BaseTemp") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("GyroTemp") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("BattTemp") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
pp.print_time()
print "Last time is "+str(pp.get_max_time())
sampData = []
for vv in dumpList:
ndat = pp.resample_data(vv,sampleRate)
print "got ndat size=",len(ndat)
sampData.append(ndat)
print 'sampdata=',len(sampData),'cols',len(sampData[0]),'rows'
i = 0
print "dumping raw data headers"
for c,vv in enumerate(dumpList):
print vv,c
f.write( vv+',')
f.write( "\n")
print "dump data"
print len(sampData),'cols',len(sampData[0])
while i < len(sampData[0]):
if i % 1000 == 0:
print "line ",i
for c,vv in enumerate(dumpList):
f.write(str(sampData[c][i])+',')
f.write( "\n")
i = i + 1
f.close()
actAngleIdx = -1
setAngleIdx = -1
voltBusIdx = -1
currGyroIdx = -1
gyroTempIdx = -1
baseTempIdx = -1
for c,vv in enumerate(dumpList):
if vv.find("angle_set") != -1:
setAngleIdx = c
if vv.find("angle_act") != -1:
actAngleIdx = c
if vv.find("voltage_bus") != -1:
voltBusIdx = c
if vv.find("current_gyro") != -1:
currGyroIdx = c
print "gyro idx ",currGyroIdx
if vv.find("GyroTemp") != -1:
gyroTempIdx = c
if vv.find("BaseTemp") != -1:
baseTempIdx = c
maxErr = 0
sumErr = 0
avgErr = 0
maxBusV = -1
minBusV = 100
minBattCap = 100
maxGyroCurr = 0;
maxTemp = 0
maxGyroTemp = 0
if actAngleIdx != -1 and setAngleIdx != -1:
i = int(startAnalysisTime/sampleRate)
first = i
print "scanning angles from ",i," to " ,len(sampData[setAngleIdx])
while i < len(sampData[setAngleIdx]):
angErr = abs(sampData[setAngleIdx][i] - sampData[actAngleIdx][i])
if angErr > maxErr:
maxErr = angErr
sumErr = sumErr + angErr
i = i + 1
avgErr = sumErr / (i - first + 1)
if voltBusIdx != -1:
i = int(startAnalysisTime/sampleRate)
while i < len(sampData[voltBusIdx]):
vts = abs(sampData[voltBusIdx][i])
if vts > maxBusV:
maxBusV = vts
if vts < minBusV:
minBusV = vts
i = i + 1
if currGyroIdx != -1:
i = int(startAnalysisTime/sampleRate)
print "scanning Gyro currents from ",i," to " ,len(sampData[currGyroIdx])
while i < len(sampData[currGyroIdx]):
vts = abs(sampData[currGyroIdx][i])
if vts > maxGyroCurr:
maxGyroCurr = vts
print vts
i = i + 1
if baseTempIdx != -1:
i = int(startAnalysisTime/sampleRate)
while i < len(sampData[baseTempIdx]):
vts = abs(sampData[baseTempIdx][i])
if vts > maxTemp:
maxTemp = vts
i = i + 1
if gyroTempIdx != -1:
i = int(startAnalysisTime/sampleRate)
while i < len(sampData[gyroTempIdx]):
vts = abs(sampData[gyroTempIdx][i])
if vts > maxGyroTemp:
maxGyroTemp = vts
i = i + 1
output_dir = "../"
json_filename = os.path.join(output_dir, 'testbench_manifest.json')
import json
json_data = {}
if os.path.isfile(json_filename):
with open(json_filename, "r") as json_file:
print "reading json"
json_data = json.load(json_file)
print "json_data is....."
print json_data
for metric in json_data['Metrics']:
if metric["Name"] == "angleMaxError":
metric["Value"] = str(maxErr)
print 'angleMaxError '+str(maxErr)
if metric["Name"] == "angleAvgError":
metric["Value"] = str(avgErr)
if metric["Name"] == "minBusVoltage":
metric["Value"] = str(minBusV)
if metric["Name"] == "maxBusVoltage":
metric["Value"] = str(maxBusV)
if metric["Name"] == "minBattCapacity":
metric["Value"] = str(minBattCap)
if metric["Name"] == "maxGyroCurrent":
metric["Value"] = str(maxGyroCurr)
if metric["Name"] == "maxTemp":
metric["Value"] = str(maxTemp)
if metric["Name"] == "maxGyroTemp":
metric["Value"] = str(maxGyroTemp)
print "dumping to ",json_filename
print json_data
with open(json_filename, "w") as json_file:
json.dump(json_data, json_file, indent=4)
# #---------------------------------------------------------------------------------
# # Potential_Design
# #---------------------------------------------------------------------------------
# Potential_Design = 0
# followTime = pp.get_data_by_index(followTime_uri, -1)
# if (SettlingTime == -1 or riseTime == -1 or minDistance < .1*minDistanceVelocity*followTime):
# Potential_Design = -1
# else: Potential_Design = 1
# print "Potential_Design: %d" %Potential_Design
# #---------------------------------------------------------------------------------
# # Metrics
# #---------------------------------------------------------------------------------
# metrics = {}
# metrics.update({'vehicleMass':{'value': vehicleMass, 'unit':'kg'},
# 'distanceTraveled':{'value': distanceTraveled, 'unit': 'm'},
# 'minDistance': {'value': minDistance, 'unit': 'm'},
# 'finalVelocity':{'value': Vf, 'unit': 'm/s'},
# 'requiredTorque':{'value': requiredTorque, 'unit':'N-m'},
# 'riseTime':{'value': np.amax(riseTime), 'unit' :''},
# 'Overshoot':{'value': np.amax(Overshoot), 'unit' :''},
# 'settlingTime':{'value': np.amax(SettlingTime), 'unit' :''},
# 'rms_error':{'value': RMS_error, 'unit' : ''},
# 'numSetpointCrossings':{'value':numSetPointCrossings, 'unit': ''},
# 'averageA': {'value': maxAccel, 'unit': 'm/s2'},
# 'averageJ': {'value': maxJerk, 'unit': 'm/s3'},
# 'Potential_Design': {'value': Potential_Design, 'unit': ''},
# #'chassisType':{'value': chassisType, 'unit' :''},
# })
#print metrics
cwd = os.getcwd()
os.chdir('..')
# print 'Plot saved to : {0}'.format(pp.save_as_svg(vehicle_speed,
# pp.global_abs_max(vehicle_speed),
# 'VehicleSpeed',
# 'max(FTP_Driver.driver_bus.vehicle_speed)',
# 'kph'))
#pp.store_data_to_csv(jerk_uri, '{0}.csv'.format(jerk_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(a_uri, '{0}.csv'.format(a_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(pp.time_array, '{0}.csv'.format(pp.time_array), 0, time_inc, numSamples)
#pp.store_data_to_csv(boomCylLength_uri, '{0}.csv'.format(boomCylLength_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(armCylLength_uri, '{0}.csv'.format(armCylLength_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(bucketCylLength_uri, '{0}.csv'.format(bucketCylLength_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(boomCylRPressure_uri, '{0}.csv'.format(boomCylRPressure_uri), 0, 0.1, dur)
#pp.store_data_to_csv(arm_ang_vel_uri, '{0}.csv'.format(arm_ang_vel_uri), 0, 0.1, dur)
#pp.store_data_to_csv(max_Y_uri, '{0}.csv'.format(max_Y_uri), 0, 0.1, dur)
#pp.store_data_to_csv(max_reach_uri, '{0}.csv'.format(max_reach_uri), 0, 0.1, dur)
#pp.store_data_to_csv(State_uri, '{0}.csv'.format(State_uri), 0, 0.1, dur)
## end of postprocessing part
## Second limit part
#check_limits_and_add_to_report_json(pp, limit_dict)
#update_metrics_in_report_json(metrics)
## end of Second limit part
os.chdir(cwd)
print "done main"
if __name__ == '__main__':
root_dir = os.getcwd()
print "Starting in "+root_dir
try:
print "Starting Main...."
main()
except:
print "exception occurred..."
os.chdir(root_dir)
import traceback
trace = traceback.format_exc()
# Generate this file on failed executions, https://github.com/scipy/scipy/issues/1840
with open(os.path.join('..', '_POST_PROCESSING_FAILED.txt'), 'wb') as f_out:
f_out.write(trace)
|
python
|
# Algorithms > Warmup > Simple Array Sum
# Calculate the sum of integers in an array.
#
# https://www.hackerrank.com/challenges/simple-array-sum/problem
#
#
# Complete the simpleArraySum function below.
#
def simpleArraySum(ar):
#
# Write your code here.
#
return sum(ar)
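# For example, simpleArraySum([1, 2, 3, 4, 10, 11]) returns 31.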
if __name__ == '__main__':
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
result = simpleArraySum(ar)
print(result)
|
python
|
from cached_property import cached_property
from onegov.activity import Activity, Attendee, Booking, Occasion
from onegov.feriennet import _
from onegov.feriennet import FeriennetApp
from onegov.feriennet.collections import BillingCollection, MatchCollection
from onegov.feriennet.exports.unlucky import UnluckyExport
from onegov.feriennet.layout import DefaultLayout
from onegov.org.models import Boardlet, BoardletFact
class FeriennetBoardlet(Boardlet):
@cached_property
def session(self):
return self.request.session
@cached_property
def period(self):
return self.request.app.active_period
@cached_property
def layout(self):
return DefaultLayout(None, self.request)
@property
def state(self):
if not self.period:
return 'failure'
if not self.period.confirmed:
return 'warning'
return 'success'
@FeriennetApp.boardlet(name='period', order=(1, 1))
class PeriodBoardlet(FeriennetBoardlet):
@property
def title(self):
return self.period and self.period.title or _("No active period")
@property
def state(self):
if not self.period:
return 'failure'
return 'success'
@property
def facts(self):
if not self.period:
return
def icon(checked):
return checked and 'fa-check-square-o' or 'fa-square-o'
yield BoardletFact(
text=_("Prebooking: ${dates}", mapping={
'dates': self.layout.format_date_range(
self.period.prebooking_start,
self.period.prebooking_end,
)
}),
icon=icon(self.period.confirmed)
)
yield BoardletFact(
text=_("Booking: ${dates}", mapping={
'dates': self.layout.format_date_range(
self.period.booking_start,
self.period.booking_end,
)
}),
icon=icon(self.period.finalized if self.period.finalizable
else self.period.is_booking_in_past)
)
yield BoardletFact(
text=_("Execution: ${dates}", mapping={
'dates': self.layout.format_date_range(
self.period.execution_start,
self.period.execution_end,
)
}),
icon=icon(self.period.is_execution_in_past)
)
@FeriennetApp.boardlet(name='activities', order=(1, 2))
class ActivitiesBoardlet(FeriennetBoardlet):
@cached_property
def occasions_count(self):
if not self.period:
return 0
return self.session.query(Occasion)\
.filter_by(period_id=self.period.id)\
.count()
@cached_property
def activities_count(self):
if not self.period:
return 0
return self.session.query(Activity).filter(Activity.id.in_(
self.session.query(Occasion.activity_id)
.filter_by(period_id=self.period.id)
.subquery()
)).filter_by(state='accepted').count()
@property
def title(self):
return _("${count} Activities", mapping={
'count': self.activities_count
})
@property
def state(self):
if not self.period:
return 'failure'
return self.activities_count and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${count} Activities", mapping={
'count': self.activities_count
}),
icon='fa-dot-circle-o'
)
yield BoardletFact(
text=_("${count} Occasions", mapping={
'count': self.occasions_count
}),
icon='fa-circle-o'
)
@FeriennetApp.boardlet(name='bookings', order=(1, 3))
class BookingsBoardlet(FeriennetBoardlet):
@cached_property
def counts(self):
if not self.period:
return {
'accepted': 0,
'blocked': 0,
'cancelled': 0,
'denied': 0,
'total': 0,
}
bookings = self.session.query(Booking)\
.filter_by(period_id=self.period.id)
return {
'accepted': bookings.filter_by(state='accepted').count(),
'blocked': bookings.filter_by(state='blocked').count(),
'cancelled': bookings.filter_by(state='cancelled').count(),
'denied': bookings.filter_by(state='denied').count(),
'total': bookings.count(),
}
@cached_property
def attendees_count(self):
if not self.period:
return 0
return self.session.query(Attendee)\
.filter(Attendee.id.in_(
self.session.query(Booking.attendee_id).filter_by(
period_id=self.period.id
)
)).count()
@property
def title(self):
if not self.period or not self.period.confirmed:
return _("${count} Wishes", mapping={
'count': self.counts['total']
})
else:
return _("${count} Bookings", mapping={
'count': self.counts['total']
})
@property
def state(self):
if not self.period:
return 'failure'
return self.counts['total'] and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
if not self.period.confirmed:
yield BoardletFact(
text=_("${count} Wishes", mapping={
'count': self.counts['total']
}),
icon='fa-square',
)
yield BoardletFact(
text=_("${count} Wishes per Attendee", mapping={
'count': self.attendees_count and (
round(self.counts['total'] / self.attendees_count, 1)
) or 0
}),
icon='fa-line-chart',
)
else:
yield BoardletFact(
text=_("${count} Bookings", mapping={
'count': self.counts['total']
}),
icon='fa-square',
)
yield BoardletFact(
text=_("${count} accepted", mapping={
'count': self.counts['accepted']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} cancelled", mapping={
'count': self.counts['cancelled']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} denied", mapping={
'count': self.counts['denied']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} blocked", mapping={
'count': self.counts['blocked']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} Bookings per Attendee", mapping={
'count': self.attendees_count and round(
self.counts['accepted'] / self.attendees_count, 1
) or 0
}),
icon='fa-line-chart',
)
@FeriennetApp.boardlet(name='attendees', order=(1, 4))
class AttendeesBoardlet(FeriennetBoardlet):
@cached_property
def attendee_counts(self):
if not self.period:
            return {
                'total': 0,
                'girls': 0,
                'boys': 0,
            }
attendees = self.session.query(Attendee)\
.filter(Attendee.id.in_(
self.session.query(Booking.attendee_id).filter_by(
period_id=self.period.id
)
))
return {
'total': attendees.count(),
'girls': attendees.filter_by(gender='female').count(),
'boys': attendees.filter_by(gender='male').count(),
}
@property
def title(self):
return _("${count} Attendees", mapping={
'count': self.attendee_counts['total']
})
@property
def state(self):
if not self.period:
return 'failure'
return self.attendee_counts['total'] and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${count} Girls", mapping={
'count': self.attendee_counts['girls']
}),
icon='fa-female'
)
yield BoardletFact(
text=_("${count} Boys", mapping={
'count': self.attendee_counts['boys']
}),
icon='fa-male'
)
@FeriennetApp.boardlet(name='matching', order=(1, 5))
class MatchingBoardlet(FeriennetBoardlet):
@cached_property
def happiness(self):
if not self.period or not self.period.confirmed:
return 0
raw = MatchCollection(self.session, self.period).happiness
return round(raw * 100, 2)
@cached_property
def unlucky_count(self):
if not self.period:
return 0
return UnluckyExport().query(self.session, self.period).count()
@property
def title(self):
return _("${amount}% Happiness", mapping={
'amount': self.happiness
})
@property
def state(self):
if not self.period:
return 'failure'
return self.happiness > 75 and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${amount}% Happiness", mapping={
'amount': self.happiness
}),
icon='fa-smile-o',
)
yield BoardletFact(
text=_("${count} Attendees Without Occasion", mapping={
'count': self.unlucky_count
}),
icon='fa-frown-o',
)
@FeriennetApp.boardlet(name='billing', order=(1, 6))
class BillingPortlet(FeriennetBoardlet):
@cached_property
def amounts(self):
if not self.period:
return {
'total': 0,
'outstanding': 0,
'paid': 0,
}
billing = BillingCollection(self.request, self.period)
result = {
'total': billing.total,
'outstanding': billing.outstanding,
}
result['paid'] = result['total'] - result['outstanding']
return result
@property
def title(self):
return _("${amount} CHF outstanding", mapping={
'amount': self.layout.format_number(self.amounts['outstanding'])
})
@property
def state(self):
if not self.period:
return 'failure'
return self.amounts['outstanding'] and 'warning' or 'success'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${amount} CHF total", mapping={
'amount': self.layout.format_number(self.amounts['total'])
}),
icon='fa-circle',
)
yield BoardletFact(
text=_("${amount} CHF paid", mapping={
'amount': self.layout.format_number(self.amounts['paid'])
}),
icon='fa-plus-circle',
)
yield BoardletFact(
text=_("${amount} CHF outstanding", mapping={
'amount': self.layout.format_number(
self.amounts['outstanding']
)
}),
icon='fa-minus-circle',
)
|
python
|
# -*- coding: utf-8 -*-
"""Configurations for slimming simple network.
- Author: Curt-Park
- Email: [email protected]
"""
from config.train.cifar100 import simplenet, simplenet_finetune
train_config = simplenet.config
regularizer_params = {
"REGULARIZER": "BnWeight",
"REGULARIZER_PARAMS": dict(coeff=1e-5),
"EPOCHS": train_config["EPOCHS"],
}
train_config.update(regularizer_params)
finetune_config = simplenet_finetune.config
regularizer_params = {
"REGULARIZER": "BnWeight",
"REGULARIZER_PARAMS": dict(coeff=1e-5),
"EPOCHS": finetune_config["EPOCHS"],
}
finetune_config.update(regularizer_params)
config = {
"TRAIN_CONFIG": train_config,
"TRAIN_CONFIG_AT_PRUNE": finetune_config,
"N_PRUNING_ITER": 15,
"PRUNE_METHOD": "SlimMagnitude",
"PRUNE_PARAMS": dict(
PRUNE_AMOUNT=0.1,
NORM=2,
STORE_PARAM_BEFORE=10,
TRAIN_START_FROM=0,
PRUNE_AT_BEST=False,
),
}
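# Rough arithmetic: if PRUNE_AMOUNT is applied to the channels remaining at
# each of the 15 iterations, about 1 - 0.9**15, i.e. roughly 79% of the
# original channels, would be pruned by the end (this assumes relative,
# not absolute, pruning per iteration).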
|
python
|
# hack to capture stdout to a string, to test it
import re
import os
import subprocess
import io
import sys
from contextlib import contextmanager
import filecmp
def test_rr_cases():
# now for various combinations of inputs
output_file = 'test_output.csv'
# test exact round robin case, but just once
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','8'
,'-d','7'
,'-p','2'
,'--cpu','2'
,'--debug'
,'--timelimit','10'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
print('out 186 is ',out)
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 2',err,re.MULTILINE)
except:
assert False
try:
# clean up the temp file
os.unlink(output_file)
except:
print('no file to delete')
# test exact round robin case, but just twice around
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','4'
,'-d','6'
,'-p','1'
,'--cpu','2'
,'--debug'
,'--timelimit','60'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 2',err,re.MULTILINE)
except:
assert False
try:
# clean up the temp file
os.unlink(output_file)
except:
print('no file to delete')
|
python
|
from gpkit import Model, Variable, VectorVariable, SignomialsEnabled
import numpy as np
class MST(Model):
def setup(self, N):
edgeCost = VectorVariable([N, N],
'edgeCost')
edgeMaxFlow = VectorVariable([N, N],
'edgeMaxFlow')
connect = VectorVariable([N,N],'connectivity')
flow = VectorVariable([N, N], 'flow')
source = VectorVariable(N, 'source')
sink = VectorVariable(N, 'sink')
totalCost = Variable('totalCost')
constraints = []
with SignomialsEnabled():
for i in range(0, N):
constraints.extend([sink[i] + sum(flow[i, :]) <= source[i] + sum(flow[:, i]),])
for j in range(0, N):
constraints.extend([flow[i, j] <= connect[i,j]*edgeMaxFlow[i, j]])
for i in range(0, N):
for j in range(i + 1, N):
constraints.extend([flow[i, j] * flow[j, i] <= 1e-5])
constraints.extend([totalCost >= sum(edgeCost * flow) ])
return constraints
|
python
|
import pytest
from auth import create_jwt_payload
@pytest.mark.usefixtures("default_qr_code")
def test_qr_exists(client, default_qr_code):
code = default_qr_code["code"]
graph_ql_query_string = f"""query CheckQrExistence {{
qrExists(qrCode: "{code}")
}}"""
data = {"query": graph_ql_query_string}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert response.json["data"]["qrExists"]
graph_ql_query_string = """query CheckQrExistence {
qrExists(qrCode: "111")
}"""
data = {"query": graph_ql_query_string}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert not response.json["data"]["qrExists"]
@pytest.mark.usefixtures("default_qr_code")
def test_qr_code(client, default_qr_code):
graph_ql_query_string = f"""query {{
qrCode(qrCode: "{default_qr_code['code']}") {{
code
}}
}}"""
data = {"query": graph_ql_query_string}
response = client.post("/graphql", json=data)
queried_code = response.json["data"]["qrCode"]
assert response.status_code == 200
assert queried_code["code"] == default_qr_code["code"]
@pytest.mark.usefixtures("qr_code_without_box")
def test_code_not_associated_with_box(client, qr_code_without_box):
code = qr_code_without_box["code"]
graph_ql_query_string = f"""query {{
qrCode(qrCode: "{code}") {{
box {{
id
}}
}}
}}"""
data = {"query": graph_ql_query_string}
response_data = client.post("/graphql", json=data)
assert (
"<Model: Box> instance matching query does not exist"
in response_data.json["errors"][0]["message"]
)
queried_box = response_data.json["data"]["qrCode"]["box"]
assert queried_box is None
def test_code_does_not_exist(client):
graph_ql_query_string = """query Box {
qrCode(qrCode: "-1") {
id
}
}"""
data = {"query": graph_ql_query_string}
response_data = client.post("/graphql", json=data)
queried_code = response_data.json["data"]["qrCode"]
assert (
"<Model: QRCode> instance matching query does not exist"
in response_data.json["errors"][0]["message"]
)
assert queried_code is None
@pytest.mark.usefixtures("box_without_qr_code")
def test_create_qr_code(client, box_without_qr_code):
data = {"query": "mutation { createQrCode { id } }"}
response = client.post("/graphql", json=data)
qr_code_id = int(response.json["data"]["createQrCode"]["id"])
assert response.status_code == 200
assert qr_code_id > 2
data = {
"query": f"""mutation {{
createQrCode(boxLabelIdentifier: "{box_without_qr_code['box_label_identifier']}") # noqa
{{
id
box {{
id
items
}}
}}
}}"""
}
response = client.post("/graphql", json=data)
created_qr_code = response.json["data"]["createQrCode"]
assert response.status_code == 200
assert int(created_qr_code["id"]) == qr_code_id + 1
assert created_qr_code["box"]["items"] == box_without_qr_code["items"]
assert int(created_qr_code["box"]["id"]) == box_without_qr_code["id"]
data = {"query": """mutation { createQrCode(boxLabelIdentifier: "xxx") { id } }"""}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert response.json["data"]["createQrCode"] is None
assert len(response.json["errors"]) == 1
assert response.json["errors"][0]["extensions"]["code"] == "BAD_USER_INPUT"
def test_invalid_permission(client, mocker):
mocker.patch("jose.jwt.decode").return_value = create_jwt_payload(permissions=[])
data = {"query": "mutation { createQrCode { id } }"}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert response.json["data"]["createQrCode"] is None
assert len(response.json["errors"]) == 1
assert response.json["errors"][0]["extensions"]["code"] == "FORBIDDEN"
|
python
|
import os
import time
import requests
from flask import render_template, request, flash, redirect, url_for, abort
from jinja2 import Markup
from app import app, PLOTS_FOLDER, UPLOAD_FOLDER
from functions import dir_listing, process_images
@app.route('/')
def home():
return render_template('home.html')
@app.route('/uploads/', defaults={'req_path': ''})
@app.route('/uploads/<path:req_path>')
def uploads(req_path):
return dir_listing(UPLOAD_FOLDER, req_path, 'images.html')
@app.route('/plots/', defaults={'req_path': ''})
@app.route('/plots/<path:req_path>')
def plots(req_path):
return dir_listing(PLOTS_FOLDER, req_path, 'plots.html')
@app.route('/upload/', methods=['GET', 'POST'])
def upload_form():
if request.method == 'POST':
result_response, status_code = process_images(request)
if status_code != 200:
message = 'There was an error uploading photos: ' + str(result_response['reason'])
flash(message)
return redirect('/upload/')
else:
return redirect(url_for('uploads', req_path=result_response['folder_name']))
else:
return render_template('upload_form.html')
@app.route('/create/', methods=['GET', 'POST'])
def create():
folders = os.listdir(UPLOAD_FOLDER)
if request.method == 'GET':
return render_template('create.html', folders=folders)
elif request.method == 'POST':
form_data = request.form
image_location = os.path.join(UPLOAD_FOLDER, form_data['folder_name'])
destination = os.path.join(PLOTS_FOLDER, form_data['folder_name'])
if os.path.isfile(os.path.join(image_location, 'metadata.csv')):
metadata = os.path.join(image_location, 'metadata.csv')
data = {"args": ['--images', image_location + "/*.jpg", '--out_dir', destination, '--metadata', metadata]}
else:
data = {"args": ['--images', image_location + "/*.jpg", '--out_dir', destination]}
response = requests.post(request.url_root + "api/pixplot", json=data)
app.logger.info('Create request status: ' + str(response.status_code))
app.logger.debug('Create request status: ' + str(response.json()))
while True:
time.sleep(10)
result = requests.get(response.json()['result_url'])
app.logger.debug(str(result.json()))
if 'status' in result.json().keys() and result.json()['status'] == 'running':
# Still running
continue
elif 'report' in result.json().keys() and result.json()['report'][-6:-1] == 'Done!':
# Complete without error
message = Markup('<a href="/plots/%s/index.html" class="alert-link">Finished! Your PixPlot is uploaded here.</a>' % form_data['folder_name'])
break
else:
# Something botched
message = 'There was an error creating your PixPlot. Sorry.'
app.logger.error(str(result.json()))
break
flash(message)
return redirect('/create/')
else:
return abort(404)
|
python
|
# Packages
from sys import argv, exit
from os.path import isfile, realpath, dirname
from flask import Flask
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_sqlalchemy import SQLAlchemy
# Local
from config import Config
path = dirname(realpath(__file__))
secret = "%s/app/app.secret" % path
if not isfile(secret):
print("Error: Missing file '%s'" % secret)
exit(1)
app = Flask(__name__, static_url_path="/assets")
with open(secret, "r") as f:
app.secret_key = f.read()
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = None
if len(argv) > 1:
manager = Manager(app)
manager.add_command("db", MigrateCommand)
from app import routes, models
|
python
|
#! /usr/bin/env python
from __future__ import print_function
from builtins import str
import sys
import os
from vmrunner import vmrunner
import socket
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def UDP_test(trigger_line):
print("<Test.py> Performing UDP tests")
HOST, PORT = "10.0.0.55", 4242
# SOCK_DGRAM is the socket type to use for UDP sockets
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: This is necessary for the test to exit after the VM has
# been shut down due to a VM timeout
sock.settimeout(20)
data = "Lucky"
sock.sendto(data, (HOST, PORT))
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "Luke"
sock.sendto(data, (HOST, PORT))
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "x" * 1472
sock.sendto(data, (HOST, PORT))
received = sock.recv(1500)
if received != data:
print("<Test.py> Did not receive long string: {}".format(received))
return False
data = "x" * 9216 # 9216 is apparently default max for MacOS
sock.sendto(data, (HOST, PORT))
received = bytearray()
while (len(received) < len(data)):
received.extend(sock.recv(len(data)))
print("RECEIVED: ", len(received))
if received != data:
        print("<Test.py> Did not receive full 9216-byte string")
return False
vm.exit(0, "Test completed without errors")
# Add custom event-handler
vm.on_output("UDP test service", UDP_test)
if len(sys.argv) > 1:
vm.boot(image_name=str(sys.argv[1]))
else:
# Boot the VM, taking a timeout as parameter
vm.cmake().boot(30,image_name="net_udp").clean()
|
python
|
#!/usr/bin/python3
spam = ['apples', 'bannanas', 'tofus', 'cats']
length = len(spam)
item = 0
while item < length - 1:
print(spam[item], end=' ')
item = item + 1
print(' and ' + spam[item])
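# Expected output: "apples bannanas tofus  and cats"
# (two spaces before "and": one from end=' ', one from the literal ' and ').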
|
python
|
from SOC.models import Manna
import numpy as np
import pytest
def test_boundary_shape():
sim = Manna(L=10)
assert sim.values.shape == (12, 12)
assert sim.L_with_boundary == 12
def test_run_abel():
sim = Manna(L=20)
sim.run(5)
def test_run_nonabel():
sim = Manna(L=20, abelian = False)
sim.run(5)
def test_driving_does_not_pollute_boundary():
sim = Manna(L=10)
for i in range(1000):
sim.drive()
def test_toppling_reduces_middle_to_max_one():
sim = Manna(L=10)
sim.values[1:-1, 1:-1] = 6
sim.AvalancheLoop()
assert (0 <= sim.values[1:-1, 1:-1]).all()
assert (sim.values[1:-1, 1:-1] <= 1).all()
@pytest.mark.skip
def test_whiteboard_case_1():
sim = Manna(L=3)
sim.values[2, 2] = 2
results = sim.AvalancheLoop()
assert int(results['AvalancheSize']) == 2
assert int(results['number_of_iterations']) == 1
@pytest.mark.skip
def test_whiteboard_case_2():
sim = Manna(L=3)
sim.values[2, 2] = 2
results = sim.AvalancheLoop()
assert int(results['AvalancheSize']) == 2
assert int(results['number_of_iterations']) == 1
def test_resurrect():
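    # Run a short simulation that snapshots to a zarr file, then rebuild the
    # simulation from that file and check that state and settings survive.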
sim = Manna(L=10)
filename = "test_ressurrect.zarr"
sim.run(5, filename=filename)
saved = sim.saved_snapshots[-1].copy()
save_every_orig = sim.save_every
sim2 = Manna.from_file(filename)
np.testing.assert_allclose(sim2.values, saved)
assert sim2.save_every == save_every_orig
def test_resurrect_default_name():
sim = Manna(L=10)
filename = sim.run(50, filename=False)
saved = sim.saved_snapshots[-1].copy()
save_every_orig = sim.save_every
sim2 = Manna.from_file(filename)
np.testing.assert_allclose(sim2.values, saved)
assert sim2.save_every == save_every_orig
|
python
|
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing NVIDIA Driver installation.
"""
import re
from absl import flags
from absl import logging
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import os_types
from perfkitbenchmarker import regex_util
NVIDIA_DRIVER_LOCATION_BASE = 'https://us.download.nvidia.com/tesla'
NVIDIA_TESLA_K80 = 'k80'
NVIDIA_TESLA_P4 = 'p4'
NVIDIA_TESLA_P100 = 'p100'
NVIDIA_TESLA_V100 = 'v100'
NVIDIA_TESLA_T4 = 't4'
NVIDIA_TESLA_A100 = 'a100'
"""Default GPU clocks and autoboost configurations.
Base_clock is the default clock speeds when setting the GPU clocks. Max_clock
is currently unused. The clock speeds are in the format of
[memory_clock in MHz, graphics_clock in MHz].
"""
GPU_DEFAULTS = {
NVIDIA_TESLA_K80: {
'base_clock': [2505, 562],
'max_clock': [2505, 875],
'autoboost_enabled': True,
},
NVIDIA_TESLA_P4: {
'base_clock': [3003, 885],
'max_clock': [3003, 1531],
'autoboost_enabled': None,
},
NVIDIA_TESLA_P100: {
'base_clock': [715, 1189],
'max_clock': [715, 1328],
'autoboost_enabled': None,
},
NVIDIA_TESLA_V100: {
'base_clock': [877, 1312],
'max_clock': [877, 1530],
'autoboost_enabled': None,
},
NVIDIA_TESLA_T4: {
'base_clock': [5001, 585],
'max_clock': [5001, 1590],
'autoboost_enabled': None,
},
NVIDIA_TESLA_A100: {
'base_clock': [1215, 1410],
'max_clock': [1215, 1410],
'autoboost_enabled': None,
},
}
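# Example lookup (values taken from the table above):
#   GPU_DEFAULTS[NVIDIA_TESLA_V100]['base_clock'] == [877, 1312]
# i.e. an 877 MHz memory clock and a 1312 MHz graphics clock.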
EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'
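# This regex is applied to nvidia-smi CSV clock lines; e.g. a line such as
# '877 MHz, 1312 MHz' (illustrative) yields the groups ('877', '1312').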
flag_util.DEFINE_integerlist('gpu_clock_speeds',
None,
'desired gpu clock speeds in the form '
'[memory clock, graphics clock]')
flags.DEFINE_boolean('gpu_autoboost_enabled', None,
'whether gpu autoboost is enabled')
flags.DEFINE_string('nvidia_driver_version', '450.80.02',
'The version of nvidia driver to install. '
'For example, "418.67" or "418.87.01."')
flags.DEFINE_boolean('nvidia_driver_force_install', False,
'Whether to install NVIDIA driver, even if it is already '
'installed.')
flags.DEFINE_string('nvidia_driver_x_library_path', '/usr/lib',
'X library path for nvidia driver installation')
flags.DEFINE_string('nvidia_driver_x_module_path', '/usr/lib/xorg/modules',
'X module path for nvidia driver installation')
flags.DEFINE_boolean('nvidia_driver_persistence_mode', None,
'whether to enable persistence mode on the NVIDIA GPU')
FLAGS = flags.FLAGS
class UnsupportedClockSpeedError(Exception):
pass
class NvidiaSmiParseOutputError(Exception):
pass
class HeterogeneousGpuTypesError(Exception):
pass
class UnsupportedGpuTypeError(Exception):
pass
def CheckNvidiaGpuExists(vm):
"""Returns whether NVIDIA GPU exists or not on the vm.
Args:
vm: The virtual machine to check.
Returns:
True or False depending on whether NVIDIA GPU exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
vm.Install('pciutils')
output, _ = vm.RemoteCommand('sudo lspci', should_log=True)
regex = re.compile(r'3D controller: NVIDIA Corporation')
return regex.search(output) is not None
def CheckNvidiaSmiExists(vm):
"""Returns whether nvidia-smi is installed or not on a VM.
Args:
vm: The virtual to check.
Returns:
True or False depending on whether nvidia-smi command exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
resp, _ = vm.RemoteHostCommand('command -v nvidia-smi',
ignore_failure=True,
suppress_warning=True)
return bool(resp.rstrip())
def GetDriverVersion(vm):
"""Returns the NVIDIA driver version as a string.
Args:
vm: Virtual machine to query.
Returns:
String containing NVIDIA driver version installed.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
"""
stdout, _ = vm.RemoteCommand('nvidia-smi', should_log=True)
regex = r'Driver Version\:\s+(\S+)'
match = re.search(regex, stdout)
if match:
return str(match.group(1))
raise NvidiaSmiParseOutputError('Unable to parse driver version from {}'
.format(stdout))
def GetGpuType(vm):
"""Return the type of NVIDIA gpu(s) installed on the vm.
Args:
vm: Virtual machine to query.
Returns:
Type of gpus installed on the vm as a string.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
HeterogeneousGpuTypesError: If more than one gpu type is detected.
    UnsupportedGpuTypeError: If gpu type is not supported.
Example:
If 'nvidia-smi -L' returns:
GPU 0: Tesla V100-SXM2-16GB (UUID: GPU-1a046bb9-e456-45d3-5a35-52da392d09a5)
GPU 1: Tesla V100-SXM2-16GB (UUID: GPU-56cf4732-054c-4e40-9680-0ec27e97d21c)
GPU 2: Tesla V100-SXM2-16GB (UUID: GPU-4c7685ad-4b3a-8adc-ce20-f3a945127a8a)
GPU 3: Tesla V100-SXM2-16GB (UUID: GPU-0b034e63-22be-454b-b395-382e2d324728)
GPU 4: Tesla V100-SXM2-16GB (UUID: GPU-b0861159-4727-ef2f-ff66-73a765f4ecb6)
GPU 5: Tesla V100-SXM2-16GB (UUID: GPU-16ccaf51-1d1f-babe-9f3d-377e900bf37e)
GPU 6: Tesla V100-SXM2-16GB (UUID: GPU-6eba1fa6-de10-80e9-ec5f-4b8beeff7e12)
GPU 7: Tesla V100-SXM2-16GB (UUID: GPU-cba5a243-219c-df12-013e-1dbc98a8b0de)
    GetGpuType() will return NVIDIA_TESLA_V100 (i.e. 'v100'), after first
    parsing the per-GPU model names as
    ['V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB',
     'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB']
"""
stdout, _ = vm.RemoteCommand('nvidia-smi -L', should_log=True)
try:
gpu_types = []
for line in stdout.splitlines():
if not line:
continue
splitted = line.split()
if splitted[2] == 'Tesla':
gpu_types.append(splitted[3])
else:
gpu_types.append(splitted[2])
  except IndexError:
raise NvidiaSmiParseOutputError('Unable to parse gpu type from {}'
.format(stdout))
if any(gpu_type != gpu_types[0] for gpu_type in gpu_types):
raise HeterogeneousGpuTypesError(
'PKB only supports one type of gpu per VM')
if 'K80' in gpu_types[0]:
return NVIDIA_TESLA_K80
if 'P4' in gpu_types[0]:
return NVIDIA_TESLA_P4
if 'P100' in gpu_types[0]:
return NVIDIA_TESLA_P100
if 'V100' in gpu_types[0]:
return NVIDIA_TESLA_V100
if 'T4' in gpu_types[0]:
return NVIDIA_TESLA_T4
if 'A100' in gpu_types[0]:
return NVIDIA_TESLA_A100
  raise UnsupportedGpuTypeError(
      'Gpu type {0} is not supported by PKB'.format(gpu_types[0]))
def QueryNumberOfGpus(vm):
"""Returns the number of NVIDIA GPUs on the system.
Args:
vm: Virtual machine to query.
Returns:
Integer indicating the number of NVIDIA GPUs present on the vm.
"""
stdout, _ = vm.RemoteCommand('sudo nvidia-smi --query-gpu=count --id=0 '
'--format=csv', should_log=True)
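  # The CSV output is a 'count' header followed by the value (e.g. 'count\n8'),
  # so the second whitespace-separated token is the GPU count.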
return int(stdout.split()[1])
def GetPeerToPeerTopology(vm):
"""Returns a string specifying which GPUs can access each other via p2p.
Args:
vm: Virtual machine to operate on.
Example:
If p2p topology from nvidia-smi topo -p2p r looks like this:
0 1 2 3
0 X OK NS NS
1 OK X NS NS
2 NS NS X OK
3 NS NS OK X
GetTopology will return 'Y Y N N;Y Y N N;N N Y Y;N N Y Y'
"""
stdout, _ = vm.RemoteCommand('nvidia-smi topo -p2p r', should_log=True)
lines = [line.split() for line in stdout.splitlines()]
num_gpus = len(lines[0])
results = []
for idx, line in enumerate(lines[1:]):
if idx >= num_gpus:
break
results.append(' '.join(line[1:]))
# Delimit each GPU result with semicolons,
# and simplify the result character set to 'Y' and 'N'.
return (';'.join(results)
.replace('X', 'Y') # replace X (self) with Y
.replace('OK', 'Y') # replace OK with Y
.replace('NS', 'N')) # replace NS (not supported) with N
def SetAndConfirmGpuClocks(vm):
"""Sets and confirms the GPU clock speed and autoboost policy.
  The clock values are provided either by the --gpu_clock_speeds flag, or
  from gpu-specific defaults. If a device is queried and its
clock speed does not align with what it was just set to, an exception will
be raised.
Args:
vm: The virtual machine to operate on.
Raises:
UnsupportedClockSpeedError: If a GPU did not accept the
provided clock speeds.
"""
gpu_type = GetGpuType(vm)
gpu_clock_speeds = GPU_DEFAULTS[gpu_type]['base_clock']
autoboost_enabled = GPU_DEFAULTS[gpu_type]['autoboost_enabled']
if FLAGS.gpu_clock_speeds is not None:
gpu_clock_speeds = FLAGS.gpu_clock_speeds
if FLAGS.gpu_autoboost_enabled is not None:
autoboost_enabled = FLAGS.gpu_autoboost_enabled
desired_memory_clock = gpu_clock_speeds[0]
desired_graphics_clock = gpu_clock_speeds[1]
EnablePersistenceMode(vm)
SetGpuClockSpeed(vm, desired_memory_clock, desired_graphics_clock)
SetAutoboostDefaultPolicy(vm, autoboost_enabled)
num_gpus = QueryNumberOfGpus(vm)
for i in range(num_gpus):
if QueryGpuClockSpeed(vm, i) != (desired_memory_clock,
desired_graphics_clock):
raise UnsupportedClockSpeedError(
'Unrecoverable error setting GPU #{} clock speed to {},{}'.format(
i, desired_memory_clock, desired_graphics_clock))
def SetGpuClockSpeed(vm, memory_clock_speed, graphics_clock_speed):
"""Sets autoboost and memory and graphics clocks to the specified frequency.
Args:
vm: Virtual machine to operate on.
memory_clock_speed: Desired speed of the memory clock, in MHz.
graphics_clock_speed: Desired speed of the graphics clock, in MHz.
"""
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_clock_speeds = QueryGpuClockSpeed(vm, device_id)
if current_clock_speeds != (memory_clock_speed, graphics_clock_speed):
vm.RemoteCommand('sudo nvidia-smi -ac {},{} --id={}'.format(
memory_clock_speed,
graphics_clock_speed,
device_id
))
def QueryGpuClockSpeed(vm, device_id):
"""Returns the value of the memory and graphics clock.
All clock values are in MHz.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Tuple of clock speeds in MHz in the form (memory clock, graphics clock).
"""
query = ('sudo nvidia-smi --query-gpu=clocks.applications.memory,'
'clocks.applications.graphics --format=csv --id={0}'
.format(device_id))
stdout, _ = vm.RemoteCommand(query, should_log=True)
clock_speeds = stdout.splitlines()[1]
matches = regex_util.ExtractAllMatches(EXTRACT_CLOCK_SPEEDS_REGEX,
clock_speeds)[0]
return (int(matches[0]), int(matches[1]))
def EnablePersistenceMode(vm):
"""Enables persistence mode on the NVIDIA driver.
Args:
vm: Virtual machine to operate on.
"""
vm.RemoteCommand('sudo nvidia-smi -pm 1')
def SetAutoboostDefaultPolicy(vm, autoboost_enabled):
"""Sets the autoboost policy to the specified value.
For each GPU on the VM, this function will set the autoboost policy
to the value specified by autoboost_enabled.
Args:
vm: Virtual machine to operate on.
autoboost_enabled: Bool or None. Value (if any) to set autoboost policy to
"""
if autoboost_enabled is None:
return
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_state = QueryAutoboostPolicy(vm, device_id)
if current_state['autoboost_default'] != autoboost_enabled:
vm.RemoteCommand('sudo nvidia-smi --auto-boost-default={0} --id={1}'
.format(1 if autoboost_enabled else 0, device_id))
def QueryAutoboostPolicy(vm, device_id):
"""Returns the state of autoboost and autoboost_default.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Dict containing values for autoboost and autoboost_default.
Values can be True (autoboost on), False (autoboost off),
and None (autoboost not supported).
Raises:
NvidiaSmiParseOutputError: If output from nvidia-smi can not be parsed.
"""
autoboost_regex = r'Auto Boost\s*:\s*(\S+)'
autoboost_default_regex = r'Auto Boost Default\s*:\s*(\S+)'
query = 'sudo nvidia-smi -q -d CLOCK --id={0}'.format(device_id)
stdout, _ = vm.RemoteCommand(query, should_log=True)
autoboost_match = re.search(autoboost_regex, stdout)
autoboost_default_match = re.search(autoboost_default_regex, stdout)
nvidia_smi_output_string_to_value = {
'On': True,
'Off': False,
'N/A': None,
}
if (autoboost_match is None) or (autoboost_default_match is None):
raise NvidiaSmiParseOutputError('Unable to parse Auto Boost policy from {}'
.format(stdout))
return {
'autoboost': nvidia_smi_output_string_to_value[
autoboost_match.group(1)],
'autoboost_default': nvidia_smi_output_string_to_value[
autoboost_default_match.group(1)]
}
def GetMetadata(vm):
"""Returns gpu-specific metadata as a dict.
Args:
vm: Virtual machine to operate on.
Returns:
A dict of gpu-specific metadata.
"""
clock_speeds = QueryGpuClockSpeed(vm, 0)
autoboost_policy = QueryAutoboostPolicy(vm, 0)
return {
'gpu_memory_clock': clock_speeds[0],
'gpu_graphics_clock': clock_speeds[1],
'gpu_autoboost': autoboost_policy['autoboost'],
'gpu_autoboost_default': autoboost_policy['autoboost_default'],
'nvidia_driver_version': GetDriverVersion(vm),
'gpu_type': GetGpuType(vm),
'num_gpus': QueryNumberOfGpus(vm),
'peer_to_peer_gpu_topology': GetPeerToPeerTopology(vm),
}
def DoPostInstallActions(vm):
"""Perform post NVIDIA driver install action on the vm.
Args:
vm: The virtual machine to operate on.
"""
SetAndConfirmGpuClocks(vm)
def Install(vm):
"""Install NVIDIA GPU driver on the vm.
Args:
vm: The virtual machine to install NVIDIA driver on.
"""
version_to_install = FLAGS.nvidia_driver_version
if not version_to_install:
logging.info('--nvidia_driver_version unset. Not installing.')
return
elif not FLAGS.nvidia_driver_force_install and CheckNvidiaSmiExists(vm):
    logging.warning('NVIDIA drivers already detected. Not installing.')
return
location = ('{base}/{version}/NVIDIA-Linux-x86_64-{version}.run'
.format(base=NVIDIA_DRIVER_LOCATION_BASE,
version=version_to_install))
vm.Install('wget')
tokens = re.split('/', location)
filename = tokens[-1]
vm.RemoteCommand('wget {location} && chmod 755 {filename} '
.format(location=location, filename=filename),
should_log=True)
vm.RemoteCommand('sudo ./{filename} -q -x-module-path={x_module_path} '
'--ui=none -x-library-path={x_library_path} '
'--no-install-compat32-libs'
.format(filename=filename,
x_module_path=FLAGS.nvidia_driver_x_module_path,
x_library_path=FLAGS.nvidia_driver_x_library_path),
should_log=True)
if FLAGS.nvidia_driver_persistence_mode:
EnablePersistenceMode(vm)
|
python
|
# Code adapted from https://github.com/araffin/learning-to-drive-in-5-minutes/
# Author: Sheelabhadra Dey
import argparse
import os
import time
from collections import OrderedDict
from pprint import pprint
import numpy as np
import yaml
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import VecFrameStack, VecNormalize, DummyVecEnv
from stable_baselines.ddpg import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.ppo2.ppo2 import constfn
from config import MIN_THROTTLE, MAX_THROTTLE, FRAME_SKIP,\
SIM_PARAMS, N_COMMAND_HISTORY, BASE_ENV, ENV_ID, MAX_STEERING_DIFF
from utils.utils import make_env, ALGOS, linear_schedule, get_latest_run_id, load_vae, create_callback
from environment.env import Env
from environment.carla.client import make_carla_client, CarlaClient
from environment.carla.tcp import TCPConnectionError
parser = argparse.ArgumentParser()
parser.add_argument('-tb', '--tensorboard-log', help='Tensorboard log dir', default='', type=str)
parser.add_argument('-i', '--trained-agent', help='Path to a pretrained agent to continue training',
default='', type=str)
parser.add_argument('--algo', help='RL Algorithm', default='sac',
type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument('-n', '--n-timesteps', help='Overwrite the number of timesteps', default=-1,
type=int)
parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=-1,
type=int)
parser.add_argument('-f', '--log-folder', help='Log folder', type=str, default='logs')
parser.add_argument('-vae', '--vae-path', help='Path to saved VAE', type=str, default='')
parser.add_argument('--save-vae', action='store_true', default=False,
help='Save VAE')
parser.add_argument('--seed', help='Random generator seed', type=int, default=0)
args = parser.parse_args()
set_global_seeds(args.seed)
tensorboard_log = None if args.tensorboard_log == '' else args.tensorboard_log + '/' + ENV_ID
print("=" * 10, ENV_ID, args.algo, "=" * 10)
vae = None
if args.vae_path != '':
print("Loading VAE ...")
vae = load_vae(args.vae_path)
# Load hyperparameters from yaml file
with open('hyperparams/{}.yml'.format(args.algo), 'r') as f:
hyperparams = yaml.load(f)[BASE_ENV]
# Sort hyperparams that will be saved
saved_hyperparams = OrderedDict([(key, hyperparams[key]) for key in sorted(hyperparams.keys())])
# save vae path
saved_hyperparams['vae_path'] = args.vae_path
if vae is not None:
saved_hyperparams['z_size'] = vae.z_size
# Save simulation params
for key in SIM_PARAMS:
saved_hyperparams[key] = eval(key)
pprint(saved_hyperparams)
# Compute and create log path
log_path = os.path.join(args.log_folder, args.algo)
save_path = os.path.join(log_path, "{}_{}".format(ENV_ID, get_latest_run_id(log_path, ENV_ID) + 1))
params_path = os.path.join(save_path, ENV_ID)
os.makedirs(params_path, exist_ok=True)
# Create learning rate schedules for ppo2 and sac
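# Schedule strings are expected in the form '<schedule>_<initial value>',
# e.g. 'linear_3e-4' (illustrative); plain floats become constant schedules.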
if args.algo in ["ppo2", "sac"]:
for key in ['learning_rate', 'cliprange']:
if key not in hyperparams:
continue
if isinstance(hyperparams[key], str):
schedule, initial_value = hyperparams[key].split('_')
initial_value = float(initial_value)
hyperparams[key] = linear_schedule(initial_value)
elif isinstance(hyperparams[key], float):
hyperparams[key] = constfn(hyperparams[key])
else:
            raise ValueError('Invalid value for {}: {}'.format(key, hyperparams[key]))
if args.n_timesteps > 0:
n_timesteps = args.n_timesteps
else:
n_timesteps = int(hyperparams['n_timesteps'])
del hyperparams['n_timesteps']
with make_carla_client('localhost', 2000) as client:
print("CarlaClient connected")
env = DummyVecEnv([make_env(client, args.seed, vae=vae)])
# Optional Frame-stacking
n_stack = 1
if hyperparams.get('frame_stack', False):
n_stack = hyperparams['frame_stack']
env = VecFrameStack(env, n_stack)
print("Stacking {} frames".format(n_stack))
del hyperparams['frame_stack']
# Parse noise string for DDPG
if args.algo == 'ddpg' and hyperparams.get('noise_type') is not None:
noise_type = hyperparams['noise_type'].strip()
noise_std = hyperparams['noise_std']
n_actions = env.action_space.shape[0]
if 'adaptive-param' in noise_type:
hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std,
desired_action_stddev=noise_std)
elif 'normal' in noise_type:
hyperparams['action_noise'] = NormalActionNoise(mean=np.zeros(n_actions),
sigma=noise_std * np.ones(n_actions))
elif 'ornstein-uhlenbeck' in noise_type:
hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),
sigma=noise_std * np.ones(n_actions))
else:
raise RuntimeError('Unknown noise type "{}"'.format(noise_type))
print("Applying {} noise with std {}".format(noise_type, noise_std))
del hyperparams['noise_type']
del hyperparams['noise_std']
# Train an agent from scratch
model = ALGOS[args.algo](env=env, tensorboard_log=tensorboard_log, verbose=1, **hyperparams)
kwargs = {}
if args.log_interval > -1:
kwargs = {'log_interval': args.log_interval}
if args.algo == 'sac':
kwargs.update({'callback': create_callback(args.algo,
os.path.join(save_path, ENV_ID + "_best"),
verbose=1)})
model.learn(n_timesteps, **kwargs)
# Save trained model
model.save(os.path.join(save_path, ENV_ID))
# Save hyperparams
with open(os.path.join(params_path, 'config.yml'), 'w') as f:
yaml.dump(saved_hyperparams, f)
if args.save_vae and vae is not None:
print("Saving VAE")
vae.save(os.path.join(params_path, 'vae'))
|
python
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import fnmatch
import itertools
import pickle
import re
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Sequence
from ..core import _imperative_rt
from ..core._imperative_rt import ComputingGraph, SerializationMetadata
from ..core._trace_option import set_symbolic_shape as _set_symbolic_shape
from ..core.tensor import megbrain_graph as G
from ..logger import get_logger
from .comp_graph_tools import get_dep_vars, get_opr_type, get_oprs_seq
from .network_node import (
ConstOpBase,
Host2DeviceCopy,
ImmutableTensor,
NetworkNode,
OpNode,
VarNode,
str_to_mge_class,
)
logger = get_logger(__name__)
class Network:
def __init__(self):
self.input_vars = [] # input var of graph
self._orig_inputs = []
self.output_vars = [] # output var of graph
self._orig_outputs = []
        self.all_oprs_map = OrderedDict()  # _imperative_rt.graph.OperatorNode.id: OpNode
        self.all_vars_map = (
            OrderedDict()
        )  # _imperative_rt.graph.VarNode.id: VarNode
self.graph = ComputingGraph()
self._metadata = None
@property
def metadata(self):
r"""Load metadata as a dict."""
if not self._metadata.is_valid:
logger.info("metadata is not valid!")
return None
ret = dict()
try:
user_info = pickle.loads(self._metadata.user_info)
except: # pylint: disable=bare-except
logger.warning(
"can't parse user info by pickle, so return the original bytes object!"
)
user_info = self._metadata.user_info
ret["user_info"] = user_info
ret["graph_modified"] = self._metadata.graph_modified
ret["optimized_for_inference"] = self._metadata.optimized_for_inference
if ret["optimized_for_inference"]:
ret.update(G.deserialize_infer_option(self._metadata.optimize_options))
return ret
@classmethod
def load(cls, model_path: str, outspec: List[str] = None):
r"""Loads a computing graph as a Network object.
Args:
model_path: file path of mge model.
outspec: only load the subgraph with outspec as its endpoints.
"""
self = cls()
ret = G.load_graph(model_path)
outputs, self._metadata = ret.output_vars_list, ret.metadata
if outspec is not None:
output_spec = outspec.copy()
all_vars = get_dep_vars(outputs) + outputs
new_outputs = {}
for i in all_vars:
if i.name in output_spec:
new_outputs[i.name] = i
output_spec.remove(i.name)
assert len(output_spec) == 0, "Can not find {} in this model".format(
output_spec
)
outputs = [new_outputs[i] for i in outspec]
self._orig_outputs = outputs
for x in self._orig_outputs:
self.output_vars.append(self._get_var(x))
self.add_dep_oprs()
for x in self._orig_inputs:
self.input_vars.append(self._get_var(x))
self.graph = self._orig_outputs[0].graph
return self
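    # Minimal usage sketch (file names are illustrative):
    #   net = Network.load("model.mge")
    #   net.add_output(net.get_var_by_name("some_output"))
    #   net.dump("model_edited.mge")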
def _compile(self):
self.all_oprs_map = {}
self.all_vars_map = {}
for opr in self.all_oprs:
if isinstance(opr, (ConstOpBase, Host2DeviceCopy)):
opr.compile(self.graph)
else:
opr.compile()
if opr.name is not None:
opr._opr.name = opr.name
self.all_oprs_map[opr._opr.id] = opr
for o in opr.outputs:
self.all_vars_map[o.var.id] = o
def optimize_for_inference(self, dest_vars, **kwargs):
r"""Applies optimize_for_inference pass for operator graph.
Args:
dest_vars: list of output vars in the operator graph
Keyword Arguments:
* enable_io16xc32 --
whether to use float16 for I/O between oprs and use
float32 as internal computation precision. Note the output var would be
changed to float16.
* enable_ioc16 --
whether to use float16 for both I/O and computation
precision.
* enable_hwcd4 --
whether to use NHWCD4 data layout. This is faster on some
OpenCL backend.
* enable_nchw88 --
whether to use NCHW88 data layout, currently
used in X86 AVX backend.
* enable_nchw44 --
whether to use NCHW44 data layout, currently
used in arm backend.
* enable_nchw44_dot --
whether to use NCHW44_dot data layout, currently
used in armv8.2+dotprod backend.
* enable_nchw4 --
whether to use NCHW4 data layout, currently
used in nvidia backend(based on cudnn).
* enable_nchw32 --
whether to use NCHW32 data layout, currently
used in nvidia backend with tensorcore(based on cudnn).
* enable_chwn4 --
whether to use CHWN4 data layout, currently
used in nvidia backend with tensorcore.
* enable_nchw64 --
whether to use NCHW64 data layout, used for fast int4
support on Nvidia GPU.
* enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearty
into one opr.
* enable_fuse_conv_bias_with_z: whether to fuse conv_bias with z
input for inference on nvidia backend(this optimization pass will
result in mismatch of the precision of output of training and
inference)
"""
if not isinstance(dest_vars, Sequence):
dest_vars = [dest_vars]
dest_vars = list(G.VarNode(var.var) for var in dest_vars)
new_vars = G.optimize_for_inference(dest_vars, **kwargs)
return list(self._get_var(var) for var in new_vars)
def dump(
self,
file,
*,
keep_var_name: int = 1,
keep_opr_name: bool = False,
keep_param_name: bool = False,
keep_opr_priority: bool = False,
strip_info_file=None,
append_json=False,
optimize_for_inference=True,
append=False,
user_info: Any = None,
enable_metadata=True,
**kwargs
):
r"""Serializes graph to file.
Args:
file: output file, could be file object or filename.
append: whether output is appended to ``file``.
Only works when ``file`` is str.
keep_var_name: level for keeping variable names:
* 0: none of the names are kept
                * 1: (default) keep names of output vars
* 2: keep names of all (output and internal) vars
keep_opr_name: whether to keep operator names.
keep_param_name: whether to keep param names, so param values can be
easily manipulated after loading model
keep_opr_priority: whether to keep priority setting for operators
            strip_info_file: a path string or a file handler. If not None,
                the dump information for code strip will be written to ``strip_info_file``.
            append_json: only checked when ``strip_info_file`` is not None. If set
                true, the code-strip information will be appended to ``strip_info_file``;
                if set false, ``strip_info_file`` will be rewritten.
            optimize_for_inference: enable optimization passes; all optimize options
                will be skipped if this is False. Default: True
            user_info: any type of object, which will be pickled to bytes.
            enable_metadata: whether to save metadata into the output file.
        See more details in :meth:`~.trace.dump`.
"""
def _set_var_name(var):
graph_var = G.VarNode(var.var)
graph_var.name = var.name
return graph_var
self._compile()
out = list(map(_set_var_name, self.output_vars))
if kwargs.pop("arg_names", False):
logger.warning(
'"arg_names" is not supported in Network.dump, rename input vars directly'
)
if kwargs.pop("output_names", False):
logger.warning(
'"output_names" is not supported in Network.dump, rename output vars directly'
)
if optimize_for_inference:
out, optimize_options = G.optimize_for_inference(out, **kwargs)
metadata = SerializationMetadata()
if enable_metadata:
metadata.is_valid = True
metadata.graph_modified = True
metadata.user_info = pickle.dumps(user_info)
if optimize_for_inference:
metadata.optimize_options = optimize_options
G.set_priority_to_id([o._node if isinstance(o, G.VarNode) else o for o in out])
dump_content, _ = G.dump_graph(
out,
keep_var_name=keep_var_name,
keep_opr_name=keep_opr_name,
keep_param_name=keep_param_name,
keep_opr_priority=keep_opr_priority,
strip_info_file=strip_info_file,
append_json=append_json,
metadata=metadata,
)
if isinstance(file, str):
permission = "wb" if append == False else "ab"
file = open(file, permission)
file.write(dump_content)
def make_const(self, data, name=None, device=None):
r"""Makes an ImmutableTensor OpNode to provide a parameter for the network."""
node = ImmutableTensor(data, name, device, self.graph)
node.compile(self.graph)
return node.outputs[0]
def make_input_node(self, shape, dtype, name=None, device=None):
r"""Makes a Host2DeviceCopy OpNode to provide an input varnode for the network."""
node = Host2DeviceCopy(shape, dtype, name, device)
node.compile(self.graph)
return node.outputs[0]
def add_output(self, *vars: VarNode):
r"""Adds vars into the network output node list"""
if not all([var.owner for var in vars]):
self.add_dep_oprs(*vars)
for var in vars:
# use method 'is' instead of 'in' to avoid
# compare VarNode use elemwise equal
if not any(var is _ for _ in self.output_vars):
self.output_vars.append(var)
def remove_output(self, *vars: VarNode):
r"""Removes vars from the network output node list"""
for var in vars:
# use list pop instead of remove to avoid
# compare VarNode use elemwise equal
for idx, out_var in enumerate(self.output_vars):
if var is out_var:
self.output_vars.pop(idx)
def add_dep_oprs(self, *vars):
if len(vars) == 0:
vars = self.output_vars
assert all(isinstance(var, VarNode) for var in vars), "Only support add VarNode"
q = list(vars)
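        # Breadth-first walk from the given vars back through their producing
        # operators, building OpNode/VarNode wrappers for everything not seen yet.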
while len(q) > 0:
cur = q.pop(0)
if cur.owner is not None:
continue
if cur.name is None:
cur.name = cur.var.name
self.all_vars_map[cur.var.id] = cur
mge_opr = cur.var.owner
if get_opr_type(mge_opr) == "Host2DeviceCopy":
self._orig_inputs.extend(mge_opr.outputs)
cur.owner = self._add_opr(mge_opr)
if cur.owner is None:
cur.owner = self.all_oprs_map[mge_opr.id]
continue
q.extend(cur.owner.inputs)
return list(vars)
def modify_opr_names(self, modifier):
r"""Modifies names of operators **inplace**; useful for merging loaded
network into another network
Args:
modifier(str or callable): a string to be prepended to the name, or a function
that maps from name to name
"""
if isinstance(modifier, str):
om = modifier
modifier = lambda v: "{}.{}".format(om, v)
assert isinstance(modifier, collections.Callable)
for i in self.all_oprs:
v0 = i.name
v1 = modifier(v0)
assert isinstance(v1, str)
i.name = v1
def reset_batch_size(self, batchsize, *, blacklist=()):
r"""Helper for reset batch size; first dimension of all data providers
not in blacklist are assumed to be the batch size
Args:
blacklist: data provider names whose first dimension is not
batchbatch size
"""
blacklist = set(blacklist)
prev_batchsize = None
for i in self.data_providers_filter:
if i.name in blacklist:
blacklist.remove(i.name)
else:
shp = list(i.shape)
if prev_batchsize is None:
prev_batchsize = shp[0]
else:
assert prev_batchsize == shp[0], (
"batchsize mismatch: batchsize={} "
"shape={} dp={}".format(prev_batchsize, shp, i.name)
)
shp[0] = batchsize
i.shape = tuple(shp)
self._compile()
assert prev_batchsize is not None, "no data provider found"
assert not blacklist, "unused items in blacklist: {}".format(blacklist)
def replace_vars(self, repl_dict: Dict[VarNode, VarNode]):
r"""Replaces vars in the graph.
Args:
repl_dict: the map {old_var: new_var} that specifies how to replace the vars.
"""
if not all([var.owner for var in repl_dict.values()]):
self.add_dep_oprs(*list(repl_dict.values()))
for var in self.all_vars:
if var in repl_dict:
repl_var = repl_dict[var]
if repl_var is var:
continue
for opnode in var.users:
# use method 'is' instead of 'in' to avoid
# compare VarNode use elemwise equal
assert any([var is _ for _ in opnode.inputs])
opnode.inputs = [repl_var if var is i else i for i in opnode.inputs]
if opnode not in repl_var.users:
repl_var.users.append(opnode)
var.users.clear()
self._compile()
def replace_oprs(self, repl_dict: Dict[OpNode, OpNode]):
r"""Replaces operators in the graph.
Args:
repl_dict: the map {old_opr: new_opr} that specifies how to replace the operators.
"""
for opr in self.all_oprs:
if opr in repl_dict:
assert len(opr.outputs) == len(
repl_dict[opr].outputs
), "can not replace {} with {}".format(type(opr), type(repl_dict[opr]))
for ind, var in enumerate(opr.outputs):
var.owner = repl_dict[opr]
var.__dict__.update(repl_dict[opr].outputs[ind].__dict__)
var.var = repl_dict[opr].outputs[ind].var
repl_dict[opr].outputs = opr.outputs
self._compile()
def get_opr_by_type(self, oprcls, unique=True):
assert issubclass(oprcls, OpNode)
rst = self.opr_filter.type(oprcls).as_list()
if unique:
assert len(rst) == 1, "{} operators of type {} found".format(
len(rst), oprcls
)
(rst,) = rst
return rst
def get_opr_by_name(self, name, unique=True):
rst = self.opr_filter.name(name).as_list()
if unique:
assert len(rst) == 1, "{} operators of type {} found".format(len(rst), name)
(rst,) = rst
return rst
def get_var_by_name(self, name, unique=True):
rst = self.var_filter.name(name).as_list()
if unique:
assert len(rst) == 1, "{} operators of type {} found".format(len(rst), name)
(rst,) = rst
return rst
def get_var_receive_oprs(self, var):
r"""Gets all oprs which use var as input"""
return self.opr_filter.has_input(var).as_list()
def get_dep_oprs(self, var):
r"""Gets dependent oprs of var"""
return get_oprs_seq(var, False, False)
@property
def opr_filter(self):
r"""Filter on all opnodes of the Network."""
oprs = self.all_oprs
return NodeFilter(itertools.islice(oprs, len(oprs)))
@property
def var_filter(self):
r"""Filter on all varnode of the Network."""
vars = self.all_vars
return NodeFilter(itertools.islice(vars, len(vars)))
@property
def params_filter(self): # all immutable tensor
r"""Filter on all parameters (ImmutableTensor Opr) of the Network"""
return self.opr_filter.param_provider()
@property
def data_providers_filter(self): # all host2devicecopy
r"""Filter on all input nodes (Host2DeviceCopy Opr) of the Network"""
return self.opr_filter.data_provider()
@property
def dest_vars(self):
r"""Output varnodes of the Network."""
return self.output_vars
@property
def all_oprs(self):
return get_oprs_seq(self.output_vars, False, False)
@property
def all_vars(self):
return get_dep_vars(self.output_vars)
@property
def all_vars_dict(self):
return self.var_filter.as_dict()
@property
def all_oprs_dict(self):
return self.opr_filter.as_dict()
def _add_opr(self, opr) -> Optional[OpNode]:
r"""Used for loading and building graph."""
assert isinstance(opr, _imperative_rt.graph.OperatorNode)
# TODO: use megbrain C++ RTTI to replace type string
if opr.id not in self.all_oprs_map:
opnode = str_to_mge_class(get_opr_type(opr)).load(opr)
self.all_oprs_map[opr.id] = opnode
for var in opr.inputs:
varnode = self._get_var(var)
opnode.add_inp_var(varnode)
varnode.users.append(opnode)
for var in opr.outputs:
opnode.add_out_var(self._get_var(var))
return opnode
else:
            # When the operator has more than one output, overwrite the opnode's
            # newly created output VarNodes with the original ones; otherwise the
            # _compile step can build wrong dependencies.
if len(opr.outputs) > 1:
opnode = self.all_oprs_map[opr.id]
for idx, output in enumerate(opnode.outputs):
if output.var.id in self.all_vars_map:
opnode.outputs[idx] = self.all_vars_map[output.var.id]
return None
def _get_opr(self, x):
if x.id in self.all_oprs_map:
return self.all_oprs_map[x.id]
else:
return None
def _get_var(self, x):
r"""Convert :class:`~._imperative_rt.graph.VarNode` to :class:`~.VarNode`."""
assert isinstance(x, _imperative_rt.graph.VarNode)
if x.id not in self.all_vars_map or self.all_vars_map[x.id].var != x:
self.all_vars_map[x.id] = VarNode.load(x, self._get_opr(x.owner))
return self.all_vars_map[x.id]
def set_symbolic_shape(option: bool):
r"""Set the VarNode use symbolic shape or not, return the last status.
Please set to True and must recover after dump if want to change the input batch size.
Args:
option: True for enable symbolic shape.
"""
return _set_symbolic_shape(option)
def as_varnode(obj):
r"""convert a :class:`.VarNode` compatible object to :class:`.VarNode`.
Args:
obj: it must be one of the following:
1. a :class:`.VarNode` object
2. a :class:`.OpNode` object that has unique output
3. an iterable that produces either type 1 or 2, with length 1
"""
if type(obj) is VarNode:
return obj
if isinstance(obj, OpNode):
assert len(obj.outputs) == 1, (
"operator {} must have one output to be converted to VarNode; "
"got {} actually".format(obj, len(obj.outputs))
)
ret = obj.outputs[0]
assert type(ret) is VarNode
return ret
assert isinstance(
obj, collections.Iterable
), "{} is not compatible with VarNode".format(obj)
val = list(obj)
assert (
len(val) == 1
), "can not convert sequence of length {} to VarNode ({})".format(
len(val), (lambda s: s if len(s) < 50 else s[:50] + " ...")(str(val))
)
return as_varnode(val[0])
def as_oprnode(obj):
r"""convert a :class:`.OpNode` compatible object to
    :class:`.OpNode`; it works like :func:`as_varnode`.
"""
if type(obj) is VarNode:
return obj.owner
if isinstance(obj, OpNode):
return obj
assert isinstance(
obj, collections.Iterable
), "{} is not compatible with OpNode".format(obj)
val = list(obj)
assert (
len(val) == 1
), "can not convert sequence of length {} to " "OpNode({})".format(len(val), val)
return as_oprnode(val[0])
class NodeFilter:
r"""Filter on node iterator. This class is an iterator of
:class:`.NetworkNode` objects and multiple filtering conditions and
mappers can be chained.
Example:
.. code-block::
# find all :class:`.ImmutableTensor` nodes
for i in NodeFilter(node_iter).param_provider():
print(i)
# find all :class:`.ImmutableTensor` nodes that end with ':W'
for i in NodeFilter(node_iter).param_provider().name('*:W'):
print(i)
# number of inputs
nr_input = NodeFilter(node_iter).data_provider().as_count()
"""
_iter = None
def __init__(self, node_iter):
"""
:param node_iter: iterator to :class:`.NetworkNode`, or a
            :class:`.VarNode`-compatible object; in the latter case, its
dependent oprs would be used
"""
if isinstance(node_iter, VarNode):
oprs = get_oprs_seq(node_iter, False, False)
node_iter = itertools.islice(oprs, len(oprs) - 1)
if isinstance(node_iter, OpNode):
oprs = get_oprs_seq(node_iter.inputs, False, False)
node_iter = itertools.islice(oprs, len(oprs) - 1)
assert isinstance(node_iter, collections.Iterable)
if (not isinstance(node_iter, NodeFilter)) and type(
self
) is not NodeFilterCheckType:
node_iter = NodeFilterCheckType(node_iter, NetworkNode)
self._iter = node_iter
@classmethod
def make_all_deps(cls, *dest_vars):
r"""make a :class:`NodeFilter` that contains all deps of given vars"""
return cls(list(get_oprs_seq(dest_vars, False, False)))
def __iter__(self):
r"""to be overwritten by subclass to implement filters"""
return iter(self._iter)
def type(self, node_type):
r"""filter by specific node type
Args:
node_type: node type class
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterType(self, node_type)
def check_type(self, node_type):
r"""assert that all oprs produced by this iterator are instances of
certain type
Args:
node_type: node type class
Returns:
a new :class:`NodeFilter` object
Raises:
TypeError if type check failed
"""
return NodeFilterCheckType(self, node_type)
def not_type(self, node_type):
r"""remove oprs of specific type
Args:
node_type: node type class
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterNotType(self, node_type)
def param_provider(self):
r"""get :class:`~.ParamProvider` oprs; shorthand for
``.type(ParamProvider)``
"""
return self.type(ImmutableTensor)
def data_provider(self):
r"""get :class:`.DataProvider` oprs; shorthand for
``.type(DataProvider)``
"""
return self.type(Host2DeviceCopy)
def name(self, pattern, ignorecase=True):
r"""filter by node name
Args:
pattern(class:`str`): a string in glob syntax that can contain ``?`` and
``*`` to match a single or arbitrary characters.
            ignorecase(bool, optional): whether to ignore case
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterName(self, pattern, ignorecase)
def has_input(self, var):
r"""an opr is kept if it has given var as one of its inputs
Args:
var: var node to checked
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterHasInput(self, var)
def as_list(self):
r"""consume this iterator and return its content as a list"""
return list(self)
def as_unique(self):
r"""assert that this iterator yields only one node and return it
Returns:
class:`.GraphNodeBase`: the unique node
Raises:
ValueError if this iterator does not yield a unique node
"""
(opr,) = self
return opr
def as_dict(self):
r"""construct an ordered dict to map from node names to objects in
this iterator
"""
return collections.OrderedDict((i.name, i) for i in self)
def as_count(self):
r"""consume this iterator and get the number of elements"""
return sum(1 for _ in self)
class NodeFilterType(NodeFilter):
r"""see :meth:`NodeFilter.type`"""
_node_type = None
def __init__(self, node_iter, node_type):
assert issubclass(node_type, NetworkNode), "bad opr type: {}".format(node_type)
super().__init__(node_iter)
self._node_type = node_type
def __iter__(self):
for i in self._iter:
if isinstance(i, self._node_type):
yield i
class NodeFilterNotType(NodeFilterType):
r"""see :meth:`NodeFilter.not_type`"""
def __iter__(self):
for i in self._iter:
if not isinstance(i, self._node_type):
yield i
class NodeFilterCheckType(NodeFilterType):
r"""see :meth:`NodeFilter.check_type`"""
def __iter__(self):
for i in self._iter:
if not isinstance(i, self._node_type):
raise TypeError(
"all nodes should be {}; got {!r}".format(self._node_type, i)
)
yield i
class NodeFilterHasInput(NodeFilter):
r"""see :meth:`NodeFilter.has_input`"""
_var = None
def __init__(self, node_iter, var):
var = as_varnode(var)
super().__init__(node_iter)
self.var = var
def __iter__(self):
for i in self._iter:
assert isinstance(
i, OpNode
), "has_input() must be used with OpNode; " "got {!r}".format(i)
if any(self.var is _ for _ in i.inputs):
yield i
class NodeFilterName(NodeFilter):
r"""see :meth:`NodeFilter.name`"""
_re = None
def __init__(self, node_iter, pattern, ignorecase):
super().__init__(node_iter)
self.pattern = pattern
self._re = self.make_re(pattern, ignorecase)
@classmethod
def make_re(cls, pattern, ignorecase=True):
assert isinstance(pattern, str), "bad pattern: {!r}".format(pattern)
assert isinstance(ignorecase, bool)
flags = 0
if ignorecase:
flags |= re.IGNORECASE
return re.compile(fnmatch.translate(pattern), flags=flags)
def __iter__(self):
for i in self._iter:
if self.pattern == i.name or self._re.match(i.name):
yield i
|
python
|
from glue.config import DictRegistry
__all__ = ['viewer_registry', 'ViewerRegistry']
class ViewerRegistry(DictRegistry):
"""
Registry containing references to custom viewers.
"""
def __call__(self, name=None):
def decorator(cls):
self.add(name, cls)
return cls
return decorator
def add(self, name, cls):
"""
Add an item to the registry.
Parameters
----------
name : str
The key referencing the associated class in the registry
dictionary.
cls : type
The class definition (not instance) associated with the name given
in the first parameter.
"""
if name in self.members:
raise ValueError(f"Viewer with the name {name} already exists, "
f"please choose a different name.")
else:
self.members[name] = {'cls': cls}
viewer_registry = ViewerRegistry()
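# Minimal usage sketch (the viewer name and class are illustrative):
#
#   @viewer_registry('my-viewer')
#   class MyViewer:
#       ...
#
# registers MyViewer under the key 'my-viewer' in viewer_registry.members.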
|
python
|
# -*- coding: utf-8 -*-
'''
Code List Object
===============
'''
from __future__ import annotations
__all__ = ('CodeList',)
from typing import Tuple
from builder.commands.scode import SCode
from builder.datatypes.builderexception import BuilderError
from builder.utils import assertion
from builder.utils.logger import MyLogger
# logger
LOG = MyLogger.get_logger(__name__)
LOG.set_file_handler()
class CodeList(object):
''' Code list package object.
'''
def __init__(self, *args: SCode):
self._data = tuple(assertion.is_instance(a, SCode) for a in args)
#
# property
#
@property
def data(self) -> Tuple[SCode]:
return self._data
|
python
|
# import logging
from xml.etree import ElementTree as ET
import lxml.etree as LET
from ckeditor.fields import RichTextField
from acdh_tei_pyutils.tei import TeiReader
from curator.models import Upload
# Create your models here.
from django.db import models
from django.utils.timezone import now
from .namespaces import NS, TEI_NS, XML_ID, get_attribute
from .util import clean_id, element2string
from django.template.loader import render_to_string
for ns, uri in NS.items():
ET.register_namespace(ns, uri)
class Witness(models.Model):
"""
Stores the physical tablet information - siglum
Probably unnecessary in the database
"""
witness_id = models.CharField(max_length=100, primary_key=True) # siglum
museum_numbers = models.TextField(blank=True, null=True)
provenance = models.CharField(max_length=100, blank=True, null=True)
script = models.TextField(blank=True, null=True)
state_publication = models.TextField(blank=True, null=True)
state_preservation = models.TextField(blank=True, null=True)
manuscript_type = models.TextField(blank=True, null=True)
tablets_attested = models.TextField(blank=True, null=True)
omens_attested = models.TextField(blank=True, null=True)
cdli_number = models.TextField(blank=True, null=True)
remarks = models.TextField(blank=True, null=True)
ctime = models.DateTimeField(default=now)
def __str__(self):
return f"{self.witness_id}: {self.museum_numbers}"
@property
def xml_id(self):
return clean_id(self.witness_id)
@property
def tei(self):
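        # Builds a TEI fragment of the shape (illustrative id/idno values):
        #   <witness xml:id="..."><idno>BM 036389+</idno></witness>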
wit = ET.Element(get_attribute("witness", TEI_NS), {XML_ID: self.xml_id})
idno = ET.SubElement(wit, get_attribute("idno", TEI_NS))
idno.text = self.witness_id
return wit
# @staticmethod
# def corresponding_witness(cls, witness_label):
# """
# returns the corresponding witness object (eg. BM 036389+)
# given the witness label found in the score (eg. BM 36389+.2)
# """
# search_str = witness_label.split("+")[0]
# return Witness.objects.filter(witness_id__startswith=search_str)
class Chapter(models.Model):
"""
Stores the chapter number, name and links to omens
"""
chapter_name = models.CharField(max_length=100, unique=True)
animal = models.CharField(max_length=100, blank=True, null=True)
author = models.CharField(max_length=100, blank=True, null=True)
reviewer = models.CharField(max_length=100, blank=True, null=True)
proofreader = models.CharField(max_length=100, blank=True, null=True)
remarks = models.CharField(max_length=100, blank=True, null=True)
ctime = models.DateTimeField(default=now, blank=True, null=True)
witness = models.ManyToManyField(Witness)
upload = models.ManyToManyField(Upload)
introduction = RichTextField(default="Page under construction", blank=True, null=True)
def __str__(self):
return f"Chapter {self.chapter_name}"
def get_witness_from_omen(self):
witnesses = Witness.objects.filter(omen__in=self.omen_set.all()).distinct()
return witnesses
@property
def full_tei_string(self):
template_name = "omens/tei_templates/chapter.xml"
context = {"object": self}
full_tei_string = render_to_string(template_name, context)
return full_tei_string
class Omen(models.Model):
"""
Individual omen
"""
xml_id = models.CharField(max_length=100, unique=True) # TEI ID
omen_name = models.CharField(max_length=100, primary_key=True) # TEI @n
omen_num = models.CharField(max_length=100) # from sheet name
ctime = models.DateTimeField(default=now)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, default="")
witness = models.ManyToManyField(Witness)
upload = models.ManyToManyField(Upload)
tei_content = models.TextField(blank=True, null=True)
@property
def tei(self):
chapter_tei = ET.XML(self.chapter.tei)
omen_tei = chapter_tei.find(f'.//*[@n="{self.omen_name}"]')
        if omen_tei is not None:
tei_string = element2string(omen_tei)
return tei_string
return ""
@property
def full_tei_string(self):
template_name = "omens/tei_templates/omen.xml"
context = {"object": self}
full_tei_string = render_to_string(template_name, context)
return full_tei_string
@property
def protasis(self):
return Segment.objects.filter(xml_id=self.xml_id + "_P")[0]
@property
def apodosis(self):
return Segment.objects.filter(xml_id=self.xml_id + "_A")[0]
class Segment(models.Model):
"""
A segment in the omen, either PROTASIS or APODOSIS
"""
xml_id = models.CharField(max_length=100, unique=True) # TEI ID
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
segment_type = models.CharField(
max_length=9,
choices=(("PROTASIS", "Protasis"), ("APODOSIS", "Apodosis")),
default="PROTASIS",
)
@classmethod
def protasis(cls, omen):
return cls.objects.filter(xml_id=omen.omen_name + "_P")[0]
@classmethod
def apodosis(cls, omen):
return cls.objects.filter(xml_id=omen.omen_name + "_A")[0]
def __str__(self):
return f"Omen {self.omen.omen_name} - {self.segment_type}"
class Lemma(models.Model):
"""
A lemma in the omen, represented using w element inside the score in the TEI
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
lemma_idx = models.IntegerField(
default=0
) # index of the lemma in the in the omen (position of the w element, implicit in the TEI)
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
def set_segment_type_to_apodosis(self):
# logging.debug("Changing to Apodosis %s", self.omen.apodosis)
self.segment = self.omen.apodosis
self.save()
def __str__(self):
return f"{self.xml_id}_{self.segment}"
class Reconstruction(models.Model):
"""
A reconstruction of the omen, which contains one or more of the following:
- translation
- transcription
- transliteration
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
label = models.CharField(max_length=100)
omen = models.ForeignKey(Omen, on_delete=models.CASCADE, default="")
witness = models.ForeignKey(Witness, on_delete=models.CASCADE, null=True)
@property
def safe_id(self):
return self.xml_id.replace("_", "-").replace(".", "-")
class Translation(models.Model):
"""
Translation of the omen, corresponding to a particular reconstruction
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
reconstruction = models.ForeignKey(
Reconstruction, on_delete=models.CASCADE, default=""
)
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
translation_txt = models.CharField(max_length=1000, default="")
lang = models.CharField(
max_length=2,
choices=(("en", "ENGLISH"), ("dt", "GERMAN"), ("de", "GERMAN")),
default="en",
)
@property
def safe_id(self):
return f"{self.reconstruction.safe_id}-{self.segment.segment_type}"
def __str__(self):
return f"{self.xml_id} {self.segment}"
# class Word(models.Model):
# """
# Words and word roots from the translation,
# to be linked with the curated SenseTree later
# """
# translation = models.ForeignKey(Translation, on_delete=models.CASCADE)
# # position of the word in the in the translation segment
# word_idx = models.IntegerField(default=0)
# # root form of the word
# word_root = models.CharField(max_length=100, default="")
# sense_tree = models.ForeignKey(SenseTree, on_delete=models.CASCADE)
class Transliteration(models.Model):
"""
A row represents a lemma in a transliteration reconstruction of the omen
Probably unnecessary
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
reconstruction = models.ForeignKey(
Reconstruction, on_delete=models.CASCADE, default=""
)
lemma = models.ForeignKey(Lemma, on_delete=models.CASCADE, default="")
class Transcription(models.Model):
"""
A row represents a lemma in a transcription of the omen
Probably unnecessary
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
reconstruction = models.ForeignKey(
Reconstruction, on_delete=models.CASCADE, default=""
)
lemma = models.ForeignKey(Lemma, on_delete=models.CASCADE, default="")
class Sequence(models.Model):
"""
A row represents a named sequence of omens curated
"""
seq_name = models.CharField(max_length=100, unique=True)
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
position = models.IntegerField(default=0)
class PhilComment(models.Model):
"""
A row represents a philological comment
"""
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
comment = RichTextField(blank=True, null=True)
    def __str__(self):
        if self.comment:
            return f"{self.comment[:24]}... (Omen: {self.omen.omen_num})"
        return f"(empty comment) (Omen: {self.omen.omen_num})"
@property
def xml_id(self):
return f"phil-comment__{self.id}"
def as_tei_node(self):
if self.comment:
note_node = LET.Element("{http://www.tei-c.org/ns/1.0}note")
note_node.attrib['type'] = "phil-comment"
note_node.attrib["{http://www.w3.org/XML/1998/namespace}id"] = self.xml_id
note_node.text = self.comment
return note_node
return None
def get_parent_node(self):
if self.omen.tei_content:
try:
omen_tei = TeiReader(self.omen.tei_content)
except LET.XMLSyntaxError:
return None
return omen_tei.tree
else:
return None
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
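        # Re-serialize the omen TEI with this comment as its first <note>: any
        # previous copy of the note (matched by xml:id) is removed, the fresh
        # node is inserted, and the updated TEI is written back to the omen.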
some_div = self.get_parent_node()
if some_div is not None:
phil_note = self.as_tei_node()
xpath = f'//*[@xml:id="{self.xml_id}"]'
for bad in some_div.xpath(xpath):
bad.getparent().remove(bad)
if phil_note is not None:
some_div.insert(0, phil_note)
self.omen.tei_content = ET.tostring(some_div).decode()
self.omen.save()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django_fsm.db.fields.fsmfield
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nickname', models.CharField(max_length=100, null=True, blank=True)),
('nickname_slug', models.CharField(db_index=True, max_length=100, null=True, blank=True)),
('nickname_state', django_fsm.db.fields.fsmfield.FSMField(default=b'unset', max_length=50)),
('wants_reviews', models.BooleanField(default=False, help_text=b"Reviewing other's tasks helps finish transcripts faster.", verbose_name=b"Help review other's tasks.")),
('task_order', models.CharField(default=b'eager', max_length=10, verbose_name=b'Which order would you like to receive tasks?', choices=[(b'eager', b'Give me different kinds of tasks when they are available.'), (b'sequential', b'Give me the same kinds of tasks in sequence.')])),
],
),
migrations.CreateModel(
name='TaskType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=10)),
('description', models.CharField(max_length=200)),
('order', models.PositiveIntegerField(default=0, unique=True)),
],
options={
'ordering': ('order',),
},
),
migrations.AddField(
model_name='profile',
name='task_types',
field=models.ManyToManyField(to='profiles.TaskType', verbose_name=b'Which tasks would you like to help with?'),
),
migrations.AddField(
model_name='profile',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
]
|
python
|
from pycket.interpreter import (
App,
Begin,
Begin0,
BeginForSyntax,
CaseLambda,
Cell,
CellRef,
DefineValues,
If,
Lambda,
Let,
Letrec,
LexicalVar,
Module,
ModuleVar,
LinkletVar,
Quote,
QuoteSyntax,
Require,
SetBang,
ToplevelVar,
VariableReference,
WithContinuationMark,
make_let,
make_letrec,
)
from rpython.rlib.objectmodel import specialize
class ASTVisitor(object):
"""
An abstract visitor class for the AST classes defined below.
A subclass need only define handler functions for the relevant portions
of the AST, as the default implementations in this class pass along the
relevant data.
"""
@specialize.argtype(0)
def visit_cell(self, ast, *args):
assert isinstance(ast, Cell)
expr = ast.expr.visit(self, *args)
return Cell(expr, need_cell_flags=ast.need_cell_flags)
@specialize.argtype(0)
def visit_quote(self, ast, *args):
assert isinstance(ast, Quote)
return ast
@specialize.argtype(0)
def visit_quote_syntax(self, ast, *args):
assert isinstance(ast, QuoteSyntax)
return ast
@specialize.argtype(0)
def visit_variable_reference(self, ast, *args):
assert isinstance(ast, VariableReference)
return ast
@specialize.argtype(0)
def visit_with_continuation_mark(self, ast, *args):
assert isinstance(ast, WithContinuationMark)
key = ast.key.visit(self, *args)
value = ast.value.visit(self, *args)
body = ast.body.visit(self, *args)
return WithContinuationMark(key, value, body)
@specialize.argtype(0)
def visit_app(self, ast, *args):
assert isinstance(ast, App)
rator = ast.rator.visit(self, *args)
rands = [a.visit(self, *args) for a in ast.rands]
return App.make(rator, rands, ast.env_structure)
@specialize.argtype(0)
def visit_begin0(self, ast, *args):
assert isinstance(ast, Begin0)
first = ast.first.visit(self, *args)
body = [b.visit(self, *args) for b in ast.body]
return Begin0.make(first, body)
@specialize.argtype(0)
def visit_begin(self, ast, *args):
assert isinstance(ast, Begin)
body = [b.visit(self, *args) for b in ast.body]
return Begin.make(body)
@specialize.argtype(0)
def visit_begin_for_syntax(self, ast, *args):
assert isinstance(ast, BeginForSyntax)
return ast
@specialize.argtype(0)
def visit_cell_ref(self, ast, *args):
assert isinstance(ast, CellRef)
return ast
@specialize.argtype(0)
def visit_lexical_var(self, ast, *args):
assert isinstance(ast, LexicalVar)
return ast
@specialize.argtype(0)
def visit_module_var(self, ast, *args):
assert isinstance(ast, ModuleVar)
return ast
@specialize.argtype(0)
def visit_linklet_var(self, ast, *args):
assert isinstance(ast, LinkletVar)
return ast
@specialize.argtype(0)
def visit_toplevel_var(self, ast, *args):
assert isinstance(ast, ToplevelVar)
return ast
@specialize.argtype(0)
def visit_set_bang(self, ast, *args):
assert isinstance(ast, SetBang)
var = ast.var.visit(self, *args)
rhs = ast.rhs.visit(self, *args)
return SetBang(var, rhs)
@specialize.argtype(0)
def visit_if(self, ast, *args):
assert isinstance(ast, If)
tst = ast.tst.visit(self, *args)
thn = ast.thn.visit(self, *args)
els = ast.els.visit(self, *args)
return If.make(tst, thn, els)
@specialize.argtype(0)
def visit_case_lambda(self, ast, *args):
assert isinstance(ast, CaseLambda)
lams = [l.visit(self, *args) for l in ast.lams]
return CaseLambda(lams, recursive_sym=ast.recursive_sym, arity=ast._arity)
@specialize.argtype(0)
def visit_lambda(self, ast, *args):
from pycket.interpreter import make_lambda
assert isinstance(ast, Lambda)
body = [b.visit(self, *args) for b in ast.body]
return make_lambda(ast.formals, ast.rest, body, sourceinfo=ast.sourceinfo)
@specialize.argtype(0)
def visit_letrec(self, ast, *args):
assert isinstance(ast, Letrec)
rhss = [r.visit(self, *args) for r in ast.rhss]
body = [b.visit(self, *args) for b in ast.body]
vars = ast._rebuild_args()
return make_letrec(vars, rhss, body)
@specialize.argtype(0)
def visit_let(self, ast, *args):
assert isinstance(ast, Let)
rhss = [r.visit(self, *args) for r in ast.rhss]
body = [b.visit(self, *args) for b in ast.body]
vars = ast._rebuild_args()
return make_let(vars, rhss, body)
@specialize.argtype(0)
def visit_define_values(self, ast, *args):
assert isinstance(ast, DefineValues)
rhs = ast.rhs.visit(self, *args)
return DefineValues(ast.names, rhs, ast.display_names)
@specialize.argtype(0)
def visit_module(self, ast, *args):
""" Must not produce a new module AST """
assert isinstance(ast, Module)
for i, b in enumerate(ast.body):
ast.body[i] = b.visit(self, *args)
for i, r in enumerate(ast.requires):
ast.requires[i] = r.visit(self, *args)
return ast
@specialize.argtype(0)
def visit_require(self, ast, *args):
assert isinstance(ast, Require)
return ast
class CopyVisitor(ASTVisitor):
def visit_variable_reference(self, ast):
assert isinstance(ast, VariableReference)
return VariableReference(ast.var, ast.path, ast.is_mut)
def visit_quote(self, ast):
assert isinstance(ast, Quote)
return Quote(ast.w_val)
def visit_lexical_var(self, ast):
assert isinstance(ast, LexicalVar)
return LexicalVar(ast.sym, ast.env_structure)
def visit_module_var(self, ast):
assert isinstance(ast, ModuleVar)
var = ModuleVar(ast.sym, ast.srcmod, ast.srcsym, ast.path)
var.modenv = ast.modenv
var.w_value = ast.w_value
return var
def visit_cell_ref(self, ast):
assert isinstance(ast, CellRef)
return CellRef(ast.sym, ast.env_structure)
def visit_let(self, ast):
assert isinstance(ast, Let)
body = [b.visit(self) for b in ast.body]
rhss = [r.visit(self) for r in ast.rhss]
result = Let(ast.args,
ast.counts,
rhss,
body,
ast.remove_num_envs)
result.copy_body_pruning(ast)
return result
def visit_letrec(self, ast):
assert isinstance(ast, Letrec)
body = [b.visit(self) for b in ast.body]
rhss = [r.visit(self) for r in ast.rhss]
result = Letrec(ast.args,
ast.counts,
rhss,
body)
result.copy_body_pruning(ast)
return result
def visit_begin(self, ast):
assert isinstance(ast, Begin)
body = [b.visit(self) for b in ast.body]
result = Begin(body)
result.copy_body_pruning(ast)
return result
def visit_begin0(self, ast):
assert isinstance(ast, Begin0)
fst = ast.first.visit(self)
rst = [r.visit(self) for r in ast.body]
result = Begin0(fst, rst)
result.copy_body_pruning(ast)
return result
def copy_ast(ast):
visitor = CopyVisitor()
return ast.visit(visitor)
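# A minimal illustrative subclass (a sketch, not part of pycket itself): override only
# the handlers you care about and inherit the default traversal for everything else.
# Here every Quote node's wrapped value is passed through a user-supplied function
# `fn` (hypothetical), while all other node types are handled by ASTVisitor's defaults.
class QuoteMapVisitor(ASTVisitor):
    def __init__(self, fn):
        self.fn = fn
    def visit_quote(self, ast):
        assert isinstance(ast, Quote)
        return Quote(self.fn(ast.w_val))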
|
python
|
# -*- coding: UTF-8 -*-
"""
@CreateDate: 2021/07/25
@Author: Xingyan Liu
@File: builder.py
@Project: stagewiseNN
"""
import os
import sys
from pathlib import Path
from typing import Sequence, Mapping, Optional, Union, Callable
import logging
import pandas as pd
import numpy as np
from scipy import sparse
import scanpy as sc
from .utils import quick_preprocess_raw, make_binary
from .multipartite_graph import stagewise_knn
from .graph2tree import max_connection, adaptive_tree
class BuilderParams:
def __init__(self, **kwargs):
self._dict = {}
self.update(**kwargs)
def update(self, **kwargs):
self._dict.update(**kwargs)
return self
@property
def keys(self):
return self._dict.keys()
def __getattr__(self, key):
return self._dict[key]
class Builder(object):
def __init__(
self,
stage_order: Sequence,
**build_params
):
"""
Parameters
----------
stage_order: Sequence
the order of stages
"""
self.stage_order = stage_order
self._params = BuilderParams(**build_params)
self._distmat = None
self._connect = None
self._stage_lbs = None
self._group_lbs = None
self._edgedf = None
self._refined_group_lbs = None
@property
def stage_lbs(self):
return self._stage_lbs
@property
def group_lbs(self):
return self._group_lbs
# @group_lbs.setter
# def group_lbs(self, group_lbs):
# pass
@property
def distmat(self):
return self._distmat
@property
def connect(self):
return self._connect
@property
def connect_bin(self):
""" binarized edges """
if self._connect is not None:
return make_binary(self._connect)
return None
@property
def edgedf(self):
return self._edgedf
@property
def refined_group_lbs(self):
return self._refined_group_lbs
def build_graph(
self,
X, stage_lbs,
binary_edge: bool = True,
ks: Union[Sequence[int], int] = 10,
n_pcs: Union[Sequence[int], int] = 50,
pca_base_on: Optional[str] = 'stacked',
leaf_size: int = 5,
**kwargs
):
"""
Build multipartite KNN-graph stage-by-stage.
Parameters
----------
X: np.ndarray or sparse matrix
data matrix, of shape (n_samples, n_features)
stage_lbs: Sequence
stage labels for each sample (nodes in `build_graph`)
binary_edge: bool (default=True)
            whether to use binarized edges. Setting this to True may cause some
            information loss but gives a more robust result.
ks:
the number of nearest neighbors to be calculated.
n_pcs:
The number of principal components after PCA reduction.
If `pca_base_on` is None, this will be ignored.
pca_base_on: str {'x1', 'x2', 'stacked', None} (default='stacked')
if None, perform KNN on the original data space.
leaf_size: int (default=5)
            Leaf size passed to BallTree or KDTree, for adjusting the
            approximation level. The higher the value, the faster the search,
            but with weaker guarantees of finding the exact nearest neighbors.
            Set it to 1 for brute-force (exact) KNN.
kwargs:
other parameters for `stagewise_knn`
Returns
-------
distmat: sparse.csr_matrix
the distance matrix, of shape (n_samples, n_samples)
connect: sparse.csr_matrix
the connectivities matrix, of shape (n_samples, n_samples)
"""
self._stage_lbs = stage_lbs
distmat, connect = stagewise_knn(
X, self.stage_lbs,
stage_order=self.stage_order,
k=ks,
leaf_size=leaf_size, # 1 for brute-force KNN
pca_base_on=pca_base_on,
n_pcs=n_pcs,
binary_edge=False,
**kwargs
)
self._distmat = distmat
self._connect = connect
if binary_edge:
connect = self.connect_bin
# record parameters
self._params.update(
binary_edge=binary_edge,
ks=ks,
n_pcs=n_pcs,
pca_base_on=pca_base_on,
leaf_size=leaf_size,
)
return distmat, connect
def build_tree(
self,
group_lbs: Sequence,
stage_lbs: Optional[Sequence] = None,
ignore_pa=(),
ext_sep: str = '_',
):
"""
        Adaptively build the developmental tree from the stagewise-KNN graph.
Parameters
----------
group_lbs: Sequence
group labels for each sample (nodes in `build_graph`)
stage_lbs: Sequence
stage labels for each sample (nodes in `build_graph`)
ignore_pa: list or set
parent nodes to be ignored; empty tuple by default.
ext_sep: str
            the separator string used to automatically extract the stage labels
            from `group_lbs`
Returns
-------
edgedf: pd.DataFrame
            a pd.DataFrame with columns {'node', 'parent', 'prop'},
            with one row for each stage-cluster.
            The column 'prop' is the proportion of nodes that vote for
            the current parent.
refined_group_lbs:
refined group labels for each sample (e.g. single-cell)
"""
        # A connectivity matrix NOT computed by StagewiseNN may give unexpected
        # results when `sparse.triu()` is applied below.
# TODO: define `take_cross_stage_edges(spmatrix)`
conn_upper = sparse.triu(self.connect)
adj_max = max_connection(conn_upper)
self._group_lbs = group_lbs
if self.stage_lbs is None:
self._stage_lbs = stage_lbs
edgedf, refined_group_lbs = adaptive_tree(
adj_max, self.group_lbs,
stage_lbs=self.stage_lbs,
stage_ord=self.stage_order,
ignore_pa=ignore_pa,
ext_sep=ext_sep,
)
self._edgedf = edgedf
self._refined_group_lbs = refined_group_lbs
# record parameters
self._params.update(
ignore_pa=ignore_pa,
ext_sep=ext_sep,
)
return edgedf, refined_group_lbs
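# Illustrative end-to-end usage (a sketch; `X`, `stages` and `groups` are hypothetical
# inputs -- a cell-by-feature matrix plus per-cell stage and cluster labels):
#
#   builder = Builder(stage_order=('E6', 'E8', 'E10'))
#   distmat, connect = builder.build_graph(X, stage_lbs=stages, ks=10, n_pcs=50)
#   edgedf, refined = builder.build_tree(group_lbs=groups, stage_lbs=stages)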
def __test__():
pass
if __name__ == '__main__':
import time
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)s-%(lineno)d-%(funcName)s(): '
'%(levelname)s\n%(message)s'
)
t = time.time()
__test__()
print('Done running file: {}\nTime: {}'.format(
os.path.abspath(__file__), time.time() - t,
))
|
python
|
# An import performance test of standard classes, dataclasses, attrs, and cluegen
import sys
import time
standard_template = '''
class C{n}:
def __init__(self, a, b, c, d, e):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
def __repr__(self):
return f'C{n}({{self.a!r}}, {{self.b!r}}, {{self.c!r}}, {{self.d!r}}, {{self.e!r}})'
def __eq__(self, other):
        if self.__class__ is other.__class__:
return (self.a, self.b, self.c, self.d, self.e) == (other.a, other.b, other.c, other.d, other.e)
else:
return NotImplemented
'''
namedtuple_template = '''
C{n} = namedtuple('C{n}', ['a', 'b', 'c', 'd', 'e'])
'''
namedtuple_template = '''
class C{n}(NamedTuple):
a : int
b : int
c : int
d : int
e : int
'''
dataclass_template = '''
@dataclass
class C{n}:
a : int
b : int
c : int
d : int
e : int
'''
attr_template = '''
@attr.s
class C{n}:
a = attr.ib()
b = attr.ib()
c = attr.ib()
d = attr.ib()
e = attr.ib()
'''
cluegen_template = '''
class C{n}(Datum):
a : int
b : int
c : int
d : int
e : int
'''
# cluegen, but same default methods as dataclasses generated
cluegen_eval_template = '''
class C{n}(Datum):
a : int
b : int
c : int
d : int
e : int
C{n}.__init__, C{n}.__repr__, C{n}.__eq__
'''
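# Each variant below is measured the same way: write_perftemp() generates a module with
# 100 class definitions from a template, and run_test() imports it `n` times, removing it
# from sys.modules between iterations so every pass pays the full class-creation cost.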
def run_test(name, n):
start = time.time()
while n > 0:
import perftemp
del sys.modules['perftemp']
n -= 1
end = time.time()
print(name, (end-start))
def write_perftemp(count, template, setup):
with open('perftemp.py', 'w') as f:
f.write(setup)
for n in range(count):
f.write(template.format(n=n))
def main(reps):
write_perftemp(100, standard_template, '')
run_test('standard classes', reps)
    # The collections.namedtuple template is superseded by the typing.NamedTuple version
    # defined above, so only the latter is written and timed here.
    write_perftemp(100, namedtuple_template, 'from typing import NamedTuple\n')
run_test('namedtuple', reps)
write_perftemp(100, dataclass_template, 'from dataclasses import dataclass\n')
run_test('dataclasses', reps)
try:
write_perftemp(100, attr_template, 'import attr\n')
run_test('attrs', reps)
except ImportError:
print("attrs not installed")
write_perftemp(100, cluegen_template, 'from cluegen import Datum\n')
run_test('cluegen', reps)
write_perftemp(100, cluegen_eval_template, 'from cluegen import Datum\n')
run_test('cluegen_eval', reps)
if __name__ == '__main__':
if len(sys.argv) == 2:
reps = int(sys.argv[1])
else:
reps = 100
main(reps)
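# Usage sketch (assuming this script is saved as, e.g., perf.py next to cluegen.py):
#     python perf.py 300
# Each output line is the total wall-clock time spent re-importing perftemp.py for that variant.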
|
python
|
from __future__ import print_function
from __future__ import division
from collections import defaultdict, OrderedDict
from itertools import izip
import numbers
from time import time
import itertools
import math
import scipy.sparse as sparse
import sklearn
from sklearn.base import BaseEstimator
from sklearn.ensemble import GradientBoostingClassifier as GBClassifier
from sklearn.ensemble._gradient_boosting import _random_sample_mask
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.ensemble.gradient_boosting import LossFunction, LOSS_FUNCTIONS, MultinomialDeviance, \
LogOddsEstimator, BinomialDeviance
import numpy
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors.unsupervised import NearestNeighbors
from sklearn.tree._tree import DTYPE
from sklearn.utils.random import check_random_state
from sklearn.utils.validation import check_arrays, column_or_1d
from commonutils import generate_sample, check_sample_weight
import commonutils
import reports
__author__ = 'Alex Rogozhnikov'
# TODO updating tree in FL and NFL
class KnnLossFunction(LossFunction, BaseEstimator):
def __init__(self, uniform_variables):
"""KnnLossFunction is a base class to be inherited by other loss functions,
which choose the particular A matrix and w vector. The formula of loss is:
loss = \sum_i w_i * exp(- \sum_j a_ij y_j score_j)
"""
LossFunction.__init__(self, 1)
self.uniform_variables = uniform_variables
# real matrix and vector will be computed during fitting
self.A = None
self.A_t = None
self.w = None
def __call__(self, y, pred):
"""Computing the loss itself"""
assert len(y) == len(pred) == self.A.shape[1], "something is wrong with sizes"
y_signed = 2 * y - 1
exponents = numpy.exp(- self.A.dot(y_signed * numpy.ravel(pred)))
return numpy.sum(self.w * exponents)
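    # For loss = sum_i w_i * exp(-sum_j a_ij * y_j * score_j), the derivative w.r.t.
    # score_k is -y_k * sum_i w_i * a_ik * exp(...), so the negative gradient below is
    # y_signed * A^T.dot(w * exponents), matching the formula in the class docstring.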
def negative_gradient(self, y, pred, **kwargs):
"""Computing negative gradient"""
assert len(y) == len(pred) == self.A.shape[1], "something is wrong with sizes"
y_signed = 2 * y - 1
exponents = numpy.exp(- self.A.dot(y_signed * numpy.ravel(pred)))
result = self.A_t.dot(self.w * exponents) * y_signed
return result
def fit(self, X, y):
"""This method is used to compute A matrix and w based on train dataset"""
assert len(X) == len(y), "different size of arrays"
A, w = self.compute_parameters(X, y)
self.A = sparse.csr_matrix(A)
self.A_t = sparse.csr_matrix(self.A.transpose())
self.w = numpy.array(w)
assert A.shape[0] == len(w), "inconsistent sizes"
assert A.shape[1] == len(X), "wrong size of matrix"
return self
def compute_parameters(self, trainX, trainY):
"""This method should be overloaded in descendant, and should return A, w (matrix and vector)"""
raise NotImplementedError()
def init_estimator(self, X=None, y=None):
return LogOddsEstimator()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
y_signed = 2 * y - 1
self.update_exponents = self.w * numpy.exp(- self.A.dot(y_signed * numpy.ravel(y_pred)))
LossFunction.update_terminal_regions(self, tree, X, y, residual, y_pred, sample_mask, learning_rate, k)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred):
# terminal_region = numpy.where(terminal_regions == leaf)[0]
y_signed = 2 * y - 1
z = self.A.dot((terminal_regions == leaf) * y_signed)
alpha = numpy.sum(self.update_exponents * z) / (numpy.sum(self.update_exponents * z * z) + 1e-10)
tree.value[leaf, 0, 0] = alpha
# Descendants of KnnLossFunction - particular cases, each has its own
# algorithm of generating A and w
class SimpleKnnLossFunction(KnnLossFunction):
def __init__(self, uniform_variables, knn=10, uniform_label=1, distinguish_classes=True, row_norm=1.):
"""A matrix is square, each row corresponds to a single event in train dataset, in each row we put ones
to the closest neighbours of that event if this event from class along which we want to have uniform prediction.
:param list[str] uniform_variables: the features, along which uniformity is desired
:param int knn: the number of nonzero elements in the row, corresponding to event in 'uniform class'
:param int|list[int] uniform_label: the label (labels) of 'uniform classes'
:param bool distinguish_classes: if True, 1's will be placed only for
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.row_norm = row_norm
self.uniform_label = [uniform_label] if isinstance(uniform_label, numbers.Number) else uniform_label
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
sample_weight = numpy.ones(len(trainX))
A_parts = []
w_parts = []
for label in self.uniform_label:
label_mask = trainY == label
n_label = numpy.sum(label_mask)
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, label_mask,
n_neighbours=self.knn)
else:
mask = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, mask, self.knn)
knn_indices = knn_indices[label_mask, :]
ind_ptr = numpy.arange(0, n_label * self.knn + 1, self.knn)
column_indices = knn_indices.flatten()
data = numpy.ones(n_label * self.knn, dtype=float) * self.row_norm / self.knn
A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
w_part = numpy.mean(numpy.take(sample_weight, knn_indices), axis=1)
assert A_part.shape[0] == len(w_part)
A_parts.append(A_part)
w_parts.append(w_part)
for label in set(trainY).difference(self.uniform_label):
label_mask = trainY == label
n_label = numpy.sum(label_mask)
ind_ptr = numpy.arange(0, n_label + 1)
column_indices = numpy.where(label_mask)[0].flatten()
data = numpy.ones(n_label, dtype=float) * self.row_norm
A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
w_part = sample_weight[label_mask]
A_parts.append(A_part)
w_parts.append(w_part)
A = sparse.vstack(A_parts, format='csr', dtype=float)
w = numpy.concatenate(w_parts)
return A, w
class SimpleKnnLossFunctionEyeBg(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for signal.
For background we have identity matrix.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(~ is_signal)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
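        # Build the CSR components row by row: each background event gets a single 1 on
        # the diagonal, every other event gets `knn` ones at its same-class neighbours.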
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class SimpleKnnLossFunctionKnnOnDiagonalSignal(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for signal. For background we
have identity matrix times self.knn.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(is_signal == False)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
data[bg_index] = self.knn
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class SimpleKnnLossFunctionKnnOnDiagonalBg(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for signal. For background we
have identity matrix times self.knn.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(is_signal == True)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
data[bg_index] = self.knn
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class SimpleKnnLossFunctionEyeSignal(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for background.
For signal we have identity matrix.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(is_signal)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class PairwiseKnnLossFunction(KnnLossFunction):
def __init__(self, uniform_variables, knn, exclude_self=True, penalize_large_preds=True):
""" A is rectangular matrix, in each row we have only two '1's,
all other elements are zeros, these two '1's are placed in the columns, corresponding to neighbours
exclude_self: bool, exclude self from knn?
"""
self.knn = knn
self.exclude_self = exclude_self
self.penalize_large_preds = penalize_large_preds
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
knn = self.knn
if self.exclude_self:
knn_indices = \
commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, knn+1)[:, 1:]
else:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, knn)
rows = xrange(len(trainX) * knn)
columns1 = numpy.repeat(numpy.arange(0, len(trainX)), knn)
columns2 = knn_indices.flatten()
data = numpy.ones(len(rows))
A = sparse.csr_matrix((data, (rows, columns1)), shape=[len(trainX) * knn, len(trainX)]) + \
sparse.csr_matrix((data, (rows, columns2)), shape=[len(trainX) * knn, len(trainX)])
if self.penalize_large_preds:
penalty1 = - sparse.eye(len(trainX), len(trainX))
penalty2 = sparse.eye(len(trainX), len(trainX))
A = sparse.vstack((A, penalty1, penalty2), format="csr")
w = numpy.ones(A.shape[0])
return A, w
class RandomKnnLossFunction(KnnLossFunction):
def __init__(self, uniform_variables, n_rows, knn=5, knn_factor=3, large_preds_penalty=1.):
"""A general loss,
at each iteration it takes some random event from train dataset,
and selects randomly knn of its knn*knn_factor neighbours, the process is repeated 'n_rows' times"""
self.n_rows = n_rows
self.knn = knn
self.knn_factor = knn_factor
self.large_preds_penalty = large_preds_penalty
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
knn_max = int(self.knn * self.knn_factor)
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, knn_max)
selected_originals = numpy.random.randint(0, len(trainX), self.n_rows)
selected_knns = knn_indices[selected_originals, :]
groups_indices = numpy.zeros((self.n_rows, self.knn), dtype=numpy.int)
for i, event_neighs in enumerate(selected_knns):
indices = numpy.random.permutation(knn_max)[:self.knn]
groups_indices[i] = event_neighs[indices]
ind_ptr = numpy.arange(0, self.n_rows * self.knn + 1, self.knn)
column_indices = groups_indices.flatten()
data = numpy.ones(self.n_rows * self.knn)
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(self.n_rows, len(trainX)))
if self.large_preds_penalty > 0:
penalty1 = - self.large_preds_penalty * sparse.eye(len(trainX), len(trainX))
penalty2 = self.large_preds_penalty * sparse.eye(len(trainX), len(trainX))
A = sparse.vstack((A, penalty1, penalty2), format="csr")
w = numpy.ones(A.shape[0])
return A, w
class AdaLossFunction(KnnLossFunction):
def __init__(self):
"""Good old Ada loss, implemented as version of KnnLostFunction """
KnnLossFunction.__init__(self, None)
def compute_parameters(self, trainX, trainY):
return sparse.eye(len(trainX), len(trainX)), numpy.ones(len(trainX))
class DistanceBasedKnnFunction(KnnLossFunction):
def __init__(self, uniform_variables, knn=None, distance_dependence=None, large_preds_penalty=0.,
row_normalize=False):
"""If knn is None, the matrix will be filled, otherwise it will be sparse
with knn as number of nonzero cells,
distance dependence is function, that takes distance between i-th and j-th
events and returns a_ij
"""
self.knn = knn
self.distance_dependence = distance_dependence
self.large_pred_penalty = large_preds_penalty
self.row_normalize = row_normalize
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
for variable in self.uniform_variables:
if variable not in trainX.columns:
raise ValueError("Dataframe is missing %s column" % variable)
if self.knn is None:
A = pairwise_distances(trainX[self.uniform_variables])
A = self.distance_dependence(A)
A *= (trainY[:, numpy.newaxis] == trainY[numpy.newaxis, :])
else:
is_signal = trainY > 0.5
# computing knn indices of same type
uniforming_features_of_signal = numpy.array(trainX.ix[is_signal, self.uniform_variables])
neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_signal)
signal_distances, knn_signal_indices = neighbours.kneighbors(uniforming_features_of_signal)
knn_signal_indices = numpy.where(is_signal)[0].take(knn_signal_indices)
uniforming_features_of_bg = numpy.array(trainX.ix[~is_signal, self.uniform_variables])
neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_bg)
bg_distances, knn_bg_indices = neighbours.kneighbors(uniforming_features_of_bg)
knn_bg_indices = numpy.where(~is_signal)[0].take(knn_bg_indices)
signal_distances = self.distance_dependence(signal_distances.flatten())
bg_distances = self.distance_dependence(bg_distances.flatten())
signal_ind_ptr = numpy.arange(0, sum(is_signal) * self.knn + 1, self.knn)
bg_ind_ptr = numpy.arange(0, sum(~is_signal) * self.knn + 1, self.knn)
signal_column_indices = knn_signal_indices.flatten()
bg_column_indices = knn_bg_indices.flatten()
A_sig = sparse.csr_matrix(sparse.csr_matrix((signal_distances, signal_column_indices, signal_ind_ptr),
shape=(sum(is_signal), len(trainX))))
A_bg = sparse.csr_matrix(sparse.csr_matrix((bg_distances, bg_column_indices, bg_ind_ptr),
shape=(sum(~is_signal), len(trainX))))
A = sparse.vstack((A_sig, A_bg), format='csr')
if self.row_normalize:
from sklearn.preprocessing import normalize
A = normalize(A, norm='l1', axis=1)
return A, numpy.ones(A.shape[0])
def compute_efficiencies(mask, y_pred, sample_weight):
"""For each event computes it position among other events by prediction. """
order = numpy.argsort(y_pred[mask])
weights = sample_weight[mask][order]
efficiencies = (numpy.cumsum(weights) - 0.5 * weights) / numpy.sum(weights)
return efficiencies[numpy.argsort(order)]
def test_compute_efficiency(size=100):
y_pred = numpy.random.random(size)
mask = numpy.random.random(size) > 0.5
effs = compute_efficiencies(mask, y_pred, sample_weight=numpy.ones(size))
assert len(effs) == numpy.sum(mask)
assert len(effs) == len(set(effs))
assert numpy.all(effs[numpy.argsort(y_pred[mask])] == numpy.sort(effs))
effs2 = compute_efficiencies(numpy.where(mask)[0], y_pred, sample_weight=numpy.ones(size))
assert numpy.all(effs == effs2)
print("Compute efficiency is ok")
test_compute_efficiency()
def exp_margin(margin):
""" margin = - y_signed * y_pred """
return numpy.exp(numpy.clip(margin, -1e5, 2))
class FlatnessLossFunction(LossFunction, BaseEstimator):
def __init__(self, uniform_variables, bins=10, uniform_label=1, power=2., ada_coefficient=1.,
allow_wrong_signs=True, keep_debug_info=False):
"""
        This loss function contains separate penalties for non-flatness and an exponential
        (AdaBoost-like) term weighted by ada_coefficient. The non-flatness penalty is computed over bins.
        :param uniform_variables: the vars, along which we want to obtain uniformity
        :param bins: the number of bins along each axis
        :param uniform_label: int | list(int), the labels for which we want to obtain uniformity
        :param power: the loss contains the difference | F - F_bin |^p, where p is power
        :param ada_coefficient: coefficient of ada_loss added to this one. The greater the coefficient,
            the less we tend to uniformity.
        :param allow_wrong_signs: defines whether the gradient may have a sign different from the "sign of class"
            (i.e. may have negative gradient on signal)
"""
self.uniform_variables = uniform_variables
self.bins = bins
self.uniform_label = numpy.array([uniform_label]) if isinstance(uniform_label, numbers.Number) \
else numpy.array(uniform_label)
self.power = power
self.ada_coefficient = ada_coefficient
self.allow_wrong_signs = allow_wrong_signs
self.keep_debug_info = keep_debug_info
LossFunction.__init__(self, 1)
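    # The intended loss (see __call__, which only approximates it) is
    #   sum_bins w_bin * sum_{i in bin} |F_bin(s_i) - F_global(s_i)|^power
    #     + ada_coefficient * (exponential AdaBoost-like term),
    # where F_bin and F_global are the in-bin and global CDFs of the predictions.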
def fit(self, X, y, sample_weight=None):
assert len(X) == len(y), 'The lengths are different'
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
self.group_indices = defaultdict(list)
# The weight of bin is mean of weights inside bin
self.group_weights = defaultdict(list)
occurences = numpy.zeros(len(X), dtype=int)
for label in self.uniform_label:
group_indices = self.compute_groups_indices(X, y, sample_weight=sample_weight, label=label)
# cleaning the bins - deleting tiny or empty groups, canonizing
for indices in group_indices:
if len(indices) < 5:
# ignoring very small groups
continue
assert numpy.all((y == label)[indices])
self.group_indices[label].append(numpy.array(indices))
self.group_weights[label].append(numpy.mean(sample_weight[indices]))
occurences[indices] += 1
y = numpy.array(y, dtype=int)
needed_indices = numpy.in1d(y, self.uniform_label)
out_of_bins = numpy.sum((occurences == 0) & needed_indices)
if out_of_bins > 0.01 * len(X):
print("warning: %i events are out of all bins" % out_of_bins)
self.sample_weight = sample_weight
self.event_weights = sample_weight / (occurences + 1e-10)
if self.keep_debug_info:
self.debug_dict = defaultdict(list)
return self
def compute_groups_indices(self, X, y, sample_weight, label):
"""Returns a list, each element is events' indices in some group."""
mask = y == label
bin_limits = []
for var in self.uniform_variables:
bin_limits.append(numpy.linspace(numpy.min(X[var][mask]), numpy.max(X[var][mask]), 2 * self.bins + 1))
result = list()
for shift in [0, 1]:
bin_limits2 = []
for axis_limits in bin_limits:
bin_limits2.append(axis_limits[1 + shift:-1:2])
bin_indices = reports.compute_bin_indices(X, self.uniform_variables, bin_limits2)
result += reports.bin_to_group_indices(bin_indices, mask=mask)
return result
def __call__(self, y, pred):
# computing the common distribution of signal
# taking only signal by now
# this is approximate computation!
# TODO reimplement, this is wrong implementation
pred = numpy.ravel(pred)
loss = 0
for label in self.uniform_label:
needed_indices = y == label
sorted_pred = numpy.sort(pred[needed_indices])
for bin_weight, indices_in_bin in zip(self.group_weights[label], self.group_indices[label]):
probs_in_bin = numpy.take(pred, indices_in_bin)
probs_in_bin = numpy.sort(probs_in_bin)
positions = numpy.searchsorted(sorted_pred, probs_in_bin)
global_effs = positions / float(len(sorted_pred))
local_effs = (numpy.arange(0, len(probs_in_bin)) + 0.5) / len(probs_in_bin)
bin_loss = numpy.sum((global_effs - local_effs) ** self.power)
loss += bin_loss * bin_weight
# Ada loss now
loss += self.ada_coefficient * numpy.sum(numpy.exp(-y * pred))
return loss
def negative_gradient(self, y, y_pred, **kw_args):
y_pred = numpy.ravel(y_pred)
neg_gradient = numpy.zeros(len(y))
for label in self.uniform_label:
label_mask = y == label
global_efficiencies = numpy.zeros(len(y_pred), dtype=float)
global_efficiencies[label_mask] = compute_efficiencies(label_mask, y_pred, sample_weight=self.sample_weight)
for bin_weight, indices_in_bin in zip(self.group_weights[label], self.group_indices[label]):
assert numpy.all(label_mask[indices_in_bin]), "TODO delete"
local_effs = compute_efficiencies(indices_in_bin, y_pred, sample_weight=self.sample_weight)
global_effs = global_efficiencies[indices_in_bin]
bin_gradient = self.power * numpy.sign(local_effs - global_effs) \
* numpy.abs(local_effs - global_effs) ** (self.power - 1)
# TODO multiply by derivative of F_global ?
neg_gradient[indices_in_bin] += bin_weight * bin_gradient
assert numpy.all(neg_gradient[~numpy.in1d(y, self.uniform_label)] == 0)
y_signed = 2 * y - 1
if self.keep_debug_info:
self.debug_dict['pred'].append(numpy.copy(y_pred))
self.debug_dict['fl_grad'].append(numpy.copy(neg_gradient))
self.debug_dict['ada_grad'].append(y_signed * self.sample_weight * numpy.exp(- y_signed * y_pred))
# adding ada
neg_gradient += self.ada_coefficient * y_signed * self.sample_weight \
* exp_margin(-self.ada_coefficient * y_signed * y_pred)
if not self.allow_wrong_signs:
neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5)
return neg_gradient
# def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_mask, learning_rate=1.0, k=0):
# the standard version is used
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred):
# terminal_region = numpy.where(terminal_regions == leaf)[0]
tree.value[leaf, 0, 0] = numpy.clip(tree.value[leaf, 0, 0], -10, 10)
# TODO think of real minimization
def init_estimator(self, X=None, y=None):
return LogOddsEstimator()
class NewRF(RandomForestRegressor):
"""Just a random forest regressor, that returns a two-dimensional array"""
def predict(self, X):
return RandomForestRegressor.predict(self, X)[:, numpy.newaxis]
class NewFlatnessLossFunction(FlatnessLossFunction, BaseEstimator):
def __init__(self, uniform_variables, n_neighbours=100, uniform_label=1, ada_coefficient=1.,
allow_wrong_signs=True, keep_debug_info=False, uniforming_factor=1., update_tree=True):
"""
:param int|list[int] uniform_label: labels of classes for which the uniformity of predictions is desired
"""
self.uniform_variables = uniform_variables
self.n_neighbours = n_neighbours
self.uniform_label = numpy.array([uniform_label]) if isinstance(uniform_label, numbers.Number) \
else numpy.array(uniform_label)
self.ada_coefficient = ada_coefficient
self.allow_wrong_signs = allow_wrong_signs
self.keep_debug_info = keep_debug_info
self.uniforming_factor = uniforming_factor
self.update_tree = update_tree
LossFunction.__init__(self, 1)
def fit(self, X, y, sample_weight=None):
assert len(X) == len(y), 'The lengths are different'
# sample_weight = check_sample_weight(y, sample_weight=sample_weight)
y = column_or_1d(y)
assert set(y) == {0,1}, "Only two classes are supported, their labels should be 0 and 1"
self.knn_indices = defaultdict(list)
for label in self.uniform_label:
label_mask = y == label
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, X, label_mask, n_neighbors=self.n_neighbours)
# taking only rows, corresponding to this class
self.knn_indices[label] = knn_indices[label_mask, :]
if self.keep_debug_info:
self.debug_dict = defaultdict(list)
return self
def __call__(self, y, pred):
return 1
def init_estimator(self, X=None, y=None):
return NewRF()
def negative_gradient(self, y, y_pred, sample_weight=None, **kw_args):
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
y_pred = numpy.ravel(y_pred)
neg_gradient = numpy.zeros(len(y))
for label in self.uniform_label:
label_mask = y == label
assert sum(label_mask) == len(self.knn_indices[label])
# global_efficiencies = numpy.zeros(len(y_pred), dtype=float)
# global_efficiencies[label_mask] = compute_efficiencies(label_mask, y_pred, sample_weight=self.sample_weight)
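            # For each event of this class: estimate its local efficiency as the weighted
            # fraction of its knn neighbours with a higher prediction, find the global
            # prediction quantile at that efficiency, and push the event's prediction
            # toward that target (scaled by uniforming_factor).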
values = y_pred[label_mask]
knn_values = numpy.take(y_pred, self.knn_indices[label])
knn_weights = numpy.take(sample_weight, self.knn_indices[label])
# TODO use heaviside here?
local_efficiencies = numpy.average(knn_values > values[:, numpy.newaxis], axis=1, weights=knn_weights)
global_targets = commonutils.weighted_percentile(values, local_efficiencies,
sample_weight=sample_weight[label_mask])
neg_gradient[label_mask] += self.uniforming_factor * (global_targets - values)
assert numpy.all(neg_gradient[~numpy.in1d(y, self.uniform_label)] == 0)
y_signed = 2 * y - 1
if self.keep_debug_info:
self.debug_dict['pred'].append(numpy.copy(y_pred))
self.debug_dict['fl_grad'].append(numpy.copy(neg_gradient))
self.debug_dict['ada_grad'].append(y_signed * sample_weight * numpy.exp(- y_signed * y_pred))
# adding ada
neg_gradient += self.ada_coefficient * y_signed * sample_weight \
* exp_margin(- self.ada_coefficient * y_signed * y_pred)
if not self.allow_wrong_signs:
neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5)
return neg_gradient
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred):
if not self.update_tree:
return
terminal_region = numpy.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = numpy.median(residual)
class MyGradientBoostingClassifier(GBClassifier):
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0, train_variables=None):
"""
        GradientBoosting from sklearn, which is modified to work with KnnLossFunction and its versions.
Train variables are variables used in training trees.
:param LossFunction|str loss:
"""
self.train_variables = train_variables
GBClassifier.__init__(self, loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
subsample=subsample, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
max_depth=max_depth, init=init, random_state=random_state, max_features=max_features, verbose=verbose)
def get_train_variables(self, X):
if self.train_variables is None:
return X
else:
return X[self.train_variables]
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
sample_weight: array-like, shape = [n_samples], default None,
positive weights if they are needed
Returns
-------
self : object
Returns self.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = numpy.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
assert self.n_classes_ == 2, "at this moment only two-class classification is supported"
self._check_params()
# fitting the loss if it needs
if isinstance(self.loss_, KnnLossFunction) or isinstance(self.loss_, FlatnessLossFunction):
self.loss_.fit(X, y)
X = self.get_train_variables(X)
# Check input
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
n_samples, n_features = X.shape
self.n_features = n_features
random_state = check_random_state(self.random_state)
# pull frequently used parameters into local scope
subsample = self.subsample
do_oob = subsample < 1.0
# allocate model state data structures
self.estimators_ = numpy.empty((self.n_estimators, self.loss_.K), dtype=numpy.object)
self.train_score_ = numpy.zeros((self.n_estimators,), dtype=numpy.float64)
sample_mask = numpy.ones((n_samples,), dtype=numpy.bool)
n_inbag = max(1, int(subsample * n_samples))
if self.verbose:
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
if do_oob:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
verbose_fmt = ' '.join(verbose_fmt)
# print the header line
print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields))
# plot verbose info each time i % verbose_mod == 0
verbose_mod = 1
start_time = time()
# fit initial model
self.init_.fit(X, y)
# init predictions
y_pred = self.init_.predict(X)
# perform boosting iterations
for i in range(self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
# fit next stage of tree
args = {}
# TODO write own gradient boosting
if sklearn.__version__ >= '0.15':
args = {'criterion': 'mse', 'splitter': 'best', }
y_pred = self._fit_stage(i, X, y, y_pred=y_pred, sample_mask=sample_mask, random_state=random_state, **args)
self.train_score_[i] = self.loss_(y, y_pred)
if self.verbose > 0:
if (i + 1) % verbose_mod == 0:
remaining_time = (self.n_estimators - (i + 1)) * (time() - start_time) / float(i + 1)
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(verbose_fmt.format(iter=i + 1,
train_score=self.train_score_[i],
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
verbose_mod *= 10
return self
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
# everything connected with loss was moved to self.fit
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0")
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0")
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0)")
# we enable to pass simply LossFunction object
if isinstance(self.loss, LossFunction):
self.loss_ = self.loss
else:
if self.loss not in LOSS_FUNCTIONS:
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance if len(self.classes_) > 2 else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if self.subsample <= 0.0 or self.subsample > 1:
raise ValueError("subsample must be in (0,1]")
if self.init is not None:
if (not hasattr(self.init, 'fit') or not hasattr(self.init, 'predict')):
raise ValueError("init must be valid estimator")
self.init_ = self.init
else:
self.init_ = self.loss_.init_estimator()
def predict(self, X):
return GBClassifier.predict(self, self.get_train_variables(X))
def predict_proba(self, X):
return GBClassifier.predict_proba(self, self.get_train_variables(X))
def staged_predict_proba(self, X):
return GBClassifier.staged_predict_proba(self, self.get_train_variables(X))
def test_gradient(loss, size=1000):
X, y = commonutils.generate_sample(size, 10)
loss.fit(X, y)
pred = numpy.random.random(size)
epsilon = 1e-7
val = loss(y, pred)
gradient = numpy.zeros_like(pred)
for i in range(size):
pred2 = pred.copy()
pred2[i] += epsilon
val2 = loss(y, pred2)
gradient[i] = (val2 - val) / epsilon
n_gradient = loss.negative_gradient(y, pred)
assert numpy.all(abs(n_gradient + gradient) < 1e-3), "Problem with functional gradient"
def test_gradient_boosting(samples=1000):
# Generating some samples correlated with first variable
distance = 0.6
testX, testY = generate_sample(samples, 10, distance)
trainX, trainY = generate_sample(samples, 10, distance)
# We will try to get uniform distribution along this variable
uniform_variables = ['column0']
n_estimators = 20
loss1 = SimpleKnnLossFunction(uniform_variables)
loss2 = PairwiseKnnLossFunction(uniform_variables, knn=10)
loss3 = AdaLossFunction()
loss4 = RandomKnnLossFunction(uniform_variables, samples * 2, knn=5, knn_factor=3)
loss5 = DistanceBasedKnnFunction(uniform_variables, knn=10, distance_dependence=lambda r: numpy.exp(-0.1 * r))
loss6 = FlatnessLossFunction(uniform_variables, ada_coefficient=0.5)
loss7 = FlatnessLossFunction(uniform_variables, ada_coefficient=0.5, uniform_label=[0,1])
loss8 = NewFlatnessLossFunction(uniform_variables, ada_coefficient=0.5, uniform_label=1)
loss9 = NewFlatnessLossFunction(uniform_variables, ada_coefficient=0.5, uniform_label=[0, 1])
for loss in [loss1, loss2, loss3, loss4, loss5, loss6, loss7, loss8, loss9]:
result = MyGradientBoostingClassifier(loss=loss, min_samples_split=20, max_depth=5, learning_rate=.2,
subsample=0.7, n_estimators=n_estimators, train_variables=None)\
.fit(trainX[:samples], trainY[:samples]).score(testX, testY)
assert result >= 0.7, "The quality is too poor: %.3f" % result
# TODO return this code and test losses
# for loss in [loss1, loss2, loss3, loss4, loss5]:
# testGradient(loss)
print('uniform gradient boosting is ok')
test_gradient_boosting()
|
python
|
# -*- encoding: utf-8 -*-
text = u"""
Mr. Speaker, Mr. President, distinguished Members of the Congress, honored guests, and fellow citizens:
I come before you to report on the state of our Union, and I'm pleased to report that after 4 years of united effort, the American people have brought forth a nation renewed, stronger, freer, and more secure than before.
Four years ago we began to change, forever I hope, our assumptions about government and its place in our lives. Out of that change has come great and robust growth-in our confidence, our economy, and our role in the world.
Tonight America is stronger because of the values that we hold dear. We believe faith and freedom must be our guiding stars, for they show us truth, they make us brave, give us hope, and leave us wiser than we were. Our progress began not in Washington, DC, but in the hearts of our families, communities, workplaces, and voluntary groups which, together, are unleashing the invincible spirit of one great nation under God.
Four years ago we said we would invigorate our economy by giving people greater freedom and incentives to take risks and letting them keep more of what they earned. We did what we promised, and a great industrial giant is reborn.
Tonight we can take pride in 25 straight months of economic growth, the strongest in 34 years; a 3-year inflation average of 3.9 percent, the lowest in 17 years; and 7.3 million new jobs in 2 years, with more of our citizens working than ever before.
New freedom in our lives has planted the rich seeds for future success:
For an America of wisdom that honors the family, knowing that if the family goes, so goes our civilization;
For an America of vision that sees tomorrow's dreams in the learning and hard work we do today;
For an America of courage whose service men and women, even as we meet, proudly stand watch on the frontiers of freedom;
For an America of compassion that opens its heart to those who cry out for help.
We have begun well. But it's only a beginning. We're not here to congratulate ourselves on what we have done but to challenge ourselves to finish what has not yet been done.
We're here to speak for millions in our inner cities who long for real jobs, safe neighborhoods, and schools that truly teach. We're here to speak for the American farmer, the entrepreneur, and every worker in industries fighting to modernize and compete. And, yes, we're here to stand, and proudly so, for all who struggle to break free from totalitarianism, for all who know in their hearts that freedom is the one true path to peace and human happiness.
Proverbs tell us, without a vision the people perish. When asked what great principle holds our Union together, Abraham Lincoln said: "Something in that Declaration giving liberty, not alone to the people of this country, but hope to the world for all future time."
We honor the giants of our history not by going back but forward to the dreams their vision foresaw. My fellow citizens, this nation is poised for greatness. The time has come to proceed toward a great new challenge—a second American Revolution of hope and opportunity; a revolution carrying us to new heights of progress by pushing back frontiers of knowledge and space; a revolution of spirit that taps the soul of America, enabling us to summon greater strength than we've ever known; and a revolution that carries beyond our shores the golden promise of human freedom in a world of peace.
Let us begin by challenging our conventional wisdom. There are no constraints on the human mind, no walls around the human spirit, no barriers to our progress except those we ourselves erect. Already, pushing down tax rates has freed our economy to vault forward to record growth.
In Europe, they're calling it "the American Miracle." Day by day, we're shattering accepted notions of what is possible. When I was growing up, we failed to see how a new thing called radio would transform our marketplace. Well, today, many have not yet seen how advances in technology are transforming our lives.
In the late 1950's workers at the AT&T semiconductor plant in Pennsylvania produced five transistors a day for $7.50 apiece. They now produce over a million for less than a penny apiece.
New laser techniques could revolutionize heart bypass surgery, cut diagnosis time for viruses linked to cancer from weeks to minutes, reduce hospital costs dramatically, and hold out new promise for saving human lives.
Our automobile industry has overhauled assembly lines, increased worker productivity, and is competitive once again.
We stand on the threshold of a great ability to produce more, do more, be more. Our economy is not getting older and weaker; it's getting younger and stronger. It doesn't need rest and supervision; it needs new challenge, greater freedom. And that word "freedom" is the key to the second American revolution that we need to bring about.
Let us move together with an historic reform of tax simplification for fairness and growth. Last year I asked then-Treasury Secretary Regan to develop a plan to simplify the tax code, so all taxpayers would be treated more fairly and personal tax rates could come further down.
We have cut tax rates by almost 25 percent, yet the tax system remains unfair and limits our potential for growth. Exclusions and exemptions cause similar incomes to be taxed at different levels. Low-income families face steep tax barriers that make hard lives even harder. The Treasury Department has produced an excellent reform plan, whose principles will guide the final proposal that we will ask you to enact.
One thing that tax reform will not be is a tax increase in disguise. We will not jeopardize the mortgage interest deduction that families need. We will reduce personal tax rates as low as possible by removing many tax preferences. We will propose a top rate of no more than 35 percent, and possibly lower. And we will propose reducing corporate rates, while maintaining incentives for capital formation.
To encourage opportunity and jobs rather than dependency and welfare, we will propose that individuals living at or near the poverty line be totally exempt from Federal income tax. To restore fairness to families, we will propose increasing significantly the personal exemption.
And tonight, I am instructing Treasury Secretary James Baker—I have to get used to saying that—to begin working with congressional authors and committees for bipartisan legislation conforming to these principles. We will call upon the American people for support and upon every man and woman in this Chamber. Together, we can pass, this year, a tax bill for fairness, simplicity, and growth, making this economy the engine of our dreams and America the investment capital of the world. So let us begin.
Tax simplification will be a giant step toward unleashing the tremendous pent-up power of our economy. But a second American revolution must carry the promise of opportunity for all. It is time to liberate the spirit of enterprise in the most distressed areas of our country.
This government will meet its responsibility to help those in need. But policies that increase dependency, break up families, and destroy self-respect are not progressive; they're reactionary. Despite our strides in civil rights, blacks, Hispanics, and all minorities will not have full and equal power until they have full economic power.
We have repeatedly sought passage of enterprise zones to help those in the abandoned corners of our land find jobs, learn skills, and build better lives. This legislation is supported by a majority of you.
Mr. Speaker, I know we agree that there must be no forgotten Americans. Let us place new dreams in a million hearts and create a new generation of entrepreneurs by passing enterprise zones this year. And, Tip, you could make that a birthday present.
Nor must we lose the chance to pass our youth employment opportunity wage proposal. We can help teenagers, who have the highest unemployment rate, find summer jobs, so they can know the pride of work and have confidence in their futures.
We'll continue to support the Job Training Partnership Act, which has a nearly two-thirds job placement rate. Credits in education and health care vouchers will help working families shop for services that they need.
Our administration is already encouraging certain low-income public housing residents to own and manage their own dwellings. It's time that all public housing residents have that opportunity of ownership.
The Federal Government can help create a new atmosphere of freedom. But States and localities, many of which enjoy surpluses from the recovery, must not permit their tax and regulatory policies to stand as barriers to growth.
Let us resolve that we will stop spreading dependency and start spreading opportunity; that we will stop spreading bondage and start spreading freedom.
There are some who say that growth initiatives must await final action on deficit reductions. Well, the best way to reduce deficits is through economic growth. More businesses will be started, more investments made, more jobs created, and more people will be on payrolls paying taxes. The best way to reduce government spending is to reduce the need for spending by increasing prosperity. Each added percentage point per year of real GNP growth will lead to cumulative reduction in deficits of nearly $200 billion over 5 years.
To move steadily toward a balanced budget, we must also lighten government's claim on our total economy. We will not do this by raising taxes. We must make sure that our economy grows faster than the growth in spending by the Federal Government. In our fiscal year 1986 budget, overall government program spending will be frozen at the current level. It must not be one dime higher than fiscal year 1985, and three points are key.
First, the social safety net for the elderly, the needy, the disabled, and unemployed will be left intact. Growth of our major health care programs, Medicare and Medicaid, will be slowed, but protections for the elderly and needy will be preserved.
Second, we must not relax our efforts to restore military strength just as we near our goal of a fully equipped, trained, and ready professional corps. National security is government's first responsibility; so in past years defense spending took about half the Federal budget. Today it takes less than a third. We've already reduced our planned defense expenditures by nearly a hundred billion dollars over the past 4 years and reduced projected spending again this year.
You know, we only have a military-industrial complex until a time of danger, and then it becomes the arsenal of democracy. Spending for defense is investing in things that are priceless—peace and freedom.
Third, we must reduce or eliminate costly government subsidies. For example, deregulation of the airline industry has led to cheaper airfares, but on Amtrak taxpayers pay about $35 per passenger every time an Amtrak train leaves the station. It's time we ended this huge Federal subsidy.
Our farm program costs have quadrupled in recent years. Yet I know from visiting farmers, many in great financial distress, that we need an orderly transition to a market-oriented farm economy. We can help farmers best not by expanding Federal payments but by making fundamental reforms, keeping interest rates heading down, and knocking down foreign trade barriers to American farm exports.
We're moving ahead with Grace commission reforms to eliminate waste and improve government's management practices. In the long run, we must protect the taxpayers from government. And I ask again that you pass, as 32 States have now called for, an amendment mandating the Federal Government spend no more than it takes in. And I ask for the authority, used responsibly by 43 Governors, to veto individual items in appropriation bills. Senator Mattingly has introduced a bill permitting a 2-year trial run of the line-item veto. I hope you'll pass and send that legislation to my desk.
Nearly 50 years of government living beyond its means has brought us to a time of reckoning. Ours is but a moment in history. But one moment of courage, idealism, and bipartisan unity can change American history forever.
Sound monetary policy is key to long-running economic strength and stability. We will continue to cooperate with the Federal Reserve Board, seeking a steady policy that ensures price stability without keeping interest rates artificially high or needlessly holding down growth.
Reducing unneeded red tape and regulations, and deregulating the energy, transportation, and financial industries have unleashed new competition, giving consumers more choices, better services, and lower prices. In just one set of grant programs we have reduced 905 pages of regulations to 31. We seek to fully deregulate natural gas to bring on new supplies and bring us closer to energy independence. Consistent with safety standards, we will continue removing restraints on the bus and railroad industries, we will soon end up legislation—or send up legislation, I should say—to return Conrail to the private sector where it belongs, and we will support further deregulation of the trucking industry.
Every dollar the Federal Government does not take from us, every decision it does not make for us will make our economy stronger, our lives more abundant, our future more free.
Our second American revolution will push on to new possibilities not only on Earth but in the next frontier of space. Despite budget restraints, we will seek record funding for research and development.
We've seen the success of the space shuttle. Now we're going to develop a permanently manned space station and new opportunities for free enterprise, because in the next decade Americans and our friends around the world will be living and working together in space.
In the zero gravity of space, we could manufacture in 30 days lifesaving medicines it would take 30 years to make on Earth. We can make crystals of exceptional purity to produce super computers, creating jobs, technologies, and medical breakthroughs beyond anything we ever dreamed possible.
As we do all this, we'll continue to protect our natural resources. We will seek reauthorization and expanded funding for the Superfund program to continue cleaning up hazardous waste sites which threaten human health and the environment.
Now, there's another great heritage to speak of this evening. Of all the changes that have swept America the past 4 years, none brings greater promise than our rediscovery of the values of faith, freedom, family, work, and neighborhood.
We see signs of renewal in increased attendance in places of worship; renewed optimism and faith in our future; love of country rediscovered by our young, who are leading the way. We've rediscovered that work is good in and of itself, that it ennobles us to create and contribute no matter how seemingly humble our jobs. We've seen a powerful new current from an old and honorable tradition—American generosity.
From thousands answering Peace Corps appeals to help boost food production in Africa, to millions volunteering time, corporations adopting schools, and communities pulling together to help the neediest among us at home, we have refound our values. Private sector initiatives are crucial to our future.
I thank the Congress for passing equal access legislation giving religious groups the same right to use classrooms after school that other groups enjoy. But no citizen need tremble, nor the world shudder, if a child stands in a classroom and breathes a prayer. We ask you again, give children back a right they had for a century and a half or more in this country.
The question of abortion grips our nation. Abortion is either the taking of a human life or it isn't. And if it is—and medical technology is increasingly showing it is—it must be stopped. It is a terrible irony that while some turn to abortion, so many others who cannot become parents cry out for children to adopt. We have room for these children. We can fill the cradles of those who want a child to love. And tonight I ask you in the Congress to move this year on legislation to protect the unborn.
In the area of education, we're returning to excellence, and again, the heroes are our people, not government. We're stressing basics of discipline, rigorous testing, and homework, while helping children become computer-smart as well. For 20 years scholastic aptitude test scores of our high school students went down, but now they have gone up 2 of the last 3 years. We must go forward in our commitment to the new basics, giving parents greater authority and making sure good teachers are rewarded for hard work and achievement through merit pay.
Of all the changes in the past 20 years, none has more threatened our sense of national well-being than the explosion of violent crime. One does not have to be attacked to be a victim. The woman who must run to her car after shopping at night is a victim. The couple draping their door with locks and chains are victims; as is the tired, decent cleaning woman who can't ride a subway home without being afraid.
We do not seek to violate the rights of defendants. But shouldn't we feel more compassion for the victims of crime than for those who commit crime? For the first time in 20 years, the crime index has fallen 2 years in a row. We've convicted over 7,400 drug offenders and put them, as well as leaders of organized crime, behind bars in record numbers.
But we must do more. I urge the House to follow the Senate and enact proposals permitting use of all reliable evidence that police officers acquire in good faith. These proposals would also reform the habeas corpus laws and allow, in keeping with the will of the overwhelming majority of Americans, the use of the death penalty where necessary.
There can be no economic revival in ghettos when the most violent among us are allowed to roam free. It's time we restored domestic tranquility. And we mean to do just that.
Just as we're positioned as never before to secure justice in our economy, we're poised as never before to create a safer, freer, more peaceful world. Our alliances are stronger than ever. Our economy is stronger than ever. We have resumed our historic role as a leader of the free world. And all of these together are a great force for peace.
Since 1981 we've been committed to seeking fair and verifiable arms agreements that would lower the risk of war and reduce the size of nuclear arsenals. Now our determination to maintain a strong defense has influenced the Soviet Union to return to the bargaining table. Our negotiators must be able to go to that table with the united support of the American people. All of us have no greater dream than to see the day when nuclear weapons are banned from this Earth forever.
Each Member of the Congress has a role to play in modernizing our defenses, thus supporting our chances for a meaningful arms agreement. Your vote this spring on the Peacekeeper missile will be a critical test of our resolve to maintain the strength we need and move toward mutual and verifiable arms reductions.
For the past 20 years we've believed that no war will be launched as long as each side knows it can retaliate with a deadly counterstrike. Well, I believe there's a better way of eliminating the threat of nuclear war. It is a Strategic Defense Initiative aimed ultimately at finding a nonnuclear defense against ballistic missiles. It's the most hopeful possibility of the nuclear age. But it's not very well understood.
Some say it will bring war to the heavens, but its purpose is to deter war in the heavens and on Earth. Now, some say the research would be expensive. Perhaps, but it could save millions of lives, indeed humanity itself. And some say if we build such a system, the Soviets will build a defense system of their own. Well, they already have strategic defenses that surpass ours; a civil defense system, where we have almost none; and a research program covering roughly the same areas of technology that we're now exploring. And finally some say the research will take a long time. Well, the answer to that is: Let's get started.
Harry Truman once said that, ultimately, our security and the world's hopes for peace and human progress "lie not in measures of defense or in the control of weapons, but in the growth and expansion of freedom and self-government."
And tonight, we declare anew to our fellow citizens of the world: Freedom is not the sole prerogative of a chosen few; it is the universal right of all God's children. Look to where peace and prosperity flourish today. It is in homes that freedom built. Victories against poverty are greatest and peace most secure where people live by laws that ensure free press, free speech, and freedom to worship, vote, and create wealth.
Our mission is to nourish and defend freedom and democracy, and to communicate these ideals everywhere we can. America's economic success is freedom's success; it can be repeated a hundred times in a hundred different nations. Many countries in east Asia and the Pacific have few resources other than the enterprise of their own people. But through low tax rates and free markets they've soared ahead of centralized economies. And now China is opening up its economy to meet its needs.
We need a stronger and simpler approach to the process of making and implementing trade policy, and we'll be studying potential changes in that process in the next few weeks. We've seen the benefits of free trade and lived through the disasters of protectionism. Tonight I ask all our trading partners, developed and developing alike, to join us in a new round of trade negotiations to expand trade and competition and strengthen the global economy—and to begin it in this next year.
There are more than 3 billion human beings living in Third World countries with an average per capita income of $650 a year. Many are victims of dictatorships that impoverished them with taxation and corruption. Let us ask our allies to join us in a practical program of trade and assistance that fosters economic development through personal incentives to help these people climb from poverty on their own.
We cannot play innocents abroad in a world that's not innocent; nor can we be passive when freedom is under siege. Without resources, diplomacy cannot succeed. Our security assistance programs help friendly governments defend themselves and give them confidence to work for peace. And I hope that you in the Congress will understand that, dollar for dollar, security assistance contributes as much to global security as our own defense budget.
We must stand by all our democratic allies. And we must not break faith with those who are risking their lives—on every continent, from Afghanistan to Nicaragua—to defy Soviet-supported aggression and secure rights which have been ours from birth.
The Sandinista dictatorship of Nicaragua, with full Cuban-Soviet bloc support, not only persecutes its people, the church, and denies a free press, but arms and provides bases for Communist terrorists attacking neighboring states. Support for freedom fighters is self-defense and totally consistent with the OAS and U.N. Charters. It is essential that the Congress continue all facets of our assistance to Central America. I want to work with you to support the democratic forces whose struggle is tied to our own security.
And tonight, I've spoken of great plans and great dreams. They're dreams we can make come true. Two hundred years of American history should have taught us that nothing is impossible.
Ten years ago a young girl left Vietnam with her family, part of the exodus that followed the fall of Saigon. They came to the United States with no possessions and not knowing a word of English. Ten years ago—the young girl studied hard, learned English, and finished high school in the top of her class. And this May, May 22d to be exact, is a big date on her calendar. Just 10 years from the time she left Vietnam, she will graduate from the United States Military Academy at West Point. I thought you might like to meet an American hero named Jean Nguyen.
Now, there's someone else here tonight, born 79 years ago. She lives in the inner city, where she cares for infants born of mothers who are heroin addicts. The children, born in withdrawal, are sometimes even dropped on her doorstep. She helps them with love. Go to her house some night, and maybe you'll see her silhouette against the window as she walks the floor talking softly, soothing a child in her arms: Mother Hale of Harlem, and she, too, is an American hero.
Jean, Mother Hale, your lives tell us that the oldest American saying is new again: Anything is possible in America if we have the faith, the will, and the heart. History is asking us once again to be a force for good in the world. Let us begin in unity, with justice, and love.
Thank you, and God bless you.
"""
import phrasemachine
phrases = phrasemachine.get_phrases(text)
print "%s phrase types" % len(phrases['counts'])
print "%s phrase hits" % sum(phrases['counts'].values())
print "Top phrases:"
print phrases['counts'].most_common(10)
print "From crappy tokenization:"
crappy_tokens = text.split()
print phrasemachine.get_phrases(tokens=crappy_tokens)['counts'].most_common(10)
print "Phrase spans"
phrases = phrasemachine.get_phrases(text, output=['token_spans','tokens'])
print "%s phrase hits" % len(phrases['token_spans'])
print phrases['token_spans'][:20]
print phrases['token_spans'][-20:]
print "First several phrase hits"
print [(s,e, phrases['tokens'][s:e]) for (s,e) in phrases['token_spans'][:10]]
print "From crappy tokenization"
xx = phrasemachine.get_phrases(tokens=crappy_tokens, output='token_spans')['token_spans']
print [(s,e, crappy_tokens[s:e]) for (s,e) in xx[:10]]
|
python
|
#
# PySNMP MIB module HUAWEI-SEP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-SEP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:36:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
MibScalar, MibTable, MibTableRow, MibTableColumn, ifName, InterfaceIndex, Integer32, ObjectIdentity, ModuleIdentity, Unsigned32 = mibBuilder.importSymbols("IF-MIB", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ifName", "InterfaceIndex", "Integer32", "ObjectIdentity", "ModuleIdentity", "Unsigned32")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
VlanId, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanId")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier, iso, Counter64, Counter32, Bits, IpAddress, Integer32, Gauge32, ModuleIdentity, ObjectIdentity, TimeTicks, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier", "iso", "Counter64", "Counter32", "Bits", "IpAddress", "Integer32", "Gauge32", "ModuleIdentity", "ObjectIdentity", "TimeTicks", "Unsigned32")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
hwSepMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223))
if mibBuilder.loadTexts: hwSepMIB.setLastUpdated('200911171530Z')
if mibBuilder.loadTexts: hwSepMIB.setOrganization('Huawei Technologies Co.,Ltd.')
hwSepObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1))
hwSepResetPktCnt = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("clear", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwSepResetPktCnt.setStatus('current')
hwSepSegmentTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2), )
if mibBuilder.loadTexts: hwSepSegmentTable.setStatus('current')
hwSepSegmentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1), ).setIndexNames((0, "HUAWEI-SEP-MIB", "hwSepSegmentId"))
if mibBuilder.loadTexts: hwSepSegmentEntry.setStatus('current')
hwSepSegmentId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: hwSepSegmentId.setStatus('current')
hwSepControlVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepControlVlanId.setStatus('current')
hwSepPreemptManual = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPreemptManual.setStatus('current')
hwSepPreemptDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(15, 600), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPreemptDelay.setStatus('current')
hwSepBlockPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("optimal", 1), ("middle", 2), ("hop", 3), ("name", 4), ("null", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortMode.setStatus('current')
hwSepBlockPortHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 512), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortHop.setStatus('current')
hwSepBlockPortSysname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortSysname.setStatus('current')
hwSepBlockPortIfname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortIfname.setStatus('current')
hwSepTcNotifySep = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 129))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifySep.setStatus('current')
hwSepTcNotifyRrpp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 10), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyRrpp.setStatus('current')
hwSepTcNotifyStp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 11), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyStp.setStatus('current')
hwSepTcNotifyVpls = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 12), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyVpls.setStatus('current')
hwSepTcNotifyVll = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 13), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyVll.setStatus('current')
hwSepTcNotifySmartLinkCtrlVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifySmartLinkCtrlVlan.setStatus('current')
hwSepDealSmartLinkFlush = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 15), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepDealSmartLinkFlush.setStatus('current')
hwSepProtectedInstanceList = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 16), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 512))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepProtectedInstanceList.setStatus('current')
hwSepTcProtectionInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcProtectionInterval.setStatus('current')
hwSepSegmentRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 128), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepSegmentRowStatus.setStatus('current')
hwSepTopologyTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3), )
if mibBuilder.loadTexts: hwSepTopologyTable.setStatus('current')
hwSepTopologyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1), ).setIndexNames((0, "HUAWEI-SEP-MIB", "hwSepSegmentId"), (0, "HUAWEI-SEP-MIB", "hwSepHop"))
if mibBuilder.loadTexts: hwSepTopologyEntry.setStatus('current')
hwSepHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 512)))
if mibBuilder.loadTexts: hwSepHop.setStatus('current')
hwSepPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortId.setStatus('current')
hwSepTopoSysname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoSysname.setStatus('current')
hwSepTopoPortname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortname.setStatus('current')
hwSepTopoPortConfigPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortConfigPriority.setStatus('current')
hwSepTopoPortActivePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortActivePriority.setStatus('current')
hwSepTopoConfigPortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoConfigPortRole.setStatus('current')
hwSepTopoActivePortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoActivePortRole.setStatus('current')
hwSepTopoPortNbrState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("down", 1), ("init", 2), ("preup", 3), ("up", 4), ("conflict", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortNbrState.setStatus('current')
hwSepTopoBrotherPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoBrotherPortId.setStatus('current')
hwSepTopoNbrPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoNbrPortId.setStatus('current')
hwSepTopoPortLinkState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("down", 1), ("up", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortLinkState.setStatus('current')
hwSepTopoPortFwdState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("discarding", 1), ("forwarding", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortFwdState.setStatus('current')
hwSepPortTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4), )
if mibBuilder.loadTexts: hwSepPortTable.setStatus('current')
hwSepPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1), ).setIndexNames((0, "HUAWEI-SEP-MIB", "hwSepSegmentId"), (0, "HUAWEI-SEP-MIB", "hwSepPortType"), (0, "HUAWEI-SEP-MIB", "hwSepPortId1"), (0, "HUAWEI-SEP-MIB", "hwSepPortId2"), (0, "HUAWEI-SEP-MIB", "hwSepPortId3"), (0, "HUAWEI-SEP-MIB", "hwSepPortId4"))
if mibBuilder.loadTexts: hwSepPortEntry.setStatus('current')
hwSepPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 1)))
if mibBuilder.loadTexts: hwSepPortType.setStatus('current')
hwSepPortId1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId1.setStatus('current')
hwSepPortId2 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId2.setStatus('current')
hwSepPortId3 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId3.setStatus('current')
hwSepPortId4 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId4.setStatus('current')
hwSepSysname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepSysname.setStatus('current')
hwSepPortname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortname.setStatus('current')
hwSepPortConfigPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPortConfigPriority.setStatus('current')
hwSepPortActivePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortActivePriority.setStatus('current')
hwSepConfigPortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 10), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepConfigPortRole.setStatus('current')
hwSepActivePortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepActivePortRole.setStatus('current')
hwSepPortNbrState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("down", 1), ("init", 2), ("preup", 3), ("up", 4), ("conflict", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortNbrState.setStatus('current')
hwSepNbrPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepNbrPortId.setStatus('current')
hwSepPortFwdState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("discarding", 1), ("forwarding", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortFwdState.setStatus('current')
hwSepRxNbrPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxNbrPktCnt.setStatus('current')
hwSepTxNbrPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxNbrPktCnt.setStatus('current')
hwSepRxLsaInfoPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxLsaInfoPktCnt.setStatus('current')
hwSepTxLsaInfoPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxLsaInfoPktCnt.setStatus('current')
hwSepRxLsaAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxLsaAckPktCnt.setStatus('current')
hwSepTxLsaAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxLsaAckPktCnt.setStatus('current')
hwSepRxPreemptReqPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxPreemptReqPktCnt.setStatus('current')
hwSepTxPreemptReqPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxPreemptReqPktCnt.setStatus('current')
hwSepRxPreemptAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxPreemptAckPktCnt.setStatus('current')
hwSepTxPreemptAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxPreemptAckPktCnt.setStatus('current')
hwSepRxTcPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxTcPktCnt.setStatus('current')
hwSepTxTcPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxTcPktCnt.setStatus('current')
hwSepRxEpaPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 27), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxEpaPktCnt.setStatus('current')
hwSepTxEpaPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxEpaPktCnt.setStatus('current')
hwSepResetPortPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("clear", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwSepResetPortPktCnt.setStatus('current')
hwSepPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 128), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPortRowStatus.setStatus('current')
hwSepGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2))
hwSepGlobalInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 1)).setObjects(("HUAWEI-SEP-MIB", "hwSepResetPktCnt"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepGlobalInfoGroup = hwSepGlobalInfoGroup.setStatus('current')
hwSepSegmentInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 2)).setObjects(("HUAWEI-SEP-MIB", "hwSepSegmentId"), ("HUAWEI-SEP-MIB", "hwSepControlVlanId"), ("HUAWEI-SEP-MIB", "hwSepPreemptManual"), ("HUAWEI-SEP-MIB", "hwSepPreemptDelay"), ("HUAWEI-SEP-MIB", "hwSepBlockPortMode"), ("HUAWEI-SEP-MIB", "hwSepBlockPortHop"), ("HUAWEI-SEP-MIB", "hwSepBlockPortSysname"), ("HUAWEI-SEP-MIB", "hwSepBlockPortIfname"), ("HUAWEI-SEP-MIB", "hwSepTcNotifySep"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyRrpp"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyStp"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyVpls"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyVll"), ("HUAWEI-SEP-MIB", "hwSepTcNotifySmartLinkCtrlVlan"), ("HUAWEI-SEP-MIB", "hwSepDealSmartLinkFlush"), ("HUAWEI-SEP-MIB", "hwSepProtectedInstanceList"), ("HUAWEI-SEP-MIB", "hwSepTcProtectionInterval"), ("HUAWEI-SEP-MIB", "hwSepSegmentRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepSegmentInfoGroup = hwSepSegmentInfoGroup.setStatus('current')
hwSepPortInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 3)).setObjects(("HUAWEI-SEP-MIB", "hwSepPortType"), ("HUAWEI-SEP-MIB", "hwSepPortId1"), ("HUAWEI-SEP-MIB", "hwSepPortId2"), ("HUAWEI-SEP-MIB", "hwSepPortId3"), ("HUAWEI-SEP-MIB", "hwSepPortId4"), ("HUAWEI-SEP-MIB", "hwSepSysname"), ("HUAWEI-SEP-MIB", "hwSepPortname"), ("HUAWEI-SEP-MIB", "hwSepPortConfigPriority"), ("HUAWEI-SEP-MIB", "hwSepPortActivePriority"), ("HUAWEI-SEP-MIB", "hwSepConfigPortRole"), ("HUAWEI-SEP-MIB", "hwSepActivePortRole"), ("HUAWEI-SEP-MIB", "hwSepPortNbrState"), ("HUAWEI-SEP-MIB", "hwSepNbrPortId"), ("HUAWEI-SEP-MIB", "hwSepPortFwdState"), ("HUAWEI-SEP-MIB", "hwSepRxNbrPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxNbrPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxLsaInfoPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxLsaInfoPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxLsaAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxLsaAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxPreemptReqPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxPreemptReqPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxPreemptAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxPreemptAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxTcPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxTcPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxEpaPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxEpaPktCnt"), ("HUAWEI-SEP-MIB", "hwSepResetPortPktCnt"), ("HUAWEI-SEP-MIB", "hwSepPortRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepPortInfoGroup = hwSepPortInfoGroup.setStatus('current')
hwSepTopologyInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 4)).setObjects(("HUAWEI-SEP-MIB", "hwSepHop"), ("HUAWEI-SEP-MIB", "hwSepPortId"), ("HUAWEI-SEP-MIB", "hwSepTopoSysname"), ("HUAWEI-SEP-MIB", "hwSepTopoPortname"), ("HUAWEI-SEP-MIB", "hwSepTopoPortConfigPriority"), ("HUAWEI-SEP-MIB", "hwSepTopoPortActivePriority"), ("HUAWEI-SEP-MIB", "hwSepTopoConfigPortRole"), ("HUAWEI-SEP-MIB", "hwSepTopoActivePortRole"), ("HUAWEI-SEP-MIB", "hwSepTopoPortNbrState"), ("HUAWEI-SEP-MIB", "hwSepTopoNbrPortId"), ("HUAWEI-SEP-MIB", "hwSepTopoPortLinkState"), ("HUAWEI-SEP-MIB", "hwSepTopoPortFwdState"), ("HUAWEI-SEP-MIB", "hwSepTopoBrotherPortId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepTopologyInfoGroup = hwSepTopologyInfoGroup.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-SEP-MIB", hwSepTcNotifySep=hwSepTcNotifySep, hwSepPortname=hwSepPortname, hwSepSegmentTable=hwSepSegmentTable, hwSepMIB=hwSepMIB, hwSepTopoPortname=hwSepTopoPortname, hwSepTxTcPktCnt=hwSepTxTcPktCnt, hwSepGroups=hwSepGroups, hwSepPortId2=hwSepPortId2, hwSepTopoConfigPortRole=hwSepTopoConfigPortRole, hwSepRxLsaAckPktCnt=hwSepRxLsaAckPktCnt, hwSepActivePortRole=hwSepActivePortRole, hwSepPortRowStatus=hwSepPortRowStatus, hwSepTopoSysname=hwSepTopoSysname, hwSepTopoPortNbrState=hwSepTopoPortNbrState, hwSepPortId3=hwSepPortId3, hwSepPortNbrState=hwSepPortNbrState, hwSepPortType=hwSepPortType, hwSepTopologyTable=hwSepTopologyTable, hwSepSegmentId=hwSepSegmentId, hwSepRxTcPktCnt=hwSepRxTcPktCnt, hwSepBlockPortMode=hwSepBlockPortMode, hwSepBlockPortIfname=hwSepBlockPortIfname, hwSepPortTable=hwSepPortTable, hwSepConfigPortRole=hwSepConfigPortRole, hwSepHop=hwSepHop, hwSepRxPreemptReqPktCnt=hwSepRxPreemptReqPktCnt, hwSepSysname=hwSepSysname, hwSepTxPreemptAckPktCnt=hwSepTxPreemptAckPktCnt, hwSepProtectedInstanceList=hwSepProtectedInstanceList, hwSepTxEpaPktCnt=hwSepTxEpaPktCnt, hwSepTopologyEntry=hwSepTopologyEntry, hwSepTopoPortActivePriority=hwSepTopoPortActivePriority, hwSepTxLsaInfoPktCnt=hwSepTxLsaInfoPktCnt, hwSepGlobalInfoGroup=hwSepGlobalInfoGroup, hwSepRxEpaPktCnt=hwSepRxEpaPktCnt, hwSepPortEntry=hwSepPortEntry, hwSepTopoPortConfigPriority=hwSepTopoPortConfigPriority, hwSepSegmentInfoGroup=hwSepSegmentInfoGroup, hwSepTopoBrotherPortId=hwSepTopoBrotherPortId, hwSepPortFwdState=hwSepPortFwdState, hwSepTxNbrPktCnt=hwSepTxNbrPktCnt, hwSepResetPktCnt=hwSepResetPktCnt, hwSepSegmentEntry=hwSepSegmentEntry, hwSepDealSmartLinkFlush=hwSepDealSmartLinkFlush, hwSepTcProtectionInterval=hwSepTcProtectionInterval, hwSepNbrPortId=hwSepNbrPortId, hwSepRxPreemptAckPktCnt=hwSepRxPreemptAckPktCnt, hwSepResetPortPktCnt=hwSepResetPortPktCnt, hwSepPreemptManual=hwSepPreemptManual, hwSepPortId4=hwSepPortId4, hwSepTcNotifyVll=hwSepTcNotifyVll, hwSepTopoActivePortRole=hwSepTopoActivePortRole, hwSepPreemptDelay=hwSepPreemptDelay, hwSepPortActivePriority=hwSepPortActivePriority, hwSepTcNotifySmartLinkCtrlVlan=hwSepTcNotifySmartLinkCtrlVlan, hwSepTcNotifyVpls=hwSepTcNotifyVpls, hwSepBlockPortSysname=hwSepBlockPortSysname, hwSepPortConfigPriority=hwSepPortConfigPriority, hwSepPortInfoGroup=hwSepPortInfoGroup, hwSepTopologyInfoGroup=hwSepTopologyInfoGroup, hwSepControlVlanId=hwSepControlVlanId, hwSepObjects=hwSepObjects, hwSepTcNotifyStp=hwSepTcNotifyStp, hwSepPortId=hwSepPortId, hwSepTopoNbrPortId=hwSepTopoNbrPortId, hwSepTxLsaAckPktCnt=hwSepTxLsaAckPktCnt, hwSepTopoPortFwdState=hwSepTopoPortFwdState, hwSepRxLsaInfoPktCnt=hwSepRxLsaInfoPktCnt, hwSepPortId1=hwSepPortId1, hwSepRxNbrPktCnt=hwSepRxNbrPktCnt, PYSNMP_MODULE_ID=hwSepMIB, hwSepBlockPortHop=hwSepBlockPortHop, hwSepTcNotifyRrpp=hwSepTcNotifyRrpp, hwSepSegmentRowStatus=hwSepSegmentRowStatus, hwSepTopoPortLinkState=hwSepTopoPortLinkState, hwSepTxPreemptReqPktCnt=hwSepTxPreemptReqPktCnt)
|
python
|
import unittest
import unittest.mock
import denonavr.__main__ as avr
class TestDenonAVR(unittest.TestCase):
def test_valid_input_source(self):
self.assertTrue(avr._is_valid_input_source("DVD"))
self.assertTrue(avr._is_valid_input_source("BD"))
self.assertTrue(avr._is_valid_input_source("GAME"))
self.assertTrue(avr._is_valid_input_source("SAT/CBL"))
self.assertFalse(avr._is_valid_input_source("VHS"))
def test_convert_input_source(self):
self.assertEqual(avr._convert_input_source("satcbl"), "SAT/CBL")
self.assertEqual(avr._convert_input_source("vhs"), "VHS")
def test_execute(self):
with unittest.mock.patch("telnetlib.Telnet") as telnet_mock:
telnet_mock.return_value.read_until.return_value = "b'Test\\r'"
self.assertEqual(avr.execute("?Test", avr.CONFIG), "Test")
telnet_mock.return_value.write.assert_called_once_with(b'?Test\r')
telnet_mock.return_value.close.assert_called_once_with()
def test_execute_error(self):
with unittest.mock.patch("telnetlib.Telnet") as telnet_mock:
telnet_mock.return_value.write.side_effect = OSError
self.assertEqual(avr.execute("CMD", avr.CONFIG), "ERROR")
telnet_mock.return_value = unittest.mock.MagicMock(spec=["write", "read_until", "close"])
telnet_mock.side_effect = None
telnet_mock.return_value.write.side_effect = OSError
self.assertEqual(avr.execute("CMD", avr.CONFIG), "ERROR")
telnet_mock.return_value.close.assert_called_with()
if __name__ == '__main__':
unittest.main()
|
python
|
from ...hek.defs.obje import *
def get():
return obje_def
# replace the model animations dependency with an open sauce one
obje_attrs = dict(obje_attrs)
obje_attrs[8] = dependency('animation_graph', valid_model_animations_yelo)
obje_body = Struct('tagdata',
obje_attrs
)
obje_def = TagDef("obje",
blam_header('obje'),
obje_body,
ext=".object", endian=">", tag_cls=ObjeTag
)
|
python
|
class Coordenadas():
    def __init__(self, coordenadaX, coordenadaY):
        self.coordenadaX = coordenadaX
        self.coordenadaY = coordenadaY
    def valores(self):
        print("The values entered were:", "(", self.coordenadaX, ",", self.coordenadaY, ")")
    def cuadrante(self):
        if (self.coordenadaX > 0 and self.coordenadaY > 0):
            print("The point lies in the first quadrant")
        elif (self.coordenadaX < 0 and self.coordenadaY > 0):
            print("The point lies in the second quadrant")
        elif (self.coordenadaX < 0 and self.coordenadaY < 0):
            print("The point lies in the third quadrant")
        elif (self.coordenadaX > 0 and self.coordenadaY < 0):
            print("The point lies in the fourth quadrant")
    def vector_resultante(self):
        otroX = int(input("Enter the new X coordinate: "))
        otroY = int(input("Enter the new Y coordinate: "))
        self.VRX = otroX - self.coordenadaX
        self.VRY = otroY - self.coordenadaY
        print("The resulting vector is:", "(", self.VRX, ",", self.VRY, ")")
    def distancia(self):
        d = ((self.VRX) ** 2 + (self.VRY) ** 2) ** 0.5
        print("The distance between the points is:", d)
    def __resultante(self):
        coor.vector_resultante()
coor = Coordenadas(coordenadaX=-2, coordenadaY=3)
coor.valores()
coor.cuadrante()
coor.vector_resultante()
coor.distancia()
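# Hedged sketch (not part of the original snippet): the same resultant-vector /
# distance idea as a standalone function with hard-coded example points instead
# of interactive input(). The endpoint values below are hypothetical.
import math

def distancia_entre(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2).
    return math.hypot(x2 - x1, y2 - y1)

# From (-2, 3) to (1, -1) the resultant vector is (3, -4), so the distance is 5.0.
assert distancia_entre(-2, 3, 1, -1) == 5.0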
|
python
|
import sys
import unittest
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, os.path.abspath(
os.path.join(script_dir, os.path.join('..', '..'))))
import pake
class SubpakeTest(unittest.TestCase):
def file_helper_test_stub(self, ctx, silent):
fp = pake.FileHelper(ctx)
self.assertEqual(fp.printer, ctx)
# FileHelper.makedirs
# =============================
fp.makedirs('test_data/filehelper/sub', silent=silent)
try:
fp.makedirs('test_data/filehelper/sub', silent=silent)
except Exception:
self.fail('pake.FileHelper.makedirs threw creating an existing directory tree. '
'It should not do this when exist_ok=True, which is default.')
with self.assertRaises(OSError):
fp.makedirs('test_data/filehelper/sub', exist_ok=False, silent=silent)
with self.assertRaises(OSError):
fp.makedirs('test_data/filehelper', exist_ok=False, silent=silent)
with self.assertRaises(OSError):
fp.makedirs('test_data', exist_ok=False, silent=silent)
self.assertTrue(os.path.isdir('test_data/filehelper'))
self.assertTrue(os.path.isdir('test_data/filehelper/sub'))
for i in range(0, 3):
fp.makedirs('test_data/filehelper/delete_me_{}/sub'.format(i), silent=silent)
self.assertTrue(os.path.isdir('test_data/filehelper/delete_me_{}/sub'.format(i)))
touch_file = 'test_data/filehelper/delete_me_{idx}/sub/file{idx}.txt'.format(idx=i)
fp.touch(touch_file, silent=silent)
self.assertTrue(os.path.isfile(touch_file))
# FileHelper.copytree
# =============================
fp.copytree('test_data/filehelper', 'test_data/filehelper/copy_test', silent=silent)
self.assertTrue(os.path.isdir('test_data/filehelper/copy_test'))
for i in range(0, 3):
touch_file = 'test_data/filehelper/copy_test/delete_me_{idx}/sub/file{idx}.txt'.format(idx=i)
self.assertTrue(os.path.isfile(touch_file))
with self.assertRaises(FileExistsError):
fp.copytree('test_data/filehelper', 'test_data/filehelper/copy_test', silent=silent)
# FileHelper.move
# =============================
fp.makedirs('test_data/filehelper/movedir', silent=silent)
fp.touch('test_data/filehelper/move.txt', silent=silent)
fp.move('test_data/filehelper/move.txt', 'test_data/filehelper/movedir', silent=silent)
self.assertTrue(os.path.isfile('test_data/filehelper/movedir/move.txt'))
fp.move('test_data/filehelper/movedir', 'test_data/filehelper/copy_test', silent=silent)
self.assertTrue(os.path.isfile('test_data/filehelper/copy_test/movedir/move.txt'))
# FileHelper.remove
# =============================
fp.remove('test_data/filehelper/copy_test/movedir/move.txt', silent=silent)
self.assertFalse(os.path.isfile('test_data/filehelper/copy_test/movedir/move.txt'))
try:
fp.remove('test_data/filehelper/copy_test/movedir/move.txt', silent=silent)
except Exception:
self.fail(
                'pake.FileHelper.remove threw removing a non existing file. It should not do this when must_exist=False, which is default.')
with self.assertRaises(FileNotFoundError):
fp.remove('test_data/filehelper/copy_test/movedir/move.txt', must_exist=True, silent=silent)
# Cannot use remove to remove directories, must use rmtree
with self.assertRaises(OSError):
fp.remove('test_data/filehelper/copy_test/movedir', must_exist=True, silent=silent)
# FileHelper.touch
# =============================
try:
fp.touch('test_data/filehelper/delete_me_0/sub/file0.txt', silent=silent)
except Exception:
self.fail(
'pake.FileHelper.touch threw touching an existing file. It should not do this when exist_ok=True, which is default.')
with self.assertRaises(FileExistsError):
fp.touch('test_data/filehelper/delete_me_0/sub/file0.txt', silent=silent, exist_ok=False)
# FileHelper.glob_remove
# =============================
fp.glob_remove('test_data/filehelper/delete_me_**/sub/file*.txt', silent=silent)
for i in range(0, 3):
self.assertFalse(os.path.isfile('test_data/filehelper/delete_me_{idx}/sub/file{idx}.txt'.format(idx=i)))
# FileHelper.copy
# =============================
fp.copy('test_data/in1', 'test_data/filehelper', silent=silent)
self.assertTrue(os.path.isfile('test_data/filehelper/in1'))
try:
fp.copy('test_data/in1', 'test_data/filehelper', silent=silent)
except Exception:
self.fail(
'pake.FileHelper.copy threw overwriting an existing file. It should not do this.')
# Just to hit the second path, there is not really a way to portably test copying the metadata,
# it is up to the shutil module to do it anyway.
fp.copy('test_data/in2', 'test_data/filehelper', silent=silent, copy_metadata=True)
self.assertTrue(os.path.isfile('test_data/filehelper/in2'))
try:
fp.copy('test_data/in2', 'test_data/filehelper', silent=silent, copy_metadata=True)
except Exception:
self.fail(
'pake.FileHelper.copy with metadata threw overwriting an existing file. It should not do this.')
# FileHelper.glob_remove_dirs
# =============================
# remove the sub folders under the folders starting with delete_me_*
fp.glob_remove_dirs('test_data/filehelper/delete_me_**/su*', silent=silent)
for i in range(0, 3):
# delete_me_* should remain intact, the sub folders should not
self.assertTrue(os.path.isdir('test_data/filehelper/delete_me_{}'.format(i)))
self.assertFalse(os.path.isdir('test_data/filehelper/delete_me_{}/sub'.format(i)))
fp.glob_remove_dirs('test_data/filehelper/delete_me_*', silent=silent)
for i in range(0, 3):
# now they should be gone
self.assertFalse(os.path.isdir('test_data/filehelper/delete_me_{}'.format(i)))
# FileHelper.rmtree
# =============================
fp.rmtree('test_data/filehelper', silent=silent)
try:
fp.rmtree('test_data/filehelper', silent=silent)
except Exception:
self.fail(
'pake.FileHelper.rmtree threw removing a non existent directory. It should not do this when must_exist=False, which is default.')
with self.assertRaises(FileNotFoundError):
fp.rmtree('test_data/filehelper', silent=silent, must_exist=True)
def test_filehelper(self):
fh = pake.FileHelper()
self.assertEqual(fh.printer, None)
class SilentTestCtx:
def print(*args):
nonlocal self
self.fail('SilentTestCtx printed from file helper function set to be silent.')
class TestCtx:
def print(*args):
pass
class ErrCtx:
# I don't implement print
pass
with self.assertRaises(ValueError):
# Error because no print function is defined.
_ = pake.FileHelper(ErrCtx())
past_cwd = os.getcwd()
os.chdir(script_dir)
pake.FileHelper().rmtree('test_data/filehelper')
self.file_helper_test_stub(SilentTestCtx(), silent=True)
self.file_helper_test_stub(TestCtx(), silent=False)
self.file_helper_test_stub(None, silent=True)
self.file_helper_test_stub(None, silent=False)
os.chdir(past_cwd)
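# Hedged addition (not in the original snippet): allow running this test module
# directly, matching the unittest convention used by the other test snippets here.
if __name__ == '__main__':
    unittest.main()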
|
python
|
import sys
import re
def main():
file = sys.stdin
if file.isatty():
filename = input('Input file name: ')
file = open(filename)
rules = file.readlines()
file.close()
bags = parseRules(rules)
shinyGoldBag = bags['shiny gold']
shinyGoldContainers = shinyGoldBag.findContainers()
print(f'Shiny gold bags can be carried in {len(shinyGoldContainers)} other bags')
print(f'Shiny gold bags must contain {shinyGoldBag.countContainedBags()} other bags')
def parseRules(rules):
bags = {bag.color: bag for bag in [Bag(rule) for rule in rules]}
ruleRegex = re.compile(r'(\d+) ([a-z]+ [a-z]+) bags?')
for bag in bags.values():
for rule in bag.containsRule.split(', '):
match = ruleRegex.match(rule)
if match and match.group(2) in bags:
cBag = bags[match.group(2)]
bag.contains[cBag.color] = ContainedBag(cBag, int(match.group(1)))
cBag.containedIn[bag.color] = bag
return bags
class Bag:
def __init__(self, rule):
(color, restOfRule) = rule.split(' bags contain ')
self.color = color
self.containsRule = restOfRule.strip('. \r\n')
self.contains = {}
self.containedIn = {}
    def findContainers(self, containers=None):
        if containers is None:
            containers = set()
for container in self.containedIn.values():
if container not in containers:
containers.add(container)
container.findContainers(containers)
return containers
def countContainedBags(self):
count = 0
for contained in self.contains.values():
count += contained.count + contained.count * contained.bag.countContainedBags()
return count
class ContainedBag:
def __init__(self, bag, count):
self.bag = bag
self.count = count
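# Hedged sanity check (made-up rules, not the real puzzle input): a shiny gold
# bag holding 2 dark red bags, each holding 1 dark blue bag, should contain
# 2 + 2*1 = 4 bags in total, and a dark blue bag should be reachable from 2 containers.
_example_bags = parseRules([
    'shiny gold bags contain 2 dark red bags.',
    'dark red bags contain 1 dark blue bag.',
    'dark blue bags contain no other bags.',
])
assert _example_bags['shiny gold'].countContainedBags() == 4
assert len(_example_bags['dark blue'].findContainers(set())) == 2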
if __name__ == "__main__":
main()
|
python
|
import copy
import logging
import os
import typing
import yaml
from nbcollection.ci.constants import ENCODING, SCANNER_ARTIFACT_DEST_DIR
from nbcollection.ci.generate_ci_environment.constants import NBCOLLECTION_BUILDER, CONFIG_TEMPLATE, JOB_TEMPLATE, \
PULL_REQUEST_TEMPLATE, NBCOLLECTION_WORKFLOW_NAME, PUBLISH_JOB_NAME_TEMPLATE
from nbcollection.ci.commands.datatypes import CIEnvironment
from nbcollection.ci.datatypes import BuildJob
from nbcollection.ci.renderer import render_template
logger = logging.getLogger(__name__)
def gen_ci_env(jobs: typing.List[BuildJob], ci_env: CIEnvironment, project_path: str, enable_website_publication: bool,
enable_nightly: bool = False) -> None:
if ci_env is not CIEnvironment.CircleCI:
raise NotImplementedError(f'CIEnvironment "{ci_env}" not supported')
formatted_collections = []
formatted_job_names = []
config = copy.deepcopy(CONFIG_TEMPLATE)
logger.info(f'Using {NBCOLLECTION_BUILDER} for CircleCI Image Executor')
for build_job in jobs:
formatted_cat_name = ' '.join(build_job.category.name.split('_'))
formatted_cat_name = formatted_cat_name.title()
formatted_col_name = ' '.join(build_job.collection.name.split('_'))
formatted_col_name = formatted_col_name.title()
job_name = '-'.join([formatted_col_name, formatted_cat_name])
logger.info(f'Generating job for "{job_name}"')
job = copy.deepcopy(JOB_TEMPLATE)
job['steps'][2]['run']['command'] = ' '.join([
'nbcollection-ci build-notebooks',
f'--collection-names {build_job.collection.name}',
f'--category-names {build_job.category.name}',
])
job['steps'][2]['run']['name'] = f'Build {job_name} notebooks'
job['steps'][3]['store_artifacts']['path'] = SCANNER_ARTIFACT_DEST_DIR
config['jobs'][job_name] = job
config['workflows'][NBCOLLECTION_WORKFLOW_NAME]['jobs'].append(job_name)
        if build_job.collection.name not in formatted_collections:
formatted_collections.append(build_job.collection.name)
formatted_job_names.append(job_name)
formatted_collections = ','.join(formatted_collections)
# Pull Request
pr_job_name = 'Pull Request'
config['jobs'][pr_job_name] = copy.deepcopy(PULL_REQUEST_TEMPLATE)
config['workflows'][NBCOLLECTION_WORKFLOW_NAME]['jobs'].append(pr_job_name)
# Publish Website
if enable_website_publication:
publish_job_name = 'Publish Website'
config['jobs'][publish_job_name] = copy.deepcopy(PUBLISH_JOB_NAME_TEMPLATE)
config['jobs'][publish_job_name]['steps'][2]['run']['command'] = f'nbcollection-ci merge-artifacts -c {formatted_collections} -o $CIRCLE_PROJECT_USERNAME -r $CIRCLE_PROJECT_REPONAME'
config['jobs'][publish_job_name]['steps'][2]['run']['name'] = 'Publish Website'
config['workflows'][NBCOLLECTION_WORKFLOW_NAME]['jobs'].append({
publish_job_name: {
'requires': formatted_job_names
}
})
if enable_nightly:
schedule_key = f'{NBCOLLECTION_WORKFLOW_NAME}-periodic'
config['workflows'][schedule_key] = copy.deepcopy(config['workflows'][NBCOLLECTION_WORKFLOW_NAME])
config['workflows'][schedule_key]['triggers'] = [
{
'schedule': {
'cron': '30 8 * * *',
'filters': {
'branches': {
'only': ['main']
}
}
}
}
]
config_path = os.path.join(project_path, '.circleci/config.yml')
config_dirpath = os.path.dirname(config_path)
if not os.path.exists(config_dirpath):
os.makedirs(config_dirpath)
logger.info(f'Writing config-file to "{config_path}"')
with open(config_path, 'wb') as stream:
stream.write(yaml.dump(config).encode(ENCODING))
setup_script_filepath = os.path.join(project_path, '.circleci/setup-env.sh')
logger.info(f"Rendering Setup Script: {setup_script_filepath}")
with open(setup_script_filepath, 'wb') as stream:
rendered_script = render_template('setup-env.sh', {})
stream.write(rendered_script.encode(ENCODING))
build_pull_request_filepath = os.path.join(project_path, '.circleci/build-pull-request.sh')
logger.info(f'Rendering Pull Request: {build_pull_request_filepath}')
with open(build_pull_request_filepath, 'wb') as stream:
rendered_pr_script = render_template('build-pull-request.sh', {})
stream.write(rendered_pr_script.encode(ENCODING))
|
python
|
"""
protonate.py: Wrapper method for the reduce program: protonate (i.e., add hydrogens) a pdb using reduce
and save to an output file.
Pablo Gainza - LPDI STI EPFL 2019
Released under an Apache License 2.0
"""
from subprocess import Popen, PIPE
import os
from ipdb import set_trace
def protonate(in_pdb_file, out_pdb_file):
# protonate (i.e., add hydrogens) a pdb using reduce and save to an output file.
# in_pdb_file: file to protonate.
# out_pdb_file: output file where to save the protonated pdb file.
# Remove protons first, in case the structure is already protonated
args = ["reduce", "-Trim", in_pdb_file]
p2 = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p2.communicate()
outfile = open(out_pdb_file, "w")
outfile.write(stdout.rstrip())
outfile.close()
# Now add them again.
args = ["reduce", "-HIS", out_pdb_file]
p2 = Popen(args, stdout=PIPE, stderr=PIPE)
stdout, stderr = p2.communicate()
outfile = open(out_pdb_file, "w")
outfile.write(stdout)
outfile.close()
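# Usage sketch (assumes the external `reduce` binary is installed and on PATH, and
# that the input file exists; the file names below are placeholders, not taken from
# the original file):
if __name__ == "__main__":
    protonate("input.pdb", "input.protonated.pdb")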
|
python
|
import collections.abc
import logging
from arekit.common.data.input.providers.opinions import OpinionProvider
logger = logging.getLogger(__name__)
class BaseRowProvider(object):
""" Base provider for rows that suppose to be filled into BaseRowsStorage.
"""
# region protected methods
def _provide_rows(self, parsed_news, text_opinion_linkage, idle_mode):
raise NotImplementedError()
# endregion
def iter_by_rows(self, opinion_provider, doc_ids_iter, idle_mode):
assert(isinstance(opinion_provider, OpinionProvider))
        assert(isinstance(doc_ids_iter, collections.abc.Iterable))
for parsed_news, linkage in opinion_provider.iter_linked_opinions(doc_ids_iter):
rows_it = self._provide_rows(parsed_news=parsed_news,
text_opinion_linkage=linkage,
idle_mode=idle_mode)
for row in rows_it:
yield row
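# Illustrative subclass sketch (hypothetical; the row layout and attribute names below
# are assumptions for demonstration, not AREkit's actual serialization code):
#
#   class DictRowProvider(BaseRowProvider):
#       def _provide_rows(self, parsed_news, text_opinion_linkage, idle_mode):
#           # Emit one lightweight row per linked opinion; do minimal work in idle mode.
#           for opinion in text_opinion_linkage:
#               yield {} if idle_mode else {"opinion": opinion, "news": parsed_news}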
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'pan-python',
]
test_requirements = [
'pan-python',
'mock',
'pytest',
]
setup(
name='pandevice',
version='0.6.3',
description='Framework for interacting with Palo Alto Networks devices via API',
long_description='The Palo Alto Networks Device Framework is a way to interact with Palo Alto Networks devices (including Next-generation Firewalls and Panorama) using the device API that is object oriented and conceptually similar to interaction with the device via the GUI or CLI.',
author='Palo Alto Networks',
author_email='[email protected]',
url='https://github.com/PaloAltoNetworks/pandevice',
packages=[
'pandevice',
],
package_dir={'pandevice':
'pandevice'},
include_package_data=True,
install_requires=requirements,
license="ISC",
zip_safe=False,
keywords='pandevice',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=['pytest-runner', ],
)
|
python
|
class Activation(object):
def __init__(self):
pass
from deepend.activations.Softmax import Softmax
from deepend.activations.LeakyReLU import LeakyReLU
from deepend.activations.Linear import Linear
from deepend.activations.TanH import TanH
from deepend.activations.Sigmoid import Sigmoid
from deepend.activations.ReLU import ReLU
activation_mapping = {
"softmax": Softmax,
"leaky_relu": LeakyReLU,
"linear": Linear,
"tanh": TanH,
"sigmoid": Sigmoid,
"relu": ReLU,
}
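# Lookup sketch (assumes callers resolve activations by their lowercase key; the
# constructor call below is illustrative and not taken from the original file):
#
#   activation_cls = activation_mapping["relu"]
#   activation = activation_cls()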
|
python
|
"""Exemplo melhorias de Contraste."""
from PIL import Image, ImageEnhance
im = Image.open('beijo.jpg')
contrast = ImageEnhance.Contrast(im)
contrast.enhance(1.2)
contrast.show()
|
python
|