```python
from .duration import Duration
from .numeric import Numeric
from .rate import Rate
from .size import Size
```
```python
# Dynamo Python node: IN, OUT and UnwrapElement are provided by the Dynamo environment.
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *

fams = UnwrapElement(IN[0])
ptypes = list()
for fam in fams:
    if fam.GetType().ToString() == "Autodesk.Revit.DB.Family":
        ptypes.append(fam.FamilyPlacementType)
    else:
        ptypes.append(None)
OUT = ptypes
```
```python
from sys import stdin, stdout
from operator import itemgetter

cases = int(stdin.readline())
for c in range(cases):
    text = stdin.readline().strip().lower()
    text = [ch for ch in text if ch.isalpha()]
    freq = {}
    max_f = 0
    for ch in text:
        if ch not in freq:
            freq[ch] = 1
        else:
            freq[ch] += 1
        if freq[ch] > max_f:
            max_f = freq[ch]
    chars = []
    for key, f in freq.items():
        if f == max_f:
            chars.append(key)
    stdout.write("{}\n".format("".join(sorted(chars))))
```
```python
from temboo.Library.LinkedIn.PeopleAndConnections.GetMemberProfile import (
    GetMemberProfile,
    GetMemberProfileInputSet,
    GetMemberProfileResultSet,
    GetMemberProfileChoreographyExecution,
)
```
```python
import time
import asyncio
import concurrent.futures
from functools import partial


def a():
    time.sleep(1)
    return 'A'


async def b():
    await asyncio.sleep(1)
    return 'B'


async def c():
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, a)


def show_perf(func):
    print('*' * 20)
    start = time.perf_counter()
    asyncio.run(func())
    print(f'{func.__name__} Cost: {time.perf_counter() - start}')


async def c1():
    loop = asyncio.get_running_loop()
    await asyncio.gather(
        loop.run_in_executor(None, a),
        b()
    )


async def c2():
    await asyncio.gather(b(), c())


async def c3():
    loop = asyncio.get_running_loop()
    with concurrent.futures.ProcessPoolExecutor() as e:
        print(await asyncio.gather(
            loop.run_in_executor(e, a),
            b()
        ))


if __name__ == '__main__':
    for f in (c1, c2, c3):
        show_perf(f)
```
import logging def _cancel_pending_orders(client, orders): pending = [(order['variety'], order['order_id']) for order in orders if 'OPEN' in order['status']] # ToDo: if doesn't work in time, try to run it async. for p in pending: try: order_id = client.cancel_order(*p) logging.info('Order {} was canceled'.format(order_id)) except Exception as err: logging.error(err) def _handle_quantity_diff(client, quantity_diff): if quantity_diff > 0: try: order_id = client.place_order( variety=client.VARIETY_REGULAR, tradingsymbol='SBIN', exchange=client.EXCHANGE_NSE, transaction_type=client.TRANSACTION_TYPE_SELL, quantity=quantity_diff, order_type=client.ORDER_TYPE_MARKET, product=client.PRODUCT_MIS) logging.info("Order placed. ID is: {}".format(order_id)) except Exception as err: logging.error(err) elif quantity_diff < 0: try: order_id = client.place_order( variety=client.VARIETY_REGULAR, tradingsymbol='SBIN', exchange=client.EXCHANGE_NSE, transaction_type=client.TRANSACTION_TYPE_BUY, quantity=abs(quantity_diff), order_type=client.ORDER_TYPE_MARKET, product=client.PRODUCT_MIS) logging.info("Order placed. ID is: {}".format(order_id)) except Exception as err: logging.error(err) def mis_mode(server): client = server.kite_loginer.get_client() kite_trader = server.kite def tick(): orders = client.orders() _cancel_pending_orders(client, orders) positions = client.positions() quantity_diff = positions['day'][0]['day_buy_quantity'] - positions['day'][0]['day_sell_quantity'] _handle_quantity_diff(client, quantity_diff) kite_trader.reset() # logging.info('Kite Agent balance and inventory were reset to {}, {}'.format( # kite_trader.balance, kite_trader.inventory)) #104 for improvised def_reset() function in the kite_8.py logging.info('Kite Agent balance and inventory were reset to {}, {}, {}, {}, {}, {}'.format( kite_trader.balance, kite_trader.inventory, kite_trader._queue, kite_trader.buy_price_queue, kite_trader.sell_price_queue, kite_trader.actions_queue)) return tick
python
```python
import appdaemon.plugins.hass.hassapi as hass
import os
import glob
import random


#
# A helper app providing random template selection and rendering.
#
# This app could be used by Smart Assistants to provide some "randomness" in the assistant words.
#

# noinspection PyAttributeOutsideInit
class AssistantTemplate(hass.Hass):

    def initialize(self):
        self.add_namespace('assistant')
        self.register_service('assistant/template', self.template_service, namespace='assistant')
        self.language = self.args['language']
        self.templates_path = self.args['templates_path']
        self.log("Assistant Template support started for language %s", self.language, level='INFO')

    def template_service(self, namespace, domain, service, data):
        self.log("Service: %r", data, level='DEBUG')
        tmpl_name = data['template']
        tmpl_variables = data.get('variables')
        tmpl_dir = self.template_dirname(tmpl_name)
        if os.path.isdir(tmpl_dir):
            tmpl_file = self.select_template(tmpl_dir)
        else:
            tmpl_file = self.template_filename(tmpl_name)
        return self.render_template_file(tmpl_file, tmpl_variables)

    # noinspection PyMethodMayBeStatic
    def select_template(self, tmpl_dir):
        return random.choice(glob.glob(os.path.join(tmpl_dir, '*.jinja2')))

    def render_template_file(self, tmpl_file, variables):
        with open(tmpl_file, mode='r', encoding='utf-8') as tmpl_fp:
            tmpl_content = tmpl_fp.read()
        self.log('Rendering template text: %r', tmpl_content)
        return self.call_service('template/render', template=tmpl_content, variables=variables, namespace='hass')

    def template_dirname(self, name):
        return os.path.join(self.templates_path, self.language, name)

    def template_filename(self, name):
        return os.path.join(self.templates_path, self.language, name + '.jinja2')
```
#!/usr/bin/env python ################################################################ # # osm.py - Obsidian Settings Manager # Copyright 2021 Peter Kaminski. Licensed under MIT License. # https://github.com/peterkaminski/obsidian-settings-manager # ################################################################ VERSION = 'v0.3.0' APPNAME = 'Obsidian Settings Manager' import argparse import datetime import json import shutil import subprocess import sys import traceback from pathlib import Path # set up argparse def init_argparse(): # TODO: make "action" flags (list, update, execute, etc.) mutually exclusive parser = argparse.ArgumentParser(description='Manage Obsidian settings across multiple vaults.') parser.add_argument('--list', '-l', action='store_true', help='list Obsidian vaults') parser.add_argument('--update', '-u', help='update Obsidian vaults from UPDATE vault') parser.add_argument('--rm', action='store_true', help='with --update, remove .obsidian and create again, rather than retain old .obsidian files') parser.add_argument('--execute', '-x', help='run EXECUTE command within each vault (use caution!)') parser.add_argument('--backup-list', action='store_true', help='list ISO 8601-formatted .obsidian backup files from all vaults') parser.add_argument('--backup-remove', action='store_true', help='remove ISO 8601-formatted .obsidian backup files from all vaults') parser.add_argument('--version', '-v', action='store_true', help='show version and exit') return parser # find all the vaults Obsidian is tracking def get_vault_paths(): vault_paths = [] # read primary file # location per https://help.obsidian.md/Advanced+topics/How+Obsidian+stores+data#System+directory # (TODO: should be parameterized and support other OSes) with open(Path.home() / 'Library/Application Support/obsidian/obsidian.json') as infile: obsidian = json.load(infile) vaults = obsidian['vaults'] for vault in vaults: # skip Help or other system directory vaults # TODO: support other OSes if Path(vaults[vault]['path']).parent == Path.home() / 'Library/Application Support/obsidian': continue vault_paths.append(vaults[vault]['path']) # sort paths (case-insensitive) vault_paths.sort(key=str.lower) # return paths return vault_paths # helper for `copy_settings()` # does nothing if `src` does not exist def copy_settings_file(datestring, src, dest, filename): src_target = Path(src) / filename dest_target = Path(dest) / filename if src_target.exists(): if dest_target.exists(): dest_target.rename(str(dest_target)+datestring) shutil.copy2(str(src_target), str(dest_target)) # helper for `copy_settings()` # does nothing if `src` does not exist def copy_settings_dir(datestring, src, dest, dirname): src_target = Path(src) / dirname dest_target = Path(dest) / dirname if src_target.exists(): if dest_target.exists(): dest_target.rename(str(dest_target)+datestring) shutil.copytree(str(src_target), dest_target) # copy the usual settings files from `src` to `dest` # `dest` is backed up to same filename with a ISO 8601-style # date string ('2021-05-23T23:38:32.509386Z') in UTC appended, # unless `--rm` is given def copy_settings(src, dest, args): # don't operate on self if str(src) == str(dest): return print(f"Copying '{src}' configuration to '{dest}'") # expand src and dest src = Path(src) / '.obsidian' dest = Path(dest) / '.obsidian' # get current date/time datestring = f"-{datetime.datetime.utcnow().isoformat()}Z" # if --rm, remove and recreate .obsidian if args.rm: shutil.rmtree(str(dest), ignore_errors=True) dest.mkdir() # copy 
config copy_settings_file(datestring, src, dest, 'config') # copy starred.json copy_settings_file(datestring, src, dest, 'starred.json') # copy file used for vaults distributed to others via git copy_settings_file(datestring, src, dest, 'README.md') # copy plugins copy_settings_dir(datestring, src, dest, 'plugins') # copy snippets copy_settings_dir(datestring, src, dest, 'snippets') def backup_list_remove(vault_path, args): dir_path = Path(vault_path) / '.obsidian' iso_8601_glob = '*-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9]*Z' for dest in dir_path.glob(iso_8601_glob): if args.backup_list: print(dest) elif args.backup_remove: if dest.is_file(): dest.unlink() elif dest.is_dir(): shutil.rmtree(str(dest), ignore_errors=True) def main(): # set up argparse argparser = init_argparse(); args = argparser.parse_args(); # do stuff try: vault_paths = get_vault_paths() # decide what to do if args.version: print(f'{APPNAME} {VERSION}') elif args.list: for vault_path in vault_paths: print(Path(vault_path).relative_to(Path.home())) elif args.update: # TODO: check if given UPDATE vault is really an Obsidian vault for vault_path in vault_paths: copy_settings(Path.home() / args.update, vault_path, args) elif args.backup_list or args.backup_remove: for vault_path in vault_paths: backup_list_remove(vault_path, args) elif args.execute: for vault_path in vault_paths: print(f'\n# {vault_path}\n') p = subprocess.Popen(args.execute, cwd=vault_path, shell=True) p.wait() else: argparser.print_help(sys.stderr) except Exception: traceback.print_exc() if __name__ == "__main__": exit(main())
python
import os import argparse from misc import date_str, get_dir def model_args(): parser = argparse.ArgumentParser() # Paths parser.add_argument('--train_dir', help='Directory of train data', default='./data/poetryDB/txt/') # parser.add_argument('--test_dir', # help='Directory of test data', # default='./data/bitmoji/test') parser.add_argument('--save_dir', help='Directory to save logs and model checkpoints', default=os.path.join('.', 'save', date_str())) parser.add_argument('--load_path', help='Path of the model checkpoint to load') parser.add_argument('--data_reader_path', help='Path to save/load the DataReader object', default=os.path.join('.', 'save', 'reader.pkl')) # Model Architecture parser.add_argument('--cell_size', help='Minibatch size', default=256, type=int) parser.add_argument('--num_layers', help='Minibatch size', default=3, type=int) # Hyperparams parser.add_argument('--batch_size', help='Minibatch size', default=128, type=int) parser.add_argument('--seq_len', help='Sequence length (the number of tokens in each element of the batch)', default=20, type=int) parser.add_argument('--lr', help='Learning rate', default=1e-3, type=float) parser.add_argument('--lr_decay_steps', help='The number of steps over which to decay by a multiple of lr_decay_rate', default=200, type=int) parser.add_argument('--lr_decay_rate', help='The multiple by which to decay the learning rate every lr_decay_steps steps', default=0.9, type=float) parser.add_argument('--keep_prob', help='The keep probability for dropout (always 1 for testing)', default=0.5, type=float) # Training parser.add_argument('--max_steps', help='Max number of steps to train', default=30000, type=int) parser.add_argument('--summary_freq', help='Frequency (in steps) with which to write tensorboard summaries', default=100, type=int) parser.add_argument('--model_save_freq', help='Frequency (in steps) with which to save the model', default=1000, type=int) parser.add_argument('--inference_freq', help='Frequency (in steps) with which to perform inference', default=100, type=int) # Inference parser.add_argument('--inference', help="Use the model to generate new text.", action='store_true') parser.add_argument('--argmax', help="Use argmax to choose the next word, rather than sampling.", action='store_true') parser.add_argument('--max_gen_len', help="The maximum number of words to generate.", default=20, type=int) parser.add_argument('--primer', help="The priming text to use for inference. 
Random if not supplied", default=None) # System parser.add_argument('--gpu', help='Comma separated list of GPU(s) to use.') args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu get_dir(args.save_dir) return args def export_args(): parser = argparse.ArgumentParser() parser.add_argument('--export_dir', help='Directory to save the data', default='save/serving/') parser.add_argument('--load_path', help='Path of the model checkpoint to load', default='save/hal-3layer/model-9001') parser.add_argument('--version', help='Version of the model to save', default=0, type=int) parser.add_argument('--data_reader_path', help='Path to save/load the DataReader object', default=os.path.join('.', 'save', 'reader.pkl')) # Model Architecture parser.add_argument('--cell_size', help='Minibatch size', default=256, type=int) parser.add_argument('--num_layers', help='Minibatch size', default=3, type=int) # Hyperparams parser.add_argument('--batch_size', help='Minibatch size', default=128, type=int) parser.add_argument('--seq_len', help='Sequence length (the number of tokens in each element of the batch)', default=20, type=int) parser.add_argument('--keep_prob', help='The keep probability for dropout (always 1 for testing)', default=1, type=float) parser.add_argument('--lr', help='Learning rate', default=1e-3, type=float) parser.add_argument('--lr_decay_steps', help='The number of steps over which to decay by a multiple of lr_decay_rate', default=200, type=int) parser.add_argument('--lr_decay_rate', help='The multiple by which to decay the learning rate every lr_decay_steps steps', default=0.9, type=float) # Inference parser.add_argument('--argmax', help="Use argmax to choose the next word, rather than sampling.", action='store_true') parser.add_argument('--max_gen_len', help="The maximum number of words to generate.", default=20, type=int) args = parser.parse_args() return args
python
# Copyright 2020 Mark Dickinson. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The CFGraph class provides the graph structure for the control-flow graph. Conceptually, our graph is very similar to a DFA graph for a regular expression. It consists of: - a set of nodes - for each node, a set of edge labels (strings) - for each node and edge label, a target node The set of operations that can mutate the graph is very limited: - a new node can be added, together with edges to existing nodes - an isolated node can be removed - a node with no outgoing edges can be identified with another node Parallel edges (with different labels) and self-loops are permitted. Nodes can be any hashable object. """ from typing import Container, Dict, Mapping, Optional, Set, Tuple, TypeVar #: Type of nodes. For now, require only that nodes are hashable. NodeType = TypeVar("NodeType") class CFGraph(Container[NodeType]): """ The directed graph underlying the control flow graph. """ #: The collection of nodes. _nodes: Set[NodeType] #: Mapping from source node and edge label to target node. _edges: Dict[NodeType, Dict[str, NodeType]] #: Mapping from target node to collection of (source node, edge) pairs. _backedges: Dict[NodeType, Set[Tuple[NodeType, str]]] def __init__(self) -> None: self._nodes = set() self._edges = {} self._backedges = {} # Functions that change the state of the graph. def add_node( self, node: NodeType, *, edges: Optional[Mapping[str, NodeType]] = None, ) -> None: """ Add a new node, along with edges to existing nodes to the graph. Parameters ---------- node The node to be added to the graph. edges Edges from the given node, if any, provided as a mapping from edge labels (strings) to target nodes. The target nodes should already be in the graph. Raises ------ ValueError If the given node is already in the graph, or if any of the target nodes for edges are not already in the graph. """ if node in self: raise ValueError(f"node {node} is already present in the graph") self._add_node(node) if edges is not None: for label, target in edges.items(): if target not in self or target == node: raise ValueError( f"target {target} for edge {label} is not in the graph" ) self._add_edge(node, label, target) def remove_node(self, node: NodeType) -> None: """ Remove an isolated node from the graph. Fails if there are edges to or from that node: all edges must be removed before it's possible to remove the node itself. Parameters ---------- node: NodeType The node to be removed. Raises ------ ValueError If the node is not in the graph, or if the node is not isolated. """ if node not in self._nodes: raise ValueError(f"node {node} not present in the graph") if self._edges[node]: raise ValueError(f"node {node} is not isolated: it has forward edges") if self._backedges[node]: raise ValueError(f"node {node} is not isolated: it has back edges") self._nodes.remove(node) def collapse_node(self, dummy: NodeType, target: NodeType) -> None: """ Identify two nodes. 
Identifies the *dummy* node with the *target* node, and removes the *dummy* node from the graph. The dummy node should not have any outward edges. Note that this is the only mechanism for introducing cycles into the graph. Parameters ---------- dummy Node to be collapsed and removed target Node to identify *dummy* with Raises ------ ValueError If *dummy* has any outward edges, or if either of dummy or target is not in the graph. """ if dummy not in self: raise ValueError(f"node {dummy} is not in the graph") if target not in self: raise ValueError(f"node {target} is not in the graph") if self._edges[dummy]: raise ValueError(f"node {dummy} has outward edges") if dummy == target: raise ValueError(f"nodes {dummy} and {target} must be distinct") edges_to_dummy = self.edges_to(dummy) for source, label in edges_to_dummy.copy(): self._remove_edge(source, label) self._add_edge(source, label, target) self.remove_node(dummy) # Functions for examining or traversing the graph. def edge(self, source: NodeType, label: str) -> NodeType: """ Get the target of a given edge. """ return self._edges[source][label] def edge_labels(self, source: NodeType) -> Set[str]: """ Get labels of all edges. """ return set(self._edges[source].keys()) def edges_to(self, target: NodeType) -> Set[Tuple[NodeType, str]]: """ Set of pairs (source, label) representing edges to this node. """ return self._backedges[target] # Support for membership testing def __contains__(self, node: object) -> bool: """ Determine whether a given node is contained in the graph. """ return node in self._nodes # Low-level functions def _add_node(self, node: NodeType) -> None: """ Add a node to the graph. Raises ValueError on an attempt to add a node that's already in the graph. """ assert node not in self._nodes self._nodes.add(node) self._edges[node] = {} self._backedges[node] = set() def _add_edge(self, source: NodeType, label: str, target: NodeType) -> None: """ Add a labelled edge to the graph. """ assert label not in self._edges[source] self._edges[source][label] = target assert (source, label) not in self._backedges[target] self._backedges[target].add((source, label)) def _remove_edge(self, source: NodeType, label: str) -> None: """ Remove a labelled edge from the graph. """ target = self._edges[source][label] self._backedges[target].remove((source, label)) self._edges[source].pop(label)
python
import logging import os import sys import pandas as pd import re from collections import OrderedDict import numpy as np import argparse import zipfile import paramiko import time from sqlalchemy.exc import IntegrityError from dataactcore.models.domainModels import DUNS from dataactcore.interfaces.db import GlobalDB from dataactcore.logging import configure_logging from dataactvalidator.health_check import create_app from dataactvalidator.scripts.loaderUtils import clean_data, insert_dataframe from dataactcore.config import CONFIG_BROKER logger = logging.getLogger(__name__) REMOTE_SAM_DIR = '/current/SAM/2_FOUO/UTF-8/' def get_config(): sam_config = CONFIG_BROKER.get('sam_duns') if sam_config: return sam_config.get('username'), sam_config.get('password'), sam_config.get('host'), \ sam_config.get('port') return None, None, None, None, None def get_relevant_models(data, benchmarks=False): # Get a list of the duns we're gonna work off of to prevent multiple calls to the database if benchmarks: get_models = time.time() logger.info("Getting relevant models") duns_found = [duns.strip().zfill(9) for duns in list(data["awardee_or_recipient_uniqu"].unique())] dun_objects_found = sess.query(DUNS).filter(DUNS.awardee_or_recipient_uniqu.in_(duns_found)) models = {duns.awardee_or_recipient_uniqu: duns for duns in dun_objects_found} logger.info("Getting models with activation dates already set") activated_models = {duns_num: duns for duns_num, duns in models.items() if duns.activation_date is not None} if benchmarks: logger.info("Getting models took {} seconds".format(time.time() - get_models)) return models, activated_models def load_duns_by_row(data, sess, models, activated_models, benchmarks=False): # data = activation_check(data, activated_models, benchmarks).where(pd.notnull(data), None) update_duns(models, data, benchmarks=benchmarks) sess.add_all(models.values()) # Removed this function when adding registration_date # def activation_check(data, activated_models, benchmarks=False): # # if activation_date's already set, keep it, otherwise update it (default) # logger.info("going through activation check") # if benchmarks: # activation_check_start = time.time() # lambda_func = (lambda duns_num: pd.Series([activated_models[duns_num].activation_date # if duns_num in activated_models else np.nan])) # data = data.assign(old_activation_date=data["awardee_or_recipient_uniqu"].apply(lambda_func)) # data.loc[pd.notnull(data["old_activation_date"]), "activation_date"] = data["old_activation_date"] # del data["old_activation_date"] # if benchmarks: # logger.info("Activation check took {} seconds".format(time.time()-activation_check_start)) # return data def update_duns(models, new_data, benchmarks=False): """Modify existing models or create new ones""" logger.info("Updating duns") if benchmarks: update_duns_start = time.time() for _, row in new_data.iterrows(): awardee_or_recipient_uniqu = row['awardee_or_recipient_uniqu'] if awardee_or_recipient_uniqu not in models: models[awardee_or_recipient_uniqu] = DUNS() for field, value in row.items(): setattr(models[awardee_or_recipient_uniqu], field, value) if benchmarks: logger.info("Updating duns took {} seconds".format(time.time() - update_duns_start)) def clean_sam_data(data): return clean_data(data, DUNS, { "awardee_or_recipient_uniqu": "awardee_or_recipient_uniqu", "activation_date": "activation_date", "deactivation_date": "deactivation_date", "registration_date": "registration_date", "expiration_date": "expiration_date", "last_sam_mod_date": 
"last_sam_mod_date", "sam_extract_code": "sam_extract_code", "legal_business_name": "legal_business_name" }, {}) def parse_sam_file(file_path, sess, monthly=False, benchmarks=False): parse_start_time = time.time() logger.info("Starting file " + str(file_path)) dat_file_name = os.path.splitext(os.path.basename(file_path))[0]+'.dat' sam_file_type = "MONTHLY" if monthly else "DAILY" dat_file_date = re.findall(".*{}_(.*).dat".format(sam_file_type), dat_file_name)[0] with create_app().app_context(): column_header_mapping = { "awardee_or_recipient_uniqu": 0, "sam_extract_code": 4, "registration_date": 6, "expiration_date": 7, "last_sam_mod_date": 8, "activation_date": 9, "legal_business_name": 10 } column_header_mapping_ordered = OrderedDict(sorted(column_header_mapping.items(), key=lambda c: c[1])) # Initial sweep of the file to see rows and possibly what DUNS we're updating if benchmarks: initial_sweep = time.time() nrows = 0 with zipfile.ZipFile(file_path) as zip_file: with zip_file.open(dat_file_name) as dat_file: nrows = len(dat_file.readlines()) if benchmarks: logger.info("Initial sweep took {} seconds".format(time.time() - initial_sweep)) block_size = 10000 batches = nrows//block_size # skip the first line again if the last batch is also the first batch skiplastrows = 2 if batches == 0 else 1 last_block_size = (nrows % block_size)-skiplastrows batch = 0 added_rows = 0 while batch <= batches: skiprows = 1 if batch == 0 else (batch*block_size) nrows = (((batch+1)*block_size)-skiprows) if (batch < batches) else last_block_size logger.info('Loading rows %s to %s', skiprows+1, nrows+skiprows) with zipfile.ZipFile(file_path) as zip_file: with zip_file.open(dat_file_name) as dat_file: csv_data = pd.read_csv(dat_file, dtype=str, header=None, skiprows=skiprows, nrows=nrows, sep='|', usecols=column_header_mapping_ordered.values(), names=column_header_mapping_ordered.keys(), quoting=3) # add deactivation_date column for delete records lambda_func = (lambda sam_extract: pd.Series([dat_file_date if sam_extract == "1" else np.nan])) csv_data = csv_data.assign(deactivation_date=pd.Series([np.nan], name='deactivation_date') if monthly else csv_data["sam_extract_code"].apply(lambda_func)) # removing rows where DUNS number isn't even provided csv_data = csv_data.where(csv_data["awardee_or_recipient_uniqu"].notnull()) # cleaning and replacing NaN/NaT with None's csv_data = clean_sam_data(csv_data.where(pd.notnull(csv_data), None)) if monthly: logger.info("Adding all monthly data with bulk load") if benchmarks: bulk_month_load = time.time() del csv_data["sam_extract_code"] insert_dataframe(csv_data, DUNS.__table__.name, sess.connection()) if benchmarks: logger.info("Bulk month load took {} seconds".format(time.time()-bulk_month_load)) else: add_data = csv_data[csv_data.sam_extract_code == '2'] update_delete_data = csv_data[(csv_data.sam_extract_code == '3') | (csv_data.sam_extract_code == '1')] for dataframe in [add_data, update_delete_data]: del dataframe["sam_extract_code"] if not add_data.empty: try: logger.info("Attempting to bulk load add data") insert_dataframe(add_data, DUNS.__table__.name, sess.connection()) except IntegrityError: logger.info("Bulk loading add data failed, loading add data by row") sess.rollback() models, activated_models = get_relevant_models(add_data, benchmarks=benchmarks) logger.info("Loading add data ({} rows)".format(len(add_data.index))) load_duns_by_row(add_data, sess, models, activated_models, benchmarks=benchmarks) if not update_delete_data.empty: models, activated_models = 
get_relevant_models(update_delete_data, benchmarks=benchmarks) logger.info("Loading update_delete data ({} rows)".format(len(update_delete_data.index))) load_duns_by_row(update_delete_data, sess, models, activated_models, benchmarks=benchmarks) sess.commit() added_rows += nrows batch += 1 logger.info('%s DUNS records inserted', added_rows) if benchmarks: logger.info("Parsing {} took {} seconds with {} rows".format(dat_file_name, time.time()-parse_start_time, added_rows)) def process_from_dir(root_dir, file_name, sess, local, monthly=False, benchmarks=False): file_path = os.path.join(root_dir, file_name) if not local: logger.info("Pulling {}".format(file_name)) with open(file_path, "wb") as zip_file: sftp.getfo(''.join([REMOTE_SAM_DIR, '/', file_name]), zip_file) parse_sam_file(file_path, sess, monthly=monthly, benchmarks=benchmarks) if not local: os.remove(file_path) def get_parser(): parser = argparse.ArgumentParser(description="Get the latest data from SAM and update duns table. By default, it " "loads the latest daily file.") parser.add_argument("--historic", "-i", action="store_true", help='load the oldest monthly zip and all the daily' 'files afterwards from the directory.') parser.add_argument("--local", "-l", type=str, default=None, help='work from a local directory') parser.add_argument("--monthly", "-m", type=str, default=None, help='load a local monthly file') parser.add_argument("--daily", "-d", type=str, default=None, help='load a local daily file') parser.add_argument("--benchmarks", "-b", action="store_true", help='log times of operations for testing') parser.add_argument("--update", "-u", action="store_true", help='Run all daily files since latest last_sam_mod_date in table') return parser if __name__ == '__main__': parser = get_parser() args = parser.parse_args() historic = args.historic local = args.local monthly = args.monthly daily = args.daily benchmarks = args.benchmarks update = args.update with create_app().app_context(): configure_logging() sess = GlobalDB.db().session if monthly and daily: logger.error("For loading a single local file, you must provide either monthly or daily.") sys.exit(1) if historic and update: logger.error("For multiple file loads you must choose either historic or update.") sys.exit(1) elif (monthly or daily) and local: logger.error("Local directory specified with a local file.") sys.exit(1) elif monthly: parse_sam_file(monthly, sess=sess, monthly=True, benchmarks=benchmarks) elif daily: parse_sam_file(daily, sess=sess, benchmarks=benchmarks) else: # dealing with a local or remote directory if not local: root_dir = CONFIG_BROKER["d_file_storage_path"] username, password, host, port = get_config() if None in (username, password): logger.error("Missing config elements for connecting to SAM") sys.exit(1) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect( hostname=host, username=username, password=password ) sftp = client.open_sftp() # dirlist on remote host dirlist = sftp.listdir(REMOTE_SAM_DIR) else: root_dir = local dirlist = os.listdir(local) # generate chronological list of daily and monthly files sorted_monthly_file_names = sorted([monthly_file for monthly_file in dirlist if re.match(".*MONTHLY_\d+\.ZIP", monthly_file.upper())]) sorted_daily_file_names = sorted([daily_file for daily_file in dirlist if re.match(".*DAILY_\d+\.ZIP", daily_file.upper())]) if historic or update: if historic: if sorted_monthly_file_names: process_from_dir(root_dir, 
sorted_monthly_file_names[0], sess, local, monthly=True, benchmarks=benchmarks) else: logger.info("No monthly file found.") if sorted_daily_file_names: if historic: if sorted_monthly_file_names: earliest_daily_file = sorted_monthly_file_names[0].replace("MONTHLY", "DAILY") else: # Insert item into sorted file list with date of last sam mod last_update = sess.query(DUNS.last_sam_mod_date).\ order_by(DUNS.last_sam_mod_date.desc()). \ filter(DUNS.last_sam_mod_date.isnot(None)). \ limit(1).one()[0].strftime("%Y%m%d") earliest_daily_file = re.sub("_DAILY_[0-9]{8}\.ZIP", "_DAILY_" + last_update + ".ZIP", sorted_daily_file_names[0]) if earliest_daily_file: sorted_full_list = sorted(sorted_daily_file_names + [earliest_daily_file]) daily_files_after = sorted_full_list[sorted_full_list.index(earliest_daily_file) + 1:] else: daily_files_after = sorted_daily_file_names if daily_files_after: for daily_file in daily_files_after: process_from_dir(root_dir, daily_file, sess, local, benchmarks=benchmarks) else: logger.info("No daily file found.") else: if sorted_daily_file_names: process_from_dir(root_dir, sorted_daily_file_names[-1], sess, local, benchmarks=benchmarks) else: logger.info("No daily file found.") sess.close()
python
```python
import os, csv

# relative path to find the csv file
os.chdir(os.path.abspath(os.path.dirname(__file__)))
path = os.getcwd()
my_path = os.path.join('.', 'Resources', 'budget_data.csv')

# defining our variables
totalMonths = 0
total = 0
averageChange = 0
greatestIncrease = 0
greatestDecrease = 0

# extra variables used during for loops
lastValue = 0
bigCheck = 0
smallCheck = 0

with open(my_path) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    for row in csvreader:
        # counts the number of rows
        totalMonths += 1
        # adds up total profit
        total += int(row[1])
        # if the difference between the current row's profit and that of the previous row
        # is greater than the script has seen, save the pertinent information to the
        # greatestIncrease variable and update the bigCheck variable
        if int(row[1]) - lastValue > bigCheck:
            greatestIncrease = row[0] + ' ($' + str(int(row[1]) - lastValue) + ')'
            bigCheck = int(row[1]) - lastValue
        # same, except for the greatest decrease
        if int(row[1]) - lastValue < smallCheck:
            greatestDecrease = row[0] + ' ($' + str(int(row[1]) - lastValue) + ')'
            smallCheck = int(row[1]) - lastValue
        lastValue = int(row[1])

# Takes the final value for profit/losses, subtracts the first value from it,
# then divides that by the total months to find the average change
with open(my_path) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    test = list(csvreader)
    averageChange = (int(test[85][1]) - int(test[0][1])) / totalMonths  # index 85 is the final row of this dataset

# prints out the analysis
print(f'''
Financial Analysis
----------------------
Total Months: {totalMonths}
Total: ${total}
Average Change: ${averageChange}
Greatest Increase in Profits: {greatestIncrease}
Greatest Decrease in Profits: {greatestDecrease}
''')

# saves to a text document
file1 = open("./Analysis/analysis.txt", "w")
file1.write(f'''
Financial Analysis
----------------------
Total Months: {totalMonths}
Total: ${total}
Average Change: ${averageChange}
Greatest Increase in Profits: {greatestIncrease}
Greatest Decrease in Profits: {greatestDecrease}
''')
file1.close()
```
```python
import numpy as np

ip_list = [int(x) for x in input().split()]
ip_list = np.asfarray(ip_list)


def listmul(ip_list):
    op_list = []
    for i in range(0, len(ip_list)):
        temp = 1
        for j in range(0, len(ip_list)):
            if i != j:
                temp = temp * ip_list[j]
        op_list.append(temp)
    return op_list


op_list = listmul(ip_list)
print(op_list)
```
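A quick sanity check of `listmul`, shown as a sketch: the sample values are made up for illustration, and the call bypasses the script's `input()` line.

```python
import numpy as np

# Assumes listmul from the snippet above. Each entry is the product of all the
# other elements: [2*3*4, 1*3*4, 1*2*4, 1*2*3].
sample = np.asfarray([1, 2, 3, 4])
assert listmul(sample) == [24.0, 12.0, 8.0, 6.0]
```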
```python
from time import time
from typing import Any

from flask import render_template


def login_render(auth_url: str) -> Any:
    """Return login page.

    Arguments:
        auth_url {str} -- Link to last.fm authorization page.
    """
    return render_template("login.html", auth_url=auth_url, timestamp=time())
```
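A minimal sketch of how `login_render` might be wired into a route; the `Flask` app object, the route path, and the example authorization URL are assumptions for illustration, not part of the original module.

```python
from flask import Flask

app = Flask(__name__)


@app.route("/login")
def login():
    # Hypothetical last.fm authorization URL; a real one would carry an API key.
    auth_url = "https://www.last.fm/api/auth/?api_key=YOUR_API_KEY"
    return login_render(auth_url)
```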
```python
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 18:10:42 2020

Small module to allow multiprocessing of the point in polygon problem

@author: Matthew Varnam - The University of Manchester
@email: matthew.varnam(-at-)manchester.ac.uk
"""

# Import numpy for mathematical calculations
import numpy as np

# Import shapely to create easily queryable objects
from shapely.geometry import Point, Polygon
from shapely.strtree import STRtree


# Create controlling worker to operate multiple procedures
def worker_pp(point_poly_list):
    # Create global variables to be used by the procedures
    global tree
    global polygon_list
    global px_points
    global index_by_id

    # Extract points and polygon lists from initialiser function argument
    point_list, poly_list = point_poly_list
    px_points = np.array(point_list)
    sorted_polygons = np.array(poly_list)

    # Convert to shapely class Points and Polygons
    point_list = [Point(point) for point in px_points]
    polygon_list = [Polygon(poly) for poly in sorted_polygons]

    # Create STRtree to speed up checking of points and polygons
    tree = STRtree(point_list)

    # Create dictionary to index point list for faster querying
    index_by_id = dict((id(pt), i) for i, pt in enumerate(point_list))


# Procedure function to be called multiple times to identify points in polygons
def proc_pp(*args):
    # Choose the polygon matching the index provided by the multiprocessing Pool
    k = args[0]
    poly = polygon_list[k]

    # Conduct two things - query the STRtree then confirm an intersection
    valid_ids = [(index_by_id[id(pt)]) for pt in tree.query(poly) if pt.intersects(poly)]

    # Find the coordinates of the points that lie inside the polygon
    valid_points = px_points[valid_ids]
    returner = valid_points.tolist()
    return k, returner
```
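A minimal sketch of how `worker_pp` and `proc_pp` could be driven from a `multiprocessing.Pool`: `worker_pp` is used as the pool initializer so every worker process builds its own STRtree, and `proc_pp` is mapped over polygon indices. The sample points and polygons below are illustrative assumptions, not data from the module.

```python
from multiprocessing import Pool

# Illustrative data: three points and two unit squares (not from the module itself).
points = [(0.5, 0.5), (2.5, 2.5), (0.2, 0.8)]
polygons = [[(0, 0), (1, 0), (1, 1), (0, 1)],
            [(2, 2), (3, 2), (3, 3), (2, 3)]]

if __name__ == '__main__':
    with Pool(initializer=worker_pp, initargs=((points, polygons),)) as pool:
        # Each result is (polygon_index, coordinates of the points inside that polygon).
        results = pool.map(proc_pp, range(len(polygons)))
    print(results)
```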
import sys, os import librosa import torch import numpy as np from typing import Union, Tuple, List from collections import defaultdict import configparser config = configparser.ConfigParser(allow_no_value=True) config.read("config.ini") from vectorizer.model import Model from vectorizer.utils import chunk_data from void.utils import Segmenter from spectralcluster import SpectralClusterer from scipy.optimize import linear_sum_assignment from scipy.spatial import distance class ToolBox(object): def __init__(self, use_cpu:bool=False): self.use_cpu = use_cpu self._load() def _load(self): self.model = Model() self.storage = 'cuda:0' if torch.cuda.is_available() else 'cpu' self.storage = 'cpu' if self.use_cpu else self.storage checkpoint = torch.load(config.get('VECTORIZER', 'trained_model'), map_location=self.storage) self.model.load_state_dict(checkpoint['model_state_dict']) self.model = self.model.to(self.storage) self.model.eval() self.segmenter = Segmenter() self.clusterer = SpectralClusterer( min_clusters=2, max_clusters=100, p_percentile=0.95, gaussian_blur_sigma=1.0) def _check_audio(self, audio:Union[np.array, str], sr:int) -> Union[np.array, str]: if isinstance(audio, str): if not os.path.exists(audio): raise FileNotFoundError(f"File not found at location: `{audio}`.") try: audio, _ = librosa.load(audio, sr=sr, mono=True) except Exception as e: raise ValueError(f"Exception: {e}\nCould not read audio at location: `{audio}`.") elif not isinstance(audio, (np.ndarray, np.generic)): raise TypeError(f"Invalid argument type: audio should be either str or np.array.") audio = np.squeeze(audio) if not len(audio.shape) == 1: raise ValueError(f"Expected audio input to be 1 dimensional.") return audio def vectorize(self, audio:Union[np.array, str], sr:int=16000, frame_stride:float=None, hop_size:float=None) -> np.array: """ Parameters ---------- audio : np.array or str 1D numpy array or filepath to the audio file to vectorize. sr : int, optional Audio sample rate frame_stride: float, optional Chunk audio in frames of length frame_stride seconds hop_size: float, optional Chunk audio in frames of length frame_stride seconds with hop_size seconds Returns ------- np.array A 2 Dimensional vector representation of the audio input. """ audio = self._check_audio(audio, sr) frame_stride = config.getfloat("AUDIO", "frame_stride") if frame_stride is None else frame_stride hop_size = config.getfloat("AUDIO", "hop_size") if hop_size is None else hop_size frame_stride = int(sr*frame_stride) hop_size = int(sr*hop_size) audio = chunk_data(audio, frame_stride, max(0, (frame_stride-hop_size))) audio = torch.from_numpy(np.array(audio).astype(np.float32)).to(self.storage) with torch.no_grad(): features = self.model(audio) return features.cpu().numpy() def _diarize(self, audio:np.array, max_num_speakers:int) -> Tuple[List[Tuple[int, int]], np.array]: segments = self.segmenter(audio) audio_clips = [audio[s[0]:s[1]] for s in segments] vectors = list(map(self.vectorize, audio_clips)) vectors = [item for sublist in vectors for item in sublist] self.clusterer.max_clusters = max_num_speakers labels = self.clusterer.predict(np.squeeze(np.array(vectors))) return segments, labels def diarize(self, audio:Union[np.array, str], sr:int=16000, max_num_speakers:int=30) -> List[str]: """ Parameters ---------- audio : np.array or str 1D numpy array or filepath to the audio file to vectorize. 
sr : int, optional Audio sample rate max_num_speakers: int, optional Maximum amount of expected speakers in the audio Returns ------- list A list of strings. Each line is compatible with the RTTM format """ rttm = list() audio = self._check_audio(audio, sr) segments, labels = self._diarize(audio, max_num_speakers) for idx, segment in enumerate(segments): line = f"SPEAKER filename 1 {segment[0]/sr:.2f} {(segment[1]-segment[0])/sr:.2f} <NA> <NA> speaker{labels[idx]} <NA> <NA>\n" rttm.append(line) return rttm def recognize(self, audio:Union[np.array, str], enrollments:list, sr:int=16000, max_num_speakers:int=30) -> List[str]: """ Parameters ---------- audio : np.array or str 1D numpy array or filepath to the audio file to vectorize. enrollments: list list of tuples: (audio:Union[np.array, str], label:str) sr : int, optional Audio sample rate max_num_speakers: int, optional Maximum amount of expected speakers in the audio Returns ------- list A list of strings. Each line is compatible with the RTTM format """ rttm = list() audio = self._check_audio(audio, sr) enrollments = [(self._check_audio(audio, sr), label) for audio, label in enrollments] enrollments = [(self.vectorize(audio), label) for audio, label in enrollments] enrollment_vectors = list() for vectors, l in enrollments: for v in list(vectors): enrollment_vectors.append((v, l)) # Compute representative vector for each label enrollment_dict = defaultdict(list) for vector, label in enrollment_vectors: enrollment_dict[label].append(np.squeeze(vector)) enrollment_X, enrollment_y = zip(*[(np.mean(vectors, axis=0), label) for label, vectors in enrollment_dict.items()]) # Run diarization segments, labels = self._diarize(audio, max_num_speakers) # Compute representative vector for each label segments_dict = defaultdict(list) for idx, vector in enumerate(vectors): segments_dict[labels[idx]].append(np.squeeze(vector)) segment_X, segment_y = zip(*[(np.mean(vectors, axis=0), label) for label, vectors in segments_dict.items()]) # Make sure we have the right shape enrollment_X = np.squeeze(enrollment_X) segment_X = np.squeeze(segment_X) if len(enrollment_X.shape) == 1: enrollment_X = enrollment_X[None, :] if len(segment_X.shape) == 1: segment_X = segment_X[None, :] cost = distance.cdist(np.array(enrollment_X), np.array(segment_X), metric='cosine') row_ind, col_ind = linear_sum_assignment(cost) num_solutions = len(row_ind) id2label = dict() # Map between speaker ID and provided label (if it exists) for sol in range(num_solutions): id2label[list(segment_y)[col_ind[sol]]] = list(enrollment_y)[row_ind[sol]] for idx, segment in enumerate(segments): label = id2label.get(labels[idx]) if label is None: label = f"speaker{labels[idx]}" line = f"SPEAKER filename 1 {segment[0]/sr:.2f} {(segment[1]-segment[0])/sr:.2f} <NA> <NA> {label} <NA> <NA>\n" rttm.append(line) return rttm def verify(self, audio:Union[np.array, str], enrollments:list, sr:int=16000 ) -> float: """ Parameters ---------- audio : np.array or str 1D numpy array or filepath to the audio file to vectorize. 
enrollments: list list of tuples: (audio:Union[np.array, str], label:str) sr : int, optional Audio sample rate Returns ------- float Similarity score --> [0, 1] """ audio = self._check_audio(audio, sr) enrollments = [(self._check_audio(audio, sr), label) for audio, label in enrollments] enrollment_vector = [np.mean(self.vectorize(audio),axis=0) for audio, _ in enrollments] segments = self.segmenter(audio) audio_clips = [audio[s[0]:s[1]] for s in segments] vectors = list(map(self.vectorize, audio_clips)) vectors = [item for sublist in vectors for item in sublist] audio_vector = np.mean(vectors, axis=0) similarity = max(0, np.mean(1-distance.cdist(audio_vector[None, :], np.array(enrollment_vector), 'cosine'))) return similarity if __name__ == "__main__": toolbox = ToolBox() print(toolbox.vectorize.__doc__) print(toolbox.diarize.__doc__) print(toolbox.recognize.__doc__) print(toolbox.verify.__doc__)
python
#!/usr/bin/env python3 import re import pysam from .most_common import most_common from .sequence_properties import repeat cigar_ptn = re.compile(r"[0-9]+[MIDNSHPX=]") def realn_softclips( reads, pos, ins_or_del, idl_seq, idl_flanks, decompose_non_indel_read ): template = make_indel_template(idl_seq, idl_flanks) candidate_reads = [ classify_softclip_read(read, pos) for read in reads if classify_softclip_read(read, pos) ] if not candidate_reads: return [] fw_decomposed = [ forward_decomposition( read, softclip_ptrn, pos, ins_or_del, idl_seq, decompose_non_indel_read ) for read, softclip_ptrn in candidate_reads ] rv_decomposed = [ reverse_decomposition(read, pos, ins_or_del, idl_seq) for read, softclip_ptrn in candidate_reads ] decomposed_candidates = fw_decomposed + rv_decomposed compatible_softclip_reads = [ decom[0] for decom in decomposed_candidates if is_compatible(decom, template, ins_or_del) ] return compatible_softclip_reads def make_indel_template(idl_seq, idl_flanks): """Make consensus contig """ lt_flanks = [flank[0][::-1] for flank in idl_flanks if flank[0][-1] != "N"] rt_flanks = [flank[1] for flank in idl_flanks if flank[1][0] != "N"] lt_template = find_consensus_seq(lt_flanks)[::-1] rt_template = find_consensus_seq(rt_flanks) return lt_template, idl_seq, rt_template def get_ith_char(seq, i): try: return seq[i] except: return None def find_consensus_seq(seq_lst): consensus = "" if not seq_lst: return consensus for i in range(len(max(seq_lst, key=len))): ith_chars = [get_ith_char(seq, i) for seq in seq_lst if get_ith_char(seq, i)] if most_common(ith_chars) == "N": break else: consensus += most_common(ith_chars) return consensus.upper() def classify_softclip_read(read, pos): """Check softclip pattern and the clipped alignment is in the exon of interest """ cigarstring = read.cigarstring if not "S" in cigarstring: return None cigarlst = cigar_ptn.findall(read.cigarstring) start_adjust = int(cigarlst[0][:-1]) if cigarlst[0].endswith("S") else 0 read_start = read.reference_start - start_adjust end_adjust = int(cigarlst[-1][:-1]) if cigarlst[-1].endswith("S") else 0 read_end = read.reference_end + end_adjust if "N" in cigarstring: idx_at_splicesite = [ i for i, cigartoken in enumerate(cigarlst) if cigartoken.endswith("N") ] exonic_cigarlst = split_lst_by_index(cigarlst, idx_at_splicesite) # merge blocks separated by insertion/deletions deletion_lengths = [ int(token[:-1]) for token in cigarlst if token.endswith("D") ] d = max(deletion_lengths) if deletion_lengths else 0 blocks = merge_blocks(read.get_blocks(), d) idx_at_this_exon = [] for i, block in enumerate(blocks): if i == 0 and read_start <= pos <= block[1]: idx_at_this_exon.append(i) elif i == len(blocks) - 1 and block[0] <= pos <= read_end: idx_at_this_exon.append(i) elif block[0] <= pos <= block[1]: idx_at_this_exon.append(i) else: pass if idx_at_this_exon: this_exon_cigarstring = exonic_cigarlst[idx_at_this_exon[0]] else: return None else: this_exon_cigarstring = cigarlst first, last = this_exon_cigarstring[0][-1], this_exon_cigarstring[-1][-1] if first == "S" and last != "S": return (read, "leading") elif first != "S" and last == "S": return (read, "trailing") elif first == "S" and last == "S": # give up this pattern for now return None else: return None def merge_blocks(lst, d): merged = [] for i, b in enumerate(lst): if i <= len(lst) - 2: if lst[i + 1][0] <= b[1] + d: merged.append((b[0], lst[i + 1][1])) del lst[i + 1] else: merged.append((b[0], b[1])) else: if lst[i - 1][1] < b[0]: merged.append((b[0], b[1])) else: pass 
if merged == lst or len(merged) == 1: return merged else: return merge_blocks(merged, d) def split_lst_by_index(lst, idx): if idx: idx = (0,) + tuple(data + 1 for data in idx) + (len(lst) + 1,) my_lst = [] for start, end in zip(idx, idx[1:]): my_lst.append(lst[start : end - 1]) return my_lst def forward_decomposition( read, softclip_ptrn, pos, ins_or_del, idl_seq, decompose_non_indel_read ): """Decompose softclipped read from 5'side """ decom = decompose_non_indel_read(read, pos, ins_or_del, idl_seq) lt_flank, mid_seq, rt_flank = decom[2][0], decom[1], decom[2][1] if ins_or_del == "I" and softclip_ptrn == "leading": mid_seq = lt_flank[-len(idl_seq) :] lt_flank = lt_flank[: -len(idl_seq)] elif ins_or_del == "I" and softclip_ptrn == "trailing": mid_seq = rt_flank[: len(idl_seq)] rt_flank = rt_flank[len(idl_seq) :] elif ins_or_del == "D" and softclip_ptrn == "leading": lt_flank = lt_flank + mid_seq mid_seq = idl_seq else: rt_flank = mid_seq + rt_flank mid_seq = idl_seq return (read, lt_flank, mid_seq, rt_flank) def reverse_decomposition(read, pos, ins_or_del, idl_seq): """Decompose softclipped read from 3'side """ read_seq = read.query_sequence[::-1] cigarstring = read.cigarstring cigarlst = cigar_ptn.findall(read.cigarstring)[::-1] adjust = int(cigarlst[0][:-1]) if cigarlst[0].endswith("S") else 0 read_pos = read.reference_end + adjust idl_len = len(idl_seq) read_idx = 0 pos = pos if ins_or_del == "I" else pos + idl_len for token in cigarlst: event, event_len = token[-1], int(token[:-1]) if pos < read_pos: read_pos = read_pos if event == "I" else (read_pos - event_len) read_idx = ( read_idx if event == "D" or event == "N" else read_idx + event_len ) else: break diff = read_pos - pos if ins_or_del == "D": lt_flank, mid_seq, rt_flank = ( read_seq[read_idx + diff :], "", read_seq[: read_idx + diff], ) else: rt_flank, mid_seq, lt_flank = ( read_seq[: read_idx + diff], read_seq[read_idx + diff : read_idx + diff + idl_len], read_seq[read_idx + diff + idl_len :], ) lt_flank, mid_seq, rt_flank = lt_flank[::-1], mid_seq[::-1], rt_flank[::-1] return (read, lt_flank, mid_seq, rt_flank) def is_compatible(read_tuple, template_tuple, ins_or_del): read = read_tuple[0] read_lt_flank, read_indel, read_rt_flank = ( read_tuple[1], read_tuple[2], read_tuple[3], ) template_lt_flank, template_indel, template_rt_flank = ( template_tuple[0], template_tuple[1], template_tuple[2], ) lt_len = min(len(read_lt_flank), len(template_lt_flank)) rt_len = min(len(read_rt_flank), len(template_rt_flank)) # count repeat in template idl_type = 1 if ins_or_del == "I" else 0 if template_lt_flank and template_rt_flank: template_repeat = repeat( idl_type, template_lt_flank, template_indel, template_rt_flank ) else: return None if template_repeat > 0: if lt_len == 0 or rt_len == 0: return False else: read_repeat = repeat(idl_type, read_lt_flank, template_indel, read_rt_flank) if template_repeat != read_repeat: return False if ins_or_del == "D" and (lt_len == 0 or rt_len == 0): return False if lt_len > 0: lt_read, lt_template = read_lt_flank[-lt_len:], template_lt_flank[-lt_len:] else: lt_read, lt_template = "", "" rt_read, rt_template = read_rt_flank[:rt_len], template_rt_flank[:rt_len] if not is_almost_same(lt_read[::-1], lt_template[::-1]) or not is_almost_same( rt_read, rt_template ): return False if read_indel and ins_or_del == "I": template_indel_len = len(template_indel) read_indel_len = len(read_indel) if template_indel_len < read_indel_len: return False elif read_indel == template_indel: return True elif 4 <= 
template_indel_len <= 5: return identical_for_end_n_bases(read_indel, template_indel, 2) elif 6 <= template_indel_len <= 7: return identical_for_end_n_bases(read_indel, template_indel, 3) else: return identical_for_end_n_bases(read_indel, template_indel, 4) elif ins_or_del == "D": return True else: return False def identical_for_end_n_bases(query_str, subject_str, n): return (query_str[:n] == subject_str[:n]) or (query_str[-n:] == subject_str[-n:]) def is_almost_same(seq1, seq2, len_lim=10, mismatch_lim=1): seq_len = len(seq1) if seq_len > 0 and seq1[0] != seq2[0]: return False hamming = sum([seq1[i] != seq2[i] for i in range(seq_len)]) if seq_len >= len_lim: return hamming <= mismatch_lim else: return hamming == 0
python
"""Config namespace.""" from flask_restx import Namespace, Resource, fields # type: ignore from jsonschema import ValidationError # type: ignore from configmodel.logic.config import ( create_config, delete_config, get_config, get_configs, validate_config, ) api = Namespace("config", description="Config operations") config_model = api.model( "Config", { "config_id": fields.Integer( readonly=True, description="The configuration unique identifier" ), "hostname": fields.String(required=True, description="Device hostname"), "schema": fields.String(required=True, description="Configuration schema name"), "config": fields.Raw(required=True, description="Configuration JSON object"), }, ) @api.route("/") class ConfigList(Resource): """Shows a list of all configs, and lets you POST to add new ones.""" @api.marshal_list_with(config_model) def get(self): """List all configs.""" return get_configs() @api.expect(config_model, validate=True) @api.marshal_with(config_model, code=201, mask=None) def post(self): """Create a new config.""" try: validate_config(api.payload) except ValidationError as e: api.abort(400, e) config = create_config( hostname=api.payload["hostname"], schema=api.payload["schema"], config=api.payload["config"], ) return config, 201 @api.route("/<int:config_id>") @api.response(404, "config_id not found") @api.param("config_id", "The config identifier") class Config(Resource): """Show a single config item and lets you delete it.""" @api.marshal_with(config_model) def get(self, config_id): """Fetch a given config.""" config = get_config(config_id) if config is None: api.abort(404) return config @api.response(204, "Config deleted") def delete(self, config_id): """Delete a config given its identifier.""" result = delete_config(config_id) if not result: api.abort(404) return "", 204
python
```python
# coding: utf-8

# In[1]:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# In[2]:

# Load the dataset
mnist = input_data.read_data_sets('./../../datas/mnist/', one_hot=True)

# Input images are 28*28
n_inputs = 28  # one row of input at a time, 28 values per row
max_time = 28  # 28 rows in total
lstm_size = 100  # number of hidden units
n_classes = 10  # 10 classes
batch_size = 50  # 50 samples per batch
n_batch = mnist.train.num_examples // batch_size  # total number of batches

# "None" means the first dimension can have any length
x = tf.placeholder(tf.float32, [None, 784])
# correct labels
y = tf.placeholder(tf.float32, [None, 10])

# initialize weights
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
# initialize biases
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))


# define the RNN network
def RNN(X, weights, biases):
    # inputs = [batch_size, max_time, n_inputs]
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    # define the basic LSTM cell
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
    # final_state[0] is the cell state
    # final_state[1] is the hidden state
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    results = tf.nn.softmax(tf.matmul(final_state[1], weights) + biases)
    return results


# compute the RNN output
prediction = RNN(x, weights, biases)
# loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
# optimize with AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# store the comparison results in a boolean list
# (argmax returns the index of the largest value in a 1-D tensor)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# compute the accuracy; cast correct_prediction to float32
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# initialize
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(6):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc))

# In[ ]:
```
```python
from framework.types import RequestT
from framework.types import ResponseT
from framework.utils import build_status
from framework.utils import read_static


def handle_image(_request: RequestT) -> ResponseT:
    payload = read_static("image.jpg")
    status = build_status(200)
    headers = {"Content-type": "image/jpeg"}
    return ResponseT(status, headers, payload)
```
```python
import pandas as pd
import googlemaps
import json
from shapely.geometry import shape, Point

with open('static/GEOJSON/USCounties_final.geojson') as f:
    geojson1 = json.load(f)
county = geojson1["features"]

with open('static/GEOJSON/ID2.geojson') as f:
    geojson = json.load(f)
district = geojson["features"]

project = pd.read_csv('static/Excel/CleanedProject.csv')
df = pd.read_csv('static/Excel/Community Partners.csv')  # Get the Excel file from static/Excel

gmaps = googlemaps.Client(key='')  # google geocoding API

collection = {'type': 'FeatureCollection', 'features': []}

df['fulladdress'] = df[['address_line1', 'city', 'state']].apply(lambda x: ' '.join(x.astype(str)), axis=1)


def feature_from_row(Community, Address, Mission, CommunityType, Website):
    feature = {'type': 'Feature',
               'properties': {'CommunityPartner': '', 'Address': '', 'Legislative District Number': '',
                              'Number of projects': '', 'Income': '', 'County': '', 'Mission Area': '',
                              'CommunityType': '', 'Campus Partner': '', 'Academic Year': '', 'Website': ''},
               'geometry': {'type': 'Point', 'coordinates': []}}
    geocode_result = gmaps.geocode(Address)  # get the coordinates
    print(Address)
    print(geocode_result)
    if geocode_result:  # guard against an empty geocoder response
        latitude = geocode_result[0]['geometry']['location']['lat']
        longitude = geocode_result[0]['geometry']['location']['lng']
        feature['geometry']['coordinates'] = [longitude, latitude]
        coord = Point([longitude, latitude])
        for i in range(len(district)):  # iterate through a list of district polygons
            property = district[i]
            polygon = shape(property['geometry'])  # get the polygons
            if polygon.contains(coord):  # check if a partner is in a polygon
                # assign the district number to a partner
                feature['properties']['Legislative District Number'] = property["properties"]["id"]
        for m in range(len(county)):  # iterate through the County Geojson
            properties2 = county[m]
            polygon = shape(properties2['geometry'])  # get the polygon
            if polygon.contains(coord):  # check if the partner in question belongs to a polygon
                feature['properties']['County'] = properties2['properties']['NAME']
                feature['properties']['Income'] = properties2['properties']['Income']
    # projectlist = 0
    yearlist = []
    campuslist = []
    partners = project['community_partner']
    years = project['academic_year']
    campuses = project['campus_partner']
    count = 0
    for n in range(len(partners)):
        if partners[n] == Community:
            if years[n] not in yearlist:
                yearlist.append(years[n])
            if campuses[n] not in campuslist:
                campuslist.append(campuses[n])
            count += 1
    feature['properties']['Number of projects'] = count
    feature['properties']['Campus Partner'] = campuslist
    feature['properties']['Academic Year'] = yearlist
    feature['properties']['CommunityPartner'] = Community
    feature['properties']['CommunityType'] = CommunityType
    feature['properties']['Website'] = Website
    collection['features'].append(feature)
    return feature


geojson_series = df.apply(
    lambda x: feature_from_row(x['name'], x['fulladdress'], x['mission_area'], x['community_type'], x['website_url']),
    axis=1)

jsonstring = pd.io.json.dumps(collection)
output_filename = 'static/GEOJSON/Partner.geojson'  # The file will be saved under static/GEOJSON
with open(output_filename, 'w') as output_file:
    output_file.write(format(jsonstring))
```
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimizers based on scalarization.

One of the simplest approaches to optimizing multi-loss problems is to
scalarize to a real objective by combining the individual losses. Depending on
how the scalarization is performed, different optimization algorithms arise.
"""

import gin
import tensorflow.compat.v1 as tf

from yoto.optimizers import base as optimizers_base
from yoto.optimizers import distributions


@gin.configurable("LinearlyScalarizedOptimizer")
class LinearlyScalarizedOptimizer(optimizers_base.MultiLossOptimizer):
  r"""An optimizer that linearly scalarizes the losses.

  Namely, if the losses are loss_1, ..., loss_n, then it minimizes
  \sum_i loss_i * weight_i, for fixed weights. The weights can be either
  randomly drawn from one of the supported distributions, or fixed.
  """

  def __init__(self, problem, weights, batch_size=None, seed=17):
    """Initializes the optimizer.

    Args:
      problem: An instance of `problems.Problem`.
      weights: Either `distributions.DistributionSpec` class or a dictionary
        mapping the loss names to their corresponding weights.
      batch_size: Passed to the initializer of `MultiLossOptimizer`.
      seed: random seed to be used for sampling the weights.
    """
    super(LinearlyScalarizedOptimizer, self).__init__(
        problem, batch_size=batch_size)
    sampled_weights = distributions.get_samples_as_dicts(
        weights, names=self._losses_names, seed=seed)[0]
    self._check_weights_dict(sampled_weights)
    self._weights = sampled_weights

  def compute_train_loss_and_update_op(self, inputs, base_optimizer):
    losses, metrics = self._problem.losses_and_metrics(inputs, training=True)
    del metrics
    linearized_loss = 0.
    for loss_name, loss_value in losses.items():
      linearized_loss += tf.reduce_mean(loss_value * self._weights[loss_name])
    train_op = base_optimizer.minimize(
        linearized_loss, global_step=tf.train.get_or_create_global_step())
    self.normal_vars = tf.trainable_variables()
    return linearized_loss, train_op

  def compute_eval_loss(self, inputs):
    losses, metrics = self._problem.losses_and_metrics(inputs, training=False)
    del metrics
    linearized_loss = 0.
    for loss_name, loss_value in losses.items():
      linearized_loss += tf.reduce_mean(loss_value * self._weights[loss_name])
    return linearized_loss
python
import sqlite3

from .utility import exception_info, enquote2


class SQLighter:
    def __init__(self, db):
        self.connection = sqlite3.connect(db)
        self.cursor = self.connection.cursor()

    def db_query(self, query, args=None):
        with self.connection:
            if args is None or args == ():
                self.cursor.execute(query)
            else:
                self.cursor.execute(query, args)
            rows = self.cursor.fetchall()
            return rows

    def db_query_rows(self, query, args=None):
        rows = self.db_query(query, args)
        if len(rows) == 0:
            return None
        else:
            return rows

    def db_query_row(self, query, args=None):
        rows = self.db_query(query, args)
        if len(rows) == 0:
            return None
        else:
            return rows[0]

    def db_query_single(self, query, args=None):
        rows = self.db_query(query, args)
        if len(rows) == 0:
            return None
        else:
            return rows[0][0]

    def db_query_commit(self, query, args):
        try:
            with self.connection:
                self.cursor.execute(query, args)
                self.connection.commit()
        except Exception as ex:
            print("#######" + exception_info(ex))  # TODO: ?
            return None
        else:
            return self.cursor.lastrowid

    def close(self):
        self.connection.close()

    @staticmethod
    def gen_insert(table, **kwargs):
        """Generates DB insert statement"""
        cols = []
        vals = []
        for col, val in kwargs.items():
            cols.append(enquote2(col))
            vals.append(enquote2(str(val)))
        cols = ", ".join(cols)
        vals = ", ".join(vals)
        return "INSERT INTO '%s'(%s) VALUES(%s);" % (table, cols, vals)
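# --- illustrative usage sketch, not part of the original module ---
# The database path and table below are made up; the snippet only shows the
# intended call pattern (parameterised queries plus the single-value helper).
def _demo_sqlighter():
    db = SQLighter("example.sqlite")
    db.db_query_commit(
        "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)", ())
    new_id = db.db_query_commit("INSERT INTO users(name) VALUES(?)", ("alice",))
    # db_query_single returns the first column of the first row, or None.
    print(db.db_query_single("SELECT name FROM users WHERE id = ?", (new_id,)))
    db.close()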
python
from selfdrive.car import limit_steer_rate from selfdrive.car.hyundai.hyundaican import create_lkas11, create_lkas12, \ create_1191, create_1156, \ learn_checksum, create_mdps12, create_clu11 from selfdrive.car.hyundai.values import Buttons from selfdrive.can.packer import CANPacker import zmq from selfdrive.services import service_list import selfdrive.messaging as messaging from selfdrive.config import Conversions as CV from common.params import Params from selfdrive.swaglog import cloudlog # Steer torque limits class SteerLimitParams: STEER_MAX = 255 # >255 results in frozen torque, >409 results in no torque STEER_DELTA_UP = 3 STEER_DELTA_DOWN = 5 STEER_DRIVER_ALLOWANCE = 50 STEER_DRIVER_MULTIPLIER = 2 STEER_DRIVER_FACTOR = 1 DIVIDER = 2.0 # Must be > 1.0 class CarController(object): def __init__(self, dbc_name, car_fingerprint): self.apply_steer_last = 0 self.car_fingerprint = car_fingerprint self.lkas11_cnt = 0 self.clu11_cnt = 0 self.mdps12_cnt = 0 self.cnt = 0 self.last_resume_cnt = 0 self.map_speed = 0 self.map_data_sock = messaging.sub_sock(service_list['liveMapData'].port) self.params = Params() self.speed_conv = 3.6 self.speed_offset = 1.03 # Multiplier for cruise speed vs speed limit TODO: Add to UI self.speed_enable = True # Enable Auto Speed Set TODO: Add to UI self.speed_adjusted = False self.checksum = "NONE" self.checksum_learn_cnt = 0 self.turning_signal_timer = 0 self.camera_disconnected = False self.checksum_found = False self.packer = CANPacker(dbc_name) def update(self, enabled, CS, actuators, pcm_cancel_cmd, hud_alert): ### Error State Resets ### disable_steer = False can_sends = [] ### Learn Checksum ### if not self.checksum_found: # Learn Checksum from the Camera if self.checksum == "NONE": self.checksum = learn_checksum(self.packer, CS.lkas11) if self.checksum == "NONE" and self.checksum_learn_cnt < 50: self.checksum_learn_cnt += 1 return else: cloudlog.info("Discovered Checksum %s" % self.checksum) self.checksum_found = True # If MDPS is faulted from bad checksum, then cycle through all Checksums until 1 works if CS.steer_error == 1: self.camera_disconnected = True cloudlog.warning("Camera Not Detected: Brute Forcing Checksums") if self.checksum_learn_cnt > 300: self.checksum_learn_cnt = 50 if self.checksum == "NONE": cloudlog.info("Testing 6B Checksum") self.checksum = "6B" elif self.checksum == "6B": cloudlog.info("Testing 7B Checksum") self.checksum = "7B" elif self.checksum == "7B": cloudlog.info("Testing CRC8 Checksum") self.checksum = "crc8" else: self.checksum = "NONE" return else: self.checksum_learn_cnt += 1 else: cloudlog.info("Discovered Checksum %s" % self.checksum) self.checksum_found = True ### Minimum Steer Speed ### # Apply Usage of Minimum Steer Speed if CS.low_speed_alert: disable_steer = True ### Turning Indicators ### if (CS.left_blinker_on == 1 or CS.right_blinker_on == 1): self.turning_signal_timer = 100 # Disable for 1.0 Seconds after blinker turned off if self.turning_signal_timer > 0: disable_steer = True self.turning_signal_timer -= 1 ### Steering Torque ### apply_steer = actuators.steer * SteerLimitParams.STEER_MAX apply_steer = limit_steer_rate(apply_steer, self.apply_steer_last, CS.steer_torque_driver, SteerLimitParams) if not enabled or disable_steer: apply_steer = 0 steer_req = 0 else: steer_req = 1 self.apply_steer_last = apply_steer ''' ### Auto Speed Limit ### # Read Speed Limit and define if adjustment needed if (self.cnt % 50) == 0 and self.speed_enable: if not (enabled and CS.acc_active): self.speed_adjusted = False map_data = 
messaging.recv_one_or_none(self.map_data_sock) if map_data is not None: if bool(self.params.get("IsMetric")): self.speed_conv = CV.MS_TO_KPH else: self.speed_conv = CV.MS_TO_MPH if map_data.liveMapData.speedLimitValid: last_speed = self.map_speed v_speed = int(map_data.liveMapData.speedLimit * self.speed_offset) self.map_speed = v_speed * self.speed_conv if last_speed != self.map_speed: self.speed_adjusted = False else: self.map_speed = 0 self.speed_adjusted = True else: self.map_speed = 0 self.speed_adjusted = True # Spam buttons for Speed Adjustment if CS.acc_active and not self.speed_adjusted and self.map_speed > (8.5 * self.speed_conv) and (self.cnt % 9 == 0 or self.cnt % 9 == 1): if (CS.cruise_set_speed * self.speed_conv) > (self.map_speed * 1.005): can_sends.append(create_clu11(self.packer, CS.clu11, Buttons.SET_DECEL, (1 if self.cnt % 9 == 1 else 0))) elif (CS.cruise_set_speed * self.speed_conv) < (self.map_speed / 1.005): can_sends.append(create_clu11(self.packer, CS.clu11, Buttons.RES_ACCEL, (1 if self.cnt % 9 == 1 else 0))) else: self.speed_adjusted = True # Cancel Adjustment on Pedal if CS.pedal_gas: self.speed_adjusted = True ''' ### Generate CAN Messages ### self.lkas11_cnt = self.cnt % 0x10 # self.clu11_cnt = self.cnt % 0x10 self.mdps12_cnt = self.cnt % 0x100 if self.camera_disconnected: if (self.cnt % 10) == 0: can_sends.append(create_lkas12()) if (self.cnt % 50) == 0: can_sends.append(create_1191()) if (self.cnt % 7) == 0: can_sends.append(create_1156()) can_sends.append(create_lkas11(self.packer, self.car_fingerprint, apply_steer, steer_req, self.lkas11_cnt, enabled, CS.lkas11, hud_alert, (not self.camera_disconnected), self.checksum)) if not self.camera_disconnected: can_sends.append(create_mdps12(self.packer, self.car_fingerprint, self.mdps12_cnt, CS.mdps12, CS.lkas11, \ self.checksum)) # if pcm_cancel_cmd: # can_sends.append(create_clu11(self.packer, CS.clu11, Buttons.CANCEL, 0)) if CS.stopped and (self.cnt - self.last_resume_cnt) > 20: if (self.cnt - self.last_resume_cnt) > 20: self.last_resume_cnt = self.cnt can_sends.append(create_clu11(self.packer, CS.clu11, Buttons.RES_ACCEL, self.clu11_cnt)) self.cnt += 1 return can_sends
python
##========================================================== ## 2016.02.09 vsTAAmbk 0.4.1 ## Ported from TAAmbk 0.7.0 by Evalyn ## Email: [email protected] ## Thanks (author)kewenyu for help ##========================================================== ## Requirements: ## EEDI2 ## nnedi3 ## RemoveGrain/Repair ## fmtconv ## GenericFilters ## MSmoosh ## MVTools ## TemporalSoften ## sangnom ## HAvsFunc(and its requirements) ## VapourSynth R28 or newer ## ##========================================================== ##========================================================== ## ## #### Only YUV colorfmaily is supported ! ## #### And input bitdepth must be 8 or 16 INT ! ## ## Add lsb[bool] to control nnedi3 input bitdepth. ## "False" means input depth for nnedi3 is always 8bit. ## "thin" and "dark" are now removed. ## add "aatype = 7" using "pure" sangnom. ## ##========================================================== ## ## Output bitdepth is always 16bit INTEGER. ## AA precision is 16bit (8bit if necessary). ## Mask precision depend on the input. ## (if 8 then 8, if 16 then 16) ## Other parts are all 16bit. ## ##========================================================== import vapoursynth as vs import havsfunc as haf def TAAmbk(input, aatype=1, lsb=False, preaa=0, sharp=0, postaa=None, mtype=None, mthr=32, src=None, cycle=0, eedi3sclip=None, predown=False, repair=None, stabilize=0, p1=None, p2=None, p3=None, p4=None, p5=None, p6=None, showmask=False, mtype2=0, mthr2=32, auxmthr=None): core = vs.get_core() #constant value funcname = 'TAAmbk' w = input.width h = input.height upw4 = (round(w*0.09375)*16) # mod16(w*1.5) uph4 = (round(h*0.09375)*16) # mod16(h*1.5) downw4 = (round(w*0.046875)*16) # mod16(w*0.75) downh4 = (round(h*0.046875)*16) # mod16(h*0.75) if input.format.num_planes == 1: GRAY = True else: GRAY = False # border to add for SangNomMod when aatype = 6 or 7 if aatype == 6 or aatype == 7: # mod16 or not if w % 16 == 0: mod16w = True else: mod16w = False borderW = (16 - w % 16) if h % 16 == 0: mod16h = True else: mod16h = False borderH = (16 - h % 16) #generate paramerters if None if mtype == None: if preaa == 0 and aatype == 0: mtype = 0 else: mtype = 1 if auxmthr == None: if mtype == 1: auxmthr = 1.2 else: if mtype ==3: auxmthr = 8 else: auxmthr = 0.0 absSh = abs(sharp) if postaa == None: if absSh > 70 or (absSh > 0.4 and absSh < 1): postaa = True else: postaa = False if repair == None: if (aatype != 1 and aatype != 2 and aatype != 3): repair = 20 else: repair = 0 if isinstance(mtype, vs.VideoNode): rp = 20 else: if mtype == 5: rp = 0 else: rp = 20 if eedi3sclip is None: eedi3sclip = False else: if not isinstance(eedi3sclip, bool): raise TypeError(funcname + ': \"eedi3sclip\" must be bool !') # p1~p6 preset groups pindex = aatype + 3 # aatype = -3 -2 -1 0 1 2 3 4 5 6 7 if p1 is None: p1 = [ 48, 48, 48, 0, 10, 0.5, 3, 48, 48, 48, 48][pindex] if p2 is None: p2 = [ 3, 0.5, 10, 0, 20, 0.2, 1, 1, 0, rp, rp][pindex] if p3 is None: p3 = [ 1, 0.2, 20, 0, 20, 20, 2, 3, 0, 0, 0][pindex] if p4 is None: p4 = [ 2, 20, 20, 0, 24, 3, 0, 2, 0, 0, 0][pindex] if p4 is None: p4 = [ 2, 20, 20, 0, 24, 3, 0, 2, 0, 0, 0][pindex] if p5 is None: p5 = [ 0, 3, 24, 0, 50, 30, 0, 0, 0, 0, 0][pindex] if p6 is None: p6 = [ 0, 30, 50, 0, 0, 0, 0, 0, 0, 0, 0][pindex] #paramerters check #input type check if not isinstance(input, vs.VideoNode): raise ValueError(funcname + ': \"input\" must be a clip !') #YUV constant value inputFormatid = input.format.id # A unique id identifying the format. 
sColorFamily = input.format.color_family # Which group of colorspaces the format describes. sbits_per_sample = int(input.format.bits_per_sample) # How many bits are used to store one sample in one plane. sSType = input.format.sample_type # source sample type #format check if sColorFamily == vs.YUV or sColorFamily == vs.GRAY: if sSType != vs.INTEGER: raise TypeError(funcname + ': \"input\" must be INTEGER format !') else: if not (sbits_per_sample == 8 or sbits_per_sample == 16): raise TypeError(funcname + ': \"input\" must be 8bit or 16bit INTEGER !') else: raise TypeError(funcname + ': Only YUV colorfmaily is supported !') #aatype check if not isinstance(aatype, int) or (aatype < -3 or aatype > 7): raise ValueError(funcname + ': \"aatype\" (int: -3~7) invalid !') #lsb check if not isinstance(lsb, bool): raise TypeError(funcname + ': \"lsb\" must be BOOL !') #preaa check if not isinstance(preaa, int) or (preaa < 0 or preaa > 1): raise ValueError(funcname + ': \"preaa\" (int: 0~1) invalid !') #mtype check if not isinstance(mtype, int): if not isinstance(mtype, vs.VideoNode): raise TypeError(funcname + ': \"mtype\" is not a clip !') else: if mtype.format.id != inputFormatid : raise TypeError(funcname + ': \"input\" and \"mclip(mtype)\" must be of the same format !') else: if mtype.width != w or mtype.height != h: raise TypeError(funcname + ': resolution of \"input\" and your custome mask clip \"mtype\" must match !') else: if mtype < 0 or mtype > 6: raise ValueError(funcname + ': \"mtype\" (int: 0~6) invalid !') #mthr check if not isinstance(mthr, int) or (mthr < 0 or mthr > 255): raise ValueError(funcname + ': \"mthr\" (int: 0~255) invalid !') #repair check if not isinstance(repair, int) or (repair < -24 or repair > 24): raise ValueError(funcname + ': \"repair\" (int: -24~24) invalid !') #src clip check if src is not None and isinstance(src, vs.VideoNode): if src.format.id != inputFormatid : raise TypeError(funcname + ': \"input\" and \"src\" must be of the same format !') else: if src.width != w or src.height != h: raise TypeError(funcname + ': resolution of \"input\" and \"src\" must match !') elif src is not None: raise ValueError(funcname + ': \"src\" is not a clip !') #cycle check if not isinstance(cycle, int) or cycle < 0: raise ValueError(funcname + ': \"cycle\" must be non-negative int !') #stabilize check if not isinstance(stabilize, int) or (stabilize < -3 or stabilize > 3): raise ValueError(funcname + ': \"stabilize\" (int: -3~3) invalid !') if showmask and mtype == 0: raise ValueError(funcname + ': There is NO mask to show when \"mtype\" = 0 !') ################################### ### Small functions ############## ################################### # average two clips of 3 yuv planes def average(clipa, clipb): return (core.std.Expr(clips=[clipa,clipb], expr=["x y + 2 /"])) # bitdepth conversion from mvsfunc, mawen1250 Thanks! 
def Depth(input, depth=None): sbitPS = input.format.bits_per_sample if sbitPS == depth: return input else: return core.fmtc.bitdepth(input,bits=depth,flt=0,dmode=3) # fast PointResize from mvsfunc def PointPower(input, vpow=1): for i in range(vpow): clip = core.std.Interleave([input,input]).std.DoubleWeave(tff=True).std.SelectEvery(2,0) return clip ################################### # src clip issue #====================== if src == None: if predown: if lsb: src = core.nnedi3.nnedi3(core.fmtc.resample(input, w=downw4, h=downh4,kernel="spline36"),field=1,dh=True) src = core.std.Transpose(core.fmtc.resample(src,w=downw4,h=h,sx=0,sy=[-0.5,-0.5*(1<<input.format.subsampling_h)],kernel="spline36")) src = core.std.Transpose(core.fmtc.resample(core.nnedi3.nnedi3(src,field=1,dh=True),w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<input.format.subsampling_h)],kernel="spline36")) else: src = core.nnedi3.nnedi3(Depth(core.fmtc.resample(input, w=downw4, h=downh4,kernel="spline36"),8),field=1,dh=True) src = core.std.Transpose(core.fmtc.resample(src,w=downw4,h=h,sx=0,sy=[-0.5,-0.5*(1<<input.format.subsampling_h)],kernel="spline36")) src = core.std.Transpose(core.fmtc.resample(core.nnedi3.nnedi3(Depth(src,8),field=1,dh=True),w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<input.format.subsampling_h)],kernel="spline36")) else: src = input #====================== #internal function def TAAmbk_prepass(clip, predown=predown, downw4=downw4, downh4=downh4, thin=0, dark=0, preaa=preaa): if predown: pdclip = core.resize.Spline36(clip, downw4, downh4) else: pdclip = clip if preaa == 1: if lsb: nn = core.nnedi3.nnedi3(pdclip, field=3) nnt = core.std.Transpose(core.nnedi3.nnedi3(core.std.Transpose(pdclip), field=3)) else: nn = core.nnedi3.nnedi3(Depth(pdclip,8), field=3) nnt = core.std.Transpose(core.nnedi3.nnedi3(Depth(core.std.Transpose(pdclip),8), field=3)) #nnedi3 double rate start with top clph = average(core.std.SelectEvery(nn, cycle=2, offsets=0), core.std.SelectEvery(nn, cycle=2, offsets=1)) clpv = average(core.std.SelectEvery(nnt, cycle=2, offsets=0), core.std.SelectEvery(nnt, cycle=2, offsets=1)) clp = average(clph, clpv) preaaB = clp else: preaaB = pdclip preaaC = preaaB #filters unavailable #======================================= # if thin == 0 and dark == 0: # preaaC = preaaB # else: # if dark == 0: # preaaC = core.warp.AWarpSharp2(preaaB,depth=thin) # elif thin == 0: # preaaC = Toon(preaaB,dark) #? # else: # preaaC = Toon(core.warp.AWarpSharp2(preaaB,depth=thin),dark) #? 
#======================================= return preaaC #internal functions def TAAmbk_mainpass(preaaC, aatype=aatype, cycle=cycle, p1=p1, p2=p2, p3=p3, p4=p4, p5=p5, p6=p6, w=w, h=h, uph4=uph4, upw4=upw4, eedi3sclip=eedi3sclip): # generate eedi3 sclip using nnedi3 double height if eedi3sclip is True: if aatype == -2: if lsb: sclip = core.nnedi3.nnedi3(preaaC,field=1,dh=True) sclip_r = core.resize.Spline36(sclip,w,uph4) sclip_r = core.std.Transpose(sclip_r) sclip_r = core.nnedi3.nnedi3(sclip_r,field=1,dh=True) sclip = Depth(sclip,8) sclip_r = Depth(sclip_r,8) else: sclip = core.nnedi3.nnedi3(Depth(preaaC,8),field=1,dh=True) sclip_r = core.resize.Spline36(sclip,w,uph4) sclip_r = core.std.Transpose(sclip_r) sclip_r = core.nnedi3.nnedi3(sclip_r,field=1,dh=True) elif aatype == 2: if lsb: sclip = core.nnedi3.nnedi3(preaaC,field=1,dh=True) sclip_r = sclip_r = core.resize.Spline36(sclip,w,h) sclip_r = core.std.Transpose(sclip_r) sclip_r = core.nnedi3.nnedi3(sclip_r,field=1,dh=True) sclip = Depth(sclip,8) sclip_r = Depth(sclip_r,8) else: sclip = core.nnedi3.nnedi3(Depth(preaaC,8),field=1,dh=True) sclip_r = sclip_r = core.resize.Spline36(sclip,w,h) sclip_r = core.std.Transpose(sclip_r) sclip_r = core.nnedi3.nnedi3(sclip_r,field=1,dh=True) # generate aa_clip ########################## # # # AAtype -3 or 4 # # # ########################## if aatype == -3 or aatype == 4: if lsb: aa_clip = core.nnedi3.nnedi3(preaaC, dh=True, field=1, nsize=int(p2), nns=int(p3), qual=int(p4)) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=w,h=uph4,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) aa_clip = core.fmtc.resample(core.nnedi3.nnedi3(aa_clip, dh=True, field=1, nsize=int(p2), nns=int(p3), qual=int(p4)),w=uph4,h=upw4,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = Depth(aa_clip,depth=8) aa_clip = core.sangnom.SangNomMod(core.std.Transpose(core.sangnom.SangNomMod(aa_clip,aa=int(p1))),aa=int(p1)) aa_clip = core.fmtc.resample(aa_clip,w=w,h=h,kernel=["spline36","spline36"]) else: aa_clip = core.nnedi3.nnedi3(Depth(preaaC,8), dh=True, field=1, nsize=int(p2), nns=int(p3), qual=int(p4)) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=w,h=uph4,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) aa_clip = core.fmtc.resample(core.nnedi3.nnedi3(Depth(aa_clip,8), dh=True, field=1, nsize=int(p2), nns=int(p3), qual=int(p4)),w=uph4,h=upw4,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = Depth(aa_clip,depth=8) aa_clip = core.sangnom.SangNomMod(core.std.Transpose(core.sangnom.SangNomMod(aa_clip,aa=int(p1))),aa=int(p1)) aa_clip = core.fmtc.resample(aa_clip,w=w,h=h,kernel=["spline36","spline36"]) ###################### # # # AA type -2 # # # ###################### elif aatype == -2: if eedi3sclip == False: aa_clip = core.fmtc.resample(core.eedi3.eedi3(Depth(preaaC,8), dh=True, field=1, alpha=p2, beta=p3, gamma=p4, nrad=int(p5), mdis=int(p6)), w=w, h=uph4, sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = Depth(aa_clip,depth=8) aa_clip = core.eedi3.eedi3(core.std.Transpose(aa_clip), dh=True, field=1, alpha=p2, beta=p3, gamma=p4, nrad=int(p5), mdis=int(p6)) aa_clip = core.sangnom.SangNomMod(Depth(core.fmtc.resample(aa_clip, w=uph4, h=upw4, sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36"),depth=8),aa=int(p1)) aa_clip = core.sangnom.SangNomMod(core.std.Transpose(aa_clip),aa=int(p1)) aa_clip = 
core.fmtc.resample(aa_clip,w=w,h=h,kernel=["spline36","spline36"]) else: # EEDI3 need w * h aa_clip = core.fmtc.resample(core.eedi3.eedi3(Depth(preaaC,8), dh=True, field=1, alpha=p2, beta=p3, gamma=p4, nrad=int(p5), mdis=int(p6), sclip=sclip), w=w, h=uph4, sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") # output w * uph4 aa_clip = Depth(aa_clip,depth=8) # EEDI3 need uph4 * w aa_clip = core.eedi3.eedi3(core.std.Transpose(aa_clip), dh=True, field=1, alpha=p2, beta=p3, gamma=p4, nrad=int(p5), mdis=int(p6), sclip=sclip_r) aa_clip = core.sangnom.SangNomMod(Depth(core.fmtc.resample(aa_clip, w=uph4, h=upw4, sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36"),depth=8),aa=int(p1)) aa_clip = core.sangnom.SangNomMod(core.std.Transpose(aa_clip),aa=int(p1)) aa_clip = core.fmtc.resample(aa_clip,w=w,h=h,kernel=["spline36","spline36"]) ###################### # # # AA type -1 # # # ###################### elif aatype == -1: aa_clip = core.fmtc.resample(core.eedi2.EEDI2(preaaC, field=1, mthresh=int(p2), lthresh=int(p3), vthresh=int(p4), maxd=int(p5), nt=int(p6)),w=w,h=uph4,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.eedi2.EEDI2(core.std.Transpose(aa_clip),field=1, mthresh=int(p2), lthresh=int(p3), vthresh=int(p4), maxd=int(p5), nt=int(p6)) aa_clip = core.sangnom.SangNomMod(Depth(core.fmtc.resample(aa_clip,w=uph4,h=upw4,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36"),depth=8),aa=int(p1)) aa_clip = core.sangnom.SangNomMod(core.std.Transpose(aa_clip),aa=int(p1)) aa_clip = core.fmtc.resample(aa_clip,w=w,h=h,kernel=["spline36","spline36"]) ###################### # # # AA type 1 # # # ###################### elif aatype == 1: aa_clip = core.fmtc.resample(core.eedi2.EEDI2(preaaC,field=1,mthresh=int(p1), lthresh=int(p2), vthresh=int(p3), maxd=int(p4), nt=int(p5)),w=w,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.eedi2.EEDI2(core.std.Transpose(aa_clip),field=1,mthresh=int(p1), lthresh=int(p2), vthresh=int(p3), maxd=int(p4), nt=int(p5)) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) ###################### # # # AA type 2 # # # ###################### elif aatype == 2: if eedi3sclip == False: aa_clip = core.fmtc.resample(core.eedi3.eedi3(Depth(preaaC,8),dh=True, field=1, alpha=p1, beta=p2, gamma=p3, nrad=int(p4), mdis=int(p5)),w=w,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = Depth(core.std.Transpose(aa_clip),depth=8) aa_clip = core.fmtc.resample(core.eedi3.eedi3(aa_clip,dh=True, field=1, alpha=p1, beta=p2, gamma=p3, nrad=int(p4), mdis=int(p5)),w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.std.Transpose(aa_clip) else: #EEDI3 need w * h aa_clip = core.fmtc.resample(core.eedi3.eedi3(Depth(preaaC,8),dh=True, field=1, alpha=p1, beta=p2, gamma=p3, nrad=int(p4), mdis=int(p5), sclip=sclip),w=w,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") #output w * h aa_clip = Depth(core.std.Transpose(aa_clip),depth=8) #EEDI3 need h * w aa_clip = core.fmtc.resample(core.eedi3.eedi3(aa_clip,dh=True, field=1, alpha=p1, beta=p2, gamma=p3, nrad=int(p4), mdis=int(p5), sclip=sclip_r),w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.std.Transpose(aa_clip) ###################### # # # AA type 3 # # # ###################### elif aatype == 3: if 
lsb: aa_clip = core.fmtc.resample(core.nnedi3.nnedi3(preaaC, dh=True, field=1, nsize=int(p1), nns=int(p2), qual=int(p3)),w=w,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.nnedi3.nnedi3(core.std.Transpose(aa_clip), dh=True, field=1, nsize=int(p1), nns=int(p2), qual=int(p3)) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) else: aa_clip = core.fmtc.resample(core.nnedi3.nnedi3(Depth(preaaC,8), dh=True, field=1, nsize=int(p1), nns=int(p2), qual=int(p3)),w=w,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.nnedi3.nnedi3(Depth(core.std.Transpose(aa_clip),8), dh=True, field=1, nsize=int(p1), nns=int(p2), qual=int(p3)) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) ###################### # # # AA type 5 # # # ###################### elif aatype == 5: aa_clip = Depth(core.fmtc.resample(preaaC, w=upw4, h=uph4 ,kernel=["lanczos","bicubic"]),depth=8) aa_clip = core.std.Transpose(core.sangnom.SangNomMod(aa_clip,aa=int(p1))) aa_clip = core.fmtc.resample(core.sangnom.SangNomMod(aa_clip,aa=int(p1)),w=h,h=w,kernel="spline36") aa_clip = core.std.Transpose(aa_clip) ###################### # # # AA type 6 # # # ###################### elif aatype == 6: aa_clip = Depth(core.fmtc.resample(preaaC, w=w, h=uph4 ,kernel=["lanczos","bicubic"]),depth=8) if mod16w is True: aa_clip = core.fmtc.resample(core.sangnom.SangNomMod(aa_clip,aa=int(p1)),w=w,h=h,kernel="spline36") else: aa_clip = core.std.AddBorders(aa_clip,borderW) aa_clip = core.fmtc.resample(core.sangnom.SangNomMod(aa_clip,aa=int(p1)),w=w,h=h,kernel="spline36") aa_clip = core.std.CropRel(aa_clip,borderW) aa_clip = core.fmtc.resample(core.std.Transpose(aa_clip),w=h,h=upw4,kernel=["lanczos","bicubic"]) if mod16h is True: aa_clip = core.sangnom.SangNomMod(Depth(aa_clip,depth=8),aa=int(p1)) else: aa_clip = core.std.AddBorders(aa_clip,borderH) aa_clip = core.sangnom.SangNomMod(Depth(aa_clip,depth=8),aa=int(p1)) aa_clip = core.std.CropRel(aa_clip,borderH) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,kernel="spline36")) aa_clip = core.rgvs.Repair(aa_clip, core.fmtc.resample(preaaC,w=w,h=h,kernel="spline64"), mode=int(p2)) ###################### # # # AA type 7 # # # ###################### elif aatype == 7: aa_clip = PointPower(Depth(preaaC,8)) if mod16w and not predown: aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.Transpose(aa_clip) elif predown: if aa_clip.width == downw4: aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.Transpose(aa_clip) elif mod16w: aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.Transpose(aa_clip) else: aa_clip = core.std.AddBorders(aa_clip,borderW) aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.CropRel(aa_clip,borderW) aa_clip = core.std.Transpose(aa_clip) else: aa_clip = core.std.AddBorders(aa_clip,borderW) aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.CropRel(aa_clip,borderW) aa_clip = core.std.Transpose(aa_clip) aa_clip = PointPower(aa_clip) if mod16h and not predown: aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) elif predown: if aa_clip.width == downh4 * 2: aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) elif mod16h: aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) else: aa_clip = core.std.AddBorders(aa_clip,(16 - h 
* 2 % 16)) aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.CropRel(aa_clip,(16 - h * 2 % 16)) else: aa_clip = core.std.AddBorders(aa_clip,(16 - h * 2 % 16)) aa_clip = core.sangnom.SangNomMod(aa_clip,aa=int(p1)) aa_clip = core.std.CropRel(aa_clip,(16 - h * 2 % 16)) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,kernel="spline36")) if predown: aa_clip = core.rgvs.Repair(aa_clip, core.fmtc.resample(preaaC,w=w,h=h,kernel="spline64"), mode=int(p2)) else: aa_clip = core.rgvs.Repair(aa_clip, Depth(preaaC,16), mode=int(p2)) # if predown and no aa, use nnedi3 to recover else: if predown: if lsb: aa_clip = core.fmtc.resample(core.nnedi3.nnedi3(preaaC,dh=True, field=1, nsize=1, nns=3, qual=2),w=preaaC.width,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.nnedi3.nnedi3(core.std.Transpose(aa_clip),dh=True, field=1, nsize=1, nns=3, qual=2) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) else: aa_clip = core.fmtc.resample(core.nnedi3.nnedi3(Depth(preaaC,8),dh=True, field=1, nsize=1, nns=3, qual=2),w=preaaC.width,h=h,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36") aa_clip = core.nnedi3.nnedi3(Depth(core.std.Transpose(aa_clip),8),dh=True, field=1, nsize=1, nns=3, qual=2) aa_clip = core.std.Transpose(core.fmtc.resample(aa_clip,w=h,h=w,sx=0,sy=[-0.5,-0.5*(1<<preaaC.format.subsampling_h)],kernel="spline36")) return aa_clip if cycle == 0 else TAAmbk_mainpass(aa_clip, aatype=aatype ,cycle=cycle-1, p1=p1, p2=p2, p3=p3, p4=p4, p5=p5, p6=p6, w=w, h=h, uph4=uph4, upw4=upw4, eedi3sclip=eedi3sclip) #Internal functions def TAAmbk_mask(input, mtype=mtype, mthr=mthr, w=w, mtype2=mtype2, mthr2=mthr2, auxmthr=auxmthr): bits = input.format.bits_per_sample shift = bits - 8 neutral = 128 << shift peak = (1 << bits) - 1 multiple = peak / 255 #generate edge_mask_1 if mtype == 1: edge_mask_1 = core.tcanny.TCanny(input, sigma=auxmthr, mode=1, op=2, planes=0) exprY = "x "+str(mthr*multiple)+" <= x 2 / x 2 * ?" edge_mask_1 = core.std.Expr(edge_mask_1, [exprY] if GRAY else [exprY,""]) if w > 1100: edge_mask_1 = core.rgvs.RemoveGrain(edge_mask_1, [20] if GRAY else [20,0]) else: edge_mask_1 = core.rgvs.RemoveGrain(edge_mask_1, [11] if GRAY else [11,0]) edge_mask_1 = core.generic.Inflate(edge_mask_1, planes=0) elif mtype == 3: edge_mask_1 = core.generic.TEdge(input, min=auxmthr, planes=0) exprY = "x "+str(mthr*multiple/5)+" <= x 2 / x 16 * ?" 
edge_mask_1 = core.std.Expr(edge_mask_1, [exprY] if GRAY else [exprY,""]) edge_mask_1 = core.generic.Deflate(edge_mask_1, planes=0) if w > 1100: edge_mask_1 = core.rgvs.RemoveGrain(edge_mask_1, [20] if GRAY else [20,0]) else: edge_mask_1 = core.rgvs.RemoveGrain(edge_mask_1, [11] if GRAY else [11,0]) elif mtype == 2: edge_mask_1 = core.msmoosh.MSharpen(input, threshold=mthr//5, strength=0, mask=True, planes=0) elif mtype == 4: edge_mask_1 = core.generic.Sobel(input, min=5, max=7, planes=0) edge_mask_1 = core.generic.Inflate(edge_mask_1, planes=0) elif mtype == 5: edge_mask_1 = core.std.Convolution(input,[0, 0, 0, 0, 2, -1, 0, -1, 0],planes=0) edge_mask_1 = core.generic.Inflate(edge_mask_1, planes=0) elif mtype == 6: edgemask1 = core.std.Convolution(input,[1, 1, 0, 1, 0, -1, 0, -1, -1],divisor=1,saturate=False,planes=0) edgemask2 = core.std.Convolution(input,[1, 1, 1, 0, 0, 0, -1, -1, -1],divisor=1,saturate=False,planes=0) edgemask3 = core.std.Convolution(input,[1, 0, -1, 1, 0, -1, 1, 0, -1],divisor=1,saturate=False,planes=0) edgemask4 = core.std.Convolution(input,[0, -1, -1, 1, 0, -1, 1, 1, 0],divisor=1,saturate=False,planes=0) mt = "x y max z max a max" edge_mask_1 = core.std.Expr([edgemask1,edgemask2,edgemask3,edgemask4],[mt] if GRAY else [mt,""]) exprY = "x "+str(mthr*multiple)+" <= x 2 / x 2.639015821545 * ?" edge_mask_1 = core.std.Expr(edge_mask_1, [exprY] if GRAY else [exprY,""]) edge_mask_1 = core.rgvs.RemoveGrain(edge_mask_1, [4] if GRAY else [4,0]) edge_mask_1 = core.generic.Inflate(edge_mask_1, planes=0) else: edge_mask_1 == None #generate edge_mask_2 if mtype2 == 0: edge_mask_2 = None elif mtype2 == 1: edge_mask_2 = core.tcanny.TCanny(input, sigma=1.2, mode=1, op=0, planes=0) exprY = "x "+str(mthr2*multiple)+" <= x 2 / x 2 * ?" edge_mask_2 = core.std.Expr(edge_mask_2, [exprY] if GRAY else [exprY,""]) if w > 1100: edge_mask_2 = core.rgvs.RemoveGrain(edge_mask_2, [20] if GRAY else [20,0]) else: edge_mask_2 = core.rgvs.RemoveGrain(edge_mask_2, [11] if GRAY else [11,0]) edge_mask_1 = core.generic.Inflate(edge_mask_2, planes=0) elif mtype2 == 3: edge_mask_2 = core.generic.TEdge(input, planes=0) exprY = "x "+str(mthr2*multiple/5)+" <= x 2 / x 16 * ?" edge_mask_2 = core.std.Expr(edge_mask_2, [exprY] if GRAY else [exprY,""]) edge_mask_2 = core.generic.Deflate(edge_mask_2, planes=0) if w > 1100: edge_mask_2 = core.rgvs.RemoveGrain(edge_mask_2, [20] if GRAY else [20,0]) else: edge_mask_2 = core.rgvs.RemoveGrain(edge_mask_2, [11] if GRAY else [11,0]) elif mtype2 == 2: edge_mask_2 = core.msmoosh.MSharpen(input, threshold=mthr2//5, strength=0, mask=True, planes=0) elif mtype2 == 4: edge_mask_2 = core.generic.Sobel(input, min=5, max=7, planes=0) edge_mask_2 = core.generic.Inflate(edge_mask_2, planes=0) elif mtype2 == 5: edge_mask_1 = core.std.Convolution(input,[0, 0, 0, 0, 2, -1, 0, -1, 0],planes=0) edge_mask_2 = core.generic.Inflate(edge_mask_2, planes=0) else: edgemask1 = core.std.Convolution(input,[1, 1, 0, 1, 0, -1, 0, -1, -1],divisor=1,saturate=False,planes=0) edgemask2 = core.std.Convolution(input,[1, 1, 1, 0, 0, 0, -1, -1, -1],divisor=1,saturate=False,planes=0) edgemask3 = core.std.Convolution(input,[1, 0, -1, 1, 0, -1, 1, 0, -1],divisor=1,saturate=False,planes=0) edgemask4 = core.std.Convolution(input,[0, -1, -1, 1, 0, -1, 1, 1, 0],divisor=1,saturate=False,planes=0) mt = "x y max z max a max" edge_mask_2 = core.std.Expr([edgemask1,edgemask2,edgemask3,edgemask4],[mt] if GRAY else [mt,""]) exprY = "x "+str(mthr2*multiple)+" <= x 2 / x 2.639015821545 * ?" 
edge_mask_2 = core.std.Expr(edge_mask_2, [exprY] if GRAY else [exprY,""]) edge_mask_2 = core.rgvs.RemoveGrain(edge_mask_2, [4] if GRAY else [4,0]) edge_mask_2 = core.generic.Inflate(edge_mask_2, planes=0) #generate final_mask if mtype2 == 0: final_mask = edge_mask_1 else: final_mask = core.std.Expr([edge_mask_1,edge_mask_2], ["x y max"] if GRAY else ["x y max",""]) return final_mask #temporal stabilizer of sharped clip def Soothe(sharp, origin, keep=24): bits = sharp.format.bits_per_sample shift = bits - 8 neutral = 128 << shift peak = (1 << bits) - 1 multiple = peak / 255 const = 100 * multiple if keep > 100: keep = 100 if keep < 0: keep = 0 KP = keep*multiple mt1 = 'x y - {neutral} +'.format(neutral=neutral) diff = core.std.Expr(clips=[origin,sharp], expr=[mt1]) diff2 = core.focus.TemporalSoften(diff, radius=1, luma_threshold=255, chroma_threshold=255, scenechange=32, mode=2) expr = 'x {neutral} - y {neutral} - * 0 < x {neutral} - {const} / {KP} * {neutral} + x {neutral} - abs y {neutral} - abs > x {KP} * y {const} {KP} - * + {const} / x ? ?'.format(neutral=neutral, const=const, KP=KP) diff3 = core.std.Expr(clips=[diff,diff2], expr=[expr]) mt2 = 'x y {neutral} - -'.format(neutral=neutral) return core.std.Expr(clips=[origin,diff3], expr=[mt2]) #internal functions def TAAmbk_stabilize(input, aaedsharp, stabilize): aadiff = core.std.MakeDiff(Depth(input,16), aaedsharp) if(stabilize < 0): aadiff_stab = core.rgvs.Repair(core.focus.TemporalSoften(aadiff,abs(stabilize), 255, 255, 254, 2),aadiff,4) else: inputsuper = core.mv.Super(input,pel=1) diffsuper = core.mv.Super(aadiff,pel=1,levels=1) if stabilize == 3: fv3 = core.mv.Analyse(inputsuper,isb=False,delta=3,overlap=8,blksize=16) bv3 = core.mv.Analyse(inputsuper,isb=True,delta=3,overlap=8,blksize=16) if stabilize >= 2: fv2 = core.mv.Analyse(inputsuper,isb=False,delta=2,overlap=8,blksize=16) bv2 = core.mv.Analyse(inputsuper,isb=True,delta=2,overlap=8,blksize=16) if stabilize >= 1: fv1 = core.mv.Analyse(inputsuper,isb=False,delta=1,overlap=8,blksize=16) bv1 = core.mv.Analyse(inputsuper,isb=True,delta=1,overlap=8,blksize=16) if stabilize == 1: stabilized_diff = core.mv.Degrain1(aadiff,diffsuper,bv1,fv1) elif stabilize == 2: stabilized_diff = core.mv.Degrain2(aadiff,diffsuper,bv1,fv1,bv2,fv2) elif stabilize == 3: stabilized_diff = core.mv.Degrain3(aadiff,diffsuper,bv1,fv1,bv2,fv2,bv3,fv3) else: stabilized_diff = None bits = aadiff.format.bits_per_sample shift = bits - 8 neutral = 128 << shift peak = (1 << bits) - 1 multiple = peak / 255 mt = 'x {neutral} - abs y {neutral} - abs < x y ?'.format(neutral=neutral) aadiff_stab = core.std.Expr(clips=[aadiff,stabilized_diff], expr=[mt]) aadiff_stab = core.std.Merge(aadiff_stab, stabilized_diff, [0.6] if GRAY else [0.6,0]) aaed_stab = core.std.MakeDiff(Depth(input,16), aadiff_stab) return aaed_stab #============================== #main functions #============================== preaaC = TAAmbk_prepass(input, predown=predown, downw4=downw4, downh4=downh4, preaa=preaa) aa_clip = TAAmbk_mainpass(preaaC,aatype=aatype, cycle=cycle, p1=p1, p2=p2, p3=p3, p4=p4, p5=p5, p6=p6, w=w, h=h, uph4=uph4, upw4=upw4, eedi3sclip=eedi3sclip) #sharp if sharp == 0: aaedsp = aa_clip elif sharp >= 1: aaedsp = haf.LSFmod(aa_clip,strength=int(absSh), defaults="old", source=Depth(src,16)) elif sharp > 0: per = int(40*absSh) matrix = [-1, -2, -1, -2, 52-per , -2, -1, -2, -1] aaedsp = core.generic.Convolution(aa_clip,matrix) elif sharp > -1: aaedsp = haf.LSFmod(aa_clip,strength=round(absSh*100), defaults="fast", source=Depth(src,16)) 
elif sharp == -1: if w > 1100: clipb = core.std.MakeDiff(aa_clip, core.rgvs.RemoveGrain(aa_clip, mode=20)) else: clipb = core.std.MakeDiff(aa_clip, core.rgvs.RemoveGrain(aa_clip, mode=11)) clipb = core.rgvs.Repair(clipb, core.std.MakeDiff(Depth(src,16), aa_clip),mode=13) aaedsp = core.std.MergeDiff(aa_clip, clipb) else: aaedsp = haf.LSFmod(aa_clip,strength=int(absSh), defaults="slow", source=Depth(src,16)) #postAA if postaa: aaedsp = Soothe(aaedsp,aa_clip,keep=48) #stabilize if stabilize != 0: aaedstab = TAAmbk_stabilize(input, aaedsp, stabilize) else: aaedstab = aaedsp #masked merge if isinstance(mtype, vs.VideoNode): edge_mask = mtype aamerge = core.std.MaskedMerge(Depth(input,16),aaedstab,Depth(edge_mask,16),first_plane=True) elif mtype != 0: edge_mask = TAAmbk_mask(input, mtype=mtype, mthr=mthr, w=w, mtype2=mtype2, mthr2=mthr2, auxmthr=auxmthr) aamerge = core.std.MaskedMerge(Depth(input,16),aaedstab,Depth(edge_mask,16),first_plane=True) else: aamerge = aaedstab # output if showmask: return edge_mask else: if repair == 0 or aatype == 0: return aamerge elif(repair > 0): return core.rgvs.Repair(aamerge, Depth(input,depth=16), mode=repair) else: return core.rgvs.Repair(Depth(input,depth=16), aamerge, mode=abs(repair))
python
#!/usr/bin/env python3
from setuptools import setup

setup(
    name='asyncpgsa',
    version=__import__('asyncpgsa').__version__,
    install_requires=[
        'asyncpg~=0.9.0',
        'sqlalchemy',
    ],
    packages=['asyncpgsa', 'asyncpgsa.testing'],
    url='https://github.com/canopytax/asyncpgsa',
    license='Apache 2.0',
    author='nhumrich',
    author_email='[email protected]',
    description='sqlalchemy support for asyncpg'
)
python
import torch
import torch.nn as nn


class Ensemble(nn.Module):
    """
    Ensemble decoding.
    Decodes using multiple models simultaneously.

    Note:
        Do not use this class directly, use one of the sub classes.
    """
    def __init__(self, models):
        super(Ensemble, self).__init__()
        self.models = models
        self.num_models = len(models)

    def forward(self, *args, **kwargs):
        raise NotImplementedError


class BasicEnsemble(Ensemble):
    """
    Basic ensemble decoding.

    Decodes using multiple models simultaneously,
    combining their prediction distributions by adding.
    All models in the ensemble must share the same target characters.
    """
    def __init__(self, models):
        super(BasicEnsemble, self).__init__(models)

    def forward(self, inputs, input_lengths):
        hypothesis = None

        with torch.no_grad():
            for model in self.models:
                if hypothesis is None:
                    hypothesis = model(inputs, input_lengths, teacher_forcing_ratio=0.0)
                else:
                    hypothesis += model(inputs, input_lengths, teacher_forcing_ratio=0.0)

        return hypothesis


class WeightedEnsemble(Ensemble):
    """
    Weighted ensemble decoding.

    Decodes using multiple models simultaneously,
    combining their prediction distributions by weighted sum.
    All models in the ensemble must share the same target characters.
    """
    def __init__(self, models, dim=128):
        super(WeightedEnsemble, self).__init__(models)
        self.meta_classifier = nn.Sequential(
            nn.Linear(self.num_models, dim),
            nn.ELU(inplace=True),
            nn.Linear(dim, self.num_models)
        )

    def forward(self, inputs, input_lengths):
        hypothesis = None
        outputs = list()
        weights = torch.FloatTensor([1.] * self.num_models)

        # model`s parameters are fixed
        with torch.no_grad():
            for model in self.models:
                outputs.append(model(inputs, input_lengths, teacher_forcing_ratio=0.0))

        weights = self.meta_classifier(weights)

        for (output, weight) in zip(outputs, weights):
            if hypothesis is None:
                hypothesis = output * weight
            else:
                hypothesis += output * weight

        return hypothesis
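# --- illustrative usage sketch, not part of the original module ---
# The toy model below stands in for the real speech-recognition models; the
# only assumption is the call signature model(inputs, input_lengths, teacher_forcing_ratio).
class _ToyModel(nn.Module):
    def __init__(self, vocab_size=10):
        super(_ToyModel, self).__init__()
        self.proj = nn.Linear(8, vocab_size)

    def forward(self, inputs, input_lengths, teacher_forcing_ratio=0.0):
        return torch.log_softmax(self.proj(inputs), dim=-1)


def _demo_ensemble():
    inputs = torch.randn(2, 5, 8)            # (batch, time, feature)
    input_lengths = torch.tensor([5, 3])
    ensemble = BasicEnsemble([_ToyModel(), _ToyModel()])
    return ensemble(inputs, input_lengths)   # summed prediction distributions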
python
# -*- coding: utf-8 -*-
from gui.shared.tooltips.module import ModuleTooltipBlockConstructor

ModuleTooltipBlockConstructor.MAX_INSTALLED_LIST_LEN = 1000
print '[LOAD_MOD]: [mod_tooltipsCountItemsLimitExtend 1.00 (11-05-2018), by spoter, gox]'
python
from .utils import validator


@validator
def ipv4(value):
    """
    Return whether or not given value is a valid IP version 4 address.

    This validator is based on `WTForms IPAddress validator`_

    .. _WTForms IPAddress validator:
       https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py

    Examples::

        >>> ipv4('123.0.0.7')
        True

        >>> ipv4('900.80.70.11')
        ValidationFailure(func=ipv4, args={'value': '900.80.70.11'})

    .. versionadded:: 0.2

    :param value: IP address string to validate
    """
    parts = value.split('.')
    if len(parts) == 4 and all(x.isdigit() for x in parts):
        numbers = list(int(x) for x in parts)
        return all(num >= 0 and num < 256 for num in numbers)
    return False


@validator
def ipv6(value):
    """
    Return whether or not given value is a valid IP version 6 address.

    This validator is based on `WTForms IPAddress validator`_.

    .. _WTForms IPAddress validator:
       https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py

    Examples::

        >>> ipv6('abcd:ef::42:1')
        True

        >>> ipv6('abc.0.0.1')
        ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})

    .. versionadded:: 0.2

    :param value: IP address string to validate
    """
    parts = value.split(':')
    if len(parts) > 8:
        return False

    num_blank = 0
    for part in parts:
        if not part:
            num_blank += 1
        else:
            try:
                value = int(part, 16)
            except ValueError:
                return False
            else:
                if value < 0 or value >= 65536:
                    return False

    if num_blank < 2:
        return True
    elif num_blank == 2 and not parts[0] and not parts[1]:
        return True
    return False
python
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""TC77: Serially accessible digital temperature sensor particularly suited
for low cost and small form-factor applications."""

__author__ = "ChISL"
__copyright__ = "TBD"
__credits__ = ["Microchip"]
__license__ = "TBD"
__version__ = "0.1"
__maintainer__ = "https://chisl.io"
__email__ = "[email protected]"
__status__ = "Test"

from TC77_constants import *

# name:        TC77
# description: Serially accessible digital temperature sensor particularly suited
#              for low cost and small form-factor applications.
# manuf:       Microchip
# version:     0.1
# url:         http://ww1.microchip.com/downloads/en/DeviceDoc/20092B.pdf
# date:        2016-08-17


# Derive from this class and implement read and write
class TC77_Base:
    """Serially accessible digital temperature sensor particularly suited for low cost and small form-factor applications."""

    # Register CONFIG
    # Select either Shutdown, Continuous Conversion or Test modes:

    def setCONFIG(self, val):
        """Set register CONFIG"""
        self.write(REG.CONFIG, val, 16)

    def getCONFIG(self):
        """Get register CONFIG"""
        return self.read(REG.CONFIG, 16)

    # Bits CONFIG

    # Register TEMP
    # holds the temperature conversion data.

    def setTEMP(self, val):
        """Set register TEMP"""
        self.write(REG.TEMP, val, 16)

    def getTEMP(self):
        """Get register TEMP"""
        return self.read(REG.TEMP, 16)

    # Bits TEMP
    #   the 13 bit two's complement data from the temperature conversion
    # Bits FLAG_COMPLETE
    #   Bit 2 is set to a logic 1 after completion of the first temperature conversion
    #   following a power-up or reset event. Bit 2 is set to a logic 0 during the time
    #   needed to complete the first temperature conversion. Therefore, the status of
    #   bit 2 can be monitored to indicate that the TC77 has completed the first
    #   temperature conversion.
    # Bits unused_0
    #   Bits 0 and 1 are undefined and will be tri-state outputs during a read sequence.

    # Register M_ID
    # Manufacture's identification code

    def setM_ID(self, val):
        """Set register M_ID"""
        self.write(REG.M_ID, val, 16)

    def getM_ID(self):
        """Get register M_ID"""
        return self.read(REG.M_ID, 16)

    # Bits ID
    # Bits unused_0
    #   bits 7:2 are set to 0
    # Bits unused_1
    #   Bits 1:0 are undefined and will be tri-state outputs during a read sequence
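# --- illustrative sketch, not part of the generated driver ---
# TC77_Base expects a subclass to supply read/write; the in-memory stub below
# is an assumption used only to show the call pattern, not a real SPI transport.
class TC77_Dummy(TC77_Base):
    def __init__(self):
        self.regs = {}

    def write(self, reg, val, n_bits):
        self.regs[reg] = val

    def read(self, reg, n_bits):
        return self.regs.get(reg, 0)


# sensor = TC77_Dummy()
# sensor.setCONFIG(0x0000)   # continuous conversion mode
# raw = sensor.getTEMP()     # 13-bit two's complement reading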
python
file = open('Day 10 input.txt', 'r')
# file = open('Advent-of-Code-2021\\Day 10 testin.txt','r')

illegal = [0, 0, 0, 0]
completescores = []

for line in file:
    line = line.strip()
    illegalflag = False
    stack = []
    for char in line:
        if ((ord(char) == 40) or (ord(char) == 91) or (ord(char) == 123) or (ord(char) == 60)):
            stack.append(char)
            continue
        opener = stack.pop()
        if ((ord(opener) == 40) and (ord(char) == 41)):
            continue
        if ((ord(opener) == 91) and (ord(char) == 93)):
            continue
        if ((ord(opener) == 123) and (ord(char) == 125)):
            continue
        if ((ord(opener) == 60) and (ord(char) == 62)):
            continue
        if (ord(char) == 41):
            illegal[0] = illegal[0] + 1
            illegalflag = True
            break
        if (ord(char) == 93):
            illegal[1] = illegal[1] + 1
            illegalflag = True
            break
        if (ord(char) == 125):
            illegal[2] = illegal[2] + 1
            illegalflag = True
            break
        if (ord(char) == 62):
            illegal[3] = illegal[3] + 1
            illegalflag = True
            break
    if (illegalflag == True):
        continue
    completescore = 0
    while not (stack == []):
        item = stack.pop()
        completescore = completescore * 5
        if (ord(item) == 40):
            completescore = completescore + 1
            continue
        if (ord(item) == 91):
            completescore = completescore + 2
            continue
        if (ord(item) == 123):
            completescore = completescore + 3
            continue
        if (ord(item) == 60):
            completescore = completescore + 4
            continue
    completescores.append(completescore)

print(sorted(completescores)[len(completescores)//2])
python
import numpy as np
import pyautogui


def screenshot(bounds=None):
    image = pyautogui.screenshot()
    open_cv_image = np.array(image)
    open_cv_image = open_cv_image[:, :, ::-1]
    if bounds is not None:
        x = bounds[0]
        y = bounds[1]
        open_cv_image = open_cv_image[x[0]:x[1], y[0]:y[1]]
    return open_cv_image
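# --- illustrative usage sketch, not part of the original module ---
# The bounds format is inferred from the slicing above:
# ((row_start, row_end), (col_start, col_end)) in pixels - that reading is an
# assumption, not documented behaviour.
def _demo_screenshot():
    full = screenshot()                         # whole screen as a BGR ndarray
    region = screenshot(((0, 200), (0, 300)))   # 200x300-pixel top-left crop
    print(full.shape, region.shape)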
python
from pathlib import PurePath
from typing import Dict, List

from lab import util
from lab.logger import internal

from .indicators import Indicator, Scalar
from .writers import Writer


class Store:
    indicators: Dict[str, Indicator]

    def __init__(self, logger: 'internal.LoggerInternal'):
        self.values = {}
        # self.queues = {}
        # self.histograms = {}
        # self.pairs: Dict[str, List[Tuple[int, int]]] = {}
        # self.scalars = {}
        self.__logger = logger
        self.indicators = {}
        self.__indicators_file = None

    def save_indicators(self, file: PurePath):
        self.__indicators_file = file

        indicators = {k: ind.to_dict() for k, ind in self.indicators.items()}
        with open(str(file), "w") as file:
            file.write(util.yaml_dump(indicators))

    def add_indicator(self, indicator: Indicator):
        """
        ### Add an indicator
        """
        assert indicator.name not in self.indicators
        self.indicators[indicator.name] = indicator
        indicator.clear()

        if self.__indicators_file is not None:
            self.save_indicators(self.__indicators_file)

    def _store_list(self, items: List[Dict[str, float]]):
        for item in items:
            self.store(**item)

    def _store_kv(self, k, v):
        if k not in self.indicators:
            self.__logger.add_indicator(Scalar(k, True))

        self.indicators[k].collect_value(v)

    def _store_kvs(self, **kwargs):
        for k, v in kwargs.items():
            self._store_kv(k, v)

    def store(self, *args, **kwargs):
        """
        ### Stores a value in the logger.

        This may be added to a queue, a list or stored as
        a TensorBoard histogram depending on the
        type of the indicator.
        """
        assert len(args) <= 2

        if len(args) == 0:
            self._store_kvs(**kwargs)
        elif len(args) == 1:
            assert not kwargs
            assert isinstance(args[0], list)
            self._store_list(args[0])
        elif len(args) == 2:
            assert isinstance(args[0], str)
            if isinstance(args[1], list):
                for v in args[1]:
                    self._store_kv(args[0], v)
            else:
                self._store_kv(args[0], args[1])

    def clear(self):
        for k, v in self.indicators.items():
            v.clear()

    def write(self, writer: Writer, global_step):
        return writer.write(global_step=global_step,
                            indicators=self.indicators)
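# --- illustrative usage sketch, not part of the original module ---
# `store` accepts three call forms; the indicator names "loss" / "accuracy"
# are made up, and `store`/`writer` are assumed to be an already-wired Store
# and Writer instance from this package.
#
#     store.store(loss=0.25, accuracy=0.9)            # keyword form
#     store.store("loss", 0.25)                       # key/value form
#     store.store([{"loss": 0.25}, {"loss": 0.23}])   # list-of-dicts form
#     store.write(writer, global_step=100)            # flush to the writer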
python
# Write a function that reverses the elements of the given list.
# If the elements of the list are themselves lists, reverse their elements as well.
# For example:
# input: [[1, 2], [3, 4], [5, 6, 7]]
# output: [[7, 6, 5], [4, 3], [2, 1]]

liste = [[1, 2], [3, 4], [5, 6, 7]]
liste.reverse()
for l in liste:
    l.reverse()
print(liste)
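# A possible function version of the same exercise (an addition, not part of the
# original solution): it reverses arbitrarily nested lists recursively, whereas
# the in-place loop above only handles one level of nesting.
def deep_reverse(items):
    reversed_items = []
    for item in reversed(items):
        if isinstance(item, list):
            reversed_items.append(deep_reverse(item))
        else:
            reversed_items.append(item)
    return reversed_items


print(deep_reverse([[1, 2], [3, [4, 5]], 6]))  # [6, [[5, 4], 3], [2, 1]]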
python
import discord
from discord.ext import commands
from typing import Union

from CatLampPY import isGuild, hasPermissions, CommandErrorMsg  # pylint: disable=import-error


class Moderation(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.bot.cmds.append(self.purge)
        self.bot.cmds.append(self.kick)
        self.bot.cmds.append(self.ban)
        self.bot.cmds.append(self.unban)

    async def gf_user(self, user_id: int):
        user = self.bot.get_user(user_id)
        if not user:
            try:
                user = await self.bot.fetch_user(user_id)
            except discord.NotFound:
                raise CommandErrorMsg(f'No user with the ID {user_id} was found!')
        return user

    @commands.command(aliases=["bulkDelete"])
    @isGuild()
    @hasPermissions("manage_messages")
    async def purge(self, ctx, number_of_messages: int):
        """Purges a certain amount of messages up to 100. Only works in servers."""
        if number_of_messages <= 0:
            raise CommandErrorMsg("I need at least 1 message to purge!")
        elif number_of_messages > 100:
            raise CommandErrorMsg("I can't purge more than 100 messages at a time!")
        await ctx.message.delete()
        msgsDeleted = await ctx.channel.purge(limit=number_of_messages)
        msg = await ctx.send(f"Deleted {len(msgsDeleted)} messages.")
        try:
            await msg.delete(delay=5)
        except discord.NotFound:
            pass

    @commands.command(cooldown_after_parsing=True)
    @commands.cooldown(1, 10, commands.BucketType.member)
    @hasPermissions("kick_members")
    async def kick(self, ctx, member: discord.Member, reason: str = "No reason specified."):
        """Kick a user with an optional reason. Requires the Kick Members permission."""
        if member.id == self.bot.user.id:
            await ctx.send(":(")
            return
        elif member.id == ctx.guild.owner.id:
            raise CommandErrorMsg("I can't kick the server owner!")
        try:
            await ctx.guild.kick(member, reason=f"Kicked by {str(ctx.author)} ({ctx.author.id}) with reason: '{reason}'")
        except discord.Forbidden:
            raise CommandErrorMsg("I'm not high enough in the role hierarchy to kick that person!")
        await ctx.send(f"{member.mention} ({str(member)}) has been kicked from the server with reason: '{reason}'")

    @commands.command(cooldown_after_parsing=True)
    @commands.cooldown(1, 10, commands.BucketType.member)
    @hasPermissions("ban_members")
    async def ban(self, ctx, user: Union[discord.User, int], reason: str = "No reason specified.",
                  days_of_messages_to_delete: int = 0):
        """Ban a user (including someone not in the server) with an optional reason and days of messages to delete.
        Requires the Ban Members permission."""
        if isinstance(user, int):
            user = await self.gf_user(user)
        try:
            await ctx.guild.fetch_ban(user)
            # Since an exception wasn't raised, a ban for this user already exists.
            await ctx.send("That user is already banned!")
            return
        except discord.NotFound:
            if user.id == self.bot.user.id:
                await ctx.send(":(")
                return
            try:
                await ctx.guild.ban(user,
                                    reason=f"Banned by {str(ctx.author)} "
                                           f"({ctx.author.id}) with reason: '{reason}'",
                                    delete_message_days=days_of_messages_to_delete)
            except discord.Forbidden:
                raise CommandErrorMsg("I'm not high enough in the role hierarchy to ban that person!")
            await ctx.send(f"{user.mention} ({str(user)}) has been banned from the server with reason: '{reason}'")

    @commands.command(cooldown_after_parsing=True)
    @commands.cooldown(1, 10, commands.BucketType.member)
    @hasPermissions("ban_members")
    async def unban(self, ctx, user: Union[discord.User, int]):
        """Unbans a user. Requires the Ban Members permission."""
        if isinstance(user, int):
            user = await self.gf_user(user)
        try:
            # This is to check if the user is actually banned.
            # If the user is not banned, fetch_ban will raise NotFound.
            await ctx.guild.fetch_ban(user)
            await ctx.guild.unban(
                user, reason=f'Unbanned by {ctx.author} ({ctx.author.id})'
            )
            await ctx.send(f'{user.mention} ({user}) has been unbanned from the server.')
        except discord.NotFound:
            raise CommandErrorMsg("That user is not banned!")


def setup(bot):
    bot.add_cog(Moderation(bot))
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Low-level feature detection including: Canny, corner Harris, Hough line,
Hough circle, good feature to track, etc.
"""

from __future__ import annotations
python
from dataclasses import dataclass
from typing import Optional

from pyhcl.core._repr import CType
from pyhcl.ir import low_ir


@dataclass(eq=False, init=False)
class INT(CType):
    v: int

    def __init__(self, v: int):
        self.v = int(v)

    @property
    def orR(self):
        return Bool(not not self.v)


class UInit(type):
    def __call__(cls, v: int):
        return U.w(max(v.bit_length(), 1))(v)


class U(CType, metaclass=UInit):
    def __init__(self, _: int):
        pass

    @staticmethod
    def _lowWidth(width: Optional[int] = None):
        return low_ir.IntWidth(width) if width is not None else None

    @staticmethod
    def w(width: Optional[int] = None):
        """
        Return a UInt type with assigned width
        If width is not given, it would be inferred
        """

        @classmethod
        def _flip(cls):
            cls.field = low_ir.Flip()
            return cls

        def _mapToIR(_, __=None):
            # If caller is UInt Type, it would call `mapToIR(ctx)`
            # Or caller is UInt Literal, it would call `mapToIR(literal, ctx)`
            if __ is not None:
                return low_ir.UIntLiteral(_.v, U._lowWidth(width))
            else:
                return low_ir.UIntType(U._lowWidth(width))

        def _idxType(_=None):
            return U.w(1)

        uk = type(f"U?", (INT,), {"mapToIR": _mapToIR, "getIndexedType": _idxType})
        uk.typ = uk
        if width is not None:
            t = type(f"U{width}", (INT,), {"width": width, "mapToIR": _mapToIR,
                                           "getIndexedType": _idxType,
                                           "field": low_ir.Default(), "flip": _flip})
            t.typ = uk
            return t
        else:
            return uk


Bool = U.w(1)


class SInit(type):
    def __call__(cls, v: int):
        return S.w(v.bit_length() + 1)(v)


class S(CType, metaclass=SInit):
    def __init__(self, _: int):
        pass

    @staticmethod
    def _lowWidth(width: Optional[int] = None):
        return low_ir.IntWidth(width) if width is not None else None

    @staticmethod
    def w(width: Optional[int] = None):
        """
        Return an SInt type with assigned width
        If width is not given, it would be inferred
        """

        def _mapToIR(_, __=None):
            # If caller is SInt Type, it would call `mapToIR(ctx)`
            # Or caller is SInt Literal, it would call `mapToIR(literal, ctx)`
            if __ is not None:
                return low_ir.SIntLiteral(_.v, S._lowWidth(width))
            else:
                return low_ir.SIntType(S._lowWidth(width))

        def _idxType():
            return S.w(1)

        uk = type(f"S?", (INT,), {"mapToIR": _mapToIR, "getIndexedType": _idxType})
        uk.typ = uk
        if width is not None:
            t = type(f"S{width}", (INT,), {"width": width, "mapToIR": _mapToIR,
                                           "getIndexedType": _idxType})
            t.typ = uk
            return t
        else:
            return uk


class Clock(CType):
    def mapToIR(self, ctx):
        return low_ir.ClockType()
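# --- illustrative usage sketch, not part of the original module ---
# Shows how the width-inferring constructors behave; only the Python-side
# wrappers are exercised here, nothing is lowered to FIRRTL, so no elaboration
# context is needed.
def _demo_int_types():
    a = U(5)         # width inferred: 5 needs 3 bits, so the type name is "U3"
    b = U.w(8)(5)    # explicit 8-bit unsigned literal with the same value
    c = S(-3)        # signed literal; SInit adds one extra bit for the sign
    return a.v, type(a).__name__, type(b).__name__, type(c).__name__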
python
""" Main Methods are declared here """ from picocv._settings import Settings from picocv.utils.train import Trainer from picocv.utils.augment import DatasetAugmenter def autoCorrect(model_func, dataset_func, settings : Settings): """ Performs Auto Correct Algorithm (Main Method) :param model_func: Function that returns Custom Model Class (torch.nn.Module) :param dataset_func: Function that returns Custom Dataset Class (torch.utils.data.Dataset) :param settings: Picocv Settings (picocv.Settings) :return: None """ # Validate Settings assert settings.validate(), 'Update to Valid Settings Variables!!' # Initialize Dataset Augmenter dataset_augmenter = DatasetAugmenter(dataset_func=dataset_func, settings=settings) input_string = input('\nContinue? (Y/n)') if input_string == 'Y': # Start Pico Algorithm for iteration in range(settings.n_iter): print('[{current_iteration}/{total_iteration}] Starting {current_iteration}-th Iteration...'.format(current_iteration=iteration + 1, total_iteration=settings.n_iter)) for segment_id in range(dataset_augmenter.N_SEGMENT): print('Start Training Checker-[{segment_id}]'.format(segment_id=segment_id)) segment_dataset = dataset_augmenter.get_dataset(iteration_id=iteration, segment_id=segment_id) # returned segmented dataset trainer = Trainer(model_func=model_func, dataset=segment_dataset, settings=settings) # initialize trainer print('finished')
python
from .run import wait, load __all__ = ['wait', 'load']
python
# Admin panel: course editor section
# Endpoints of the DSh course editor menu for the current academic year
path_admin_schedules_grade_1 = '/schedules?grade=1&school=true&'
path_admin_schedules_grade_2 = '/schedules?grade=2&school=true&'
path_admin_schedules_grade_3 = '/schedules?grade=3&school=true&'
path_admin_schedules_grade_4 = '/schedules?grade=4&school=true&'
path_admin_schedules_grade_5 = '/schedules?grade=5&school=true&'
path_admin_schedules_grade_6 = '/schedules?grade=6&school=true&'
path_admin_schedules_grade_7 = '/schedules?grade=7&school=true&'
path_admin_schedules_grade_8 = '/schedules?grade=8&school=true&'
path_admin_schedules_grade_9 = '/schedules?grade=9&school=true&'
path_admin_schedules_grade_10 = '/schedules?grade=10&school=true&'
path_admin_schedules_grade_11 = '/schedules?grade=11&school=true&'

# Attaching/removing a subject
path_admin_add_subject = '/schedules?'
path_admin_delete_subject = '/schedules/5583026?'

# Subject editing section
path_admin_item_editor = '/schedule_items.json?schedule_id=3908531&'  # open the subject editor
path_admin_add_topic = '/topics?'  # add a topic
path_admin_add_lesson = 'lessons.json?'  # create a new lesson
path_admin_lesson_for_day = '/schedule_items.json?'  # bind a lesson to a date
path_admin_remove_lesson = '/lessons/37865.json?'  # delete a lesson
path_admin_remove_topic = '/topics/24273?addLessonHide=true&addLessonNameEvent=click&calendarActive=false&editTopicNameHide=true&lessonsHide=false&name=тест&schedule_id=3908531&subject_id=201&'
path_admin_save_date_ege = '/schedules/3908531?'  # save the EGE exam date

# Monthly homework (MDZ) editor
path_admin_monthly_homework_editor = '/monthly_homeworks?schedule_id=3908531&'  # open the MDZ editor
path_admin_create_monthly_homework = '/monthly_homeworks?'  # create an MDZ
path_admin_delete_monthly_homework = '/monthly_homeworks/7229?'  # delete an MDZ

# EGE course editor endpoints
path_admin_editor_ege = '/schedules?grade=11&school=false&'  # open the EGE editor
path_admin_attach_subject_ege = '/schedules?'  # attach an EGE subject
path_admin_delete_subject_ege = '/schedules/5583707?'  # remove an EGE subject
path_admin_add_topic = '/topics?'  # add a topic

def __init__(self, token=None):
    self.token = token

def get_token(self):
    headers_user = {
        "Authorization": self.access_token,
    }
    return headers_user
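A hedged sketch of how these path constants are typically used: joined onto an API host and sent with the Authorization header. The host and token below are placeholders, not values from the project.

import requests

BASE_URL = "https://example-lms.test/api"      # placeholder host
headers = {"Authorization": "Bearer <token>"}  # placeholder token

resp = requests.get(BASE_URL + path_admin_schedules_grade_1, headers=headers)
print(resp.status_code)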
python
import os import re import sys sys.path.append(os.path.dirname(__file__)) import nb_file_util as fu class SymbolLister(fu.CellProcessorBase): def calls_sympy_symbol(self): """ if symbol definition line included, return the line numbers and the contents in a list :return: list of dict('line_number':int, 'source':str]) """ # TODO : What if import sympy # TODO : Consider using ast module result = [] if self.is_code(): if self.has_source(): for line_number, source_line in enumerate(self.cell['source'].splitlines()): if ('sy.symbols' in source_line) or ('sy.Symbol' in source_line): result.append({'line number': line_number, 'source': source_line}) return result def process_cell(self): return self.calls_sympy_symbol() class SymbolConverter(SymbolLister): """ sy.symbols('L_AB_m', real=True, nonnegative=True) -> sy.symbols('L_{AB}[m]', real=True, nonnegative=True) sy.symbols('w0_N_m', real=True) -> sy.symbols('w0[N/m]', real=True) "L_AB_m, L_AC_m = sy.symbols('L_AB_m, L_AC_m', real=True, nonnegative=True)" -> [find symbol location] -> 'L_AB_m, L_AC_m' -> 'L_AB_m' -> [wrap_symbol_name] -> 'L_{AB}_{m}' -> 'L_{AB}[m]' """ units_set = {'m', 'mm', 'mm3', 'm2', 'm3', 'm4', 'deg', 'rad', 'N', 'Nm', 'N_m', 'Pa', 'MPa', 'm_s2', 'kg'} def __init__(self): super().__init__() self.conversion_table_dict = self.unit_underline_wrap_bracket() self.secondary_table_dict = self.make_secondary_table() self.re_split = self.prepare_split_rule() @staticmethod def make_secondary_table(): return { '_{N}[m]': '[N/m]', '_{N}[mm]': '[N/mm]', '_{N}[m^{2}]': '[N/m^{2}]', '_{N}[mm^{2}]': '[N/mm^{2}]', } @staticmethod def prepare_split_rule(): return re.compile(r'[, ]') @staticmethod def wrap_symbol_name(symbol_name): """ Wrap '_' separated symbol name parts with '{}' :param str symbol_name: :return: Example >>> cp = SymbolConverter() >>> cp.wrap_symbol_name('L_AB_m') 'L_{AB}_{m}' """ symbol_name_split_under_line = symbol_name.split('_') if 1 < len(symbol_name_split_under_line): symbol_name_underline_wrapped = [symbol_name_split_under_line[0]] for part in symbol_name_split_under_line[1:]: symbol_name_underline_wrapped.append('{%s}' % part) symbol_name = '_'.join(symbol_name_underline_wrapped) return symbol_name def unit_underline_wrap_bracket(self): """ '_{m_s2}': '[m/s^{2}]' '_{N_m}': '[N/m]' :return: dictionary :rtype dict """ conversion_table_dict = {} for unit in self.units_set: key = '_{%s}' % unit value = '[%s]' % unit.replace('_', '/').replace('4', '^{4}').replace('3', '^{3}').replace('2', '^{2}') conversion_table_dict[key] = value return conversion_table_dict def process_cell(self): source_lines = self.cell['source'].splitlines() symbol_list = self.calls_sympy_symbol() # [{'line number': int, 'source': str}] for symbol_line in symbol_list: converted_line = self.process_line(symbol_line['source']) # replace the source code with the new line source_lines[symbol_line['line number']] = converted_line converted_source_code = '\n'.join(source_lines) if self.cell['source'] and '\n' == self.cell['source'][-1]: converted_source_code += '\n' # update cell self.cell['source'] = converted_source_code def process_line(self, source_line): """ SymbolConverter.process_line() Find SymPy """ symbol_names_location = self.find_symbol_name_location(source_line) symbol_names_str = source_line[symbol_names_location[0]:symbol_names_location[1]] symbol_names_list = filter(lambda x: bool(x), [symbol_name.strip() for symbol_name in self.re_split.split(symbol_names_str)]) converted_symbol_names_list = [self.process_symbol_name(symbol_name) for 
symbol_name in symbol_names_list] converted_symbol_names_str = ', '.join(converted_symbol_names_list) converted_source_line = (source_line[:symbol_names_location[0]] + converted_symbol_names_str + source_line[symbol_names_location[1]:]) return converted_source_line def process_symbol_name(self, symbol_name): result = {symbol_name:symbol_name} wrapped = self.wrap_symbol_name(symbol_name) # first conversion layer : for majority of cases result.update(self.apply_lookup_table(wrapped, symbol_name)) # second conversion layer : for N/m, N/m^{2} cases result.update(self.apply_lookup_table(result[symbol_name], symbol_name, self.secondary_table_dict)) return result[symbol_name] def find_symbol_name_location(self, source_line): """ :param str source_line: :return: (int, int) >>> cp = SymbolConverter() >>> source_line = "L_AB_m = sy.symbols('L_AB_m', real=True, nonnegative=True)" >>> result = cp.find_symbol_name_location(source_line) >>> source_line[result[0]:result[1]] 'L_AB_m' >>> source_line = "L_AB_m = sy.Symbol('L_AB_m', real=True, nonnegative=True)" >>> result = cp.find_symbol_name_location(source_line) >>> source_line[result[0]:result[1]] 'L_AB_m' "'" """ first_attempt = re.search(r'.*\.(Symbol|symbols)\s*\([\'\"]', source_line) quote = source_line[first_attempt.regs[0][1] - 1] quote_pattern = chr(92) + quote # backslash + ['"] second_attempt = re.search(r'.*\.(Symbol|symbols)\s*\(' + quote_pattern + r'(.+?)' + quote_pattern, source_line) if first_attempt: start = first_attempt.regs[0][1] end = second_attempt.regs[0][1] - 1 result = (start, end) else: result = None return result def apply_lookup_table(self, text_to_apply, original_symbol_name, lookup_table_dict=None): if lookup_table_dict is None: lookup_table_dict = self.conversion_table_dict new_small_dict = {} # lookup table loop for to_be_replaced in lookup_table_dict: if text_to_apply.endswith(to_be_replaced): new_small_dict[original_symbol_name] = text_to_apply.replace(to_be_replaced, lookup_table_dict[to_be_replaced]) # if lookup table original_symbol_name found, break lookup table loop break return new_small_dict class IpynbUnitConverter(fu.FileProcessor): def __init__(self, nb_filename): super().__init__(nb_filename=nb_filename, cell_processor=SymbolConverter()) def symbol_lines_in_file(input_file_name): sc = SymbolLister() file_processor = fu.FileProcessor(input_file_name, sc) result = file_processor.process_nb_file() return result
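A worked example of the conversion implemented above, mirroring the class docstring and doctests (which construct SymbolConverter with no arguments, so the same is assumed here):

cp = SymbolConverter()
print(cp.wrap_symbol_name('L_AB_m'))     # -> 'L_{AB}_{m}'
print(cp.process_symbol_name('L_AB_m'))  # -> 'L_{AB}[m]'
print(cp.process_symbol_name('w0_N_m'))  # -> 'w0[N/m]'  (second lookup layer)

line = "L_AB_m, L_AC_m = sy.symbols('L_AB_m, L_AC_m', real=True)"
print(cp.process_line(line))
# -> "L_AB_m, L_AC_m = sy.symbols('L_{AB}[m], L_{AC}[m]', real=True)"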
python
class ParserListener: def update(self, phase, row): """ Called when the parser has parsed a new record. """ pass def handle(self, event, message, groups): """ Called when the parser has parsed a registered event. """ pass def registerKey(self, phase, key): """ Called when a new key was found in the log data. """ pass def parsingFinished(self): """ Called when the parser has processed all available streams. """ pass
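A minimal concrete listener built on the interface above, just to show the intended call pattern: it counts parsed records per phase and remembers the keys it has seen.

from collections import defaultdict

class CountingListener(ParserListener):
    def __init__(self):
        self.row_counts = defaultdict(int)
        self.keys = set()

    def update(self, phase, row):
        self.row_counts[phase] += 1

    def registerKey(self, phase, key):
        self.keys.add((phase, key))

    def parsingFinished(self):
        print(dict(self.row_counts), len(self.keys), "keys seen")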
python
import math import matplotlib.pyplot as plt from .Generaldistribution import Distribution class Binomial(Distribution): """ Binomial distribution class for calculating and visualizing a Binomial distribution. Attributes: mean (float) representing the mean value of the distribution stdev (float) representing the standard deviation of the distribution data_list (list of floats) a list of floats to be extracted from the data file p (float) representing the probability of an event occurring n (int) number of trials """ def __init__(self, file_name='name'): Distribution.__init__(self, file_name) self.n = len(self.data) self.p = 1.0 * sum(self.data) / len(self.data) def calculate_mean(self): """Function to calculate the mean from p and n Args: None Returns: float: mean of the data set """ self.mean = self.p * self.n return self.mean def calculate_stdev(self): """Function to calculate the standard deviation from p and n. Args: None Returns: float: standard deviation of the data set """ self.stdev = math.sqrt(self.n * self.p * (1 - self.p)) return self.stdev def extract_stats_from_data(self): """Function to calculate p, n from the data set Args: None Returns: None """ self.n = len(self.data) self.p = 1.0 * sum(self.data) / len(self.data) def plot_bar(self): """Function to output a bar chart of the number of successes and failures using matplotlib pyplot library. Args: None Returns: None """ self.extract_stats_from_data() plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n]) plt.title('Number of successes (1) and failures (0) ') plt.xlabel('outcome') plt.ylabel('count') plt.show() def pmf(self, k): """Probability mass function calculator for the binomial distribution. Args: k (natural number): number of successes Returns: float: probability mass function output """ if ((isinstance(k,int) == False) or (k < 0)): print ("k (the argumnet of pmf) needs to be a non-negative integer") exit() self.extract_stats_from_data() a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k))) b = (self.p ** k) * (1 - self.p) ** (self.n - k) return a * b def plot_bar_pmf(self): """Function to plot the pmf of the binomial distribution Args: None Returns: list: x values for the pdf plot list: y values for the pdf plot """ x = [] y = [] self.extract_stats_from_data() # calculate the x values to visualize for i in range(self.n + 1): x.append(i) y.append(self.pmf(i)) # make the plots plt.bar(x, y) plt.title('Distribution of Outcomes') plt.ylabel('Probability Mass Function') plt.xlabel('Number of successes (k)') plt.show() return def __repr__(self): """Function to output the parameters of the Binomial instance Args: None Returns: string: characteristics of the Binomial """ self.extract_stats_from_data() return "Number of trials {}, success propability for each trial {} ".\ format(self.n, round(self.p, 2))
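The moment and pmf formulas used above can be sanity-checked with nothing but the standard library; for n = 20 fair-coin trials the numbers below are exact or easy to verify by hand.

import math

n, p, k = 20, 0.5, 10
mean = n * p                              # 10.0
stdev = math.sqrt(n * p * (1 - p))        # ~2.236
pmf_k = (math.factorial(n) / (math.factorial(k) * math.factorial(n - k))
         * p ** k * (1 - p) ** (n - k))   # C(20, 10) / 2**20 ~ 0.1762
print(mean, round(stdev, 3), round(pmf_k, 4))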
python
def f(x): y = x return f(y) f(0)
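As written, f has no base case, so the call above never returns normally; a self-contained version showing what actually happens in CPython:

import sys

def f(x):
    y = x
    return f(y)

try:
    f(0)
except RecursionError as exc:
    print("hit the recursion limit of", sys.getrecursionlimit(), "-", exc)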
python
import os import sys root_path = os.path.abspath("../../../") if root_path not in sys.path: sys.path.append(root_path) import numpy as np import tensorflow as tf from _Dist.NeuralNetworks.DistBase import Base, AutoBase, AutoMeta, DistMixin, DistMeta class LinearSVM(Base): def __init__(self, *args, **kwargs): super(LinearSVM, self).__init__(*args, **kwargs) self._name_appendix = "LinearSVM" self.c = None def init_from_data(self, x, y, x_test, y_test, sample_weights, names): super(LinearSVM, self).init_from_data(x, y, x_test, y_test, sample_weights, names) metric = self.model_param_settings.setdefault("metric", "binary_acc") if metric == "acc": self.model_param_settings["metric"] = "binary_acc" self.n_class = 1 def init_model_param_settings(self): self.model_param_settings.setdefault("lr", 0.01) self.model_param_settings.setdefault("n_epoch", 10 ** 3) self.model_param_settings.setdefault("max_epoch", 10 ** 6) super(LinearSVM, self).init_model_param_settings() self.c = self.model_param_settings.get("C", 1.) def _build_model(self, net=None): self._model_built = True if net is None: net = self._tfx current_dimension = net.shape[1].value self._output = self._fully_connected_linear( net, [current_dimension, 1], "_final_projection" ) def _define_loss_and_train_step(self): self._loss = self.c * tf.reduce_sum( tf.maximum(0., 1 - self._tfy * self._output) ) + tf.nn.l2_loss(self._ws[0]) with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): self._train_step = self._optimizer.minimize(self._loss) def _get_feed_dict(self, x, y=None, weights=None, is_training=False): if y is not None: y[y == 0] = -1 return super(LinearSVM, self)._get_feed_dict(x, y, weights, is_training) def predict_classes(self, x): return (self._calculate(x, tensor=self._output, is_training=False) >= 0).astype(np.int32) class SVM(LinearSVM): def __init__(self, *args, **kwargs): super(SVM, self).__init__(*args, **kwargs) self._name_appendix = "SVM" self._p = self._gamma = None self._x = self._gram = self._kernel_name = None @property def kernel(self): if self._kernel_name == "linear": return self.linear if self._kernel_name == "poly": return lambda x, y: self.poly(x, y, self._p) if self._kernel_name == "rbf": return lambda x, y: self.rbf(x, y, self._gamma) raise NotImplementedError("Kernel '{}' is not implemented".format(self._kernel_name)) @staticmethod def linear(x, y): return x.dot(y.T) @staticmethod def poly(x, y, p): return (x.dot(y.T) + 1) ** p @staticmethod def rbf(x, y, gamma): return np.exp(-gamma * np.sum((x[..., None, :] - y) ** 2, axis=2)) def init_from_data(self, x, y, x_test, y_test, sample_weights, names): self._x, y = np.atleast_2d(x).astype(np.float32), np.asarray(y, np.float32) self._p = self.model_param_settings.setdefault("p", 3) self._gamma = self.model_param_settings.setdefault("gamma", 1 / self._x.shape[1]) self._kernel_name = self.model_param_settings.setdefault("kernel_name", "rbf") self._gram, x_test = self.kernel(self._x, self._x), self.kernel(x_test, self._x) super(SVM, self).init_from_data(self._gram, y, x_test, y_test, sample_weights, names) def init_model_param_settings(self): super(SVM, self).init_model_param_settings() self._p = self.model_param_settings["p"] self._gamma = self.model_param_settings["gamma"] self._kernel_name = self.model_param_settings["kernel_name"] def _define_py_collections(self): super(SVM, self)._define_py_collections() self.py_collections += ["_x", "_gram"] def _define_loss_and_train_step(self): self._loss = self.c * tf.reduce_sum(tf.maximum(0., 1 - self._tfy * 
self._output)) + 0.5 * tf.matmul( self._ws[0], tf.matmul(self._gram, self._ws[0]), transpose_a=True )[0] with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): self._train_step = self._optimizer.minimize(self._loss) def _evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None): n_sample = self._x.shape[0] cv_feat_dim = None if x_cv is None else x_cv.shape[1] test_feat_dim = None if x_test is None else x_test.shape[1] x_cv = None if x_cv is None else self.kernel(x_cv, self._x) if cv_feat_dim != n_sample else x_cv x_test = None if x_test is None else self.kernel(x_test, self._x) if test_feat_dim != n_sample else x_test return super(SVM, self)._evaluate(x, y, x_cv, y_cv, x_test, y_test) def predict(self, x): # noinspection PyTypeChecker return self._predict(self.kernel(x, self._x)) def predict_classes(self, x): return (self.predict(x) >= 0).astype(np.int32) def evaluate(self, x, y, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None): return self._evaluate(self.kernel(x, self._x), y, x_cv, y_cv, x_test, y_test, metric) class AutoLinearSVM(AutoBase, LinearSVM, metaclass=AutoMeta): pass class DistLinearSVM(AutoLinearSVM, DistMixin, metaclass=DistMeta): pass
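A standalone check of the RBF kernel used above: the broadcasting in SVM.rbf builds the full Gram matrix K[i, j] = exp(-gamma * ||x_i - y_j||^2) in one expression.

import numpy as np

def rbf(x, y, gamma):
    return np.exp(-gamma * np.sum((x[..., None, :] - y) ** 2, axis=2))

x = np.array([[0.0, 0.0], [1.0, 0.0]])
y = np.array([[0.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
K = rbf(x, y, gamma=0.5)
print(K.shape)         # (2, 3): one row per sample in x, one column per sample in y
print(np.round(K, 4))  # K[0, 0] == 1.0 because x[0] == y[0]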
python
#!/usr/bin/python # # Copyright 2019 Fortinet Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import time import box import os import tempfile import pickle import uuid from termcolor import colored # Custom imports from bph.core.logger import BphLogger as Logger from bph.core.constants import * from bph.core.session import BphSession as Session from bph.core.sample import BphLabFile as LabFile from bph.core.constants import * class BphTemplate: def __init__(self): self.logger = Logger(level='INFO', module=self.__module__) class BphToolTemplate(BphTemplate): def __init__(self): super().__init__() class BphToolTemplateConfiguration(BphToolTemplate): def __init__(self): super().__init__() def __locate_tool_config_file(self, *args): """ Search for the Tool config file """ self.tool_name = args[0] self.arch = args[1] self.version = args[2] self.tool_directory = None self.md5 = Session.sample_md5 self.logger.log('TemplateConfig #1: {}'.format(self.__dict__), level='DEBUG') # Detect the tool's base folder. for root, dirs, files in os.walk(BPH_PLUGIN_DIR): for directory in dirs: if self.tool_name in directory: self.logger.log('Tool Match: {}'.format(self.tool_name), level='DEBUG') tool_dir = os.path.join(root, directory, self.arch) self.logger.log(tool_dir, level='DEBUG') if os.path.isdir(tool_dir): self.logger.log(f"Tool dir: {tool_dir}", level='DEBUG') self.tool_directory = tool_dir # Generating remote tool's path # Peid: E:\basic\static\peid\x86\0.95\peid.exe self.remote_tool_path = "{}\\{}".format( "\\".join(tool_dir.split('/')[5:]), self.version) self.logger.log(f"Remote Tool Path: {self.remote_tool_path}", level='DEBUG') def load_tool_config_file(self, tool_name, arch, version, target_file=None): """ Loads the tool config file: (JSON data -> BOX object) conversion""" try: # print(f"Loading Template ({tool_name}) Arch: {arch} Version: ({version})") self.__locate_tool_config_file(tool_name, arch, version) cfg_file = f"{self.tool_directory}/{self.version}/{self.tool_name}.json" self.logger.log('Config file path: {}'.format(cfg_file)) j = open(cfg_file, 'r') except FileNotFoundError as e: self.logger.log('Cannot open config JSON file: {}'.format(e), level='DEBUG') else: self.logger.log('Loading JSON config file', level='DEBUG') try: json_data = json.load(j) # This will set the dictionary required to hold # custom user variables used in json template/config files. json_data['configuration']['execution']['download_sample'] = False json_data['configuration']['execution']['custom_user_vars'] = {} json_data['configuration']['execution']['delay'] = 0 json_data['actions']['action'] = "" except json.JSONDecodeError: self.logger.log('Error during JSON decoding', level='DEBUG') return False else: j.close() self.logger.log('The JSON config file was loaded correctly', level='DEBUG') # The Config JSON data is loaded and then converted # into an extended python dict by using the python-box # module. 
Through this way, attributes can be accessed # with dot notation: # # self.automation.normal_scan.execute = True # self.__dict__.update(box.Box(json_data)) #print("JSON_AND_DICT_DATA: {}".format(self.__dict__)) if target_file is None: self.logger.log('>> Target file is not set', level='DEBUG') self.configuration.execution.download_sample = False elif target_file is not None: self.logger.log('>> Target file is set', level='DEBUG') self.configuration.execution.download_sample = True self.download_url = target_file.download_url else: self.logger.log('>> Unknown target', level='DEBUG') class BphToolTemplateExecutor(BphToolTemplateConfiguration): server_status = None template_delivered = False template_file = None def __init__(self): super().__init__() # Variables added into the general (not-boxed) JSON Template self.module_name = self.__module__ self.sid = Session.get_session_id() self.md5 = Session.sample_md5 self.project_name = Session.project_name self.rid = str(uuid.uuid4()) self.tool_drive = BPH_REMOTE_TOOLS_DRIVE def __dump_command_file(self, tmp_file): """ Dump Template's JSON data into Temporary file """ try: tmp = open(tmp_file, 'wb') self.logger.log(f"Dumping Template Data into a Tmp file: {tmp.name}", level='DEBUG') # At this time self.__dict__ was already boxed. # Making a copy of current objetc's dictionaty and removing logger # from it. This way the 'logger object' is not included within the # template data and regular 'logger; module remains. template_data = {} for k,v in self.__dict__.items(): if k != "logger": self.logger.log('Key: {} Value: {}'.format(k, v), level='DEBUG') if k not in template_data: template_data.update({k: v}) if BPH_TEMPLATE_SERVER_OUTPUT: self.logger.log(template_data) pickle.dump(template_data, tmp, protocol=2) del template_data tmp.close() self.logger.log(self.__dict__, level='DEBUG') except IOError: self.logger.log("Tmp file can't be written", level='DEBUG') return False else: self.logger.log('Tmp file - OK', level='DEBUG') return True def __make_cmds_tmp_file(self): """ Created Temporary File """ try: self.logger.log('Creating Temporary File', level='DEBUG') with tempfile.NamedTemporaryFile(mode='w+b', dir=BPH_TMP_DIR, delete=False, prefix='blackphenix_') as f: tmp_file = f.name except: self.logger.log('Error when creating tmp file', level='DEBUG') else: self.logger.log('Tmp file created:{}'.format(tmp_file), level='DEBUG') return tmp_file def _scan_bph_tmp_file(self, clean=False): """ Scans Windows Temporary Folder for bph_ files """ self.logger.log('Scanning...', level='DEBUG') for root, dirs, files in os.walk(BPH_TMP_DIR): for file in files: # All files matching "blackphenix_" prefix if "blackphenix_" in file: bph_tmp_file = "{}{}".format(root, file) if os.path.getsize(bph_tmp_file) != 0: self.logger.log('Tmp file: {}'.format(bph_tmp_file), level='DEBUG') #os.system("ls -lskh {}".format(bph_tmp_file)) else: self.logger.log('Removing Empty file...') os.remove(bph_tmp_file) if clean is not False: try: self.logger.log('Cleaning: {}'.format(bph_tmp_file), level='DEBUG') os.remove(bph_tmp_file) except OSError: self.logger.log("Tmp file can't be deleted", level='DEBUG') return False else: self.logger.log('File was removed - cleaned.', level='DEBUG') self.logger.log('Found BphFile: {}'.format(bph_tmp_file), level='DEBUG') return bph_tmp_file def execute(self, delay=0): self.logger.log("Executing Template") # If a user choose a delay for execute(), then this # value is passed as parameter within the template # request. 
This will allow the windows agent to pause # the same amount of seconds chosen by the execute() # function. # <Box: {'admin_required': False, # 'delay': 20}> # self.configuration.execution.delay = delay # The 1 sec timeout allows enough time between exec() requests # to generate a template file and make it ready for the agent. time.sleep(2) if not BphToolTemplateExecutor.server_status: self.logger.log('Waiting for Agent Connection....') while True: if BphToolTemplateExecutor.server_status: self.logger.log('Agent is Connected. Delivering Template now...') # Creates a Temp file to dump the current Boxed content # self.__dict__ was created by using box.Box() tmp = self.__make_cmds_tmp_file() # Dumps the self.__dict__ data into the Temporary file # This file will be used by the Agent Server to send # the file's content to the VM network Agent self.__dump_command_file(tmp) self.logger.log(self.__dict__, level='DEBUG') break self.logger.log('Template Delivered: {}'.format(BphToolTemplateExecutor.template_delivered), level='DEBUG') while BphToolTemplateExecutor.template_delivered != True: self.logger.log('Waiting to deliver template...') time.sleep(5) self.logger.log('Template has been delivered.') BphToolTemplateExecutor.template_delivered = False self.logger.log('Next instruction will be sent in ({}) seconds'.format(delay)) time.sleep(delay) def output(self, show=False): def output_conversor(tool_output_log): self.logger.log('output conversor', level='DEBUG') tool_output = [] with open(tool_output_log) as tool_log: for line in tool_log: if line not in tool_output: if show: self.logger.log('Adding: {}'.format(line), level='DEBUG') tool_output.append(line.strip()) return tool_output tool_output_log = tool_files_folder = os.path.join(Session.sid_folder, self.tool_name, self.rid, "{}.log".format(self.tool_name)) if show: self.logger.log(tool_output_log, level='DEBUG') while True: try: # Don't give any response until the file has arrived if os.path.isfile(tool_output_log): self.logger.log('Log file was found', level='DEBUG') result_data = output_conversor(tool_output_log) for line in result_data: self.logger.log('Content: {}'.format(colored(line, 'green'))) return result_data except FileNotFoundError: self.logger.log('File has not arrived yet. Retrying in 5 seconds') time.sleep(5) self.logger.log('Retrying now...') self.output(show=show) def files(self): time.sleep(5) tool_files_folder = os.path.join(Session.sid_folder, self.tool_name, self.rid) self.logger.log('Searching for files now in: {}'.format(tool_files_folder)) files_found = [] while True: if os.path.isdir(tool_files_folder): self.logger.log('Directory OK', level='DEBUG') for root, dirs, files in os.walk(tool_files_folder): for file in files: if file not in files_found: file = os.path.join(root, file) files_found.append(file) for file in files_found: self.logger.log(colored('File: {}'.format(os.path.basename(file)), 'green')) return files_found
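A stripped-down sketch of the template hand-off used above, with only the standard library: the request dictionary is pickled with protocol 2 (so an older-Python agent can still read it) into a named temporary file. The field names below are placeholders.

import pickle
import tempfile

template_data = {"tool_name": "example_tool", "rid": "0000", "delay": 0}  # placeholder fields

with tempfile.NamedTemporaryFile(mode="w+b", prefix="blackphenix_", delete=False) as tmp:
    pickle.dump(template_data, tmp, protocol=2)
    path = tmp.name

with open(path, "rb") as f:
    print(pickle.load(f))  # the agent-side read looks roughly like this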
python
import sys print("Congratulations on installing Python!", '\n') print("This system is running {}".format(sys.version), '\n') if "conda" in sys.version: print("Hello from Anaconda!") else: print("Hello from system-installed Python!")
python
from collections import defaultdict import re from collections import Counter print("Reactor Reboot") with open("day22/day22_1_input.txt", "r") as f: commands = [entry for entry in f.read().strip().split("\n")] # print(commands) cubeDict = defaultdict(bool) for command in commands: action, cubePositions = command.split(" ") positionRange = [[int(startEnd) for startEnd in position.split("=")[1].split("..")] for position in cubePositions.split(",")] isOutOfPosition = False for position in positionRange: for value in position: if value < -50 or value > 50: isOutOfPosition = True break if isOutOfPosition: break if isOutOfPosition: continue for x in range(positionRange[0][0], positionRange[0][1] + 1, 1): for y in range(positionRange[1][0], positionRange[1][1] + 1, 1): for z in range(positionRange[2][0], positionRange[2][1] + 1, 1): # print(x, y, z) cubeDict[(x, y, z)] = True if action == "on" else False nbOn = 0 for cube, value in cubeDict.items(): if value: nbOn +=1 print("rs part1: ", nbOn) ## part2 with open('day22/day22_1_input.txt', 'r') as file: raw_data = file.read() def parse_input(raw_data): res = [] for line in raw_data.split('\n'): state = int(line.split()[0] == 'on') x0, x1, y0, y1, z0, z1 = map(int, re.findall('-?\d+', line)) res.append((state, x0, x1, y0, y1, z0, z1)) return res DATA = parse_input(raw_data) # print(DATA) def intersect(cube_a, cube_b): x0, x1, y0, y1, z0, z1 = cube_a i0, i1, j0, j1, k0, k1 = cube_b x_s, y_s, z_s = ( max(a, b) for a, b in zip((x0, y0, z0), (i0, j0, k0)) ) x_e, y_e, z_e = ( min(a, b) for a, b in zip((x1, y1, z1), (i1, j1, k1)) ) # print(x_s, y_s, z_s, x_e, y_e, z_e) if x_s <= x_e and y_s <= y_e and z_s <= z_e: return x_s, x_e, y_s, y_e, z_s, z_e return False def toggle_cubes(step, cubes): #print("step: ", step, "cubes: ", cubes) state, cur = step[0], step[1:] new = Counter() for cube in cubes: intsct = intersect(cur, cube) if intsct: print("intersect: ",intsct, "cube: ", cube, "cur: ", cur, cubes[cube]) new[intsct] -= cubes[cube] ## if it is on substract 1 for intersection (prevents double checking) # print("new: ", new) if state: cubes[cur] = 1 # print(new) cubes.update(new) print(cubes) print("--------------------------") return cubes def calc_toggled(cubes): res = 0 print("Calculation: ", cubes.items()) for k, v in cubes.items(): x0, x1, y0, y1, z0, z1 = k print(k) size = (x1 + 1 - x0) * (y1 + 1 - y0) * (z1 + 1 - z0) res += size * v print(res, v) return res """def part_one(steps): cubes = Counter() for step in steps: state, cur = step[0], step[1:] # print(cur) cur = intersect(cur, (-50, 50, -50, 50, -50, 50)) if not cur: continue cubes = toggle_cubes((state, *cur), cubes) return calc_toggled(cubes)""" def part_two(steps): cubes = Counter() for step in steps: cubes = toggle_cubes(step, cubes) return calc_toggled(cubes) print("part2.", part_two(DATA))
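A tiny check of the intersect() helper above (run after the definitions): two 3x3x3 "on" cuboids that overlap in a 2x2x2 corner share exactly 8 cells, so together they light 27 + 27 - 8 = 46 cubes, which is exactly the double-count the signed Counter entries subtract away.

a = (0, 2, 0, 2, 0, 2)      # inclusive ranges, as in the puzzle input
b = (1, 3, 1, 3, 1, 3)
print(intersect(a, b))      # -> (1, 2, 1, 2, 1, 2), a 2x2x2 overlap
print(27 + 27 - 8)          # -> 46 lit cubes in the union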
python
from .fp16_optimizer import FP16_Optimizer from .fused_adam import FusedAdam
python
""" An audio URL. """ def audio_url(): return 'https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav'
python
############################################################### # Autogenerated module. Please don't modify. # # Edit according file in protocol_generator/templates instead # ############################################################### from typing import Dict from ...structs.api.offset_fetch_request import OffsetFetchRequestData, Partition, Topic from ._main_serializers import ArraySerializer, ClassSerializer, Schema, int32Serializer, stringSerializer partitionSchemas: Dict[int, Schema] = { 0: [("partition", int32Serializer)], 1: [("partition", int32Serializer)], 2: [("partition", int32Serializer)], 3: [("partition", int32Serializer)], 4: [("partition", int32Serializer)], 5: [("partition", int32Serializer)], } partitionSerializers: Dict[int, ClassSerializer[Partition]] = { version: ClassSerializer(Partition, schema) for version, schema in partitionSchemas.items() } partitionSerializers[-1] = partitionSerializers[5] topicSchemas: Dict[int, Schema] = { 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[0]))], 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[1]))], 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[2]))], 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[3]))], 4: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[4]))], 5: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionSerializers[5]))], } topicSerializers: Dict[int, ClassSerializer[Topic]] = { version: ClassSerializer(Topic, schema) for version, schema in topicSchemas.items() } topicSerializers[-1] = topicSerializers[5] offsetFetchRequestDataSchemas: Dict[int, Schema] = { 0: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[0]))], 1: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[1]))], 2: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[2]))], 3: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[3]))], 4: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[4]))], 5: [("group_id", stringSerializer), ("topics", ArraySerializer(topicSerializers[5]))], } offsetFetchRequestDataSerializers: Dict[int, ClassSerializer[OffsetFetchRequestData]] = { version: ClassSerializer(OffsetFetchRequestData, schema) for version, schema in offsetFetchRequestDataSchemas.items() } offsetFetchRequestDataSerializers[-1] = offsetFetchRequestDataSerializers[5]
python
import gym import pybullet as p import pybullet_data import os import numpy as np from gym import spaces # Initial joint angles RESET_VALUES = [ 0.015339807878856412, -1.2931458041875956, 1.0109710760673565, -1.3537670644267164, -0.07158577010132992, .027] # End-effector boundaries BOUNDS_XMIN = -100 BOUNDS_XMAX = 100 BOUNDS_YMIN = -100 BOUNDS_YMAX = 100 BOUNDS_ZMIN = -100 BOUNDS_ZMAX = 100 # Joint boundaries JOINT_MIN = np.array([ -3.1, -1.571, -1.571, -1.745, -2.617, 0.003 ]) JOINT_MAX = np.array([ 3.1, 1.571, 1.571, 1.745, 2.617, 0.03 ]) class WidowxEnv(gym.Env): def __init__(self): """ Initialise the environment """ self.goal_oriented = True # Define action space self.action_space = spaces.Box( low=np.float32(np.array([-0.5, -0.25, -0.25, -0.25, -0.5, -0.005]) / 30), high=np.float32(np.array([0.5, 0.25, 0.25, 0.25, 0.5, 0.005]) / 30), dtype=np.float32) # Define observation space self.obs_space_low = np.float32( np.array([-.16, -.15, 0.14, -3.1, -1.6, -1.6, -1.8, -3.1, 0])) self.obs_space_high = np.float32( np.array([.16, .15, .41, 3.1, 1.6, 1.6, 1.8, 3.1, 0.05])) self.observation_space = spaces.Box( low=np.float32(self.obs_space_low), high=np.float32(self.obs_space_high), dtype=np.float32) if self.goal_oriented: self.observation_space = spaces.Dict(dict( desired_goal=spaces.Box(low=np.float32(np.array([-.16, -.15, 0.25])), high=np.float32(np.array([.16, .15, 0.41])), dtype=np.float32), achieved_goal=spaces.Box(low=np.float32(self.obs_space_low[:3]), high=np.float32(self.obs_space_high[:3]), dtype=np.float32), observation=self.observation_space )) self.current_pos = None # Initialise the goal position self.goal = np.array([.14, .0, 0.26]) # Fixed goal # self.set_goal(self.sample_goal_for_rollout()) # Random goal # Connect to physics client. By default, do not render self.physics_client = p.connect(p.DIRECT) # Load URDFs self.create_world() def create_world(self): # Initialise camera angle p.resetDebugVisualizerCamera( cameraDistance=0.6, cameraYaw=0, cameraPitch=-30, cameraTargetPosition=[0.2, 0, 0.1], physicsClientId=self.physics_client) # Load robot, sphere and plane urdf p.setAdditionalSearchPath(pybullet_data.getDataPath()) path = os.path.abspath(os.path.dirname(__file__)) self.arm = p.loadURDF( os.path.join( path, "URDFs/widowx/widowx.urdf"), useFixedBase=True) self.sphere = p.loadURDF( os.path.join( path, "URDFs/sphere.urdf"), useFixedBase=True) self.plane = p.loadURDF('plane.urdf') # reset environment self.reset() def sample_goal_for_rollout(self): """ Sample random goal coordinates """ return np.random.uniform(low=np.array( [-.14, -.13, 0.26]), high=np.array([.14, .13, .39])) def set_goal(self, goal): self.goal = goal def step(self, action): """ Execute the action. 
Parameters ---------- action : array holding the angles changes from the previous time step [δ1, δ2, δ3, δ4, δ5, δ6] Returns ------- obs, reward, episode_over, info : tuple obs (object) : Either [xe, ye, ze, θ1, θ2, θ3, θ4, θ5, θ6] for a Gym env or an observation dict for a goal env reward (float) : Negative, squared, l2 distance between current position and goal position episode_over (bool) : Whether or not we have reached the goal info (dict) : Additional information """ self.action = np.array(action, dtype=np.float32) # Retrive current joint position and velocities # (note that velocities are always 0 due to the force joint reset) self.joint_positions, self.joint_velocities = self._get_current_joint_positions() # Update the new joint position with the action self.new_joint_positions = self.joint_positions + self.action # Clip the joint position to fit the joint's allowed boundaries self.new_joint_positions = np.clip( np.array(self.new_joint_positions), JOINT_MIN, JOINT_MAX) # Instantaneously reset the joint position (no torque applied) self._force_joint_positions(self.new_joint_positions) # Retrieve the end effector position. # If it's outside the boundaries defined, don't update the joint # position end_effector_pos = self._get_current_end_effector_position() x, y, z = end_effector_pos[0], end_effector_pos[1], end_effector_pos[2] conditions = [ x <= BOUNDS_XMAX, x >= BOUNDS_XMIN, y <= BOUNDS_YMAX, y >= BOUNDS_YMIN, z <= BOUNDS_ZMAX, z >= BOUNDS_ZMIN ] violated_boundary = False for condition in conditions: if not condition: violated_boundary = True break if violated_boundary: self._force_joint_positions(self.joint_positions) # Backup old position and get current joint position and current end # effector position self.old_pos = self.current_pos self.current_pos = self._get_current_state() return self._generate_step_tuple() def _generate_step_tuple(self): """ return (obs, reward, episode_over, info) tuple """ # Reward reward = self._get_reward(self.goal) # Info self.old_distance = np.linalg.norm(self.old_pos[:3] - self.goal) self.new_distance = np.linalg.norm(self.current_pos[:3] - self.goal) info = {} info['new_distance'] = self.new_distance info['old_distance'] = self.old_distance info['goal_position'] = self.goal info['tip_position'] = self.current_pos[:3] info['old_joint_pos'] = self.joint_positions info['new_joint_pos'] = self.new_joint_positions info['joint_vel'] = self.joint_velocities # Never end episode prematurily episode_over = False # if self.new_distance < 0.0005: # episode_over = True if self.goal_oriented: obs = self._get_obs() return obs, reward, episode_over, info return self.current_pos, reward, episode_over, info def reset(self): """ Reset robot and goal at the beginning of an episode Return observation """ # Reset robot at the origin and move sphere to the goal position p.resetBasePositionAndOrientation( self.arm, [0, 0, 0], p.getQuaternionFromEuler([np.pi, np.pi, np.pi])) p.resetBasePositionAndOrientation( self.sphere, self.goal, p.getQuaternionFromEuler([np.pi, np.pi, np.pi])) # Reset joint at initial angles and get current state self._force_joint_positions(RESET_VALUES) self.current_pos = self._get_current_state() if self.goal_oriented: return self._get_obs() return self.current_pos def _get_obs(self): """ return goal_oriented observation """ obs = {} obs['observation'] = self.current_pos obs['desired_goal'] = self.goal obs['achieved_goal'] = self.current_pos[:3] return obs def _get_reward(self, goal): """ Calculate the reward as - distance **2 """ return - 
(np.linalg.norm(self.current_pos[:3] - goal) ** 2) def render(self, mode='human'): """ Render Pybullet simulation """ p.disconnect(self.physics_client) self.physics_client = p.connect(p.GUI) self.create_world() def compute_reward(self, achieved_goal, goal, info): """ Function necessary for goal Env""" return - (np.linalg.norm(achieved_goal - goal)**2) def _get_current_joint_positions(self): """ Return current joint position and velocities """ joint_positions = [] joint_velocities = [] for i in range(6): joint_positions.append(p.getJointState(self.arm, i)[0]) joint_velocities.append(p.getJointState(self.arm, i)[1]) return np.array( joint_positions, dtype=np.float32), np.array( joint_velocities, dtype=np.float32) def _get_current_end_effector_position(self): """ Get end effector coordinates """ return np.array( list( p.getLinkState( self.arm, 5, computeForwardKinematics=1)[4])) def _set_joint_positions(self, joint_positions): """ Position control (not reset) """ # In Pybullet, gripper halves are controlled separately joint_positions = list(joint_positions) + [joint_positions[-1]] p.setJointMotorControlArray( self.arm, [0, 1, 2, 3, 4, 7, 8], controlMode=p.POSITION_CONTROL, targetPositions=joint_positions ) def _force_joint_positions(self, joint_positions): """ Instantaneous reset of the joint angles (not position control) """ for i in range(5): p.resetJointState( self.arm, i, joint_positions[i] ) # In Pybullet, gripper halves are controlled separately for i in range(7, 9): p.resetJointState( self.arm, i, joint_positions[-1] ) def _get_current_state(self): """ Return observation: end effector position + current joint position """ return np.concatenate( [self._get_current_end_effector_position(), self._get_current_joint_positions()[0]], axis=0)
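A hedged usage sketch for the environment above: the classic gym loop with random actions. It assumes pybullet and the widowx/sphere URDF files referenced in create_world() are present, since the constructor loads them immediately.

env = WidowxEnv()
obs = env.reset()                      # goal-oriented dict observation
for _ in range(5):
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    print(round(float(reward), 5), round(float(info['new_distance']), 4))
env.close()                            # inherited no-op from gym.Env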
python
from discord import Embed async def compose_embed(bot, msg, message): names = { "user_name": msg.author.display_name, "user_icon": msg.author.avatar_url, "channel_name": msg.channel.name, "guild_name": msg.guild.name, "guild_icon": msg.guild.icon_url } if msg.guild != message.guild: names = await update_names(bot, msg, names) embed_type = await get_embed_type(bot, message) embed_color = await get_embed_color(bot, message) if embed_type == 1: embed = await Compose.type_1(msg, message, names, embed_color) else: embed = await Compose.type_1(msg, message, names, embed_color) return embed, embed_type async def update_names(bot, msg, names): guild_anonymity = await bot.check.anonymity(bot.guilds_data, msg.guild.id) user_anonymity = await bot.check.anonymity(bot.users_data, msg.author.id) if user_anonymity is None: if guild_anonymity: names["user_name"] = '匿名ユーザー' names["user_icon"] = 'https://discord.com/assets/7c8f476123d28d103efe381543274c25.png' else: names["user_name"] = msg.author.display_name names["user_icon"] = msg.author.avatar_url if user_anonymity is True: names["user_name"] = '匿名ユーザー' names["user_icon"] = 'https://discord.com/assets/7c8f476123d28d103efe381543274c25.png' if user_anonymity is False: names["user_name"] = msg.author.display_name names["user_icon"] = msg.author.avatar_url return names async def get_embed_type(bot, message): user_data = bot.users_data.get(str(message.author.id)) if user_data: return user_data.get('embed_type') guild_data = bot.guilds_data.get(str(message.guild.id)) if guild_data: return guild_data.get('embed_type') return 1 async def get_embed_color(bot, message): user_data = bot.users_data.get(str(message.author.id)) if user_data: return user_data.get('embed_color') guild_data = bot.guilds_data.get(str(message.guild.id)) if guild_data: return guild_data.get('embed_color') return '000000' class Compose: async def type_1(msg, message, names, embed_color): embed = Embed( description=msg.content, timestamp=msg.created_at, url=f'{message.jump_url}?{message.author.id}', colour=int(f'0x{embed_color}', 16) ) embed.set_author( name=names["user_name"], icon_url=names["user_icon"], url=f'{msg.jump_url}?{msg.author.id}' ) if names.get('category_name') is None: channel_txt = f'#{names["channel_name"]}' else: channel_txt = f'#{names["category_name"]}/{names["channel_name"]}' if msg.guild == message.guild: footer_txt = f'{channel_txt} | Quoted by {str(message.author)}' else: footer_txt = f'@{names["guild_name"]} | {channel_txt} | Quoted by {str(message.author)}' embed.set_footer( text=footer_txt, icon_url=names["guild_icon"], ) if msg.attachments and msg.attachments[0].proxy_url: embed.set_image( url=msg.attachments[0].proxy_url ) return embed
python
import time import datetime from haste_storage_client.core import HasteStorageClient, OS_SWIFT_STORAGE, TRASH from haste_storage_client.interestingness_model import RestInterestingnessModel haste_storage_client_config = { 'haste_metadata_server': { # See: https://docs.mongodb.com/manual/reference/connection-string/ 'connection_string': 'mongodb://130.xxx.yy.zz:27017' }, 'os_swift': { # See: https://docs.openstack.org/keystoneauth/latest/ # api/keystoneauth1.identity.v3.html#module-keystoneauth1.identity.v3.password 'username': 'xxxxx', 'password': 'xxxx', 'project_name': 'xxxxx', 'user_domain_name': 'xxxx', 'auth_url': 'xxxxx', 'project_domain_name': 'xxxx' } } # Identifies both the experiment, and the session (ie. unique each time the stream starts), # for example, this would be a good format - this needs to be generated at the stream edge. initials = 'anna_exampleson' stream_id = datetime.datetime.today().strftime('%Y_%m_%d__%H_%M_%S') + '_exp1_' + initials print('stream ID is: %s' % stream_id) # Optionally, specify REST server with interesting model: interestingness_model = RestInterestingnessModel('http://localhost:5000/model/api/v0.1/evaluate') client = HasteStorageClient(stream_id, config=haste_storage_client_config, interestingness_model=interestingness_model, storage_policy=[(0.5, 1.0, OS_SWIFT_STORAGE)], # map 0.5<=interestingness<=1.0 to OS swift. default_storage=TRASH) # discard blobs which don't match the policy above. blob_bytes = b'this is a binary blob eg. image data.' timestamp_cloud_edge = time.time() substream_id = 'B13' # Group by microscopy well ID. client.save(timestamp_cloud_edge, (12.34, 56.78), substream_id, blob_bytes, {'image_height_pixels': 300, # bag of extracted features here 'image_width_pixels': 300, 'number_of_green_pixels': 1234}) client.close()
python
"""Checkmarx CxSAST source up-to-dateness collector.""" from dateutil.parser import parse from collector_utilities.functions import days_ago from collector_utilities.type import Value from source_model import SourceResponses from .base import CxSASTBase class CxSASTSourceUpToDateness(CxSASTBase): """Collector class to measure the up-to-dateness of a Checkmarx CxSAST scan.""" async def _parse_value(self, responses: SourceResponses) -> Value: """Override to parse the date and time of the most recent scan.""" scan = (await responses[0].json())[0] return str(days_ago(parse(scan["dateAndTime"]["finishedOn"])))
python
#!/usr/bin/env python import argparse import re import sys # Prevent creation of compiled bytecode files sys.dont_write_bytecode = True from core.framework import cli from core.utils.printer import Colors # ====================================================================================================================== # Setup command completion and run the UI # ====================================================================================================================== def launch_ui(args): # Setup tab completion try: import readline except ImportError: print('%s[!] Module \'readline\' not available. Tab complete disabled.%s' % (Colors.R, Colors.N)) else: import rlcompleter if 'libedit' in readline.__doc__: readline.parse_and_bind('bind ^I rl_complete') else: readline.parse_and_bind('tab: complete') readline.set_completer_delims(re.sub('[/-]', '', readline.get_completer_delims())) # Instantiate the UI object x = cli.CLI() # Check for and run script session if args.script_file: x.do_resource(args.script_file) # Run the UI try: x.cmdloop() except KeyboardInterrupt: print('') # ====================================================================================================================== # MAIN # ====================================================================================================================== def main(): description = '%%(prog)s - %s %s' % (cli.__author__, cli.__email__) parser = argparse.ArgumentParser(description=description, version=cli.__version__) parser.add_argument('-r', help='load commands from a resource file', metavar='filename', dest='script_file', action='store') args = parser.parse_args() launch_ui(args) if __name__ == '__main__': main()
python
from copy import copy def _rec(arr, n, m): if n < 1: return yield from _rec(arr, n-1, m) for i in range(1,m): arr_loop = copy(arr) arr_loop[n-1] = i yield arr_loop yield from _rec(arr_loop, n-1, m) def main(n, m): arr = [0]*n yield arr yield from _rec(arr, n-1, m) for i in range(1,m): arr_loop = copy(arr) arr_loop[n-1] = i yield arr_loop yield from _rec(arr_loop, n-1, m) if __name__ == "__main__": for arr in main(4, 3): print(arr)
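A quick check (run after the definitions above) that the recursion enumerates every length-n array over {0, ..., m-1} exactly once, by comparing against itertools.product.

from itertools import product

n, m = 3, 2
seen = [tuple(arr) for arr in main(n, m)]
assert len(seen) == m ** n == len(set(seen))          # 8 arrays, no duplicates
assert set(seen) == set(product(range(m), repeat=n))  # exactly the full cube
print(sorted(seen))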
python
___assertEqual(0**17, 0) ___assertEqual(17**0, 1) ___assertEqual(0**0, 1) ___assertEqual(17**1, 17) ___assertEqual(2**10, 1024) ___assertEqual(2**-2, 0.25)
python
from libqtile.backend.x11 import core def test_keys(display): assert "a" in core.get_keys() assert "shift" in core.get_modifiers() def test_no_two_qtiles(manager): try: core.Core(manager.display).finalize() except core.ExistingWMException: pass else: raise Exception("expected an error on multiple qtiles connecting")
python
# Copyright (C) 2017-2019 New York University, # University at Buffalo, # Illinois Institute of Technology. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re from vizier.core.util import dump_json, load_json from vizier.datastore.annotation.base import DatasetAnnotation from vizier.datastore.annotation.dataset import DatasetMetadata from vizier.datastore.dataset import DatasetHandle, DatasetColumn, DatasetRow from vizier.datastore.mimir.reader import MimirDatasetReader import vizier.mimir as mimir """Mimir annotation keys.""" ANNO_UNCERTAIN = 'mimir:uncertain' """Value casts for SQL update statements.""" CAST_TRUE = 'CAST(1 AS BOOL)' CAST_FALSE = 'CAST(0 AS BOOL)' """Compiled regular expressions to identify valid date and datetime values. Note that this does not check if a date string actually specifies a valid calendar date. But it appears that Mimir accepts any sting that follows this format.""" DATE_FORMAT = re.compile('^\d{4}-\d\d?-\d\d?$') DATETIME_FORMAT = re.compile('^\d{4}-\d\d?-\d\d? \d\d?:\d\d?:\d\d?(\.\d+)?$') class MimirDatasetColumn(DatasetColumn): """Column in a dataset that is stored as a Mimir table or view. Given that column names are not necessarily unique in a dataset, there is a need to maintain a mapping of dataset names to attribute names for tables/views in the relational database. Attributes ---------- identifier: int Unique column identifier name: string Name of column in the dataset name_in_rdb: string Name of the corresponding attribute in a relational table or views data_type: string, optional String representation of the column type in the database. By now the following data_type values are expected: date (format yyyy-MM-dd), int, varchar, real, and datetime (format yyyy-MM-dd hh:mm:ss:zzzz). """ def __init__(self, identifier=None, name_in_dataset=None, name_in_rdb=None, data_type=None): """Initialize the dataset column. Parameters ---------- identifier: int Unique column identifier name_in_dataset: string Name of column in the dataset name_in_rdb: string, optional Name of the corresponding attribute in a relational table or views data_type: string, optional Identifier for data type of column values. Default is String """ # Ensure that a valid data type is given super(MimirDatasetColumn, self).__init__( identifier=identifier, name=name_in_dataset, data_type=data_type ) if not name_in_rdb is None: self.name_in_rdb = name_in_rdb.upper() else: self.name_in_rdb = name_in_dataset.upper() @staticmethod def from_dict(doc): """Create dataset column object from dictionary serialization. Parameters ---------- doc: dict Dictionary serialization for dataset column object Returns ------- vizier.datastore.mimir.DatasetColumn """ return MimirDatasetColumn( identifier=doc['id'], name_in_dataset=doc['name'], name_in_rdb=doc['rdbName'], data_type=doc['dataType'] ) def is_numeric(self): """Flag indicating if the data type of this column is numeric, i.e., integer or real. 
Returns ------- bool """ return self.data_type.lower() in ['int', 'real'] def to_dict(self): """Get dictionary serialization for dataset column object. Returns ------- dict """ return { 'id': self.identifier, 'name': self.name, 'rdbName': self.name_in_rdb, 'dataType': self.data_type } def to_sql_value(self, value): """Return an SQL conform representation of the given value based on the column's data type. Raises ValueError if the column type is numeric but the given value cannot be converted to a numeric value. Parameters ---------- value: string Dataset cell value Returns ------- string """ # If the given value is None simply return the keyword NULL if value is None: return 'NULL' # If the data type of the columns is numeric (int or real) try to # convert the given argument to check whether it actually is a numeric # value. Note that we always return a string beacuse the result is # intended to be concatenated as part of a SQL query string. if self.data_type.lower() in ['int', 'real']: try: int(value) return str(value) except ValueError: return str(float(value)) elif self.data_type.lower() == 'date': if DATE_FORMAT.match(value): return 'CAST(\'' + str(value) + '\' AS DATE)' raise ValueError('not a date \'' + str(value) + '\'') elif self.data_type.lower() == 'datetime': if DATETIME_FORMAT.match(value): return 'CAST(\'' + str(value) + '\' AS DATETIME)' raise ValueError('not a datetime \'' + str(value) + '\'') elif self.data_type.lower() == 'bool': if isinstance(value, bool): if value: return CAST_TRUE else: return CAST_FALSE elif isinstance(value, int): if value == 1: return CAST_TRUE elif value == 0: return CAST_FALSE else: str_val = str(value).upper() if str_val in ['TRUE', '1']: return CAST_TRUE elif str_val in ['FALSE', '0']: return CAST_FALSE # If none of the previous tests returned a bool representation we # raise an exception to trigger value casting. raise ValueError('not a boolean value \'' + str(value) + '\'') #elif self.data_type.lower() in ['date', 'datetime']: #return self.data_type.upper() + '(\'' + str(value) + '\')' # return 'DATE(\'' + str(value) + '\')' # By default and in case the given value could not be transformed into # the target format return a representation for a string value return '\'' + str(value) + '\'' MIMIR_ROWID_COL= MimirDatasetColumn( name_in_dataset='', data_type='rowid') class MimirDatasetHandle(DatasetHandle): """Internal descriptor for datasets managed by the Mimir data store. Contains mapping for column names from a dataset to the corresponding object in a relational and a reference to the table or view that contains the dataset. """ def __init__( self, identifier, columns, table_name, row_counter, annotations=None, name=None ): """Initialize the descriptor. Parameters ---------- identifier: string Unique dataset identifier columns: list(vizier.datastore.mimir.MimirDatasetColumn) List of column names in the dataset schema and their corresponding names in the relational database table or view. table_name: string Reference to relational database table containing the dataset. row_counter: int Counter for unique row ids annotations: vizier.datastore.annotation.dataset.DatasetMetadata Annotations for dataset components """ super(MimirDatasetHandle, self).__init__( identifier=identifier, columns=columns, row_count=row_counter, annotations=annotations, name=name ) self.table_name = table_name self.row_counter = row_counter @staticmethod def from_file(filename, annotations=None): """Read dataset from file. 
Expects the file to be in Json format which is the default serialization format used by to_file(). Parameters ---------- filename: string Name of the file to read. annotations: vizier.datastore.annotation.dataset.DatasetMetadata, optional Annotations for dataset components Returns ------- vizier.datastore.mimir.dataset.MimirDatasetHandle """ with open(filename, 'r') as f: doc = load_json(f.read()) return MimirDatasetHandle( identifier=doc['id'], columns=[MimirDatasetColumn.from_dict(obj) for obj in doc['columns']], table_name=doc['tableName'], row_counter=doc['rowCounter'] ) def get_annotations(self, column_id=None, row_id=None): """Get list of annotations for a dataset component. If both identifier equal -1 all annotations for a dataset are returned. Parameters ---------- column_id: int, optional Unique column identifier row_id: string, optional Unique row identifier Returns ------- list(vizier.datastpre.annotation.base.DatasetAnnotation) """ if column_id is None and row_id is None: # TODO: If there is an option to get all annotations from Mimir for # all dataset cells we should add those annotations here. By now # this command will only return user-defined annotations for the # dataset. annotations = [] sql = 'SELECT * ' sql += 'FROM ' + self.table_name + ' ' annoList = mimir.explainEverythingJson(sql) for anno in annoList: annotations.append( DatasetAnnotation( key=ANNO_UNCERTAIN, value=anno ) ) #return [item for sublist in map(lambda (i,x): self.annotations.for_column(i).values(), enumerate(self.columns)) for item in sublist] #return self.annotations.values return annotations elif row_id is None: return self.annotations.for_column(column_id) elif column_id is None: return self.annotations.for_row(row_id) else: annotations = self.annotations.for_cell( column_id=column_id, row_id=row_id ) column = self.column_by_id(column_id) sql = 'SELECT * ' sql += 'FROM ' + self.table_name + ' ' buffer = mimir.explainCell(sql, column.name_in_rdb, str(row_id)) has_reasons = len(buffer) > 0 if has_reasons: for value in buffer: value = value['english'] if value != '': annotations.append( DatasetAnnotation( key=ANNO_UNCERTAIN, value=value, column_id=column_id, row_id=row_id ) ) return annotations def max_row_id(self): """Get maximum identifier for all rows in the dataset. If the dataset is empty the result is -1. Returns ------- int """ return self.row_counter def reader(self, offset=0, limit=-1, rowid=None): """Get reader for the dataset to access the dataset rows. The optional offset amd limit parameters are used to retrieve only a subset of rows. Parameters ---------- offset: int, optional Number of rows at the beginning of the list that are skipped. limit: int, optional Limits the number of rows that are returned. Returns ------- vizier.datastore.mimir.MimirDatasetReader """ return MimirDatasetReader( table_name=self.table_name, columns=self.columns, offset=offset, limit=limit, rowid=rowid ) def to_file(self, filename): """Write dataset to file. The default serialization format is Json. Parameters ---------- filename: string Name of the file to write """ doc = { 'id': self.identifier, 'columns': [col.to_dict() for col in self.columns], 'tableName': str(self.table_name), 'rowCounter': self.row_counter } with open(filename, 'w') as f: dump_json(doc, f)
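As the comment near the top of this module notes, the DATE/DATETIME patterns that to_sql_value() relies on only check the shape of the string, not whether it is a real calendar date; a standalone illustration:

import re

DATE = re.compile(r'^\d{4}-\d\d?-\d\d?$')
DATETIME = re.compile(r'^\d{4}-\d\d?-\d\d? \d\d?:\d\d?:\d\d?(\.\d+)?$')

print(bool(DATE.match('2019-2-31')))                 # True: shape is fine, the date is not
print(bool(DATETIME.match('2019-02-03 4:5:6.25')))   # True
print(bool(DATE.match('2019/02/03')))                # False: wrong separator, no CAST emitted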
python
#!/usr/bin/env python3

# author: https://blog.furas.pl
# date: 2020.07.08

import requests
import pandas as pd

url = "https://www.pokemondb.net/pokedex/all"

html = requests.get(url)
dfs = pd.read_html(html.text)

print(dfs)
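
# A small follow-up sketch (an assumption, not from the original script): the
# main Pokedex table is expected to be the first DataFrame that
# pd.read_html() returns, so pick it out and preview a few rows rather than
# printing the whole list.
if dfs:
    pokedex = dfs[0]
    print(pokedex.columns.tolist())
    print(pokedex.head(10))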
python
# # Autogenerated by Thrift Compiler (0.9.3) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # from thrift.Thrift import TType, TMessageType, TException, TApplicationException import logging from ttypes import * from thrift.Thrift import TProcessor from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class Iface: def exists(self, table, tget): """ Test for the existence of columns in the table, as specified in the TGet. @return true if the specified TGet matches one or more keys, false if not Parameters: - table: the table to check on - tget: the TGet to check for """ pass def existsAll(self, table, tgets): """ Test for the existence of columns in the table, as specified by the TGets. This will return an array of booleans. Each value will be true if the related Get matches one or more keys, false if not. Parameters: - table: the table to check on - tgets: a list of TGets to check for """ pass def get(self, table, tget): """ Method for getting data from a row. If the row cannot be found an empty Result is returned. This can be checked by the empty field of the TResult @return the result Parameters: - table: the table to get from - tget: the TGet to fetch """ pass def getMultiple(self, table, tgets): """ Method for getting multiple rows. If a row cannot be found there will be a null value in the result list for that TGet at the same position. So the Results are in the same order as the TGets. Parameters: - table: the table to get from - tgets: a list of TGets to fetch, the Result list will have the Results at corresponding positions or null if there was an error """ pass def put(self, table, tput): """ Commit a TPut to a table. Parameters: - table: the table to put data in - tput: the TPut to put """ pass def checkAndPut(self, table, row, family, qualifier, value, tput): """ Atomically checks if a row/family/qualifier value matches the expected value. If it does, it adds the TPut. @return true if the new put was executed, false otherwise Parameters: - table: to check in and put to - row: row to check - family: column family to check - qualifier: column qualifier to check - value: the expected value, if not provided the check is for the non-existence of the column in question - tput: the TPut to put if the check succeeds """ pass def putMultiple(self, table, tputs): """ Commit a List of Puts to the table. Parameters: - table: the table to put data in - tputs: a list of TPuts to commit """ pass def deleteSingle(self, table, tdelete): """ Deletes as specified by the TDelete. Note: "delete" is a reserved keyword and cannot be used in Thrift thus the inconsistent naming scheme from the other functions. Parameters: - table: the table to delete from - tdelete: the TDelete to delete """ pass def deleteMultiple(self, table, tdeletes): """ Bulk commit a List of TDeletes to the table. Throws a TIOError if any of the deletes fail. Always returns an empty list for backwards compatibility. Parameters: - table: the table to delete from - tdeletes: list of TDeletes to delete """ pass def checkAndDelete(self, table, row, family, qualifier, value, tdelete): """ Atomically checks if a row/family/qualifier value matches the expected value. If it does, it adds the delete. 
@return true if the new delete was executed, false otherwise Parameters: - table: to check in and delete from - row: row to check - family: column family to check - qualifier: column qualifier to check - value: the expected value, if not provided the check is for the non-existence of the column in question - tdelete: the TDelete to execute if the check succeeds """ pass def increment(self, table, tincrement): """ Parameters: - table: the table to increment the value on - tincrement: the TIncrement to increment """ pass def append(self, table, tappend): """ Parameters: - table: the table to append the value on - tappend: the TAppend to append """ pass def openScanner(self, table, tscan): """ Get a Scanner for the provided TScan object. @return Scanner Id to be used with other scanner procedures Parameters: - table: the table to get the Scanner for - tscan: the scan object to get a Scanner for """ pass def getScannerRows(self, scannerId, numRows): """ Grabs multiple rows from a Scanner. @return Between zero and numRows TResults Parameters: - scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. - numRows: number of rows to return """ pass def closeScanner(self, scannerId): """ Closes the scanner. Should be called to free server side resources timely. Typically close once the scanner is not needed anymore, i.e. after looping over it to get all the required rows. Parameters: - scannerId: the Id of the Scanner to close * """ pass def mutateRow(self, table, trowMutations): """ mutateRow performs multiple mutations atomically on a single row. Parameters: - table: table to apply the mutations - trowMutations: mutations to apply """ pass def getScannerResults(self, table, tscan, numRows): """ Get results for the provided TScan object. This helper function opens a scanner, get the results and close the scanner. @return between zero and numRows TResults Parameters: - table: the table to get the Scanner for - tscan: the scan object to get a Scanner for - numRows: number of rows to return """ pass def getRegionLocation(self, table, row, reload): """ Given a table and a row get the location of the region that would contain the given row key. reload = true means the cache will be cleared and the location will be fetched from meta. Parameters: - table - row - reload """ pass def getAllRegionLocations(self, table): """ Get all of the region locations for a given table. Parameters: - table """ pass def checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations): """ Atomically checks if a row/family/qualifier value matches the expected value. If it does, it mutates the row. @return true if the row was mutated, false otherwise Parameters: - table: to check in and delete from - row: row to check - family: column family to check - qualifier: column qualifier to check - compareOp: comparison to make on the value - value: the expected value to be compared against, if not provided the check is for the non-existence of the column in question - rowMutations: row mutations to execute if the value matches """ pass class Client(Iface): def __init__(self, iprot, oprot=None): self._iprot = self._oprot = iprot if oprot is not None: self._oprot = oprot self._seqid = 0 def exists(self, table, tget): """ Test for the existence of columns in the table, as specified in the TGet. 
@return true if the specified TGet matches one or more keys, false if not Parameters: - table: the table to check on - tget: the TGet to check for """ self.send_exists(table, tget) return self.recv_exists() def send_exists(self, table, tget): self._oprot.writeMessageBegin('exists', TMessageType.CALL, self._seqid) args = exists_args() args.table = table args.tget = tget args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_exists(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = exists_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "exists failed: unknown result") def existsAll(self, table, tgets): """ Test for the existence of columns in the table, as specified by the TGets. This will return an array of booleans. Each value will be true if the related Get matches one or more keys, false if not. Parameters: - table: the table to check on - tgets: a list of TGets to check for """ self.send_existsAll(table, tgets) return self.recv_existsAll() def send_existsAll(self, table, tgets): self._oprot.writeMessageBegin('existsAll', TMessageType.CALL, self._seqid) args = existsAll_args() args.table = table args.tgets = tgets args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_existsAll(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = existsAll_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "existsAll failed: unknown result") def get(self, table, tget): """ Method for getting data from a row. If the row cannot be found an empty Result is returned. This can be checked by the empty field of the TResult @return the result Parameters: - table: the table to get from - tget: the TGet to fetch """ self.send_get(table, tget) return self.recv_get() def send_get(self, table, tget): self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid) args = get_args() args.table = table args.tget = tget args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_get(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = get_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result") def getMultiple(self, table, tgets): """ Method for getting multiple rows. If a row cannot be found there will be a null value in the result list for that TGet at the same position. So the Results are in the same order as the TGets. 
Parameters: - table: the table to get from - tgets: a list of TGets to fetch, the Result list will have the Results at corresponding positions or null if there was an error """ self.send_getMultiple(table, tgets) return self.recv_getMultiple() def send_getMultiple(self, table, tgets): self._oprot.writeMessageBegin('getMultiple', TMessageType.CALL, self._seqid) args = getMultiple_args() args.table = table args.tgets = tgets args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_getMultiple(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = getMultiple_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "getMultiple failed: unknown result") def put(self, table, tput): """ Commit a TPut to a table. Parameters: - table: the table to put data in - tput: the TPut to put """ self.send_put(table, tput) self.recv_put() def send_put(self, table, tput): self._oprot.writeMessageBegin('put', TMessageType.CALL, self._seqid) args = put_args() args.table = table args.tput = tput args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_put(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = put_result() result.read(iprot) iprot.readMessageEnd() if result.io is not None: raise result.io return def checkAndPut(self, table, row, family, qualifier, value, tput): """ Atomically checks if a row/family/qualifier value matches the expected value. If it does, it adds the TPut. @return true if the new put was executed, false otherwise Parameters: - table: to check in and put to - row: row to check - family: column family to check - qualifier: column qualifier to check - value: the expected value, if not provided the check is for the non-existence of the column in question - tput: the TPut to put if the check succeeds """ self.send_checkAndPut(table, row, family, qualifier, value, tput) return self.recv_checkAndPut() def send_checkAndPut(self, table, row, family, qualifier, value, tput): self._oprot.writeMessageBegin('checkAndPut', TMessageType.CALL, self._seqid) args = checkAndPut_args() args.table = table args.row = row args.family = family args.qualifier = qualifier args.value = value args.tput = tput args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_checkAndPut(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = checkAndPut_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result") def putMultiple(self, table, tputs): """ Commit a List of Puts to the table. 
Parameters: - table: the table to put data in - tputs: a list of TPuts to commit """ self.send_putMultiple(table, tputs) self.recv_putMultiple() def send_putMultiple(self, table, tputs): self._oprot.writeMessageBegin('putMultiple', TMessageType.CALL, self._seqid) args = putMultiple_args() args.table = table args.tputs = tputs args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_putMultiple(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = putMultiple_result() result.read(iprot) iprot.readMessageEnd() if result.io is not None: raise result.io return def deleteSingle(self, table, tdelete): """ Deletes as specified by the TDelete. Note: "delete" is a reserved keyword and cannot be used in Thrift thus the inconsistent naming scheme from the other functions. Parameters: - table: the table to delete from - tdelete: the TDelete to delete """ self.send_deleteSingle(table, tdelete) self.recv_deleteSingle() def send_deleteSingle(self, table, tdelete): self._oprot.writeMessageBegin('deleteSingle', TMessageType.CALL, self._seqid) args = deleteSingle_args() args.table = table args.tdelete = tdelete args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_deleteSingle(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = deleteSingle_result() result.read(iprot) iprot.readMessageEnd() if result.io is not None: raise result.io return def deleteMultiple(self, table, tdeletes): """ Bulk commit a List of TDeletes to the table. Throws a TIOError if any of the deletes fail. Always returns an empty list for backwards compatibility. Parameters: - table: the table to delete from - tdeletes: list of TDeletes to delete """ self.send_deleteMultiple(table, tdeletes) return self.recv_deleteMultiple() def send_deleteMultiple(self, table, tdeletes): self._oprot.writeMessageBegin('deleteMultiple', TMessageType.CALL, self._seqid) args = deleteMultiple_args() args.table = table args.tdeletes = tdeletes args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_deleteMultiple(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = deleteMultiple_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteMultiple failed: unknown result") def checkAndDelete(self, table, row, family, qualifier, value, tdelete): """ Atomically checks if a row/family/qualifier value matches the expected value. If it does, it adds the delete. 
@return true if the new delete was executed, false otherwise Parameters: - table: to check in and delete from - row: row to check - family: column family to check - qualifier: column qualifier to check - value: the expected value, if not provided the check is for the non-existence of the column in question - tdelete: the TDelete to execute if the check succeeds """ self.send_checkAndDelete(table, row, family, qualifier, value, tdelete) return self.recv_checkAndDelete() def send_checkAndDelete(self, table, row, family, qualifier, value, tdelete): self._oprot.writeMessageBegin('checkAndDelete', TMessageType.CALL, self._seqid) args = checkAndDelete_args() args.table = table args.row = row args.family = family args.qualifier = qualifier args.value = value args.tdelete = tdelete args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_checkAndDelete(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = checkAndDelete_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndDelete failed: unknown result") def increment(self, table, tincrement): """ Parameters: - table: the table to increment the value on - tincrement: the TIncrement to increment """ self.send_increment(table, tincrement) return self.recv_increment() def send_increment(self, table, tincrement): self._oprot.writeMessageBegin('increment', TMessageType.CALL, self._seqid) args = increment_args() args.table = table args.tincrement = tincrement args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_increment(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = increment_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "increment failed: unknown result") def append(self, table, tappend): """ Parameters: - table: the table to append the value on - tappend: the TAppend to append """ self.send_append(table, tappend) return self.recv_append() def send_append(self, table, tappend): self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid) args = append_args() args.table = table args.tappend = tappend args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_append(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = append_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result") def openScanner(self, table, tscan): """ Get a Scanner for the provided TScan object. 
@return Scanner Id to be used with other scanner procedures Parameters: - table: the table to get the Scanner for - tscan: the scan object to get a Scanner for """ self.send_openScanner(table, tscan) return self.recv_openScanner() def send_openScanner(self, table, tscan): self._oprot.writeMessageBegin('openScanner', TMessageType.CALL, self._seqid) args = openScanner_args() args.table = table args.tscan = tscan args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_openScanner(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = openScanner_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "openScanner failed: unknown result") def getScannerRows(self, scannerId, numRows): """ Grabs multiple rows from a Scanner. @return Between zero and numRows TResults Parameters: - scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. - numRows: number of rows to return """ self.send_getScannerRows(scannerId, numRows) return self.recv_getScannerRows() def send_getScannerRows(self, scannerId, numRows): self._oprot.writeMessageBegin('getScannerRows', TMessageType.CALL, self._seqid) args = getScannerRows_args() args.scannerId = scannerId args.numRows = numRows args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_getScannerRows(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = getScannerRows_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io if result.ia is not None: raise result.ia raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerRows failed: unknown result") def closeScanner(self, scannerId): """ Closes the scanner. Should be called to free server side resources timely. Typically close once the scanner is not needed anymore, i.e. after looping over it to get all the required rows. Parameters: - scannerId: the Id of the Scanner to close * """ self.send_closeScanner(scannerId) self.recv_closeScanner() def send_closeScanner(self, scannerId): self._oprot.writeMessageBegin('closeScanner', TMessageType.CALL, self._seqid) args = closeScanner_args() args.scannerId = scannerId args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_closeScanner(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = closeScanner_result() result.read(iprot) iprot.readMessageEnd() if result.io is not None: raise result.io if result.ia is not None: raise result.ia return def mutateRow(self, table, trowMutations): """ mutateRow performs multiple mutations atomically on a single row. 
Parameters: - table: table to apply the mutations - trowMutations: mutations to apply """ self.send_mutateRow(table, trowMutations) self.recv_mutateRow() def send_mutateRow(self, table, trowMutations): self._oprot.writeMessageBegin('mutateRow', TMessageType.CALL, self._seqid) args = mutateRow_args() args.table = table args.trowMutations = trowMutations args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_mutateRow(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = mutateRow_result() result.read(iprot) iprot.readMessageEnd() if result.io is not None: raise result.io return def getScannerResults(self, table, tscan, numRows): """ Get results for the provided TScan object. This helper function opens a scanner, get the results and close the scanner. @return between zero and numRows TResults Parameters: - table: the table to get the Scanner for - tscan: the scan object to get a Scanner for - numRows: number of rows to return """ self.send_getScannerResults(table, tscan, numRows) return self.recv_getScannerResults() def send_getScannerResults(self, table, tscan, numRows): self._oprot.writeMessageBegin('getScannerResults', TMessageType.CALL, self._seqid) args = getScannerResults_args() args.table = table args.tscan = tscan args.numRows = numRows args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_getScannerResults(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = getScannerResults_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "getScannerResults failed: unknown result") def getRegionLocation(self, table, row, reload): """ Given a table and a row get the location of the region that would contain the given row key. reload = true means the cache will be cleared and the location will be fetched from meta. Parameters: - table - row - reload """ self.send_getRegionLocation(table, row, reload) return self.recv_getRegionLocation() def send_getRegionLocation(self, table, row, reload): self._oprot.writeMessageBegin('getRegionLocation', TMessageType.CALL, self._seqid) args = getRegionLocation_args() args.table = table args.row = row args.reload = reload args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_getRegionLocation(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = getRegionLocation_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "getRegionLocation failed: unknown result") def getAllRegionLocations(self, table): """ Get all of the region locations for a given table. 
Parameters: - table """ self.send_getAllRegionLocations(table) return self.recv_getAllRegionLocations() def send_getAllRegionLocations(self, table): self._oprot.writeMessageBegin('getAllRegionLocations', TMessageType.CALL, self._seqid) args = getAllRegionLocations_args() args.table = table args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_getAllRegionLocations(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = getAllRegionLocations_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllRegionLocations failed: unknown result") def checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations): """ Atomically checks if a row/family/qualifier value matches the expected value. If it does, it mutates the row. @return true if the row was mutated, false otherwise Parameters: - table: to check in and delete from - row: row to check - family: column family to check - qualifier: column qualifier to check - compareOp: comparison to make on the value - value: the expected value to be compared against, if not provided the check is for the non-existence of the column in question - rowMutations: row mutations to execute if the value matches """ self.send_checkAndMutate(table, row, family, qualifier, compareOp, value, rowMutations) return self.recv_checkAndMutate() def send_checkAndMutate(self, table, row, family, qualifier, compareOp, value, rowMutations): self._oprot.writeMessageBegin('checkAndMutate', TMessageType.CALL, self._seqid) args = checkAndMutate_args() args.table = table args.row = row args.family = family args.qualifier = qualifier args.compareOp = compareOp args.value = value args.rowMutations = rowMutations args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_checkAndMutate(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = checkAndMutate_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.io is not None: raise result.io raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndMutate failed: unknown result") class Processor(Iface, TProcessor): def __init__(self, handler): self._handler = handler self._processMap = {} self._processMap["exists"] = Processor.process_exists self._processMap["existsAll"] = Processor.process_existsAll self._processMap["get"] = Processor.process_get self._processMap["getMultiple"] = Processor.process_getMultiple self._processMap["put"] = Processor.process_put self._processMap["checkAndPut"] = Processor.process_checkAndPut self._processMap["putMultiple"] = Processor.process_putMultiple self._processMap["deleteSingle"] = Processor.process_deleteSingle self._processMap["deleteMultiple"] = Processor.process_deleteMultiple self._processMap["checkAndDelete"] = Processor.process_checkAndDelete self._processMap["increment"] = Processor.process_increment self._processMap["append"] = Processor.process_append self._processMap["openScanner"] = Processor.process_openScanner self._processMap["getScannerRows"] = Processor.process_getScannerRows 
self._processMap["closeScanner"] = Processor.process_closeScanner self._processMap["mutateRow"] = Processor.process_mutateRow self._processMap["getScannerResults"] = Processor.process_getScannerResults self._processMap["getRegionLocation"] = Processor.process_getRegionLocation self._processMap["getAllRegionLocations"] = Processor.process_getAllRegionLocations self._processMap["checkAndMutate"] = Processor.process_checkAndMutate def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() if name not in self._processMap: iprot.skip(TType.STRUCT) iprot.readMessageEnd() x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name)) oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) x.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() return else: self._processMap[name](self, seqid, iprot, oprot) return True def process_exists(self, seqid, iprot, oprot): args = exists_args() args.read(iprot) iprot.readMessageEnd() result = exists_result() try: result.success = self._handler.exists(args.table, args.tget) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("exists", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_existsAll(self, seqid, iprot, oprot): args = existsAll_args() args.read(iprot) iprot.readMessageEnd() result = existsAll_result() try: result.success = self._handler.existsAll(args.table, args.tgets) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("existsAll", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_get(self, seqid, iprot, oprot): args = get_args() args.read(iprot) iprot.readMessageEnd() result = get_result() try: result.success = self._handler.get(args.table, args.tget) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("get", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_getMultiple(self, seqid, iprot, oprot): args = getMultiple_args() args.read(iprot) iprot.readMessageEnd() result = getMultiple_result() try: result.success = self._handler.getMultiple(args.table, args.tgets) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("getMultiple", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def 
process_put(self, seqid, iprot, oprot): args = put_args() args.read(iprot) iprot.readMessageEnd() result = put_result() try: self._handler.put(args.table, args.tput) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("put", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_checkAndPut(self, seqid, iprot, oprot): args = checkAndPut_args() args.read(iprot) iprot.readMessageEnd() result = checkAndPut_result() try: result.success = self._handler.checkAndPut(args.table, args.row, args.family, args.qualifier, args.value, args.tput) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("checkAndPut", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_putMultiple(self, seqid, iprot, oprot): args = putMultiple_args() args.read(iprot) iprot.readMessageEnd() result = putMultiple_result() try: self._handler.putMultiple(args.table, args.tputs) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("putMultiple", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_deleteSingle(self, seqid, iprot, oprot): args = deleteSingle_args() args.read(iprot) iprot.readMessageEnd() result = deleteSingle_result() try: self._handler.deleteSingle(args.table, args.tdelete) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("deleteSingle", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_deleteMultiple(self, seqid, iprot, oprot): args = deleteMultiple_args() args.read(iprot) iprot.readMessageEnd() result = deleteMultiple_result() try: result.success = self._handler.deleteMultiple(args.table, args.tdeletes) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("deleteMultiple", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_checkAndDelete(self, seqid, iprot, oprot): args = checkAndDelete_args() args.read(iprot) iprot.readMessageEnd() result = checkAndDelete_result() 
try: result.success = self._handler.checkAndDelete(args.table, args.row, args.family, args.qualifier, args.value, args.tdelete) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("checkAndDelete", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_increment(self, seqid, iprot, oprot): args = increment_args() args.read(iprot) iprot.readMessageEnd() result = increment_result() try: result.success = self._handler.increment(args.table, args.tincrement) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("increment", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_append(self, seqid, iprot, oprot): args = append_args() args.read(iprot) iprot.readMessageEnd() result = append_result() try: result.success = self._handler.append(args.table, args.tappend) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("append", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_openScanner(self, seqid, iprot, oprot): args = openScanner_args() args.read(iprot) iprot.readMessageEnd() result = openScanner_result() try: result.success = self._handler.openScanner(args.table, args.tscan) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("openScanner", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_getScannerRows(self, seqid, iprot, oprot): args = getScannerRows_args() args.read(iprot) iprot.readMessageEnd() result = getScannerRows_result() try: result.success = self._handler.getScannerRows(args.scannerId, args.numRows) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except TIllegalArgument as ia: msg_type = TMessageType.REPLY result.ia = ia except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("getScannerRows", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_closeScanner(self, seqid, iprot, oprot): args = closeScanner_args() args.read(iprot) iprot.readMessageEnd() result = closeScanner_result() try: 
self._handler.closeScanner(args.scannerId) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except TIllegalArgument as ia: msg_type = TMessageType.REPLY result.ia = ia except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("closeScanner", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_mutateRow(self, seqid, iprot, oprot): args = mutateRow_args() args.read(iprot) iprot.readMessageEnd() result = mutateRow_result() try: self._handler.mutateRow(args.table, args.trowMutations) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("mutateRow", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_getScannerResults(self, seqid, iprot, oprot): args = getScannerResults_args() args.read(iprot) iprot.readMessageEnd() result = getScannerResults_result() try: result.success = self._handler.getScannerResults(args.table, args.tscan, args.numRows) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("getScannerResults", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_getRegionLocation(self, seqid, iprot, oprot): args = getRegionLocation_args() args.read(iprot) iprot.readMessageEnd() result = getRegionLocation_result() try: result.success = self._handler.getRegionLocation(args.table, args.row, args.reload) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("getRegionLocation", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_getAllRegionLocations(self, seqid, iprot, oprot): args = getAllRegionLocations_args() args.read(iprot) iprot.readMessageEnd() result = getAllRegionLocations_result() try: result.success = self._handler.getAllRegionLocations(args.table) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("getAllRegionLocations", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_checkAndMutate(self, seqid, iprot, oprot): args = checkAndMutate_args() args.read(iprot) iprot.readMessageEnd() result = 
checkAndMutate_result() try: result.success = self._handler.checkAndMutate(args.table, args.row, args.family, args.qualifier, args.compareOp, args.value, args.rowMutations) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise except TIOError as io: msg_type = TMessageType.REPLY result.io = io except Exception as ex: msg_type = TMessageType.EXCEPTION logging.exception(ex) result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("checkAndMutate", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() # HELPER FUNCTIONS AND STRUCTURES class exists_args: """ Attributes: - table: the table to check on - tget: the TGet to check for """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tget', (TGet, TGet.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tget=None,): self.table = table self.tget = tget def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tget = TGet() self.tget.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('exists_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tget is not None: oprot.writeFieldBegin('tget', TType.STRUCT, 2) self.tget.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tget is None: raise TProtocol.TProtocolException(message='Required field tget is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tget) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class exists_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == 
TType.STOP: break if fid == 0: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('exists_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class existsAll_args: """ Attributes: - table: the table to check on - tgets: a list of TGets to check for """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.LIST, 'tgets', (TType.STRUCT,(TGet, TGet.thrift_spec)), None, ), # 2 ) def __init__(self, table=None, tgets=None,): self.table = table self.tgets = tgets def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.tgets = [] (_etype129, _size126) = iprot.readListBegin() for _i130 in xrange(_size126): _elem131 = TGet() _elem131.read(iprot) self.tgets.append(_elem131) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('existsAll_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tgets is not None: oprot.writeFieldBegin('tgets', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.tgets)) for iter132 in self.tgets: iter132.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tgets is None: raise TProtocol.TProtocolException(message='Required field tgets is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tgets) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value 
in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class existsAll_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.LIST, 'success', (TType.BOOL,None), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_etype136, _size133) = iprot.readListBegin() for _i137 in xrange(_size133): _elem138 = iprot.readBool() self.success.append(_elem138) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('existsAll_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.BOOL, len(self.success)) for iter139 in self.success: oprot.writeBool(iter139) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class get_args: """ Attributes: - table: the table to get from - tget: the TGet to fetch """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tget', (TGet, TGet.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tget=None,): self.table = table self.tget = tget def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tget = TGet() self.tget.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('get_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tget is not None: oprot.writeFieldBegin('tget', TType.STRUCT, 2) self.tget.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tget is None: raise TProtocol.TProtocolException(message='Required field tget is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tget) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class get_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TResult() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('get_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getMultiple_args: """ Attributes: - table: the table to get from - tgets: a list of TGets to fetch, the Result list will have the Results at corresponding positions or null if there was an error """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.LIST, 'tgets', (TType.STRUCT,(TGet, TGet.thrift_spec)), None, ), # 2 ) def __init__(self, table=None, tgets=None,): self.table = 
class getMultiple_args:
  """
  Attributes:
   - table: the table to get from
   - tgets: a list of TGets to fetch, the Result list will have the Results at corresponding positions or null if there was an error
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'table', None, None, ), # 1
    (2, TType.LIST, 'tgets', (TType.STRUCT,(TGet, TGet.thrift_spec)), None, ), # 2
  )

  def __init__(self, table=None, tgets=None,):
    self.table = table
    self.tgets = tgets

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.table = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          self.tgets = []
          (_etype143, _size140) = iprot.readListBegin()
          for _i144 in xrange(_size140):
            _elem145 = TGet()
            _elem145.read(iprot)
            self.tgets.append(_elem145)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getMultiple_args')
    if self.table is not None:
      oprot.writeFieldBegin('table', TType.STRING, 1)
      oprot.writeString(self.table)
      oprot.writeFieldEnd()
    if self.tgets is not None:
      oprot.writeFieldBegin('tgets', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.tgets))
      for iter146 in self.tgets:
        iter146.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.table is None:
      raise TProtocol.TProtocolException(message='Required field table is unset!')
    if self.tgets is None:
      raise TProtocol.TProtocolException(message='Required field tgets is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.table)
    value = (value * 31) ^ hash(self.tgets)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class getMultiple_result:
  """
  Attributes:
   - success
   - io
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, io=None,):
    self.success = success
    self.io = io

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype150, _size147) = iprot.readListBegin()
          for _i151 in xrange(_size147):
            _elem152 = TResult()
            _elem152.read(iprot)
            self.success.append(_elem152)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.io = TIOError()
          self.io.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
oprot.writeStructBegin('getMultiple_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) for iter153 in self.success: iter153.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class put_args: """ Attributes: - table: the table to put data in - tput: the TPut to put """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tput', (TPut, TPut.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tput=None,): self.table = table self.tput = tput def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tput = TPut() self.tput.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('put_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tput is not None: oprot.writeFieldBegin('tput', TType.STRUCT, 2) self.tput.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tput is None: raise TProtocol.TProtocolException(message='Required field tput is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tput) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class put_result: """ Attributes: - io """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, io=None,): self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, 
self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('put_result') if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class checkAndPut_args: """ Attributes: - table: to check in and put to - row: row to check - family: column family to check - qualifier: column qualifier to check - value: the expected value, if not provided the check is for the non-existence of the column in question - tput: the TPut to put if the check succeeds """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRING, 'row', None, None, ), # 2 (3, TType.STRING, 'family', None, None, ), # 3 (4, TType.STRING, 'qualifier', None, None, ), # 4 (5, TType.STRING, 'value', None, None, ), # 5 (6, TType.STRUCT, 'tput', (TPut, TPut.thrift_spec), None, ), # 6 ) def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, tput=None,): self.table = table self.row = row self.family = family self.qualifier = qualifier self.value = value self.tput = tput def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.row = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.family = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.qualifier = iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.value = iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRUCT: self.tput = TPut() self.tput.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('checkAndPut_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.row is not None: oprot.writeFieldBegin('row', 
TType.STRING, 2) oprot.writeString(self.row) oprot.writeFieldEnd() if self.family is not None: oprot.writeFieldBegin('family', TType.STRING, 3) oprot.writeString(self.family) oprot.writeFieldEnd() if self.qualifier is not None: oprot.writeFieldBegin('qualifier', TType.STRING, 4) oprot.writeString(self.qualifier) oprot.writeFieldEnd() if self.value is not None: oprot.writeFieldBegin('value', TType.STRING, 5) oprot.writeString(self.value) oprot.writeFieldEnd() if self.tput is not None: oprot.writeFieldBegin('tput', TType.STRUCT, 6) self.tput.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.row is None: raise TProtocol.TProtocolException(message='Required field row is unset!') if self.family is None: raise TProtocol.TProtocolException(message='Required field family is unset!') if self.qualifier is None: raise TProtocol.TProtocolException(message='Required field qualifier is unset!') if self.tput is None: raise TProtocol.TProtocolException(message='Required field tput is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.row) value = (value * 31) ^ hash(self.family) value = (value * 31) ^ hash(self.qualifier) value = (value * 31) ^ hash(self.value) value = (value * 31) ^ hash(self.tput) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class checkAndPut_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('checkAndPut_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class putMultiple_args: """ Attributes: - table: the table to put data in - tputs: a list of TPuts to commit """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.LIST, 'tputs', (TType.STRUCT,(TPut, TPut.thrift_spec)), None, ), # 2 ) def __init__(self, table=None, tputs=None,): self.table = table self.tputs = tputs def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.tputs = [] (_etype157, _size154) = iprot.readListBegin() for _i158 in xrange(_size154): _elem159 = TPut() _elem159.read(iprot) self.tputs.append(_elem159) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('putMultiple_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tputs is not None: oprot.writeFieldBegin('tputs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.tputs)) for iter160 in self.tputs: iter160.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tputs is None: raise TProtocol.TProtocolException(message='Required field tputs is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tputs) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class putMultiple_result: """ Attributes: - io """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, io=None,): self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('putMultiple_result') if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class deleteSingle_args: """ Attributes: - table: the table to delete from - tdelete: the TDelete to delete """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tdelete', (TDelete, TDelete.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tdelete=None,): self.table = table self.tdelete = tdelete def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tdelete = TDelete() self.tdelete.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('deleteSingle_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tdelete is not None: oprot.writeFieldBegin('tdelete', TType.STRUCT, 2) self.tdelete.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tdelete is None: raise TProtocol.TProtocolException(message='Required field tdelete is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tdelete) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class deleteSingle_result: """ Attributes: - io """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, io=None,): self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return 
iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('deleteSingle_result') if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class deleteMultiple_args: """ Attributes: - table: the table to delete from - tdeletes: list of TDeletes to delete """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.LIST, 'tdeletes', (TType.STRUCT,(TDelete, TDelete.thrift_spec)), None, ), # 2 ) def __init__(self, table=None, tdeletes=None,): self.table = table self.tdeletes = tdeletes def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.tdeletes = [] (_etype164, _size161) = iprot.readListBegin() for _i165 in xrange(_size161): _elem166 = TDelete() _elem166.read(iprot) self.tdeletes.append(_elem166) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('deleteMultiple_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tdeletes is not None: oprot.writeFieldBegin('tdeletes', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.tdeletes)) for iter167 in self.tdeletes: iter167.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tdeletes is None: raise TProtocol.TProtocolException(message='Required field tdeletes is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tdeletes) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, 
other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class deleteMultiple_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.LIST, 'success', (TType.STRUCT,(TDelete, TDelete.thrift_spec)), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_etype171, _size168) = iprot.readListBegin() for _i172 in xrange(_size168): _elem173 = TDelete() _elem173.read(iprot) self.success.append(_elem173) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('deleteMultiple_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) for iter174 in self.success: iter174.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class checkAndDelete_args: """ Attributes: - table: to check in and delete from - row: row to check - family: column family to check - qualifier: column qualifier to check - value: the expected value, if not provided the check is for the non-existence of the column in question - tdelete: the TDelete to execute if the check succeeds """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRING, 'row', None, None, ), # 2 (3, TType.STRING, 'family', None, None, ), # 3 (4, TType.STRING, 'qualifier', None, None, ), # 4 (5, TType.STRING, 'value', None, None, ), # 5 (6, TType.STRUCT, 'tdelete', (TDelete, TDelete.thrift_spec), None, ), # 6 ) def __init__(self, table=None, row=None, family=None, qualifier=None, value=None, tdelete=None,): self.table = table self.row = row self.family = family self.qualifier = qualifier self.value = value self.tdelete = tdelete def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, 
iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.row = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.family = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.qualifier = iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.value = iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRUCT: self.tdelete = TDelete() self.tdelete.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('checkAndDelete_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.row is not None: oprot.writeFieldBegin('row', TType.STRING, 2) oprot.writeString(self.row) oprot.writeFieldEnd() if self.family is not None: oprot.writeFieldBegin('family', TType.STRING, 3) oprot.writeString(self.family) oprot.writeFieldEnd() if self.qualifier is not None: oprot.writeFieldBegin('qualifier', TType.STRING, 4) oprot.writeString(self.qualifier) oprot.writeFieldEnd() if self.value is not None: oprot.writeFieldBegin('value', TType.STRING, 5) oprot.writeString(self.value) oprot.writeFieldEnd() if self.tdelete is not None: oprot.writeFieldBegin('tdelete', TType.STRUCT, 6) self.tdelete.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.row is None: raise TProtocol.TProtocolException(message='Required field row is unset!') if self.family is None: raise TProtocol.TProtocolException(message='Required field family is unset!') if self.qualifier is None: raise TProtocol.TProtocolException(message='Required field qualifier is unset!') if self.tdelete is None: raise TProtocol.TProtocolException(message='Required field tdelete is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.row) value = (value * 31) ^ hash(self.family) value = (value * 31) ^ hash(self.qualifier) value = (value * 31) ^ hash(self.value) value = (value * 31) ^ hash(self.tdelete) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class checkAndDelete_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not 
None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('checkAndDelete_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class increment_args: """ Attributes: - table: the table to increment the value on - tincrement: the TIncrement to increment """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tincrement', (TIncrement, TIncrement.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tincrement=None,): self.table = table self.tincrement = tincrement def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tincrement = TIncrement() self.tincrement.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('increment_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tincrement is not None: oprot.writeFieldBegin('tincrement', TType.STRUCT, 2) self.tincrement.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tincrement is None: raise TProtocol.TProtocolException(message='Required field tincrement is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tincrement) return value def 
__repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class increment_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TResult() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('increment_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class append_args: """ Attributes: - table: the table to append the value on - tappend: the TAppend to append """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tappend', (TAppend, TAppend.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tappend=None,): self.table = table self.tappend = tappend def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tappend = TAppend() self.tappend.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, 
(self.__class__, self.thrift_spec))) return oprot.writeStructBegin('append_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tappend is not None: oprot.writeFieldBegin('tappend', TType.STRUCT, 2) self.tappend.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tappend is None: raise TProtocol.TProtocolException(message='Required field tappend is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tappend) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class append_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.STRUCT, 'success', (TResult, TResult.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TResult() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('append_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class openScanner_args: """ Attributes: - table: the table to get the Scanner for - tscan: the scan object to get a Scanner for """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tscan', (TScan, TScan.thrift_spec), None, ), # 2 ) def __init__(self, table=None, tscan=None,): self.table = table self.tscan = tscan def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tscan = TScan() self.tscan.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('openScanner_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tscan is not None: oprot.writeFieldBegin('tscan', TType.STRUCT, 2) self.tscan.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tscan is None: raise TProtocol.TProtocolException(message='Required field tscan is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tscan) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class openScanner_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.I32, 'success', None, None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.I32: self.success = iprot.readI32() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('openScanner_result') if self.success is not None: oprot.writeFieldBegin('success', TType.I32, 0) oprot.writeI32(self.success) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in 
self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getScannerRows_args: """ Attributes: - scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. - numRows: number of rows to return """ thrift_spec = ( None, # 0 (1, TType.I32, 'scannerId', None, None, ), # 1 (2, TType.I32, 'numRows', None, 1, ), # 2 ) def __init__(self, scannerId=None, numRows=thrift_spec[2][4],): self.scannerId = scannerId self.numRows = numRows def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.scannerId = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.numRows = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getScannerRows_args') if self.scannerId is not None: oprot.writeFieldBegin('scannerId', TType.I32, 1) oprot.writeI32(self.scannerId) oprot.writeFieldEnd() if self.numRows is not None: oprot.writeFieldBegin('numRows', TType.I32, 2) oprot.writeI32(self.numRows) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.scannerId is None: raise TProtocol.TProtocolException(message='Required field scannerId is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.scannerId) value = (value * 31) ^ hash(self.numRows) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getScannerRows_result: """ Attributes: - success - io - ia: if the scannerId is invalid """ thrift_spec = ( (0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'ia', (TIllegalArgument, TIllegalArgument.thrift_spec), None, ), # 2 ) def __init__(self, success=None, io=None, ia=None,): self.success = success self.io = io self.ia = ia def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_etype178, _size175) = iprot.readListBegin() for _i179 in xrange(_size175): _elem180 = TResult() _elem180.read(iprot) 
self.success.append(_elem180) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.ia = TIllegalArgument() self.ia.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getScannerRows_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) for iter181 in self.success: iter181.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() if self.ia is not None: oprot.writeFieldBegin('ia', TType.STRUCT, 2) self.ia.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) value = (value * 31) ^ hash(self.ia) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class closeScanner_args: """ Attributes: - scannerId: the Id of the Scanner to close * """ thrift_spec = ( None, # 0 (1, TType.I32, 'scannerId', None, None, ), # 1 ) def __init__(self, scannerId=None,): self.scannerId = scannerId def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.scannerId = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('closeScanner_args') if self.scannerId is not None: oprot.writeFieldBegin('scannerId', TType.I32, 1) oprot.writeI32(self.scannerId) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.scannerId is None: raise TProtocol.TProtocolException(message='Required field scannerId is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.scannerId) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class closeScanner_result: """ Attributes: - io - ia: if the scannerId is invalid """ thrift_spec = ( None, # 0 (1, 
TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'ia', (TIllegalArgument, TIllegalArgument.thrift_spec), None, ), # 2 ) def __init__(self, io=None, ia=None,): self.io = io self.ia = ia def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.ia = TIllegalArgument() self.ia.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('closeScanner_result') if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() if self.ia is not None: oprot.writeFieldBegin('ia', TType.STRUCT, 2) self.ia.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.io) value = (value * 31) ^ hash(self.ia) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class mutateRow_args: """ Attributes: - table: table to apply the mutations - trowMutations: mutations to apply """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'trowMutations', (TRowMutations, TRowMutations.thrift_spec), None, ), # 2 ) def __init__(self, table=None, trowMutations=None,): self.table = table self.trowMutations = trowMutations def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.trowMutations = TRowMutations() self.trowMutations.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('mutateRow_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.trowMutations is not None: oprot.writeFieldBegin('trowMutations', TType.STRUCT, 2) self.trowMutations.write(oprot) 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.trowMutations is None: raise TProtocol.TProtocolException(message='Required field trowMutations is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.trowMutations) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class mutateRow_result: """ Attributes: - io """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, io=None,): self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('mutateRow_result') if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getScannerResults_args: """ Attributes: - table: the table to get the Scanner for - tscan: the scan object to get a Scanner for - numRows: number of rows to return """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRUCT, 'tscan', (TScan, TScan.thrift_spec), None, ), # 2 (3, TType.I32, 'numRows', None, 1, ), # 3 ) def __init__(self, table=None, tscan=None, numRows=thrift_spec[3][4],): self.table = table self.tscan = tscan self.numRows = numRows def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.tscan = TScan() self.tscan.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.numRows = iprot.readI32() else: 
iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getScannerResults_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.tscan is not None: oprot.writeFieldBegin('tscan', TType.STRUCT, 2) self.tscan.write(oprot) oprot.writeFieldEnd() if self.numRows is not None: oprot.writeFieldBegin('numRows', TType.I32, 3) oprot.writeI32(self.numRows) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.tscan is None: raise TProtocol.TProtocolException(message='Required field tscan is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.tscan) value = (value * 31) ^ hash(self.numRows) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getScannerResults_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.LIST, 'success', (TType.STRUCT,(TResult, TResult.thrift_spec)), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_etype185, _size182) = iprot.readListBegin() for _i186 in xrange(_size182): _elem187 = TResult() _elem187.read(iprot) self.success.append(_elem187) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getScannerResults_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) for iter188 in self.success: iter188.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in 
self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getRegionLocation_args: """ Attributes: - table - row - reload """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRING, 'row', None, None, ), # 2 (3, TType.BOOL, 'reload', None, None, ), # 3 ) def __init__(self, table=None, row=None, reload=None,): self.table = table self.row = row self.reload = reload def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.row = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.BOOL: self.reload = iprot.readBool() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getRegionLocation_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.row is not None: oprot.writeFieldBegin('row', TType.STRING, 2) oprot.writeString(self.row) oprot.writeFieldEnd() if self.reload is not None: oprot.writeFieldBegin('reload', TType.BOOL, 3) oprot.writeBool(self.reload) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.row is None: raise TProtocol.TProtocolException(message='Required field row is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.row) value = (value * 31) ^ hash(self.reload) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getRegionLocation_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.STRUCT, 'success', (THRegionLocation, THRegionLocation.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = THRegionLocation() 
self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getRegionLocation_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getAllRegionLocations_args: """ Attributes: - table """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 ) def __init__(self, table=None,): self.table = table def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getAllRegionLocations_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getAllRegionLocations_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.LIST, 'success', (TType.STRUCT,(THRegionLocation, THRegionLocation.thrift_spec)), None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: 
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_etype192, _size189) = iprot.readListBegin() for _i193 in xrange(_size189): _elem194 = THRegionLocation() _elem194.read(iprot) self.success.append(_elem194) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('getAllRegionLocations_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) for iter195 in self.success: iter195.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class checkAndMutate_args: """ Attributes: - table: to check in and delete from - row: row to check - family: column family to check - qualifier: column qualifier to check - compareOp: comparison to make on the value - value: the expected value to be compared against, if not provided the check is for the non-existence of the column in question - rowMutations: row mutations to execute if the value matches """ thrift_spec = ( None, # 0 (1, TType.STRING, 'table', None, None, ), # 1 (2, TType.STRING, 'row', None, None, ), # 2 (3, TType.STRING, 'family', None, None, ), # 3 (4, TType.STRING, 'qualifier', None, None, ), # 4 (5, TType.I32, 'compareOp', None, None, ), # 5 (6, TType.STRING, 'value', None, None, ), # 6 (7, TType.STRUCT, 'rowMutations', (TRowMutations, TRowMutations.thrift_spec), None, ), # 7 ) def __init__(self, table=None, row=None, family=None, qualifier=None, compareOp=None, value=None, rowMutations=None,): self.table = table self.row = row self.family = family self.qualifier = qualifier self.compareOp = compareOp self.value = value self.rowMutations = rowMutations def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.table = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.row = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.family = iprot.readString() else: iprot.skip(ftype) elif 
fid == 4: if ftype == TType.STRING: self.qualifier = iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.I32: self.compareOp = iprot.readI32() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.value = iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRUCT: self.rowMutations = TRowMutations() self.rowMutations.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('checkAndMutate_args') if self.table is not None: oprot.writeFieldBegin('table', TType.STRING, 1) oprot.writeString(self.table) oprot.writeFieldEnd() if self.row is not None: oprot.writeFieldBegin('row', TType.STRING, 2) oprot.writeString(self.row) oprot.writeFieldEnd() if self.family is not None: oprot.writeFieldBegin('family', TType.STRING, 3) oprot.writeString(self.family) oprot.writeFieldEnd() if self.qualifier is not None: oprot.writeFieldBegin('qualifier', TType.STRING, 4) oprot.writeString(self.qualifier) oprot.writeFieldEnd() if self.compareOp is not None: oprot.writeFieldBegin('compareOp', TType.I32, 5) oprot.writeI32(self.compareOp) oprot.writeFieldEnd() if self.value is not None: oprot.writeFieldBegin('value', TType.STRING, 6) oprot.writeString(self.value) oprot.writeFieldEnd() if self.rowMutations is not None: oprot.writeFieldBegin('rowMutations', TType.STRUCT, 7) self.rowMutations.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.table is None: raise TProtocol.TProtocolException(message='Required field table is unset!') if self.row is None: raise TProtocol.TProtocolException(message='Required field row is unset!') if self.family is None: raise TProtocol.TProtocolException(message='Required field family is unset!') if self.qualifier is None: raise TProtocol.TProtocolException(message='Required field qualifier is unset!') if self.compareOp is None: raise TProtocol.TProtocolException(message='Required field compareOp is unset!') if self.rowMutations is None: raise TProtocol.TProtocolException(message='Required field rowMutations is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.table) value = (value * 31) ^ hash(self.row) value = (value * 31) ^ hash(self.family) value = (value * 31) ^ hash(self.qualifier) value = (value * 31) ^ hash(self.compareOp) value = (value * 31) ^ hash(self.value) value = (value * 31) ^ hash(self.rowMutations) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class checkAndMutate_result: """ Attributes: - success - io """ thrift_spec = ( (0, TType.BOOL, 'success', None, None, ), # 0 (1, TType.STRUCT, 'io', (TIOError, TIOError.thrift_spec), None, ), # 1 ) def __init__(self, success=None, io=None,): self.success = success self.io = io def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: 
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.io = TIOError() self.io.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('checkAndMutate_result') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 0) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.io is not None: oprot.writeFieldBegin('io', TType.STRUCT, 1) self.io.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.io) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other)
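# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated code above): serializing one of the
# argument structs by hand with the standard Thrift Python runtime. Only names
# visible in this module are used (checkAndMutate_args plus the TTransport /
# TBinaryProtocol imports every generated Thrift module carries); the
# TRowMutations instance and the compareOp integer are supplied by the caller,
# since their construction depends on definitions earlier in this file.

from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol


def serialize_check_and_mutate(table, row, family, qualifier, compare_op,
                               value, row_mutations):
    """Build a checkAndMutate_args struct and return its binary-protocol bytes."""
    args = checkAndMutate_args(table=table, row=row, family=family,
                               qualifier=qualifier, compareOp=compare_op,
                               value=value, rowMutations=row_mutations)
    args.validate()                            # raises if a required field is unset
    buf = TTransport.TMemoryBuffer()           # in-memory transport, no socket needed
    args.write(TBinaryProtocol.TBinaryProtocol(buf))
    return buf.getvalue()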
python
import random

from cocos.actions import Move, CallFunc, Delay
from cocos.layer import Layer, director
from cocos.sprite import Sprite
import cocos.collision_model as CollisionModel

from app import gVariables
from app.audioManager import SFX


class Enemy(Layer):
    def __init__(self):
        super(Enemy, self).__init__()

    def set(self, gScene):
        self.gScene = gScene
        self.R = gScene.R                                # resources
        self.batch = gScene.batch                        # batch object
        self.player = gScene.PLAYER                      # player sprite
        self.collisionManager = gScene.collisionManager
        # Enemy lists
        self.enemy_lists = set()
        # Schedule timers: spawn an enemy every second, check collisions every frame
        self.schedule_interval(self.generateEnemyLists, 1)
        self.schedule(self.checkForCollision)

    def generateEnemyLists(self, dt):
        if self.player.is_playing:
            index = random.randint(0, 3)
            EO = EnemyObject((self, index))
            self.collisionManager.add(EO)
            self.batch.add(EO)
            self.enemy_lists.add(EO)

    def checkForCollision(self, dt):
        eOBJ = set()
        for enemyObj in self.enemy_lists:
            if not enemyObj.isDead:
                enemyObj.cshape.center = enemyObj.position
                collisions = self.collisionManager.objs_colliding(enemyObj)
                if collisions:
                    if self.player.PLAYER in collisions:
                        enemyObj.die(True)
                        self.player.getHit()
            if enemyObj.position[0] < 0 - enemyObj.width:
                enemyObj.visible = False
            if not enemyObj.visible:
                eOBJ.add(enemyObj)
        # delete the collected objects from the active set
        for obj in eOBJ:
            self.enemy_lists.remove(obj)


class EnemyObject(Sprite):
    def __init__(self, e):
        super(EnemyObject, self).__init__(e[0].R.ENEMY[e[1]])
        # X(axis)-location for the enemy
        self.e = e
        self.isDead = False
        self.scale = 0.7
        self.position = (director._window_virtual_width,
                         random.randint(30, director._window_virtual_height - 34 - self.height / 2))
        self.velocity = (-100, 0)
        self.deadtemplate = Delay(0.5) + CallFunc(self.destroy)
        self.do(Move())
        # Collision shape
        self.cshape = CollisionModel.AARectShape(self.position, self.width / 2, self.height / 2)

    def die(self, collidewithplayer=False):
        try:
            if gVariables.g_IS_FX:
                SFX(self.e[0].R._SFX[1])
            if collidewithplayer:
                self.e[0].gScene.HUD.sLists[self.e[0].gScene.PLAYER.total_lives - 1].visible = False
            self.e[0].gScene.collisionManager.remove_tricky(self)
            self.e[0].player.total_kill += 1
            self.image = self.e[0].R.EFFECT[0]
            self.isDead = True
            self.velocity = (0, 0)
            self.do(self.deadtemplate)
        except Exception:
            print("ERR")

    def destroy(self):
        self.visible = False
python
# SPDX-License-Identifier: Apache-2.0 """ Tests pipeline within pipelines. """ from textwrap import dedent import unittest from io import StringIO import numpy as np import pandas try: from sklearn.compose import ColumnTransformer except ImportError: # not available in 0.19 ColumnTransformer = None try: from sklearn.impute import SimpleImputer except ImportError: from sklearn.preprocessing import Imputer as SimpleImputer from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline from sklearn.preprocessing import ( MinMaxScaler, RobustScaler, StandardScaler, OneHotEncoder) from sklearn.feature_extraction.text import CountVectorizer from skl2onnx import convert_sklearn, to_onnx from skl2onnx.common.data_types import FloatTensorType, StringTensorType from test_utils import dump_data_and_model, TARGET_OPSET class TestSklearnPipelineWithinPipeline(unittest.TestCase): def test_pipeline_pca_pipeline_minmax(self): model = Pipeline( memory=None, steps=[ ( "PCA", PCA( copy=True, iterated_power="auto", n_components=0.15842105263157896, random_state=None, tol=0.0, svd_solver="auto", whiten=False, ), ), ( "Pipeline", Pipeline( memory=None, steps=[( "MinMax scaler", MinMaxScaler( copy=True, feature_range=(0, 3.7209871159509307), ), )], ), ), ], ) data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32) y = [0, 0, 1, 1] model.fit(data, y) model_onnx = convert_sklearn( model, "pipelinewithinpipeline", [("input", FloatTensorType(data.shape))], target_opset=TARGET_OPSET ) self.assertTrue(model_onnx is not None) dump_data_and_model( data, model, model_onnx, basename="SklearnPipelinePcaPipelineMinMax", allow_failure="StrictVersion(onnxruntime.__version__)" " <= StrictVersion('0.2.1')", ) def test_pipeline_pca_pipeline_none_lin(self): model = Pipeline( memory=None, steps=[ ( "PCA", PCA( copy=True, iterated_power="auto", n_components=0.15842105263157896, random_state=None, tol=0.0, svd_solver="auto", whiten=False, ), ), ( "Pipeline", Pipeline( memory=None, steps=[ ( "MinMax scaler", MinMaxScaler( copy=True, feature_range=(0, 3.7209871159509307), ), ), ("logreg", LogisticRegression(solver="liblinear")), ], ), ), ], ) data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32) y = [0, 0, 1, 1] model.fit(data, y) model_onnx = convert_sklearn( model, "pipelinewithinpipeline", [("input", FloatTensorType(data.shape))], target_opset=TARGET_OPSET ) self.assertTrue(model_onnx is not None) dump_data_and_model( data, model, model_onnx, basename="SklearnPipelinePcaPipelineMinMaxLogReg", allow_failure="StrictVersion(onnxruntime.__version__)" " <= StrictVersion('0.2.1')", ) def test_pipeline_pca_pipeline_multinomial(self): model = Pipeline( memory=None, steps=[ ( "PCA", PCA( copy=True, iterated_power="auto", n_components=2, random_state=None, svd_solver="auto", tol=0.0, whiten=False, ), ), ( "Pipeline", Pipeline( memory=None, steps=[ ( "MinMax scaler", MinMaxScaler( copy=True, feature_range=(0, 3.7209871159509307), ), ), ( "MultinomialNB", MultinomialNB( alpha=0.7368421052631579, class_prior=None, fit_prior=True, ), ), ], ), ), ], ) data = np.array( [[0, 0, 0], [0, 0, 0.1], [1, 1, 1.1], [1, 1.1, 1]], dtype=np.float32, ) y = [0, 0, 1, 1] model.fit(data, y) model_onnx = convert_sklearn( model, "pipelinewithinpipeline", [("input", FloatTensorType(data.shape))], target_opset=TARGET_OPSET ) self.assertTrue(model_onnx is not None) dump_data_and_model( data, model, model_onnx, 
basename="SklearnPipelinePcaPipelineMinMaxNB2", allow_failure="StrictVersion(onnxruntime.__version__)" " <= StrictVersion('0.2.1')", ) def test_pipeline_pca_pipeline_multinomial_none(self): model = Pipeline( memory=None, steps=[ ( "PCA", PCA( copy=True, iterated_power="auto", n_components=0.15842105263157896, random_state=None, tol=0.0, svd_solver="auto", whiten=False, ), ), ( "Pipeline", Pipeline( memory=None, steps=[ ( "MinMax scaler", MinMaxScaler( copy=True, feature_range=(0, 3.7209871159509307), ), ), ( "MultinomialNB", MultinomialNB( alpha=0.7368421052631579, class_prior=None, fit_prior=True, ), ), ], ), ), ], ) data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32) y = [0, 0, 1, 1] model.fit(data, y) model_onnx = convert_sklearn( model, "pipelinewithinpipeline", [("input", FloatTensorType(data.shape))], target_opset=TARGET_OPSET ) self.assertTrue(model_onnx is not None) dump_data_and_model( data, model, model_onnx, basename="SklearnPipelinePcaPipelineMinMaxNBNone", allow_failure="StrictVersion(onnxruntime.__version__)" " <= StrictVersion('0.2.1')", ) @unittest.skipIf( ColumnTransformer is None, reason="ColumnTransformer not available in 0.19") def test_pipeline_column_transformer_pipeline_imputer_scaler_lr(self): X = np.array([[1, 2], [3, np.nan], [3, 0]], dtype=np.float32) y = np.array([1, 0, 1]) model = Pipeline([ ( "ct", ColumnTransformer([ ( "pipeline1", Pipeline([ ("imputer", SimpleImputer()), ("scaler", StandardScaler()), ]), [0], ), ( "pipeline2", Pipeline([ ("imputer", SimpleImputer()), ("scaler", RobustScaler()), ]), [1], ), ]), ), ("lr", LogisticRegression(solver="liblinear")), ]) model.fit(X, y) model_onnx = convert_sklearn( model, "pipelinewithinpipeline", [("input", FloatTensorType([None, X.shape[1]]))], target_opset=TARGET_OPSET ) self.assertTrue(model_onnx is not None) dump_data_and_model( X, model, model_onnx, basename="SklearnPipelineCTPipelineImputerScalerLR", allow_failure="StrictVersion(onnxruntime.__version__)" " <= StrictVersion('0.2.1')", ) @unittest.skipIf( ColumnTransformer is None, reason="ColumnTransformer not available in 0.19") def test_complex_pipeline(self): df = pandas.read_csv(StringIO(dedent(""" CAT1,CAT2,TEXT A,M,clean B,N,text A,M,cleaning B,N,normalizing"""))) X_train = df y_train = np.array([[1, 0, 1, 0], [1, 0, 1, 0]]).T categorical_features = ['CAT1', 'CAT2'] textual_feature = 'TEXT' preprocessor = ColumnTransformer( transformers=[ ('cat_transform', OneHotEncoder(handle_unknown='ignore'), categorical_features), ('count_vector', Pipeline(steps=[ ('count_vect', CountVectorizer( max_df=0.8, min_df=0.05, max_features=1000))]), textual_feature)]) preprocessor.fit(X_train, y_train) initial_type = [('CAT1', StringTensorType([None, 1])), ('CAT2', StringTensorType([None, 1])), ('TEXTs', StringTensorType([None, 1]))] with self.assertRaises(RuntimeError): to_onnx(preprocessor, initial_types=initial_type, target_opset=TARGET_OPSET) initial_type = [('CAT1', StringTensorType([None, 1])), ('CAT2', StringTensorType([None, 1])), ('TEXT', StringTensorType([None, 1]))] onx = to_onnx(preprocessor, initial_types=initial_type, target_opset=TARGET_OPSET) dump_data_and_model( X_train, preprocessor, onx, basename="SklearnPipelineComplex") if __name__ == "__main__": unittest.main()
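# ---------------------------------------------------------------------------
# Verification sketch (not part of the test suite): dump_data_and_model()
# hides the runtime comparison, so here is a hedged, minimal version of the
# same round trip using onnxruntime directly. The tiny pipeline, the tensor
# name "input" and the toy data mirror the simplest cases above; the
# rtol/atol values are arbitrary choices for this sketch.

import numpy as np
import onnxruntime as rt
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
model = Pipeline([("pca", PCA(n_components=1)), ("scale", MinMaxScaler())])
model.fit(data)

onx = convert_sklearn(model, "pca_minmax",
                      [("input", FloatTensorType([None, 2]))])
sess = rt.InferenceSession(onx.SerializeToString())
got = sess.run(None, {"input": data})[0]          # ONNX prediction
np.testing.assert_allclose(got, model.transform(data), rtol=1e-5, atol=1e-5)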
python
''' Created on 09.10.2017 @author: Henrik Pilz ''' from xml.sax import make_parser from datamodel import Feature, FeatureSet, Mime, OrderDetails, Price, PriceDetails, Product, ProductDetails, Reference, TreatmentClass from exporter.xml.bmecatExporter import BMEcatExporter from importer.xml.bmecatImportHandler import BMEcatImportHandler from resolver import DTDResolver from test.handler.basicHandlerTest import BasicHandlerTest class XmlTransformationNonStrictValidationTest(BasicHandlerTest): def testCreateBMEcatFullData(self): article = Product() article.productId = '12345' article.details = ProductDetails() article.details.deliveryTime = 10 article.details.description = 'Test Description\nTest Description Line 2 ' article.details.ean = '12345678901234' article.details.keywords = [ 'Keyword 1', 'Keyword 2'] article.details.manufacturerArticleId = '09876' article.details.manufacturerName = 'Manufacturer' article.details.articleStatus = "Bla" tc = TreatmentClass() tc.classType = 'TestClass' tc.value = '12345' article.details.specialTreatmentClasses = [ tc ] article.details.title = ' Test Article ' article.details.supplierAltId = '23456' reference = Reference() reference.referenceType = 'accessory' reference.supplierArticleId = '09876' article.addReference(reference) # Bilder mime = Mime() mime.mimeType = 'image/jpg' mime.order = 1 mime.purpose = 'detail' mime.source = 'manufacturer/Test.jpg' article.addMime(mime) mime = Mime() mime.mimeType = 'image/jpg' mime.order = 2 mime.purpose = 'detail' mime.source = 'manufacturer/Test2.jpg' article.addMime(mime) # LieferDetails article.orderDetails = OrderDetails() article.orderDetails.contentUnit = 'C62' article.orderDetails.orderUnit = 'C62' article.orderDetails.packingQuantity = 25 article.orderDetails.priceQuantity = 100 article.orderDetails.quantityMin = 4 article.orderDetails.quantityInterval = 1 # Preise priceDetails = PriceDetails() price1 = Price() price1.amount = 10.50 price1.priceType = 'net_customer' price1.lowerBound = 1 price1.tax = 0.19 priceDetails.addPrice(price1) price2 = Price() price2.amount = 17.50 price2.priceType = 'net_list' price2.lowerBound = 1 price2.tax = 0.19 priceDetails.addPrice(price2) article.addPriceDetails(priceDetails) # Attribute featureSet = FeatureSet() feature = Feature() feature.name = "Test1" feature.addValue(10) featureSet.addFeature(feature) feature = Feature() feature.name = "Test2" feature.addValue("Blabla") featureSet.addFeature(feature) feature = Feature() feature.name = "Test3" feature.addValue("Blub") featureSet.addFeature(feature) feature = Feature() feature.name = "Test4" feature.addValue("Zack") featureSet.addFeature(feature) article.addFeatureSet(featureSet) self.runAndCheck(article, 'testCreateBMEcatFullData.xml', 'nonstrict') def testCreateBMEcatMinimumDataPlusKeywords(self): article = Product() article.productId = '12345' article.details = ProductDetails() article.details.title = 'Test Article' article.orderDetails = OrderDetails() article.orderDetails.contentUnit = 'C62' article.orderDetails.orderUnit = 'C62' article.orderDetails.packingQuantity = 25 article.orderDetails.priceQuantity = 100 article.orderDetails.quantityMin = 4 article.orderDetails.quantityInterval = 1 priceDetails = PriceDetails() price = Price() price.amount = 10.50 price.priceType = 'net_customer' price.lowerBound = 1 price.tax = 0.19 priceDetails.addPrice(price) article.addPriceDetails(priceDetails) article.addKeyword("Testkeyword") self.runAndCheck(article, 'testCreateBMEcatMinimumDataPlusKeywords.xml', 'nonstrict') def 
testCreateBMEcatMinimumDataFloatDescription(self): article = Product() article.productId = '12345' article.details = ProductDetails() article.details.title = 'Test Article' article.details.description = 123.567 article.orderDetails = OrderDetails() article.orderDetails.contentUnit = 'C62' article.orderDetails.orderUnit = 'C62' article.orderDetails.packingQuantity = 25 article.orderDetails.priceQuantity = 100 article.orderDetails.quantityMin = 4 article.orderDetails.quantityInterval = 1 priceDetails = PriceDetails() price = Price() price.amount = 10.50 price.priceType = 'net_customer' price.lowerBound = 1 price.tax = 0.19 priceDetails.addPrice(price) article.addPriceDetails(priceDetails) self.runAndCheck(article, 'testCreateBMEcatMinimumDataFloatDescription.xml', 'nonstrict') def testCreateBMEcatMinimumData(self): article = Product() article.productId = '12345' article.details = ProductDetails() article.details.title = 'Test Article' article.orderDetails = OrderDetails() article.orderDetails.contentUnit = 'C62' article.orderDetails.orderUnit = 'C62' article.orderDetails.packingQuantity = 25 article.orderDetails.priceQuantity = 100 article.orderDetails.quantityMin = 4 article.orderDetails.quantityInterval = 1 priceDetails = PriceDetails() price = Price() price.amount = 10.50 price.priceType = 'net_customer' price.lowerBound = 1 price.tax = 0.19 priceDetails.addPrice(price) article.addPriceDetails(priceDetails) self.runAndCheck(article, 'testCreateBMEcatMinimumData.xml', 'nonstrict') def runTestMethod(self, article, filename, validation='nonstrict'): articles = { 'new' : [ article ]} # export bmecatExporter = BMEcatExporter(articles, filename, validation) bmecatExporter.writeBMEcatAsXML() # import again parser = make_parser() importHandler = BMEcatImportHandler("%Y-%m-%d") parser.setContentHandler(importHandler) parser.setEntityResolver(DTDResolver()) parser.parse("file:" + filename) return importHandler.articles['new'] # if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] # unittest.main()
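# ---------------------------------------------------------------------------
# Standalone round-trip sketch: the same export / re-import cycle that
# runTestMethod() performs above, factored into a plain function that can be
# used outside unittest. Only names already imported in this module are used;
# 'out.xml' is an arbitrary scratch file name.

def roundtrip(article, filename='out.xml', validation='nonstrict'):
    """Write a single article as BMEcat XML and parse it back in."""
    exporter = BMEcatExporter({'new': [article]}, filename, validation)
    exporter.writeBMEcatAsXML()

    parser = make_parser()
    importHandler = BMEcatImportHandler("%Y-%m-%d")
    parser.setContentHandler(importHandler)
    parser.setEntityResolver(DTDResolver())
    parser.parse("file:" + filename)
    return importHandler.articles['new']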
python
#!/usr/bin/env python3
# Written by Daniel Oaks <[email protected]>
# Released under the ISC license
import unittest

from girc import formatting


class FormattingTestCase(unittest.TestCase):
    """Tests our formatting."""

    def setUp(self):
        errmsg = 'formatting.{} does not exist!'
        self.assertTrue(formatting.escape, msg=errmsg.format('escape'))
        self.assertTrue(formatting.unescape, msg=errmsg.format('unescape'))

    def test_removing_formatting(self):
        self.assertEqual(formatting.remove_formatting_codes('Lol \x03cool \x032tests\x0f!', irc=True),
                         'Lol cool tests!')
        self.assertEqual(formatting.remove_formatting_codes('Lol $c[]cool $c[blue]tests$r!'),
                         'Lol cool tests!')
        self.assertEqual(formatting.remove_formatting_codes('Lol $ccoo$c3,15l $c12,15tests$r!$$y'),
                         'Lol cool tests!$y')
        self.assertEqual(formatting.remove_formatting_codes('Lol co${yolo}ol ${$}tests!$'),
                         'Lol cool $tests!')

    def test_colour_codes(self):
        self.assertEqual(formatting._ctos(5), 'brown')
        self.assertEqual(formatting._ctos(452), 'unknown: 452')

    def test_escaping(self):
        self.assertEqual(formatting.escape('Strawberries are \x02cool\x0f'),
                         'Strawberries are $bcool$r')
        self.assertEqual(formatting.escape('Such \x1dcool\x1d things\x02!\x0f'),
                         'Such $icool$i things$b!$r')
        self.assertEqual(formatting.escape('Lol \x03cool \x032tests\x0f!'),
                         'Lol $c[]cool $c[blue]tests$r!')
        self.assertEqual(formatting.escape('Lol cool\x03'),
                         'Lol cool$c[]')
        self.assertEqual(formatting.escape('Lol \x034cool \x032,tests\x0f!'),
                         'Lol $c[red]cool $c[blue],tests$r!')
        self.assertEqual(formatting.escape('\x02Lol \x034,2cool \x033,8tests\x0f!'),
                         '$bLol $c[red,blue]cool $c[green,yellow]tests$r!')

    def test_unescaping(self):
        self.assertEqual(formatting.unescape('Strawberries are $$cool$r'),
                         'Strawberries are $cool\x0f')
        self.assertEqual(formatting.unescape('Strawberries are $bcool$r'),
                         'Strawberries are \x02cool\x0f')
        self.assertEqual(formatting.unescape('Such $icool$i things$b!$r'),
                         'Such \x1dcool\x1d things\x02!\x0f')
        self.assertEqual(formatting.unescape('How cool$c'),
                         'How cool\x03')
        self.assertEqual(formatting.unescape('Lol $c[red]cool $c[blue]tests$r!'),
                         'Lol \x034cool \x032tests\x0f!')
        self.assertEqual(formatting.unescape('$bLol $c[red,blue]cool $c[green,yellow]tests$r!'),
                         '\x02Lol \x034,2cool \x033,8tests\x0f!')

        # testing custom unescaping function
        def custom_unescape(*args, **kwargs):
            return '{}-{}'.format(','.join(args),
                                  ','.join('{}:{}'.format(k, v) for k, v in kwargs.items()))

        extra_dict = {
            'custom': [custom_unescape, ['r', 't'], {'34': 'dfg'}],
        }
        self.assertEqual(formatting.unescape('lolo[${custom}]', extra_format_dict=extra_dict),
                         'lolo[r,t-34:dfg]')

        extra_dict = {
            'custom': [custom_unescape, ['wer', 'hgd']],
        }
        self.assertEqual(formatting.unescape('fff--${custom}]', extra_format_dict=extra_dict),
                         'fff--wer,hgd-]')

        extra_dict = {
            'custom': [custom_unescape],
        }
        self.assertEqual(formatting.unescape('abcd=${custom}=', extra_format_dict=extra_dict),
                         'abcd=-=')
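# ---------------------------------------------------------------------------
# Usage sketch for the extra_format_dict hook exercised above: each entry maps
# a ${token} name to [callable, optional_args_list, optional_kwargs_dict], and
# the callable's return value is substituted into the message. The token name
# "now" and the helper below are made up for illustration.

import time

from girc import formatting


def _insert_timestamp(fmt):
    # called by unescape() whenever ${now} is encountered
    return time.strftime(fmt)

message = formatting.unescape('$b[${now}]$b server is up',
                              extra_format_dict={'now': [_insert_timestamp, ['%H:%M']]})
# message now starts with the bold control code and the current HH:MM time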
python
#! /usr/bin/env python # -*- coding: utf-8 -*- # 2017 vby ############################ vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 #--------------------------------------------------------------------------------------------------- import sys import os, errno import math import collections import subprocess import json import random import pdb from main.network.psn import PSN from main.na_utils import * from .tmodel import tmodel from .nacommon import * def loadjson(filename): import json with open(filename, 'r') as fh: return json.load(fh) #--------------------------------------------------------------------------------------------------- from collections import namedtuple Edge = namedtuple("Edge", ["src", "dst", "fpp", "nop"]) Annotation = namedtuple("annotation", ["name", "lineno", "level"]) Stmt = namedtuple("Stmt", ["taskname", "annotation"]) class EdgeM(object): def __init__(self, src, dst, fpp, nop): self.src = src self.dst = dst self.fpp = fpp self.nop = nop def __repr__(self): return '{} {} {} {}'.format(self.src, self.dst, self.nop, self.fpp) class amodel(object): def __init__(self, nafile, nadefsfile, toolroot, types, hwkdecls, tasks, taskgroups, tinstances_unexpanded, tdefs_original, sysargs): self.args = sysargs self.toolroot = toolroot self.nafile_path = None self.nafile_postcpp = nafile self.namacros_file = nadefsfile self.types = types self.hwkdecls = hwkdecls self.tasks = tasks self.taskgroups = taskgroups self.tinstances_unexpanded = tinstances_unexpanded self.tdefs_original = tdefs_original self.tmodels = [] self.type_table = collections.OrderedDict() self.typetags = collections.OrderedDict() self.interfpga_links = [] self.psn = PSN(sysargs) self.global_task_map = collections.OrderedDict() self.task_partition_map = collections.OrderedDict() self.original_taskmap_json = collections.OrderedDict() self.hls_bviwrappers_outdir = None """ some default internal options """ # use explicit fifo buffering for flit-i/o between host and the network self.use_buffering_tofrom_host = False if self.args.buffered_sr_ports: self.use_buffering_tofrom_host = True self.buffer_sizing_specs = collections.OrderedDict() """ Generate a task graph for use with the taskgraph version0: basic - nodes are tasks - for edges foreach task, collect tuples ('send', destination, flits_per_packet, number_of_packets) version1: - nodes are tasks - for edges, consider """ def get_task_communication_graph_skeleton(self): gl = [] for tm in self.tmodels: dl = tm.get_unique_message_destinations() for d in dl: gl.append(EdgeM(src=tm.taskname, dst=d, fpp=0, nop=0)) return gl def taskgraph_gen(self): taskgraph_outdir = os.path.join(self.outdir, "taskgraph") """ ------------ Generate graph.txt --------------------------------------- """ G = [] allarcs = self.get_all_communication_arcs() # for tm in self.tmodels: # if tm.is_marked_off_chip: # # TODO handle later in a meaningful way # continue # info1 = tm.get_send_class_statement_info1() # for send_class, _, syminfo, destinations_,nodeobj in info1: # destinations = list(map(tm.resolve_address, destinations_)) # """ # TODO: after TLV send # """ # if send_class == 'send': # for dst in destinations: # # each struct is a packet, and entire array is sent by default # # flits per packet # fpp = self.get_flits_in_type(syminfo.typename) # # number of packets # nop = syminfo.arraysize # if not nodeobj.fullrange(): # nop = nodeobj.length - nodeobj.offset; # e = Edge(src=tm.taskname, dst=dst, fpp=fpp, nop=nop) # G.append(e) # elif send_class == 'scatter': # for dst in 
destinations: # # each struct is a packet # # flits per packet # fpp = self.get_flits_in_type(syminfo.typename) # # array is sliced into len(destinations) and sent # # number of packets # nop = syminfo.arraysize/len(destinations) # if not nodeobj.fullrange(): # nop = (nodeobj.length - nodeobj.offset)/len(destinations); # e = Edge(src=tm.taskname, dst=dst, fpp=fpp, nop=nop) # G.append(e) # elif send_class == 'broadcast': # pass # else: # raise CompilationError("Not implemented yet") # def to_graph_txt(G): # lines = [] # lines.append(len(self.tmodels)) # lines.append(len(G)) # lines.append(' '.join([x.taskname for x in self.tmodels])) # for e in G: # comm_vol_in_flits = e.fpp * e.nop # lines.append('{} {} {} {} {}'.format(e.src, e.dst, comm_vol_in_flits, e.lineno, e.level)) # return lines def merge_allarcs_into_tasklevel_arcs(all_arcs, skel_arcs): for skarc in skel_arcs: for a in all_arcs: if (a.src.taskname, a.dst.taskname) == (skarc.src, skarc.dst): skarc.fpp = a.fpp skarc.nop += a.nop return skel_arcs def to_graph_txt(G, merged=False): lines = [] lines.append(len(self.tmodels)) lines.append(len(G)) lines.append(' '.join([x.taskname for x in self.tmodels])) if not merged: for e in G: comm_vol_in_flits = e.fpp * e.nop lines.append('{} {} {}\t{} {} {}\t{} {} {}'.format(e.src.taskname, e.dst.taskname, comm_vol_in_flits, e.src.annotation.lineno, e.src.annotation.level, e.src.annotation.name, e.dst.annotation.lineno, e.dst.annotation.level, e.dst.annotation.name )) with open (os.path.join(taskgraph_outdir, 'graph_all.txt'), 'w') as fh: fh.write('\n'.join([str(x) for x in lines])) else: for e in G: comm_vol_in_flits = e.fpp * e.nop lines.append('{} {} {}'.format(e.src, e.dst, comm_vol_in_flits)) with open (os.path.join(taskgraph_outdir, 'graph.txt'), 'w') as fh: fh.write('\n'.join([str(x) for x in lines])) return lines G = merge_allarcs_into_tasklevel_arcs(allarcs, self.get_task_communication_graph_skeleton()) trymkdir(taskgraph_outdir) ll = to_graph_txt(G, merged=True) llnew = to_graph_txt(allarcs, merged=False) """ ------------ Generate config.json --------------------------------------- """ cfg = {} cfg['nocpath'] = self.psn.dir cfg['flitwidth_override'] = self.flit_width cfg['drop_precedence_constraints'] = False cfg['num_tasks_per_router_bound'] = 1 cfg['objective'] = 'both' cfg['gurobi_timelimit'] = 60*10 if self.psn.is_connect(): cfg['noctype'] = 'connect' elif self.psn.is_fnoc(): cfg['noctype'] = 'fnoc' else: pass with open(os.path.join(taskgraph_outdir, "config.json"), "w") as oh: json.dump(cfg, oh, indent=4) """ ------------ Generate specs.json --------------------------------------- """ from collections import namedtuple tasknames = [x.taskname for x in self.tmodels] KernelInfo = namedtuple("KernelInfo", ["name","energy", "duration"]) kspecs = {} if self.args.kernel_specs_file: kspecs = loadjson(self.args.kernel_specs_file) def get_task_kernel_list(task): if kspecs: f1 = KernelInfo(name="f1", energy=2, duration=kspecs[task]) else: f1 = KernelInfo(name="f1", energy=2, duration=2) return [f1._asdict()] dict = {} dict["energy_cost_per_bit"] = 0.05 dict["initial_map"] = {} dict["hop_latency"] = 1 dict["cycles_per_pkt"] = 3.0/2 if self.psn.is_fnoc(): dict['hop_latency'] = 3 dict['cycles_per_pkt'] = 8.0/2 dict['task_kernels'] = {task:get_task_kernel_list(task) for task in tasknames} with open(os.path.join(taskgraph_outdir, "specs.json"), "w") as oh: json.dump(dict, oh, indent=4) @property def enabled_lateral_data_io(self): return self.args.enable_lateral_bulk_io def has_scs_type(self, 
SCSNAME): for tm in self.tmodels: for k, v in tm.symbol_table.items(): if v.storage_class == SCSNAME: return True return False def get_vhls_portname(self, typename, instancename): # self.type_table[typename].xxx if len(self.type_table[typename].member_info_tuples)==1 and (self.type_table[typename].basictypes[0]): if (self.type_table[typename].basictypes[0][:3] != 'ap_'): return instancename + '_' + self.type_table[typename].member_info_tuples[0][0] if len(self.type_table[typename].member_info_tuples) == 1: mname = self.type_table[typename].member_info_tuples[0][0] # _V when member_info_tuples[0][1] >= 32, but let's see if mname[-1] == '_': return instancename + '_' + self.type_table[typename].member_info_tuples[0][0] + 'V' else: return instancename + '_' + self.type_table[typename].member_info_tuples[0][0] + '_V' return instancename @property def taskmap_json_file(self): return self.args.taskmap_json_file def all_instances_of_type(self, tmodel): return [tm1 for tm1 in self.tmodels if tm1.taskdefname == tmodel.taskdefname] def taskmap(self, taskname): #return self.global_task_map[taskname] if taskname in self.global_task_map: return self.global_task_map[taskname] else: # TODO neater if taskname == '@return': return 'saved_source_address' else: return taskname def get_lone_scemi_port_id(self): # tmpfix l = self.get_tasks_marked_for_exposing_flit_SR_ports() if len(l) == 1: return l[0][0] else: return 2 def has_nonhls_kernels(self): for d in self.hwkdecls: if not (d.tq == '__vivadohls__'): return True return False def trace_state_entry_exit(self): if self.args.simverbosity == 'state-entry-exit': return True return False """ assuming ./mainout/{src,sim,...} gen ./mainout/bviwrappers/ if na has hlspes, or well, regardless gen ./${bsvkernels} """ def make_wrapper_dirs(self): mainout = self.outdir hlsbvidir = os.path.join(mainout, "bviwrappers") self.hls_bviwrappers_outdir = hlsbvidir trymkdir(hlsbvidir) mainout_par = os.path.join(mainout, os.pardir) bsvkernels="bsvwrappers" bsvkernels = os.path.join(mainout_par, bsvkernels) if self.args.kernelwrapper_outdir: bsvkernels = self.args.kernelwrapper_outdir self.pelib_dir = bsvkernels if self.has_nonhls_kernels(): trymkdir(bsvkernels) if self.args.vhlswrap_outdir: #self.vhlswrappergen_dir = os.path.join(os.path.dirname(self.nafile_path), self.args.vhlswrap_outdir) self.vhlswrappergen_dir = self.args.vhlswrap_outdir # if not os.path.exists(self.pelib_dir): # raise ValueError(self.pelib_dir, # """does not exist, please create explicitly or specify a # directory with a switch # """) # VHLS directory if self.args.vhlswrap_outdir: trymkdir(self.args.vhlswrap_outdir) @property def hls_source_directory_abspath(self): pass @property def out_scriptdir(self): return os.path.join(self.outdir, 'tcl') @property def out_simdir(self): return os.path.join(self.outdir, 'sim') @property def out_swmodeldir(self): return os.path.join(self.outdir, 'mpimodel') def prepare_outdir_layout(self): # SETUP OUTDIR LAYOUT trymkdir(os.path.join(self.outdir, 'ispecs')) trymkdir(os.path.join(self.outdir, 'src')) trymkdir(os.path.join(self.outdir, 'tb')) trymkdir(self.out_simdir) trymkdir(os.path.join(self.outdir, 'data')) trymkdir(os.path.join(self.outdir, 'libs')) trymkdir(os.path.join(self.outdir, 'fpga')) trymkdir(os.path.join(self.outdir, 'libna')) trymkdir(os.path.join(self.outdir, 'scemi')) trymkdir(self.out_swmodeldir) if self.args.scemi: trymkdir(os.path.join(self.outdir, 'tbscemi')) trymkdir(self.out_scriptdir) if self.psn.is_connect(): force_symlink(self.psn.dir, 
os.path.join(self.outdir, 'connect')) if self.psn.is_fnoc(): force_symlink(self.psn.dir, os.path.join(self.outdir, 'forthnoc')) #force_symlink(os.path.join(self.toolroot, 'libs'), os.path.join(self.outdir, 'libs')) force_symlink(os.path.join(self.toolroot, 'libs/bsv'), os.path.join(self.outdir, 'libs/bsv')) if self.has_scs_type('__ram__') or self.has_scs_type('__mbus__'): force_symlink(os.path.join(self.toolroot, 'libs/bsv_reserve'), os.path.join(self.outdir, 'libs/bsv_reserve')) force_symlink(os.path.join(self.toolroot, 'libs/verilog'), os.path.join(self.outdir, 'libs/verilog')) force_symlink(os.path.join(self.toolroot, 'libs/xdc'), os.path.join(self.outdir, 'libs/xdc')) #force_symlink(os.path.join(self.toolroot, 'libs/libna'), os.path.join(self.outdir, 'libs/libna')) force_symlink(os.path.join(self.toolroot, 'libs/vhls_include'), os.path.join(self.outdir, 'libs/vhls_include')) self.make_wrapper_dirs() # Write taskmap json file with open(os.path.join(self.out_simdir, 'taskmap.json'), 'w') as fo: json.dump(self.global_task_map, fp=fo, indent=4) # Dump the mfpga_taskmap.json too if self.task_partition_map: with open(os.path.join(self.out_simdir, 'original_taskmap.json'), 'w') as fo: json.dump(self.original_taskmap_json, fp=fo, indent=4) with open(os.path.join(self.out_simdir, 'mfpga_taskmap.json'), 'w') as fo: json.dump(self.task_partition_map, fp=fo, indent=4) #readback = json.load(open('OUT_CGEN/src/taskmap.json')) with open(os.path.join(self.out_simdir, 'typetags.json'), 'w') as fo: json.dump(self.typetags, fp=fo, indent=4) def setup(self): self.nafile_path = self.args.nafile trymkdir(self.outdir) # Types # self.type_table = collections.OrderedDict() for t in self.types: self.type_table[t.struct_name] = t # Typetags self.typetags = collections.OrderedDict() for i, t in enumerate(self.type_table.keys()): self.typetags[t] = i # Hwkernels # # Tasks # self.tmodels = [tmodel(t) for t in self.tasks] for tm in self.tmodels: tm.setup() tm._gam = self if self.taskmap_json_file and os.path.exists(self.taskmap_json_file): self.global_task_map, self.task_partition_map = self.parse_taskmap_json(self.taskmap_json_file) # Add the interfpga link tasks to tmodels if self.has_tasks_marked_for_xfpga: link_tasks = self.get_interfpga_link_tasks() link_tmodels = [tmodel((None, t)) for t in link_tasks] for tm in link_tmodels: tm.setup() tm._gam = self self.tmodels.extend(link_tmodels) # task groups using a task instance array name as proxy for all instances, we expand def find_name_in_tmodels(name): if name in [x.taskname for x in self.tmodels]: return True def find_if_a_taskinstance_array_name(name): tms_with_array_decl = [t for t in self.tmodels if t.instanceparams and t.instanceparams.num_task_instances] # we have instance tasks that have been defined as arrays # we check if name matches any of these tasknames MINUS the _%d suffix for t in tms_with_array_decl: abc = t.taskname if abc[:abc.rfind('_')] == name: # found, so all the array instances should be accounted for, and sent account_for = t.instanceparams.num_task_instances for t in tms_with_array_decl: abc = t.taskname abc = abc[:abc.rfind('_')] if abc == name: account_for=account_for - 1 if account_for == 0: return True, t.instanceparams.num_task_instances for k, v in self.taskgroups.items(): for name in v.tasknamelist: if not find_name_in_tmodels(name): found, count = find_if_a_taskinstance_array_name(name) if found: v.tasknamelist.remove(name) v.tasknamelist.extend(["{}_{}".format(name, idx) for idx in range(count)]) self.set_a_task_map() # TODO 
temporary arrangement # 1. broadcast: assign address_list; to be done after task map # 2. recv from @any or @customgroup_name for tm in self.tmodels: tm.setup_broadcast_stmts() tm.setup_recv_taskgroup_stmts() tm.setup_send_taskgroup_stmts() tm.setup_scatter_taskgroup_stmts() tm.setup_gather_taskgroup_stmts() tm.setup_barrier_group_resolution() tm.setup_pragma_recvs_sends_declarations() def get_interfpga_link_tasks(self): ifpga_tdl = [] from main.nac import task_definition for link in self.original_taskmap_json['interfpga_links']: (fromfpga, fromnode), (tofpga, tonode) = link.items() qualifiers = ['xfpga'] fromlink_tname = '{}_{}'.format(fromfpga, fromnode) tolink_tname = '{}_{}'.format(tofpga, tonode) td = task_definition( (None, fromlink_tname, qualifiers) ) ifpga_tdl.append(td) td = task_definition( (None, tolink_tname, qualifiers) ) ifpga_tdl.append(td) return ifpga_tdl @property def number_user_send_ports(self): return int(self.psn.params['NUM_USER_SEND_PORTS']) @property def flit_width(self): return int(self.psn.params['FLIT_DATA_WIDTH']) @property def unused_flit_header_bitcount(self): if self.psn.is_fnoc(): # For FNOC we reserve self.number_user_send_ports for use with broadcast/multicast feature return self.flit_width - self.number_user_send_ports - self.get_network_address_width() - self.get_typetags_count_width() - 2 # 2 bits for bcast or multicast indicator elif self.psn.is_connect(): return self.flit_width - self.get_network_address_width() - self.get_typetags_count_width() def sanitychecks(self): # CHECK: whether flit width is enough to accomodate the `header flit' assert self.unused_flit_header_bitcount >= 0, "FLIT_WIDTH unsufficient to hold the header flit, should at least be {}".format(-self.unused_flit_header_bitcount+self.flit_width) pass def hwkernelname2modname(self, k): return k[0].upper()+k[1:] def hwmodname2kernelname(self, k): return k[0].lower()+k[1:] def get_network_address_width(self): nnodes = self.number_user_send_ports addr_width = int(math.ceil(math.log(nnodes, 2))) if 'FORCE_ADDRWIDTH' in self.psn.params: #print("Using FORCE_ADDRWIDTH") return self.psn.params['FORCE_ADDRWIDTH'] return addr_width def getBitWidth(self, count): return int(max(1, int(math.ceil(math.log(count, 2))))) def get_typetags_count_width(self): ntags = len(self.typetags) return self.getBitWidth(ntags) def getranges_tag_and_sourceaddr_info_in_flit(self): fw = self.flit_width nnodes = self.number_user_send_ports addr_width = int(math.ceil(math.log(nnodes, 2))) ntags = len(self.typetags) tag_width = int(max(1, int(math.ceil(math.log(ntags, 2))))) tag_range = str(addr_width+tag_width-1)+':'+str(addr_width) sourceaddr_range = str(addr_width-1)+':0'; opts_width = 4 opts_range = str(opts_width+tag_width+addr_width-1)+':'+str(addr_width+tag_width) assert addr_width + tag_width + opts_width <= fw, " #endpoints_addr_width + ln(#ntypes) <= FLIT_DATA_WIDTH " return (tag_range, sourceaddr_range, opts_range) def typename2tag(self, typename): if typename in self.typetags: return self.typetags[typename] else: pdb.set_trace() raise CompilationError("Unknown type %s" % typename) def parse_taskmap_json(self, taskmap_json_file): self.original_taskmap_json = collections.OrderedDict(json.load(open(self.taskmap_json_file))) x = collections.OrderedDict(json.load(open(self.taskmap_json_file))) if 'header' in x: hdr = x.pop('header') if hdr['multifpga']: interfpga_links = x.pop('interfpga_links') print("xfpgaLinks:", interfpga_links) rmap = collections.OrderedDict() for k,v in x.items(): rmap.update(v) # introduce 
interfpga link tasks for link in interfpga_links: (fromfpga, fromnode), (tofpga, tonode) = link.items() fromlink_tname = '{}_{}'.format(fromfpga, fromnode) tolink_tname = '{}_{}'.format(tofpga, tonode) rmap[fromlink_tname] = fromnode rmap[tolink_tname] = tonode # add to the partition specific map too x[fromfpga][fromlink_tname] = fromnode x[tofpga][tolink_tname] = tonode self.interfpga_links.append((fromfpga, fromnode, tofpga, tonode)) return rmap, x else: return x, {} return x, {} def set_a_task_map(self): if self.taskmap_json_file and os.path.exists(self.taskmap_json_file): # PARSED earlier #self.global_task_map, self.task_partition_map = self.parse_taskmap_json(self.taskmap_json_file) #collections.OrderedDict(json.load(open(self.taskmap_json_file))) #X self.global_task_map[self.tmodels[0].taskname] = 0 # off_chip tagged nodes are no special, whatever the taskmap says # but should be on the boundaries ideally for phy.impl for tm in self.tmodels: tm.mapped_to_node = self.global_task_map[tm.taskname] #tm.mapped_to_node = self.taskmap[tm.taskname] else: # some random assignment if not self.args.taskmap_use_random: random.seed(11) # CONNECT was misbaving for some some shuffles nplaces = int(self.psn.params['NUM_USER_SEND_PORTS']) # no special nodes as far as random mapping is concerned l = [i for i in range(0, nplaces)] # let 0 be the special node, fixed for now random.shuffle(l) for i, tm in enumerate(self.tmodels): tm.mapped_to_node = l[i] self.global_task_map[tm.taskname] = l[i] if None: # TODO review l = [i for i in range(1, nplaces)] # let 0 be the special node, fixed for now random.shuffle(l) self.tmodels[0].mapped_to_node = 0 # redundant, TODO remove self.global_task_map[self.tmodels[0].taskname] = 0 for i, tm in enumerate(self.tmodels[1:]): # except 0 tm.mapped_to_node = l[i] self.global_task_map[tm.taskname] = l[i] @property def outdir(self): return self.args.cgenoutdir @property def taskmap_json_file(self): return self.args.taskmap_json_file def get_project_sha(self): # TODO move def is_git_directory(path = '.'): return subprocess.call(['git', '-C', path, 'status'], stderr=subprocess.STDOUT, stdout = open(os.devnull, 'w')) == 0 def get_repo_sha(repo): sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo).decode('ascii').strip() return sha return 'disabled-sha' return get_repo_sha(self.toolroot) for subdir in os.listdir('.'): # TODO WHAT WAS THIS?! 
if is_git_directory(subdir): return get_repo_sha(subdir) assert False def has_off_chip_nodes(self): return len(self.get_off_chip_node_id_list())>0 def get_tasks_marked_for_exposing_flit_SR_ports(self): ll = [] for t in self.tmodels: if t.is_marked_EXPOSE_AS_SR_PORT: ll.append((t.mapped_to_node, t.taskname, t.off_chip_qualifier)) return ll def get_tasks_marked_for_exposing_quasiserdes_sr_ports(self): ll = [] for t in self.tmodels: if t.is_marked_EXPOSE_AS_XFPGA_SERDES_PORT: ll.append((t.mapped_to_node, t.taskname, t.off_chip_qualifier)) return ll def get_off_chip_node_id_list(self): ll = [] for t in self.tmodels: if t.is_marked_off_chip: ll.append((t.mapped_to_node, t.taskname, t.off_chip_qualifier)) return ll @property def has_tasks_marked_for_xfpga(self): if self.task_partition_map: return True return False def has_tasks_with_qualifier(self, qualname): for t in self.tmodels: if t.qualifiers: if qualname in t.qualifiers: return True return False def get_max_parcel_size(self): return 512-512%int(self.flit_width) def get_flits_in_type(self, ty): return self.get_struct_member_index_ranges_wrt_flitwidth(ty)[0] def get_type_size_in_bits(self, ty): ty_size = 0 for n, z, az in self.type_table[ty].member_info_tuples: z = z*az ty_size += z return ty_size def get_struct_member_start_pos_for_MPItypes(self, ty): d = collections.OrderedDict() ty_size = 0 startpos = 0 ll = list() for n, z, az,mtype in self.type_table[ty].member_n_z_az_ty: if mtype not in self.basic_type_list: if z <= 64: z = 64 else: raise NotSupportedException("nonbasic types longer than 64b not presently supported for MPI model") z = z*az ty_size += z endpos = startpos + z - 1 #ll.append((endpos, startpos, n, az)) ll.append(startpos) startpos = endpos + 1 return ll def get_struct_member_index_ranges_wrt_flitwidth(self, ty): d = collections.OrderedDict() fpaylwidth = int(self.psn.params["FLIT_DATA_WIDTH"]) ty_size = 0 startpos = 0 ll = list() for n, z, az in self.type_table[ty].member_info_tuples: z = z*az ty_size += z endpos = startpos + z - 1 ll.append((endpos, startpos, n, az)) startpos = endpos + 1 totalFlits = int((ty_size+fpaylwidth-1)/fpaylwidth) return (totalFlits, ll) def get_bsv_lib_paths(self): l = [self.hls_bviwrappers_outdir] if self.has_nonhls_kernels(): l.append(self.pelib_dir) return l def get_buffersize_offchipnode(self): return 64; def find_tmodel_by_name(self, name): if not [t for t in self.tmodels if t.taskname == name]: pdb.set_trace() [tm] = [t for t in self.tmodels if t.taskname == name] return tm def get_all_communication_arcs(self): """ SRC_stmt::(taskname, stmt_annotation, TypeName, transferAmount) DST_stmt::(taskname, stmt_annotation, TypeName, transferAmount) """ def srpair_likely_match(src_taskname, s, r): if src_taskname in r[3]: if s[2].typename == r[2].typename: # return True if r[0] == 'recv' and s[0] == 'send': cnd1 = s[4].fullrange() and (s[2].arraysize == r[2].arraysize) # cnd2 = not s[4].fullrange() and ((s[4].length - s[4].offset) == (r[4].length - r[4].offset)) cnd2 = True if cnd1 or cnd2: return True if not cnd2: return False return True return False srpairs = collections.OrderedDict() for tm in self.tmodels: srpairs[tm.taskname] = [] dl = tm.get_unique_message_destinations() sl = tm.get_unique_message_sources() send_class_stmts = tm.get_send_class_statement_info1() if not send_class_stmts and dl: # the placeholder host task for dst in dl: dst_tm = self.find_tmodel_by_name(dst) fl = filter(lambda x: tm.taskname in x[3], dst_tm.get_recv_class_statement_info1()) # TODO: let these 
get_recv/send_class info1 methods do the necessary work for info_dst_side in fl: # there are no actual send statements in this placeholder so we cook on up reconstructed_src_copy = ('send', info_dst_side[1], info_dst_side[2], [dst_tm.taskname], None) srpairs[tm.taskname].append((reconstructed_src_copy, info_dst_side, dst_tm.taskname)) for info in send_class_stmts: dst_address_list = info[3] for dst in dst_address_list: dst_tm = self.find_tmodel_by_name(dst) recv_class_stmts = dst_tm.get_recv_class_statement_info1() if not recv_class_stmts: reconstructed_dst_copy = ('recv', info[1], info[2], [tm.taskname], None) srpairs[tm.taskname].append((info, reconstructed_dst_copy, dst_tm.taskname)) else: fl = filter(lambda x: srpair_likely_match(tm.taskname, info, x), recv_class_stmts) for info_dst_side in fl: srpairs[tm.taskname].append((info, info_dst_side, dst_tm.taskname)) rl_srpairs = [] def _get_nop_fpp(snd, rcv): info = snd if not snd[4]: # reconstructed send for placerholder task info = rcv # flits per packet # TODO (packet size is fixed in terms of typesize) fpp = self.get_flits_in_type(info[2].typename) fpp = fpp + 1 # one header flit per packet # number of packets nop = info[2].arraysize if not info[4].fullrange(): nop = info[4].length - info[4].offset; return fpp, nop for k, v in srpairs.items(): for snd, rcv, dst_taskname in v: fpp, nop = _get_nop_fpp(snd, rcv) def getAnnotation(stmt): if not stmt: return Annotation(name='none',lineno=0,level=0) lno, lvl, name = stmt.get_annotations()[0]; return Annotation(name=name, lineno=lno, level=lvl) e = Edge(src=Stmt(taskname=k, annotation=getAnnotation(snd[4])), dst=Stmt(taskname=dst_taskname, annotation=getAnnotation(rcv[4])), fpp=fpp, nop=nop) #print(snd[4].get_annotations()[0], ' ==> ', rcv[4].get_annotations()[0], ' : ', dst_taskname) rl_srpairs.append(e) return rl_srpairs def get_line_annotations(self): d = collections.OrderedDict() for t in self.tdefs_original: ll = t.line_annotations() for l in ll: for e in l: if e[0] in d: d[e[0]].append(e) else: d[e[0]] = [e] return d def dump_line_annotations(self): ispecs_dir = os.path.join(self.outdir, 'ispecs') d = self.get_line_annotations() with open(os.path.join(ispecs_dir, 'line_annotations.json'), 'w') as fh: json.dump(d, fh, indent=4) @property def basic_type_list(self): return na_basic_type_list.keys() def to_mpi_typename(self, ty, width=None): if ty in na_basic_type_list: return na_basic_type_list[ty][1] if width: if width <= 64: return 'MPI_UNSIGNED_LONG' else: raise NotSupportedException("nonbasic types longer than 64b not presently supported for MPI model") #---------------------------------------------------------------------------------------------------
python
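To make the header-flit arithmetic above concrete, here is a small standalone sketch that redoes the unused_flit_header_bitcount calculation by hand. The parameter values (128-bit flits, 16 user send ports, 5 type tags) are illustrative assumptions, not taken from any real configuration; the formulas mirror get_network_address_width, get_typetags_count_width and the FNOC/CONNECT branches above.

import math

# Illustrative parameters (assumptions, not from the project):
FLIT_DATA_WIDTH = 128
NUM_USER_SEND_PORTS = 16
NUM_TYPETAGS = 5

addr_width = int(math.ceil(math.log(NUM_USER_SEND_PORTS, 2)))      # 4 bits to address 16 endpoints
tag_width = max(1, int(math.ceil(math.log(NUM_TYPETAGS, 2))))      # 3 bits for 5 type tags

# FNOC reserves one bit per send port for the broadcast/multicast mask
# plus 2 indicator bits, mirroring unused_flit_header_bitcount().
unused_fnoc = FLIT_DATA_WIDTH - NUM_USER_SEND_PORTS - addr_width - tag_width - 2
print(unused_fnoc)      # 128 - 16 - 4 - 3 - 2 = 103 spare header bits

# For CONNECT only the address and tag fields are carved out of the flit.
unused_connect = FLIT_DATA_WIDTH - addr_width - tag_width
print(unused_connect)   # 128 - 4 - 3 = 121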
import pandas as pd import numpy as np from trav_lib.data_prep import reduce_memory def test_reduce_memory(): df = pd.DataFrame({'ints':[1,2,3,4],'floats':[.1,.2,.3,.4],'strings':['a','b','c','d']}) df2 = reduce_memory(df) assert df2['ints'].dtype == np.dtype('int8') assert df2['floats'].dtype == np.dtype('float32') assert df2['strings'].dtype == np.dtype('O') df = pd.DataFrame({'ints':[1,2,3,4],'floats':[.1,.2,.3,.4],'strings':['a','b','c','d']}) df3 = reduce_memory(df, cat_cols = ['strings']) assert df3['ints'].dtype == np.dtype('int8') assert df3['floats'].dtype == np.dtype('float32') assert df3['strings'].dtype.name == 'category'
python
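The test above pins down the expected behaviour of trav_lib.data_prep.reduce_memory without showing its body. The sketch below is a minimal, hypothetical implementation consistent with those assertions (integers downcast to the smallest int type, floats to float32, listed columns to category); the name reduce_memory_sketch and its internals are assumptions, not the real library code.

import numpy as np
import pandas as pd

def reduce_memory_sketch(df, cat_cols=None):
    """Minimal downcasting sketch consistent with the test above (not the real trav_lib code)."""
    out = df.copy()
    cat_cols = cat_cols or []
    for col in out.columns:
        if col in cat_cols:
            out[col] = out[col].astype('category')
        elif pd.api.types.is_integer_dtype(out[col]):
            out[col] = pd.to_numeric(out[col], downcast='integer')   # e.g. int64 -> int8
        elif pd.api.types.is_float_dtype(out[col]):
            out[col] = pd.to_numeric(out[col], downcast='float')     # e.g. float64 -> float32
    return out

df = pd.DataFrame({'ints': [1, 2, 3, 4], 'floats': [.1, .2, .3, .4], 'strings': list('abcd')})
print(reduce_memory_sketch(df).dtypes)   # ints -> int8, floats -> float32, strings stays object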
from plotly.graph_objs import Ohlc
python
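The entry above only imports Ohlc. As a hedged illustration, this is one common way such a trace is built; the price values are made up, and fig.show() assumes plotly 4 or newer.

from plotly.graph_objs import Ohlc, Figure

# Toy data (made up) -- one bar per day.
trace = Ohlc(
    x=['2021-01-04', '2021-01-05', '2021-01-06'],
    open=[10.0, 10.6, 10.2],
    high=[11.0, 10.9, 10.8],
    low=[9.8, 10.1, 10.0],
    close=[10.6, 10.2, 10.7],
)
fig = Figure(data=[trace])
fig.show()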
''' Created on May 28, 2015 @author: local ''' import sys import argparse import logging import subprocess import os import json logging.getLogger("spectrumbrowser").disabled = True def getProjectHome(): command = ['git', 'rev-parse', '--show-toplevel'] p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return out.strip() def setupConfig(host, configFile): msodConfig = json.load(open(os.environ.get("HOME") + "/.msod/MSODConfig.json")) if "DB_DATA_DIR" in msodConfig: mongoDir = msodConfig["DB_DATA_DIR"] else: mongoDir = getProjectHome() + "/data/db" configuration = Config.parse_local_config_file(configFile) configuration["HOST_NAME"] = host configuration["CERT"] = getProjectHome() + "/devel/certificates/dummy.crt" configuration["MONGO_DIR"] = mongoDir Config.setSystemConfig(configuration) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Process command line args') parser.add_argument('-host', help='Host') parser.add_argument('-f', help='config file') args = parser.parse_args() configFile = args.f host = args.host sys.path.append(getProjectHome() + "/flask") import Config setupConfig(host, configFile)
python
import numpy as np import pytest from inspect import currentframe, getframeinfo from pathlib import Path from ..flarelc import FlareLightCurve from ..lcio import from_K2SC_file #example paths: target1 = 'examples/hlsp_k2sc_k2_llc_210951703-c04_kepler_v2_lc.fits' target2 = 'examples/hlsp_k2sc_k2_llc_211119999-c04_kepler_v2_lc.fits' target3 = 'examples/hlsp_k2sc_k2_llc_211117077-c04_kepler_v2_lc.fits' #From lightkurve def test_invalid_lightcurve(): """Invalid FlareLightCurves should not be allowed.""" err_string = ("Input arrays have different lengths." " len(time)=5, len(flux)=4") time = np.array([1, 2, 3, 4, 5]) flux = np.array([1, 2, 3, 4]) with pytest.raises(ValueError) as err: FlareLightCurve(time=time, flux=flux) assert err_string == err.value.args[0] def test_find_gaps(): filename = getframeinfo(currentframe()).filename p = Path(filename).resolve().parents[1] lc = from_K2SC_file(p / 'examples/hlsp_k2sc_k2_llc_210951703-c04_kepler_v2_lc.fits') lc.find_gaps() assert lc.gaps == [(0, 2582), (2582, 3424)]
python
# Copyright (c) Niall Asher 2022 from socialserver.util.test import ( test_db, server_address, create_post_with_request, create_user_with_request, create_user_session_with_request ) from socialserver.constants import ErrorCodes import requests def test_get_unliked_post(test_db, server_address): new_post_id = create_post_with_request(test_db.access_token) r = requests.get( f"{server_address}/api/v3/posts/single", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}, ) assert r.status_code == 201 assert r.json()['meta']['user_likes_post'] == False assert r.json()['post']['like_count'] == 0 def test_like_post(test_db, server_address): new_post_id = create_post_with_request(test_db.access_token) r = requests.post(f"{server_address}/api/v3/posts/like", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r.status_code == 201 assert r.json()['liked'] == True assert r.json()['like_count'] == 1 r = requests.get( f"{server_address}/api/v3/posts/single", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}, ) assert r.status_code == 201 assert r.json()['meta']['user_likes_post'] == True assert r.json()['post']['like_count'] == 1 def test_unlike_post(test_db, server_address): new_post_id = create_post_with_request(test_db.access_token) r = requests.post(f"{server_address}/api/v3/posts/like", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r.status_code == 201 assert r.json()['liked'] == True assert r.json()['like_count'] == 1 r = requests.delete( f"{server_address}/api/v3/posts/like", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}, ) assert r.status_code == 200 assert r.json()['liked'] == False assert r.json()['like_count'] == 0 def test_like_post_already_liked(test_db, server_address): new_post_id = create_post_with_request(test_db.access_token) r = requests.post(f"{server_address}/api/v3/posts/like", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r.status_code == 201 assert r.json()['liked'] == True assert r.json()['like_count'] == 1 r2 = requests.post(f"{server_address}/api/v3/posts/like", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r2.status_code == 400 assert r2.json()["error"] == ErrorCodes.OBJECT_ALREADY_LIKED.value def test_unlike_post_not_liked(test_db, server_address): new_post_id = create_post_with_request(test_db.access_token) r = requests.delete(f"{server_address}/api/v3/posts/like", json={"post_id": new_post_id}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r.status_code == 400 assert r.json()["error"] == ErrorCodes.OBJECT_NOT_LIKED.value def test_like_post_does_not_exist(test_db, server_address): r = requests.post(f"{server_address}/api/v3/posts/like", json={"post_id": 1293812}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r.status_code == 404 assert r.json()["error"] == ErrorCodes.POST_NOT_FOUND.value def test_dislike_post_does_not_exist(test_db, server_address): r = requests.delete(f"{server_address}/api/v3/posts/like", json={"post_id": 1293812}, headers={"Authorization": f"Bearer {test_db.access_token}"}) assert r.status_code == 404 assert r.json()["error"] == ErrorCodes.POST_NOT_FOUND.value
python
import sys
import struct

"""
Takes data from the Android IMU app and turns it into binary data.
Data comes in as csv, data points will be turned into the format:

Time Stamp    Accelerometer    Gyroscope
                 x   y   z      x   y   z
=========================================
0                1   2   3      4   5   6
"""

ANDROID_IMU_DATA_FORMAT_STRING = 'ddddddd'
HEADER_SIZE = 25


def main():
    input_file_name = sys.argv[1]
    output_file_name = sys.argv[2]

    with open(output_file_name, "wb") as out_file:
        # write the format header, padded to a fixed size so readers can skip it
        out_file.write(
            ANDROID_IMU_DATA_FORMAT_STRING.ljust(HEADER_SIZE, ' ').encode('ascii')
        )

        with open(input_file_name, "r") as in_file:
            for line in in_file:
                # only pack lines that parse into a complete accelerometer/gyroscope record
                clean_data = line_to_clean_data(line)
                if clean_data:
                    out_file.write(
                        struct.pack(ANDROID_IMU_DATA_FORMAT_STRING, *clean_data)
                    )


def line_to_clean_data(line):
    if '4,' not in line:
        return None
    else:
        items_as_text = line.split(",")
        if len(items_as_text) < 13:  # expected number of items in line
            return None

        item_values = [float(x) for x in items_as_text]

        data_items = [
            item_values[0],  # time stamp
            item_values[2],  # accelerometer x
            item_values[3],  # accelerometer y
            item_values[4],  # accelerometer z
            item_values[6],  # gyroscope x
            item_values[7],  # gyroscope y
            item_values[8]   # gyroscope z
        ]
        return data_items


if __name__ == '__main__':
    main()
python
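The converter above writes a 25-byte, space-padded struct format string followed by fixed-size packed records. A matching reader might look like the sketch below; the function name and the 'imu.bin' path are assumptions for illustration, while the header size and format string come from the script itself.

import struct

HEADER_SIZE = 25

def read_imu_records(path):
    """Read records written by the converter above: a padded format header, then packed doubles."""
    with open(path, 'rb') as f:
        fmt = f.read(HEADER_SIZE).decode('ascii').strip()   # e.g. 'ddddddd'
        record_size = struct.calcsize(fmt)                    # 7 doubles -> 56 bytes
        while True:
            chunk = f.read(record_size)
            if len(chunk) < record_size:
                break
            yield struct.unpack(fmt, chunk)   # (t, ax, ay, az, gx, gy, gz)

# for t, ax, ay, az, gx, gy, gz in read_imu_records('imu.bin'):
#     print(t, ax, ay, az)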
from Compiler.types import * from Compiler.instructions import * from Compiler.util import tuplify,untuplify from Compiler import instructions,instructions_base,comparison,program import inspect,math import random import collections from Compiler.library import * from Compiler.types_gc import * from operator import itemgetter import numpy as np def get_diff_types(data_list): cint_data = [d for d in data_list if type(d) == cint] pint_data = [(d, d.pid) for d in data_list if type(d) == pint] sint_data = [d for d in data_list if type(d) == sint] if len(pint_data) > 1: pint_data = sorted(pint_data, key=itemgetter(1)) return (cint_data, pint_data, sint_data) # This is not parallelized def int_add(data_list, nparallel=1): (cint_data, pint_data, sint_data) = get_diff_types(data_list) c_res = cint(0) for cd in cint_data: c_res += cd pd_res = [] current_pid = None for (pd, pid) in pint_data: if pid != current_pid: current_pid = pid pd_res.append(pint(0)) pd_res[-1] += pd res = cint(0) res += c_res for pd in pd_res: res += pd for sd in sint_data: res += sd return res def sum_lib(lst): flattened_lst = [] for i in range(len(lst)): print "TYPE?", type(lst[i]) if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC): flattened_lst += flatten(lst[i]) print flattened_lst else: flattened_lst.append(lst[i]) return sum(flattened_lst) def max_lib(lst): flattened_lst = [] for i in range(len(lst)): print "TYPE?", type(lst[i]) if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC): flattened_lst += flatten(lst[i]) print flattened_lst else: flattened_lst.append(lst[i]) return max(flattened_lst) def min_lib(lst): flattened_lst = [] for i in range(len(lst)): print "TYPE?", type(lst[i]) if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC): flattened_lst += flatten(lst[i]) print flattened_lst else: flattened_lst.append(lst[i]) return min(flattened_lst) def flatten(A): lst = [] if type(A) in (sfixMatrix, sfixMatrixGC, cfixMatrix, cfixMatrixGC): for i in range(A.rows): for j in range(A.columns): lst.append(A[i][j]) return lst import functools def reduce_lib(lst, reduce_fn): flattened_lst = [] for i in range(len(lst)): if type(lst[i]) in(sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC): flattened_lst += flatten(lst[i]) else: flattened_lst.append(lst[i]) return reduce(reduce_fn, flattened_lst) # Copy a portion of the large matrix to the small matrix. 
def copy_matrix(dest, src, rows, cols, index): for i in range(rows): for j in range(cols): dest[i][j] = src[index * rows + j][j] # Tree-based multiplication def int_multiply(data_list, nparallel=2): length = len(data_list) data = [] data.append(Array(length, sint)) for i in range(length): data[0][i] = data_list[i] while length > 1: length = (length / 2) + (length % 2) data.append(Array(length, sint)) @for_range(length) def f(i): data[-1][i] = sint(0) level = 0 for x in range(len(data) - 1): print("level = {}, length = {}".format(level+1, data[level+1].length)) exec_len = data[level].length / 2 @for_range_multithread(nparallel, exec_len, exec_len) def _multiply(i): data[level+1][i] = data[level][2 * i] * data[level][2 * i + 1] if data[level].length % 2 > 0: data[level+1][data[level+1].length - 1] = data[level][data[level].length - 1] level += 1 return data[-1][0] def _transpose(A, B): @for_range(A.rows) def f(i): @for_range(A.columns) def g(j): B[j][i] = A[i][j] def _transpose_gc(A, B): for i in range(A.rows): for j in range(A.columns): B[j][i] = A[i][j] def transpose(A): if isinstance(A, np.ndarray): return A.transpose() if not isinstance(A, (Matrix, MatrixGC)): raise ValueError("Only matrix can be transposed") if isinstance(A, (sintMatrix, sfixMatrix, cintMatrix, cfixMatrix)): B = A.__class__(A.columns, A.rows) _transpose(A, B) return B elif isinstance(A, (sintMatrixGC, sfixMatrixGC)): B = A.__class__(A.columns, A.rows) _transpose_gc(A, B) return B else: raise NotImplementedError def _matmul(A, B, C, D, int_type, nparallel=1): total = A.rows * B.columns * A.columns @for_range_multithread(nparallel, total, total) def _multiply(i): i_index = i / (B.columns * A.columns) j_index = i % (B.columns * A.columns) / (A.columns) k_index = i % A.columns D[i] = A[i_index][k_index] * B[k_index][j_index] @for_range_multithread(nparallel, A.rows * B.columns, A.rows * B.columns) def _add(i): i_index = i / B.columns j_index = i % B.columns C[i_index][j_index] = int_type(0) @for_range(A.columns) def _add_element(j): C[i_index][j_index] += D[i * A.columns + j] return C # Not parallelized def _matmul_mix(A, B, nparallel=1): C = MixMatrix(A.rows, B.columns) @for_range(A.rows * B.columns) def f(i): @for_range(A.columns) def g(j): v = C.get(i) v += A.get(i * A.columns + j) * B.get(j * B.columns + i) C.set(i, v) return C def _matmul_gc(A, B, C): for i in range(A.rows): for j in range(B.columns): v = A[i][0] * B[0][j] for k in range(1, A.columns): v += A[i][k] * B[k][j] C[i][j] = v def matmul(A, B, left_rows, left_cols, right_rows, right_cols, mat_type, nparallel=1): if isinstance(A, np.ndarray) and isinstance(B, np.ndarray): return np.matmul(A, B) # Tentative, very janky. Yep, this doesn't work :(. Buyer BEWARE! 
if isinstance(A, sintMatrix) and isinstance(B, sintMatrix): C = sintMatrix(A.rows, B.columns) D = sintArray(A.rows * B.columns * A.columns) return _matmul(A, B, C, D, sint, nparallel) #C = sintMatrix(left_rows, right_cols) #D = sintArray(left_rows * right_cols * left_cols) #return _matmul(A, B, C, D, sint, nparallel) elif isinstance(A, cintMatrix) and isinstance(B, cintMatrix): C = cintMatrix(A.rows, B.columns) D = cintArray(A.rows * B.columns * A.columns) return _matmul(A, B, C, D, cint, nparallel) elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix): C = sfixMatrix(A.rows, B.columns) D = sfixArray(A.rows * B.columns * A.columns) return _matmul(A, B, C, D, sfix, nparallel) elif isinstance(A, cfixMatrixGC) or isinstance(B, cfixMatrixGC): C = cfixMatrixGC(A.rows, B.columns) _matmul_gc(A, B, C) return C elif isinstance(A, sfixMatrixGC) or isinstance(B, sfixMatrixGC): C = sfixMatrixGC(A.rows, B.columns) _matmul_gc(A, B, C) return C elif isinstance(A, MixMatrix) and isinstance(B, MixMatrix): return _matmul_mix(A, B, nparallel) elif isinstance(A, (sintMatrix, cintMatrix, cfixMatrix, sfixMatrix)) and isinstance(B, (sintMatrix, cintMatrix, cfixMatrix, sfixMatrix)): C = sintMatrix(A.rows, B.columns) D = sintArray(A.rows * B.columns * A.columns) return _matmul(A, B, C, D, sint, nparallel) else: raise NotImplementedError def _matadd(A, B, C, int_type, nparallel=1): @for_range_multithread(nparallel, A.rows * A.columns, A.rows * A.columns) def _add(i): i_index = i / A.columns j_index = i % A.columns C[i_index][j_index] = A[i_index][j_index] + B[i_index][j_index] def matadd(A, B, nparallel=1): if isinstance(A, np.ndarray) and isinstance(B, np.ndarray): return np.add(A, B) if A.rows != B.rows or A.columns != B.columns: raise NotImplementedError if isinstance(A, cintMatrix) and isinstance(B, cintMatrix): C = cintMatrix(A.rows, A.columns) _matadd(A, B, C, cint, nparallel) return C elif isinstance(A, sintMatrix) and isinstance(B, sintMatrix): C = sintMatrix(A.rows, A.columns) _matadd(A, B, C, sint, nparallel) return C elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix): C = sfixMatrix(A.rows, A.columns) _matadd(A, B, C, sfix, nparallel) return C elif type(A) in (sfixMatrix, cfixMatrix) and type(B) in (sfixMatrix, cfixMatrix): C = sfixMatrix(A.rows, A.columns) _matadd(A, B, C, sfix, nparallel) return C elif type(A) in (sfixMatrixGC, cfixMatrixGC) and type(B) in (sfixMatrixGC, cfixMatrixGC): C = cfixMatrixGC(A.rows, A.columns, cfix_gc) _matadd(A, B, C, cfix_gc, nparallel) return C def _matsub(A, B, C, int_type, nparallel=1): @for_range_multithread(nparallel, A.rows * A.columns, A.rows * A.columns) def _add(i): i_index = i / A.columns j_index = i % A.columns C[i_index][j_index] = A[i_index][j_index] - B[i_index][j_index] def _matsub_gc(A, B, C): for i in range(A.rows): for j in range(A.columns): C[i][j] = A[i][j] - B[i][j] def matsub(A, B, nparallel=1): if isinstance(A, np.ndarray) and isinstance(B, np.ndarray): return np.subtract(A, B) if A.rows != B.rows or A.columns != B.columns: raise ValueError("[matsub] Matrices must have the same sizes") if isinstance(A, cintMatrix) and isinstance(B, cintMatrix): C = cintMatrix(A.rows, A.columns) _matsub(A, B, C, cint, nparallel) return C elif isinstance(A, sintMatrix) and isinstance(B, sintMatrix): C = sintMatrix(A.rows, A.columns) _matsub(A, B, C, sint, nparallel) return C elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix): C = sfixMatrix(A.rows, A.columns) _matsub(A, B, C, sfix, nparallel) return C elif isinstance(A, sfixMatrixGC) and 
isinstance(B, sfixMatrixGC): C = sfixMatrixGC(A.rows, A.columns) _matsub_gc(A, B, C) return C else: raise NotImplementedError # horizontally stack the input matrices def matstack_int(matrices): pid = None s = set([m.columns for m in matrices]) if s > 1: raise ValueError("Can only stack matrices with the same number of columns") num_rows_list = [m.rows for m in matrices] M_rows = sum(num_rows_list) M_columns = s.pop() M = cintMatrix(M_rows, M_columns) int_type = cint pid = 0 s = set(type(m) for m in matrices) if len(s) == 1 and cintMatrix in s: M = cintMatrix(M_rows, M_columns) int_type = cint elif len(s) == 1 and pintMatrix in s: parties = set([m.pid for m in matrices]) if len(parties) == 1: pid = parties.pop() M = pintMatrix(pid, M_rows, M_columns) int_type = pint else: M = sintMatrix(M_rows, M_columns) int_type = sint else: M = sintMatrix(M_rows, M_columns) int_type = sint row_count = 0 for m in matrices: @for_range(m.rows) def f(i): @for_range(m.columns) def g(j): if int_type == pint: M[row_count + i][j] = pint(pid, 0) else: M[row_count + i][j] = int_type(0) M[row_count + i][j] += m[i][j] return M def matstack(matrices): if isinstance(matrices[0], (cintMatrix, pintMatrix, sintMatrix)): return matstack_int(matrices) else: raise NotImplementedError def _sigmoid_sfix(v): sign_v = cfix(1) - cfix(2) * (v < 0) denom = (v * sign_v) + sfix(1) res = v / denom return res def _sigmoid_sfix_gc(v): abs_v = v.absolute() denom = abs_v + cfix_gc(1) res = v / denom return res def sigmoid(v, nparallel=1): if isinstance(v, sfix): return _sigmoid_sfix(v) elif isinstance(v, (sfixMatrix)): res = v.__class__(v.rows, v.columns) @for_range_multithread(nparallel, v.rows, v.rows) def a(i): @for_range_multithread(nparallel, v.columns, v.columns) def b(j): res[i][j] = _sigmoid_sfix(v[i][j]) return res elif isinstance(v, sfixMatrixGC): res = v.__class__(v.rows, v.columns) for i in range(v.rows): for j in range(v.columns): res[i][j] = _sigmoid_sfix_gc(v[i][j]) return res else: raise NotImplementedError def mat_const_mul(c, m, nparallel=1): if isinstance(m, np.ndarray): if type(c) in (float, int): return c * m else: raise ValueError("Type of constant is: {0} when expected float and int.".format(type(c))) if isinstance(m, sfixMatrix) or isinstance(m, cfixMatrix): if isinstance(m, sfixMatrix): res = sfixMatrix(m.rows, m.columns) else: res = cfixMatrix(m.rows, m.columns) """ @for_range_multithread(nparallel, m.rows * m.columns, m.rows * m.columns) def f(i): @for_range_multithread(nparallel, m.columns, m.columns) def g(j): res[i][j] = c * m[i][j] """ @for_range_multithread(nparallel, m.rows * m.columns, m.rows * m.columns) def loop(i): i_index = i / m.columns j_index = i % m.columns res[i_index][j_index] = c * m[i_index][j_index] return res elif isinstance(m, sfixMatrixGC) or isinstance(m, cfixMatrixGC): if isinstance(m, sfixMatrixGC): res = sfixMatrixGC(m.rows, m.columns) else: res = cfixMatrixGC(m.rows, m.columns) for i in range(m.rows): for j in range(m.columns): res[i][j] = c * m[i][j] return res else: raise NotImplementedError def mat_assign(o, i, nparallel=1): if isinstance(i, (Array, ArrayGC)): if o.length != i.length: raise ValueError("Arrays must be of the same sizes") if isinstance(i, Array): @for_range(i.length) def f(u): o[u] = i[u] elif isinstance(i, ArrayGC): for u in range(i.length): o[u] = i[u] elif isinstance(i, (Matrix, MatrixGC)): if o.rows != i.rows or o.columns != i.columns: raise ValueError("Matrices must be of the same sizes") if isinstance(i, Matrix): @for_range_multithread(nparallel, i.rows, 
i.rows) def f(u): @for_range_multithread(nparallel, i.columns, i.columns) def g(v): o[u][v] = i[u][v] elif isinstance(i, MatrixGC): for u in range(i.rows): for v in range(i.columns): o[u][v] = i[u][v] elif isinstance(i, list): for u in range(len(i)): o[u] = i[u] else: raise NotImplementedError def array_index_secret_load_if(condition, l, index_1, index_2, nparallel=1): supported_types_a = (sint, sfix) supported_types_b = (sint_gc, sfix_gc) if isinstance(index_1, supported_types_a) and isinstance(index_2, supported_types_a): index = ((1 - condition) * index_1) + (condition * index_2) return array_index_secret_load_a(l, index, nparallel=nparallel) elif isinstance(index_1, supported_types_b) and isinstance(index_2, supported_types_b): index = ((~condition) & index_1).__xor__(condition & index_2) return array_index_secret_load_gc(l, index) else: raise NotImplementedError def get_identity_matrix(value_type, n): if isinstance(value_type, (sfix, sfixMatrix)): ret = sfixMatrix(n, n) @for_range(n) def f(i): @for_range(n) def g(j): v = (i == j) v = sint(v) vfix = sfix.load_sint(v) ret[i][j] = vfix return ret elif isinstance(value_type, (sfix_gc, sfixMatrixGC, cfix_gc, cfixMatrixGC)): ret = sfixMatrixGC(n, n) for i in range(n): for j in range(n): ret[i][j] = cfix_gc(int(i == j)) return ret else: raise NotImplementedError def cond_assign(cond, val1, val2): res = ((~cond) & val1).__xor__(cond & val2) return res def matinv(A, nparallel=1): if isinstance(A, np.ndarray): return np.linalg.inv(A) #if not isinstance(A, sfixMatrix) and not isinstance(A, cfixMatrix): #raise NotImplementedError n = A.rows X = A.__class__(A.rows, A.columns, cfix_gc) mat_assign(X, A) I = get_identity_matrix(A, A.rows) for j in range(n): for i in range(j, n): b1 = X[i][j].__lt__(cfix_gc(0.00001)) b2 = X[i][j].__gt__(cfix_gc(-0.00001)) b = ~(b1 & b2) #1 - b1 * b2 X[i][j] = b & X[i][j] for k in range(n): a1 = X[j][k] a2 = X[i][k] X[j][k] = cond_assign(b, a2, a1) X[i][k] = cond_assign(b, a1, a2) a1 = I[j][k] a2 = I[i][k] I[j][k] = cond_assign(b, a2, a1) I[i][k] = cond_assign(b, a1, a2) xjj_inv = cfix_gc(1).__div__(X[j][j]) t = cond_assign(b, xjj_inv, cfix_gc(1)) for k in range(n): X[j][k] = t * X[j][k] I[j][k] = t * I[j][k] for L in range(j): t = cfix_gc(-1) * X[L][j] for k in range(n): a1 = X[L][k] + t * X[j][k] a2 = X[L][k] b1 = I[L][k] + t * I[j][k] b2 = I[L][k] X[L][k] = cond_assign(b, a1, a2) I[L][k] = cond_assign(b, b1, b2) for L in range(j+1, n): # from j+1 to n t = cfix_gc(-1) * X[L][j] for k in range(n): a1 = X[L][k] + t * X[j][k] a2 = X[L][k] b1 = I[L][k] + t * I[j][k] b2 = I[L][k] X[L][k] = cond_assign(b, a1, a2) I[L][k] = cond_assign(b, b1, b2) return I """ @for_range(n) def f0(j): #@for_range(j, n) @for_range(n) def f1(i): @if_(i >= j) def h(): b1 = X[i][j].__lt__(sfix(0.00001)) b2 = X[i][j].__gt__(sfix(-0.00001)) b = 1 - b1 * b2 X[i][j] = b * X[i][j] @for_range_multithread(nparallel, n, n) def f2(k): a1 = X[j][k] a2 = X[i][k] X[j][k] = cond_assign_a(b, a2, a1) X[i][k] = cond_assign_a(b, a1, a2) a1 = I[j][k] a2 = I[i][k] I[j][k] = cond_assign_a(b, a2, a1) I[i][k] = cond_assign_a(b, a1, a2) xjj_inv = sfix(1).__div__(X[j][j]) t = cond_assign_a(b, xjj_inv, sfix(1)) @for_range_multithread(nparallel, n, n) def f3(k): X[j][k] = t * X[j][k] I[j][k] = t * I[j][k] @for_range(n) def f4(L): @if_(L < j) def h(): t = sfix(-1) * X[L][j] @for_range_multithread(nparallel, n, n) def g0(k): a1 = X[L][k] + t * X[j][k] a2 = X[L][k] b1 = I[L][k] + t * I[j][k] b2 = I[L][k] X[L][k] = cond_assign_a(b, a1, a2) I[L][k] = cond_assign_a(b, b1, b2) 
# from j+1 to n @for_range(n) def f5(L): @if_(L > j) def h(): t = sfix(-1) * X[L][j] @for_range_multithread(nparallel, n, n) def g0(k): a1 = X[L][k] + t * X[j][k] a2 = X[L][k] b1 = I[L][k] + t * I[j][k] b2 = I[L][k] X[L][k] = cond_assign_a(b, a1, a2) I[L][k] = cond_assign_a(b, b1, b2) return I """ # Assumes that the piecewise function is public for now # Format: bounds in the form of [lower, upper] # Function in the form of a*x + b class Piecewise(object): def __init__(self, num_boundaries): self.lower_bound = sfixArray(3) self.upper_bound = sfixArray(3) self.boundary_points = sfixMatrix(num_boundaries - 2, 4) self.counter = regint(0) def add_boundary(self, lower, upper, a, b): if lower is None: self.lower_bound[0] = upper self.lower_bound[1] = a self.lower_bound[2] = b elif upper is None: self.upper_bound[0] = lower self.upper_bound[1] = a self.upper_bound[2] = b else: self.boundary_points[self.counter][0] = lower self.boundary_points[self.counter][1] = upper self.boundary_points[self.counter][2] = a self.boundary_points[self.counter][3] = b self.counter += regint(1) # For debugging purposes only def debug(self): print_ln("[-inf, %s],: %s * x + %s", self.lower_bound[0].reveal(), self.lower_bound[1].reveal(), self.lower_bound[2].reveal()) @for_range(self.boundary_points.rows) def f(i): print_ln("[%s, %s]: %s * x + %s", self.boundary_points[i][0].reveal(), self.boundary_points[i][1].reveal(), self.boundary_points[i][2].reveal(), self.boundary_points[i][3].reveal()) print_ln("[%s, inf],: %s * x + %s", self.upper_bound[0].reveal(), self.upper_bound[1].reveal(), self.upper_bound[2].reveal()) def evaluate(self, x): coefs = sfixArray(2) coefs[0] = sfix(0) coefs[1] = sfix(0) # Check for lower bound b = x.__le__(self.lower_bound[0]) coefs[0] += b * self.lower_bound[1] coefs[1] += b * self.lower_bound[2] @for_range(self.boundary_points.rows) def f(i): lower = self.boundary_points[i][0] upper = self.boundary_points[i][1] b1 = x.__gt__(lower) b2 = x.__le__(upper) b = b1 * b2 coefs[0] += b * self.boundary_points[i][2] coefs[1] += b * self.boundary_points[i][3] # Check for upper bound b = x.__gt__(self.upper_bound[0]) coefs[0] += b * self.upper_bound[1] coefs[1] += b * self.upper_bound[2] res = coefs[0] * x + coefs[1] return res def LogisticRegression(X, y, batch_size, sgd_iters, dim): assert(isinstance(X, Matrix)) assert(isinstance(y, Matrix)) if batch_size * sgd_iters >= X.rows: raise ValueError("batch_size * sgd_iters = {0} * {1} >= # of rows in X: {2}".format(batch_size, sgd_iters. X.rows)) if batch_size * sgd_iters >= y.rows: raise ValueError("batch_size * sgd_iters = {0} * {1} >= # of rows in X: {2}".format(batch_size, sgd_iters. 
X.rows)) if isinstance(X, sfixMatrix): w = sfixMatrix(dim, 1) #alpha_B = cfix(0.01 / batch_size) currently cfix and sfix multiplying doesn't work alpha_B = cfix(0.01 / batch_size) XB = sfixMatrix(batch_size, dim) yB = sfixMatrix(batch_size, 1) else: w = sfixMatrixGC(dim, 1) alpha_B = cfix_gc(0.01 / batch_size) XB = sfixMatrixGC(batch_size, dim) yB = sfixMatrixGC(batch_size, 1) for i in range(sgd_iters): batch_low = i * batch_size batch_high = (i + 1) * batch_size for j in range(batch_size): for d in range(dim): XB[j][d] = X[batch_low + j][d] yB[j][0] = y[batch_low + j][0] w_ret = matmul(XB, w, batch_size, dim, dim, 1, sfix) #reveal_all(w_ret, "w_ret") w_sigmoid = sigmoid(w_ret) #reveal_all(w_sigmoid, "w_sigmoid") w_sub = matsub(w_sigmoid, yB) XB_T = transpose(XB) w_1 = matmul(XB_T, w_sub, dim, batch_size, batch_size, 1, sfix) #reveal_all(w_1, "w_1") w_2 = mat_const_mul(alpha_B, w_1) #reveal_all(w_2, "w_2") w_res = matsub(w, w_2) mat_assign(w, w_res) #print_ln("Iter: %s", i) return w def DecisionTree(tree, levels): w = tree[0] for i in range(levels-1): index = w[0] split = w[1] left_child = w[2] right_child = w[3] f = x[index] cond = (f < split) w_res = array_index_secret_load_if(cond, tree, left_child, right_child) mat_assign(w, w_res) # Return the final prediction class. return w[1] def get_ith_matrix(mat, index, rows, cols, mat_type=sfixMatrix): #ret = s_fix_mat(rows, cols) #ret = sfixMatrix(rows, cols) ret = mat_type(rows, cols) for i in range(rows): for j in range(cols): ret[i][j] = mat[index * rows + i][j] return ret def copy_ith_matrix(dest, src, index, rows, cols): for i in range(rows): for j in range(cols): dest[index * rows + i][j] = src[i][j] # Local computation of weight vector. def admm_local(XXinv, Xy, u, z, rho, num_cols): temp = matsub(z, u) z_u = mat_const_mul(rho, temp) #for i in range(z_u.rows): #print_ln("Admm local z: %s, temp: %s", z_u[i][0].reveal(), temp[i][0].reveal()) second_term = matadd(Xy, z_u) #add_matrices(Xy, z_u, NUM_COLS, 1) w = matmul(XXinv, second_term, num_cols, num_cols, num_cols, 1, sfix) return w def soft_threshold_vec(threshold, vec, num_cols, mat_type=sfixMatrix): #vec_new = s_fix_mat(NUM_COLS, 1) #vec_new = sfixMatrix(num_cols, 1) vec_new = mat_type(num_cols, 1) neg_threshold = sfix(-1) * threshold #neg_threshold = threshold.__neg__() for i in range(num_cols): threshold_fn = Piecewise(3) threshold_fn.add_boundary(None, neg_threshold, sfix(0), vec[i][0] + threshold) #threshold_fn.add_boundary(None, neg_threshold, c_fix(0), vec[i][0] + threshold) threshold_fn.add_boundary(neg_threshold, threshold, sfix(0), sfix(0)) #threshold_fn.add_boundary(neg_threshold, threshold, c_fix(0), c_fix(0)) threshold_fn.add_boundary(threshold, None, sfix(0), vec[i][0] - threshold) #threshold_fn.add_boundary(threshold, None, c_fix(0), vec[i][0] - threshold) val = threshold_fn.evaluate(vec[i][0]) vec_new[i][0] = val return vec_new def admm_coordinate(w_list, u_list, z, rho, l, num_cols, num_parties, mat_type=sfixMatrix): #w_avg = s_fix_mat(num_cols, 1) #u_avg = s_fix_mat(num_cols, 1) #w_avg = sfixMatrix(num_cols, 1) #u_avg = sfixMatrix(num_cols, 1) w_avg = mat_type(num_cols, 1) u_avg = mat_type(num_cols, 1) w_avg = mat_const_mul(cfix(0), w_avg) u_avg = mat_const_mul(cfix(0), u_avg) for i in range(num_parties): w = get_ith_matrix(w_list, i, num_cols, 1, mat_type) u = get_ith_matrix(u_list, i, num_cols, 1, mat_type) new_w_avg = matadd(w_avg, w) #add_matrices(w_avg, w, NUM_COLS, 1) new_u_avg = matadd(u_avg, u) #add_matrices(u_avg, u, NUM_COLS, 1) mat_assign(w_avg, new_w_avg) 
mat_assign(u_avg, new_u_avg) #avg = c_fix(1.0 / NUM_PARTIES) cfix multiplication doesn't work if mat_type in [sfixMatrix, sintMatrix]: avg = sfix(1.0 / num_parties) # Changing THIS line to cfix completely breaks everything wtf. threshold = l / (rho * num_parties) #sfix(l/(rho * num_parties)) else: avg = sfix_gc(1.0 / num_parties) threshold = sfix_gc(l/(rho * num_parties)) """ for i in range(w_avg.rows): print_ln("w_avg_mul: %s, w_avg: %s", (w_avg[i][0] * cfix(1.0 / num_parties)).reveal(), w_avg[i][0].reveal()) print_ln("u_avg_mul: %s, u_avg: %s", (u_avg[i][0] * cfix(1.0 / num_parties)).reveal(), u_avg[i][0].reveal()) """ new_w_avg = mat_const_mul(avg, w_avg) new_u_avg = mat_const_mul(avg, u_avg) mat_assign(w_avg, new_w_avg) mat_assign(u_avg, new_u_avg) # Applying thresholding u_plus_w = matadd(w_avg, u_avg) z_new = soft_threshold_vec(threshold, u_plus_w, num_cols, mat_type) #u_list_new = s_fix_mat(num_parties * num_cols, 1) #neg_z = s_fix_mat(num_cols, 1) #u_list_new = sfixMatrix(num_parties * num_cols, 1) #neg_z = sfixMatrix(num_cols, 1) u_list_new = mat_type(num_parties * num_cols, 1) neg_z = mat_type(num_cols, 1) for i in range(z_new.rows): for j in range(z_new.columns): neg_z[i][j] = z_new[i][j].__neg__() for i in range(num_parties): u_i = get_ith_matrix(u_list, i, num_cols, 1, mat_type) w_i = get_ith_matrix(w_list, i, num_cols, 1, mat_type) intermediate_vec = matadd(u_i, w_i) #add_matrices(u_i, w_i, NUM_COLS, 1) sum_vec = matadd(intermediate_vec, neg_z) #add_matrices(intermediate_vec, neg_z, NUM_COLS, 1) copy_ith_matrix(u_list_new, sum_vec, i, num_cols, 1) #reveal_all(z_new, "intermediate_weights") return u_list_new, z_new def ADMM_preprocess(x_data, y_data, rho, num_parties, num_rows, num_cols, mat_type=sfixMatrix): #XTX_inv_lst = s_fix_mat(NUM_PARTIES * NUM_COLS, NUM_COLS) #XTy_lst = s_fix_mat(NUM_PARTIES * NUM_COLS, 1) #XTX_inv_lst = sfixMatrix(num_parties * num_cols, num_cols) #XTy_lst = sfixMatrix(num_parties * num_cols, 1) XTX_inv_lst = mat_type(num_parties * num_cols, num_cols) XTy_lst = mat_type(num_parties * num_cols, 1) for i in range(num_parties): x_i = get_ith_matrix(x_data, i, num_rows, num_cols, mat_type) y_i = get_ith_matrix(y_data, i, num_rows, 1, mat_type) X_T = transpose(x_i) XTy = matmul(X_T, y_i, num_cols, num_rows, num_rows, 1, sfix) XTX = matmul(X_T, x_i, num_cols, num_rows, num_rows, num_cols, sfix) #rho_identity = s_fix_mat(NUM_COLS, NUM_COLS) #rho_identity = sfixMatrix(num_cols, num_cols) rho_identity = mat_type(num_cols, num_cols) rho_identity = mat_const_mul(cfix(0), rho_identity) for j in range(num_cols): rho_identity[j][j] = rho #rho_val #sfix(rho_val) XTX_rho_identity = matadd(XTX, rho_identity) #add_matrices(XTX, rho_identity, NUM_COLS, NUM_COLS) XTX_inv = matinv(XTX_rho_identity) copy_ith_matrix(XTX_inv_lst, XTX_inv, i, num_cols, num_cols) copy_ith_matrix(XTy_lst, XTy, i, num_cols, 1) return XTX_inv_lst, XTy_lst def ADMM(XTX_inv_lst, XTy_lst, admm_iter, num_parties, num_cols, rho, l): #XTX_inv_lst, XTy_lst = local_compute(x_data, y_data, num_parties. 
num_rows, num_cols) #w_list = s_fix_mat(num_parties * num_cols, 1) mat_type = None if isinstance(XTX_inv_lst, sfixMatrix): mat_type = sfixMatrix elif isinstance(XTX_inv_lst, sfixMatrixGC): mat_type = sfixMatrixGC elif isinstance(XTX_inv_lst, sintMatrix): mat_type = sintMatrix else: raise ValueError("Type of matrix: {0} does not correspond to anything supported by this library".format(mat_type)) #w_list = sfixMatrix(num_parties * num_cols, 1) #u_list = sfixMatrix(num_parties * num_cols, 1) #z = sfixMatrix(num_cols, 1) w_list = mat_type(num_parties * num_cols, 1) u_list = mat_type(num_parties * num_cols, 1) z = mat_type(num_cols, 1) w_list = mat_const_mul(cfix(0), w_list) u_list = mat_const_mul(cfix(0), u_list) z = mat_const_mul(cfix(0), z) """ for i in range(w_list.rows): for j in range(w_list.columns): print_ln("%s, %s", w_list[i][j].reveal(), u_list[i][j].reveal()) """ for i in range(admm_iter): for j in range(num_parties): XTX_inv = get_ith_matrix(XTX_inv_lst, j, num_cols, num_cols, mat_type) XTy = get_ith_matrix(XTy_lst, j, num_cols, 1, mat_type) u = get_ith_matrix(u_list, j, num_cols, 1, mat_type) w = admm_local(XTX_inv, XTy, u, z, rho, num_cols) #reveal_all(w, "local_weight") copy_ith_matrix(w_list, w, j, num_cols, 1) new_u_lst, new_z = admm_coordinate(w_list, u_list, z, rho, l, num_cols, num_parties, mat_type) mat_assign(u_list, new_u_lst) mat_assign(z, new_z) return z
python
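soft_threshold_vec in the library above builds the lasso soft-thresholding (shrinkage) operator out of a Piecewise function so it can be evaluated obliviously on secret-shared values. On clear values the same operator reduces to the short reference below; this plain-Python version only illustrates what the MPC code computes and is not a drop-in replacement.

def soft_threshold(threshold, v):
    """Plain (non-MPC) reference for the shrinkage operator used by soft_threshold_vec."""
    if v > threshold:
        return v - threshold
    if v < -threshold:
        return v + threshold
    return 0.0

# The ADMM coordinate step shrinks each averaged coefficient toward zero by l / (rho * num_parties):
print([soft_threshold(0.5, v) for v in [-2.0, -0.3, 0.0, 0.4, 1.5]])
# [-1.5, 0.0, 0.0, 0.0, 1.0]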
from flask_app.factory import create_app app = create_app('meeting-scheduler')
python
#!/usr/bin/env python3 import numpy as np class BPTTBatches(object): """Wraps a list of sequences as a contiguous batch iterator. This will iterate over batches of contiguous subsequences of size ``seq_length``. TODO: elaborate Example: .. code-block:: python # Dictionary # Sequence of length 1000 data = np.random.randint(10, size=1000) # Iterator with over subsequences of length 20 with batch size 5 batched_dataset = BPTTBatches(data, batch_size=5, seq_length=20) # Training loop for x, y in batched_dataset: # x has and y have shape (seq_length, batch_size) # y[i+1] == x[i] # Do something with x Args: data (list): List of numpy arrays containing the data targets (list): List of targets batch_size (int, optional): Batch size seq_length (int, optional): BPTT length """ def __init__( self, data, batch_size=32, seq_length=30, ): # Get one list if isinstance(data[0], list): data = [word for sent in data for word in sent] # Parameters self.num_samples = len(data) self.num_samples -= self.num_samples % batch_size self.num_positions = self.num_samples//batch_size self.num_batches = int(np.ceil(self.num_positions / seq_length)) self.batch_size = batch_size self.seq_length = seq_length # The data is stored as an array of shape (-1, batch_size) self.data = np.stack([ np.asarray( data[b*self.num_positions:(b+1)*self.num_positions], dtype=type(data[0]) ) for b in range(self.batch_size)], axis=-1 ) # Reset position and shuffle the order if applicable self.reset() def __len__(self): """This returns the number of **batches** in the dataset (not the total number of samples) Returns: int: Number of batches in the dataset ``ceil(len(data)/batch_size)`` """ return self.num_batches def __getitem__(self, index): """Returns the ``index`` th sample The result is a tuple ``x, next_x`` of numpy arrays of shape ``seq_len x batch_size`` ``seq_length`` is determined by the range specified by ``index``, and ``next_x[t]=x[t+1]`` for all ``t`` Args: index (int, slice): Index or slice Returns: tuple: ``x, next_x`` """ return self.data[index] def percentage_done(self): """What percent of the data has been covered in the current epoch""" return 100 * (self.position / self.num_positions) def just_passed_multiple(self, batch_number): """Checks whether the current number of batches processed has just passed a multiple of ``batch_number``. For example you can use this to report at regular interval (eg. every 10 batches) Args: batch_number (int): [description] Returns: bool: ``True`` if :math:`\\fraccurrent_batch` """ return (self.position // self.seq_length) % batch_number == 0 def reset(self): """Reset the iterator and shuffle the dataset if applicable""" self.position = 0 def __iter__(self): self.reset() return self def __next__(self): # Check for end of epoch if self.position >= self.num_positions-1: raise StopIteration # Batch index seq_len = min(self.seq_length, self.num_positions-1-self.position) batch = self[self.position:self.position+seq_len+1] # Increment position self.position += seq_len # Return batch return batch[:-1], batch[1:]
python
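As a quick usage sketch for the iterator above (assuming BPTTBatches from that module is in scope), the loop below consumes (x, y) pairs of shape (seq_length, batch_size), where y is x shifted by one step; the toy data and the absence of a real model are assumptions for illustration.

import numpy as np

# Toy corpus of token ids (values made up, vocabulary of size 10).
data = np.random.randint(10, size=1000)
batches = BPTTBatches(data, batch_size=5, seq_length=20)

for epoch in range(2):                 # __iter__ calls reset(), so re-iterating starts a fresh epoch
    for x, y in batches:
        # x and y have shape (seq_length, batch_size); y is x shifted by one position,
        # i.e. the next-token targets. A real model would compute loss = model(x, y) here.
        assert x.shape == y.shape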
# A module to make your error messages less scary import sys from characters import AsciiCharacter def output_ascii(err_message="You certainly messed something up."): one_line = False err_line_1 = err_message.split('--')[0] try: err_line_2 = err_message.split('--')[1] except: one_line = True err_line_2 = err_line_1 if len(err_line_1) >= len(err_line_2): max_length = len(err_line_1) long_line_label = 1 else: max_length = len(err_line_2) long_line_label = 2 ascii_art = AsciiCharacter().character s1 = " " * 16 + "_" * (max_length + 6) s2 = " " * 15 + "/" + " " * (max_length + 6) + "\\" if not one_line: if long_line_label == 1: length_diff = len(err_line_1) - len(err_line_2) s3 = " " * 15 + "|" + " " * 3 + err_line_1 + " " * 3 + "|" s4 = " " * 15 + "|" + " " * 3 + err_line_2 + " " * length_diff + " " * 3 + "|" elif long_line_label == 2: length_diff = len(err_line_2) - len(err_line_1) s3 = " " * 15 + "|" + " " * 3 + err_line_1 + " " * length_diff + " " * 3 + "|" s4 = " " * 15 + "|" + " " * 3 + err_line_2 + " " * 3 + "|" else: s34 = " " * 15 + "|" + " " * 3 + err_message + " " * 3 + "|" s5 = " " * 15 + "\\" + " " * 2 + "_" * (max_length + 4) + "/" s6 = " " * 14 + "/ /" if not one_line: speech_bubble = s1 + "\n" + s2 + "\n" + s3 + "\n" + s4 + "\n" + s5 + '\n' + s6 else: speech_bubble = s1 + "\n" + s2 + "\n" + s34 + "\n" + s5 + '\n' + s6 print("\n\n\n" + speech_bubble + ascii_art + "\n\n\n") return
python
from keras.preprocessing.image import load_img, img_to_array target_image_path = 'img/a.jpg' style_image_path = 'img/a.png' width, height = load_img(target_image_path).size img_height = 400 img_width = int(width * img_height / height) import numpy as np from keras.applications import vgg19 def preprocess_image(image_path): img = load_img(image_path, target_size = (img_height, img_width)) img = img_to_array(img) img = np.expand_dims(img, axis=0) img = vgg19.preprocess_input(img) return img def deprocess_image(x): x[:,:,0] += 103.939 x[:,:,1] += 116.779 x[:,:,2] += 123.68 x = x[:,:,::-1] x = np.clip(x,0,255).astype('uint8') return x from keras import backend as K target_image = K.constant(preprocess_image(target_image_path)) style_image = K.constant(preprocess_image(style_image_path)) combination_image = K.placeholder((1,img_height, img_width, 3)) input_tensor = K.concatenate([target_image, style_image, combination_image], axis = 0) model = vgg19.VGG19(input_tensor=input_tensor, weights='imagenet', include_top=False) model.summary() def content_loss(base, combination): return K.sum(K.square(combination - base)) def gram_matrix(x): features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1))) gram = K.dot(features, K.transpose(features)) return gram def style_loss(style, combination): S = gram_matrix(style) C = gram_matrix(combination) channels = 3 size = img_height * img_width return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2)) def total_variation_loss(x): a = K.square(x[:, :img_height-1, :img_width-1, :] - x[:, 1:, :img_width-1, :]) b = K.square(x[:, :img_height-1, :img_width-1, :] - x[:, :img_height-1, 1:, :]) return K.sum(K.pow(a + b, 1.25)) outputs_dict = dict([(layer.name, layer.output) for layer in model.layers]) content_layer = 'block5_conv2' style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] total_variation_weight = 1e-4 style_weight = 1. content_weight = 0.025 loss = K.variable(0.) 
layer_features = outputs_dict[content_layer] target_image_features = layer_features[0,:,:,:] combination_features = layer_features[2,:,:,:] loss += content_weight * content_loss(target_image_features, combination_features) for layer_name in style_layers: layer_features = outputs_dict[layer_name] style_features = layer_features[1,:,:,:] combination_features = layer_features[2,:,:,:] sl = style_loss(style_features, combination_features) loss += (style_weight / len(style_layers)) * sl loss += total_variation_weight * total_variation_loss(combination_image) grads = K.gradients(loss, combination_image)[0] fetch_loss_and_grads = K.function([combination_image], [loss, grads]) class Evaluator(object): def __init__(self): self.loss_value = None self.grads_values = None def loss(self, x): assert self.loss_value is None x = x.reshape((1,img_height, img_width, 3)) outs = fetch_loss_and_grads([x]) loss_value = outs[0] grad_values = outs[1].flatten().astype('float64') self.loss_value = loss_value self.grad_values = grad_values return self.loss_value def grads(self, x): assert self.loss_value is not None grad_values = np.copy(self.grad_values) self.loss_value = None self.grad_values = None return grad_values evaluator = Evaluator() from scipy.optimize import fmin_l_bfgs_b from scipy.misc import imsave import time result_prefix = 'my_result' iterations = 20 x = preprocess_image(target_image_path) x = x.flatten() for i in range(iterations): print('Start of iteration', i) start_time = time.time() x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x, fprime=evaluator.grads, maxfun=20) print('Current loss value:', min_val) img = x.copy().reshape((img_height, img_width, 3)) img = deprocess_image(img) fname=result_prefix + '_at_iteration_%d.png' % i imsave(fname, img) print('Image saved as', fname) end_time = time.time() print('Iterations %d completed in %ds' % (i, end_time - start_time))
python
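gram_matrix above flattens a (height, width, channels) feature map to channels-first and takes its inner product with itself, giving the channel-correlation matrix used by the style loss. The plain-numpy sketch below mirrors that computation on a made-up 4x4x3 feature map; it is only an illustration, not part of the original script.

import numpy as np

# Illustrative (H, W, C) feature map; values made up.
features = np.random.rand(4, 4, 3).astype('float64')

# Same computation as gram_matrix() above, in plain numpy:
# move channels first, flatten the spatial dims, then F @ F.T -> (C, C) channel correlations.
F = features.transpose(2, 0, 1).reshape(3, -1)
G = F @ F.T
print(G.shape)   # (3, 3)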
#!/usr/bin/python
# -*- coding: utf-8 -*-


class Demo(object):
    __x = 0

    def __init__(self, i):
        self.__i = i
        Demo.__x += 1

    def __str__(self):
        return str(self.__i)

    def hello(self):
        print("hello " + self.__str__())

    @classmethod
    def getX(cls):
        return cls.__x


class Other(object):
    def __init__(self, k):
        self.k = k

    def __str__(self):
        return str(self.k)

    def hello(self):
        print("hello, world")

    def bye(self):
        print("Good-bye!", self.__str__())


class SubDemo(Demo, Other):
    def __init__(self, i, j):
        super(SubDemo, self).__init__(i)
        self.__j = j

    def __str__(self):
        return super(SubDemo, self).__str__() + "+" + str(self.__j)
python
import abc from typing import Callable from typing import Iterator from typing import List from typing import Optional from xsdata.codegen.models import Class from xsdata.models.config import GeneratorConfig from xsdata.utils.constants import return_true class ContainerInterface(metaclass=abc.ABCMeta): """Wrap a list of classes and expose a simple api for easy access and process.""" config: GeneratorConfig @abc.abstractmethod def iterate(self) -> Iterator[Class]: """Create an iterator for the class map values.""" @abc.abstractmethod def find(self, qname: str, condition: Callable = return_true) -> Optional[Class]: """Search by qualified name for a specific class with an optional condition callable.""" @abc.abstractmethod def find_inner(self, source: Class, qname: str) -> Class: """Search by qualified name for a specific inner class or fail.""" @abc.abstractmethod def add(self, item: Class): """Add class item to the container.""" @abc.abstractmethod def extend(self, items: List[Class]): """Add a list of classes the container.""" class HandlerInterface(metaclass=abc.ABCMeta): """Class handler interface.""" @abc.abstractmethod def process(self, target: Class): """Process the given target class."""
python
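Neither abstract class above ships with a concrete implementation in this file. A minimal handler could look like the sketch below; the class name is invented for illustration, it assumes HandlerInterface from the entry above is in scope, and it assumes the Class model exposes a qname attribute, as recent xsdata releases do.

from xsdata.codegen.models import Class


class CollectClassNamesHandler(HandlerInterface):
    """Illustrative handler, not part of xsdata: record the qualified name of every class it processes."""

    def __init__(self):
        self.seen = []

    def process(self, target: Class):
        # A real handler would transform `target`; this sketch only records what it saw.
        self.seen.append(target.qname)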
# testing the concept
import re

#file_name = raw_input("Enter textfile name (ex. hamlet.txt): ")

def app(f_name):
    fd = open(f_name, 'r')
    fd = fd.read()
    lines = fd.split('\n')
    c1 = 0
    # drop empty lines; only advance the index when nothing was removed,
    # otherwise consecutive blank lines would be skipped over
    while c1 < len(lines):
        #lines[c1] = re.sub('[^0-9a-zA-Z]+', '', lines[c1])
        if len(lines[c1]) == 0:
            lines.pop(c1)
        else:
            c1 += 1
    return lines

def game():
    lines = app('hamlet.txt')
    print lines
    current_line = 0
    while current_line < len(lines):
        if current_line == 0:
            guess = raw_input("\nFirst line: ")
            print guess
            if re.sub('[^0-9a-zA-Z]+', '', guess).lower() == re.sub('[^0-9a-zA-Z]+', '', lines[current_line]).lower():
                current_line += 1
        if current_line > 0:
            print "\nPrevious line: " + lines[current_line - 1]
            guess = raw_input("Line: ")
            """
            print "==========="
            print re.sub('[^0-9a-zA-Z]+','',guess).lower()
            temp = lines[current_line]
            print re.sub('[^0-9a-zA-Z]+','',temp).lower()
            print "==========="
            """
            if re.sub('[^0-9a-zA-Z]+', '', guess).lower() == re.sub('[^0-9a-zA-Z]+', '', lines[current_line]).lower():
                current_line += 1
    print "FINISHED!"

game()
python
import pytest import os, time import sys from datetime import date, datetime from pytest_html_reporter.template import html_template from pytest_html_reporter.time_converter import time_converter from os.path import isfile, join import json import glob from collections import Counter from PIL import Image from io import BytesIO import shutil _total = _executed = 0 _pass = _fail = 0 _skip = _error = 0 _xpass = _xfail = 0 _apass = _afail = 0 _askip = _aerror = 0 _axpass = _axfail = 0 _astotal = 0 _aspass = 0 _asfail = 0 _asskip = 0 _aserror = 0 _asxpass = 0 _asxfail = 0 _asrerun = 0 _current_error = "" _suite_name = _test_name = None _scenario = [] _test_suite_name = [] _test_pass_list = [] _test_fail_list = [] _test_skip_list = [] _test_xpass_list = [] _test_xfail_list = [] _test_error_list = [] _test_status = None _start_execution_time = 0 _execution_time = _duration = 0 _test_metrics_content = _suite_metrics_content = "" _previous_suite_name = "None" _initial_trigger = True _spass_tests = 0 _sfail_tests = 0 _sskip_tests = 0 _serror_tests = 0 _srerun_tests = 0 _sxfail_tests = 0 _sxpass_tests = 0 _suite_length = 0 _archive_tab_content = "" _archive_body_content = "" _archive_count = "" archive_pass = 0 archive_fail = 0 archive_skip = 0 archive_xpass = 0 archive_xfail = 0 archive_error = 0 archives = {} highlights = {} p_highlights = {} max_failure_suite_name = '' max_failure_suite_name_final = '' max_failure_suite_count = 0 similar_max_failure_suite_count = 0 max_failure_total_tests = 0 max_failure_percent = '' trends_label = [] tpass = [] tfail = [] tskip = [] _previous_test_name = '' _suite_error = 0 _suite_fail = 0 _pvalue = 0 screen_base = '' screen_img = None _attach_screenshot_details = '' _title = 'PYTEST REPORT' def pytest_addoption(parser): group = parser.getgroup("report generator") group.addoption( "--html-report", action="store", dest="path", default=".", help="path to generate html report", ) group.addoption( "--title", action="store", dest="title", default="PYTEST REPORT", help="customize report title", ) def pytest_configure(config): path = config.getoption("path") clean_screenshots(path) title = config.getoption("title") custom_title(title) config._html = HTMLReporter(path, config) config.pluginmanager.register(config._html) def suite_highlights(data): global highlights, p_highlights for i in data['content']['suites']: if data['content']['suites'][i]['status']['total_fail'] == 0: l = data['content']['suites'][i]['suite_name'] if l not in p_highlights: p_highlights[l] = 1 else: p_highlights[l] += 1 else: k = data['content']['suites'][i]['suite_name'] if k not in highlights: highlights[k] = 1 else: highlights[k] += 1 def generate_suite_highlights(): global max_failure_suite_name, max_failure_suite_count, similar_max_failure_suite_count, max_failure_total_tests global max_failure_percent, max_failure_suite_name_final if highlights == {}: max_failure_suite_name_final = 'No failures in History' max_failure_suite_count = 0 max_failure_percent = '0' return max_failure_suite_name = max(highlights, key=highlights.get) max_failure_suite_count = highlights[max_failure_suite_name] if max_failure_suite_name in p_highlights: max_failure_total_tests = p_highlights[max_failure_suite_name] + max_failure_suite_count else: max_failure_total_tests = max_failure_suite_count max_failure_percent = (max_failure_suite_count / max_failure_total_tests) * 100 if max_failure_suite_name.__len__() > 25: max_failure_suite_name_final = ".." 
+ max_failure_suite_name[-23:] else: max_failure_suite_name_final = max_failure_suite_name res = Counter(highlights.values()) if max(res.values()) > 1: similar_max_failure_suite_count = max(res.values()) def max_rerun(): indices = [i for i, s in enumerate(sys.argv) if 'reruns' in s] try: if "=" in sys.argv[int(indices[0])]: return int(sys.argv[int(indices[0])].split('=')[1]) else: return int(sys.argv[int(indices[0]) + 1]) except IndexError: return None def screenshot(data=None): global screen_base, screen_img screen_base = HTMLReporter.base_path screen_img = Image.open(BytesIO(data)) def clean_screenshots(path): screenshot_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(path))) + '/pytest_screenshots' if os.path.isdir(screenshot_dir): shutil.rmtree(screenshot_dir) def custom_title(title): global _title _title = title[:26] + '...' if title.__len__() > 29 else title class HTMLReporter(object): def __init__(self, path, config): self.json_data = {'content': {'suites': {0: {'status': {}, 'tests': {0: {}}, }, }}} self.path = path self.config = config has_rerun = config.pluginmanager.hasplugin("rerunfailures") self.rerun = 0 if has_rerun else None def pytest_runtest_teardown(self, item, nextitem): global _test_name, _duration _test_name = item.name _test_end_time = time.time() _duration = _test_end_time - _start_execution_time if (self.rerun is not None) and (max_rerun() is not None): self.previous_test_name(_test_name) self._test_names(_test_name) self.append_test_metrics_row() def previous_test_name(self, _test_name): global _previous_test_name if _previous_test_name == _test_name: self.rerun += 1 else: _scenario.append(_test_name) self.rerun = 0 _previous_test_name = _test_name def pytest_runtest_setup(item): global _start_execution_time _start_execution_time = time.time() def pytest_sessionfinish(self, session): if _suite_name is not None: self.append_suite_metrics_row(_suite_name) def archive_data(self, base, filename): path = os.path.join(base, filename) if os.path.isfile(path) is True: os.makedirs(base + '/archive', exist_ok=True) f = 'output.json' if isfile(join(base, f)): fname = os.path.splitext(f) os.rename(base + '/' + f, os.path.join(base + '/archive', fname[0] + '_' + str(_start_execution_time) + fname[1])) @property def report_path(self): if '.html' in self.path: path = '.' if '.html' in self.path.rsplit('/', 1)[0] else self.path.rsplit('/', 1)[0] if path == '': path = '.' 
logfile = os.path.expanduser(os.path.expandvars(path)) HTMLReporter.base_path = os.path.abspath(logfile) return os.path.abspath(logfile), self.path.split('/')[-1] else: logfile = os.path.expanduser(os.path.expandvars(self.path)) HTMLReporter.base_path = os.path.abspath(logfile) return os.path.abspath(logfile), 'pytest_html_report.html' @pytest.hookimpl(hookwrapper=True) def pytest_terminal_summary(self, terminalreporter, exitstatus, config): yield global _execution_time _execution_time = time.time() - terminalreporter._sessionstarttime if _execution_time < 60: _execution_time = str(round(_execution_time, 2)) + " secs" else: _execution_time = str(time.strftime("%H:%M:%S", time.gmtime(round(_execution_time)))) + " Hrs" global _total _total = _pass + _fail + _xpass + _xfail + _skip + _error if _suite_name is not None: base = self.report_path[0] path = os.path.join(base, self.report_path[1]) os.makedirs(base, exist_ok=True) self.archive_data(base, self.report_path[1]) # generate json file self.generate_json_data(base) # generate trends self.update_trends(base) # generate archive template self.update_archives_template(base) # generate suite highlights generate_suite_highlights() # generate html report live_logs_file = open(path, 'w') message = self.renew_template_text('https://i.imgur.com/LRSRHJO.png') live_logs_file.write(message) live_logs_file.close() @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(self, item, call): outcome = yield rep = outcome.get_result() global _suite_name _suite_name = rep.nodeid.split("::")[0] if _initial_trigger: self.update_previous_suite_name() self.set_initial_trigger() if str(_previous_suite_name) != str(_suite_name): self.append_suite_metrics_row(_previous_suite_name) self.update_previous_suite_name() else: self.update_counts(rep) if rep.when == "call" and rep.passed: if hasattr(rep, "wasxfail"): self.increment_xpass() self.update_test_status("xPASS") global _current_error self.update_test_error("") else: self.increment_pass() self.update_test_status("PASS") self.update_test_error("") if rep.failed: if getattr(rep, "when", None) == "call": if hasattr(rep, "wasxfail"): self.increment_xpass() self.update_test_status("xPASS") self.update_test_error("") else: self.increment_fail() self.update_test_status("FAIL") if rep.longrepr: longerr = "" for line in rep.longreprtext.splitlines(): exception = line.startswith("E ") if exception: longerr += line + "\n" self.update_test_error(longerr.replace("E ", "")) else: self.increment_error() self.update_test_status("ERROR") if rep.longrepr: longerr = "" for line in rep.longreprtext.splitlines(): longerr += line + "\n" self.update_test_error(longerr) if rep.skipped: if hasattr(rep, "wasxfail"): self.increment_xfail() self.update_test_status("xFAIL") if rep.longrepr: longerr = "" for line in rep.longreprtext.splitlines(): exception = line.startswith("E ") if exception: longerr += line + "\n" self.update_test_error(longerr.replace("E ", "")) else: self.increment_skip() self.update_test_status("SKIP") if rep.longrepr: longerr = "" for line in rep.longreprtext.splitlines(): longerr += line + "\n" self.update_test_error(longerr) def append_test_metrics_row(self): global _test_metrics_content, _pvalue, _duration test_row_text = """ <tr> <td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left">__sname__</td> <td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left">__name__</td> <td>__stat__</td> <td>__dur__</td> <td style="word-wrap: 
break-word;max-width: 200px; white-space: normal; text-align:left""> __msg__ __floating_error_text__ </td> </tr> """ floating_error_text = """ <a data-toggle="modal" href="#myModal-__runt__" class="">(...)</a> <div class="modal fade in" id="myModal-__runt__" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"> <div class="modal-dialog"> <div class="modal-content"> <div class="modal-body"> <p> <svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" width="1.12em" height="1em" style="-ms-transform: rotate(360deg); -webkit-transform: rotate(360deg); transform: rotate(360deg);" preserveAspectRatio="xMidYMid meet" viewBox="0 0 1856 1664"><path d="M1056 1375v-190q0-14-9.5-23.5t-22.5-9.5H832q-13 0-22.5 9.5T800 1185v190q0 14 9.5 23.5t22.5 9.5h192q13 0 22.5-9.5t9.5-23.5zm-2-374l18-459q0-12-10-19q-13-11-24-11H818q-11 0-24 11q-10 7-10 21l17 457q0 10 10 16.5t24 6.5h185q14 0 23.5-6.5t10.5-16.5zm-14-934l768 1408q35 63-2 126q-17 29-46.5 46t-63.5 17H160q-34 0-63.5-17T50 1601q-37-63-2-126L816 67q17-31 47-49t65-18t65 18t47 49z" fill="#DC143C"/></svg> __full_msg__ </p> </div> <div class="modal-footer"> <button type="button" class="btn btn-primary" data-dismiss="modal">Close</button> </div> </div> </div> </div> """ if (self.rerun is not None) and (max_rerun() is not None): if (_test_status == 'FAIL') or (_test_status == 'ERROR'): _pvalue += 1 if (_pvalue == max_rerun() + 1) or (_test_status == 'PASS'): if ((_test_status == 'FAIL') or (_test_status == 'ERROR')) and ( screen_base != ''): self.generate_screenshot_data() test_row_text = test_row_text.replace("__sname__", str(_suite_name)) test_row_text = test_row_text.replace("__name__", str(_test_name)) test_row_text = test_row_text.replace("__stat__", str(_test_status)) test_row_text = test_row_text.replace("__dur__", str(round(_duration, 2))) test_row_text = test_row_text.replace("__msg__", str(_current_error[:50])) floating_error_text = floating_error_text.replace("__runt__", str(time.time()).replace('.', '')) if len(_current_error) < 49: test_row_text = test_row_text.replace("__floating_error_text__", str('')) else: test_row_text = test_row_text.replace("__floating_error_text__", str(floating_error_text)) test_row_text = test_row_text.replace("__full_msg__", str(_current_error)) _test_metrics_content += test_row_text _pvalue = 0 elif (self.rerun is not None) and ( (_test_status == 'xFAIL') or (_test_status == 'xPASS') or (_test_status == 'SKIP')): test_row_text = test_row_text.replace("__sname__", str(_suite_name)) test_row_text = test_row_text.replace("__name__", str(_test_name)) test_row_text = test_row_text.replace("__stat__", str(_test_status)) test_row_text = test_row_text.replace("__dur__", str(round(_duration, 2))) test_row_text = test_row_text.replace("__msg__", str(_current_error[:50])) floating_error_text = floating_error_text.replace("__runt__", str(time.time()).replace('.', '')) if len(_current_error) < 49: test_row_text = test_row_text.replace("__floating_error_text__", str('')) else: test_row_text = test_row_text.replace("__floating_error_text__", str(floating_error_text)) test_row_text = test_row_text.replace("__full_msg__", str(_current_error)) _test_metrics_content += test_row_text elif (self.rerun is None) or (max_rerun() is None): if ((_test_status == 'FAIL') or (_test_status == 'ERROR')) and ( screen_base != ''): self.generate_screenshot_data() test_row_text = test_row_text.replace("__sname__", str(_suite_name)) test_row_text = test_row_text.replace("__name__", str(_test_name)) test_row_text 
= test_row_text.replace("__stat__", str(_test_status)) test_row_text = test_row_text.replace("__dur__", str(round(_duration, 2))) test_row_text = test_row_text.replace("__msg__", str(_current_error[:50])) floating_error_text = floating_error_text.replace("__runt__", str(time.time()).replace('.', '')) if len(_current_error) < 49: test_row_text = test_row_text.replace("__floating_error_text__", str('')) else: test_row_text = test_row_text.replace("__floating_error_text__", str(floating_error_text)) test_row_text = test_row_text.replace("__full_msg__", str(_current_error)) _test_metrics_content += test_row_text self.json_data['content']['suites'].setdefault(len(_test_suite_name), {})['suite_name'] = str(_suite_name) self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault( len(_scenario) - 1, {})['status'] = str(_test_status) self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault( len(_scenario) - 1, {})['message'] = str(_current_error) self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault( len(_scenario) - 1, {})['test_name'] = str(_test_name) if (self.rerun is not None) and (max_rerun() is not None): self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault( len(_scenario) - 1, {})['rerun'] = str(self.rerun) else: self.json_data['content']['suites'].setdefault(len(_test_suite_name), {}).setdefault('tests', {}).setdefault( len(_scenario) - 1, {})['rerun'] = '0' def generate_screenshot_data(self): os.makedirs(screen_base + '/pytest_screenshots', exist_ok=True) _screenshot_name = round(time.time()) _screenshot_suite_name = _suite_name.split('/')[-1:][0].replace('.py', '') _screenshot_test_name = _test_name if len(_test_name) >= 19: _screenshot_test_name = _test_name[-17:] _screenshot_error = _current_error screen_img.save( screen_base + '/pytest_screenshots/' + str(_screenshot_name) + '.png' ) # attach screenshots self.attach_screenshots(_screenshot_name, _screenshot_suite_name, _screenshot_test_name, _screenshot_error) _screenshot_name = '' _screenshot_suite_name = '' _screenshot_test_name = '' _screenshot_error = '' def append_suite_metrics_row(self, name): global _spass_tests, _sfail_tests, _sskip_tests, _sxpass_tests, _sxfail_tests, _serror_tests, _srerun_tests, \ _error, _suite_error, _suite_fail self._test_names(_test_name, clear='yes') self._test_suites(name) self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_pass'] = int(_spass_tests) self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_skip'] = int(_sskip_tests) self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_xpass'] = int(_sxpass_tests) self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_xfail'] = int(_sxfail_tests) if (self.rerun is not None) and (max_rerun() is not None): _base_suite = self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests'] for i in _base_suite: _srerun_tests += int(_base_suite[int(i)]['rerun']) self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_rerun'] = int(_srerun_tests) else: self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 
'total_rerun'] = 0 for i in self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests']: if 'ERROR' in self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests'][i][ 'status']: _suite_error += 1 elif 'FAIL' == self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {})['tests'][i][ 'status']: _suite_fail += 1 self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_fail'] = _suite_fail self.json_data['content']['suites'].setdefault(len(_test_suite_name) - 1, {}).setdefault('status', {})[ 'total_error'] = _suite_error suite_row_text = """ <tr> <td style="word-wrap: break-word;max-width: 200px; white-space: normal; text-align:left">__sname__</td> <td>__spass__</td> <td>__sfail__</td> <td>__sskip__</td> <td>__sxpass__</td> <td>__sxfail__</td> <td>__serror__</td> <td>__srerun__</td> </tr> """ suite_row_text = suite_row_text.replace("__sname__", str(name)) suite_row_text = suite_row_text.replace("__spass__", str(_spass_tests)) suite_row_text = suite_row_text.replace("__sfail__", str(_suite_fail)) suite_row_text = suite_row_text.replace("__sskip__", str(_sskip_tests)) suite_row_text = suite_row_text.replace("__sxpass__", str(_sxpass_tests)) suite_row_text = suite_row_text.replace("__sxfail__", str(_sxfail_tests)) suite_row_text = suite_row_text.replace("__serror__", str(_suite_error)) suite_row_text = suite_row_text.replace("__srerun__", str(_srerun_tests)) global _suite_metrics_content _suite_metrics_content += suite_row_text self._test_passed(int(_spass_tests)) self._test_failed(int(_suite_fail)) self._test_skipped(int(_sskip_tests)) self._test_xpassed(int(_sxpass_tests)) self._test_xfailed(int(_sxfail_tests)) self._test_error(int(_suite_error)) _spass_tests = 0 _sfail_tests = 0 _sskip_tests = 0 _sxpass_tests = 0 _sxfail_tests = 0 _serror_tests = 0 _srerun_tests = 0 _suite_fail = 0 _suite_error = 0 def set_initial_trigger(self): global _initial_trigger _initial_trigger = False def update_previous_suite_name(self): global _previous_suite_name _previous_suite_name = _suite_name def update_counts(self, rep): global _sfail_tests, _spass_tests, _sskip_tests, _serror_tests, _sxfail_tests, _sxpass_tests if rep.when == "call" and rep.passed: if hasattr(rep, "wasxfail"): _sxpass_tests += 1 else: _spass_tests += 1 if rep.failed: if getattr(rep, "when", None) == "call": if hasattr(rep, "wasxfail"): _sxpass_tests += 1 else: _sfail_tests += 1 else: pass if rep.skipped: if hasattr(rep, "wasxfail"): _sxfail_tests += 1 else: _sskip_tests += 1 def update_test_error(self, msg): global _current_error _current_error = msg def update_test_status(self, status): global _test_status _test_status = status def increment_xpass(self): global _xpass _xpass += 1 def increment_xfail(self): global _xfail _xfail += 1 def increment_pass(self): global _pass _pass += 1 def increment_fail(self): global _fail _fail += 1 def increment_skip(self): global _skip _skip += 1 def increment_error(self): global _error, _serror_tests _error += 1 _serror_tests += 1 def _date(self): return date.today().strftime("%B %d, %Y") def _test_suites(self, name): global _test_suite_name _test_suite_name.append(name.split('/')[-1].replace('.py', '')) def _test_names(self, name, **kwargs): global _scenario if (self.rerun is None) or (max_rerun() is None): _scenario.append(name) try: if kwargs['clear'] == 'yes': _scenario = [] except Exception: pass def _test_passed(self, value): global _test_pass_list 
_test_pass_list.append(value) def _test_failed(self, value): global _test_fail_list _test_fail_list.append(value) def _test_skipped(self, value): global _test_skip_list _test_skip_list.append(value) def _test_xpassed(self, value): global _test_xpass_list _test_xpass_list.append(value) def _test_xfailed(self, value): global _test_xfail_list _test_xfail_list.append(value) def _test_error(self, value): global _test_error_list _test_error_list.append(value) def renew_template_text(self, logo_url): template_text = html_template() template_text = template_text.replace("__custom_logo__", logo_url) template_text = template_text.replace("__execution_time__", str(_execution_time)) template_text = template_text.replace("__title__", _title) # template_text = template_text.replace("__executed_by__", str(platform.uname()[1])) # template_text = template_text.replace("__os_name__", str(platform.uname()[0])) # template_text = template_text.replace("__python_version__", str(sys.version.split(' ')[0])) # template_text = template_text.replace("__generated_date__", str(datetime.datetime.now().strftime("%b %d %Y, %H:%M"))) template_text = template_text.replace("__total__", str(_aspass + _asfail + _asskip + _aserror + _asxpass + _asxfail)) template_text = template_text.replace("__executed__", str(_executed)) template_text = template_text.replace("__pass__", str(_aspass)) template_text = template_text.replace("__fail__", str(_asfail)) template_text = template_text.replace("__skip__", str(_asskip)) template_text = template_text.replace("__error__", str(_aserror)) template_text = template_text.replace("__xpass__", str(_asxpass)) template_text = template_text.replace("__xfail__", str(_asxfail)) template_text = template_text.replace("__rerun__", str(_asrerun)) template_text = template_text.replace("__suite_metrics_row__", str(_suite_metrics_content)) template_text = template_text.replace("__test_metrics_row__", str(_test_metrics_content)) template_text = template_text.replace("__date__", str(self._date())) template_text = template_text.replace("__test_suites__", str(_test_suite_name)) template_text = template_text.replace("__test_suite_length__", str(len(_test_suite_name))) template_text = template_text.replace("__test_suite_pass__", str(_test_pass_list)) template_text = template_text.replace("__test_suites_fail__", str(_test_fail_list)) template_text = template_text.replace("__test_suites_skip__", str(_test_skip_list)) template_text = template_text.replace("__test_suites_xpass__", str(_test_xpass_list)) template_text = template_text.replace("__test_suites_xfail__", str(_test_xfail_list)) template_text = template_text.replace("__test_suites_error__", str(_test_error_list)) template_text = template_text.replace("__archive_status__", str(_archive_tab_content)) template_text = template_text.replace("__archive_body_content__", str(_archive_body_content)) template_text = template_text.replace("__archive_count__", str(_archive_count)) template_text = template_text.replace("__archives__", str(archives)) template_text = template_text.replace("__max_failure_suite_name_final__", str(max_failure_suite_name_final)) template_text = template_text.replace("__max_failure_suite_count__", str(max_failure_suite_count)) template_text = template_text.replace("__similar_max_failure_suite_count__", str(similar_max_failure_suite_count)) template_text = template_text.replace("__max_failure_total_tests__", str(max_failure_total_tests)) template_text = template_text.replace("__max_failure_percent__", str(max_failure_percent)) template_text = 
template_text.replace("__trends_label__", str(trends_label)) template_text = template_text.replace("__tpass__", str(tpass)) template_text = template_text.replace("__tfail__", str(tfail)) template_text = template_text.replace("__tskip__", str(tskip)) template_text = template_text.replace("__attach_screenshot_details__", str(_attach_screenshot_details)) return template_text def generate_json_data(self, base): global _asskip, _aserror, _aspass, _asfail, _asxpass, _asxfail, _asrerun self.json_data['date'] = self._date() self.json_data['start_time'] = _start_execution_time self.json_data['total_suite'] = len(_test_suite_name) suite = self.json_data['content']['suites'] for i in suite: for k in self.json_data['content']['suites'][i]['status']: if (k == 'total_fail' or k == 'total_error') and self.json_data['content']['suites'][i]['status'][ k] != 0: self.json_data['status'] = "FAIL" break else: continue try: if self.json_data['status'] == "FAIL": break except KeyError: if len(_test_suite_name) == i + 1: self.json_data['status'] = "PASS" for i in suite: for k in self.json_data['content']['suites'][i]['status']: if k == 'total_pass': _aspass += self.json_data['content']['suites'][i]['status'][k] elif k == 'total_fail': _asfail += self.json_data['content']['suites'][i]['status'][k] elif k == 'total_skip': _asskip += self.json_data['content']['suites'][i]['status'][k] elif k == 'total_error': _aserror += self.json_data['content']['suites'][i]['status'][k] elif k == 'total_xpass': _asxpass += self.json_data['content']['suites'][i]['status'][k] elif k == 'total_xfail': _asxfail += self.json_data['content']['suites'][i]['status'][k] elif k == 'total_rerun': _asrerun += self.json_data['content']['suites'][i]['status'][k] _astotal = _aspass + _asfail + _asskip + _aserror + _asxpass + _asxfail self.json_data.setdefault('status_list', {})['pass'] = str(_aspass) self.json_data.setdefault('status_list', {})['fail'] = str(_asfail) self.json_data.setdefault('status_list', {})['skip'] = str(_asskip) self.json_data.setdefault('status_list', {})['error'] = str(_aserror) self.json_data.setdefault('status_list', {})['xpass'] = str(_asxpass) self.json_data.setdefault('status_list', {})['xfail'] = str(_asxfail) self.json_data.setdefault('status_list', {})['rerun'] = str(_asrerun) self.json_data['total_tests'] = str(_astotal) with open(base + '/output.json', 'w') as outfile: json.dump(self.json_data, outfile) def update_archives_template(self, base): global _archive_count f = glob.glob(base + '/archive/*.json') cf = glob.glob(base + '/output.json') if len(f) > 0: _archive_count = len(f) + 1 self.load_archive(cf, value='current') f.sort(reverse=True) self.load_archive(f, value='history') else: _archive_count = 1 self.load_archive(cf, value='current') def load_archive(self, f, value): global archive_pass, archive_fail, archive_skip, archive_xpass, archive_xfail, archive_error, archives def state(data): if data == 'fail': return 'times', '#fc6766' elif data == 'pass': return 'check', '#98cc64' for i, val in enumerate(f): with open(val) as json_file: data = json.load(json_file) suite_highlights(data) archive_row_text = """ <a class ="list-group-item list-group-item-action" href="#list-item-__acount__" style="font-size: 1.1rem; color: dimgray; margin-bottom: -7%;"> <i class="fa fa-__astate__" aria-hidden="true" style="color: __astate_color__"></i> <span>__astatus__</span></br> <span style="font-size: 0.81rem; color: gray; padding-left: 12%;">__adate__</span> </a> """ archive_row_text = 
archive_row_text.replace("__astate__", state(data['status'].lower())[0]) archive_row_text = archive_row_text.replace("__astate_color__", state(data['status'].lower())[1]) if value == "current": archive_row_text = archive_row_text.replace("__astatus__", 'build #' + str(_archive_count)) archive_row_text = archive_row_text.replace("__acount__", str(_archive_count)) else: archive_row_text = archive_row_text.replace("__astatus__", 'build #' + str(len(f) - i)) archive_row_text = archive_row_text.replace("__acount__", str(len(f) - i)) adate = datetime.strptime( data['date'].split(None, 1)[0][:1 + 2:] + ' ' + data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y" ) atime = \ "".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit( ':', 1)[0] archive_row_text = archive_row_text.replace("__adate__", str(adate.date()) + ' | ' + str(time_converter(atime))) global _archive_tab_content _archive_tab_content += archive_row_text _archive_body_text = """ <div id="list-item-__acount__" class="archive-body"> <div> <h4 class="archive-header"> Build #__acount__ </h4> <div class="archive-date"> <i class="fa fa-calendar-check-o" aria-hidden="true"></i>&nbsp;&nbsp;&nbsp; __date__ </div> </div> <div style="margin-top: -5%;"> <div id="archive-container-__iloop__" style="padding-top: 5%; position: absolute;"> <div style=""> <span class="total__tests">__total_tests__</span> </div> <div id="archive-label-__iloop__"> <span class="archive__label">TEST CASES</span> </div> </div> <div class="archive-chart-container"> <canvas id="archive-chart-__iloop__" style="margin-top: 10%; padding-left: 25%; margin-right: -16%; float: right;"></canvas> </div> </div> <div class="archive__bar"> <section id="statistic" class="statistic-section-__status__ one-page-section"> <div class="container" style="margin-top: -2%;"> <div class="row text-center"> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"> <div class="counter"> <h2 class="timer count-title count-number">__pass__</h2> <p class="stats-text">PASSED</p> </div> </div> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"> <div class="counter"> <h2 class="timer count-title count-number">__fail__ </h2> <p class="stats-text">FAILED</p> </div> </div> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"v> <div class="counter"> <h2 class="timer count-title count-number">__skip__</h2> <p class="stats-text">SKIPPED</p> </div> </div> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"> <div class="counter"> <h2 class="timer count-title count-number">__xpass__</h2> <p class="stats-text">XPASSED</p> </div> </div> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"> <div class="counter"> <h2 class="timer count-title count-number">__xfail__</h2> <p class="stats-text">XFAILED</p> </div> </div> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"> <div class="counter"> <h2 class="timer count-title count-number">__error__</h2> <p class="stats-text">ERROR</p> </div> </div> <div class="col-xs-12 col-md-3" style="max-width: 14.2%;"> <div class="counter"> <h2 class="timer count-title count-number">__rerun__</h2> <p class="stats-text">RERUN</p> </div> </div> </div> </div> </section> </div> </div> """ if value == "current": _archive_body_text = _archive_body_text.replace("__iloop__", str(i)) _archive_body_text = _archive_body_text.replace("__acount__", str(_archive_count)) else: _archive_body_text = _archive_body_text.replace("__iloop__", str(i + 1)) _archive_body_text = _archive_body_text.replace("__acount__", str(len(f) - 
i)) _archive_body_text = _archive_body_text.replace("__total_tests__", data['total_tests']) _archive_body_text = _archive_body_text.replace("__date__", data['date'].upper()) _archive_body_text = _archive_body_text.replace("__pass__", data['status_list']['pass']) _archive_body_text = _archive_body_text.replace("__fail__", data['status_list']['fail']) _archive_body_text = _archive_body_text.replace("__skip__", data['status_list']['skip']) _archive_body_text = _archive_body_text.replace("__xpass__", data['status_list']['xpass']) _archive_body_text = _archive_body_text.replace("__xfail__", data['status_list']['xfail']) _archive_body_text = _archive_body_text.replace("__error__", data['status_list']['error']) try: _archive_body_text = _archive_body_text.replace("__rerun__", data['status_list']['rerun']) except KeyError: _archive_body_text = _archive_body_text.replace("__rerun__", '0') _archive_body_text = _archive_body_text.replace("__status__", data['status'].lower()) index = i if value != "current": index = i + 1 archives.setdefault(str(index), {})['pass'] = data['status_list']['pass'] archives.setdefault(str(index), {})['fail'] = data['status_list']['fail'] archives.setdefault(str(index), {})['skip'] = data['status_list']['skip'] archives.setdefault(str(index), {})['xpass'] = data['status_list']['xpass'] archives.setdefault(str(index), {})['xfail'] = data['status_list']['xfail'] archives.setdefault(str(index), {})['error'] = data['status_list']['error'] try: archives.setdefault(str(index), {})['rerun'] = data['status_list']['rerun'] except KeyError: archives.setdefault(str(index), {})['rerun'] = '0' archives.setdefault(str(index), {})['total'] = data['total_tests'] global _archive_body_content _archive_body_content += _archive_body_text def update_trends(self, base): global tpass, tfail, tskip f2 = glob.glob(base + '/output.json') with open(f2[0]) as json_file: data = json.load(json_file) adate = datetime.strptime( data['date'].split(None, 1)[0][:1 + 2:] + ' ' + data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y" ) atime = \ "".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit( ':', 1)[0] trends_label.append(str(time_converter(atime)).upper() + ' | ' + str(adate.date().strftime("%b")) + ' ' + str(adate.date().strftime("%d"))) tpass.append(data['status_list']['pass']) tfail.append(int(data['status_list']['fail']) + int(data['status_list']['error'])) tskip.append(data['status_list']['skip']) f = glob.glob(base + '/archive' + '/*.json') f.sort(reverse=True) for i, val in enumerate(f): with open(val) as json_file: data = json.load(json_file) adate = datetime.strptime( data['date'].split(None, 1)[0][:1 + 2:] + ' ' + data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y" ) atime = \ "".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit( ':', 1)[0] trends_label.append(str(time_converter(atime)).upper() + ' | ' + str(adate.date().strftime("%b")) + ' ' + str(adate.date().strftime("%d"))) tpass.append(data['status_list']['pass']) tfail.append(int(data['status_list']['fail']) + int(data['status_list']['error'])) tskip.append(data['status_list']['skip']) if i == 4: break def attach_screenshots(self, screen_name, test_suite, test_case, test_error): global _attach_screenshot_details _screenshot_details = """ <div class="img-hover col-md-6 col-xl-3 p-3"> <div> <a class="video" href="__screenshot_base__/pytest_screenshots/__screen_name__.png" data-toggle="lightbox" style="background-image: 
url('__screenshot_base__/pytest_screenshots/__screen_name__.png');" data-fancybox="images" data-caption="SUITE: __ts__ :: SCENARIO: __tc__"> <span class="video-hover-desc video-hover-small"> <span style="font-size:23px;display: block;margin-bottom: 15px;"> __tc__</span> <span>__te__</span> </span> </a> <p class="text-desc"><strong>__ts__</strong><br /> __te__</p> </div> </div> <div class="desc-video-none"> <div class="desc-video" id="Video-desc-01"> <h2>__tc__</h2> <p><strong>__ts__</strong><br /> __te__</p> </div> </div> """ if len(test_case) == 17: test_case = '..' + test_case _screenshot_details = _screenshot_details.replace("__screen_name__", str(screen_name)) _screenshot_details = _screenshot_details.replace("__ts__", str(test_suite)) _screenshot_details = _screenshot_details.replace("__tc__", str(test_case)) _screenshot_details = _screenshot_details.replace("__te__", str(test_error)) _screenshot_details = _screenshot_details.replace("__screenshot_base__", str(screen_base)) _attach_screenshot_details += _screenshot_details
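# Hedged conftest.py sketch for the screenshot() helper defined above: on a failed test
# it forwards raw PNG bytes to screenshot(), which this plugin then saves and embeds in
# the HTML report. The import path and the Selenium-style `driver` fixture with its
# get_screenshot_as_png() call are assumptions for illustration, so the hook is left
# commented out rather than presented as the plugin's documented API.
#
# import pytest
# from pytest_html_reporter.plugin import screenshot
#
# @pytest.hookimpl(hookwrapper=True)
# def pytest_runtest_makereport(item, call):
#     outcome = yield
#     report = outcome.get_result()
#     if report.when == 'call' and report.failed:
#         driver = item.funcargs.get('driver')
#         if driver is not None:
#             screenshot(data=driver.get_screenshot_as_png())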
python
import datetime
import logging
import time

import googleapiclient.errors


class TaskList:
    def __init__(self, id):
        self.id = id
        self.tasks = []

    def update(self, service):
        try:
            results = service.tasks().list(tasklist=self.id, showCompleted=False,
                                           dueMax=rfc3339_today_midnight()).execute()
        except googleapiclient.errors.HttpError as e:
            logging.warning(e)
            logging.warning('Could not update task list.')
            return

        items = results.get('items')
        self.tasks = []
        if not items:
            # empty list, do nothing
            pass
        else:
            for item in items:
                self.tasks.append(item['title'])

    def delete_completed_tasks(self, service):
        results = service.tasks().list(tasklist=self.id, showCompleted=True,
                                       showHidden=True).execute()
        items = results.get('items')
        if not items:
            # empty list, do nothing
            pass
        else:
            for item in items:
                # if the task has been completed, delete it
                if item['status'] == 'completed':
                    service.tasks().delete(tasklist=self.id, task=item['id']).execute()


def rfc3339_today_midnight():
    now = datetime.datetime.now()
    dt = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0).isoformat()
    # time.timezone is seconds *west* of UTC, so a positive value means a negative UTC offset
    timezone = int(time.timezone / 3600.0)
    dt = dt + ('-' if timezone > 0 else '+')
    if abs(timezone) < 10:
        dt = dt + '0' + str(abs(timezone)) + ':00'
    else:
        dt = dt + str(abs(timezone)) + ':00'
    return dt
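# Hedged usage sketch for TaskList: the 'token.json' path and the scopes list are
# assumptions, and this only works against a real Google account with the Tasks API
# enabled; obtaining the OAuth credentials in the first place is not shown here.
if __name__ == '__main__':
    from google.oauth2.credentials import Credentials
    from googleapiclient.discovery import build

    creds = Credentials.from_authorized_user_file(
        'token.json', scopes=['https://www.googleapis.com/auth/tasks'])
    service = build('tasks', 'v1', credentials=creds)

    todo = TaskList('@default')  # '@default' refers to the account's primary task list
    todo.update(service)
    print(todo.tasks)
    todo.delete_completed_tasks(service)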
python
from datetime import *
from dateutil.relativedelta import *

now = datetime.now()
print(now)

now = now + relativedelta(months=1, weeks=1, hour=10)
print(now)
python
import os

FLASK_HOST = '0.0.0.0'
FLASK_PORT = 5000
FLASK_DEBUG = False
FLASK_THREADED = True

ENV_SETUP = os.getenv('MONGO_DATABASE', None) is not None

MONGO_DATABASE = os.getenv('MONGO_DATABASE', 'avoid_kuvid')
MONGO_ROOT_USERNAME = os.getenv('MONGO_ROOT_USERNAME', 'admin')
MONGO_ROOT_PASSWORD = os.getenv('MONGO_ROOT_PASSWORD', 'admin')
MONGO_API = (f'mongodb://{MONGO_ROOT_USERNAME}:{MONGO_ROOT_PASSWORD}@db:27017/{MONGO_DATABASE}'
             if ENV_SETUP else "mongodb://localhost:27017/")

TIME_FORMAT = '%H:%M'
DATE_FORMAT = f'%Y-%m-%d {TIME_FORMAT}'
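# Hedged usage sketch for the settings above: pymongo is an assumed dependency of the
# project (this config module itself does not import it), and the call only succeeds
# against a MongoDB instance reachable at MONGO_API.
if __name__ == '__main__':
    from pymongo import MongoClient

    client = MongoClient(MONGO_API)
    db = client[MONGO_DATABASE]
    print(db.list_collection_names())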
python
# -*- coding: utf-8 -*-
# @Author: Marc-Antoine
# @Date:   2019-03-17 17:18:42
# @Last Modified by:   Marc-Antoine Belanger
# @Last Modified time: 2019-03-17 17:20:31
from gym.envs.registration import register

register(
    id='cribbage-v0',
    entry_point='gym_cribbage.envs:CribbageEnv',
)
python
# coding:utf-8
# 2019/9/3
"""
Given an array of integers, find the pairs (a, b) such that a + b = 0 and
return the number of such pairs. (a, b) and (b, a) count as the same pair.

Input: an integer array
Output: the number of pairs found

Sample input: -1, 2, 4, 5, -2
Sample output: 1
"""


def solver(nums):
    maps = {}
    ret = 0
    retList = []
    for n in nums:
        if n in maps:
            if maps[n] == 1:
                if n not in retList and -n not in retList:
                    retList.append(n)
                    ret += 1
        maps[-n] = maps.get(-n, 0) + 1
        # print(maps, retList)
    return ret


def test():
    nums = [0, 0, 0, -1, 1, -1, 1]
    ret = solver(nums)
    print(ret)


def inputs():
    nums = list(map(int, input().strip().split(" ")))
    ret = solver(nums)
    print(ret)


if __name__ == '__main__':
    test()
python
class UserErrorMessage(object):
    OPERATION_NOT_SUPPORTED = "Operation is not supported."
    NO_MODEL_PUBLISHED = "No model published for the current API."
    NO_ENDPOINT_PUBLISHED = "No service endpoint published in the current API."
    NO_OPERATION_PUBLISHED = "No operation published in the current API."
    CAN_NOT_CONNECT_TO_MODEL_REPO = "Can not connect to the model repository. Contact the publisher to correct the error."
    NOT_IMPLEMENTED = "{} is not supported."
    OPERATION_NOT_IN_STATUS = "Operation {} is not in {} status."
    INVALID_CERT = 'Invalid certificate.'
    INVALID_API_KEY = 'The api key is invalid.'
    API_NOT_EXIST = 'The API {} in application {} does not exist or you do not have permission to access it.'
    SUBSCRIPTION_NOT_EXIST = "The subscription {} doesn't exist or api key is invalid."
    API_VERSION_NOT_EXIST = "The specified API or API version does not exist or you do not have permission to access it."
    API_VERSION_REQUIRED = "The api-version query parameter is required."
    AAD_TOKEN_REQUIRED = "AAD token is required."
    INTERNAL_SERVER_ERROR = "The server encountered an internal error and was unable to complete your request."
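# Small usage sketch: the templated messages above are plain str.format() templates,
# so callers fill in the placeholders themselves. The argument values below are
# invented purely for illustration.
if __name__ == '__main__':
    print(UserErrorMessage.NOT_IMPLEMENTED.format('Batch scoring'))
    print(UserErrorMessage.OPERATION_NOT_IN_STATUS.format('op-123', 'Succeeded'))
    print(UserErrorMessage.API_NOT_EXIST.format('score', 'my-app'))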
python
# -*- encoding: utf-8 -*- from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.http import Http404, HttpResponse from django.shortcuts import render, get_object_or_404, redirect from accounts.models import User, Authority from common.constants import GROUP_WORKING_TYPE_ADMINSTRATION_AREA from common.decorators import superuser_required from logs.functions import list_for_content_type, list_for_object, list_for_user from reports.models import AdministrationArea, Report, ReportInvestigation, ReportLaboratoryCase, AnimalLaboratoryCause, \ AnimalLaboratoryCause from reports.serializers import AnimalLaboratoryCauseSerializer from supervisors.forms import SupervisorsUserForm, SupervisorsAuthorityForm, SupervisorsReportInvestigationForm, \ SupervisorsReportLaboratoryCaseForm from supervisors.functions import (get_querystring_filter_user_status, export_excel_users_to_create_authorities, import_authorities_excel, import_and_excel_users_to_create_authorities, print_invite_code_authorities) @login_required @superuser_required def supervisors_home(request): return redirect('supervisors_users') @login_required # @superuser_required def supervisors_users(request): if request.user.is_superuser: return redirect('supervisors_users_by_status', user_status='volunteer') return redirect('supervisors_report_investigation') # return render(request, 'supervisors/supervisors_users_list.html', { # 'areas': AdministrationArea.get_root_nodes(), # 'status': 'users', # }) @login_required @superuser_required def supervisors_users_by_status(request, user_status): if user_status not in ['volunteer', 'podd', 'livestock', 'public-health', 'additional-volunteer', 'additional-volunteer-dodd']: raise Http404 querystring = get_querystring_filter_user_status({}, user_status) return render(request, 'supervisors/supervisors_users_list.html', { 'status': user_status, 'users': User.objects.filter(**querystring).order_by('username'), }) def supervisors_export_users_excel_to_authorities(request): return export_excel_users_to_create_authorities() @login_required @superuser_required def supervisors_authorities(request): success = None error = None if request.method == 'POST': file = request.FILES.get('file') if file: success = import_authorities_excel(file) if success: messages.success(request, u'สร้างองค์กรใหม่สำเร็จ') else: messages.error(request, u'ไม่สามารถสร้างองค์กรใหม่สำเร็จ ไฟล์ไม่ถูกต้อง') return render(request, 'supervisors/supervisors_authorities_list.html', { 'authorities': Authority.objects.order_by('code'), }) @login_required @superuser_required def supervisors_new_authorities(request): response = {} if request.method == 'POST': file = request.FILES.get('file') if file: return import_and_excel_users_to_create_authorities(file) return HttpResponse('False') @login_required @superuser_required def supervisors_authorities_print_invitation_code(request): return print_invite_code_authorities() @login_required @superuser_required def supervisors_authorities_edit(request, authority_id): authority = get_object_or_404(Authority, id=authority_id) if request.method == 'POST': form = SupervisorsAuthorityForm(request.POST, instance=authority) if form.is_valid(): form.save() messages.success(request, u'แก้ไขข้อมูลเรียบร้อยแล้ว') else: form = SupervisorsAuthorityForm(instance=authority) return render(request, 'supervisors/supervisors_authorities_form.html', { 'authority': authority, 'form': form, }) @login_required 
@superuser_required def supervisors_users_by_area(request, area_id): return redirect('supervisors_users_by_area_and_status', user_status='volunteer', area_id=area_id) # area = get_object_or_404(AdministrationArea, id=area_id) # return render(request, 'supervisors/supervisors_users_list.html', { # 'areas': [area], # 'selected_area': area, # 'status': 'users', # }) @login_required @superuser_required def supervisors_users_by_area_and_status(request, user_status, area_id): if user_status not in ['volunteer', 'podd', 'livestock', 'public-health']: raise Http404 area = get_object_or_404(AdministrationArea, id=area_id) querystring = { 'groups__groupadministrationarea__administration_area': area, 'groups__type': GROUP_WORKING_TYPE_ADMINSTRATION_AREA, } querystring = get_querystring_filter_user_status(querystring, user_status) return render(request, 'supervisors/supervisors_users_list.html', { 'areas': [area], 'selected_area': area, 'status': user_status, 'users': User.objects.filter(**querystring).order_by('username'), }) @login_required @superuser_required def supervisors_users_edit(request, user_id): user = get_object_or_404(User, id=user_id) if request.method == 'POST': form = SupervisorsUserForm(request.POST, instance=user) if form.is_valid(): form.save(created_by=request.user) messages.success(request, u'แก้ไขข้อมูลเรียบร้อยแล้ว') else: form = SupervisorsUserForm(instance=user) return render(request, 'supervisors/supervisors_users_form.html', { 'user': user, 'form': form, }) @login_required @superuser_required def supervisors_logs_reports(request): logs = list_for_content_type(Report) paginator = Paginator(logs, 25) page = request.GET.get('page') try: logs = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. logs = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. logs = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_logs_reports.html', { 'logs': logs }) @login_required @superuser_required def supervisors_logs_reports_by_report(request, report_id): report = get_object_or_404(Report, pk=report_id) logs = list_for_object(report) paginator = Paginator(logs, 25) page = request.GET.get('page') try: logs = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. logs = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. logs = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_logs_reports.html', { 'logs': logs, 'item': report, 'log_header': u'Report #%d' % report.id, }) @login_required @superuser_required def supervisors_logs_reports_by_user(request, user_id): user = get_object_or_404(User, pk=user_id) logs = list_for_user(user) paginator = Paginator(logs, 25) page = request.GET.get('page') try: logs = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. logs = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. 
logs = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_logs_reports.html', { 'logs': logs, 'item': user, 'log_header': u'User %s' % user.username, }) @login_required @superuser_required def supervisors_logs_users(request): logs = list_for_content_type(User) paginator = Paginator(logs, 25) page = request.GET.get('page') try: logs = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. logs = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. logs = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_logs_users.html', { 'logs': logs }) @login_required @superuser_required def supervisors_logs_user(request, user_id): user = get_object_or_404(User, pk=user_id) logs = list_for_object(user) paginator = Paginator(logs, 25) page = request.GET.get('page') try: logs = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. logs = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. logs = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_logs_users.html', { 'logs': logs, 'item': user, 'log_header': u'%s' % user.username, }) @login_required # @superuser_required def supervisors_report_investigation_create(request): if request.method == 'POST': form = SupervisorsReportInvestigationForm(request.POST, request.FILES) if form.is_valid(): investigation = ReportInvestigation( domain=form.cleaned_data['report'].domain, report=form.cleaned_data['report'], note=form.cleaned_data['note'], investigation_date=form.cleaned_data['investigation_date'], result=form.cleaned_data['result'], file=form.cleaned_data['file'], created_by=request.user, updated_by=request.user ) investigation.save() messages.success(request, u'เพิ่มรายการสืบสวนโรคสำเร็จ') return redirect('supervisors_report_investigation') else: form = SupervisorsReportInvestigationForm() return render(request, 'supervisors/supervisors_report_investigation_form.html', { 'form': form, }) @login_required # @superuser_required def supervisors_report_investigation_edit(request, investigation_id): investigation = get_object_or_404(ReportInvestigation, id=investigation_id) if request.method == 'POST': form = SupervisorsReportInvestigationForm(request.POST, request.FILES) if form.is_valid(): investigation.report = form.cleaned_data['report'] investigation.note = form.cleaned_data['note'] investigation.investigation_date = form.cleaned_data['investigation_date'] investigation.result =form.cleaned_data['result'] if form.cleaned_data['file']: investigation.file = form.cleaned_data['file'] investigation.updated_by = request.user investigation.save() messages.success(request, u'แก้ไขการสืบสวนโรค #%s สำเร็จ' % investigation.id) return redirect('supervisors_report_investigation') else: form = SupervisorsReportInvestigationForm(initial={ 'report': investigation.report.id, 'note': investigation.note, 'investigation_date': investigation.investigation_date, 'result': 1 if investigation.result else 0, }) return render(request, 'supervisors/supervisors_report_investigation_form.html', { 'form': form, 'file': investigation.file, 'investigation': investigation, 'edit': True }) @login_required # @superuser_required def supervisors_report_investigation(request): investigation_list = ReportInvestigation.objects.order_by('-investigation_date') paginator = Paginator(investigation_list, 100) page = 
request.GET.get('page') try: investigations = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. investigations = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. investigations = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_report_investigation_list.html', { 'investigations': investigations }) @login_required # @superuser_required def supervisors_report_investigation_delete(request, investigation_id): investigation = get_object_or_404(ReportInvestigation, id=investigation_id) investigation.delete() messages.success(request, u'ลบรายการสำเร็จ') return redirect('supervisors_report_investigation') @login_required # @superuser_required def supervisors_report_laboratory(request): case_list = ReportLaboratoryCase.objects.order_by('-id') paginator = Paginator(case_list, 100) page = request.GET.get('page') try: cases = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. cases = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. cases = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_report_lab_case_list.html', { 'cases': cases }) @login_required # @superuser_required def supervisors_report_laboratory_create(request): if request.method == 'POST': data = request.POST.copy() data['created_by'] = request.user.id data['updated_by'] = request.user.id form = SupervisorsReportLaboratoryCaseForm(data) if form.is_valid(): instance = form.save() messages.success(request, u'เพิ่มรายการผลแลปสำเร็จ') return redirect('supervisors_report_laboratory_edit', instance.id) else: form = SupervisorsReportLaboratoryCaseForm() return render(request, 'supervisors/supervisors_report_lab_case_form.html', { 'form': form, }) @login_required # @superuser_required def supervisors_report_laboratory_edit(request, case_id): case = get_object_or_404(ReportLaboratoryCase, id=case_id) if request.method == 'POST': data = request.POST.copy() data['created_by'] = request.user.id data['updated_by'] = request.user.id form = SupervisorsReportLaboratoryCaseForm(data, instance=case) if form.is_valid(): form.save() messages.success(request, u'แก้ไขผลแลป #%s สำเร็จ' % case.id) return redirect('supervisors_report_laboratory') else: form = SupervisorsReportLaboratoryCaseForm(instance=case) items = case.laboratory_items.order_by('sample_no') files = case.laboratory_files.order_by('id') causes = AnimalLaboratoryCause.objects.order_by('name') import json json_cause = json.dumps((AnimalLaboratoryCauseSerializer(causes, many=True).data)) return render(request, 'supervisors/supervisors_report_lab_case_form.html', { 'case': case, 'form': form, 'items': items, 'files': files, 'causes': causes, 'json_cause': json_cause, 'edit': True }) @login_required # @superuser_required def supervisors_report_laboratory_delete(request, case_id): case = get_object_or_404(ReportLaboratoryCase, id=case_id) case.delete() messages.success(request, u'ลบรายการสำเร็จ') return redirect('supervisors_report_laboratory') @login_required # @superuser_required def supervisors_report_laboratory_cause(request): cause_list = AnimalLaboratoryCause.objects.order_by('name') paginator = Paginator(cause_list, 100) page = request.GET.get('page') try: causes = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. 
causes = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. causes = paginator.page(paginator.num_pages) return render(request, 'supervisors/supervisors_report_lab_cause_list.html', { 'causes': causes }) @login_required # @superuser_required def supervisors_report_laboratory_cause_delete(request, cause_id): cause = get_object_or_404(AnimalLaboratoryCause, id=cause_id) cause.delete() messages.success(request, u'ลบรายการสำเร็จ') return redirect('supervisors_report_laboratory_cause')
python
#!/usr/bin/env python # coding: utf-8 from xumm.resource import XummResource from typing import List class UserTokenValidity(XummResource): """ Attributes: model_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ required = { 'user_token': True, 'active': True, 'token_issued': True, 'token_expiration': True } model_types = { 'user_token': str, 'active': bool, 'token_issued': int, 'token_expiration': int } attribute_map = { 'user_token': 'user_token', 'active': 'active', 'token_issued': 'token_issued', 'token_expiration': 'token_expiration' } def refresh_from(cls, **kwargs): """Returns the dict as a model :param kwargs: A dict. :type: dict :return: The UserToken of this UserToken. # noqa: E501 :rtype: UserToken """ cls.sanity_check(kwargs) cls._user_token = None cls._active = None cls._token_issued = None cls._token_expiration = None cls.user_token = kwargs['user_token'] cls.active = kwargs['active'] cls.token_issued = kwargs['token_issued'] cls.token_expiration = kwargs['token_expiration'] return cls @property def user_token(cls) -> str: """Gets the user_token of this UserTokenValidity. :return: The user_token of this UserTokenValidity. :rtype: str """ return cls._user_token @user_token.setter def user_token(cls, user_token: str): """Sets the user_token of this UserTokenValidity. :param user_token: The user_token of this UserTokenValidity. :type user_token: str """ if user_token is None: raise ValueError("Invalid value for `user_token`, must not be `None`") # noqa: E501 cls._user_token = user_token @property def active(cls) -> str: """Gets the active of this UserTokenValidity. :return: The active of this UserTokenValidity. :rtype: str """ return cls._active @active.setter def active(cls, active: str): """Sets the active of this UserTokenValidity. :param active: The active of this UserTokenValidity. :type active: str """ if active is None: raise ValueError("Invalid value for `active`, must not be `None`") # noqa: E501 cls._active = active @property def token_issued(cls) -> int: """Gets the token_issued of this UserTokenValidity. :return: The token_issued of this UserTokenValidity. :rtype: int """ return cls._token_issued @token_issued.setter def token_issued(cls, token_issued: int): """Sets the token_issued of this UserTokenValidity. :param token_issued: The token_issued of this UserTokenValidity. :type token_issued: int """ if token_issued is None: raise ValueError("Invalid value for `token_issued`, must not be `None`") # noqa: E501 cls._token_issued = token_issued @property def token_expiration(cls) -> int: """Gets the token_expiration of this UserTokenValidity. :return: The token_expiration of this UserTokenValidity. :rtype: int """ return cls._token_expiration @token_expiration.setter def token_expiration(cls, token_expiration: int): """Sets the token_expiration of this UserTokenValidity. :param token_expiration: The token_expiration of this UserTokenValidity. # noqa: E501 :type token_expiration: int """ if token_expiration is None: raise ValueError("Invalid value for `token_expiration`, must not be `None`") # noqa: E501 cls._token_expiration = token_expiration class UserTokenResponse(XummResource): """ Attributes: model_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" required = { 'tokens': True, } model_types = { 'tokens': list, } attribute_map = { 'tokens': 'tokens', } def refresh_from(cls, **kwargs): """Returns the dict as a model :param dikt: A dict. :type: dict :return: The UserTokenResponse of this UserTokenResponse. # noqa: E501 :rtype: UserTokenResponse """ cls.sanity_check(kwargs) cls._tokens = None cls.tokens = [UserTokenValidity(**t) for t in kwargs['tokens']] @property def tokens(cls) -> List[UserTokenValidity]: """Gets the tokens of this UserTokenResponse. :return: The tokens of this UserTokenResponse. :rtype: List[UserTokenValidity] """ return cls._tokens @tokens.setter def tokens(cls, tokens: List[UserTokenValidity]): """Sets the tokens of this UserTokenResponse. :param tokens: The tokens of this UserTokenResponse. :type tokens: List[UserTokenValidity] """ if tokens is None: raise ValueError("Invalid value for `tokens`, must not be `None`") # noqa: E501 cls._tokens = tokens
python
#!/usr/bin/python import unittest import GT3 from tests.ShotBase import * from matplotlib.axes._axes import Axes from matplotlib.pyplot import Figure class CommonFunctions(object): """Tests to see if a shot has the expected attributes typical for a fully run shot.""" def test_gt3_has_core(cls): cls.assertTrue(hasattr(cls.plasma, "core")) def test_gt3_has_iol(cls): cls.assertTrue(hasattr(cls.plasma, "iol")) def test_gt3_has_nbi(cls): cls.assertTrue(hasattr(cls.plasma, "nbi")) def test_gt3_has_rtrans(cls): cls.assertTrue(hasattr(cls.plasma, "rtrans")) class SingleNullRun(SingleLowerNullTest, CommonFunctions): @classmethod def setUpClass(cls): super(SingleNullRun, cls).setUpClass() cls.plasma.run_radial_transport() class DoubleNullRun(DoubleNullTest, CommonFunctions): @classmethod def setUpClass(cls): super(DoubleNullRun, cls).setUpClass() cls.plasma.run_radial_transport() class NegativeTriangularityRun(NegativeTriangularityTest, CommonFunctions): @classmethod def setUpClass(cls): super(NegativeTriangularityRun, cls).setUpClass() cls.plasma.run_radial_transport() class RunModificationTest(SingleLowerNullTest): def test_sol_exists(self): self.plasma.run_SOL() self.assertTrue(hasattr(self.plasma, "sol")) self.assertIsInstance(self.plasma.sol, GT3.Sol) def test_iol_exists(self): self.plasma.run_IOL() self.assertTrue(hasattr(self.plasma, "iol")) self.assertIsInstance(self.plasma.iol, GT3.IOL) def test_nbi_exists(self): self.plasma.run_NBI() self.assertTrue(hasattr(self.plasma, "nbi")) self.assertIsInstance(self.plasma.nbi, GT3.BeamDeposition) def test_rtrans_exists(self): self.plasma.run_radial_transport() self.assertTrue(hasattr(self.plasma, "rtrans")) self.assertIsInstance(self.plasma.rtrans, GT3.RadialTransport) class PlotCoreTest(DoubleNullTest): @classmethod def setUpClass(cls): super(PlotCoreTest, cls).setUpClass() import matplotlib.pyplot as plt cls.plt = plt cls.plasma.run_radial_transport() cls.plt.ion() def plot_tester(self, plotter, edge=False): import inspect args = inspect.getfullargspec(plotter) if 'logPlot' in args and 'edge' in args: fig = plotter(logPlot=True, edge=True) elif 'logPlot' in args: fig = plotter(logPlot=True) elif 'edge' in args: fig = plotter(edge=True) else: fig = plotter() self.assertIsInstance(fig, (Figure, Axes)) self.plt.close(fig.get_figure()) def test_plot_core(self): """ Plot all plots in the Core module """ plot_vars = [self.plasma.core.n.i.fsa.plot, self.plasma.core.n.e.fsa.plot, self.plasma.core.n.n.s.plot2D, self.plasma.core.n.n.t.plot2D, self.plasma.core.n.n.tot.plot2D, self.plasma.core.T.i.ev.plot2D, self.plasma.core.T.i.J.plot2D, self.plasma.core.T.i.kev.plot2D, self.plasma.core.T.e.ev.plot2D, self.plasma.core.T.i.ev.L.plot2D, self.plasma.core.T.e.J.L.plot2D, self.plasma.core.n.i.L.plot2D, self.plasma.core.n.n.s.L.plot2D, self.plasma.core.n.n.tot.L.plot2D, self.plasma.core.v.D.pol.plot2D, self.plasma.core.v.C.tor.plot2D] for v in plot_vars: self.plot_tester(v) def test_plot_beams(self): """ Plot all plots in the NBI module """ plot_vars = [self.plasma.nbi.combined_beam_src_dens_lost.Snbi.plot, self.plasma.nbi.combined_beam_src_dens_lost.Qnbi.plot, self.plasma.nbi.combined_beam_src_dens_lost.Mnbi.plot, self.plasma.nbi.combined_beam_src_kept.Snbi.plot, self.plasma.nbi.combined_beam_src_kept.Qnbi.plot, self.plasma.nbi.combined_beam_src_kept.Mnbi.plot] for v in plot_vars: self.plot_tester(v) def test_plot_rtrans(self): """ Plot all plots in the Radial Transport module """ plot_vars = [self.plasma.rtrans.gamma.D.diff.plot, 
self.plasma.rtrans.gamma.D.int.plot, self.plasma.rtrans.gamma.e.int.plot, self.plasma.rtrans.gamma.C.int.plot, self.plasma.rtrans.gamma.plot_D, self.plasma.rtrans.gamma.plot_C, self.plasma.rtrans.plot_Q_sources, self.plasma.rtrans.plot_S_sources, self.plasma.rtrans.plot_chi_terms] for v in plot_vars: self.plot_tester(v) @classmethod def tearDownClass(cls): cls.plt.clf() cls.plt.close() class PlotIOLTest(DoubleNullTest): @classmethod def setUpClass(cls): super(PlotIOLTest, cls).setUpClass() import matplotlib.pyplot as plt cls.plt = plt cls.plasma.run_IOL() def test_plot_iol_F_i(self): self.plasma.iol.plot_F_i(edge=True) self.assertIsInstance(self.plasma.iol.plot_F_i(), Axes) class GT3TestClassTest(unittest.TestCase, CommonFunctions): @classmethod def setUpClass(cls): super(GT3TestClassTest, cls).setUpClass() from GT3 import gt3 from GT3.TestBase.testbase import TestClass cls.plasma = gt3(preparedInput=TestClass()) cls.plasma.run_radial_transport() TestClass().print_summary() if __name__ == '__main__': unittest.main()
python
import datetime


def current_year():
    """current_year

    This method is used to get the current year.
    """
    return datetime.date.today().year
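# Minimal usage sketch: stamp the current year into a copyright line.
if __name__ == '__main__':
    print("Copyright (c) {}".format(current_year()))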
python
import streamlit as st
import pandas as pd

st.title("File uploader example")

st.write(
    """
    This is an example of how to use a file uploader. Here, we are simply going
    to upload a CSV file and display it. It should serve as a minimal example
    for you to jump off and do more complex things.
    """
)

st.header("Upload CSV")

csv_file = st.file_uploader(
    label="Upload a CSV file", type=["csv"], encoding="utf-8"
)

if csv_file is not None:
    data = pd.read_csv(csv_file)
    st.dataframe(data)

st.header("Upload Images")

st.write(
    """
    Below is another example, where we upload an image and display it.
    """
)

image_file = st.file_uploader(
    label="Upload an image", type=["png", "jpg", "tiff"], encoding=None
)

if image_file is not None:
    st.image(image_file)
""" ASGI config for scheduler project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ from os import environ from django.core.asgi import get_asgi_application # type: ignore environ.setdefault("DJANGO_SETTINGS_MODULE", "scheduler.settings") application = get_asgi_application()
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

import json
import datetime
import time
import os
import boto3
from datetime import timedelta
import random


# Tries to find an existing or free game session and return the IP and Port to the client
def lambda_handler(event, context):

    sqs_client = boto3.client('sqs')

    # 1. Check SQS Queue if there are sessions available

    # Try to receive message from SQS queue
    try:
        response = sqs_client.receive_message(
            QueueUrl=os.environ['SQS_QUEUE_URL'],
            MaxNumberOfMessages=1,
            VisibilityTimeout=15,
            WaitTimeSeconds=1
        )

        message = response['Messages'][0]
        print(message)
        receipt_handle = message['ReceiptHandle']
        connection_info = message['Body']
        print(receipt_handle)
        print("got session: " + connection_info)

        connection_splitted = connection_info.split(":")
        ip = connection_splitted[0]
        port = connection_splitted[1]
        print("IP: " + ip + " PORT: " + port)

        # Delete received message from queue
        sqs_client.delete_message(
            QueueUrl=os.environ['SQS_QUEUE_URL'],
            ReceiptHandle=receipt_handle
        )

        # Return result to client
        return {
            "statusCode": 200,
            "body": json.dumps({'publicIP': ip, 'port': port})
        }
    except:
        print("Failed getting a session from the SQS queue, will try claiming a new one")

    # 2. If not, try to claim a new session through FleetIQ
    client = boto3.client('gamelift')
    response = client.claim_game_server(
        GameServerGroupName='ExampleGameServerGroup',
    )
    print(response)
    connection_info = response["GameServer"]["ConnectionInfo"]

    try:
        connection_splitted = connection_info.split(":")
        ip = connection_splitted[0]
        port = connection_splitted[1]
        print("IP: " + ip + " PORT: " + port)

        # Put a ticket in to the SQS for the next player (we match 1-v-1 sessions)
        response = sqs_client.send_message(
            QueueUrl=os.environ['SQS_QUEUE_URL'],
            MessageBody=(connection_info)
        )
        print(response['MessageId'])

        return {
            "statusCode": 200,
            "body": json.dumps({'publicIP': ip, 'port': port})
        }
    except:
        print("Failed getting a new session")

    # 3. Failed to find a server
    return {
        "statusCode": 500,
        "body": json.dumps({'failed': 'couldnt find a free server spot'})
    }
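
# Hypothetical local smoke test for the handler above (not part of the original
# source): a minimal sketch that assumes real AWS credentials, an existing SQS
# queue at the placeholder URL below, and a FleetIQ game server group named
# 'ExampleGameServerGroup' in the account. Without those, the boto3 calls fail.
if __name__ == "__main__":
    os.environ.setdefault(
        "SQS_QUEUE_URL",
        "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue"  # assumed placeholder URL
    )
    print(lambda_handler(event={}, context=None))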
import anachronos

from test.runner import http


class PingTest(anachronos.TestCase):

    def test_ping(self):
        res = http.get("/ping")

        self.assertEqual(200, res.status_code)
        self.assertEqual("Pong!", res.text)
from setuptools import setup

setup(
    name="minigit",
    version="1.0",
    packages=["minigit"],
    entry_points={"console_scripts": ["minigit = minigit.cli:main"]},
)
__author__ = 'cvl'


class Domain_model():
    def __init__(self, json_dict):
        self.free_domains = json_dict['free_domains']
        self.paid_domains = json_dict['paid_domains']
import pandas as pd


class CurrentPositionStatusSettler:
    def __init__(self, calculation_source):
        self.__calculation_source = calculation_source

    def settle_current_position_status(self) -> pd.DataFrame:
        self.__calculation_source = self.__calculation_source[
            ~self.__calculation_source['status'].isin(['sold', 'delivered'])
        ].copy()
        self.__calculation_source.loc[:, 'status'] = 'holding'

        settled_current_position_status = self.__calculation_source[
            [
                'contract',
                'delivery_month',
                'value',
                'cost',
                'close_price',
                'status'
            ]
        ]
        settled_current_position_status = settled_current_position_status.rename(
            index=int, columns={'close_price': 'close_price_of_previous_trading_date'}
        )
        return settled_current_position_status.reset_index(drop=True)
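
# Minimal usage sketch for CurrentPositionStatusSettler (illustration only, not
# part of the original module). The sample contracts and values below are
# invented; only the column names come from the class above. Rows whose status
# is 'sold' or 'delivered' are dropped, the rest are marked 'holding', and
# 'close_price' is renamed to 'close_price_of_previous_trading_date'.
if __name__ == "__main__":
    sample_positions = pd.DataFrame({
        'contract': ['CU2101', 'AL2102', 'ZN2103'],        # hypothetical contract codes
        'delivery_month': ['2021-01', '2021-02', '2021-03'],
        'value': [100000.0, 80000.0, 60000.0],
        'cost': [98000.0, 81000.0, 59000.0],
        'close_price': [49500.0, 15200.0, 21400.0],
        'status': ['open', 'sold', 'open'],
    })

    settler = CurrentPositionStatusSettler(sample_positions)
    print(settler.settle_current_position_status())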
'''
Stanley Bak
Python F-16 Thrust function
'''

import numpy as np
import tensorflow as tf

from util import fix, fix_tf


def thrust(power, alt, rmach):
    'thrust lookup-table version'

    a = np.array([[1060, 670, 880, 1140, 1500, 1860], \
                  [635, 425, 690, 1010, 1330, 1700], \
                  [60, 25, 345, 755, 1130, 1525], \
                  [-1020, -170, -300, 350, 910, 1360], \
                  [-2700, -1900, -1300, -247, 600, 1100], \
                  [-3600, -1400, -595, -342, -200, 700]], dtype=float).T

    b = np.array([[12680, 9150, 6200, 3950, 2450, 1400], \
                  [12680, 9150, 6313, 4040, 2470, 1400], \
                  [12610, 9312, 6610, 4290, 2600, 1560], \
                  [12640, 9839, 7090, 4660, 2840, 1660], \
                  [12390, 10176, 7750, 5320, 3250, 1930], \
                  [11680, 9848, 8050, 6100, 3800, 2310]], dtype=float).T

    c = np.array([[20000, 15000, 10800, 7000, 4000, 2500], \
                  [21420, 15700, 11225, 7323, 4435, 2600], \
                  [22700, 16860, 12250, 8154, 5000, 2835], \
                  [24240, 18910, 13760, 9285, 5700, 3215], \
                  [26070, 21075, 15975, 11115, 6860, 3950], \
                  [28886, 23319, 18300, 13484, 8642, 5057]], dtype=float).T

    if alt < 0:
        alt = 0.01 # uh, why not 0?

    h = .0001 * alt

    i = fix(h)

    if i >= 5:
        i = 4

    dh = h - i
    rm = 5 * rmach
    m = fix(rm)

    if m >= 5:
        m = 4
    elif m <= 0:
        m = 0

    dm = rm - m
    cdh = 1 - dh

    # do not increment these, since python is 0-indexed while matlab is 1-indexed
    #i = i + 1
    #m = m + 1

    s = b[i, m] * cdh + b[i + 1, m] * dh
    t = b[i, m + 1] * cdh + b[i + 1, m + 1] * dh
    tmil = s + (t - s) * dm

    if power < 50:
        s = a[i, m] * cdh + a[i + 1, m] * dh
        t = a[i, m + 1] * cdh + a[i + 1, m + 1] * dh
        tidl = s + (t - s) * dm
        thrst = tidl + (tmil - tidl) * power * .02
    else:
        s = c[i, m] * cdh + c[i + 1, m] * dh
        t = c[i, m + 1] * cdh + c[i + 1, m + 1] * dh
        tmax = s + (t - s) * dm
        thrst = tmil + (tmax - tmil) * (power - 50) * .02

    return thrst


def thrust_tf(power, alt, rmach):
    with tf.name_scope("thrust"):
        a = tf.constant(np.array([[1060, 670, 880, 1140, 1500, 1860], \
                                  [635, 425, 690, 1010, 1330, 1700], \
                                  [60, 25, 345, 755, 1130, 1525], \
                                  [-1020, -170, -300, 350, 910, 1360], \
                                  [-2700, -1900, -1300, -247, 600, 1100], \
                                  [-3600, -1400, -595, -342, -200, 700]], dtype=np.float32).T)

        b = tf.constant(np.array([[12680, 9150, 6200, 3950, 2450, 1400], \
                                  [12680, 9150, 6313, 4040, 2470, 1400], \
                                  [12610, 9312, 6610, 4290, 2600, 1560], \
                                  [12640, 9839, 7090, 4660, 2840, 1660], \
                                  [12390, 10176, 7750, 5320, 3250, 1930], \
                                  [11680, 9848, 8050, 6100, 3800, 2310]], dtype=np.float32).T)

        c = tf.constant(np.array([[20000, 15000, 10800, 7000, 4000, 2500], \
                                  [21420, 15700, 11225, 7323, 4435, 2600], \
                                  [22700, 16860, 12250, 8154, 5000, 2835], \
                                  [24240, 18910, 13760, 9285, 5700, 3215], \
                                  [26070, 21075, 15975, 11115, 6860, 3950], \
                                  [28886, 23319, 18300, 13484, 8642, 5057]], dtype=np.float32).T)

    with tf.name_scope("thrust"):
        alt = tf.cond(tf.less(alt, 0), lambda: 0.01, lambda: alt)

        h = .0001 * alt

        i = fix_tf(h)
        i = tf.cond(tf.greater_equal(i, 5.0), lambda: 4.0, lambda: i)

        dh = h - i
        rm = 5 * rmach
        m = fix_tf(rm)
        m = tf.clip_by_value(m, 0, 4)

        dm = rm - m
        cdh = 1 - dh

        # do not increment these, since python is 0-indexed while matlab is 1-indexed
        #i = i + 1
        #m = m + 1
        i = tf.cast(i, tf.int32)
        m = tf.cast(m, tf.int32)

        s = b[i, m] * cdh + b[i + 1, m] * dh
        t = b[i, m + 1] * cdh + b[i + 1, m + 1] * dh
        tmil = s + (t - s) * dm

        def f1():
            s = a[i, m] * cdh + a[i + 1, m] * dh
            t = a[i, m + 1] * cdh + a[i + 1, m + 1] * dh
            tidl = s + (t - s) * dm
            thrst = tidl + (tmil - tidl) * power * .02
            return thrst

        def f2():
            s = c[i, m] * cdh + c[i + 1, m] * dh
            t = c[i, m + 1] * cdh + c[i + 1, m + 1] * dh
            tmax = s + (t - s) * dm
            thrst = tmil + (tmax - tmil) * (power - 50) * .02
            return thrst

        thrst = tf.cond(tf.less(power, 50), f1, f2)

    return thrst


def test_thrust_tf():
    def template(power, alt, rmach):
        power_tf = tf.constant(power, dtype=tf.float32)
        alt_tf = tf.constant(alt, dtype=tf.float32)
        rmach_tf = tf.constant(rmach, dtype=tf.float32)

        with tf.Session() as sess:
            print(sess.run(thrust_tf(power_tf, alt_tf, rmach_tf)))

        print(thrust(power, alt, rmach))

    # alt < 0, alt < 500, alt > 500
    # rmach < 0.8, rmach > 0.8
    # power < 50, power > 50
    for a in (-1, 499, 501):
        for r in (0.79, 0.81):
            for p in (49, 51):
                template(p, a, r)


if __name__ == "__main__":
    test_thrust_tf()
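
# Usage sketch for the lookup-table thrust model above (illustrative only; this
# block is not part of the original module and the operating points are
# arbitrary). `power` is a percentage command: values below 50 interpolate
# idle-to-military thrust, values at or above 50 interpolate military-to-maximum
# thrust, with bilinear interpolation over altitude and Mach number.
def _demo_thrust_lookup():
    for pwr in (20, 50, 90):
        print(pwr, thrust(pwr, 10000, 0.6))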