file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
onewiresmoketest.go | // Copyright 2016 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package onewiresmoketest is leveraged by periph-smoketest to verify that a
// 1-wire bus search returns two devices, that a ds18b20 temperature sensor can
// be read, and that a ds2431 eeprom can be written and read.
//
// This assumes the presence of the periph-tester board, which includes these
// two devices.
// See https://github.com/periph/periph-tester
package onewiresmoketest
import (
"errors"
"flag"
"fmt"
"log"
"math/rand"
"time"
"periph.io/x/conn/v3/i2c/i2creg"
"periph.io/x/conn/v3/onewire"
"periph.io/x/conn/v3/physic"
"periph.io/x/devices/v3/ds18b20"
"periph.io/x/devices/v3/ds248x"
)
// SmokeTest is imported by periph-smoketest.
type SmokeTest struct {
}
func (s *SmokeTest) String() string {
return s.Name()
}
// Name implements the SmokeTest interface.
func (s *SmokeTest) Name() string {
return "onewire-testboard"
}
// Description implements the SmokeTest interface.
func (s *SmokeTest) Description() string {
return "Tests DS18B20 temp sensor and DS2431 EEPROM on periph-tester board"
}
// Run implements the SmokeTest interface.
func (s *SmokeTest) Run(f *flag.FlagSet, args []string) error {
busName := f.String("i2cbus", "", "I²C bus name for the DS2483 1-wire interface chip")
seed := f.Int64("seed", 0, "random number seed, default is to use the time")
if err := f.Parse(args); err != nil {
return err
}
if f.NArg() != 0 {
f.Usage()
return errors.New("unrecognized arguments")
}
// Open the i2c bus where the DS2483 is located.
i2cBus, err := i2creg.Open(*busName)
if err != nil {
return fmt.Errorf("cannot open I²C bus %s: %v", *busName, err)
}
defer i2cBus.Close()
// Open the ds2483 one-wire interface chip.
onewireBus, err := ds248x.New(i2cBus, 0x18, &ds248x.DefaultOpts)
if err != nil {
return fmt.Errorf("cannot open DS248x: %v", err)
}
// Init rand.
if *seed == 0 {
*seed = time.Now().UnixNano()
}
rand.Seed(*seed)
log.Printf("%s: random number seed %d", s, *seed)
// Run the tests.
addrs, err := s.search(onewireBus)
if err != nil {
return err
}
if err := s.ds18b20(onewireBus, addrs[0]); err != nil {
return err
}
return s.eeprom(onewireBus, addrs[1])
}
// search performs a search cycle on the bus and verifies that the two expected devices
// are actually found. It returns the two device addresses, ds18b20 first.
func (s *SmokeTest) search(bus onewire.Bus) ([]onewire.Address, error) {
addrs, err := bus.Search(false)
if err != nil {
return nil, fmt.Errorf("search failed: %v", err)
}
if len(addrs) != 2 {
return nil, fmt.Errorf("search expected 2 devices, found %d", len(addrs))
}
// Ensure we found devices with the correct family code and return them.
if addrs[1]&0xff == 0x28 && addrs[0]&0xff == 0x2D {
// Swap the order so the DS18b20 is first.
addrs[0], addrs[1] = addrs[1], addrs[0]
}
if addrs[0]&0xff == 0x28 && addrs[1]&0xff == 0x2D {
log.Printf("%s: found 2 devices %#x %#x", s, addrs[0], addrs[1])
return addrs, nil
}
return nil, fmt.Errorf("search expected device families 0x28 and 0x2D, found: %#x %#x", addrs[0], addrs[1])
}
// ds18b20 tests a Maxim DS18B20 (or MAX31820) 1-wire temperature sensor attached to the
// 1-wire bus. Such a chip is included on the periph-tester board.
func (s *SmokeTest) ds18b20(bus onewire.Bus, addr onewire.Address) error {
dev, err := ds18b20.New(bus, addr, 10)
if err != nil {
return err
}
e := physic.Env{}
if err := dev.Sense(&e); err != nil {
return err
}
if e.Temperature <= physic.ZeroCelsius || e.Temperature > 50*physic.Celsius+physic.ZeroCelsius {
return fmt.Errorf("ds18b20: expected temperature in the 0°C..50°C range, got %s", e.Temperature)
}
log.Printf("%s: temperature is %s", s, e.Temperature)
return nil
}
// eeprom tests a ds2431 1Kbit 1-wire EEPROM.
// Such a chip is included on the periph-tester board.
//
// The test currently only writes and reads the scratchpad memory.
// A test of the eeprom itself may be useful if a proper driver is written
// someday. But it's not like that would add any significant additional
// test coverage...
//
// Datasheet: http://datasheets.maximintegrated.com/en/ds/DS2431.pdf
func (s *SmokeTest) eeprom(bus onewire.Bus, addr onewire.Address) error {
d := onewire.Dev{Bus: bus, Addr: addr}
// Start by writing some data to the scratchpad
var data [8]byte
for i := range data {
data[i] = byte(rand.Intn(256))
}
var buf [13]byte // cmd, target-addr-low, target-addr-hi, data[8], crc16
buf[0] = 0x0f // write scratchpad
copy(buf[3:11], data[:])
if err := d.Tx(buf[:], nil); err != nil {
return fmt.Errorf("eeprom: error on the first scratchpad write")
}
// Read the scratchpad back
if err := d.Tx([]byte{0xaa}, buf[:]); err != nil {
return fmt.Errorf("eeprom: error reading the scratchpad")
}
for i := range data {
if data[i] != buf[i+3] {
| log.Printf("%s: eeprom test successful", s)
return nil
}
| return fmt.Errorf("eeprom: scratchpad data byte %d mismatch, expected %#x got %#x",
i, data[i], buf[i+3])
}
}
|
utils.py | import os
import torch
import glob
import numpy as np
import scipy.sparse as sp
import yaml
from sklearn.preprocessing import StandardScaler
from shaDow.globals import git_rev, timestamp, Logger
from torch_scatter import scatter
from copy import deepcopy
from typing import List, Union
from shaDow import TRAIN, VALID, TEST
from shaDow.data_converter import convert2shaDow, to_undirected
def load_data(prefix, dataset, config_data, os_='linux'):
Logger.printf("Loading training data..")
prefix_l = prefix['local']
fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']
if not all(glob.glob(f"{prefix_l}/{dataset}/{f}") for f in fs_shadow):
convert2shaDow(dataset, prefix_l)
role = np.load(f"./{prefix_l}/{dataset}/split.npy", allow_pickle=True)
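# split.npy may store the role dict wrapped in a 0-d object ndarray (np.save); role[()] unwraps it back to a dict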
if type(role) == np.ndarray:
role = role[()]
else:
assert type(role) == dict
# role is used as index, which is required to be int64 (node_set won't take much mem anyways)
node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64),
VALID: np.asarray(role[VALID], dtype=np.int64),
TEST : np.asarray(role[TEST], dtype=np.int64)}
# load adj. If we want to convert to undirected, and the undirected adj has been stored as an external file,
# then we skip the conversion in the program and directly load the undirected adj.
bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},
VALID: {'indptr': None, 'indices': None, 'data': None},
TEST: {'indptr': None, 'indices': None, 'data': None}}
def fill_bin_adj_dict(mode_, split_, type_):
for d in ['indptr', 'indices', 'data']:
bin_adj_files[mode_][d] = f"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin"
if config_data['to_undirected']:
if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
adj_full = to_undirected(adj_full)
fill_bin_adj_dict(VALID, 'full', 'undirected')
fill_bin_adj_dict(TEST, 'full', 'undirected')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'undirected')
elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
adj_train = to_undirected(adj_train)
fill_bin_adj_dict(TRAIN, 'train', 'undirected')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
else:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
fill_bin_adj_dict(VALID, 'full', 'raw')
fill_bin_adj_dict(TEST, 'full', 'raw')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'raw')
else:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
fill_bin_adj_dict(TRAIN, 'train', 'raw')
bin_adj_files = validate_bin_file(bin_adj_files)
Logger.printf(f"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING", style="red")
label_full = np.load(f"./{prefix_l}/{dataset}/label_full.npy")
label_full = torch.from_numpy(label_full)
# ======= deal with feats =======
mode_norm = 'all' if config_data['transductive'] else 'train'
if config_data['norm_feat'] and os.path.isfile(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy"):
feats = np.load(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy")
Logger.printf(f"Loading '{mode_norm}'-normalized features", style='yellow')
else:
feats = np.load(f"./{prefix_l}/{dataset}/feat_full.npy")
if config_data['norm_feat']:
feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]
scaler = StandardScaler()
scaler.fit(feats_fit)
feats = scaler.transform(feats)
Logger.printf(f"Normalizing node features (mode = {mode_norm})", style="yellow")
else:
Logger.printf("Not normalizing node features", style="yellow")
feats = torch.from_numpy(feats.astype(np.float32, copy=False))
Logger.printf("Done loading training data..")
return {'adj_full' : adj_full,
'adj_train' : adj_train,
'feat_full' : feats,
'label_full': label_full,
'node_set' : node_set,
'bin_adj_files': bin_adj_files}
def parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):
# [config]
if args.configs is not None:
config_train = args.configs
else:
assert task in ['inference', 'postproc']
if task == 'inference':
if args.inference_configs is None:
assert not args.compute_complexity_only
dir_candy = args.inference_dir
else:
assert args.inference_dir is None and args.compute_complexity_only
dir_candy = None
config_train = args.inference_configs
else:
if args.postproc_dir is not None:
dir_candy = args.postproc_dir
else:
with open(args.postproc_configs) as f:
config_temp = yaml.load(f, Loader=yaml.FullLoader)
if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml
dir_candy = config_temp['dir_pred_mat'][0]
elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)
dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]
else:
raise NotImplementedError
if dir_candy is not None:
assert os.path.isdir(dir_candy)
f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]
assert len(f_yml) == 1
config_train = f"{dir_candy}/{f_yml[0]}"
with open(config_train) as f_config_train:
config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)
config_train_copy = deepcopy(config_train)
# [data]
config_data = {"to_undirected" : False,
"transductive" : False,
"norm_feat" : True}
config_data.update(config_train['data'])
# [arch]
arch_gnn = { # default values
"dim" : -1,
"aggr" : "sage",
"residue" : "none",
"pooling" : "center",
"loss" : "softmax",
"num_layers" : -1,
"act" : "I",
"heads" : -1,
"feature_augment" : "hops",
"feature_smoothen" : "none",
"label_smoothen" : "none", # label_smoothen is only considered if use_label != none
"ensemble_act" : "leakyrelu",
"branch_sharing" : False,
"use_label" : "none"
}
arch_gnn.update(config_train["architecture"])
assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']
assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']
assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']
assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']
assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']
if arch_gnn["feature_augment"] and arch_gnn["feature_augment"].lower() != "none":
arch_gnn["feature_augment"] = set(k for k in arch_gnn["feature_augment"].split("-"))
else:
arch_gnn['feature_augment'] = set()
# [params]
params_train = {
"lr" : 0.01,
"dropedge" : 0.0,
"ensemble_dropout" : "none"
}
params_train.update(config_train["hyperparameter"])
params_train["lr"] = float(params_train["lr"])
# [sampler]
sampler_preproc, sampler_train = [], []
for s in config_train['sampler']:
phase = s.pop('phase')
if phase == 'preprocess':
sampler_preproc.append(s)
elif phase == 'train':
sampler_train.append(s)
else:
raise NotImplementedError
batch_size = config_train["hyperparameter"]["batch_size"]
config_sampler_preproc = {"batch_size": batch_size, "configs": sampler_preproc}
config_sampler_train = {"batch_size": batch_size, "configs": sampler_train}
# add self-edges for certain archs. e.g., for GAT, there would be a divide-by-0 error in the grad without self-edges
if arch_gnn["aggr"] in ["gcn", "gat", "gatscat"]:
for sc in config_sampler_train["configs"]:
num_ens = [len(v) for k, v in sc.items() if k != 'method']
assert max(num_ens) == min(num_ens)
sc["add_self_edge"] = [True] * num_ens[0]
# [copy yml]
name_key = f"{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)
return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full
def parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):
if f_config is not None:
with open(f_config) as f:
config_postproc = yaml.load(f, Loader=yaml.FullLoader)
name_key = f"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)
skip_instantiate = []
if 'check_record' in config_postproc:
load_acc_record = config_postproc['check_record']
else:
load_acc_record = True
if config_postproc['method'] == 'cs': # C&S
acc_record = [] if load_acc_record else None
if dir_load is not None:
if 'dir_pred_mat' not in config_postproc:
config_postproc['dir_pred_mat'] = [dir_load]
elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:
config_postproc['dir_pred_mat'].append(dir_load)
config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])
for i, di in enumerate(config_postproc['dir_pred_mat']):
if load_acc_record:
acc_record.append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):
config_postproc['pred_mat'][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for m in config_postproc['pred_mat']):
skip_instantiate = ['data', 'model']
elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc
acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None
assert dir_load is None
config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}
for sname, dirs_l in config_postproc['dir_emb_mat'].items():
for i, di in enumerate(dirs_l):
if load_acc_record:
acc_record[sname].append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):
config_postproc['emb_mat'][sname][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):
skip_instantiate = ['model'] # you have to load data (role, labels) anyways
return config_postproc, acc_record, skip_instantiate
def log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):
if task == 'train':
prefix = 'running'
elif task == 'inference':
prefix = 'INF'
elif task == 'postproc':
prefix = 'POST'
else:
raise NotImplementedError
log_dir = f"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
yml_file = f"{log_dir}/{yml_name_key}.yml"
with open(yml_file, 'w') as f:
yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)
return log_dir
# =============== #
# ADJ UTILS #
# =============== #
def get_deg_torch_sparse(adj):
return scatter(adj._values(), adj._indices()[0], reduce="sum")
def adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):
"""
Normalize adj according to the method of rw normalization.
Note that sym norm is used in the original GCN paper (kipf),
while rw norm is used in GraphSAGE and some other variants.
# Procedure:
# 1. adj add self-connection --> adj'
# 2. D' deg matrix from adj'
# 3. norm by D^{-1} x adj' | Note that after 'dot' the indices of a node would be in descending order
rather than ascending order
"""
if type(adj) == torch.Tensor:
assert deg is None
assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]
_deg_orig = get_deg_torch_sparse(adj)
if dropedge > 0:
masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()
adj._values()[masked_indices] = 0
_deg_dropped = get_deg_torch_sparse(adj)
else:
_deg_dropped = _deg_orig
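# expand per-node degrees to one entry per stored edge (assumes the sparse indices are coalesced / row-sorted) so the values can be divided in place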
_deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())
_deg = torch.clamp(_deg, min=1)
_val = adj._values()
_val /= _deg
adj_norm = adj
else:
assert dropedge == 0., "not supporting dropedge for scipy csr matrices"
assert adj.shape[0] == adj.shape[1]
diag_shape = (adj.shape[0], adj.shape[1])
D = adj.sum(1).flatten() if deg is None else deg
D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to.
norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)
adj_norm = norm_diag.dot(adj)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):
assert adj.shape[0] == adj.shape[1]
assert adj.data.sum() == adj.size, "symmetric normalization only supports binary input adj"
N = adj.shape[0]
# drop edges symmetrically
if dropedge > 0:
masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))
adj.data[masked_indices] = 0
adjT = adj.tocsc()
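# an edge survives the symmetric drop only if both directions kept their 1 (i.e., the two copies still sum to 2)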
data_add = adj.data + adjT.data
survived_indices = np.where(data_add == 2)[0]
adj.data *= 0
adj.data[survived_indices] = 1
# augment adj with self-connection
if add_self_edge:
indptr_new = np.zeros(N + 1)
neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]
for i in range(len(neigh_list)):
neigh_list[i].add(i)
neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))
indptr_new[i + 1] = neigh_list[i].size
indptr_new = indptr_new.cumsum()
indices_new = np.concatenate(neigh_list)
data_new = np.broadcast_to(np.ones(1), indices_new.size)
adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)
# NOTE: no need to explicitly convert dtype, since adj_norm_sym is used for subg only
else:
adj_aug = adj
# normalize
D = np.clip(adj_aug.sum(1).flatten(), 1, None)
norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)
adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def coo_scipy2torch(adj):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
# ================= #
# ADJ FILE IO UTILS #
# ================= #
def load_adj(prefix, dataset, type_, split_):
"""
Try to load the prestored adj (e.g., the undirected version). If the file does not exist, this function MUST return None.
"""
assert split_ in ['full', 'train'], "UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]"
assert type_ in ['raw', 'undirected'], "UNKNOWN ADJ TYPE. ONLY ACCEPT [raw] or [undirected]"
file_adj = f"{prefix}/{dataset}/adj_{split_}_{type_}." + "{}"
if os.path.isfile(file_adj.format('npz')):
adj = sp.load_npz(file_adj.format('npz'))
elif os.path.isfile(file_adj.format('npy')):
adj_d = np.load(file_adj.format('npy'), allow_pickle=True)
if type(adj_d) == np.ndarray:
adj_d = adj_d[()]
else:
assert type(adj_d) == dict
indptr = adj_d['indptr']
indices = adj_d['indices']
if 'data' in adj_d:
data = adj_d['data']
else:
data = np.broadcast_to(np.ones(1, dtype=bool), indices.size)
num_nodes = indptr.size - 1
adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))
else:
adj = None
return adj
def validate_bin_file(bin_adj_files):
for md, df in bin_adj_files.items():
assert set(df.keys()) == set(['indptr', 'indices', 'data'])
if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):
return {mmd: None for mmd in bin_adj_files}
if not os.path.isfile(df['data']):
df['data'] = ''
return bin_adj_files
def merge_stat_record(dict_l : List[dict]):
key_l = [set(d.keys()) for d in dict_l]
assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)
names_stat = set(dict_l[0][TRAIN].keys())
ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}
for d in dict_l:
for m in [TRAIN, VALID, TEST]:
assert set(d[m].keys()) == names_stat
for k, v in d[m].items():
ret[k][m].append(v)
return ret | if sort_indices is True, we re-sort the indices of the returned adj |
track_length_analysis_test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 08:11:51 2021
@author: vik748
"""
import json
import numpy as np
import matplotlib.pyplot as plt
import sys,os
import pandas as pd
def tracks_histogram(recon_file, tracks_file, ax, model_num=0, bins=np.linspace(2,15,14)):
|
########################################
# Skerki_mud SIFT - RAW vs CLAHE - Model 0
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'
fig1, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig1.suptitle('Skerki Mud SIFT')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Skerki_mud RAW - SIFT vs Zernike
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'
fig2, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig2.suptitle('Skerki Mud RAW')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('SIFT')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], model_num=1, bins=np.linspace(2,15,14))
ax[1].set_title('ZERNIKE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Skerki_mud Zernike - RAW vs CLAHE
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_tracks.csv'
fig3, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig3.suptitle('Skerki Mud ZERNIKE')
tracks_histogram(recon_file, tracks_file, ax[0], model_num=1, bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_ZERNIKE_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Stingray - SIFT vs Zernike
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_tracks.csv'
fig4, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig4.suptitle('Stingray RAW')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('SIFT')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('ZERNIKE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Skerki_mud SIFT - RAW vs CLAHE - Combined
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'
fig5, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig5.suptitle('Skerki Mud SIFT - Combined')
tracks_histogram(recon_file, tracks_file, ax[0], model_num=-1, bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Stingray SIFT - RAW vs CLAHE
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_tracks.csv'
fig6, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig6.suptitle('Stingray SIFT')
tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_SIFT_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_SIFT_tracks.csv'
tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
########################################
# Stingray Zernike - RAW vs CLAHE
########################################
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_tracks.csv'
fig7, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
fig7.suptitle('Stingray ZERNIKE')
counts0 = tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))
ax[0].set_xlim([2, None])
ax[0].set_yscale('log')
ax[0].set_ylim([None, 10000])
ax[0].set_title('RAW')
ax[0].set_xlabel('Feature Track Length')
ax[0].set_ylabel('Frequency')
ax[0].xaxis.set_major_locator(plt.MultipleLocator(1))
recon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_ZERNIKE_reconstruction.json'
tracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_ZERNIKE_tracks.csv'
counts1 = tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))
ax[1].set_title('CLAHE')
ax[1].set_xlabel('Feature Track Length')
ax[1].set_ylabel('Frequency')
plt.hist([counts0, counts1], np.linspace(2,15,14), label=['RAW', 'CLAHE'])
plt.legend(loc='upper right')
plt.show()
| '''
How the tracks.csv file is written
template <class S>
void WriteToStreamCurrentVersion(S& ostream, const TracksManager& manager) {
ostream << manager.TRACKS_HEADER << "_v" << manager.TRACKS_VERSION
<< std::endl;
const auto shotsIDs = manager.GetShotIds();
for (const auto& shotID : shotsIDs) {
const auto observations = manager.GetShotObservations(shotID);
for (const auto& observation : observations) {
ostream << shotID << "\t" << observation.first << "\t"
<< observation.second.id << "\t" << observation.second.point(0)
<< "\t" << observation.second.point(1) << "\t"
<< observation.second.scale << "\t" << observation.second.color(0)
<< "\t" << observation.second.color(1) << "\t"
<< observation.second.color(2) << std::endl;
}
}
}
'''
with open(recon_file) as f:
data = json.load(f)
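# model_num == -1 merges the points of all reconstructed models; otherwise only the selected model is used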
if model_num == -1:
points_dict = {}
for d in data:
points_dict.update(d['points'])
else:
points_dict = data[model_num]['points']
model_0_point_ids_int = [int(k) for k in points_dict.keys()]
tracks_df = pd.read_csv(tracks_file, sep='\t', skiprows=1,
names=['image', 'track_id', 'feature_id', 'x', 'y',
'scale', 'r', 'g', 'b'])
track_id_counts = tracks_df.track_id.value_counts()
model_0_track_id_counts = track_id_counts[model_0_point_ids_int]
ax.hist(model_0_track_id_counts, bins=bins)
return model_0_track_id_counts |
mod.rs | //! Flat Abstract Machine:
//! This machine is an abstract CESK* for a subset of the
//! Scheme programming language.
//!
//! It features flat-closures, as opposed to the standard linked-closure. | //!
//! Using this method, we have a polynomial CFA algorithm!
//!
//! I also use the continuation-allocator from Pushdown For Free by Gilray et al.
//! This allows perfect stack precision for our continuation allocation.
mod domains;
mod evaluate;
mod prims;
pub use evaluate::evaluate;
pub const M: usize = 2; | //! This method is derived from Might et al.'s 2010 paper on the m-CFA algorithm.
//!
//! This is an Abstracted Abstract Machine, based off of the
//! DVH paper of a similar name (see readme). |
device_profile.rs | use crate::device::profile::device_resource::DeviceResource;
use std::collections::HashMap;
pub struct DeviceProfile {
id: i32, | resources: Vec<DeviceResource>,
labels: HashMap<String, String>,
}
impl DeviceProfile {
fn new(id: i32, resources: Vec<DeviceResource>) -> Self {
DeviceProfile {
id,
name: "Pascal".to_string(),
manufacturer: "HUAWEI".to_string(),
namespace: "Namespace".to_string(),
resources,
labels: HashMap::new(),
}
}
pub fn add_device_profile_labels(
&mut self,
field_name: String,
field_value: String,
) -> Option<String> {
self.labels.insert(field_name, field_value)
}
} | name: String,
manufacturer: String,
namespace: String, |
templates.go | // Code generated by go-bindata.
// sources:
// templates/dot.js
// templates/go.js
// templates/md.js
// templates/php.js
// templates/sh.js
// DO NOT EDIT!
package ttouch
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data, name string) ([]byte, error) {
gz, err := gzip.NewReader(strings.NewReader(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
return nil, clErr
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _dotJs = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x52\x2a\x2d\x4e\x55\x28\x2e\x29\xca\x4c\x2e\x51\xb2\xe6\x52\x4a\xc9\x4c\x2f\x4a\x2c\xc8\x50\xa8\x8e\xc9\x8b\xc9\xab\x8d\xc9\x53\xb2\xe6\x02\x04\x00\x00\xff\xff\x96\x31\x7b\x09\x22\x00\x00\x00"
func dotJsBytes() ([]byte, error) {
return bindataRead(
_dotJs,
"dot.js",
)
}
func dotJs() (*asset, error) {
bytes, err := dotJsBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "dot.js", size: 34, mode: os.FileMode(420), modTime: time.Unix(1529983994, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _goJs = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x91\x41\x4f\x83\x40\x10\x85\xef\xfb\x2b\x9e\x1b\xd3\x2c\x92\x50\x3d\x13\xae\x7a\xf2\xe2\xc1\x4b\xa9\xc9\x8a\x5b\xba\x29\x0c\x0d\x2c\x6a\x62\xf8\xef\x66\x16\x87\x50\x53\x4e\xec\xcc\x9b\x6f\x66\xde\xe8\x71\x70\x18\x42\xef\xab\xa0\x73\x65\x0e\x23\x55\xc1\x77\x04\x93\xe0\x47\x01\xc0\xa7\xed\xe1\xbe\xfd\x10\x3c\xd5\x28\xf0\xd4\x74\xef\x46\xdf\x65\x75\xa7\x93\x7c\x11\x9c\x4f\x9c\xd3\xad\xf5\xa4\xe7\xa8\x3f\xc0\x2c\x65\x9b\xcd\x82\xc8\x1a\x47\x75\x38\x0a\x5d\x00\x55\x47\xc1\x51\x40\x81\x17\x67\x3f\x1e\x7d\xe3\x96\xea\xdd\xfd\xfe\xaf\x93\x70\x45\x7c\x53\x14\xa0\xb1\x69\xd6\x30\x01\xb6\x36\x54\x47\x14\x02\xce\xe2\xdb\x6c\xdf\xce\xb6\x3a\xd9\xda\xc1\x94\x5f\x69\x72\xbb\x6d\x57\x68\xc1\xcf\xa5\x02\xe7\xe1\x63\xe4\xca\xe4\xf2\xcd\xeb\x47\xd5\xee\x61\x7f\x49\x9c\xd4\xe5\xdf\xa4\xfe\xed\x3c\xb0\x73\x2b\xd7\x22\x4c\xcc\xe4\xee\xaf\xcf\x19\x1b\x42\xb6\x75\x4b\x22\xfa\xbf\x9a\x64\xcd\xe2\x1b\x82\x45\x7c\xc3\x92\x4a\x9a\x4a\xb9\xca\xdc\xbc\x77\x61\xec\x09\x5a\xbc\xd0\x48\xe3\x0a\x29\x34\xcb\xf9\x29\xbc\x5c\x4d\x89\x49\x72\xf5\x1b\x00\x00\xff\xff\xf2\xd3\x1c\x3a\x27\x02\x00\x00"
func goJsBytes() ([]byte, error) {
return bindataRead(
_goJs,
"go.js",
)
}
func goJs() (*asset, error) {
bytes, err := goJsBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "go.js", size: 551, mode: os.FileMode(420), modTime: time.Unix(1529983994, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _mdJs = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x3c\x8c\xb1\x6a\xc3\x30\x18\x84\x77\x3d\xc5\xa1\x2e\x12\xa6\x72\xe9\x2a\x3c\x74\xe9\x56\x28\x14\xba\x58\x1e\x54\xa3\xda\x02\xe5\x97\x91\x7e\x67\x09\x79\xf7\x60\x0b\xb2\xdc\x70\xf7\xdd\x27\xf7\x1a\x50\xb9\xc4\x99\xa5\x15\xea\x7f\xa7\x99\x63\x26\x28\x8d\x9b\x00\x80\xab\x2f\xd8\x7c\xe1\x8a\x01\x3f\x5b\x8a\xfc\xed\x79\x55\xbf\x5f\xe6\xe3\xaf\x7e\xc6\x14\xc8\x5f\x82\x1e\xdf\x26\x53\x8f\x51\xf5\xa3\xeb\x9d\x9b\xfa\x45\xdb\xe7\xfd\x40\x30\x34\xcb\x78\xa6\x49\x81\x16\x5e\xf1\x8a\xf7\xa9\x71\x73\xa6\x9a\x53\x30\x29\x2f\xea\x54\xb6\xba\x04\xde\x0b\x41\xbe\x40\xa2\x6b\xa2\x0e\xd2\x91\x23\x69\xc5\x5d\x2b\x6d\xc5\x23\x00\x00\xff\xff\x01\xa5\x01\x62\xc2\x00\x00\x00"
func mdJsBytes() ([]byte, error) {
return bindataRead(
_mdJs,
"md.js",
)
}
func mdJs() (*asset, error) {
bytes, err := mdJsBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "md.js", size: 194, mode: os.FileMode(420), modTime: time.Unix(1529983994, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _phpJs = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x54\x5d\x6b\xdb\x3c\x14\xbe\xcf\xaf\x38\xd5\x5b\x8a\x5c\xbf\xb3\x3b\xe8\x9d\xa7\x95\xc1\xd6\x8b\x41\xd7\xb2\xb2\xdd\xd8\x1e\x28\x8e\x9c\x68\xb3\x65\xa1\x23\x17\x43\x97\xff\x3e\xe4\xaf\xca\x69\x03\xdd\x7c\x11\x12\xeb\xf9\xd0\x79\xa4\x27\xa4\x45\x01\x68\x8d\x2c\x2c\x49\x56\x65\xab\x0a\x2b\x1b\x05\x5b\x61\xef\xd0\xdc\x70\x4d\x03\x78\x5c\x01\x00\x3c\x70\x03\x0a\x11\x18\x3c\xee\x93\xf9\x8d\xb4\xa2\x76\xef\xee\x0b\xae\xbe\x69\x4a\x8a\xa6\xd6\x0d\x0a\x13\xfd\xc4\x46\x91\x60\x00\xca\x12\x68\x0f\x8c\x2a\xa1\xb6\x76\x07\xef\xe1\x62\x92\x9d\x84\x26\xe2\x1d\xb7\x3b\x60\x83\x6e\x7a\x91\x27\x2f\x82\x3e\x4a\xe3\x3c\x75\x25\xad\xc3\x53\x9f\x1c\xbc\xc0\x52\x56\x28\x0b\x0c\xbe\x0a\xbe\xb9\x96\x95\x58\x12\x9e\xd0\x6e\xa3\x13\xfa\x84\x31\x50\x6d\x55\xf9\x1b\x9d\x15\xdd\x74\xc0\xe0\xf3\xfd\xed\x97\x48\x73\x83\x62\xa2\x79\x62\xb3\xa0\x03\x47\xbc\xb5\x4d\xd5\xf0\x0d\x9c\x9d\xc1\xf2\x4d\x4a\x34\x9a\x37\x17\x24\x3f\x74\x72\x4f\xd9\x18\xa0\xce\xb2\x03\xa9\xfe\x86\xe8\x1e\x85\x98\xfa\x91\x85\xc7\x04\xd2\x2e\xcf\x81\x01\x21\xc9\x33\x9d\xfd\xea\xf8\xaf\xd7\x4f\x77\xf9\xaf\xd3\x1d\x21\xbe\x7e\xba\xcb\x79\xba\xee\xf5\xc3\x0d\xdf\x86\x4f\x23\x6c\x6b\x94\x73\x4b\x56\xfb\xa7\x82\x58\x23\xeb\xfb\x8a\xe3\x4e\x20\xc5\x69\x8b\x23\x16\x23\x23\x74\xc5\x0b\x41\x63\xfa\x23\xcb\xc2\xe0\x37\xcd\xb2\xf0\x34\x88\xb7\xff\x03\x71\xa5\xd8\xaf\xe8\x2c\xb4\x28\x58\xcd\x35\x30\xbf\x7c\x89\xd7\x3d\xef\x84\xe6\xe0\x6a\x17\x5c\xcd\xb5\x1f\x92\x3b\x96\xef\x37\xd1\x87\x35\xba\xbb\xae\x78\x2d\x22\xa9\x36\xa2\xbb\x2d\x69\x1d\x00\x63\x6c\xd9\xbe\xc9\x60\x73\x50\xaa\xa5\xc4\xb2\x56\x13\x07\xdb\xb2\x94\x1d\x30\x47\x8e\xb0\x5d\xa3\x35\xb4\x1e\x5b\x1e\x78\x29\x44\xe7\x57\x41\x9a\xc5\xf9\xf9\x69\x9f\xc1\xe9\x5b\xe2\xad\xa6\x59\x9c\x65\x79\xd8\xaf\x64\x19\x09\x9e\xdb\x68\x23\x06\x1b\x3f\xf5\x9a\xeb\xb4\xce\x0f\xd0\x7d\x4a\x3e\x6a\xa4\x86\xbd\x34\x84\xe3\x8e\x0f\x58\x6b\x23\xf8\xaf\xe4\xc5\xe3\x77\xf6\x46\x60\x5b\x59\x2f\xfe\x31\xe1\xeb\x8a\x6f\x31\xfa\xd4\x89\xa2\xb5\x7c\x5d\x09\x3f\xd5\x91\x13\x32\x20\xff\x9d\xc4\x2d\x9a\x78\x2d\x55\x2c\xd4\x03\xe8\x9d\xce\xd4\x28\x34\x5d\xb1\x19\xfb\xee\xaa\x5f\x9e\x01\xce\x49\x61\xff\x67\x44\xc8\x11\x7d\x77\x3c\xa8\x79\x21\xc0\x0d\xa8\xd0\x0d\x9b\x78\x1a\x8b\x7b\x3c\xf0\x92\xd5\x3e\x70\x97\xeb\x4f\x00\x00\x00\xff\xff\xcb\x97\x16\x4c\xfe\x05\x00\x00"
func phpJsBytes() ([]byte, error) {
return bindataRead(
_phpJs,
"php.js",
)
}
func phpJs() (*asset, error) {
bytes, err := phpJsBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "php.js", size: 1534, mode: os.FileMode(420), modTime: time.Unix(1529983994, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _shJs = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x52\x2a\x2d\x4e\x55\x28\x2e\x29\xca\x4c\x2e\x51\xb2\xe6\x52\x52\x56\xd4\x4f\xca\xcc\xd3\x2f\xce\x88\xc9\x2b\x4e\x2d\x51\xd0\x4d\x8d\xc9\x8b\xc9\x53\xb2\xe6\x02\x04\x00\x00\xff\xff\x3b\x36\xcd\x4e\x27\x00\x00\x00"
func shJsBytes() ([]byte, error) {
return bindataRead(
_shJs,
"sh.js",
)
}
func shJs() (*asset, error) {
bytes, err := shJsBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "sh.js", size: 39, mode: os.FileMode(420), modTime: time.Unix(1529983994, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"dot.js": dotJs,
"go.js": goJs,
"md.js": mdJs,
"php.js": phpJs,
"sh.js": shJs,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"dot.js": &bintree{dotJs, map[string]*bintree{}},
"go.js": &bintree{goJs, map[string]*bintree{}},
"md.js": &bintree{mdJs, map[string]*bintree{}},
"php.js": &bintree{phpJs, map[string]*bintree{}},
"sh.js": &bintree{shJs, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error |
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
| {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
} |
cost.py | from collections import defaultdict
from itertools import product
MULTITASK_PENALTY = 1
AUTHOR_PENALTY = 2
RELATION_COST = .05
DEFAULT_FLEXIBILITY = .1
OVERREQ_PENALTY = 0.5
def workload_diff(target, proposed):
"""
Helper for pitches_cost
:param target: <role, load>
:param proposed: <role, load>
:return: float
"""
total = 0
for role in target:
# flat penalty of -1 if no students are on a target role
diff = target[role] - (proposed[role] if role in proposed else -1)
# a negative diff means too many students were assigned to the role
if diff < 0:
# the penalty for going over requirements can be softened
diff *= OVERREQ_PENALTY
# the squared diff is added to the cost (so that greater discrepancies cost more)
total += diff ** 2
return total
def author_tasks(pitches, wishes):
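# map each (pitch, role) that a pitch's author wished for on their own pitch to that author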
tasks = {}
for pitch in pitches:
author = pitches[pitch]["author"]
for wpitch, role in wishes[author]:
if wpitch == pitch:
tasks[(wpitch, role)] = author |
class Cost:
def __init__(self, pitches, wishes, relations=None, flexibility=DEFAULT_FLEXIBILITY):
"""
:param pitches: <pitch, <role, load>>
:param wishes: <student, [(pitch, role)]>
:param relations: <student, <student, cost>>
:param flexibility: float in [0, 1]
"""
self.pitches = pitches
self.wishes = wishes
self.relations = relations if relations else {}
self.flexibility = flexibility
self.author_tasks = author_tasks(pitches, wishes)
def __call__(self, solution):
return (
(1 - self.flexibility) * self.pitches_cost(solution) +
self.flexibility * (self.wishes_cost(solution) +
RELATION_COST*self.relations_cost(solution))
)
def author_constraint(self, solution):
"""
cost of the authors not getting their roles on their pitch
:param solution: [student, wish index]
:return: float
"""
# <(pitch, role), author>
tasks_solution = {task: None for task in self.author_tasks}
for student, i in solution:
pitch, role = self.wishes[student][i]
if (pitch, role) in self.author_tasks:
if student == self.author_tasks[(pitch, role)] or tasks_solution[(pitch, role)] is None:
tasks_solution[(pitch, role)] = student
author_cost = 0
for task, student in tasks_solution.items():
if student != self.author_tasks[task]:
author_cost += 1
return author_cost
def pitches_cost(self, solution):
"""
cost of the pitches workload not being respected
:param solution: [student, wish index]
:return: float
"""
tasks_per_students = defaultdict(int)
for student, _ in solution:
tasks_per_students[student] += 1
workloads = defaultdict(lambda: defaultdict(float))
for student, i in solution:
pitch, role = self.wishes[student][i]
workloads[pitch][role] += 1/tasks_per_students[student]
# a penalty per additional task per student is added to avoid students multitasking too much
return (
# cost of workload diff between requirements and solution
sum(
workload_diff(self.pitches[pitch]
["workload"], workloads[pitch])
for pitch in self.pitches
if pitch in workloads
)
# cost of multitasking
+ MULTITASK_PENALTY * \
sum(tasks-1 for tasks in tasks_per_students.values())
# cost of author not having their roles
+ AUTHOR_PENALTY*self.author_constraint(solution)
)
def wishes_cost(self, solution):
"""
cost of the wishes not being respected
:param solution: [student, wish index]
:return: float
"""
return sum(
((i+1)/len(self.wishes[student]))**2
for student, i in solution
)
def relations_cost(self, solution):
"""
cost of the relations between students
:param solution: [student, wish index]
:return: float
"""
groups = defaultdict(list)
for student, i in solution:
pitch, role = self.wishes[student][i]
groups[pitch].append(student)
total = 0
for group in groups.values():
for student, other in product(filter(self.relations.__contains__, group), group):
if student != other:
if other not in self.relations[student]:
total += .5
elif self.relations[student][other] == -1:
total += 1
return total
def cost(pitches, wishes, solution, relations=None, flexibility=DEFAULT_FLEXIBILITY):
return Cost(pitches, wishes, relations, flexibility)(solution) | return tasks
|
dataset.py | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from data import get_train_transform, get_test_transform
class CustomDataset(Dataset):
img_aug = True
imgs = []
transform = None
def | (self, label_file, image_set, input_size):
with open(label_file, 'r', encoding="utf-8") as f:
self.imgs = list(map(lambda line: line.strip().split('|'), f))
if image_set == 'train':
self.transform = get_train_transform(size=input_size)
else:
self.transform = get_test_transform(size=input_size)
self.input_size = input_size
def __getitem__(self, index):
# print(self.imgs)
# print(index)
# print(len(self.imgs[index]))
img_path, label = self.imgs[index]
# print(img_path)
img = Image.open(img_path).convert('RGB')
if self.img_aug:
img = self.transform(img)
else:
img = np.array(img)
img = torch.from_numpy(img)
return img, torch.from_numpy(np.array(int(label)))
def __len__(self):
return len(self.imgs)
def get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):
_dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)
_dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
return _dataset, _dataloader
| __init__ |
ipset_test.go | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ipset
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
)
func | (t *testing.T) {
g := NewWithT(t)
postfix := "alongpostfix"
t.Run("name with postfix", func(t *testing.T) {
chaosName := "test"
networkChaos := &v1alpha1.NetworkChaos{
ObjectMeta: metav1.ObjectMeta{
Name: chaosName,
},
}
name := GenerateIPSetName(networkChaos, postfix)
g.Expect(name).Should(Equal(chaosName + "_" + postfix))
})
t.Run("length equal 27", func(t *testing.T) {
networkChaos := &v1alpha1.NetworkChaos{
ObjectMeta: metav1.ObjectMeta{
Name: "test-metav1object",
},
}
name := GenerateIPSetName(networkChaos, postfix)
g.Expect(len(name)).Should(Equal(27))
})
}
| Test_generateIPSetName |
predictive_iter.rs | use crate::utils;
use crate::Set;
/// Iterator to enumerate keys starting from a given string.
#[derive(Clone)]
pub struct PredictiveIter<'a> {
set: &'a Set,
dec: Vec<u8>,
key: Vec<u8>,
pos: usize,
id: usize,
}
impl<'a> PredictiveIter<'a> {
/// Makes an iterator [`PredictiveIter`].
///
/// # Arguments
///
/// - `set`: Front-coding dictionary.
/// - `key`: Prefix key.
pub fn new<P>(set: &'a Set, key: P) -> Self
where
P: AsRef<[u8]>,
{
Self {
key: key.as_ref().to_vec(),
set,
dec: Vec::with_capacity(set.max_length()),
pos: 0,
id: 0,
}
}
/// Resets the prefix key.
///
/// # Arguments
///
/// - `key`: Prefix key.
pub fn reset<P>(&mut self, key: P)
where
P: AsRef<[u8]>,
{
self.key = key.as_ref().to_vec();
self.dec.clear();
self.pos = 0;
self.id = 0;
}
fn search_first(&mut self) -> bool {
let (set, dec) = (&self.set, &mut self.dec);
if self.key.is_empty() {
self.pos = set.decode_header(0, dec);
self.id = 0;
return true;
}
let (bi, found) = set.search_bucket(&self.key);
self.pos = set.decode_header(bi, dec);
self.id = bi * set.bucket_size();
if found || utils::is_prefix(&self.key, dec) {
return true;
}
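// The query prefix was not matched at the bucket header; scan the remaining keys in this bucket for the first one it prefixes.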
for bj in 1..set.bucket_size() {
if self.pos == set.serialized.len() {
break;
}
let (lcp, next_pos) = set.decode_lcp(self.pos);
self.pos = next_pos;
dec.resize(lcp, 0);
self.pos = set.decode_next(self.pos, dec);
if utils::is_prefix(&self.key, dec) {
self.id += bj;
return true;
}
}
false
}
}
impl<'a> Iterator for PredictiveIter<'a> {
type Item = (usize, Vec<u8>);
fn next(&mut self) -> Option<Self::Item> |
fn size_hint(&self) -> (usize, Option<usize>) {
(0, Some(self.set.len()))
}
}
| {
if self.pos == self.set.serialized.len() {
return None;
}
if self.dec.is_empty() {
if !self.search_first() {
self.dec.clear();
self.pos = self.set.serialized.len();
self.id = 0;
return None;
}
} else {
self.id += 1;
if self.set.pos_in_bucket(self.id) == 0 {
self.dec.clear();
} else {
let (lcp, next_pos) = self.set.decode_lcp(self.pos);
self.pos = next_pos;
self.dec.resize(lcp, 0);
}
self.pos = self.set.decode_next(self.pos, &mut self.dec);
}
if utils::is_prefix(&self.key, &self.dec) {
Some((self.id, self.dec.clone()))
} else {
self.dec.clear();
self.pos = self.set.serialized.len();
self.id = 0;
None
}
} |
scvoice.py | #!/usr/bin/env python
############################################################################
# Copyright (C) by GFZ Potsdam #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
############################################################################
import os, sys, subprocess, traceback
import seiscomp3.Client, seiscomp3.Seismology, seiscomp3.System
class VoiceAlert(seiscomp3.Client.Application):
def __init__(self, argc, argv):
seiscomp3.Client.Application.__init__(self, argc, argv)
self.setMessagingEnabled(True)
self.setDatabaseEnabled(True, True)
self.setLoadRegionsEnabled(True)
self.setMessagingUsername("")
self.setPrimaryMessagingGroup(seiscomp3.Communication.Protocol.LISTENER_GROUP)
self.addMessagingSubscription("EVENT")
self.addMessagingSubscription("LOCATION")
self.addMessagingSubscription("MAGNITUDE")
self.setAutoApplyNotifierEnabled(True)
self.setInterpretNotifierEnabled(True)
self.setLoadCitiesEnabled(True)
self.setLoadRegionsEnabled(True)
self._ampType = "snr"
self._citiesMaxDist = 20
self._citiesMinPopulation = 50000
self._eventDescriptionPattern = None
self._ampScript = None
self._alertScript = None
self._eventScript = None
self._ampProc = None
self._alertProc = None
self._eventProc = None
self._newWhenFirstSeen = False
self._prevMessage = {}
self._agencyIDs = []
def createCommandLineDescription(self):
self.commandline().addOption("Generic", "first-new", "calls an event a new event when it is seen the first time")
self.commandline().addGroup("Alert")
self.commandline().addStringOption("Alert", "amp-type", "specify the amplitude type to listen to", self._ampType)
self.commandline().addStringOption("Alert", "amp-script", "specify the script to be called when a stationamplitude arrived, network-, stationcode and amplitude are passed as parameters $1, $2 and $3")
self.commandline().addStringOption("Alert", "alert-script", "specify the script to be called when a preliminary origin arrived, latitude and longitude are passed as parameters $1 and $2")
self.commandline().addStringOption("Alert", "event-script", "specify the script to be called when an event has been declared; the message string, a flag (1=new event, 0=update event), the EventID, the arrival count and the magnitude (optional when set) are passed as parameter $1, $2, $3, $4 and $5")
self.commandline().addGroup("Cities")
self.commandline().addStringOption("Cities", "max-dist", "maximum distance for using the distance from a city to the earthquake")
self.commandline().addStringOption("Cities", "min-population", "minimum population for a city to become a point of interest")
self.commandline().addGroup("Debug")
self.commandline().addStringOption("Debug", "eventid,E", "specify Event ID")
return True
def init(self):
if not seiscomp3.Client.Application.init(self): return False
try: self._newWhenFirstSeen = self.configGetBool("firstNew");
except: pass
try: self._agencyIDs = [ self.configGetString("agencyID") ]
except: pass
try:
agencyIDs = self.configGetStrings("agencyIDs")
for item in agencyIDs:
item = item.strip()
if item not in self._agencyIDs:
self._agencyIDs.append(item)
except: pass
try:
if self.commandline().hasOption("first-new"): self._newWhenFirstSeen = True
except: pass
try: self._eventDescriptionPattern = self.configGetString("poi.message")
except: pass
try: self._citiesMaxDist = self.configGetDouble("poi.maxDist")
except: pass
try: self._citiesMaxDist = self.commandline().optionDouble("max-dist")
except: pass
try: self._citiesMinPopulation = self.configGetInt("poi.minPopulation")
except: pass
try: self._citiesMinPopulation = self.commandline().optionInt("min-population")
except: pass
try: self._ampType = self.commandline().optionString("amp-type")
except: pass
try: self._ampScript = self.commandline().optionString("amp-script")
except:
try: self._ampScript = self.configGetString("scripts.amplitude")
except: seiscomp3.Logging.warning("No amplitude script defined")
if self._ampScript:
self._ampScript = seiscomp3.System.Environment.Instance().absolutePath(self._ampScript)
try: self._alertScript = self.commandline().optionString("alert-script")
except:
try: self._alertScript = self.configGetString("scripts.alert")
except: seiscomp3.Logging.warning("No alert script defined")
if self._alertScript:
self._alertScript = seiscomp3.System.Environment.Instance().absolutePath(self._alertScript)
try: self._eventScript = self.commandline().optionString("event-script")
except:
try:
self._eventScript = self.configGetString("scripts.event")
seiscomp3.Logging.info("Using event script: %s" % self._eventScript)
except: seiscomp3.Logging.warning("No event script defined")
if self._eventScript:
self._eventScript = seiscomp3.System.Environment.Instance().absolutePath(self._eventScript)
seiscomp3.Logging.info("Creating ringbuffer for 100 objects")
if not self.query():
seiscomp3.Logging.warning("No valid database interface to read from")
self._cache = seiscomp3.DataModel.PublicObjectRingBuffer(self.query(), 100)
if self._ampScript and self.connection():
self.connection().subscribe("AMPLITUDE")
if self._newWhenFirstSeen:
seiscomp3.Logging.info("A new event is declared when I see it the first time")
if not self._agencyIDs:
seiscomp3.Logging.info("agencyIDs: []")
else:
seiscomp3.Logging.info("agencyIDs: %s" % (" ".join(self._agencyIDs)))
return True
def run(self):
try:
try:
eventID = self.commandline().optionString("eventid")
event = self._cache.get(seiscomp3.DataModel.Event, eventID)
if event:
self.notifyEvent(event)
except: pass
return seiscomp3.Client.Application.run(self)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
return False
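# The run*Script helpers below spawn the configured shell scripts asynchronously.
# If a previous invocation is still running, the new trigger is skipped rather than queued,
# so a slow script only drops messages instead of blocking the messaging loop.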
def runAmpScript(self, net, sta, amp):
if not self._ampScript: return
if self._ampProc != None:
if self._ampProc.poll() is None:
seiscomp3.Logging.warning("AmplitudeScript still in progress -> skipping message")
return
try:
self._ampProc = subprocess.Popen([self._ampScript, net, sta, "%.2f" % amp])
seiscomp3.Logging.info("Started amplitude script with pid %d" % self._ampProc.pid)
except:
seiscomp3.Logging.error("Failed to start amplitude script '%s'" % self._ampScript)
def | (self, lat, lon):
if not self._alertScript: return
if self._alertProc != None:
if self._alertProc.poll() is None:
seiscomp3.Logging.warning("AlertScript still in progress -> skipping message")
return
try:
self._alertProc = subprocess.Popen([self._alertScript, "%.1f" % lat, "%.1f" % lon])
seiscomp3.Logging.info("Started alert script with pid %d" % self._alertProc.pid)
except:
seiscomp3.Logging.error("Failed to start alert script '%s'" % self._alertScript)
def handleMessage(self, msg):
try:
dm = seiscomp3.Core.DataMessage.Cast(msg)
if dm:
for att in dm:
org = seiscomp3.DataModel.Origin.Cast(att)
if org:
try:
if org.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
self.runAlert(org.latitude().value(), org.longitude().value())
except: pass
#ao = seiscomp3.DataModel.ArtificialOriginMessage.Cast(msg)
#if ao:
# org = ao.origin()
# if org:
# self.runAlert(org.latitude().value(), org.longitude().value())
# return
seiscomp3.Client.Application.handleMessage(self, msg)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def addObject(self, parentID, object):
try:
obj = seiscomp3.DataModel.Amplitude.Cast(object)
if obj:
if obj.type() == self._ampType:
seiscomp3.Logging.debug("got new %s amplitude '%s'" % (self._ampType, obj.publicID()))
self.notifyAmplitude(obj)
obj = seiscomp3.DataModel.Origin.Cast(object)
if obj:
self._cache.feed(obj)
seiscomp3.Logging.debug("got new origin '%s'" % obj.publicID())
try:
if obj.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
self.runAlert(obj.latitude().value(), obj.longitude().value())
except: pass
return
obj = seiscomp3.DataModel.Magnitude.Cast(object)
if obj:
self._cache.feed(obj)
seiscomp3.Logging.debug("got new magnitude '%s'" % obj.publicID())
return
obj = seiscomp3.DataModel.Event.Cast(object)
if obj:
org = self._cache.get(seiscomp3.DataModel.Origin, obj.preferredOriginID())
agencyID = org.creationInfo().agencyID()
seiscomp3.Logging.debug("got new event '%s'" % obj.publicID())
if not self._agencyIDs or agencyID in self._agencyIDs:
self.notifyEvent(obj, True)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def updateObject(self, parentID, object):
try:
obj = seiscomp3.DataModel.Event.Cast(object)
if obj:
org = self._cache.get(seiscomp3.DataModel.Origin, obj.preferredOriginID())
agencyID = org.creationInfo().agencyID()
seiscomp3.Logging.debug("update event '%s'" % obj.publicID())
if not self._agencyIDs or agencyID in self._agencyIDs:
self.notifyEvent(obj, False)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def notifyAmplitude(self, amp):
self.runAmpScript(amp.waveformID().networkCode(), amp.waveformID().stationCode(), amp.amplitude().value())
def notifyEvent(self, evt, newEvent=True, dtmax=3600):
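# Resolves the preferred origin (and, for non-preliminary solutions, the preferred magnitude),
# builds a human-readable description (optionally relative to the nearest configured city),
# suppresses repeated messages per event ID and finally hands the message to the event script.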
try:
org = self._cache.get(seiscomp3.DataModel.Origin, evt.preferredOriginID())
if not org:
seiscomp3.Logging.warning("unable to get origin %s, ignoring event message" % evt.preferredOriginID())
return
preliminary = False
try:
if org.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
preliminary = True
except: pass
if preliminary == False:
nmag = self._cache.get(seiscomp3.DataModel.Magnitude, evt.preferredMagnitudeID())
if nmag:
mag = nmag.magnitude().value()
mag = "magnitude %.1f" % mag
else:
if len(evt.preferredMagnitudeID()) > 0:
seiscomp3.Logging.warning("unable to get magnitude %s, ignoring event message" % evt.preferredMagnitudeID())
else:
seiscomp3.Logging.warning("no preferred magnitude yet, ignoring event message")
return
# keep track of old events
if self._newWhenFirstSeen:
if evt.publicID() in self._prevMessage:
newEvent = False
else:
newEvent = True
dsc = seiscomp3.Seismology.Regions.getRegionName(org.latitude().value(), org.longitude().value())
if self._eventDescriptionPattern:
try:
city,dist,azi = self.nearestCity(org.latitude().value(), org.longitude().value(), self._citiesMaxDist, self._citiesMinPopulation)
if city:
dsc = self._eventDescriptionPattern
region = seiscomp3.Seismology.Regions.getRegionName(org.latitude().value(), org.longitude().value())
distStr = str(int(seiscomp3.Math.deg2km(dist)))
dsc = dsc.replace("@region@", region).replace("@dist@", distStr).replace("@poi@", city.name())
except: pass
seiscomp3.Logging.debug("desc: %s" % dsc)
dep = org.depth().value()
now = seiscomp3.Core.Time.GMT()
otm = org.time().value()
dt = (now - otm).seconds()
# if dt > dtmax:
# return
if dt > 3600:
dt = "%d hours %d minutes ago" % (dt/3600, (dt%3600)/60)
elif dt > 120:
dt = "%d minutes ago" % (dt/60)
else:
dt = "%d seconds ago" % dt
if preliminary == True:
message = "earthquake, preliminary, %%s, %s" % dsc
else:
message = "earthquake, %%s, %s, %s, depth %d kilometers" % (dsc, mag, int(dep+0.5))
# at this point the message lacks the "ago" part
if evt.publicID() in self._prevMessage and self._prevMessage[evt.publicID()] == message:
seiscomp3.Logging.info("Suppressing repeated message '%s'" % message)
return
self._prevMessage[evt.publicID()] = message
message = message % dt # fill the "ago" part
seiscomp3.Logging.info(message)
if not self._eventScript: return
if self._eventProc != None:
if self._eventProc.poll() is None:
seiscomp3.Logging.warning("EventScript still in progress -> skipping message")
return
try:
param2 = 0
param3 = 0
param4 = ""
if newEvent: param2 = 1
org = self._cache.get(seiscomp3.DataModel.Origin, evt.preferredOriginID())
if org:
try: param3 = org.quality().associatedPhaseCount()
except: pass
nmag = self._cache.get(seiscomp3.DataModel.Magnitude, evt.preferredMagnitudeID())
if nmag:
param4 = "%.1f" % nmag.magnitude().value()
self._eventProc = subprocess.Popen([self._eventScript, message, "%d" % param2, evt.publicID(), "%d" % param3, param4])
seiscomp3.Logging.info("Started event script with pid %d" % self._eventProc.pid)
except:
seiscomp3.Logging.error("Failed to start event script '%s %s %d %d %s'" % (self._eventScript, message, param2, param3, param4))
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
app = VoiceAlert(len(sys.argv), sys.argv)
sys.exit(app())
| runAlert |
internals.py | # coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ..ordereddict import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
def set_binary_mode(fh):
""" Helper method to set up binary mode for file handles.
Emphasis being sys.stdin, sys.stdout, sys.stderr.
For python3, we want to return .buffer
For python2+windows we want to set os.O_BINARY
"""
typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
# check for file handle
if not isinstance(fh, typefile):
return fh
# check for python3 and buffer
if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
return fh.buffer
# check for python3
elif sys.version_info >= (3, 0):
pass
# check for windows python2. SPL-175233 -- python3 stdout is already binary
elif sys.platform == 'win32':
# Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
# binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
# all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
from platform import python_implementation
implementation = python_implementation()
if implementation == 'PyPy':
return os.fdopen(fh.fileno(), 'wb', 0)
else:
import msvcrt
msvcrt.setmode(fh.fileno(), os.O_BINARY)
return fh
class CommandLineParser(object):
r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
**Syntax**::
command = command-name *[wsp option] *[wsp [dquote] field-name [dquote]]
command-name = alpha *( alpha / digit )
option = option-name [wsp] "=" [wsp] option-value
option-name = alpha *( alpha / digit / "_" )
option-value = word / quoted-string
word = 1*( %01-%08 / %0B / %0C / %0E-1F / %21 / %23-%FF ) ; Any character but DQUOTE and WSP
quoted-string = dquote *( word / wsp / "\" dquote / dquote dquote ) dquote
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
**Note:**
This syntax is constrained to an 8-bit character set.
**Note:**
This syntax does not show that `field-name` values may be comma-separated when in fact they can be. This is
because Splunk strips commas from the command line. A custom search command will never see them.
**Example:**
countmatches fieldname = word_count pattern = \w+ some_text_field
Option names are mapped to properties in the targeted ``SearchCommand``. It is the responsibility of the property
setters to validate the values they receive. Property setters may also produce side effects. For example,
setting the built-in `log_level` immediately changes the `log_level`.
"""
@classmethod
def parse(cls, command, argv):
""" Splits an argument list into an options dictionary and a fieldname
list.
The argument list, `argv`, must be of the form::
*[option]... *[<field-name>]
Options are validated and assigned to items in `command.options`. Field names are validated and stored in the
list of `command.fieldnames`.
#Arguments:
:param command: Search command instance.
:type command: ``SearchCommand``
:param argv: List of search command arguments.
:type argv: ``list``
:return: ``None``
#Exceptions:
``SyntaxError``: Argument list is incorrectly formed.
``ValueError``: Unrecognized option/field name, or an illegal field value.
"""
debug = environment.splunklib_logger.debug
command_class = type(command).__name__
# Prepare
debug('Parsing %s command line: %r', command_class, argv)
command.fieldnames = None
command.options.reset()
argv = ' '.join(argv)
command_args = cls._arguments_re.match(argv)
if command_args is None:
raise SyntaxError('Syntax error: {}'.format(argv))
# Parse options
for option in cls._options_re.finditer(command_args.group('options')):
name, value = option.group('name'), option.group('value')
if name not in command.options:
raise ValueError(
'Unrecognized {} command option: {}={}'.format(command.name, name, json_encode_string(value)))
command.options[name].value = cls.unquote(value)
missing = command.options.get_missing()
if missing is not None:
if len(missing) > 1:
raise ValueError(
'Values for these {} command options are required: {}'.format(command.name, ', '.join(missing)))
raise ValueError('A value for {} command option {} is required'.format(command.name, missing[0]))
# Parse field names
fieldnames = command_args.group('fieldnames')
if fieldnames is None:
command.fieldnames = []
else:
command.fieldnames = [cls.unquote(value.group(0)) for value in cls._fieldnames_re.finditer(fieldnames)]
debug(' %s: %s', command_class, command)
@classmethod
def unquote(cls, string):
""" Removes quotes from a quoted string.
Splunk search command quote rules are applied. The enclosing double-quotes, if present, are removed. Escaped
double-quotes ('\"' or '""') are replaced by a single double-quote ('"').
**NOTE**
We are not using a json.JSONDecoder because Splunk quote rules are different than JSON quote rules. A
json.JSONDecoder does not recognize a pair of double-quotes ('""') as an escaped quote ('"') and will
decode single-quoted strings ("'") in addition to double-quoted ('"') strings.
"""
if len(string) == 0:
return ''
if string[0] == '"':
if len(string) == 1 or string[-1] != '"':
raise SyntaxError('Poorly formed string literal: ' + string)
string = string[1:-1]
if len(string) == 0:
return ''
def replace(match):
value = match.group(0)
if value == '""':
return '"'
if len(value) < 2:
raise SyntaxError('Poorly formed string literal: ' + string)
return value[1]
result = re.sub(cls._escaped_character_re, replace, string)
return result
# region Class variables
_arguments_re = re.compile(r"""
^\s*
(?P<options> # Match a leading set of name/value pairs
(?:
(?:(?=\w)[^\d]\w*) # name
\s*=\s* # =
(?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s* # value
)*
)\s*
(?P<fieldnames> # Match a trailing set of field names
(?:
(?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*
)*
)\s*$
""", re.VERBOSE | re.UNICODE)
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
_fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
(?P<name>(?:(?=\w)[^\d]\w*)) # name
\s*=\s* # =
(?P<value>"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+) # value
""", re.VERBOSE | re.UNICODE)
# endregion
class ConfigurationSettingsType(type):
""" Metaclass for constructing ConfigurationSettings classes.
Instances of :class:`ConfigurationSettingsType` construct :class:`ConfigurationSettings` classes from classes from
a base :class:`ConfigurationSettings` class and a dictionary of configuration settings. The settings in the
dictionary are validated against the settings in the base class. You cannot add settings, you can only change their
backing-field values and you cannot modify settings without backing-field values. These are considered fixed
configuration setting values.
This is an internal class used in two places:
+ :meth:`decorators.Configuration.__call__`
Adds a ConfigurationSettings attribute to a :class:`SearchCommand` class.
+ :meth:`reporting_command.ReportingCommand.fix_up`
Adds a ConfigurationSettings attribute to a :meth:`ReportingCommand.map` method, if there is one.
"""
def __new__(mcs, module, name, bases):
mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
return mcs
def __init__(cls, module, name, bases):
super(ConfigurationSettingsType, cls).__init__(name, bases, None)
cls.__module__ = module
@staticmethod
def validate_configuration_setting(specification, name, value):
if not isinstance(value, specification.type):
if isinstance(specification.type, type):
type_names = specification.type.__name__
else:
type_names = ', '.join(imap(lambda t: t.__name__, specification.type))
raise ValueError('Expected {} value, not {}={}'.format(type_names, name, repr(value)))
if specification.constraint and not specification.constraint(value):
raise ValueError('Illegal value: {}={}'.format(name, repr(value)))
return value
specification = namedtuple(
'ConfigurationSettingSpecification', (
'type',
'constraint',
'supporting_protocols'))
# P1 [ ] TODO: Review ConfigurationSettingsType.specification_matrix for completeness and correctness
specification_matrix = {
'clear_required_fields': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'distributed': specification(
type=bool,
constraint=None,
supporting_protocols=[2]),
'generates_timeorder': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'generating': specification(
type=bool,
constraint=None,
supporting_protocols=[1, 2]),
'local': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'maxinputs': specification(
type=int,
constraint=lambda value: 0 <= value <= six.MAXSIZE,
supporting_protocols=[2]),
'overrides_timeorder': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'required_fields': specification(
type=(list, set, tuple),
constraint=None,
supporting_protocols=[1, 2]),
'requires_preop': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'retainsevents': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'run_in_preview': specification(
type=bool,
constraint=None,
supporting_protocols=[2]),
'streaming': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'streaming_preop': specification(
type=(bytes, six.text_type),
constraint=None,
supporting_protocols=[1, 2]),
'type': specification(
type=(bytes, six.text_type),
constraint=lambda value: value in ('events', 'reporting', 'streaming'),
supporting_protocols=[2])}
class CsvDialect(csv.Dialect):
""" Describes the properties of Splunk CSV streams """
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
if sys.version_info >= (3, 0) and sys.platform == 'win32':
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
def __str__(self):
return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
def read(self, ifile):
""" Reads an input header from an input file.
The input header is read as a sequence of *<name>***:***<value>* pairs separated by a newline. The end of the
input header is signalled by an empty line or an end-of-file.
:param ifile: File-like object that supports iteration over lines.
"""
name, value = None, None
for line in ifile:
if line == '\n':
break
item = line.split(':', 1)
if len(item) == 2:
# start of a new item
if name is not None:
self[name] = value[:-1] # value sans trailing newline
name, value = item[0], urllib.parse.unquote(item[1])
elif name is not None:
# continuation of the current item
value += urllib.parse.unquote(line)
if name is not None:
self[name] = value[:-1] if value[-1] == '\n' else value
Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
def __init__(self):
JSONDecoder.__init__(self, object_hook=self._object_hook)
@staticmethod
def _object_hook(dictionary):
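# Breadth-first walk over the decoded dictionary: every nested dict is wrapped in an
# ObjectView so the metadata can be accessed with attribute syntax (e.g. metadata.searchinfo).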
object_view = ObjectView(dictionary)
stack = deque()
stack.append((None, None, dictionary))
while len(stack):
instance, member_name, dictionary = stack.popleft()
for name, value in six.iteritems(dictionary):
if isinstance(value, dict):
stack.append((dictionary, name, value))
if instance is not None:
instance[member_name] = ObjectView(dictionary)
return object_view
class MetadataEncoder(JSONEncoder):
def __init__(self):
JSONEncoder.__init__(self, separators=MetadataEncoder._separators)
def default(self, o):
return o.__dict__ if isinstance(o, ObjectView) else JSONEncoder.default(self, o)
_separators = (',', ':')
class ObjectView(object):
def __init__(self, dictionary):
self.__dict__ = dictionary
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return str(self.__dict__)
class Recorder(object):
def __init__(self, path, f):
self._recording = gzip.open(path + '.gz', 'wb')
self._file = f
def __getattr__(self, name):
return getattr(self._file, name)
def __iter__(self):
for line in self._file:
self._recording.write(line)
self._recording.flush()
yield line
def read(self, size=None):
value = self._file.read() if size is None else self._file.read(size)
self._recording.write(value)
self._recording.flush()
return value
def readline(self, size=None):
value = self._file.readline() if size is None else self._file.readline(size)
if len(value) > 0:
self._recording.write(value)
self._recording.flush()
return value
def record(self, *args):
for arg in args:
self._recording.write(arg)
def write(self, text):
self._recording.write(text)
self._file.write(text)
self._recording.flush()
class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
self._writer = csv.writer(self._buffer, dialect=CsvDialect)
self._writerow = self._writer.writerow
self._finished = False
self._flushed = False
self._inspector = OrderedDict()
self._chunk_count = 0
self._pending_record_count = 0
self._committed_record_count = 0
@property
def is_flushed(self):
r |
@is_flushed.setter
def is_flushed(self, value):
self._flushed = True if value else False
@property
def ofile(self):
return self._ofile
@ofile.setter
def ofile(self, value):
self._ofile = set_binary_mode(value)
@property
def pending_record_count(self):
return self._pending_record_count
@property
def _record_count(self):
warnings.warn(
"_record_count will be deprecated soon. Use pending_record_count instead.",
PendingDeprecationWarning
)
return self.pending_record_count
@property
def committed_record_count(self):
return self._committed_record_count
@property
def _total_record_count(self):
warnings.warn(
"_total_record_count will be deprecated soon. Use committed_record_count instead.",
PendingDeprecationWarning
)
return self.committed_record_count
def write(self, data):
bytes_type = bytes if sys.version_info >= (3, 0) else str
if not isinstance(data, bytes_type):
data = data.encode('utf-8')
self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
assert partial is None or isinstance(partial, bool)
assert not (finished is None and partial is None)
assert finished is None or partial is None
self._ensure_validity()
def write_message(self, message_type, message_text, *args, **kwargs):
self._ensure_validity()
self._inspector.setdefault('messages', []).append((message_type, message_text.format(*args, **kwargs)))
def write_record(self, record):
self._ensure_validity()
self._write_record(record)
def write_records(self, records):
self._ensure_validity()
write_record = self._write_record
for record in records:
write_record(record)
def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
assert self._record_count == 0 and len(self._inspector) == 0
raise RuntimeError('I/O operation on closed record writer')
def _write_record(self, record):
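# Each field is emitted as a pair of CSV columns: <field> holds the single-value rendering
# and __mv_<field> holds Splunk's multivalue encoding, where each value is wrapped in '$',
# values are separated by '$;$' and literal '$' characters are escaped as '$$'.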
fieldnames = self._fieldnames
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
get_value = record.get
values = []
for fieldname in fieldnames:
value = get_value(fieldname, None)
if value is None:
values += (None, None)
continue
value_t = type(value)
if issubclass(value_t, (list, tuple)):
if len(value) == 0:
values += (None, None)
continue
if len(value) > 1:
value_list = value
sv = ''
mv = '$'
for value in value_list:
if value is None:
sv += '\n'
mv += '$;$'
continue
value_t = type(value)
if value_t is not bytes:
if value_t is bool:
value = str(value.real)
elif value_t is six.text_type:
value = value
elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
else:
value = repr(value).encode('utf-8', errors='backslashreplace')
sv += value + '\n'
mv += value.replace('$', '$$') + '$;$'
values += (sv[:-1], mv[:-2])
continue
value = value[0]
value_t = type(value)
if value_t is bool:
values += (str(value.real), None)
continue
if value_t is bytes:
values += (value, None)
continue
if value_t is six.text_type:
if six.PY2:
value = value.encode('utf-8')
values += (value, None)
continue
if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
if issubclass(value_t, dict):
values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
continue
values += (repr(value), None)
self._writerow(values)
self._pending_record_count += 1
if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
# noinspection PyUnresolvedReferences
from _json import make_encoder
except ImportError:
# We may be running under PyPy 2.5 which does not include the _json module
_iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
else:
# Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
from json.encoder import encode_basestring_ascii
@staticmethod
def _default(o):
raise TypeError(repr(o) + ' is not JSON serializable')
_iterencode_json = make_encoder(
{}, # markers (for detecting circular references)
_default, # object_encoder
encode_basestring_ascii, # string_encoder
None, # indent
':', ',', # separators
False, # sort_keys
False, # skip_keys
True # allow_nan
)
del make_encoder
class RecordWriterV1(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
if self._chunk_count == 0:
# Messages are written to the messages header when we write the first chunk of data
# Guarantee: These messages are displayed by splunkweb and the job inspector
if messages is not None:
message_level = RecordWriterV1._message_level.get
for level, text in messages:
self.write(message_level(level, level))
self.write('=')
self.write(text)
self.write('\r\n')
self.write('\r\n')
elif messages is not None:
# Messages are written to the messages header when we write subsequent chunks of data
# Guarantee: These messages are displayed by splunkweb and the job inspector, if and only if the
# command is configured with
#
# stderr_dest = message
#
# stderr_dest is a static configuration setting. This means that it can only be set in commands.conf.
# It cannot be set in code.
stderr = sys.stderr
for level, text in messages:
print(level, text, file=stderr)
self.write(self._buffer.getvalue())
self._chunk_count += 1
self._committed_record_count += self.pending_record_count
self._clear()
self._finished = finished is True
_message_level = {
'DEBUG': 'debug_message',
'ERROR': 'error_message',
'FATAL': 'error_message',
'INFO': 'info_message',
'WARN': 'warn_message'
}
class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if partial or not finished:
# Don't flush partial chunks, since the SCP v2 protocol does not
# provide a way to send partial chunks yet.
return
if not self.is_flushed:
self.write_chunk(finished=True)
def write_chunk(self, finished=None):
inspector = self._inspector
self._committed_record_count += self.pending_record_count
self._chunk_count += 1
# TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
# ChunkedExternProcessor (See SPL-103525)
#
# We will need to replace the following block of code with this block:
#
# metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
#
# if partial is True:
# finished = False
if len(inspector) == 0:
inspector = None
metadata = [item for item in (('inspector', inspector), ('finished', finished))]
self._write_chunk(metadata, self._buffer.getvalue())
self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
self.write('\n')
self._clear()
def write_metric(self, name, value):
self._ensure_validity()
self._inspector['metric.' + name] = value
def _clear(self):
super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
if sys.version_info >= (3, 0):
metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
if sys.version_info >= (3, 0):
body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
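# Frame the chunk for the SCP v2 transport: a header line of the form
# 'chunked 1.0,<metadata_length>,<body_length>' followed by the JSON metadata and the CSV body.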
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
self.write(start_line)
self.write(metadata)
self.write(body)
self._ofile.flush()
self._flushed = True
| eturn self._flushed
|
nets.py | """An implementation of matrix capsules with EM routing.
"""
import tensorflow as tf
from core import _conv2d_wrapper, capsules_init, capsules_conv, capsules_fc
slim = tf.contrib.slim
# ------------------------------------------------------------------------------#
# -------------------------------- capsules net --------------------------------#
# ------------------------------------------------------------------------------#
def capsules_v0(inputs, num_classes, iterations, name='CapsuleEM-V0'):
"""Replicate the network in `Matrix Capsules with EM Routing.`
"""
with tf.variable_scope(name) as scope:
# inputs [N, H, W, C] -> conv2d, 5x5, strides 2, channels 32 -> nets [N, OH, OW, 32]
nets = _conv2d_wrapper(
inputs, shape=[5, 5, 1, 32], strides=[1, 2, 2, 1], padding='SAME', add_bias=True, activation_fn=tf.nn.relu, name='conv1'
)
# inputs [N, H, W, C] -> conv2d, 1x1, strides 1, channels 32x(4x4+1) -> (poses, activations)
nets = capsules_init(
nets, shape=[1, 1, 32, 32], strides=[1, 1, 1, 1], padding='VALID', pose_shape=[4, 4], name='capsule_init'
)
# inputs: (poses, activations) -> capsule-conv 3x3x32x32x4x4, strides 2 -> (poses, activations)
nets = capsules_conv(
nets, shape=[3, 3, 32, 32], strides=[1, 2, 2, 1], iterations=iterations, name='capsule_conv1'
)
# inputs: (poses, activations) -> capsule-conv 3x3x32x32x4x4, strides 1 -> (poses, activations)
nets = capsules_conv(
nets, shape=[3, 3, 32, 32], strides=[1, 1, 1, 1], iterations=iterations, name='capsule_conv2'
)
# inputs: (poses, activations) -> capsule-fc 1x1x32x10x4x4 shared view transform matrix within each channel -> (poses, activations)
nets = capsules_fc(
nets, num_classes, iterations=iterations, name='capsule_fc'
)
poses, activations = nets
return poses, activations
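# Usage sketch (shapes and hyperparameters are illustrative, e.g. 28x28 grayscale inputs):
#
# poses, activations = capsules_v0(images, num_classes=10, iterations=3)
# loss = spread_loss(one_hot_labels, activations, margin=0.2, name='spread_loss')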
# ------------------------------------------------------------------------------#
# ------------------------------------ loss ------------------------------------#
# ------------------------------------------------------------------------------#
def spread_loss(labels, activations, margin, name):
"""This adds spread loss to total loss.
:param labels: [N, O], where O is number of output classes, one hot vector, tf.uint8.
:param activations: [N, O], activations.
:param margin: margin 0.2 - 0.9 fixed schedule during training.
:return: spread loss
"""
activations_shape = activations.get_shape().as_list()
with tf.variable_scope(name) as scope:
mask_t = tf.equal(labels, 1)
mask_i = tf.equal(labels, 0)
activations_t = tf.reshape(
tf.boolean_mask(activations, mask_t), [activations_shape[0], 1]
)
activations_i = tf.reshape(
tf.boolean_mask(activations, mask_i), [activations_shape[0], activations_shape[1] - 1]
) |
gap_mit = tf.reduce_sum(
tf.square(
tf.nn.relu(
margin - (activations_t - activations_i)
)
)
)
# tf.add_to_collection(
# tf.GraphKeys.LOSSES, gap_mit
# )
#
# total_loss = tf.add_n(
# tf.get_collection(
# tf.GraphKeys.LOSSES
# ), name='total_loss'
# )
tf.losses.add_loss(gap_mit)
return gap_mit
# ------------------------------------------------------------------------------# |
# margin = tf.Print(
# margin, [margin], 'margin', summarize=20
# ) |
cifar10_keras_sm.py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
from keras.callbacks import ModelCheckpoint
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D, BatchNormalization
from keras.models import Sequential
from keras.optimizers import Adam, SGD, RMSprop
import tensorflow as tf
from keras import backend as K
sess = tf.Session()
K.set_session(sess)
logging.getLogger().setLevel(logging.INFO)
tf.logging.set_verbosity(tf.logging.INFO)
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
NUM_DATA_BATCHES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
INPUT_TENSOR_NAME = 'inputs_input' # needs to match the name of the first layer + "_input"
def keras_model_fn(learning_rate, weight_decay, optimizer, momentum):
"""keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.
The model will be transformed into a TensorFlow Estimator before training and it will be saved in a
TensorFlow Serving SavedModel at the end of training.
Args:
learning_rate, weight_decay, optimizer, momentum: hyperparameters passed to the SageMaker
TrainingJob that runs your TensorFlow training script.
Returns: A compiled Keras model
"""
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))
size = 1
if optimizer.lower() == 'sgd':
opt = SGD(lr=learning_rate * size, decay=weight_decay, momentum=momentum)
elif optimizer.lower() == 'rmsprop':
opt = RMSprop(lr=learning_rate * size, decay=weight_decay)
else:
opt = Adam(lr=learning_rate * size, decay=weight_decay)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
return model
def get_filenames(channel_name, channel):
if channel_name in ['train', 'validation', 'eval']:
return [os.path.join(channel, channel_name + '.tfrecords')]
else:
raise ValueError('Invalid data subset "%s"' % channel_name)
def train_input_fn():
return _input(args.epochs, args.batch_size, args.train, 'train')
def eval_input_fn():
return _input(args.epochs, args.batch_size, args.eval, 'eval')
def validation_input_fn():
return _input(args.epochs, args.batch_size, args.validation, 'validation')
def _input(epochs, batch_size, channel, channel_name):
filenames = get_filenames(channel_name, channel)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.repeat(epochs)
dataset = dataset.prefetch(10)
# Parse records.
dataset = dataset.map(
_dataset_parser, num_parallel_calls=10)
# Potentially shuffle records.
if channel_name == 'train':
# Ensure that the capacity is sufficiently large to provide good random
# shuffling.
buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size
dataset = dataset.shuffle(buffer_size=buffer_size)
# Batch it up.
dataset = dataset.batch(batch_size, drop_remainder=True)
iterator = dataset.make_one_shot_iterator()
image_batch, label_batch = iterator.get_next()
return {INPUT_TENSOR_NAME: image_batch}, label_batch
def _train_preprocess_fn(image):
"""Preprocess a single training image of layout [height, width, depth]."""
# Resize the image to add four extra pixels on each side.
image = tf.image.resize_image_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)
# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
return image
def _dataset_parser(value):
"""Parse a CIFAR-10 record from value."""
featdef = {
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
}
example = tf.parse_single_example(value, featdef)
image = tf.decode_raw(example['image'], tf.uint8)
image.set_shape([DEPTH * HEIGHT * WIDTH])
# Reshape from [depth * height * width] to [depth, height, width].
image = tf.cast(
tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
tf.float32)
label = tf.cast(example['label'], tf.int32)
image = _train_preprocess_fn(image)
return image, tf.one_hot(label, NUM_CLASSES)
def save_model(model, output):
signature = tf.saved_model.signature_def_utils.predict_signature_def(
inputs={'inputs': model.input}, outputs={'scores': model.output})
builder = tf.saved_model.builder.SavedModelBuilder(output+'/1/')
builder.add_meta_graph_and_variables(
sess=K.get_session(),
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={"serving_default": signature})
builder.save()
logging.info("Model successfully saved at: {}".format(output))
return
def main(args):
logging.info("getting data")
train_dataset = train_input_fn()
eval_dataset = eval_input_fn()
validation_dataset = validation_input_fn()
logging.info("configuring model")
model = keras_model_fn(args.learning_rate, args.weight_decay, args.optimizer, args.momentum)
callbacks = []
# ----------- modified section
# callbacks.append(ModelCheckpoint(args.model_dir + '/checkpoint-{epoch}.h5'))
callbacks.append(ModelCheckpoint(args.model_output_dir + '/checkpoint-{epoch}.h5'))
logging.info("Starting training")
model.fit(x=train_dataset[0], y=train_dataset[1],
steps_per_epoch=(num_examples_per_epoch('train') // args.batch_size),
epochs=args.epochs, validation_data=validation_dataset,
validation_steps=(num_examples_per_epoch('validation') // args.batch_size), callbacks=callbacks)
score = model.evaluate(eval_dataset[0], eval_dataset[1], steps=num_examples_per_epoch('eval') // args.batch_size,
verbose=0)
logging.info('Test loss:{}'.format(score[0]))
logging.info('Test accuracy:{}'.format(score[1]))
# ------------- modified section
# return save_model(model, args.model_dir)
return save_model(model, args.model_output_dir)
def num_examples_per_epoch(subset='train'):
if subset == 'train':
return 40000
elif subset == 'validation':
return 10000
elif subset == 'eval':
return 10000
else:
raise ValueError('Invalid data subset "%s"' % subset)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--train',
type=str,
required=False,
default=os.environ.get('SM_CHANNEL_TRAIN'), # ---- modified
help='The directory where the CIFAR-10 input data is stored.')
parser.add_argument(
'--validation',
type=str,
required=False,
default=os.environ.get('SM_CHANNEL_VALIDATION'), # ---- modified
help='The directory where the CIFAR-10 input data is stored.')
parser.add_argument(
'--eval',
type=str,
required=False,
default=os.environ.get('SM_CHANNEL_EVAL'), # ---- modified
help='The directory where the CIFAR-10 input data is stored.')
parser.add_argument(
'--model_dir',
type=str,
required=True,
help='The directory where the model will be stored.')
parser.add_argument(
'--weight-decay',
type=float,
default=2e-4,
help='Weight decay for convolutions.')
parser.add_argument(
'--learning-rate',
type=float,
default=0.001,
help="""\
This is the initial learning rate value. The learning rate will decrease
during training. For more details check the keras_model_fn implementation in
this file.\
""")
parser.add_argument(
'--epochs',
type=int,
default=10,
help='The number of epochs to use for training.') | help='Batch size for training.')
parser.add_argument(
'--optimizer',
type=str,
default='adam')
parser.add_argument(
'--momentum',
type=float,
default=0.9)
# ---------- added section
parser.add_argument(
'--model_output_dir',
type=str,
default=os.environ.get('SM_MODEL_DIR'))
args = parser.parse_args()
main(args) | parser.add_argument(
'--batch-size',
type=int,
default=128, |
weather_list.js | import React, {Component} from 'react';
import {connect} from "react-redux";
import Chart from '../components/chart';
import GoogleMap from '../components/google_map';
class | extends Component {
constructor(props) {
super(props);
this.renderWeather = this.renderWeather.bind(this)
}
renderWeather(cityData) {
const name = cityData.city.name;
const temps = cityData.list.map( weather => weather.main.temp)
const humidity = cityData.list.map( weather => weather.main.humidity)
const pressure = cityData.list.map( weather => weather.main.pressure)
const {lat ,lon} = cityData.city.coord;
return (
<tr key= {name} >
<td>
<GoogleMap lon={lon} lat={lat}/>
</td>
<Chart data = {temps} color = "orange" units="K" />
<Chart data = {pressure} color = "blue" units="hPa" />
<Chart data = {humidity} color = "black" units='%' />
</tr>
)
}
render() {
return (
<table className="table table-hover">
<thead>
<tr>
<th> City</th>
<th> Temperature (K) </th>
<th> Pressure (hPa)</th>
<th> Humidity (%) </th>
</tr>
</thead>
<tbody>
{this.props.weather.map(this.renderWeather)}
</tbody>
</table>
)
}
} //state.weather
// connecting to reducers
function MapStateToProps({ weather }){
return {weather}
}
export default connect(MapStateToProps)( Weather_list);
| Weather_list |
compute_management_client_enums.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class StatusLevelTypes(Enum):
info = "Info"
warning = "Warning"
error = "Error"
class OperatingSystemTypes(Enum):
windows = "Windows"
linux = "Linux"
class VirtualMachineSizeTypes(Enum):
basic_a0 = "Basic_A0"
basic_a1 = "Basic_A1"
basic_a2 = "Basic_A2"
basic_a3 = "Basic_A3"
basic_a4 = "Basic_A4"
standard_a0 = "Standard_A0"
standard_a1 = "Standard_A1"
standard_a2 = "Standard_A2"
standard_a3 = "Standard_A3"
standard_a4 = "Standard_A4"
standard_a5 = "Standard_A5"
standard_a6 = "Standard_A6"
standard_a7 = "Standard_A7"
standard_a8 = "Standard_A8"
standard_a9 = "Standard_A9"
standard_a10 = "Standard_A10"
standard_a11 = "Standard_A11"
standard_d1 = "Standard_D1"
standard_d2 = "Standard_D2"
standard_d3 = "Standard_D3"
standard_d4 = "Standard_D4"
standard_d11 = "Standard_D11"
standard_d12 = "Standard_D12"
standard_d13 = "Standard_D13"
standard_d14 = "Standard_D14"
standard_d1_v2 = "Standard_D1_v2"
standard_d2_v2 = "Standard_D2_v2"
standard_d3_v2 = "Standard_D3_v2"
standard_d4_v2 = "Standard_D4_v2"
standard_d5_v2 = "Standard_D5_v2"
standard_d11_v2 = "Standard_D11_v2"
standard_d12_v2 = "Standard_D12_v2"
standard_d13_v2 = "Standard_D13_v2"
standard_d14_v2 = "Standard_D14_v2"
standard_d15_v2 = "Standard_D15_v2"
standard_ds1 = "Standard_DS1"
standard_ds2 = "Standard_DS2"
standard_ds3 = "Standard_DS3"
standard_ds4 = "Standard_DS4"
standard_ds11 = "Standard_DS11"
standard_ds12 = "Standard_DS12"
standard_ds13 = "Standard_DS13"
standard_ds14 = "Standard_DS14"
standard_ds1_v2 = "Standard_DS1_v2"
standard_ds2_v2 = "Standard_DS2_v2"
standard_ds3_v2 = "Standard_DS3_v2"
standard_ds4_v2 = "Standard_DS4_v2"
standard_ds5_v2 = "Standard_DS5_v2"
standard_ds11_v2 = "Standard_DS11_v2"
standard_ds12_v2 = "Standard_DS12_v2"
standard_ds13_v2 = "Standard_DS13_v2"
standard_ds14_v2 = "Standard_DS14_v2"
standard_ds15_v2 = "Standard_DS15_v2"
standard_g1 = "Standard_G1"
standard_g2 = "Standard_G2"
standard_g3 = "Standard_G3"
standard_g4 = "Standard_G4"
standard_g5 = "Standard_G5"
standard_gs1 = "Standard_GS1"
standard_gs2 = "Standard_GS2"
standard_gs3 = "Standard_GS3"
standard_gs4 = "Standard_GS4"
standard_gs5 = "Standard_GS5"
class CachingTypes(Enum):
none = "None"
read_only = "ReadOnly"
read_write = "ReadWrite"
class DiskCreateOptionTypes(Enum):
from_image = "FromImage"
empty = "Empty"
attach = "Attach"
class PassNames(Enum):
oobe_system = "OobeSystem"
class | (Enum):
microsoft_windows_shell_setup = "Microsoft-Windows-Shell-Setup"
class SettingNames(Enum):
auto_logon = "AutoLogon"
first_logon_commands = "FirstLogonCommands"
class ProtocolTypes(Enum):
http = "Http"
https = "Https"
class ResourceIdentityType(Enum):
system_assigned = "SystemAssigned"
class UpgradeMode(Enum):
automatic = "Automatic"
manual = "Manual"
class VirtualMachineScaleSetSkuScaleType(Enum):
automatic = "Automatic"
none = "None"
class InstanceViewTypes(Enum):
instance_view = "instanceView"
| ComponentNames |
settings.py | # Database
MONGODB_SERVER = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DB = 'nlc' # neural_loop_combiner
MONGO_USERNAME = ''
MONGO_PASSWORD = ''
MONGODB_TRACK_COL = 'tracks'
MONGODB_LOOP_COL = 'loops'
MONGODB_TAG_COL = 'tags'
MONGODB_DATASET_COL = 'datasets'
MONGODB_MODEL_COL = 'models'
# Directory
INT_DIR = 'files/inputs' # put tracks you want to extract here
OUT_DIR = 'files/outputs'
# Others
DUR = 2 | CACHE = True
LOG = True
# Threshold
HASH_TYPE = 'ahash'
HASH_THRESHOLD = 5
EXISTED_THRESHOLD = 0.2
# Datasets
TEST_SIZE = 100
SPLIT_RATIO = 0.8
NG_TYPES = {
'shift' : 1,
'reverse' : 1,
'rearrange': 1,
'random' : 1,
'selected' : 1
}
# Models (default settings)
LR = 0.001
MARGIN = 2
EPOCHS = 2
BATCH_SIZE = 128
LOG_INTERVAL = 10 | SR = 44100 |
python.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// TODO(swernli): The initial version of the parser exposes a subset of the llvm_ir crate API into
// python directly, along with some extensions that provide QIR specific support (such as `get_qubit_static_id`).
// Eventually this should be split up similar to how QIR emission functionality works; these wrappers will
// remain here and provide the pyclass-compatible implementation, the QIR specific extensions will be implemented
// as traits and extended onto the llvm_ir types as part of the qirlib such that they can be conveniently used
// from within rust, and wrappers for each class and function will be added to __init__.py so that the
// parser API can have full python doc comments for usability.
use super::parse::{
BasicBlockExt, CallExt, ConstantExt, FunctionExt, IntructionExt, ModuleExt, NameExt, PhiExt,
TypeExt,
};
use llvm_ir::{self, types::Typed};
use pyo3::{exceptions::PyRuntimeError, prelude::*};
use std::{convert::TryFrom, path::PathBuf};
#[pymodule]
#[pyo3(name = "_native")]
fn native_module(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<PyQirModule>()?;
m.add_class::<PyQirFunction>()?;
m.add_class::<PyQirParameter>()?;
m.add_class::<PyQirBasicBlock>()?;
m.add_class::<PyQirInstruction>()?;
m.add_class::<PyQirTerminator>()?;
m.add_class::<PyQirOperand>()?;
m.add_class::<PyQirConstant>()?;
m.add_class::<PyQirType>()?;
#[pyfn(m)]
fn module_from_bitcode(bc_path: PathBuf) -> PyResult<PyQirModule> {
llvm_ir::Module::from_bc_path(bc_path)
.map(|module| PyQirModule { module })
.map_err(PyRuntimeError::new_err)
}
Ok(())
}
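// Usage sketch from Python (module and import paths are assumptions based on the
// #[pyo3(name = "_native")] attribute above, not a documented API reference):
//
// from _native import module_from_bitcode
// module = module_from_bitcode("program.bc")
// for func in module.get_entrypoint_funcs():
// print(func.name, func.required_qubits)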
#[pyclass]
pub struct PyQirModule {
pub(super) module: llvm_ir::Module,
}
#[pyclass]
pub struct PyQirFunction {
pub(super) function: llvm_ir::Function,
pub(super) types: llvm_ir::types::Types,
}
#[pyclass]
pub struct PyQirParameter {
pub(super) param: llvm_ir::function::Parameter,
}
#[pyclass]
pub struct PyQirBasicBlock {
pub(super) block: llvm_ir::BasicBlock,
pub(super) types: llvm_ir::types::Types,
}
#[pyclass]
pub struct PyQirInstruction {
pub(super) instr: llvm_ir::instruction::Instruction,
pub(super) types: llvm_ir::types::Types,
}
#[pyclass]
pub struct PyQirTerminator {
pub(super) term: llvm_ir::terminator::Terminator,
pub(super) types: llvm_ir::types::Types,
}
#[pyclass]
pub struct PyQirOperand {
pub(super) op: llvm_ir::Operand,
pub(super) types: llvm_ir::types::Types,
}
#[pyclass]
pub struct PyQirConstant {
pub(super) constantref: llvm_ir::ConstantRef,
pub(super) types: llvm_ir::types::Types,
}
#[pyclass]
pub struct PyQirType {
pub(super) typeref: llvm_ir::TypeRef,
}
macro_rules! match_contents {
($target:expr, $pattern:pat, $property:expr) => {
match $target {
$pattern => Some($property),
_ => None,
}
};
}
#[pymethods]
impl PyQirModule {
#[getter]
fn get_functions(&self) -> Vec<PyQirFunction> {
self.module
.functions
.iter()
.map(|f| PyQirFunction {
function: f.clone(),
types: self.module.types.clone(),
})
.collect()
}
fn get_func_by_name(&self, name: &str) -> Option<PyQirFunction> {
self.module.get_func_by_name(name).map(|f| PyQirFunction {
function: f.clone(),
types: self.module.types.clone(),
})
}
fn get_funcs_by_attr(&self, attr: &str) -> Vec<PyQirFunction> {
self.module
.get_funcs_by_attr_name(attr)
.iter()
.map(|f| PyQirFunction {
function: (*f).clone(),
types: self.module.types.clone(),
})
.collect()
}
fn get_entrypoint_funcs(&self) -> Vec<PyQirFunction> {
self.module
.get_entrypoint_funcs()
.iter()
.map(|f| PyQirFunction {
function: (*f).clone(),
types: self.module.types.clone(),
})
.collect()
}
fn get_interop_funcs(&self) -> Vec<PyQirFunction> {
self.module
.get_interop_funcs()
.iter()
.map(|f| PyQirFunction {
function: (*f).clone(),
types: self.module.types.clone(),
})
.collect()
}
}
#[pymethods]
impl PyQirFunction {
#[getter]
fn get_name(&self) -> String {
self.function.name.clone()
}
#[getter]
fn get_parameters(&self) -> Vec<PyQirParameter> {
self.function
.parameters
.iter()
.map(|p| PyQirParameter { param: p.clone() })
.collect()
}
#[getter]
fn get_return_type(&self) -> PyQirType {
PyQirType {
typeref: self.function.return_type.clone(),
}
}
#[getter]
fn get_blocks(&self) -> Vec<PyQirBasicBlock> {
self.function
.basic_blocks
.iter()
.map(|b| PyQirBasicBlock {
block: b.clone(),
types: self.types.clone(),
})
.collect()
}
#[getter]
fn get_required_qubits(&self) -> PyResult<Option<i64>> {
Ok(self.function.get_required_qubits()?)
}
#[getter]
fn get_required_results(&self) -> PyResult<Option<i64>> {
Ok(self.function.get_required_results()?)
}
fn get_attribute_value(&self, attr_name: &str) -> Option<String> {
self.function.get_attribute_value(attr_name)
}
fn get_block_by_name(&self, name: &str) -> Option<PyQirBasicBlock> {
Some(PyQirBasicBlock {
block: self
.function
.get_bb_by_name(&llvm_ir::Name::from(name.to_string()))?
.clone(),
types: self.types.clone(),
})
}
fn get_instruction_by_output_name(&self, name: &str) -> Option<PyQirInstruction> {
Some(PyQirInstruction {
instr: self.function.get_instruction_by_output_name(name)?.clone(),
types: self.types.clone(),
})
}
}
#[pymethods]
impl PyQirParameter {
#[getter]
fn get_name(&self) -> String {
self.param.name.get_string()
}
#[getter]
fn get_type(&self) -> PyQirType {
PyQirType {
typeref: self.param.ty.clone(),
}
}
}
#[pymethods]
impl PyQirBasicBlock {
#[getter]
fn get_name(&self) -> String {
self.block.name.get_string()
}
#[getter]
fn get_instructions(&self) -> Vec<PyQirInstruction> {
self.block
.instrs
.iter()
.map(|i| PyQirInstruction {
instr: i.clone(),
types: self.types.clone(),
})
.collect()
}
#[getter]
fn get_phi_nodes(&self) -> Vec<PyQirInstruction> {
self.block
.get_phi_nodes()
.iter()
.map(|phi| PyQirInstruction {
instr: llvm_ir::Instruction::from(phi.clone()),
types: self.types.clone(),
})
.collect()
}
fn get_phi_pairs_by_source_name(&self, name: &str) -> Vec<(String, PyQirOperand)> {
self.block
.get_phi_pairs_by_source_name(name)
.iter()
.map(|(n, op)| {
(
n.get_string(),
PyQirOperand {
op: op.clone(),
types: self.types.clone(),
},
)
})
.collect()
}
#[getter]
fn get_terminator(&self) -> PyQirTerminator {
PyQirTerminator {
term: self.block.term.clone(),
types: self.types.clone(),
}
}
}
#[pymethods]
impl PyQirInstruction {
#[getter]
fn get_target_operands(&self) -> Vec<PyQirOperand> {
self.instr
.get_target_operands()
.iter()
.map(|op| PyQirOperand {
op: op.clone(),
types: self.types.clone(),
})
.collect()
}
#[getter]
fn get_type(&self) -> PyQirType { | typeref: self.instr.get_type(&self.types),
}
}
#[getter]
fn get_is_add(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Add(_))
}
#[getter]
fn get_is_sub(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Sub(_))
}
#[getter]
fn get_is_mul(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Mul(_))
}
#[getter]
fn get_is_udiv(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::UDiv(_))
}
#[getter]
fn get_is_sdiv(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::SDiv(_))
}
#[getter]
fn get_is_urem(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::URem(_))
}
#[getter]
fn get_is_srem(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::SRem(_))
}
#[getter]
fn get_is_and(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::And(_))
}
#[getter]
fn get_is_or(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Or(_))
}
#[getter]
fn get_is_xor(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Xor(_))
}
#[getter]
fn get_is_shl(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Shl(_))
}
#[getter]
fn get_is_lshr(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::LShr(_))
}
#[getter]
fn get_is_ashr(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::AShr(_))
}
#[getter]
fn get_is_fadd(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FAdd(_))
}
#[getter]
fn get_is_fsub(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FSub(_))
}
#[getter]
fn get_is_fmul(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FMul(_))
}
#[getter]
fn get_is_fdiv(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FDiv(_))
}
#[getter]
fn get_is_frem(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FRem(_))
}
#[getter]
fn get_is_fneg(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FNeg(_))
}
#[getter]
fn get_is_extractelement(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::ExtractElement(_))
}
#[getter]
fn get_is_insertelement(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::InsertElement(_))
}
#[getter]
fn get_is_shufflevector(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::ShuffleVector(_))
}
#[getter]
fn get_is_extractvalue(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::ExtractValue(_))
}
#[getter]
fn get_is_insertvalue(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::InsertValue(_))
}
#[getter]
fn get_is_alloca(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Alloca(_))
}
#[getter]
fn get_is_load(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Load(_))
}
#[getter]
fn get_is_store(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Store(_))
}
#[getter]
fn get_is_getelementptr(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::GetElementPtr(_))
}
#[getter]
fn get_is_trunc(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Trunc(_))
}
#[getter]
fn get_is_zext(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::ZExt(_))
}
#[getter]
fn get_is_sext(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::SExt(_))
}
#[getter]
fn get_is_fptrunc(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FPTrunc(_))
}
#[getter]
fn get_is_fpext(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FPExt(_))
}
#[getter]
fn get_is_fptoui(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FPToUI(_))
}
#[getter]
fn get_is_fptosi(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FPToSI(_))
}
#[getter]
fn get_is_uitofp(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::UIToFP(_))
}
#[getter]
fn get_is_sitofp(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::SIToFP(_))
}
#[getter]
fn get_is_ptrtoint(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::PtrToInt(_))
}
#[getter]
fn get_is_inttoptr(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::IntToPtr(_))
}
#[getter]
fn get_is_bitcast(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::BitCast(_))
}
#[getter]
fn get_is_addrspacecast(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::AddrSpaceCast(_))
}
#[getter]
fn get_is_icmp(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::ICmp(_))
}
#[getter]
fn get_icmp_predicate(&self) -> Option<String> {
Some(
llvm_ir::instruction::ICmp::try_from(self.instr.clone())
.ok()?
.predicate
.to_string(),
)
}
#[getter]
fn get_is_fcmp(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::FCmp(_))
}
#[getter]
fn get_fcmp_predicate(&self) -> Option<String> {
Some(
llvm_ir::instruction::FCmp::try_from(self.instr.clone())
.ok()?
.predicate
.to_string(),
)
}
#[getter]
fn get_is_phi(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Phi(_))
}
#[getter]
fn get_phi_incoming_values(&self) -> Option<Vec<(PyQirOperand, String)>> {
Some(
llvm_ir::instruction::Phi::try_from(self.instr.clone())
.ok()?
.incoming_values
.iter()
.map(|(op, name)| {
(
PyQirOperand {
op: op.clone(),
types: self.types.clone(),
},
name.get_string(),
)
})
.collect(),
)
}
fn get_phi_incoming_value_for_name(&self, name: &str) -> Option<PyQirOperand> {
Some(PyQirOperand {
op: llvm_ir::instruction::Phi::try_from(self.instr.clone())
.ok()?
.get_incoming_value_for_name(name)?,
types: self.types.clone(),
})
}
#[getter]
fn get_is_select(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Select(_))
}
#[getter]
fn get_is_call(&self) -> bool {
matches!(self.instr, llvm_ir::Instruction::Call(_))
}
#[getter]
fn get_call_func_name(&self) -> Option<String> {
Some(
llvm_ir::instruction::Call::try_from(self.instr.clone())
.ok()?
.get_func_name()?
.get_string(),
)
}
#[getter]
fn get_call_func_params(&self) -> Option<Vec<PyQirOperand>> {
Some(
llvm_ir::instruction::Call::try_from(self.instr.clone())
.ok()?
.arguments
.iter()
.map(|o| PyQirOperand {
op: o.0.clone(),
types: self.types.clone(),
})
.collect(),
)
}
#[getter]
fn get_is_qis_call(&self) -> bool {
llvm_ir::instruction::Call::try_from(self.instr.clone()).map_or(false, |c| c.is_qis())
}
#[getter]
fn get_is_rt_call(&self) -> bool {
llvm_ir::instruction::Call::try_from(self.instr.clone()).map_or(false, |c| c.is_rt())
}
#[getter]
fn get_is_qir_call(&self) -> bool {
llvm_ir::instruction::Call::try_from(self.instr.clone()).map_or(false, |c| c.is_qir())
}
#[getter]
fn get_has_output(&self) -> bool {
self.instr.try_get_result().is_some()
}
#[getter]
fn get_output_name(&self) -> Option<String> {
Some(self.instr.try_get_result()?.get_string())
}
}
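// Editor's note (illustrative, not part of the original source): pyo3 exposes each
// `#[getter] fn get_xxx` above as a read-only Python property named `xxx`, so from
// Python these checks read as plain attributes, e.g. (the `instr` variable name is
// an assumption):
//
//     if instr.is_call and instr.is_qis_call:
//         print(instr.call_func_name, instr.output_name)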
#[pymethods]
impl PyQirTerminator {
#[getter]
fn get_is_ret(&self) -> bool {
matches!(self.term, llvm_ir::Terminator::Ret(_))
}
#[getter]
fn get_ret_operand(&self) -> Option<PyQirOperand> {
match_contents!(
&self.term,
llvm_ir::Terminator::Ret(llvm_ir::terminator::Ret {
return_operand,
debugloc: _,
}),
PyQirOperand {
op: return_operand.as_ref()?.clone(),
types: self.types.clone(),
}
)
}
#[getter]
fn get_is_br(&self) -> bool {
matches!(self.term, llvm_ir::Terminator::Br(_))
}
#[getter]
fn get_br_dest(&self) -> Option<String> {
match_contents!(
&self.term,
llvm_ir::Terminator::Br(llvm_ir::terminator::Br { dest, debugloc: _ }),
dest.get_string()
)
}
#[getter]
fn get_is_condbr(&self) -> bool {
matches!(self.term, llvm_ir::Terminator::CondBr(_))
}
#[getter]
fn get_condbr_condition(&self) -> Option<PyQirOperand> {
match_contents!(
&self.term,
llvm_ir::Terminator::CondBr(llvm_ir::terminator::CondBr {
condition,
true_dest: _,
false_dest: _,
debugloc: _,
}),
PyQirOperand {
op: condition.clone(),
types: self.types.clone(),
}
)
}
#[getter]
fn get_condbr_true_dest(&self) -> Option<String> {
match_contents!(
&self.term,
llvm_ir::Terminator::CondBr(llvm_ir::terminator::CondBr {
condition: _,
true_dest,
false_dest: _,
debugloc: _,
}),
true_dest.get_string()
)
}
#[getter]
fn get_condbr_false_dest(&self) -> Option<String> {
match_contents!(
&self.term,
llvm_ir::Terminator::CondBr(llvm_ir::terminator::CondBr {
condition: _,
true_dest: _,
false_dest,
debugloc: _,
}),
false_dest.get_string()
)
}
#[getter]
fn get_is_switch(&self) -> bool {
matches!(self.term, llvm_ir::Terminator::Switch(_))
}
#[getter]
fn get_switch_operand(&self) -> Option<PyQirOperand> {
match_contents!(
&self.term,
llvm_ir::Terminator::Switch(llvm_ir::terminator::Switch {
operand,
dests: _,
default_dest: _,
debugloc: _,
}),
PyQirOperand {
op: operand.clone(),
types: self.types.clone(),
}
)
}
#[getter]
fn get_switch_dests(&self) -> Option<Vec<(PyQirOperand, String)>> {
match_contents!(
&self.term,
llvm_ir::Terminator::Switch(llvm_ir::terminator::Switch {
operand: _,
dests,
default_dest: _,
debugloc: _,
}),
dests
.iter()
.map(|(cref, name)| (
PyQirOperand {
op: llvm_ir::Operand::ConstantOperand(cref.clone()),
types: self.types.clone()
},
name.get_string()
))
.collect()
)
}
#[getter]
fn get_switch_default_dest(&self) -> Option<String> {
match_contents!(
&self.term,
llvm_ir::Terminator::Switch(llvm_ir::terminator::Switch {
operand: _,
dests: _,
default_dest,
debugloc: _,
}),
default_dest.get_string()
)
}
#[getter]
fn get_is_unreachable(&self) -> bool {
matches!(self.term, llvm_ir::Terminator::Unreachable(_))
}
}
#[pymethods]
impl PyQirOperand {
#[getter]
fn get_is_local(&self) -> bool {
matches!(self.op, llvm_ir::Operand::LocalOperand { name: _, ty: _ })
}
#[getter]
fn get_local_name(&self) -> Option<String> {
match_contents!(
&self.op,
llvm_ir::Operand::LocalOperand { name, ty: _ },
name.get_string()
)
}
#[getter]
fn get_local_type(&self) -> Option<PyQirType> {
match_contents!(
&self.op,
llvm_ir::Operand::LocalOperand { name: _, ty },
PyQirType {
typeref: ty.clone(),
}
)
}
#[getter]
fn get_is_constant(&self) -> bool {
matches!(self.op, llvm_ir::Operand::ConstantOperand(_))
}
#[getter]
fn get_constant(&self) -> Option<PyQirConstant> {
match_contents!(
&self.op,
llvm_ir::Operand::ConstantOperand(cref),
PyQirConstant {
constantref: cref.clone(),
types: self.types.clone(),
}
)
}
}
#[pymethods]
impl PyQirConstant {
#[getter]
fn get_is_int(&self) -> bool {
matches!(
self.constantref.as_ref(),
llvm_ir::Constant::Int { bits: _, value: _ }
)
}
#[getter]
fn get_int_value(&self) -> Option<u64> {
match_contents!(
self.constantref.as_ref(),
llvm_ir::Constant::Int { bits: _, value },
*value
)
}
#[getter]
fn get_int_width(&self) -> Option<u32> {
match_contents!(
&self.constantref.as_ref(),
llvm_ir::Constant::Int { bits, value: _ },
*bits
)
}
#[getter]
fn get_is_float(&self) -> bool {
matches!(self.constantref.as_ref(), llvm_ir::Constant::Float(_))
}
#[getter]
fn get_float_double_value(&self) -> Option<f64> {
match_contents!(
&self.constantref.as_ref(),
llvm_ir::Constant::Float(llvm_ir::constant::Float::Double(d)),
*d
)
}
#[getter]
fn get_is_null(&self) -> bool {
matches!(self.constantref.as_ref(), llvm_ir::Constant::Null(_))
}
#[getter]
fn get_is_aggregate_zero(&self) -> bool {
matches!(
self.constantref.as_ref(),
llvm_ir::Constant::AggregateZero(_)
)
}
#[getter]
fn get_is_array(&self) -> bool {
matches!(
self.constantref.as_ref(),
llvm_ir::Constant::Array {
element_type: _,
elements: _,
}
)
}
#[getter]
fn get_is_vector(&self) -> bool {
matches!(self.constantref.as_ref(), llvm_ir::Constant::Vector(_))
}
#[getter]
fn get_is_undef(&self) -> bool {
matches!(self.constantref.as_ref(), llvm_ir::Constant::Undef(_))
}
#[getter]
fn get_is_global_reference(&self) -> bool {
matches!(
self.constantref.as_ref(),
llvm_ir::Constant::GlobalReference { name: _, ty: _ }
)
}
#[getter]
fn get_type(&self) -> PyQirType {
PyQirType {
typeref: self.constantref.get_type(&self.types),
}
}
#[getter]
fn get_is_qubit(&self) -> bool {
self.get_type().get_is_qubit()
}
#[getter]
fn get_qubit_static_id(&self) -> Option<u64> {
self.constantref.qubit_id()
}
#[getter]
fn get_is_result(&self) -> bool {
self.get_type().get_is_result()
}
#[getter]
fn get_result_static_id(&self) -> Option<u64> {
self.constantref.result_id()
}
}
#[pymethods]
impl PyQirType {
#[getter]
fn get_is_void(&self) -> bool {
matches!(self.typeref.as_ref(), llvm_ir::Type::VoidType)
}
#[getter]
fn get_is_integer(&self) -> bool {
matches!(
self.typeref.as_ref(),
llvm_ir::Type::IntegerType { bits: _ }
)
}
#[getter]
fn get_integer_width(&self) -> Option<u32> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::IntegerType { bits },
*bits
)
}
#[getter]
fn get_is_pointer(&self) -> bool {
matches!(
self.typeref.as_ref(),
llvm_ir::Type::PointerType {
pointee_type: _,
addr_space: _,
}
)
}
#[getter]
fn get_pointer_type(&self) -> Option<PyQirType> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::PointerType {
pointee_type,
addr_space: _
},
PyQirType {
typeref: pointee_type.clone()
}
)
}
#[getter]
fn get_pointer_addrspace(&self) -> Option<u32> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::PointerType {
pointee_type: _,
addr_space
},
*addr_space
)
}
#[getter]
fn get_is_double(&self) -> bool {
matches!(
self.typeref.as_ref(),
llvm_ir::Type::FPType(llvm_ir::types::FPType::Double)
)
}
#[getter]
fn get_is_array(&self) -> bool {
matches!(
self.typeref.as_ref(),
llvm_ir::Type::ArrayType {
element_type: _,
num_elements: _,
}
)
}
#[getter]
fn get_array_element_type(&self) -> Option<PyQirType> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::ArrayType {
element_type,
num_elements: _,
},
PyQirType {
typeref: element_type.clone()
}
)
}
#[getter]
fn get_array_num_elements(&self) -> Option<usize> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::ArrayType {
element_type: _,
num_elements,
},
*num_elements
)
}
#[getter]
fn get_is_struct(&self) -> bool {
matches!(
self.typeref.as_ref(),
llvm_ir::Type::StructType {
element_types: _,
is_packed: _,
}
)
}
#[getter]
fn get_struct_element_types(&self) -> Option<Vec<PyQirType>> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::StructType {
element_types,
is_packed: _
},
element_types
.iter()
.map(|t| PyQirType { typeref: t.clone() })
.collect()
)
}
#[getter]
fn get_is_named_struct(&self) -> bool {
matches!(
self.typeref.as_ref(),
llvm_ir::Type::NamedStructType { name: _ }
)
}
#[getter]
fn get_named_struct_name(&self) -> Option<String> {
match_contents!(
self.typeref.as_ref(),
llvm_ir::Type::NamedStructType { name },
name.clone()
)
}
#[getter]
fn get_is_qubit(&self) -> bool {
self.typeref.is_qubit()
}
#[getter]
fn get_is_result(&self) -> bool {
self.typeref.is_result()
}
}
si5351.go
package si5351
import (
"errors"
"io"
)
// DefaultI2CAddress is the default address of the Si5351 on the I2C bus.
const DefaultI2CAddress uint8 = 0x60
// Si5351 represents the chip.
type Si5351 struct {
Crystal Crystal
InputDivider ClockDivider
pll []*PLL
fractionalOutput []*FractionalOutput
integerOutput []*IntegerOutput
bus Bus
}
// Bus on which to communicate with the Si5351.
type Bus interface {
ReadReg(reg uint8, p []byte) (int, error)
WriteReg(reg uint8, values ...byte) (int, error)
RegWriter(reg uint8) io.Writer
Err() error
Close() error
}
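// Illustrative sketch (editor's addition, not part of the original source): any I2C
// transport can back the driver by implementing Bus. A no-op stub, handy in tests,
// could look like this; a real implementation would forward the register reads and
// writes to an I2C device at DefaultI2CAddress.
type nopBus struct{ err error }

func (b *nopBus) ReadReg(reg uint8, p []byte) (int, error)        { return len(p), b.err }
func (b *nopBus) WriteReg(reg uint8, values ...byte) (int, error) { return len(values), b.err }
func (b *nopBus) RegWriter(reg uint8) io.Writer                   { return io.Discard }
func (b *nopBus) Err() error                                      { return b.err }
func (b *nopBus) Close() error                                    { return nil }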
// New returns a new Si5351 instance.
func New(crystal Crystal, bus Bus) *Si5351 {
return &Si5351{
Crystal: crystal,
pll: loadPLLs(bus),
fractionalOutput: loadFractionalOutputs(bus),
integerOutput: loadIntegerOutputs(bus),
bus: bus,
}
}
// StartSetup starts the setup sequence of the Si5351:
// * disable all outputs
// * power down all output drivers
// * set the CLKIN input divider
// * set the internal load capacitance of the crystal
// After these steps the individual setup of PLLs and Clocks should take place.
// As last setup step, don't forget to call FinishSetup.
func (s *Si5351) StartSetup() error {
s.Shutdown()
s.bus.WriteReg(RegCrystalInternalLoadCapacitance, byte(s.Crystal.Load))
return s.bus.Err()
}
// FinishSetup finishes the setup sequence:
// * reset the PLLs
// * enable all outputs
func (s *Si5351) FinishSetup() error {
s.resetAllPLLs()
s.enableAllOutputs(true)
return s.bus.Err()
}
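// Illustrative usage sketch (editor's addition, not part of the original source):
// the call order described above, with the crystal, bus and target frequencies
// assumed to be supplied by the caller. A real setup would normally also call
// PrepareOutputs to bind CLK0 to the chosen PLL before setting its frequency.
func exampleSi5351Setup(crystal Crystal, bus Bus, pllFreq, outFreq Frequency) error {
	dev := New(crystal, bus)
	if err := dev.StartSetup(); err != nil {
		return err
	}
	// Configure PLL A first, then derive the CLK0 frequency from it.
	if _, err := dev.SetupPLL(PLLA, pllFreq); err != nil {
		return err
	}
	if _, err := dev.SetOutputFrequency(Clk0, outFreq); err != nil {
		return err
	}
	return dev.FinishSetup()
}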
// PLL returns the PLL with the given index.
func (s *Si5351) PLL(pll PLLIndex) *PLL {
return s.pll[pll]
}
// PLLA returns PLL A.
func (s *Si5351) PLLA() *PLL {
return s.pll[PLLA]
}
// PLLB returns PLL B.
func (s *Si5351) PLLB() *PLL {
return s.pll[PLLB]
}
// Output returns the output with the given index.
func (s *Si5351) Output(output OutputIndex) *Output {
if output <= Clk5 {
return &s.fractionalOutput[output].Output
}
return &s.integerOutput[output].Output
}
// Clk0 returns the output CLK0.
func (s *Si5351) Clk0() *FractionalOutput {
return s.fractionalOutput[Clk0]
}
// Clk1 returns the output CLK1
func (s *Si5351) Clk1() *FractionalOutput {
return s.fractionalOutput[Clk1]
}
// Clk2 returns the output CLK2
func (s *Si5351) Clk2() *FractionalOutput {
return s.fractionalOutput[Clk2]
}
// Clk3 returns the output CLK3
func (s *Si5351) Clk3() *FractionalOutput {
return s.fractionalOutput[Clk3]
}
// Clk4 returns the output CLK4
func (s *Si5351) Clk4() *FractionalOutput {
return s.fractionalOutput[Clk4]
}
// Clk5 returns the output CLK5
func (s *Si5351) Clk5() *FractionalOutput {
return s.fractionalOutput[Clk5]
}
// Clk6 returns the output CLK6
func (s *Si5351) Clk6() *IntegerOutput {
return s.integerOutput[0]
}
// Clk7 returns the output CLK7
func (s *Si5351) Clk7() *IntegerOutput {
return s.integerOutput[1]
}
// SetupPLLInputSource writes the input source configuration to the Si5351's register.
func (s *Si5351) SetupPLLInputSource(clkinInputDivider ClockDivider, pllASource, pllBSource PLLInputSource) error {
value := byte((clkinInputDivider&0xF)<<4) |
byte((pllASource&1)<<s.PLLA().Register.InputSourceOffset) |
byte((pllBSource&1)<<s.PLLB().Register.InputSourceOffset)
s.bus.WriteReg(RegPLLInputSource, value)
if s.bus.Err() == nil {
s.InputDivider = clkinInputDivider
s.PLLA().InputSource = pllASource
s.PLLB().InputSource = pllBSource
}
return s.bus.Err()
}
// SetupPLLRaw directly sets the frequency multiplier parameters for the given PLL and resets it.
func (s *Si5351) SetupPLLRaw(pll PLLIndex, a, b, c uint32) error {
s.pll[pll].SetupMultiplier(FractionalRatio{A: a, B: b, C: c})
s.pll[pll].Reset()
return s.bus.Err()
}
// SetupMultisynthRaw directly sets the frequency divider and RDiv parameters for the Multisynth of the given output.
func (s *Si5351) SetupMultisynthRaw(output OutputIndex, a, b, c uint32, RDiv ClockDivider) error {
if int(output) >= len(s.fractionalOutput) {
return errors.New("only CLK0-CLK5 are currently supported")
}
s.fractionalOutput[output].SetupDivider(FractionalRatio{A: a, B: b, C: c})
return s.bus.Err()
}
// SetupPLL sets the given PLL to the closest possible value of the given frequency and resets it.
func (s *Si5351) SetupPLL(pll PLLIndex, frequency Frequency) (Frequency, error) {
multiplier := FindFractionalMultiplier(s.Crystal.Frequency(), frequency)
s.pll[pll].SetupMultiplier(multiplier)
s.pll[pll].Reset()
return multiplier.Multiply(s.Crystal.Frequency()), s.bus.Err()
}
// PrepareOutputs prepares the given outputs for use with the given PLL and control parameters.
func (s *Si5351) PrepareOutputs(pll PLLIndex, invert bool, inputSource ClockInputSource, drive OutputDrive, outputs ...OutputIndex) error {
for _, output := range outputs {
s.Output(output).SetupControl(false, false, pll, invert, inputSource, drive)
}
return s.bus.Err()
}
// SetOutputFrequency sets the given output to the closest possible value of the given frequency that can be
// generated with the PLL the output is associated with. Set the frequency of the PLL first.
// The method returns the effective output frequency.
func (s *Si5351) SetOutputFrequency(output OutputIndex, frequency Frequency) (Frequency, error) {
if int(output) >= len(s.fractionalOutput) {
return 0, errors.New("only CLK0-CLK5 are currently supported")
}
o := s.fractionalOutput[output]
pllFrequency := s.pll[o.PLL].Multiplier.Multiply(s.Crystal.Frequency())
divider := FindFractionalDivider(pllFrequency, frequency)
o.SetupDivider(divider)
return divider.Divide(pllFrequency), s.bus.Err()
}
// SetOutputDivider sets the divider of the given output.
// The method returns the effective output frequency.
func (s *Si5351) SetOutputDivider(output OutputIndex, a, b, c uint32) (Frequency, error) {
if int(output) >= len(s.fractionalOutput) {
return 0, errors.New("only CLK0-CLK5 are currently supported")
}
o := s.fractionalOutput[output]
pllFrequency := s.pll[o.PLL].Multiplier.Multiply(s.Crystal.Frequency())
divider := FractionalRatio{A: a, B: b, C: c}
o.SetupDivider(divider)
return divider.Divide(pllFrequency), s.bus.Err()
}
// SetupQuadratureOutput sets up the given PLL and the given outputs to generate the closest possible value
// of the given frequency with a quadrature signal (90° phase shifted) on the second output.
// The method returns the effective PLL frequency and the effective output frequency.
func (s *Si5351) SetupQuadratureOutput(pll PLLIndex, phase, quadrature OutputIndex, frequency Frequency) (Frequency, Frequency, error) {
if int(phase) >= len(s.fractionalOutput) || int(quadrature) >= len(s.fractionalOutput) {
return 0, 0, errors.New("only CLK0-CLK5 are currently supported")
}
p := s.pll[pll]
i := s.fractionalOutput[phase]
q := s.fractionalOutput[quadrature]
// Find the multiplier and an integer divider.
multiplier, divider := FindFractionalMultiplierWithIntegerDivider(s.Crystal.Frequency(), frequency)
pllFrequency := multiplier.Multiply(s.Crystal.Frequency())
outputFrequency := divider.Divide(pllFrequency)
i.SetPLL(pll)
q.SetPLL(pll)
p.SetupMultiplier(multiplier)
i.SetupDivider(divider)
q.SetupDivider(divider)
shift := uint8(divider.A & 0xFF)
i.SetupPhaseShift(0)
q.SetupPhaseShift(shift)
p.Reset()
return pllFrequency, outputFrequency, s.bus.Err()
}
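// Illustrative sketch (editor's addition, not part of the original source): an I/Q
// pair on CLK0/CLK1 driven from PLL A; the target frequency is assumed to come from
// the caller. The quadrature output is phase-shifted by 90° relative to the first.
func exampleQuadrature(dev *Si5351, f Frequency) (Frequency, Frequency, error) {
	return dev.SetupQuadratureOutput(PLLA, Clk0, Clk1, f)
}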
// Shutdown the Si5351: disable all outputs, power down all output drivers.
func (s *Si5351) Shutdown() error {
s.enableAllOutputs(false)
s.powerDownAllOutputDrivers()
return s.bus.Err()
}
func (s *Si5351) enableAllOutputs(enabled bool) error {
var value byte
if enabled {
value = 0x00
} else {
value = 0xFF
}
_, err := s.bus.WriteReg(RegOutputEnableControl, value)
return err
}
func (s *Si5351) powerDownAllOutputDrivers() error {
// for all clocks: power down, fractional division mode, PLLA, not inverted, Multisynth, 2mA
_, err := s.bus.WriteReg(RegClk0Control,
0x80,
0x80,
0x80,
0x80,
0x80,
0x80,
0x80,
0x80,
)
return err
}
func (s *Si5351) resetAllPLLs() error {
value := byte((1 << 7) | (1 << 5))
s.bus.WriteReg(RegPLLReset, value)
return s.bus.Err()
}
UpdateModelVersionCommand.ts
import { FraudDetectorClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FraudDetectorClient";
import { UpdateModelVersionRequest, UpdateModelVersionResult } from "../models/models_0";
import {
deserializeAws_json1_1UpdateModelVersionCommand,
serializeAws_json1_1UpdateModelVersionCommand,
} from "../protocols/Aws_json1_1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export type UpdateModelVersionCommandInput = UpdateModelVersionRequest;
export type UpdateModelVersionCommandOutput = UpdateModelVersionResult & __MetadataBearer;
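/*
 * Usage sketch (editor's illustration, not part of the generated source): commands in
 * this SDK are dispatched through the corresponding client via `client.send(...)`.
 * The client options and input field names below are assumptions.
 *
 *   import { FraudDetectorClient } from "../FraudDetectorClient";
 *
 *   const client = new FraudDetectorClient({});
 *   const response = await client.send(
 *     new UpdateModelVersionCommand({
 *       modelId: "my_model",                 // assumed field
 *       modelType: "ONLINE_FRAUD_INSIGHTS",  // assumed field
 *       majorVersionNumber: "1",             // assumed field
 *     })
 *   );
 */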
/**
* <p>Updates a model version. Updating a model version retrains an existing model version using updated training data and produces a new minor version of the model. You can update the training data set location and data access role attributes using this action. This action creates and trains a new minor version of the model, for example version 1.01, 1.02, 1.03.</p>
*/
export class UpdateModelVersionCommand extends $Command<
UpdateModelVersionCommandInput,
UpdateModelVersionCommandOutput,
FraudDetectorClientResolvedConfig
> {
private resolved = false;
// Start section: command_properties
// End section: command_properties
constructor(readonly input: UpdateModelVersionCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: FraudDetectorClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<UpdateModelVersionCommandInput, UpdateModelVersionCommandOutput> {
if (!this.resolved) {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
this.resolved = true;
}
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "FraudDetectorClient";
const commandName = "UpdateModelVersionCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: UpdateModelVersionRequest.filterSensitiveLog,
outputFilterSensitiveLog: UpdateModelVersionResult.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: UpdateModelVersionCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_json1_1UpdateModelVersionCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<UpdateModelVersionCommandOutput> {
return deserializeAws_json1_1UpdateModelVersionCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
}
_help.py
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['sf'] = """
type: group
short-summary: Manage and administer Azure Service Fabric clusters.
"""
helps['sf application'] = """
type: group
short-summary: Manage applications running on an Azure Service Fabric cluster. Only support ARM deployed applications.
"""
helps['sf application create'] = """
type: command
short-summary: Create a new application on an Azure Service Fabric cluster.
examples:
- name: Create application "testApp" with parameters. The application type "TestAppType" version "v1" should already exist in the cluster, and the application parameters should be defined in the application manifest.
text: >
az sf application create -g testRG -c testCluster --application-name testApp --application-type-name TestAppType \\
--application-type-version v1 --application-parameters key0=value0
- name: Create application "testApp" and app type version using the package url provided.
text: >
az sf application create -g testRG -c testCluster --application-name testApp --application-type-name TestAppType \\
--application-type-version v1 --package-url "https://sftestapp.blob.core.windows.net/sftestapp/testApp_1.0.sfpkg" \\
--application-parameters key0=value0
"""
helps['sf application update'] = """
type: command
short-summary: Update an Azure Service Fabric application. This allows updating the application parameters and/or upgrading the application type version, which will trigger an application upgrade.
examples:
- name: Update application parameters, upgrade policy values and app type version to v2.
text: >
az sf application update -g testRG -c testCluster --application-name testApp --application-type-version v2 \\
--application-parameters key0=value0 --health-check-stable-duration 0 --health-check-wait-duration 0 --health-check-retry-timeout 0 \\
--upgrade-domain-timeout 5000 --upgrade-timeout 7000 --failure-action Rollback --upgrade-replica-set-check-timeout 300 --force-restart
- name: Update application minimum and maximum nodes.
text: >
az sf application update -g testRG -c testCluster --application-name testApp --minimum-nodes 1 --maximum-nodes 3
"""
helps['sf application certificate'] = """
type: group
short-summary: Manage the certificate of an application.
"""
helps['sf application certificate add'] = """
type: command
short-summary: Add a new certificate to the Virtual Machine Scale Sets that make up the cluster to be used by hosted applications.
examples:
- name: Add an application certificate.
text: >
az sf application certificate add -g group-name -c cluster1 --secret-identifier 'https://{KeyVault}.vault.azure.net/secrets/{Secret}'
"""
helps['sf application show'] = """
type: command
short-summary: Show the properties of an application on an Azure Service Fabric cluster.
examples:
- name: Get application.
text: >
az sf application show -g testRG -c testCluster --application-name testApp
"""
helps['sf application list'] = """
type: command
short-summary: List applications of a given cluster.
examples:
- name: List applications for a given cluster.
text: >
az sf application list -g testRG -c testCluster
"""
helps['sf application delete'] = """
type: command
short-summary: Delete an application.
examples:
- name: Delete application.
text: >
az sf application delete -g testRG -c testCluster --application-name testApp
"""
helps['sf application-type'] = """
type: group
short-summary: Manage application types and their versions on an Azure Service Fabric cluster. Only support ARM deployed application types.
"""
helps['sf application-type create'] = """
type: command
short-summary: Create a new application type on an Azure Service Fabric cluster.
examples:
- name: Create new application type.
text: >
az sf application-type create -g testRG -c testCluster --application-type-name testAppType
"""
helps['sf application-type show'] = """
type: command
short-summary: Show the properties of an application type on an Azure Service Fabric cluster.
examples:
- name: Get application type.
text: >
az sf application-type show -g testRG -c testCluster --application-type-name CalcServiceApp
"""
helps['sf application-type list'] = """
type: command
short-summary: List application types of a given cluster.
examples:
- name: List application types for a given cluster.
text: >
az sf application-type list -g testRG -c testCluster
"""
helps['sf application-type delete'] = """
type: command
short-summary: Delete an application type.
examples:
- name: Delete application type.
text: >
az sf application-type delete -g testRG -c testCluster --application-type-name CalcServiceApp
"""
helps['sf application-type version'] = """
type: group
short-summary: Manage application type versions on an Azure Service Fabric cluster. Only support ARM deployed application type versions.
"""
helps['sf application-type version create'] = """
type: command
short-summary: Create a new application type version on an Azure Service Fabric cluster.
examples:
- name: Create new application type version using the provided package url. The version in the application manifest contained in the package should have the same version as the one specified in --version.
text: >
az sf application-type version create -g testRG -c testCluster --application-type-name testAppType \\
--version 1.0 --package-url "https://sftestapp.blob.core.windows.net/sftestapp/testApp_1.0.sfpkg"
"""
helps['sf application-type version show'] = """
type: command
short-summary: Show the properties of an application type version on an Azure Service Fabric cluster.
examples:
- name: Show the properties of an application type version on an Azure Service Fabric cluster.
text: >
az sf application-type version show -g testRG -c testCluster --application-type-name CalcServiceApp --version 1.0
"""
helps['sf application-type version list'] = """
type: command
short-summary: List versions of a given application type.
examples:
- name: List versions for a particular application type.
text: >
az sf application-type version list -g testRG -c testCluster --application-type-name CalcServiceApp
"""
helps['sf application-type version delete'] = """
type: command
short-summary: Delete an application type version.
examples:
- name: Delete application type version.
text: >
az sf application-type version delete -g testRG -c testCluster --application-type-name CalcServiceApp --version 1.0
"""
helps['sf service'] = """
type: group
short-summary: Manage services running on an Azure Service Fabric cluster. Only support ARM deployed services.
"""
helps['sf service create'] = """
type: command
short-summary: Create a new service on an Azure Service Fabric cluster.
examples:
- name: Create a new stateless service "testApp~testService1" with instance count -1 (on all the nodes).
text: >
az sf service create -g testRG -c testCluster --application-name testApp --state stateless --service-name testApp~testService \\
--service-type testStateless --instance-count -1 --partition-scheme singleton
- name: Create a new stateful service "testApp~testService2" with a target of 5 nodes.
text: >
az sf service create -g testRG -c testCluster --application-name testApp --state stateful --service-name testApp~testService2 \\
--service-type testStatefulType --min-replica-set-size 3 --target-replica-set-size 5
"""
helps['sf service show'] = """
type: command
short-summary: Get a service.
examples:
- name: Show the properties of a service on an Azure Service Fabric cluster.
text: >
az sf service show -g testRG -c testCluster --application-name testApp --service-name testApp~testService
"""
helps['sf service list'] = """
type: command
short-summary: List services of a given application.
examples:
- name: List services.
text: >
az sf service list -g testRG -c testCluster --application-name testApp
"""
helps['sf service delete'] = """
type: command
short-summary: Delete a service.
examples:
- name: Delete service.
text: >
az sf service delete -g testRG -c testCluster --application-name testApp --service-name testApp~testService
"""
helps['sf cluster'] = """
type: group
short-summary: Manage an Azure Service Fabric cluster.
"""
helps['sf cluster certificate'] = """
type: group
short-summary: Manage a cluster certificate.
"""
helps['sf cluster certificate add'] = """
type: command
short-summary: Add a secondary cluster certificate to the cluster.
examples:
- name: Add a certificate to a cluster using a keyvault secret identifier.
text: |
az sf cluster certificate add -g group-name -c cluster1 \\
--secret-identifier 'https://{KeyVault}.vault.azure.net/secrets/{Secret}'
- name: Add a self-signed certificate to a cluster.
text: >
az sf cluster certificate add -g group-name -c cluster1 --certificate-subject-name test.com
- name: Add a secondary cluster certificate to the cluster. (autogenerated)
text: az sf cluster certificate add --cluster-name cluster1 --resource-group group-name --secret-identifier 'https://{KeyVault}.vault.azure.net/secrets/{Secret}' --vault-name MyVault
crafted: true
"""
helps['sf cluster certificate remove'] = """
type: command
short-summary: Remove a certificate from a cluster.
examples:
- name: Remove a certificate by thumbprint.
text: >
az sf cluster certificate remove -g group-name -c cluster1 --thumbprint '5F3660C715EBBDA31DB1FFDCF508302348DE8E7A'
"""
helps['sf cluster client-certificate'] = """
type: group
short-summary: Manage the client certificate of a cluster.
"""
helps['sf cluster client-certificate add'] = """
type: command
short-summary: Add a common name or certificate thumbprint to the cluster for client authentication.
examples:
- name: Add client certificate by thumbprint
text: >
az sf cluster client-certificate add -g group-name -c cluster1 --thumbprint '5F3660C715EBBDA31DB1FFDCF508302348DE8E7A'
"""
helps['sf cluster client-certificate remove'] = """
type: command
short-summary: Remove client certificates or subject names used for authentication.
examples:
- name: Remove a client certificate by thumbprint.
text: >
az sf cluster client-certificate remove -g group-name -c cluster1 --thumbprint '5F3660C715EBBDA31DB1FFDCF508302348DE8E7A'
"""
helps['sf cluster create'] = """
type: command
short-summary: Create a new Azure Service Fabric cluster.
examples:
- name: Create a cluster with a given size and self-signed certificate that is downloaded locally.
text: >
az sf cluster create -g group-name -c cluster1 -l westus --cluster-size 4 --vm-password Password#1234 --certificate-output-folder MyCertificates --certificate-subject-name cluster1
- name: Use a keyvault certificate and custom template to deploy a cluster.
text: >
az sf cluster create -g group-name -c cluster1 -l westus --template-file template.json \\
--parameter-file parameter.json --secret-identifier https://{KeyVault}.vault.azure.net:443/secrets/{MyCertificate}
"""
helps['sf cluster durability'] = """
type: group
short-summary: Manage the durability of a cluster.
"""
helps['sf cluster durability update'] = """
type: command
short-summary: Update the durability tier or VM SKU of a node type in the cluster.
examples:
- name: Change the cluster durability level to 'Silver'.
text: >
az sf cluster durability update -g group-name -c cluster1 --durability-level Silver --node-type nt1
"""
helps['sf cluster list'] = """
type: command
short-summary: List cluster resources.
"""
helps['sf cluster node'] = """
type: group
short-summary: Manage the node instance of a cluster.
"""
helps['sf cluster node add'] = """
type: command
short-summary: Add nodes to a node type in a cluster.
examples:
- name: Add 2 'nt1' nodes to a cluster.
text: >
az sf cluster node add -g group-name -c cluster1 --number-of-nodes-to-add 2 --node-type 'nt1'
"""
helps['sf cluster node remove'] = """
type: command
short-summary: Remove nodes from a node type in a cluster.
examples:
- name: Remove 2 'nt1' nodes from a cluster.
text: >
az sf cluster node remove -g group-name -c cluster1 --node-type 'nt1' --number-of-nodes-to-remove 2
"""
helps['sf cluster node-type'] = """
type: group
short-summary: Manage the node-type of a cluster.
"""
helps['sf cluster node-type add'] = """
type: command
short-summary: Add a new node type to a cluster.
examples:
- name: Add a new node type to a cluster.
text: >
az sf cluster node-type add -g group-name -c cluster1 --node-type 'n2' --capacity 5 --vm-user-name 'adminName' --vm-password testPassword0
"""
helps['sf cluster reliability'] = """
type: group
short-summary: Manage the reliability of a cluster.
"""
helps['sf cluster reliability update'] = """
type: command
short-summary: Update the reliability tier for the primary node in a cluster.
examples:
- name: Change the cluster reliability level to 'Silver'.
text: >
az sf cluster reliability update -g group-name -c cluster1 --reliability-level Silver
"""
helps['sf cluster setting'] = """
type: group
short-summary: Manage a cluster's settings.
"""
helps['sf cluster setting remove'] = """
type: command
short-summary: Remove settings from a cluster.
examples:
- name: Remove the `MaxFileOperationTimeout` setting from a cluster.
text: >
az sf cluster setting remove -g group-name -c cluster1 --section 'NamingService' --parameter 'MaxFileOperationTimeout'
"""
helps['sf cluster setting set'] = """
type: command
short-summary: Update the settings of a cluster.
examples:
- name: Set the `MaxFileOperationTimeout` setting for a cluster to 5 seconds.
text: >
az sf cluster setting set -g group-name -c cluster1 --section 'NamingService' --parameter 'MaxFileOperationTimeout' --value 5000
"""
helps['sf cluster upgrade-type'] = """
type: group
short-summary: Manage the upgrade type of a cluster.
"""
helps['sf cluster upgrade-type set'] = """
type: command
short-summary: Change the upgrade type for a cluster.
examples:
- name: Set a cluster to use the 'Automatic' upgrade mode.
text: >
az sf cluster upgrade-type set -g group-name -c cluster1 --upgrade-mode Automatic
"""
helps['sf managed-cluster'] = """
type: group
short-summary: Manage an Azure Service Fabric managed cluster.
"""
helps['sf managed-cluster show'] = """
type: command
short-summary: Show the properties of an Azure Service Fabric managed cluster.
examples:
- name: Get cluster.
text: >
az sf managed-cluster show -g testRG -c testCluster
"""
helps['sf managed-cluster list'] = """
type: command
short-summary: List managed clusters.
examples:
- name: List clusters by resource group.
text: >
az sf managed-cluster list -g testRG
- name: List clusters by subscription.
text: >
az sf managed-cluster list
"""
helps['sf managed-cluster create'] = """
type: command
short-summary: Create a managed cluster.
examples:
- name: Create cluster with standard sku and client cert by thumbprint.
text: >
az sf managed-cluster create -g testRG -c testCluster -l eastus2 --cert-thumbprint XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX --cert-is-admin --admin-password PassTest123@ --sku Standard
- name: Create cluster with standard sku and client cert by common name.
text: >
az sf managed-cluster create -g testRG -c testCluster -l eastus2 --cert-common-name Contoso.com --cert-issuer-thumbprint XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX --cert-is-admin --admin-password PassTest123@ --sku Standard
"""
helps['sf managed-cluster update'] = """
type: command
short-summary: Update a managed cluster.
examples:
- name: Update cluster client port and dns name.
text: >
az sf managed-cluster update -g testRG -c testCluster --client-port 50000 --dns-name testnewdns
"""
helps['sf managed-cluster delete'] = """
type: command
short-summary: Delete a managed cluster.
examples:
- name: Delete cluster.
text: >
az sf managed-cluster delete -g testRG -c testCluster
"""
helps['sf managed-cluster client-certificate'] = """
type: group
short-summary: Manage client certificates of a managed cluster.
"""
helps['sf managed-cluster client-certificate add'] = """
type: command
short-summary: Add a new client certificate to the managed cluster.
examples:
- name: Add admin client certificate by thumbprint.
text: >
az sf managed-cluster client-certificate add -g testRG -c testCluster --thumbprint XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX --is-admin
- name: Add non admin client certificate by common name.
text: >
az sf managed-cluster client-certificate add -g testRG -c testCluster --common-name Contoso.com --issuer-thumbprint XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
"""
helps['sf managed-cluster client-certificate delete'] = """
type: command
short-summary: Delete a client certificate from the managed cluster.
examples:
- name: Delete client certificate by thumbprint.
text: >
az sf managed-cluster client-certificate delete -g testRG -c testCluster --thumbprint XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- name: Delete client certificate by common name.
text: >
az sf managed-cluster client-certificate delete -g testRG -c testCluster --common-name Contoso.com
"""
helps['sf managed-node-type'] = """
type: group
short-summary: Manage a node type of an Azure Service Fabric managed cluster.
"""
helps['sf managed-node-type show'] = """
type: command
short-summary: Show the properties of a node type.
examples:
- name: Get node type.
text: >
az sf managed-node-type show -g testRG -c testCluster -n pnt
"""
helps['sf managed-node-type list'] = """
type: command
short-summary: List node types of a managed cluster.
examples:
- name: List node types by cluster.
text: >
az sf managed-node-type list -g testRG -c testCluster
"""
helps['sf managed-node-type create'] = """
type: command
short-summary: Create a node type in a managed cluster.
examples:
- name: Create primary node type with 5 nodes.
text: >
az sf managed-node-type create -g testRG -c testCluster -n pnt --instance-count 5 --primary
- name: Create non-primary node type with placement properties, capacities and ports.
text: >
az sf managed-node-type create -g testRG -c testCluster -n snt --instance-count 5 --placement-property NodeColor=Green SomeProperty=5 --capacity ClientConnections=65536 --app-start-port 20575 --app-end-port 20605 --ephemeral-start-port 20606 --ephemeral-end-port 20861
"""
helps['sf managed-node-type update'] = """
type: command
short-summary: Update a node type of a managed cluster.
examples:
- name: Update the instance count of the node type.
text: >
az sf managed-node-type update -g testRG -c testCluster -n snt --instance-count 7
- name: Update placement properties of the node type. This will overwrite older placement properties if any.
text: >
az sf managed-node-type update -g testRG -c testCluster -n snt --placement-property NodeColor=Red SomeProperty=6
"""
helps['sf managed-node-type delete'] = """
type: command
short-summary: Delete node type from a cluster.
examples:
- name: Delete cluster.
text: >
az sf managed-node-type delete -g testRG -c testCluster -n snt
"""
helps['sf managed-node-type node'] = """
type: group
short-summary: Perform operations on nodes of a node type on managed clusters.
"""
helps['sf managed-node-type node restart'] = """
type: command
short-summary: Restart nodes of a node type.
examples:
- name: Restart 2 nodes.
text: >
az sf managed-node-type node restart -g testRG -c testCluster -n snt --node-name snt_0 snt_1
"""
helps['sf managed-node-type node reimage'] = """
type: command
short-summary: Reimage nodes of a node type.
examples:
- name: Reimage 2 nodes.
text: >
az sf managed-node-type node reimage -g testRG -c testCluster -n snt --node-name snt_0 snt_1
"""
helps['sf managed-node-type node delete'] = """
type: command
short-summary: Delete nodes of a node type.
examples:
- name: Delete 2 nodes.
text: >
az sf managed-node-type node delete -g testRG -c testCluster -n snt --node-name snt_0 snt_1
"""
helps['sf managed-node-type vm-extension'] = """
type: group
short-summary: Manage vm extensions on a node type on managed clusters.
"""
helps['sf managed-node-type vm-extension add'] = """
type: command
short-summary: Add an extension to the node type.
examples:
- name: Add bg extension.
text: >
az sf managed-node-type vm-extension add -g testRG -c testCluster -n snt --extension-name csetest --publisher Microsoft.Compute --extension-type BGInfo --type-handler-version 2.1 --auto-upgrade-minor-version
"""
helps['sf managed-node-type vm-extension delete'] = """
type: command
short-summary: Delete an extension from the node type.
examples:
- name: Delete extension by name.
text: >
az sf managed-node-type vm-extension delete -g testRG -c testCluster -n snt --extension-name csetest
"""
helps['sf managed-node-type vm-secret'] = """
type: group
short-summary: Manage vm secrets on a node type on managed clusters.
"""
helps['sf managed-node-type vm-secret add'] = """
type: command
short-summary: Add a secret to the node type.
examples:
- name: Add certificate to the node type as a secret.
text: >
az sf managed-node-type vm-secret add -g testRG -c testCluster -n snt --source-vault-id /subscriptions/XXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/testRG/providers/Microsoft.KeyVault/vaults/testkv --certificate-url https://testskv.vault.azure.net:443/secrets/TestCert/xxxxxxxxxxxxxxxxxxxxxxxx --certificate-store my
"""
helps['sf managed-application'] = """
type: group
short-summary: Manage applications running on an Azure Service Fabric managed cluster. Only support ARM deployed applications.
"""
helps['sf managed-application create'] = """
type: command
short-summary: Create a new managed application on an Azure Service Fabric managed cluster.
examples:
- name: Create managed application "testApp" with parameters. The application type "TestAppType" version "v1" should already exist in the cluster, and the application parameters should be defined in the application manifest.
text: >
az sf managed-application create -g testRG -c testCluster --application-name testApp --application-type-name TestAppType \\
--application-type-version v1 --application-parameters key0=value0 --tags key1=value1
- name: Create application "testApp" and app type version using the package url provided.
text: >
az sf managed-application create -g testRG -c testCluster --application-name testApp --application-type-name TestAppType \\
--application-type-version v1 --package-url "https://sftestapp.blob.core.windows.net/sftestapp/testApp_1.0.sfpkg" \\
--application-parameters key0=value0
"""
helps['sf managed-application update'] = """
type: command
short-summary: Update an Azure Service Fabric managed application.
long-summary: This allows for updating the tags, the application parameters, the application UpgradePolicy, and/or upgrading the application type version, which will trigger an application upgrade.
examples:
- name: Update application parameters, upgrade policy values and app type version to v2.
text: >
az sf managed-application update -g testRG -c testCluster --application-name testApp --application-type-version v2 \\
--application-parameters key0=value0 --health-check-stable-duration 0 --health-check-wait-duration 0 --health-check-retry-timeout 0 \\
--upgrade-domain-timeout 5000 --upgrade-timeout 7000 --failure-action Rollback --upgrade-replica-set-check-timeout 300 --force-restart
- name: Update managed application service type health policy map.
text: >
az sf managed-application update -g testRG -c testCluster --application-name testApp --service-type-health-policy-map \"ServiceTypeName01\"=\"5,10,5\" \"ServiceTypeName02\"=\"5,5,5\"
"""
helps['sf managed-application show'] = """
type: command
short-summary: Show the properties of a managed application on an Azure Service Fabric managed cluster.
examples:
- name: Get managed application.
text: >
az sf managed-application show -g testRG -c testCluster --application-name testApp
"""
helps['sf managed-application list'] = """
type: command
short-summary: List managed applications of a given managed cluster.
examples:
- name: List managed applications for a given managed cluster.
text: >
az sf managed-application list -g testRG -c testCluster
"""
helps['sf managed-application delete'] = """
type: command
short-summary: Delete a managed application.
examples:
- name: Delete managed application.
text: >
az sf managed-application delete -g testRG -c testCluster --application-name testApp
"""
helps['sf managed-application-type'] = """
type: group
short-summary: Manage application types and their versions on an Azure Service Fabric managed cluster. Only support ARM deployed application types.
"""
helps['sf managed-application-type create'] = """
type: command
short-summary: Create a new managed application type on an Azure Service Fabric managed cluster.
examples:
- name: Create new managed application type.
text: >
az sf managed-application-type create -g testRG -c testCluster --application-type-name testAppType
"""
helps['sf managed-application-type show'] = """
type: command
short-summary: Show the properties of a managed application type on an Azure Service Fabric managed cluster.
examples:
- name: Get managed application type.
text: >
az sf managed-application-type show -g testRG -c testCluster --application-type-name CalcServiceApp
"""
helps['sf managed-application-type list'] = """
type: command
short-summary: List managed application types of a given managed cluster.
examples:
- name: List managed application types for a given managed cluster.
text: >
az sf managed-application-type list -g testRG -c testCluster
"""
helps['sf managed-application-type update'] = """
type: command
short-summary: Update a managed application type.
long-summary: This allows for updating of application type tags.
examples:
- name: Update application type tags.
text: >
az sf managed-application-type update -g testRG -c testCluster --application-type-name CalcServiceApp --tags new=tags are=nice
"""
helps['sf managed-application-type delete'] = """
type: command
short-summary: Delete a managed application type.
examples:
- name: Delete managed application type.
text: >
az sf managed-application-type delete -g testRG -c testCluster --application-type-name CalcServiceApp
"""
helps['sf managed-application-type version'] = """
type: group
short-summary: Manage application type versions on an Azure Service Fabric managed cluster. Only support ARM deployed application type versions.
"""
helps['sf managed-application-type version create'] = """
type: command
short-summary: Create a new managed application type version on an Azure Service Fabric managed cluster.
examples:
- name: Create new managed application type version using the provided package url. The version in the application manifest contained in the package should have the same version as the one specified in --version.
text: >
az sf managed-application-type version create -g testRG -c testCluster --application-type-name testAppType \\
--version 1.0 --package-url "https://sftestapp.blob.core.windows.net/sftestapp/testApp_1.0.sfpkg"
"""
helps['sf managed-application-type version show'] = """
type: command
short-summary: Show the properties of a managed application type version on an Azure Service Fabric managed cluster.
examples:
- name: Show the properties of a managed application type version on an Azure Service Fabric managed cluster.
text: >
az sf managed-application-type version show -g testRG -c testCluster --application-type-name CalcServiceApp --version 1.0
"""
helps['sf managed-application-type version list'] = """
type: command
short-summary: List versions of a given managed application type.
examples:
- name: List versions for a particular managed application type.
text: >
az sf managed-application-type version list -g testRG -c testCluster --application-type-name CalcServiceApp
"""
helps['sf managed-application-type version update'] = """
type: command
short-summary: Update a managed application type version.
long-summary: This allows for updating of application type version tags and the package url.
examples:
- name: Update managed application type version.
text: >
az sf managed-application-type version update -g testRG -c testCluster --application-type-name CalcServiceApp --version 1.0 --tags new=tags
"""
helps['sf managed-application-type version delete'] = """
type: command
short-summary: Delete a managed application type version.
examples:
- name: Delete managed application type version.
text: >
az sf managed-application-type version delete -g testRG -c testCluster --application-type-name CalcServiceApp --version 1.0
"""
helps['sf managed-service'] = """
type: group
short-summary: Manage services running on an Azure Service Fabric managed cluster. Only support ARM deployed services.
"""
helps['sf managed-service create'] = """
type: command
short-summary: Create a new managed service on an Azure Service Fabric managed cluster.
examples:
- name: Create a new stateless managed service "testService1" with instance count -1 (on all the nodes).
text: >
az sf managed-service create -g testRG -c testCluster --application-name testApp --state stateless --service-name testService \\
--service-type testStateless --instance-count -1 --partition-scheme singleton
- name: Create a new stateful service "testService2" with a target of 5 nodes.
text: >
az sf managed-service create -g testRG -c testCluster --application-name testApp --state stateful --service-name testService2 --has-persisted-state \\
--service-type testStatefulType --min-replica-set-size 3 --target-replica-set-size 5 --partition-scheme uniformint64range --partition-count 1 --low-key 0 --high-key 25
"""
helps['sf managed-service show'] = """
type: command
short-summary: Get a service.
examples:
- name: Show the properties of a managed service on an Azure Service Fabric managed cluster.
text: >
az sf managed-service show -g testRG -c testCluster --application-name testApp --service-name testService
"""
helps['sf managed-service list'] = """
type: command
short-summary: List managed services of a given managed application.
examples:
- name: List managed services.
text: >
az sf managed-service list -g testRG -c testCluster --application-name testApp
"""
helps['sf managed-service update'] = """
type: command
short-summary: Update a managed service.
examples:
- name: Update managed stateless service.
text: >
az sf managed-service update -g testRG -c testCluster --application-name testApp --service-name testService --min-instance-count 2 \\
--min-instance-percentage 20 --instance-close-delay-duration '00:11:00'
- name: Update managed stateful service.
text: >
az sf managed-service update -g testRG -c testCluster --application-name testApp --service-name testService2 --service-placement-time-limit '00:11:00' \\
--stand-by-replica-keep-duration '00:11:00' --replica-restart-wait-duration '00:11:00' --quorum-loss-wait-duration '00:11:00'
"""
helps['sf managed-service delete'] = """
type: command
short-summary: Delete a managed service.
examples:
- name: Delete managed service.
text: >
az sf managed-service delete -g testRG -c testCluster --application-name testApp --service-name testService
"""
helps['sf managed-service correlation-scheme'] = """
type: group
short-summary: Manage correlation schemes of services running on an Azure Service Fabric managed cluster. Only support ARM deployed services.
"""
helps['sf managed-service correlation-scheme create'] = """
type: command
short-summary: Create a new managed service correlation scheme on an Azure Service Fabric managed cluster.
long-summary: Create a new managed service correlation scheme on an Azure Service Fabric managed cluster. NOTE You can only have one service correlation per service.
examples:
- name: Create a new managed service correlation scheme.
text: >
az sf managed-service correlation-scheme create -g testRG -c testCluster --application-name testApp --service-name testService \\
--correlated-service-name "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/testRg/providers/Microsoft.ServiceFabric/managedclusters/testCluster/applications/testApp/services/testService2" \\
--scheme AlignedAffinity
"""
helps['sf managed-service correlation-scheme update'] = """
type: command
short-summary: Update a managed service correlation scheme.
examples:
- name: Update managed service correlation scheme.
text: >
az sf managed-service correlation-scheme update -g testRG -c testCluster --application-name testApp --service-name testService \\
--correlated-service-name "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/testRg/providers/Microsoft.ServiceFabric/managedclusters/testCluster/applications/testApp/services/testService2" \\
--scheme NonAlignedAffinity
"""
helps['sf managed-service correlation-scheme delete'] = """
type: command
short-summary: Delete a managed service correlation scheme.
examples:
- name: Delete managed service correlation scheme.
text: >
az sf managed-service correlation-scheme delete -g testRG -c testCluster --application-name testApp --service-name testService \\
--correlated-service-name "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/testRg/providers/Microsoft.ServiceFabric/managedclusters/testCluster/applications/testApp/services/testService2"
"""
helps['sf managed-service load-metrics'] = """
type: group
short-summary: Manage service load metrics running on an Azure Service Fabric managed cluster. Only support ARM deployed services.
"""
helps['sf managed-service load-metrics create'] = """
type: command
short-summary: Create a new managed service load metric on an Azure Service Fabric managed cluster.
examples:
- name: Create a new stateless managed service load metric.
text: >
az sf managed-service load-metrics create -g testRG -c testCluster --application-name testApp --service-name testService \\
--metric-name Metric1 --weight Low --default-load 3
- name: Create a new stateful service load metric.
text: >
az sf managed-service load-metrics create -g testRG -c testCluster --application-name testApp --service-name testService2 \\
--metric-name Metric2 --weight High --primary-default-load 3 --secondary-default-load 2
"""
helps['sf managed-service load-metrics update'] = """
type: command
short-summary: Update a managed service load metric.
examples:
- name: Update a stateless managed service load metric.
text: >
az sf managed-service load-metrics update -g testRG -c testCluster --application-name testApp --service-name testService \\
--metric-name Metric1 --weight Medium --default-load 5
- name: Update a stateful managed service load metric.
text: >
az sf managed-service load-metrics update -g testRG -c testCluster --application-name testApp --service-name testService2 \\
--metric-name Metric2 --weight Low --primary-default-load 2 --secondary-default-load 1
"""
helps['sf managed-service load-metrics delete'] = """
type: command
short-summary: Delete a managed service load metric.
examples:
- name: Delete managed service load metric.
text: >
az sf managed-service load-metrics delete -g testRG -c testCluster --application-name testApp --service-name testService2 \\
--metric-name Metric1
""" | - name: Update managed application type version. |
clientset_generated.go | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset"
zfsv1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1"
fakezfsv1 "github.com/openebs/zfs-localpv/pkg/generated/clientset/internalclientset/typed/zfs/v1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
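// A minimal usage sketch, not part of the generated code: it seeds the fake
// clientset with an existing object and reads it back through the typed client.
// The ZFSVolumes accessor and the Get signature are assumptions based on the
// generated ZfsV1 interface, not guaranteed by this file.
//
//	cs := NewSimpleClientset(existingVolume)
//	vol, err := cs.ZfsV1().ZFSVolumes("openebs").Get("vol-1", metav1.GetOptions{})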
func NewSimpleClientset(objects ...runtime.Object) *Clientset |
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
tracker testing.ObjectTracker
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
var _ clientset.Interface = &Clientset{}
// ZfsV1 retrieves the ZfsV1Client
func (c *Clientset) ZfsV1() zfsv1.ZfsV1Interface {
return &fakezfsv1.FakeZfsV1{Fake: &c.Fake}
}
| {
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{tracker: o}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
} |
zz_generated_scheme.go | package v3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
GroupName = "management.cattle.io"
Version = "v3"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func | (resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&NodePool{},
&NodePoolList{},
&Node{},
&NodeList{},
&NodeDriver{},
&NodeDriverList{},
&NodeTemplate{},
&NodeTemplateList{},
&Project{},
&ProjectList{},
&GlobalRole{},
&GlobalRoleList{},
&GlobalRoleBinding{},
&GlobalRoleBindingList{},
&RoleTemplate{},
&RoleTemplateList{},
&PodSecurityPolicyTemplate{},
&PodSecurityPolicyTemplateList{},
&PodSecurityPolicyTemplateProjectBinding{},
&PodSecurityPolicyTemplateProjectBindingList{},
&ClusterRoleTemplateBinding{},
&ClusterRoleTemplateBindingList{},
&ProjectRoleTemplateBinding{},
&ProjectRoleTemplateBindingList{},
&Cluster{},
&ClusterList{},
&ClusterRegistrationToken{},
&ClusterRegistrationTokenList{},
&Catalog{},
&CatalogList{},
&Template{},
&TemplateList{},
&CatalogTemplate{},
&CatalogTemplateList{},
&CatalogTemplateVersion{},
&CatalogTemplateVersionList{},
&TemplateVersion{},
&TemplateVersionList{},
&TemplateContent{},
&TemplateContentList{},
&Group{},
&GroupList{},
&GroupMember{},
&GroupMemberList{},
&Principal{},
&PrincipalList{},
&User{},
&UserList{},
&AuthConfig{},
&AuthConfigList{},
&LdapConfig{},
&LdapConfigList{},
&Token{},
&TokenList{},
&DynamicSchema{},
&DynamicSchemaList{},
&Preference{},
&PreferenceList{},
&UserAttribute{},
&ProjectNetworkPolicy{},
&ProjectNetworkPolicyList{},
&ClusterLogging{},
&ClusterLoggingList{},
&ProjectLogging{},
&ProjectLoggingList{},
&ListenConfig{},
&ListenConfigList{},
&Setting{},
&SettingList{},
&ClusterAlert{},
&ClusterAlertList{},
&ProjectAlert{},
&ProjectAlertList{},
&Notifier{},
&NotifierList{},
&ClusterAlertGroup{},
&ClusterAlertGroupList{},
&ProjectAlertGroup{},
&ProjectAlertGroupList{},
&ClusterAlertRule{},
&ClusterAlertRuleList{},
&ProjectAlertRule{},
&ProjectAlertRuleList{},
&ComposeConfig{},
&ComposeConfigList{},
&ProjectCatalog{},
&ProjectCatalogList{},
&ClusterCatalog{},
&ClusterCatalogList{},
&MultiClusterApp{},
&MultiClusterAppList{},
&MultiClusterAppRevision{},
&MultiClusterAppRevisionList{},
&GlobalDNS{},
&GlobalDNSList{},
&GlobalDNSProvider{},
&GlobalDNSProviderList{},
&KontainerDriver{},
&KontainerDriverList{},
&EtcdBackup{},
&EtcdBackupList{},
&ClusterScan{},
&ClusterScanList{},
&MonitorMetric{},
&MonitorMetricList{},
&ClusterMonitorGraph{},
&ClusterMonitorGraphList{},
&ProjectMonitorGraph{},
&ProjectMonitorGraphList{},
&CloudCredential{},
&CloudCredentialList{},
&ClusterTemplate{},
&ClusterTemplateList{},
&ClusterTemplateRevision{},
&ClusterTemplateRevisionList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
| Resource |
demo.rs | //! Demo page
use yew::prelude::*;
#[derive(Properties, Clone, PartialEq)]
pub struct Props | pub demo_id: usize,
}
pub struct Demo {
props: Props,
}
impl Component for Demo {
type Message = ();
type Properties = Props;
fn create(props: Self::Properties, _: ComponentLink<Self>) -> Self {
Self { props }
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
false
}
fn change(&mut self, _: Self::Properties) -> ShouldRender {
false
}
fn view(&self) -> Html {
match self.props.demo_id {
1 => { // Music part separation with U-Net
html! {
<>
<h2>{"U-Netで音楽パート分離"}</h2>
<a href="https://github.com/mori97/U-Net_MUSDB18">{"[GitHub]"}</a><br/>
{"ディープラーニングで楽曲を4つのパート"}
<ul>
<li>{"ボーカル(vocal)"}</li>
<li>{"ドラム(drums)"}</li>
<li>{"ベース(bass)"}</li>
<li>{"その他(others)"}</li>
</ul>
{"に分離します。"}
<h3>{"Demo"}</h3>
<h4>{"入力"}</h4>
<div>
<audio controls=true src="audio/unetdemo_mixture.wav">
{"Your browser does not support the <code>audio</code> element."}
</audio>
<figure class="spectrograms">
<img src="images/unetdemo_spec_mixture.png" alt="楽曲のスペクトログラム" />
<figcaption>{"楽曲のスペクトログラム"}</figcaption>
</figure>
</div>
<h4>{"出力"}</h4>
<h5>{"ボーカル(vocal)パート"}</h5>
<div>
<audio controls=true src="audio/unetdemo_vocal.wav">
{"Your browser does not support the <code>audio</code> element."}
</audio>
<figure class="spectrograms">
<img src="images/unetdemo_spec_vocal.png" alt="ボーカル(vocal)のスペクトログラム" />
<figcaption>{"ボーカル(vocal)のスペクトログラム"}</figcaption>
</figure>
</div>
<h5>{"ドラム(drums)パート"}</h5>
<div>
<audio controls=true src="audio/unetdemo_drum.wav">
{"Your browser does not support the <code>audio</code> element."}
</audio>
<figure class="spectrograms">
<img src="images/unetdemo_spec_drums.png" alt="ドラム(drums)のスペクトログラム" />
<figcaption>{"ドラム(drums)のスペクトログラム"}</figcaption>
</figure>
</div>
<h5>{"ベース(bass)パート"}</h5>
<div>
<audio controls=true src="audio/unetdemo_bass.wav">
{"Your browser does not support the <code>audio</code> element."}
</audio>
<figure class="spectrograms">
<img src="images/unetdemo_spec_bass.png" alt="ベース(Bass)のスペクトログラム" />
<figcaption>{"ベース(Bass)のスペクトログラム"}</figcaption>
</figure>
</div>
<h5>{"その他(others)パート"}</h5>
<div>
<audio controls=true src="audio/unetdemo_others.wav">
{"Your browser does not support the <code>audio</code> element."}
</audio>
<figure class="spectrograms">
<img src="images/unetdemo_spec_others.png" alt="その他(others)のスペクトログラム" />
<figcaption>{"その他(others)のスペクトログラム"}</figcaption>
</figure>
</div>
</>
}
},
_ => {
html! {}
},
}
}
}
| {
|
test_views.py | import pytest
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from wagtail.core.models import Page
from wagtail_personalisation.models import Segment
from wagtail_personalisation.rules import VisitCountRule
from wagtail_personalisation.views import (
SegmentModelAdmin, SegmentModelDeleteView)
@pytest.mark.django_db
def test_segment_user_data_view_requires_admin_access(site, client, django_user_model):
|
@pytest.mark.django_db
def test_segment_user_data_view(site, client, mocker, django_user_model):
user1 = django_user_model.objects.create(username='first')
user2 = django_user_model.objects.create(username='second')
admin_user = django_user_model.objects.create(
username='admin', is_superuser=True)
segment = Segment(type=Segment.TYPE_STATIC, count=1)
segment.save()
segment.static_users.add(user1)
segment.static_users.add(user2)
rule1 = VisitCountRule(counted_page=site.root_page, segment=segment)
rule2 = VisitCountRule(counted_page=site.root_page.get_last_child(),
segment=segment)
rule1.save()
rule2.save()
mocker.patch('wagtail_personalisation.rules.VisitCountRule.get_user_info_string',
side_effect=[3, 9, 0, 1])
client.force_login(admin_user)
response = client.get(
reverse('segment:segment_user_data', args=(segment.id,)))
assert response.status_code == 200
data_lines = response.content.decode().split("\n")
assert data_lines[0] == 'Username,Visit count - Test page,Visit count - Regular page\r'
assert data_lines[1] == 'first,3,9\r'
assert data_lines[2] == 'second,0,1\r'
@pytest.mark.django_db
def test_segment_delete_view_delete_instance(rf, segmented_page, user):
user.is_superuser = True
user.save()
segment = segmented_page.personalisation_metadata.segment
canonical_page = segmented_page.personalisation_metadata.canonical_page
variants_metadata = segment.get_used_pages()
page_variants = Page.objects.filter(pk__in=(
variants_metadata.values_list('variant_id', flat=True)
))
# Make sure all canonical page, variants and variants metadata exist
assert canonical_page
assert page_variants
assert variants_metadata
# Delete the segment via the method on the view.
request = rf.get('/{}'.format(segment.pk))
request.user = user
view = SegmentModelDeleteView(
instance_pk=str(segment.pk),
model_admin=SegmentModelAdmin()
)
view.request = request
view.delete_instance()
# Segment has been deleted.
with pytest.raises(segment.DoesNotExist):
segment.refresh_from_db()
# Canonical page stayed intact.
canonical_page.refresh_from_db()
# Variant pages and their metadata have been deleted.
assert not page_variants.all()
assert not variants_metadata.all()
@pytest.mark.django_db
def test_segment_delete_view_raises_permission_denied(rf, segmented_page, user):
segment = segmented_page.personalisation_metadata.segment
request = rf.get('/{}'.format(segment.pk))
request.user = user
view = SegmentModelDeleteView(
instance_pk=str(segment.pk),
model_admin=SegmentModelAdmin()
)
view.request = request
message = 'User has no permission to delete variant page objects.'
with pytest.raises(PermissionDenied):
view.delete_instance()
| user = django_user_model.objects.create(username='first')
segment = Segment(type=Segment.TYPE_STATIC, count=1)
segment.save()
client.force_login(user)
url = reverse('segment:segment_user_data', args=(segment.id,))
response = client.get(url)
assert response.status_code == 302
assert response.url == '/admin/login/?next=%s' % url |
create-markdown.js | const _ = require('lodash')
const fs = require('fs')
const path = require('path')
const {ChartJSNodeCanvas} = require('chartjs-node-canvas')
const exec = require('child_process').execFileSync
const LIB_FOLDER = path.join(__dirname, '../lib')
const UPDATES_FOLDER = path.join(LIB_FOLDER, 'markdown/updates')
const PARTIALS_FOLDER = path.join(LIB_FOLDER, 'markdown/')
const MD_TEMPLATE = fs.readFileSync(path.join(LIB_FOLDER, 'markdown/template.md'), 'utf8')
const createMarkdownString = _.template(MD_TEMPLATE)
const ALL_DATA = require('../.tmp/combined-data.json')
const ALL_CATEGORIES = require('../data/categories.json')
const DATA_BY_CATEGORY = _.groupBy(ALL_DATA, entity => entity.categories[0])
_.forEach(ALL_CATEGORIES, (value, id) =>
_.assign(value, {id, totalExecutionTime: _.sumBy(DATA_BY_CATEGORY[id], 'totalExecutionTime')})
)
function createUpdatesContent() {
let updates = []
for (const file of fs.readdirSync(UPDATES_FOLDER)) {
const dateRegex = /^(\d{4}-\d{2}-\d{2})/
if (!dateRegex.test(file)) continue
const datePart = file.match(dateRegex)[1]
updates.push(
`## ${datePart} dataset\n\n` + fs.readFileSync(path.join(UPDATES_FOLDER, file), 'utf8')
)
}
return updates.join('\n\n')
}
function createPartialsContent() {
const partials = {}
for (const file of fs.readdirSync(PARTIALS_FOLDER)) {
const partialsRegex = /^(.*)\.partial\.md$/
if (!file.includes('.partial.')) continue
if (!partialsRegex.test(file)) continue
const partialName = file.match(partialsRegex)[1]
partials[partialName] = fs
.readFileSync(path.join(PARTIALS_FOLDER, file), 'utf8')
.replace(/---(.|\s)*?---/m, '')
}
return partials
}
function createMarkdownTable(headers, rows) {
return [
`| ${headers.join(' | ')} |`,
`| ${headers.map(() => '--').join(' | ')} |`,
...rows.map(cells => `| ${cells.join(' | ')} |`),
].join('\n')
}
async function createChartImages() {
const categories = _(ALL_CATEGORIES)
.values()
.sortBy('totalExecutionTime')
.reverse()
.value()
const chartByCategory = new ChartJSNodeCanvas({width: 600, height: 300})
const buffer = await chartByCategory.renderToBuffer({
options: {
legend: {
position: 'left',
labels: {
fontStyle: 'bold'
}
},
},
type: 'doughnut',
data: {
labels: _.map(categories, 'title'),
datasets: [
{
label: 'Breakdown By Category',
backgroundColor: _.map(categories, 'color'),
borderWidth: 1.5,
data: _.map(categories, 'totalExecutionTime'),
},
],
},
}, 'image/png')
fs.writeFileSync(path.join(__dirname, '../by-category.png'), buffer)
}
function createCategorySection(category) {
const categoryRows = _.sortBy(DATA_BY_CATEGORY[category.id], 'averageExecutionTime')
.filter(entry => entry.totalOccurrences >= 1000)
.map((entry, rank) => [
rank + 1,
entry.homepage ? `[${entry.name}](${entry.homepage})` : entry.name,
entry.totalOccurrences.toLocaleString(),
Math.round(entry.averageExecutionTime) + ' ms',
])
const table = createMarkdownTable(['Rank', 'Name', 'Usage', 'Average Impact'], categoryRows)
return [
`<a name="${category.id}"></a>`,
`#### ${category.title}\n`,
`${category.description}\n`,
table,
].join('\n')
}
async function | () {
const categoryTOC = _.map(
ALL_CATEGORIES,
category => `1. [${category.title}](#${category.id})`
).join('\n ')
const categoryContents = _.map(ALL_CATEGORIES, createCategorySection).join('\n\n')
const topDataRows = _.sortBy(ALL_DATA, 'totalExecutionTime')
.reverse()
.slice(0, 200)
.map(entry => [
entry.homepage ? `[${entry.name}](${entry.homepage})` : entry.name,
entry.totalOccurrences.toLocaleString(),
Math.round(entry.totalExecutionTime / 1000).toLocaleString() + ' s',
Math.round(entry.averageExecutionTime) + ' ms',
])
await createChartImages()
const readmePath = path.join(__dirname, '../README.md')
fs.writeFileSync(
readmePath,
createMarkdownString({
partials: createPartialsContent(),
updates_contents: createUpdatesContent(),
category_table_of_contents: categoryTOC,
category_contents: categoryContents,
all_data: createMarkdownTable(
['Name', 'Popularity', 'Total Impact', 'Average Impact'],
topDataRows
),
})
)
exec(path.join(__dirname, '../node_modules/.bin/prettier'), ['--write', readmePath])
}
run().catch(console.error)
| run |
benchmark.rs | extern crate bytes;
extern crate criterion;
extern crate prost;
extern crate protobuf;
#[macro_use]
extern crate failure;
use std::fs::File;
use std::io::Read;
use std::result;
use criterion::{
Benchmark,
Criterion,
Throughput,
};
use prost::Message;
use protobuf::benchmarks::{
BenchmarkDataset,
proto2,
proto3,
};
type Result = result::Result<(), failure::Error>;
fn benchmark_dataset<M>(criterion: &mut Criterion, dataset: BenchmarkDataset) -> Result
where M: prost::Message + Default + 'static {
let payload_len = dataset.payload.iter().map(Vec::len).sum::<usize>();
let messages = dataset.payload.iter().map(|buf| M::decode(buf)).collect::<result::Result<Vec<_>, _>>()?;
let encoded_len = messages.iter().map(|message| message.encoded_len()).sum::<usize>();
let mut buf = Vec::with_capacity(encoded_len);
let encode = Benchmark::new("encode", move |b| {
b.iter(|| {
buf.clear();
for message in &messages {
message.encode(&mut buf).unwrap();
}
criterion::black_box(&buf);
})
}).throughput(Throughput::Bytes(encoded_len as u32));
let payload = dataset.payload.clone();
let decode = Benchmark::new("decode", move |b| {
b.iter(|| {
for buf in &payload { | })
}).throughput(Throughput::Bytes(payload_len as u32));
let payload = dataset.payload.clone();
let merge = Benchmark::new("merge", move |b| {
let mut message = M::default();
b.iter(|| {
for buf in &payload {
message.clear();
message.merge(buf).unwrap();
criterion::black_box(&message);
}
})
}).throughput(Throughput::Bytes(payload_len as u32));
criterion
.bench(&dataset.name, encode)
.bench(&dataset.name, decode)
.bench(&dataset.name, merge);
Ok(())
}
fn main() -> Result {
let mut criterion = Criterion::default().configure_from_args();
for dataset in protobuf::benchmarks::datasets() {
let dataset = {
let mut f = File::open(dataset)?;
let mut buf = Vec::new();
f.read_to_end(&mut buf)?;
protobuf::benchmarks::BenchmarkDataset::decode(buf)?
};
match dataset.message_name.as_str() {
"benchmarks.proto2.GoogleMessage1" => benchmark_dataset::<proto2::GoogleMessage1>(&mut criterion, dataset)?,
"benchmarks.proto3.GoogleMessage1" => benchmark_dataset::<proto3::GoogleMessage1>(&mut criterion, dataset)?,
/*
TODO: groups are not yet supported
"benchmarks.proto2.GoogleMessage2" => benchmark_dataset::<proto2::GoogleMessage2>(&mut criterion, dataset)?,
"benchmarks.google_message3.GoogleMessage3" => benchmark_dataset::<GoogleMessage3>(&mut criterion, dataset)?,
"benchmarks.google_message4.GoogleMessage4" => benchmark_dataset::<GoogleMessage4>(&mut criterion, dataset)?,
*/
"benchmarks.proto2.GoogleMessage2" => (),
"benchmarks.google_message3.GoogleMessage3" => (),
"benchmarks.google_message4.GoogleMessage4" => (),
_ => bail!("unknown dataset message type: {}", dataset.message_name),
}
}
criterion.final_summary();
Ok(())
} | criterion::black_box(M::decode(buf).unwrap());
} |
echotrace.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
// Package echo provides functions to trace the labstack/echo package (https://github.com/labstack/echo).
package echo
import (
"math"
"net"
"strconv"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo/instrumentation/httpsec"
"github.com/labstack/echo/v4"
)
// Middleware returns echo middleware which will trace incoming requests.
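// A minimal usage sketch (assumes the package's WithServiceName option and a
// started tracer; only Middleware itself is defined in this file):
//
//	e := echo.New()
//	e.Use(Middleware(WithServiceName("my-echo-service")))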
func | (opts ...Option) echo.MiddlewareFunc {
cfg := new(config)
defaults(cfg)
for _, fn := range opts {
fn(cfg)
}
return func(next echo.HandlerFunc) echo.HandlerFunc {
if appsec.Enabled() {
next = withAppSec(next)
}
return func(c echo.Context) error {
request := c.Request()
resource := request.Method + " " + c.Path()
opts := []ddtrace.StartSpanOption{
tracer.ServiceName(cfg.serviceName),
tracer.ResourceName(resource),
tracer.SpanType(ext.SpanTypeWeb),
tracer.Tag(ext.HTTPMethod, request.Method),
tracer.Tag(ext.HTTPURL, request.URL.Path),
tracer.Measured(),
}
if !math.IsNaN(cfg.analyticsRate) {
opts = append(opts, tracer.Tag(ext.EventSampleRate, cfg.analyticsRate))
}
if spanctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(request.Header)); err == nil {
opts = append(opts, tracer.ChildOf(spanctx))
}
span, ctx := tracer.StartSpanFromContext(request.Context(), "http.request", opts...)
defer span.Finish()
// pass the span through the request context
c.SetRequest(request.WithContext(ctx))
// serve the request to the next middleware
err := next(c)
if err != nil {
span.SetTag(ext.Error, err)
// invokes the registered HTTP error handler
c.Error(err)
}
span.SetTag(ext.HTTPCode, strconv.Itoa(c.Response().Status))
return err
}
}
}
func withAppSec(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
req := c.Request()
span, ok := tracer.SpanFromContext(req.Context())
if !ok {
return next(c)
}
httpsec.SetAppSecTags(span)
args := httpsec.MakeHandlerOperationArgs(req)
op := httpsec.StartOperation(args, nil)
defer func() {
events := op.Finish(httpsec.HandlerOperationRes{Status: c.Response().Status})
if len(events) > 0 {
remoteIP, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
remoteIP = req.RemoteAddr
}
httpsec.SetSecurityEventTags(span, events, remoteIP, args.Headers, c.Response().Writer.Header())
}
}()
return next(c)
}
}
| Middleware |
api_op_CreateSqlInjectionMatchSet.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package wafregional
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/wafregional/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// This is AWS WAF Classic documentation. For more information, see AWS WAF Classic
// (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
// in the developer guide. For the latest version of AWS WAF, use the AWS WAFV2 API
// and see the AWS WAF Developer Guide
// (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). With
// the latest version, AWS WAF has a single set of endpoints for regional and
// global use. Creates a SqlInjectionMatchSet, which you use to allow, block, or
// count requests that contain snippets of SQL code in a specified part of web
// requests. AWS WAF searches for character sequences that are likely to be
// malicious strings. To create and configure a SqlInjectionMatchSet, perform the
// following steps:
//
// * Use GetChangeToken to get the change token that you provide
// in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.
//
// * Submit
// a CreateSqlInjectionMatchSet request.
//
// * Use GetChangeToken to get the change
// token that you provide in the ChangeToken parameter of an
// UpdateSqlInjectionMatchSet request.
//
// * Submit an UpdateSqlInjectionMatchSet
// request to specify the parts of web requests in which you want to allow, block,
// or count malicious SQL code.
//
// For more information about how to use the AWS WAF
// API to allow or block HTTP requests, see the AWS WAF Developer Guide
// (https://docs.aws.amazon.com/waf/latest/developerguide/).
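// A minimal caller-side sketch, not part of the generated code (assumes cfg is
// an aws.Config loaded elsewhere; error handling omitted):
//
//	client := wafregional.NewFromConfig(cfg)
//	token, _ := client.GetChangeToken(ctx, &wafregional.GetChangeTokenInput{})
//	out, _ := client.CreateSqlInjectionMatchSet(ctx, &wafregional.CreateSqlInjectionMatchSetInput{
//		ChangeToken: token.ChangeToken,
//		Name:        aws.String("example-sqli-set"),
//	})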
func (c *Client) CreateSqlInjectionMatchSet(ctx context.Context, params *CreateSqlInjectionMatchSetInput, optFns ...func(*Options)) (*CreateSqlInjectionMatchSetOutput, error) {
if params == nil {
params = &CreateSqlInjectionMatchSetInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateSqlInjectionMatchSet", params, optFns, addOperationCreateSqlInjectionMatchSetMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateSqlInjectionMatchSetOutput)
out.ResultMetadata = metadata
return out, nil
}
// A request to create a SqlInjectionMatchSet.
type CreateSqlInjectionMatchSetInput struct {
// The value returned by the most recent call to GetChangeToken.
//
// This member is required.
ChangeToken *string
// A friendly name or description for the SqlInjectionMatchSet that you're
// creating. You can't change Name after you create the SqlInjectionMatchSet.
//
// This member is required.
Name *string
}
// The response to a CreateSqlInjectionMatchSet request.
type CreateSqlInjectionMatchSetOutput struct {
// The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request.
// You can also use this value to query the status of the request. For more
// information, see GetChangeTokenStatus.
ChangeToken *string
// A SqlInjectionMatchSet.
SqlInjectionMatchSet *types.SqlInjectionMatchSet
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationCreateSqlInjectionMatchSetMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateSqlInjectionMatchSet{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateSqlInjectionMatchSet{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil |
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpCreateSqlInjectionMatchSetValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSqlInjectionMatchSet(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateSqlInjectionMatchSet(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "waf-regional",
OperationName: "CreateSqlInjectionMatchSet",
}
}
| {
return err
} |
main.go | package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"time"
"github.com/Shimbo/messenger"
)
var (
verifyToken = flag.String("verify-token", "mad-skrilla", "The token used to verify facebook (required)")
verify = flag.Bool("should-verify", false, "Whether or not the app should verify itself")
pageToken = flag.String("page-token", "not skrilla", "The token that is used to verify the page on facebook")
appSecret = flag.String("app-secret", "", "The app secret from the facebook developer portal (required)")
host = flag.String("host", "localhost", "The host used to serve the messenger bot")
port = flag.Int("port", 8080, "The port used to serve the messenger bot")
)
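// A typical invocation of this example bot, with placeholder values for the
// required flags (host and port fall back to their defaults if omitted):
//
//	go run main.go -app-secret=<secret> -page-token=<token> -verify-token=mad-skrilla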
func main() {
flag.Parse()
if *verifyToken == "" || *appSecret == "" || *pageToken == "" {
fmt.Println("missing arguments")
fmt.Println()
flag.Usage()
os.Exit(-1)
}
// Create a new messenger client
client := messenger.New(messenger.Options{
Verify: *verify,
AppSecret: *appSecret,
VerifyToken: *verifyToken,
Token: *pageToken,
})
// Setup a handler to be triggered when a message is received
client.HandleMessage(func(m messenger.Message, r *messenger.Response) {
fmt.Printf("%v (Sent, %v)\n", m.Text, m.Time.Format(time.UnixDate))
p, err := client.ProfileByID(m.Sender.ID, []string{"name", "first_name", "last_name", "profile_pic"})
if err != nil {
fmt.Println("Something went wrong!", err)
}
r.Text(fmt.Sprintf("Hello, %v!", p.FirstName), messenger.ResponseType)
})
// Setup a handler to be triggered when a message is delivered
client.HandleDelivery(func(d messenger.Delivery, r *messenger.Response) {
fmt.Println("Delivered at:", d.Watermark().Format(time.UnixDate)) |
// Setup a handler to be triggered when a message is read
client.HandleRead(func(m messenger.Read, r *messenger.Response) {
fmt.Println("Read at:", m.Watermark().Format(time.UnixDate))
})
addr := fmt.Sprintf("%s:%d", *host, *port)
log.Println("Serving messenger bot on", addr)
log.Fatal(http.ListenAndServe(addr, client.Handler()))
} | }) |
5181f24c23aa_integrate_accuracy_report_with_auto_annotation.py | """Integrate accuracy report with auto annotation
Revision ID: 5181f24c23aa
Revises: 810f7e51911d
Create Date: 2021-07-22 19:38:41.741133
"""
"""
OpenVINO DL Workbench
Migration: Integrate accuracy report with auto annotation
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from typing import List
import sqlalchemy as sa
from alembic import op
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base
from migrations.utils import SQLEnumMigrator
# revision identifiers, used by Alembic.
revision = '5181f24c23aa'
down_revision = '810f7e51911d'
branch_labels = None
depends_on = None
old_pipeline_types = (
'accuracy',
'remote_profiling',
'local_profiling',
'dev_cloud_profiling',
'local_int8_calibration',
'remote_int8_calibration',
'dev_cloud_int8_calibration',
'create_profiling_bundle',
'download_log',
'download_model',
'deployment_manager',
'export_project',
'setup',
'ping',
'inference_test_image',
'generate_dataset',
'upload_dataset',
'upload_model',
'download_omz_model',
'export_project_report',
'export_inference_report',
'local_winograd_tuning',
'annotate_dataset',
'per_tensor_report',
)
new_pipeline_types = (
*old_pipeline_types,
'predictions_relative_accuracy_report'
)
pipeline_type_enum_migrator = SQLEnumMigrator(
table_column_pairs=(('pipelines', 'type'),),
enum_name='pipelinetypeenum',
from_types=old_pipeline_types,
to_types=new_pipeline_types
)
Base = declarative_base()
class _JobsModel(Base):
__tablename__ = 'jobs'
job_type = sa.Column(sa.String(50))
__mapper_args__ = {
'polymorphic_identity': 'job',
'polymorphic_on': job_type
}
job_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
project_id = sa.Column(sa.Integer, nullable=True)
class _AccuracyJobModel(_JobsModel):
__tablename__ = 'accuracy_jobs'
__mapper_args__ = {
'polymorphic_identity': 'AccuracyJob',
}
job_id = sa.Column(sa.Integer, sa.ForeignKey('jobs.job_id'), primary_key=True)
accuracy = sa.Column(sa.Float, nullable=True)
accuracy_config = sa.Column(sa.Text, nullable=True)
target_dataset_id = sa.Column(sa.Integer, sa.ForeignKey('datasets.id'), nullable=False)
class _ProjectModel(Base):
__tablename__ = 'projects'
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
dataset_id = sa.Column(sa.Integer, sa.ForeignKey('datasets.id'), nullable=False)
class _DatasetModel(Base):
__tablename__ = 'datasets'
id = sa.Column(sa.Integer, primary_key=True)
def | ():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('accuracy_jobs', sa.Column('target_dataset_id', sa.Integer(), nullable=True))
op.create_foreign_key('accuracy_jobs_target_dataset_id_fkey', 'accuracy_jobs', 'datasets', ['target_dataset_id'],
['id'])
# ### end Alembic commands ###
pipeline_type_enum_migrator.upgrade()
# Data migration - filling non-nullable field `target_dataset_id`
bind = op.get_bind()
session = orm.Session(bind=bind)
with closing(session) as session:
accuracy_job_models: List[_AccuracyJobModel] = session.query(_AccuracyJobModel).all()
for accuracy_job_model in accuracy_job_models:
if accuracy_job_model.project_id:
project_model: _ProjectModel = session.query(_ProjectModel).get(accuracy_job_model.project_id)
accuracy_job_model.target_dataset_id = project_model.dataset_id
session.add(accuracy_job_model)
session.flush()
op.alter_column(table_name='accuracy_jobs', column_name='target_dataset_id', nullable=False)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('accuracy_jobs_target_dataset_id_fkey', 'accuracy_jobs', type_='foreignkey')
op.drop_column('accuracy_jobs', 'target_dataset_id')
# ### end Alembic commands ###
pipeline_type_enum_migrator.downgrade()
| upgrade |
bitrpc.py | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
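# Usage sketch: run this script with a single command name, for example
#   python bitrpc.py getinfo
# The command is read from sys.argv[1] below; any extra values are prompted for interactively.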
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:6452")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6452")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a TraderCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
| elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a TraderCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | |
dom_keyboard_event.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/vhdirk/gir-files.git)
// from webkit2gtk-gir-files
// DO NOT EDIT
use crate::DOMDOMWindow;
use crate::DOMEvent;
use crate::DOMObject;
use crate::DOMUIEvent;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
#[doc(alias = "WebKitDOMKeyboardEvent")]
pub struct DOMKeyboardEvent(Object<ffi::WebKitDOMKeyboardEvent, ffi::WebKitDOMKeyboardEventClass>) @extends DOMUIEvent, DOMEvent, DOMObject;
match fn {
type_ => || ffi::webkit_dom_keyboard_event_get_type(),
}
}
pub const NONE_DOM_KEYBOARD_EVENT: Option<&DOMKeyboardEvent> = None;
pub trait DOMKeyboardEventExt: 'static {
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_alt_graph_key")]
#[doc(alias = "get_alt_graph_key")]
fn is_alt_graph_key(&self) -> bool;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_alt_key")]
#[doc(alias = "get_alt_key")]
fn is_alt_key(&self) -> bool; | #[doc(alias = "get_ctrl_key")]
fn is_ctrl_key(&self) -> bool;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_key_identifier")]
#[doc(alias = "get_key_identifier")]
fn key_identifier(&self) -> Option<glib::GString>;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_key_location")]
#[doc(alias = "get_key_location")]
fn key_location(&self) -> libc::c_ulong;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_meta_key")]
#[doc(alias = "get_meta_key")]
fn is_meta_key(&self) -> bool;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_modifier_state")]
#[doc(alias = "get_modifier_state")]
fn is_modifier_state(&self, keyIdentifierArg: &str) -> bool;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_shift_key")]
#[doc(alias = "get_shift_key")]
fn is_shift_key(&self) -> bool;
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_init_keyboard_event")]
fn init_keyboard_event(
&self,
type_: &str,
canBubble: bool,
cancelable: bool,
view: &impl IsA<DOMDOMWindow>,
keyIdentifier: &str,
location: libc::c_ulong,
ctrlKey: bool,
altKey: bool,
shiftKey: bool,
metaKey: bool,
altGraphKey: bool,
);
#[doc(alias = "alt-graph-key")]
fn connect_alt_graph_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "alt-key")]
fn connect_alt_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "ctrl-key")]
fn connect_ctrl_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "key-identifier")]
fn connect_key_identifier_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "key-location")]
fn connect_key_location_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "meta-key")]
fn connect_meta_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "shift-key")]
fn connect_shift_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<DOMKeyboardEvent>> DOMKeyboardEventExt for O {
fn is_alt_graph_key(&self) -> bool {
unsafe {
from_glib(ffi::webkit_dom_keyboard_event_get_alt_graph_key(
self.as_ref().to_glib_none().0,
))
}
}
fn is_alt_key(&self) -> bool {
unsafe {
from_glib(ffi::webkit_dom_keyboard_event_get_alt_key(
self.as_ref().to_glib_none().0,
))
}
}
fn is_ctrl_key(&self) -> bool {
unsafe {
from_glib(ffi::webkit_dom_keyboard_event_get_ctrl_key(
self.as_ref().to_glib_none().0,
))
}
}
fn key_identifier(&self) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::webkit_dom_keyboard_event_get_key_identifier(
self.as_ref().to_glib_none().0,
))
}
}
fn key_location(&self) -> libc::c_ulong {
unsafe { ffi::webkit_dom_keyboard_event_get_key_location(self.as_ref().to_glib_none().0) }
}
fn is_meta_key(&self) -> bool {
unsafe {
from_glib(ffi::webkit_dom_keyboard_event_get_meta_key(
self.as_ref().to_glib_none().0,
))
}
}
fn is_modifier_state(&self, keyIdentifierArg: &str) -> bool {
unsafe {
from_glib(ffi::webkit_dom_keyboard_event_get_modifier_state(
self.as_ref().to_glib_none().0,
keyIdentifierArg.to_glib_none().0,
))
}
}
fn is_shift_key(&self) -> bool {
unsafe {
from_glib(ffi::webkit_dom_keyboard_event_get_shift_key(
self.as_ref().to_glib_none().0,
))
}
}
fn init_keyboard_event(
&self,
type_: &str,
canBubble: bool,
cancelable: bool,
view: &impl IsA<DOMDOMWindow>,
keyIdentifier: &str,
location: libc::c_ulong,
ctrlKey: bool,
altKey: bool,
shiftKey: bool,
metaKey: bool,
altGraphKey: bool,
) {
unsafe {
ffi::webkit_dom_keyboard_event_init_keyboard_event(
self.as_ref().to_glib_none().0,
type_.to_glib_none().0,
canBubble.into_glib(),
cancelable.into_glib(),
view.as_ref().to_glib_none().0,
keyIdentifier.to_glib_none().0,
location,
ctrlKey.into_glib(),
altKey.into_glib(),
shiftKey.into_glib(),
metaKey.into_glib(),
altGraphKey.into_glib(),
);
}
}
fn connect_alt_graph_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_alt_graph_key_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::alt-graph-key\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_alt_graph_key_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_alt_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_alt_key_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::alt-key\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_alt_key_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_ctrl_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_ctrl_key_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::ctrl-key\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_ctrl_key_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_key_identifier_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_key_identifier_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::key-identifier\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_key_identifier_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_key_location_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_key_location_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::key-location\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_key_location_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_meta_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_meta_key_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::meta-key\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_meta_key_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_shift_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_shift_key_trampoline<
P: IsA<DOMKeyboardEvent>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitDOMKeyboardEvent,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(DOMKeyboardEvent::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::shift-key\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_shift_key_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for DOMKeyboardEvent {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("DOMKeyboardEvent")
}
} |
#[cfg_attr(feature = "v2_22", deprecated = "Since 2.22")]
#[doc(alias = "webkit_dom_keyboard_event_get_ctrl_key")] |
index.ts | export { default as getNavigatorLanguage } from './getNavigatorLanguage'; | export * from './dateUtils';
export { default as getDateFnsLocale } from './getDateFnsLocale'; |
|
request_response.go | package client
// RequestHeader is used to decouple the message header/metadata writing from the actual message.
// It is able to accept a request and encode/write it according to Kafka Wire Protocol format
// adding the correlation id and client id to the request.
type RequestHeader struct {
correlationID int32
clientID string
request Request
}
// NewRequestHeader creates a new RequestHeader holding the correlation id, client id and the actual request.
func NewRequestHeader(correlationID int32, clientID string, request Request) *RequestHeader {
return &RequestHeader{
correlationID: correlationID,
clientID: clientID,
request: request,
}
}
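// A minimal usage sketch (req is any concrete Request implementation;
// NewBinaryEncoder is assumed to be the byte-slice backed Encoder provided
// elsewhere in this package, and is not defined in this file):
//
//	header := NewRequestHeader(42, "my-client", req)
//	buf := make([]byte, header.Size())
//	header.Write(NewBinaryEncoder(buf))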
// Size returns the size in bytes needed to write this request, including the length field. This value will be used when allocating memory for a byte array.
func (rw *RequestHeader) Size() int32 {
encoder := NewSizingEncoder()
rw.Write(encoder)
return encoder.Size()
}
// Write writes this RequestHeader into a given Encoder.
func (rw *RequestHeader) Write(encoder Encoder) {
// write the size of the request, excluding the 4-byte length field itself
encoder.WriteInt32(encoder.Size() - 4)
encoder.WriteInt16(rw.request.Key())
encoder.WriteInt16(rw.request.Version())
encoder.WriteInt32(rw.correlationID)
encoder.WriteString(rw.clientID)
rw.request.Write(encoder)
}
// Request is a generic interface for any request issued to Kafka. Must be able to identify and write itself.
type Request interface {
// Writes the Request to the given Encoder.
Write(Encoder)
// Returns the Kafka API key for this Request.
Key() int16
// Returns the Kafka request version for backwards compatibility.
Version() int16
}
// Response is a generic interface for any response received from Kafka. Must be able to read itself.
type Response interface {
// Read the Response from the given Decoder. May return a DecodingError if the response is invalid.
Read(Decoder) *DecodingError
}
// DecodingError is an error that also holds the information about why it happened.
type DecodingError struct {
err error
reason string
}
// NewDecodingError creates a new DecodingError with a given error message and reason.
func | (err error, reason string) *DecodingError {
return &DecodingError{err, reason}
}
// Error returns the error message for this DecodingError.
func (de *DecodingError) Error() error {
return de.err
}
// Reason returns the reason for this DecodingError.
func (de *DecodingError) Reason() string {
return de.reason
}
| NewDecodingError |
base.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Base plugin class
"""
# Standard library imports
import inspect
import os
import sys
# Third party imports
import qdarkstyle
from qtpy.QtCore import Qt, Slot
from qtpy.QtGui import QCursor, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QToolButton)
# Local imports
from spyder.config.base import _
from spyder.config.gui import get_color_scheme, get_font, is_dark_interface
from spyder.config.manager import CONF
from spyder.config.user import NoDefault
from spyder.py3compat import configparser, is_text_string
from spyder.utils.icon_manager import ima
from spyder.utils.qthelpers import (
add_actions, create_action, create_toolbutton, MENU_SEPARATOR,
toggle_actions, set_menu_icons)
from spyder.widgets.dock import SpyderDockWidget
class BasePluginMixin(object):
"""Implementation of the basic functionality for Spyder plugins."""
# Define configuration name map for plugin to split configuration
# among several files. See spyder/config/main.py
# Status: Hidden
_CONF_NAME_MAP = None
def __init__(self, parent=None):
super(BasePluginMixin, self).__init__()
# Check compatibility
check_compatibility, message = self.check_compatibility()
self._register_plugin()
self._is_compatible = True
if not check_compatibility:
self._is_compatible = False
self._show_compatibility_message(message)
def _register_plugin(self):
"""Register plugin configuration."""
CONF.register_plugin(self)
def _set_option(self, option, value, section=None,
recursive_notification=True):
"""Set option in spyder.ini"""
section = self.CONF_SECTION if section is None else section
CONF.set(section, str(option), value,
recursive_notification=recursive_notification)
def _get_option(self, option, default=NoDefault, section=None):
"""Get option from spyder.ini."""
section = self.CONF_SECTION if section is None else section
return CONF.get(section, option, default)
def _remove_option(self, option, section=None):
"""Remove option from spyder.ini."""
section = self.CONF_SECTION if section is None else section
CONF.remove_option(section, option)
def _show_status_message(self, message, timeout=0):
"""Show message in main window's status bar."""
self.main.statusBar().showMessage(message, timeout)
def _show_compatibility_message(self, message):
"""Show a compatibility message."""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle('Compatibility Check')
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def _starting_long_process(self, message):
"""
Show message in main window's status bar and change cursor to
Qt.WaitCursor
"""
self._show_status_message(message)
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
def _ending_long_process(self, message=""):
"""
Clear main window's status bar and restore mouse cursor.
"""
QApplication.restoreOverrideCursor()
self._show_status_message(message, timeout=2000)
QApplication.processEvents()
def _get_plugin_path(self):
"""Return filesystem path to the root directory of the plugin."""
return os.path.dirname(inspect.getfile(self.__class__))
def _create_configwidget(self, dlg, main_window):
"""Create configuration dialog box page widget"""
if self.CONFIGWIDGET_CLASS is not None:
parent = self
main = dlg
if not hasattr(self, 'dockwidget'):
# Prevent QWidget assignment to a plugin that does not have
# a graphical widget.
parent = dlg
main = main_window
configwidget = self.CONFIGWIDGET_CLASS(parent, main)
configwidget.initialize()
return configwidget
class PluginWindow(QMainWindow):
"""MainWindow subclass that contains a Spyder Plugin."""
def __init__(self, plugin):
QMainWindow.__init__(self)
self.plugin = plugin
# Setting interface theme
if is_dark_interface():
self.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
def closeEvent(self, event):
"""Reimplement Qt method."""
self.plugin.set_ancestor(self.plugin.main)
self.plugin.dockwidget.setWidget(self.plugin)
self.plugin.dockwidget.setVisible(True)
self.plugin.switch_to_plugin()
QMainWindow.closeEvent(self, event)
# Qt might want to do something with this soon,
# so it should not be deleted by Python yet.
# Fixes spyder-ide/spyder#10704
self.plugin.__unsafe__window = self
self.plugin._undocked_window = None
class BasePluginWidgetMixin(object):
"""
Implementation of the basic functionality for Spyder plugin widgets.
"""
def __init__(self, parent=None):
super(BasePluginWidgetMixin, self).__init__()
# Actions to add to the Options menu
self._plugin_actions = None
# Attribute to keep track if the plugin is undocked in a
# separate window
self._undocked_window = None
self._ismaximized = False
self._default_margins = None
self._isvisible = False
self.shortcut = None
# Options buttons
self.options_button = create_toolbutton(self, text=_('Options'),
icon=ima.icon('tooloptions'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
# Don't show menu arrow and remove padding
if is_dark_interface():
self.options_button.setStyleSheet(
("QToolButton::menu-indicator{image: none;}\n"
"QToolButton{padding: 3px;}"))
else:
self.options_button.setStyleSheet(
"QToolButton::menu-indicator{image: none;}")
# Options menu
self._options_menu = QMenu(self)
# We decided to create our own toggle action instead of using
# the one that comes with dockwidget because it's not possible
# to raise and focus the plugin with it.
self._toggle_view_action = None
# Default actions for Options menu
self._dock_action = create_action(
self,
_("Dock"),
icon=ima.icon('dock'),
tip=_("Dock the pane"),
triggered=self._close_window)
self._undock_action = create_action(
self,
_("Undock"),
icon=ima.icon('undock'),
tip=_("Undock the pane"),
triggered=self._create_window)
self._close_plugin_action = create_action(
self,
_("Close"),
icon=ima.icon('close_pane'),
tip=_("Close the pane"),
triggered=self._plugin_closed)
def _initialize_plugin_in_mainwindow_layout(self):
"""
If this is the first time the plugin is shown, perform actions to
initialize plugin position in Spyder's window layout.
Use on_first_registration to define the actions to be run
by your plugin
"""
if self.get_option('first_time', True):
try:
self.on_first_registration()
except NotImplementedError:
return
self.set_option('first_time', False)
def _update_margins(self):
"""Update plugin margins"""
layout = self.layout()
if self._default_margins is None:
self._default_margins = layout.getContentsMargins()
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
layout.setContentsMargins(*[margin]*4)
else:
layout.setContentsMargins(*self._default_margins)
def _update_plugin_title(self):
"""Update plugin title, i.e. dockwidget or window title"""
if self.dockwidget is not None:
win = self.dockwidget
elif self._undocked_window is not None:
win = self._undocked_window
else:
return
win.setWindowTitle(self.get_plugin_title())
def _create_dockwidget(self):
"""Add to parent QMainWindow as a dock widget"""
# Creating dock widget
dock = SpyderDockWidget(self.get_plugin_title(), self.main)
# Set properties
dock.setObjectName(self.__class__.__name__+"_dw")
dock.setAllowedAreas(dock.ALLOWED_AREAS)
dock.setFeatures(dock.FEATURES)
dock.setWidget(self)
self._update_margins()
dock.visibilityChanged.connect(self._visibility_changed)
dock.topLevelChanged.connect(self._on_top_level_changed)
dock.sig_plugin_closed.connect(self._plugin_closed)
self.dockwidget = dock
# NOTE: Don't use the default option of CONF.get to assign a
# None shortcut to plugins that don't have one. That will mess
# the creation of our Keyboard Shortcuts prefs page
try:
context = '_'
name = 'switch to {}'.format(self.CONF_SECTION)
self.shortcut = CONF.get_shortcut(context, name,
plugin_name=self.CONF_SECTION)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if self.shortcut is not None and self.main is not None:
sc = QShortcut(QKeySequence(self.shortcut), self.main,
self.switch_to_plugin)
self.register_shortcut(sc, "_", "Switch to {}".format(
self.CONF_SECTION))
return (dock, dock.LOCATION)
def _switch_to_plugin(self):
"""Switch to plugin."""
if (self.main.last_plugin is not None and
self.main.last_plugin._ismaximized and
self.main.last_plugin is not self):
self.main.maximize_dockwidget()
if not self._toggle_view_action.isChecked():
self._toggle_view_action.setChecked(True)
self._visibility_changed(True)
@Slot()
def _plugin_closed(self):
"""DockWidget was closed."""
if self._toggle_view_action:
self._toggle_view_action.setChecked(False)
def _get_font(self, rich_text=False):
"""Return plugin font."""
if rich_text:
option = 'rich_font'
font_size_delta = self.RICH_FONT_SIZE_DELTA
else:
option = 'font'
font_size_delta = self.FONT_SIZE_DELTA
return get_font(option=option, font_size_delta=font_size_delta)
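# Illustrative sketch (hypothetical plugin, not part of Spyder): a widget that
# needs a slightly smaller font only defines the class attribute read by
# _get_font above, e.g.
#
#     class MyPluginWidget(BasePluginWidgetMixin, QWidget):
#         FONT_SIZE_DELTA = -1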
def set_plugin_font(self):
"""
Set plugin font option.
Note: All plugins in Spyder use a global font. To define a different
size, the plugin must define a 'FONT_SIZE_DELTA' class variable.
"""
raise Exception("Plugin fonts are based on the general settings "
"and cannot be set directly on the plugin. "
"This method is deprecated.")
def _create_toggle_view_action(self):
"""Associate a toggle view action with each plugin"""
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
title = _('Editor')
if self.shortcut is not None:
action = create_action(self, title,
toggled=lambda checked: self.toggle_view(checked),
shortcut=QKeySequence(self.shortcut),
context=Qt.WidgetShortcut)
else:
action = create_action(self, title, toggled=lambda checked:
self.toggle_view(checked))
self._toggle_view_action = action
@Slot()
def _close_window(self):
"""Close QMainWindow instance that contains this plugin."""
if self._undocked_window is not None:
self._undocked_window.close()
self._undocked_window = None
# Oddly, these actions can appear disabled after the Dock
# action is pressed
self._undock_action.setDisabled(False)
self._close_plugin_action.setDisabled(False)
@Slot()
def _create_window(self):
"""Create a QMainWindow instance containing this plugin."""
self._undocked_window = window = PluginWindow(self)
window.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if is_text_string(icon):
icon = self.get_icon(icon)
window.setWindowIcon(icon)
window.setWindowTitle(self.get_plugin_title())
window.setCentralWidget(self)
window.resize(self.size())
self.refresh_plugin()
self.set_ancestor(window)
self.dockwidget.setFloating(False)
self.dockwidget.setVisible(False)
window.show()
@Slot(bool)
def _on_top_level_changed(self, top_level):
"""Actions to perform when a plugin is undocked to be moved."""
if top_level:
self._undock_action.setDisabled(True)
else:
self._undock_action.setDisabled(False)
def _visibility_changed(self, enable):
"""Dock widget visibility has changed."""
if self.dockwidget is None:
return
if enable:
self.dockwidget.raise_()
widget = self.get_focus_widget()
if widget is not None and self._undocked_window is not None:
widget.setFocus()
visible = self.dockwidget.isVisible() or self._ismaximized
if self.DISABLE_ACTIONS_WHEN_HIDDEN:
toggle_actions(self._plugin_actions, visible)
self._isvisible = enable and visible
if self._isvisible:
self.refresh_plugin()
def _refresh_actions(self):
"""Refresh Options menu."""
self._options_menu.clear()
# Decide what additional actions to show
if self._undocked_window is None:
additional_actions = [MENU_SEPARATOR,
self._undock_action,
self._close_plugin_action]
else:
additional_actions = [MENU_SEPARATOR,
self._dock_action]
# Create actions list
self._plugin_actions = self.get_plugin_actions() + additional_actions
add_actions(self._options_menu, self._plugin_actions)
if sys.platform == 'darwin':
set_menu_icons(self._options_menu, True)
def _setup(self):
"""
Setup Options menu, create toggle action and connect signals.
"""
# Create toggle view action
self._create_toggle_view_action()
# Create Options menu
self._plugin_actions = self.get_plugin_actions() + [MENU_SEPARATOR,
self._undock_action]
add_actions(self._options_menu, self._plugin_actions)
self.options_button.setMenu(self._options_menu)
self._options_menu.aboutToShow.connect(self._refresh_actions)
# Show icons in Mac plugin menus
if sys.platform == 'darwin':
self._options_menu.aboutToHide.connect(
lambda menu=self._options_menu:
set_menu_icons(menu, False))
# Update title
self.sig_update_plugin_title.connect(self._update_plugin_title)
self.setWindowTitle(self.get_plugin_title())
def _register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=False):
"""Register a shortcut associated to a QAction or QShortcut."""
self.main.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip,
self.CONF_SECTION)
def _get_color_scheme(self):
"""Get the current color scheme."""
return get_color_scheme(CONF.get('appearance', 'selected'))
def _add_dockwidget(self):
"""Add dockwidget to the main window and set it up."""
self.main.add_dockwidget(self)
# This is not necessary for the Editor because it calls
# _setup directly on init.
if self.CONF_SECTION != 'editor':
self._setup()
def _tabify(self, core_plugin):
"""Tabify plugin next to a core plugin."""
self.main.tabify_plugins(core_plugin, self)
day04.go | package aoc2016
import (
"sort"
"strconv"
"strings"
"unicode"
aoc "github.com/janreggie/aoc/internal"
"github.com/pkg/errors"
)
// room represents a room (Year 2016 Day 4).
//
// Syntax
//
// RAW is represented by ENCRYPTEDNAME-SECTORID[CHECKSUM],
// where ENCRYPTEDNAME is a string of length at least 1 containing lowercase letters separated by dashes,
// SECTORID is a three-digit integer,
// and CHECKSUM is a string of length five consisting solely of lowercase letters.
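// For example, in "aaaaa-bbb-z-y-x-123[abxyz]" (an example taken from the puzzle
// statement), ENCRYPTEDNAME is "aaaaa-bbb-z-y-x", SECTORID is 123 and CHECKSUM is "abxyz".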
type room struct {
raw string // raw representation
letters map[byte]uint16 // letters in ENCRYPTEDNAME
sectorID uint16
checksum string
}
// newRoom creates a room struct reflecting the syntax of room.
// Will return an error if syntax is invalid.
func newRoom(raw string) (room, error) {
out := room{}
out.raw = raw
// Minimum string: a-100[abcde] is length 12
if len(raw) < 12 {
return out, errors.Errorf("%v is too short", raw)
}
// split everything into dashes
splitRaw := strings.Split(raw, "-")
// parse the letters
out.letters = make(map[byte]uint16)
for ii := 0; ii < len(splitRaw)-1; ii++ {
for _, letter := range splitRaw[ii] {
if !unicode.IsLower(letter) {
return out, errors.Errorf("%v in %v not lowercase letter", letter, splitRaw[ii])
}
out.letters[byte(letter)]++
}
}
// check last item in splitRaw
lastString := splitRaw[len(splitRaw)-1]
if len(lastString) != 10 || lastString[3] != '[' || lastString[9] != ']' {
return out, errors.Errorf("%v cannot be parsed", lastString)
}
parsedInt, err := strconv.ParseUint(lastString[0:3], 10, 16)
if err != nil {
return out, errors.Wrapf(err, "could not parse %v", lastString[0:3])
}
out.sectorID = uint16(parsedInt)
for _, letter := range lastString[4:9] {
if !unicode.IsLower(letter) {
return out, errors.Errorf("%v in %v not lowercase letter", letter, lastString[4:9])
}
}
out.checksum = lastString[4:9]
return out, nil
}
// isReal returns true if the room is "real" (Part 1)
// by checking its checksum and letters.
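// A checksum is valid when it lists the five most common letters of ENCRYPTEDNAME
// in descending count, ties broken alphabetically; e.g. "aaaaa-bbb-z-y-x-123[abxyz]"
// is real according to the puzzle statement.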
func (rm room) isReal() bool {
if len(rm.checksum) < 5 {
// too short
return false
}
// sort all rm.letters...
type letterCount struct {
letter byte
count uint16
}
allLetters := make([]letterCount, 0, len(rm.letters))
for kk, vv := range rm.letters {
allLetters = append(allLetters, letterCount{kk, vv})
}
sort.Slice(allLetters, func(i, j int) bool {
// check counts
if allLetters[i].count > allLetters[j].count {
return true
}
// check letters
if allLetters[i].count == allLetters[j].count &&
allLetters[i].letter < allLetters[j].letter {
return true
}
return false
})
for ii := range rm.checksum {
if allLetters[ii].letter != rm.checksum[ii] {
return false
}
}
return true
}
// decrypt decrypts the room name by rotating each letter forward by the sector ID
// (a shift cipher) and joining the dash-separated words with spaces.
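// For example, per the puzzle statement, "qzmt-zixmtkozy-ivhz" with sector ID 343
// decrypts to "very encrypted name".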
func (rm room) decrypt() string {
input := strings.Split(strings.Split(rm.raw, "[")[0], "-")
input = input[:len(input)-1]
key := byte(rm.sectorID % 26)
parse := func(input byte) byte {
return 0x61 + (input-0x61+key)%26
}
output := make([]string, 0, len(input))
for _, istr := range input {
var sb strings.Builder
for ii := range istr {
sb.WriteByte(parse(istr[ii]))
}
output = append(output, sb.String())
}
return strings.Join(output, " ")
}
// Day04 solves the fourth day puzzle "Security Through Obscurity".
//
// Input
//
// A file where each line represents a room, which is of the format
// ENCRYPTEDNAME-SECTORID[CHECKSUM], where ENCRYPTEDNAME consists of lowercase
// letters separated by dashes, SECTORID is an integer between 100 and 999,
// and CHECKSUM is a string containing five lowercase letters. For example:
//
// ipvohghykvbz-ihzrla-jbzavtly-zlycpjl-253[lzhvy]
// cybyjqho-whqtu-rqiauj-fkhsxqiydw-322[syzwi]
// tipfxvezt-sleep-tljkfdvi-jvimztv-425[tveif]
// ktiaaqnqml-xtiabqk-oziaa-xczkpiaqvo-616[aiqko]
// ckgvutofkj-xghhoz-gtgreyoy-306[nyhpz]
//
// The input contains at most 1000 lines,
// and it is guaranteed that ENCRYPTEDNAME is at most of length 60.
func Day04(input string) (answer1, answer2 string, err error) {
var a1 uint64
for _, line := range aoc.SplitLines(input) {
rm, e := newRoom(line)
if e != nil {
err = errors.Wrapf(e, "could not parse %v", line)
return
}
if !rm.isReal() {
continue
}
a1 += uint64(rm.sectorID)
if rm.decrypt() == "northpole object storage" {
answer2 = strconv.FormatUint(uint64(rm.sectorID), 10)
}
}
answer1 = strconv.FormatUint(a1, 10)
return
}
demo.js | import { obtainTerrain } from "@woosh/meep-engine/engine/ecs/terrain/util/obtainTerrain.js";
// import { seededRandom } from "@woosh/meep-engine/core/math/random/seededRandom.js";
import { EngineConfiguration } from "@woosh/meep-engine/engine/EngineConfiguration.js";
// import ScriptSystem from "@woosh/meep-engine/engine/ecs/systems/ScriptSystem.js";
// import PathFollowingSystem from "@woosh/meep-engine/engine/navigation/ecs/path_following/PathFollowingSystem.js";
// import TagSystem from "@woosh/meep-engine/engine/ecs/systems/TagSystem.js";
// import TimerSystem from "@woosh/meep-engine/engine/ecs/systems/TimerSystem.js";
// import TopDownCameraControllerSystem from "@woosh/meep-engine/engine/graphics/ecs/camera/topdown/TopDownCameraControllerSystem.js";
// import { TopDownCameraLanderSystem } from "@woosh/meep-engine/engine/graphics/ecs/camera/topdown/TopDownCameraLanderSystem.js";
// import { CameraSystem } from "@woosh/meep-engine/engine/graphics/ecs/camera/CameraSystem.js";
// import { MeshSystem } from "@woosh/meep-engine/engine/graphics/ecs/mesh/MeshSystem.js";
// import TerrainSystem from "@woosh/meep-engine/engine/ecs/terrain/ecs/TerrainSystem.js";
// import WaterSystem from "@woosh/meep-engine/engine/graphics/ecs/water/WaterSystem.js";
// import ViewportPositionSystem from "@woosh/meep-engine/engine/ecs/gui/position/ViewportPositionSystem.js";
// import { GridPosition2TransformSystem } from "@woosh/meep-engine/engine/grid/systems/GridPosition2TransformSystem.js";
// import { Transform2GridPositionSystem } from "@woosh/meep-engine/engine/grid/transform2grid/Transform2GridPositionSystem.js";
// import SynchronizePositionSystem from "@woosh/meep-engine/engine/ecs/systems/SynchronizePositionSystem.js";
// import GridObstacleSystem from "@woosh/meep-engine/engine/grid/systems/GridObstacleSystem.js";
// import GridPositionSystem from "@woosh/meep-engine/engine/grid/systems/GridPositionSystem.js";
// import InputControllerSystem from "@woosh/meep-engine/engine/input/ecs/systems/InputControllerSystem.js";
// import { InputSystem } from "@woosh/meep-engine/engine/input/ecs/systems/InputSystem.js";
// import MeshHighlightSystem from "@woosh/meep-engine/engine/graphics/ecs/highlight/system/MeshHighlightSystem.js";
// import LightSystem from "@woosh/meep-engine/engine/graphics/ecs/light/LightSystem.js";
// import RenderSystem from "@woosh/meep-engine/engine/ecs/systems/RenderSystem.js";
// import { makeEngineOptionsModel } from "@woosh/meep-engine/../model/game/options/makeEngineOptionsModel.js";
// import { enableEditor } from "@woosh/meep-engine/editor/enableEditor.js";
import { EngineHarness } from "./demo/EngineHarness.js";
// import { EngineHarness } from "@woosh/meep-engine/engine/EngineHarness.js";
import Vector2 from "@woosh/meep-engine/core/geom/Vector2.js";
import Vector3 from "@woosh/meep-engine/core/geom/Vector3.js";
// import { buildCanvasViewFromTexture } from "@woosh/meep-engine/engine/graphics/render/visibility/hiz/buildCanvasViewFromTexture.js";
// import EntityBuilder from "@woosh/meep-engine/engine/ecs/EntityBuilder.js";
// import ViewportPositionSystem from "@woosh/meep-engine/engine/ecs/gui/position/ViewportPositionSystem.js";
// import ViewportPosition from "@woosh/meep-engine/engine/ecs/gui/position/ViewportPosition.js";
// import GUIElement from "@woosh/meep-engine/engine/ecs/gui/GUIElement.js";
// import GUIElementSystem from "@woosh/meep-engine/engine/ecs/gui/GUIElementSystem.js";
// import {SerializationMetadata} from "@woosh/meep-engine/engine/ecs/components/SerializationMetadata.js";
import { TextureAssetLoader } from "@woosh/meep-engine/engine/asset/loaders/texture/TextureAssetLoader.js";
import { GameAssetType } from "@woosh/meep-engine/engine/asset/GameAssetType.js";
import { ImprovedNoise } from "three/examples/jsm/math/ImprovedNoise.js";
import { lerp } from "@woosh/meep-engine/core/math/lerp.js";
const eh = new EngineHarness();
/**
*
* @param {Engine} engine
* @return {EngineConfiguration}
*/
function makeConfig(engine) {
const r = new EngineConfiguration();
// configure engine here, add systems, loaders etc.
r.addLoader(GameAssetType.Texture, new TextureAssetLoader());
return r;
}
/**
*
* @param {Terrain} terrain
* @param {number} resolution
* @param {number} heightMin
* @param {number} heightMax
*/
async function buildRandomTerrainHeights({
terrain,
resolution = 64,
heightMin = -10,
heightMax = 30
}) {
const terrain_height = terrain.samplerHeight;
terrain_height.resize(resolution, resolution);
const noise = new ImprovedNoise();
let x, y;
for (y = 0; y < resolution; y++) {
for (x = 0; x < resolution; x++) {
const u = x / (resolution - 1);
const v = y / (resolution - 1);
const noise_value = noise.noise(u * 13.456, v * 13.456, 0) * noise.noise(u * 3.71, v * 3.71, 0.8);
const height_value = lerp(heightMin, heightMax, noise_value);
terrain_height.set(x, y, [height_value]);
}
}
await terrain.updateWorkerHeights();
terrain.updateHeightTexture();
terrain.buildLightMap();
terrain.tiles.rebuild();
}
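// Illustrative usage sketch (not in the original demo): regenerating a coarser,
// flatter height field; the option names mirror the destructured parameters above.
//
//   await buildRandomTerrainHeights({ terrain, resolution: 32, heightMin: -5, heightMax: 10 });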
/**
*
* @param {Engine} engine
*/
async function main(engine) {
await EngineHarness.buildBasics({
engine,
// heightMap = "data/textures/utility/black_pixel.png",
// heightRange = "data/textures/utility/white_pixel.png",
enableWater: false,
terrainResolution: 4,
terrainSize: new Vector2(64, 64),
focus: new Vector3(64, 0, 54),
yaw: 0,
pitch: 0.909,
distance: 169
});
/**
*
* @type {EntityComponentDataset}
*/
const ecd = engine.entityManager.dataset;
/**
*
* @type {Terrain|null}
*/
const terrain = obtainTerrain(ecd);
buildRandomTerrainHeights({ terrain: terrain });
}
/**
*
* @param {EngineHarness} harness
*/
async function init(harness) {
const engine = eh.engine;
await makeConfig(engine).apply(engine);
// enableEditor(engine);
await eh.initialize();
main(engine);
}
init(eh);
test_views.py | from datetime import timedelta
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.models import User
from django.utils import timezone
from allauth.account.models import EmailAddress
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from challenges.models import Challenge
from hosts.models import ChallengeHost, ChallengeHostTeam
from participants.models import ParticipantTeam, Participant
class BaseAPITestClass(APITestCase):
def setUp(self):
self.client = APIClient(enforce_csrf_checks=True)
self.user = User.objects.create(
username="someuser",
email="[email protected]",
password="secret_password",
)
EmailAddress.objects.create(
user=self.user, email="[email protected]", primary=True, verified=True
)
self.invite_user = User.objects.create(
username="otheruser",
email="[email protected]",
password="other_secret_password",
)
self.participant_team = ParticipantTeam.objects.create(
team_name="Participant Team", created_by=self.user
)
self.participant = Participant.objects.create(
user=self.user, team=self.participant_team, status=Participant.SELF
)
self.client.force_authenticate(user=self.user)
class GetParticipantTeamTest(BaseAPITestClass):
url = reverse_lazy("participants:get_participant_team_list")
def setUp(self):
super(GetParticipantTeamTest, self).setUp()
self.user2 = User.objects.create(
username="user2",
email="[email protected]",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="[email protected]",
primary=True,
verified=True,
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team,
)
def test_get_challenge(self):
expected = [
{
"id": self.participant_team.pk,
"team_name": self.participant_team.team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
"members": [
{
"member_name": self.participant.user.username,
"status": self.participant.status,
"member_id": self.participant.user.id,
},
{
"member_name": self.participant2.user.username,
"status": self.participant2.status,
"member_id": self.participant2.user.id,
},
],
}
]
response = self.client.get(self.url, {})
self.assertEqual(response.data["results"], expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateParticipantTeamTest(BaseAPITestClass):
url = reverse_lazy("participants:get_participant_team_list")
def setUp(self):
super(CreateParticipantTeamTest, self).setUp()
self.data = {"team_name": "New Participant Team"}
def test_create_participant_team_with_all_data(self):
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_participant_team_with_team_name_same_as_with_existing_team(
self
):
expected = {
"team_name": [
"participant team with this team name already exists."
]
}
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Creating team with same team name
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, expected)
def test_create_participant_team_with_no_data(self):
del self.data["team_name"]
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class GetParticularParticipantTeam(BaseAPITestClass):
def setUp(self):
super(GetParticularParticipantTeam, self).setUp()
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk},
)
self.user2 = User.objects.create(
username="user2",
email="[email protected]",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="[email protected]",
primary=True,
verified=True,
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team,
)
def test_get_particular_participant_team(self):
expected = {
"id": self.participant_team.pk,
"team_name": self.participant_team.team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
"members": [
{
"member_name": self.participant.user.username,
"status": self.participant.status,
"member_id": self.participant.user.id,
},
{
"member_name": self.participant2.user.username,
"status": self.participant2.status,
"member_id": self.participant2.user.id,
},
],
}
response = self.client.get(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_particular_participant_team_does_not_exist(self):
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk + 1},
)
expected = {"error": "ParticipantTeam does not exist"}
response = self.client.get(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
class UpdateParticularParticipantTeam(BaseAPITestClass):
def setUp(self):
super(UpdateParticularParticipantTeam, self).setUp()
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk},
)
self.partial_update_participant_team_name = (
"Partial Update Participant Team"
)
self.update_participant_team_name = "Update Test Participant Team"
self.data = {"team_name": self.update_participant_team_name}
def test_particular_participant_team_partial_update(self):
self.partial_update_data = {
"team_name": self.partial_update_participant_team_name
}
expected = {
"id": self.participant_team.pk,
"team_name": self.partial_update_participant_team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
}
response = self.client.patch(self.url, self.partial_update_data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_particular_participant_team_update(self):
expected = {
"id": self.participant_team.pk,
"team_name": self.update_participant_team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
}
response = self.client.put(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_particular_participant_team_update_with_no_data(self):
self.data = {"team_name": ""}
response = self.client.put(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class DeleteParticularParticipantTeam(BaseAPITestClass):
def setUp(self):
super(DeleteParticularParticipantTeam, self).setUp()
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk},
)
def test_particular_participant_team_delete(self):
response = self.client.delete(self.url, {})
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class InviteParticipantToTeamTest(BaseAPITestClass):
def setUp(self):
super(InviteParticipantToTeamTest, self).setUp()
self.data = {"email": self.invite_user.email}
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": self.participant_team.pk},
)
def test_invite_participant_to_team_with_all_data(self):
expected = {"message": "User has been successfully added to the team!"}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_invite_participant_to_team_with_no_data(self):
del self.data["email"]
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_invite_self_to_team(self):
self.data = {"email": self.user.email}
expected = {"error": "User is already part of the team!"}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_invite_to_other_team_which_doesnot_belong_to_user(self):
temp_user = User.objects.create(
username="temp_user", password="test_password"
)
temp_participant_team = ParticipantTeam.objects.create(
team_name="Test Team 1", created_by=temp_user
)
expected = {"error": "You are not a member of this team!"}
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": temp_participant_team.pk},
)
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_invite_user_which_does_not_exist_to_team(self):
self.data = {"email": "[email protected]"}
expected = {"error": "User does not exist with this email address!"}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_particular_participant_team_for_invite_does_not_exist(self):
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": self.participant_team.pk + 1},
)
expected = {"error": "Participant Team does not exist"}
response = self.client.post(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_invite_participant_to_team_when_user_cannot_be_invited(self):
"""
NOTE
user: host user
user1: participant 1
user2: participant 2
"""
self.user2 = User.objects.create(
username="user2",
email="[email protected]",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="[email protected]",
primary=True,
verified=True,
)
self.user3 = User.objects.create(
username="user3",
email="[email protected]",
password="user3_password",
)
EmailAddress.objects.create(
user=self.user3,
email="[email protected]",
primary=True,
verified=True,
)
self.participant_team2 = ParticipantTeam.objects.create(
team_name="Participant Team created by user 2",
created_by=self.user2,
)
self.participant_team3 = ParticipantTeam.objects.create(
team_name="Participant Team created by user 3",
created_by=self.user3,
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team2,
)
self.participant3 = Participant.objects.create(
user=self.user3,
status=Participant.ACCEPTED,
team=self.participant_team3,
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Test Challenge Host Team", created_by=self.user
)
self.challenge = Challenge.objects.create(
title="Test Challenge",
short_description="Short description for test challenge",
description="Description for test challenge",
terms_and_conditions="Terms and conditions for test challenge",
submission_guidelines="Submission guidelines for test challenge",
creator=self.challenge_host_team,
published=False,
enable_forum=True,
leaderboard_description=None,
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.client.force_authenticate(user=self.user2)
self.challenge.participant_teams.add(self.participant_team2)
self.challenge.participant_teams.add(self.participant_team3)
self.data = {"email": self.user3.email}
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": self.participant_team2.pk},
)
expected = {
"error": "Sorry, the invited user has already participated "
"in atleast one of the challenges which you are already"
" a part of. Please try creating a new team and then invite."
}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
class DeleteParticipantFromTeamTest(BaseAPITestClass):
def setUp(self):
super(DeleteParticipantFromTeamTest, self).setUp()
self.participant = Participant.objects.create(
user=self.user, status=Participant.SELF, team=self.participant_team
)
self.user2 = User.objects.create(
username="user2",
email="[email protected]",
password="user2_password",
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team,
)
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.invite_user.pk,
},
)
def test_participant_does_not_exist_in_team(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant2.pk + 1,
},
)
expected = {"error": "Participant does not exist"}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_participant_team_does_not_exist(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk + 1,
"participant_pk": self.participant2.pk,
},
)
expected = {"error": "ParticipantTeam does not exist"}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_participant_is_admin_and_wants_to_delete_himself(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant.pk,
},
)
expected = {
"error": "You are not allowed to remove yourself since you are admin. Please delete the team if you want to do so!" # noqa: ignore=E501
}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_participant_does_not_have_permissions_to_remove_another_participant(
self
):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant2.pk,
},
)
self.user3 = User.objects.create(
username="user3",
email="[email protected]",
password="user3_password",
)
EmailAddress.objects.create(
user=self.user3,
email="[email protected]",
primary=True,
verified=True,
)
self.participant3 = Participant.objects.create(
user=self.user3,
status=Participant.ACCEPTED,
team=self.participant_team,
)
self.client.force_authenticate(user=self.user3)
expected = {
"error": "Sorry, you do not have permissions to remove this participant"
}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_when_a_participant_is_successfully_removed_from_team(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant2.pk,
},
)
response = self.client.delete(self.url, {})
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class GetTeamsAndCorrespondingChallengesForAParticipant(BaseAPITestClass):
def setUp(self):
super(GetTeamsAndCorrespondingChallengesForAParticipant, self).setUp()
self.user2 = User.objects.create(
username="user2",
email="[email protected]",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="[email protected]",
primary=True,
verified=True,
)
self.participant_team2 = ParticipantTeam.objects.create(
team_name="Team B", created_by=self.user2
) # created by user2 and not user
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team2,
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Host Team 1", created_by=self.user2
)
self.challenge1 = Challenge.objects.create(
title="Test Challenge 1",
short_description="Short description for test challenge 1",
description="Description for test challenge 1",
terms_and_conditions="Terms and conditions for test challenge 1",
submission_guidelines="Submission guidelines for test challenge 1",
creator=self.challenge_host_team,
published=False,
is_registration_open=True,
enable_forum=True,
leaderboard_description="Lorem ipsum dolor sit amet, consectetur adipiscing elit",
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.challenge1.slug = "{}-{}".format(
self.challenge1.title.replace(" ", "-").lower(), self.challenge1.pk
)[:199]
self.challenge1.save()
self.challenge2 = Challenge.objects.create(
title="Test Challenge 2",
short_description="Short description for test challenge 2",
description="Description for test challenge 2",
terms_and_conditions="Terms and conditions for test challenge 2",
submission_guidelines="Submission guidelines for test challenge 2",
creator=self.challenge_host_team,
published=False,
is_registration_open=True,
enable_forum=True,
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.url = reverse_lazy(
"participants:get_teams_and_corresponding_challenges_for_a_participant",
kwargs={"challenge_pk": self.challenge1.pk},
)
self.time = timezone.now()
def test_get_teams_and_corresponding_challenges_for_a_participant(self):
self.challenge1.participant_teams.add(self.participant_team)
self.challenge1.save()
expected = {
"challenge_participant_team_list": [
{
"challenge": {
"id": self.challenge1.id,
"title": self.challenge1.title,
"description": self.challenge1.description,
"short_description": self.challenge1.short_description,
"terms_and_conditions": self.challenge1.terms_and_conditions,
"submission_guidelines": self.challenge1.submission_guidelines,
"evaluation_details": self.challenge1.evaluation_details,
"image": self.challenge1.image,
"start_date": "{0}{1}".format(
self.challenge1.start_date.isoformat(), "Z"
).replace("+00:00", ""),
"end_date": "{0}{1}".format(
self.challenge1.end_date.isoformat(), "Z"
).replace("+00:00", ""),
"creator": {
"id": self.challenge_host_team.id,
"team_name": self.challenge_host_team.team_name,
"created_by": self.challenge_host_team.created_by.username,
"team_url": self.challenge_host_team.team_url,
},
"published": self.challenge1.published,
"is_registration_open": self.challenge1.is_registration_open,
"enable_forum": self.challenge1.enable_forum,
"leaderboard_description": self.challenge1.leaderboard_description,
"anonymous_leaderboard": self.challenge1.anonymous_leaderboard,
"is_active": True,
"allowed_email_domains": [],
"blocked_email_domains": [],
"banned_email_ids": [],
"approved_by_admin": False,
"forum_url": self.challenge1.forum_url,
"is_docker_based": self.challenge1.is_docker_based,
"slug": self.challenge1.slug,
"max_docker_image_size": self.challenge1.max_docker_image_size,
"cli_version": self.challenge1.cli_version,
},
"participant_team": {
"id": self.participant_team.id,
"team_name": self.participant_team.team_name,
"created_by": self.participant_team.created_by.username,
"team_url": self.participant_team.team_url,
},
}
],
"is_challenge_host": False,
}
response = self.client.get(self.url, {})
# checking 'datetime_now' separately because of time difference in microseconds
self.assertTrue(
abs(response.data["datetime_now"] - self.time)
< timedelta(seconds=1)
)
# deleting field 'datetime_now' from response to check with expected response without time field
del response.data["datetime_now"]
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_participant_team_challenge_list(self):
self.url = reverse_lazy(
"participants:get_participant_team_challenge_list",
kwargs={"participant_team_pk": self.participant_team.pk},
)
expected = [
{
"id": self.challenge1.id,
"title": self.challenge1.title,
"description": self.challenge1.description,
"short_description": self.challenge1.short_description,
"terms_and_conditions": self.challenge1.terms_and_conditions,
"submission_guidelines": self.challenge1.submission_guidelines,
"evaluation_details": self.challenge1.evaluation_details,
"image": self.challenge1.image,
"start_date": "{0}{1}".format(
self.challenge1.start_date.isoformat(), "Z"
).replace("+00:00", ""),
"end_date": "{0}{1}".format(
self.challenge1.end_date.isoformat(), "Z"
).replace("+00:00", ""),
"creator": {
"id": self.challenge_host_team.id,
"team_name": self.challenge_host_team.team_name,
"created_by": self.challenge_host_team.created_by.username,
"team_url": self.challenge_host_team.team_url,
},
"published": self.challenge1.published,
"is_registration_open": self.challenge1.is_registration_open,
"enable_forum": self.challenge1.enable_forum,
"leaderboard_description": self.challenge1.leaderboard_description,
"anonymous_leaderboard": self.challenge1.anonymous_leaderboard,
"is_active": True,
"allowed_email_domains": [],
"blocked_email_domains": [],
"banned_email_ids": [],
"approved_by_admin": False,
"forum_url": self.challenge1.forum_url,
"is_docker_based": self.challenge1.is_docker_based,
"slug": self.challenge1.slug,
"max_docker_image_size": self.challenge1.max_docker_image_size,
"cli_version": self.challenge1.cli_version,
}
]
self.challenge1.participant_teams.add(self.participant_team)
self.challenge1.save()
response = self.client.get(self.url, {})
self.assertEqual(response.data["results"], expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_when_participant_team_hasnot_participated_in_any_challenge(self):
expected = {
"challenge_participant_team_list": [
{
"challenge": None,
"participant_team": {
"id": self.participant_team.id,
"team_name": self.participant_team.team_name,
"created_by": self.participant_team.created_by.username,
"team_url": self.participant_team.team_url,
},
}
],
"is_challenge_host": False,
}
response = self.client.get(self.url, {})
# checking 'datetime_now' separately because of time difference in microseconds
self.assertTrue(
abs(response.data["datetime_now"] - self.time)
< timedelta(seconds=1)
)
# deleting field 'datetime_now' from response to check with expected response without time field
del response.data["datetime_now"]
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_when_there_is_no_participant_team_of_user(self):
self.participant_team.delete()
expected = {
"challenge_participant_team_list": [],
"is_challenge_host": False,
}
response = self.client.get(self.url, {})
# checking 'datetime_now' separately because of time difference in microseconds
self.assertTrue(
abs(response.data["datetime_now"] - self.time)
< timedelta(seconds=1)
)
# deleting field 'datetime_now' from response to check with expected response without time field
del response.data["datetime_now"]
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class RemoveSelfFromParticipantTeamTest(BaseAPITestClass):
def setUp(self):
super(RemoveSelfFromParticipantTeamTest, self).setUp()
# user who create a challenge host team
self.user2 = User.objects.create(
username="someuser2", password="some_secret_password"
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Some Test Challenge Host Team", created_by=self.user2
)
self.challenge_host2 = ChallengeHost.objects.create(
user=self.user2,
team_name=self.challenge_host_team,
status=ChallengeHost.ACCEPTED,
permissions=ChallengeHost.ADMIN,
)
self.challenge = Challenge.objects.create(
title="Some Test Challenge",
short_description="Short description for some test challenge",
description="Description for some test challenge",
terms_and_conditions="Terms and conditions for some test challenge",
submission_guidelines="Submission guidelines for some test challenge",
creator=self.challenge_host_team,
published=False,
is_registration_open=True,
enable_forum=True,
leaderboard_description="Fusce quis sapien eget sem accumsan euismod",
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk},
)
def test_when_participant_team_does_not_exist(self):
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk + 1},
)
expected = {"error": "ParticipantTeam does not exist!"}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_a_participant_is_successfully_removed_from_team(self):
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk},
)
response = self.client.delete(self.url, {})
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_when_participant_team_has_taken_part_in_challenges(self):
self.challenge.participant_teams.add(self.participant_team)
expected = {
"error": "Sorry, you cannot delete this team since it has taken part in challenge(s)!"
}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_participant_team_remove_when_no_participants_exists(self):
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk},
)
self.client.delete(self.url, {})
participant_teams = ParticipantTeam.objects.all()
self.assertEqual(participant_teams.count(), 0)
async_insert_queue.rs | // Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::hash::Hash;
use std::sync::Arc;
use common_base::base::tokio::sync::Notify;
use common_base::base::tokio::time::interval;
use common_base::base::tokio::time::Duration;
use common_base::base::tokio::time::Instant;
use common_base::base::ProgressValues;
use common_base::base::Runtime;
use common_base::infallible::Mutex;
use common_base::infallible::RwLock;
use common_datablocks::DataBlock;
use common_exception::ErrorCode;
use common_exception::Result;
use common_planners::InsertPlan;
use common_planners::SelectPlan;
use super::InsertInterpreter;
use super::SelectInterpreter;
use crate::pipelines::new::executor::PipelineCompleteExecutor;
use crate::pipelines::new::processors::port::InputPort;
use crate::pipelines::new::processors::port::OutputPort;
use crate::pipelines::new::processors::BlocksSource;
use crate::pipelines::new::SinkPipeBuilder;
use crate::pipelines::new::SourcePipeBuilder;
use crate::sessions::QueryContext;
use crate::sessions::SessionManager;
use crate::sessions::SessionType;
use crate::sessions::Settings;
use crate::storages::memory::MemoryTableSink;
#[derive(Clone)]
pub struct InsertKey {
plan: Arc<InsertPlan>,
// settings different with default settings
changed_settings: Arc<Settings>,
}
impl InsertKey {
pub fn get_serialized_changed_settings(&self) -> String {
let mut serialized_settings = String::new();
let values = self.changed_settings.get_setting_values();
for value in values.into_iter() {
let serialized = serde_json::to_string(&value).unwrap();
serialized_settings.push_str(&serialized);
}
serialized_settings
}
}
impl PartialEq for InsertKey {
fn eq(&self, other: &Self) -> bool {
self.plan.eq(&other.plan)
&& self
.get_serialized_changed_settings()
.eq(&other.get_serialized_changed_settings())
}
}
impl Eq for InsertKey {}
impl Hash for InsertKey {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let table = format!(
"{}.{}.{}",
self.plan.catalog, self.plan.database, self.plan.table
);
state.write(table.as_bytes());
let values = self.changed_settings.get_setting_values();
let serialized = serde_json::to_string(&values).unwrap();
state.write(serialized.as_bytes());
}
}
impl InsertKey {
pub fn try_create(plan: Arc<InsertPlan>, changed_settings: Arc<Settings>) -> Self {
Self {
plan,
changed_settings,
}
}
}
#[derive(Clone)]
pub struct Entry {
block: DataBlock,
notify: Arc<Notify>,
finished: Arc<RwLock<bool>>,
timeout: Arc<RwLock<bool>>,
error: Arc<RwLock<ErrorCode>>,
}
impl Entry {
pub fn try_create(block: DataBlock) -> Self {
Self {
block,
notify: Arc::new(Notify::new()),
finished: Arc::new(RwLock::new(false)),
timeout: Arc::new(RwLock::new(false)),
error: Arc::new(RwLock::new(ErrorCode::Ok(""))),
}
}
pub fn finish(&self) {
let mut finished = self.finished.write();
*finished = true;
self.notify.notify_one();
}
pub fn finish_with_err(&self, err: ErrorCode) {
let mut error = self.error.write();
*error = err;
self.notify.notify_one();
}
pub fn finish_with_timeout(&self) {
let mut timeout = self.timeout.write();
*timeout = true;
self.notify.notify_one();
}
pub async fn wait(&self) -> Result<()> {
if *self.finished.read() {
return Ok(());
}
self.notify.clone().notified().await;
match self.is_finished() {
true => Ok(()),
false => match self.is_timeout() {
true => Err(ErrorCode::AsyncInsertTimeoutError("Async insert timeout.")),
false => Err((*self.error.read()).clone()),
},
}
}
pub fn is_finished(&self) -> bool {
return *self.finished.read();
}
pub fn is_timeout(&self) -> bool {
return *self.timeout.read();
}
}
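// Illustrative sketch (not from the original source): how an Entry is typically
// paired with its producer. One side awaits wait(), the other resolves it with
// exactly one of finish / finish_with_err / finish_with_timeout.
//
//   let entry = Arc::new(Entry::try_create(block)); // `block` is a DataBlock prepared by the caller
//   let waiter = entry.clone();
//   // consumer: waiter.wait().await?;
//   // producer: entry.finish();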
#[derive(Clone)]
pub struct InsertData {
entries: Vec<EntryPtr>,
data_size: u64,
first_update: Instant,
last_update: Instant,
}
impl InsertData {
pub fn try_create(
entries: Vec<EntryPtr>,
data_size: u64,
first_update: Instant,
last_update: Instant,
) -> Self {
Self {
entries,
data_size,
first_update,
last_update,
}
}
}
type EntryPtr = Arc<Entry>;
type Queue = HashMap<InsertKey, InsertData>;
type QueryIdToEntry = HashMap<String, EntryPtr>;
#[derive(Clone)]
pub struct AsyncInsertQueue {
pub session_mgr: Arc<RwLock<Option<Arc<SessionManager>>>>,
runtime: Arc<Runtime>,
max_data_size: u64,
busy_timeout: Duration,
stale_timeout: Duration,
queue: Arc<RwLock<Queue>>,
current_processing_insert: Arc<RwLock<QueryIdToEntry>>,
}
impl AsyncInsertQueue {
pub fn try_create(
// TODO(fkuner): maybe circular reference
session_mgr: Arc<RwLock<Option<Arc<SessionManager>>>>,
runtime: Arc<Runtime>,
max_data_size: u64,
busy_timeout: Duration,
stale_timeout: Duration,
) -> Self {
Self {
session_mgr,
runtime,
max_data_size,
busy_timeout,
stale_timeout,
queue: Arc::new(RwLock::new(Queue::default())),
current_processing_insert: Arc::new(RwLock::new(QueryIdToEntry::default())),
}
}
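// Illustrative sketch (not from the original source): constructing a queue that
// flushes once 10 MiB of pending data accumulates, with a 200 ms busy timeout and
// a 300 ms stale timeout; session_mgr and runtime are assumed to come from the
// surrounding server bootstrap.
//
//   let queue = Arc::new(AsyncInsertQueue::try_create(
//       session_mgr.clone(),
//       runtime.clone(),
//       10 * 1024 * 1024,
//       Duration::from_millis(200),
//       Duration::from_millis(300),
//   ));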
pub async fn start(self: Arc<Self>) {
let self_arc = self.clone();
// busy timeout
let busy_timeout = self_arc.busy_timeout;
self_arc.clone().runtime.as_ref().inner().spawn(async move {
let mut intv = interval(busy_timeout);
loop {
intv.tick().await;
let timeout = self_arc.clone().busy_check();
intv = interval(timeout);
}
});
// stale timeout
let stale_timeout = self.stale_timeout;
if !stale_timeout.is_zero() {
self.clone().runtime.as_ref().inner().spawn(async move {
let mut intv = interval(stale_timeout);
loop {
intv.tick().await;
self.clone().stale_check();
}
});
}
}
pub async fn push(
self: Arc<Self>,
plan_node: Arc<InsertPlan>,
ctx: Arc<QueryContext>,
) -> Result<()> {
let self_arc = self.clone();
let plan = plan_node.clone();
let settings = ctx.get_changed_settings();
let data_block = match &plan_node.source {
common_planners::InsertInputSource::SelectPlan(plan) => {
let select_interpreter = SelectInterpreter::try_create(ctx.clone(), SelectPlan {
input: Arc::new((**plan).clone()),
})?;
let mut pipeline = select_interpreter.create_new_pipeline().await?;
let mut sink_pipeline_builder = SinkPipeBuilder::create();
for _ in 0..pipeline.output_len() {
let input_port = InputPort::create();
sink_pipeline_builder.add_sink(
input_port.clone(),
MemoryTableSink::create(input_port, ctx.clone()),
);
}
pipeline.add_pipe(sink_pipeline_builder.finalize());
let executor =
PipelineCompleteExecutor::try_create(self.runtime.clone(), pipeline).unwrap();
executor.execute()?;
drop(executor);
let blocks = ctx.consume_precommit_blocks();
DataBlock::concat_blocks(&blocks)?
}
common_planners::InsertInputSource::StreamingWithFormat(_) => todo!(),
common_planners::InsertInputSource::Values(values) => {
let data_block = values.block.clone();
let progress_values = ProgressValues {
rows: data_block.num_rows(),
bytes: data_block.memory_size(),
};
ctx.get_scan_progress().incr(&progress_values);
data_block
}
};
let entry = Arc::new(Entry::try_create(data_block.clone()));
let key = InsertKey::try_create(plan, settings);
let mut queue = self_arc.queue.write();
match queue.get_mut(&key) {
Some(value) => {
value.entries.push(entry.clone());
value.data_size += data_block.memory_size() as u64;
value.last_update = Instant::now();
if value.data_size > self_arc.max_data_size {
self_arc.clone().schedule(key.clone(), value.clone());
queue.remove(&key);
}
}
None => {
let entries = vec![entry.clone()];
let data_size = data_block.memory_size();
let first_update = Instant::now();
let last_update = Instant::now();
let value =
InsertData::try_create(entries, data_size as u64, first_update, last_update);
queue.insert(key.clone(), value.clone());
if data_size > self_arc.max_data_size as usize {
self_arc.clone().schedule(key.clone(), value);
queue.remove(&key);
}
}
}
let mut current_processing_insert = self.current_processing_insert.write();
current_processing_insert.insert(ctx.get_id(), entry);
Ok(())
}
pub fn get_entry(&self, query_id: &str) -> Result<EntryPtr> {
let current_processing_insert = self.current_processing_insert.read();
Ok(current_processing_insert.get(query_id).unwrap().clone())
}
pub fn delete_entry(&self, query_id: &str) -> Result<()> {
let mut current_processing_insert = self.current_processing_insert.write();
current_processing_insert.remove(query_id);
Ok(())
}
pub async fn wait_for_processing_insert(
self: Arc<Self>,
query_id: String,
time_out: Duration,
) -> Result<()> {
let entry = self.get_entry(&query_id)?;
let e = entry.clone();
self.runtime.as_ref().inner().spawn(async move {
let mut intv = interval(time_out);
intv.tick().await;
intv.tick().await;
e.finish_with_timeout();
});
match entry.wait().await {
Ok(_) => {
self.delete_entry(&query_id)?;
Ok(())
}
Err(err) => {
self.delete_entry(&query_id)?;
Err(err)
}
}
}
fn schedule(self: Arc<Self>, key: InsertKey, data: InsertData) {
self.runtime.as_ref().inner().spawn(async {
match self.process(key, data.clone()).await {
Ok(_) => {
for entry in data.entries.into_iter() {
entry.finish();
}
}
Err(err) => {
for entry in data.entries.into_iter() {
entry.finish_with_err(err.clone());
}
}
}
});
}
async fn process(self: Arc<Self>, key: InsertKey, data: InsertData) -> Result<()> {
let insert_plan = key.plan;
let session_mgr = self.session_mgr.read().clone().unwrap();
let session = session_mgr.create_session(SessionType::HTTPQuery).await;
let ctx = session.unwrap().create_query_context().await?;
ctx.apply_changed_settings(key.changed_settings.clone())?;
let interpreter =
InsertInterpreter::try_create(ctx.clone(), insert_plan.as_ref().clone(), true)?;
let output_port = OutputPort::create();
let blocks = Arc::new(Mutex::new(VecDeque::from_iter(
data.entries.iter().map(|x| x.block.clone()),
)));
let source = BlocksSource::create(ctx, output_port.clone(), blocks)?;
let mut builder = SourcePipeBuilder::create();
builder.add_source(output_port.clone(), source);
interpreter
.as_ref()
.set_source_pipe_builder(Some(builder))?;
interpreter.execute(None).await?;
Ok(())
}
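    // Added commentary: `busy_check` flushes batches whose age since `first_update`
    // exceeds `busy_timeout` and returns how long the caller should wait before the
    // next check; `stale_check` applies the same rule to `last_update` against
    // `stale_timeout`, flushing keys that have stopped receiving new blocks.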
fn busy_check(self: Arc<Self>) -> Duration {
let mut keys = Vec::new();
let mut queue = self.queue.write();
let mut timeout = self.busy_timeout;
for (key, value) in queue.iter() {
if value.data_size == 0 {
continue;
}
let time_lag = Instant::now() - value.first_update;
if time_lag.cmp(&self.clone().busy_timeout) == Ordering::Greater {
self.clone().schedule(key.clone(), value.clone());
keys.push(key.clone());
} else {
timeout = timeout.min(self.busy_timeout - time_lag);
}
}
for key in keys.iter() {
queue.remove(key);
}
timeout
}
fn stale_check(self: Arc<Self>) {
let mut keys = Vec::new();
let mut queue = self.queue.write();
for (key, value) in queue.iter() {
if value.data_size == 0 {
continue;
}
let time_lag = Instant::now() - value.last_update;
if time_lag.cmp(&self.clone().stale_timeout) == Ordering::Greater |
}
for key in keys.iter() {
queue.remove(key);
}
}
}
| {
self.clone().schedule(key.clone(), value.clone());
keys.push(key.clone());
} |
amaze_demosaic.py | import numpy as np
def amaze_demosaic(src, raw):
cfarray = raw.raw_colors
cfarray[cfarray == 3] = 1
rgb = amaze_demosaic_libraw(src, cfarray, raw.daylight_whitebalance)
return rgb
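# Illustrative usage (added sketch, not part of the original module). It assumes the
# rawpy package, whose RawPy objects expose raw_image, raw_colors and
# daylight_whitebalance as used by the wrapper above:
#   import rawpy
#   raw = rawpy.imread("photo.dng")
#   rgb = amaze_demosaic(raw.raw_image, raw)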
def amaze_demosaic_libraw(src, cfarray, daylight_wb):
TS = 512
winx = winy = 0
width = src.shape[1]
height = src.shape[0]
image = np.empty([height, width, 3], dtype=np.uint16)
clip_pt = min(daylight_wb[0], daylight_wb[1], daylight_wb[2])
v1 = TS
v2 = 2 * TS
v3 = 3 * TS
p1 = -TS + 1
p2 = -2 * TS + 2
p3 = -3 * TS + 3
m1 = TS + 1
m2 = 2 * TS + 2
m3 = 3 * TS + 3
nbr = [-v2,-2,2,v2,0]
eps, epssq = 1e-5, 1e-10
# adaptive ratios threshold
arthresh=0.75
# nyquist texture test threshold
nyqthresh=0.5
# diagonal interpolation test threshold
pmthresh=0.25
# factors for bounding interpolation in saturated regions
lbd, ubd = 1, 1 # lbd=0.66, ubd=1.5 alternative values;
# gaussian on 5x5 quincunx, sigma=1.2
gaussodd = [0.14659727707323927, 0.103592713382435, 0.0732036125103057, 0.0365543548389495]
# gaussian on 5x5, sigma=1.2
gaussgrad = [0.07384411893421103, 0.06207511968171489, 0.0521818194747806, 0.03687419286733595, 0.03099732204057846, 0.018413194161458882]
# gaussian on 3x3, sigma =0.7
gauss1 = [0.3376688223162362, 0.12171198028231786, 0.04387081413862306]
# gaussian on 5x5 alt quincunx, sigma=1.5
gausseven = [0.13719494435797422, 0.05640252782101291]
    # gaussian on quincunx grid
gquinc = [0.169917, 0.108947, 0.069855, 0.0287182]
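    # Added note: the constants above are the fixed kernels/thresholds from the
    # reference AMaZE implementation; gaussodd and gaussgrad feed the Nyquist test,
    # gausseven the diagonal colour-difference variances, gquinc the Nyquist
    # refinement, while gauss1 is kept for parity with the reference code.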
rgb = np.empty([TS*TS, 3], dtype=np.float32)
delh = np.empty(TS*TS, dtype=np.float32)
delv = np.empty(TS*TS, dtype=np.float32)
delhsq = np.empty(TS*TS, dtype=np.float32)
delvsq = np.empty(TS*TS, dtype=np.float32)
dirwts = np.empty([TS*TS, 2], dtype=np.float32)
vcd = np.empty(TS*TS, dtype=np.float32)
hcd = np.empty(TS*TS, dtype=np.float32)
vcdalt = np.empty(TS*TS, dtype=np.float32)
hcdalt = np.empty(TS*TS, dtype=np.float32)
vcdsq = np.empty(TS*TS, dtype=np.float32)
hcdsq = np.empty(TS*TS, dtype=np.float32)
cddiffsq = np.empty(TS*TS, dtype=np.float32)
hvwt = np.empty(TS*TS, dtype=np.float32)
Dgrb = np.empty([TS*TS, 2], dtype=np.float32)
delp = np.empty(TS*TS, dtype=np.float32)
delm = np.empty(TS*TS, dtype=np.float32)
rbint = np.empty(TS*TS, dtype=np.float32)
Dgrbh2 = np.empty(TS*TS, dtype=np.float32)
Dgrbv2 = np.empty(TS*TS, dtype=np.float32)
dgintv = np.empty(TS*TS, dtype=np.float32)
dginth = np.empty(TS*TS, dtype=np.float32)
Dgrbpsq1 = np.empty(TS*TS, dtype=np.float32)
Dgrbmsq1 = np.empty(TS*TS, dtype=np.float32)
cfa = np.empty(TS*TS, dtype=np.float32)
pmwt = np.empty(TS*TS, dtype=np.float32)
rbp = np.empty(TS*TS, dtype=np.float32)
rbm = np.empty(TS*TS, dtype=np.float32)
nyquist = np.empty(TS*TS, dtype=np.int32)
# determine GRBG coset; (ey,ex) is the offset of the R subarray
if cfarray[0][0] == 1:
if cfarray[0][1] == 0:
ex, ey = 1, 0
else:
ex, ey = 0, 1
else:
if cfarray[0][0] == 0:
ex = ey = 0
else:
ex = ey = 1
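    # Added illustration of the coset logic above (not part of the algorithm):
    # for an "RGGB" cfarray, cfarray[0][0] == 0 (red), so ex = ey = 0 and the R
    # subarray starts at the tile origin; for "GRBG", cfarray[0][0] == 1 and
    # cfarray[0][1] == 0, giving (ex, ey) = (1, 0).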
# Start main loop
loop_cnt = 1
for top in range(winy-16, winy+height, TS-32):
for left in range(winx-16, winx+width, TS-32):
print("Loop [{}]: top: {} left: {}".format(loop_cnt, top, left))
loop_cnt += 1
# location of tile bottom edge
bottom = min(top+TS, winy+height+16)
# location of tile right edge
right = min(left+TS, winx+width+16)
            # tile height (=TS except for bottom edge of image)
            rr1 = bottom - top
            # tile width (=TS except for right edge of image)
            cc1 = right - left
# rgb from input CFA data
            # rgb values should be floating point numbers between 0 and 1
# after white balance multipliers are applied
# a 16 pixel border is added to each side of the image
# bookkeeping for borders
rrmin = 16 if top < winy else 0
ccmin = 16 if left < winx else 0
rrmax = winy+height-top if bottom>(winy+height) else rr1
ccmax = winx+width-left if right>(winx+width) else cc1
for rr in range(rrmin, rrmax):
row = rr + top
for cc in range(ccmin, ccmax):
col = cc + left
c = cfarray[rr, cc]
indx1 = rr * TS + cc
indx = row * width + col
rgb[indx1, c] = src[row, col] / 65535
cfa[indx1] = rgb[indx1, c]
# fill borders
if rrmin > 0:
for rr in range(16):
for cc in range(ccmin, ccmax):
c = cfarray[rr, cc]
rgb[rr*TS+cc, c] = rgb[(32-rr)*TS+cc, c]
cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
if rrmax < rr1:
for rr in range(16):
for cc in range(ccmin, ccmax):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+cc, c] = (src[(winy+height-rr-2), left+cc])/65535
cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc, c]
if ccmin > 0:
for rr in range(rrmin, rrmax):
for cc in range(16):
c = cfarray[rr, cc]
rgb[rr*TS+cc, c] = rgb[rr*TS+32-cc, c]
cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
if ccmax < cc1:
for rr in range(rrmin, rrmax):
for cc in range(16):
c = cfarray[rr, cc]
rgb[rr*TS+ccmax+cc, c] = (src[(top+rr), (winx+width-cc-2)])/65535
cfa[rr*TS+ccmax+cc] = rgb[rr*TS+ccmax+cc, c]
# also, fill the image corners
if rrmin > 0 and ccmin > 0:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rr)*TS+cc][c] = rgb[(32-rr)*TS+(32-cc)][c]
cfa[(rr)*TS+cc] = rgb[(rr)*TS+cc][c]
if rrmax < rr1 and ccmax < cc1:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+ccmax+cc][c] = (src[(winy+height-rr-2)][(winx+width-cc-2)])/65535
cfa[(rrmax+rr)*TS+ccmax+cc] = rgb[(rrmax+rr)*TS+ccmax+cc][c]
if rrmin > 0 and ccmax < cc1:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rr)*TS+ccmax+cc][c] = (src[(winy+32-rr)][(winx+width-cc-2)])/65535
cfa[(rr)*TS+ccmax+cc] = rgb[(rr)*TS+ccmax+cc][c]
if rrmax < rr1 and ccmin > 0:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+cc][c] = (src[(winy+height-rr-2)][(winx+32-cc)])/65535
cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc][c]
# end of border fill
for rr in range(1, rr1-1):
for cc in range(1, cc1-1):
indx = rr*TS+cc
delh[indx] = abs(cfa[indx + 1] - cfa[indx - 1])
delv[indx] = abs(cfa[indx + v1] - cfa[indx - v1])
delhsq[indx] = SQR(delh[indx])
delvsq[indx] = SQR(delv[indx])
delp[indx] = abs(cfa[indx+p1]-cfa[indx-p1])
delm[indx] = abs(cfa[indx+m1]-cfa[indx-m1])
for rr in range(2, rr1-2):
for cc in range(2, cc1-2):
indx = rr*TS+cc
# vert directional averaging weights
dirwts[indx][0] = eps+delv[indx+v1]+delv[indx-v1]+delv[indx]
# horizontal weights
dirwts[indx][1] = eps+delh[indx+1]+delh[indx-1]+delh[indx]
if cfarray[rr, cc] & 1:
# for later use in diagonal interpolation
Dgrbpsq1[indx]=(SQR(cfa[indx]-cfa[indx-p1])+SQR(cfa[indx]-cfa[indx+p1]))
Dgrbmsq1[indx]=(SQR(cfa[indx]-cfa[indx-m1])+SQR(cfa[indx]-cfa[indx+m1]))
for rr in range(4, rr1 - 4):
for cc in range(4, cc1 - 4):
indx = rr*TS+cc
c = cfarray[rr, cc]
sgn = -1 if c & 1 else 1
# initialization of nyquist test
nyquist[indx]=0
# preparation for diag interp
rbint[indx]=0
# color ratios in each cardinal direction
cru = cfa[indx - v1] * (dirwts[indx - v2][0] + dirwts[indx][0]) / (dirwts[indx - v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx - v2]))
crd = cfa[indx + v1] * (dirwts[indx + v2][0] + dirwts[indx][0]) / (dirwts[indx + v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx + v2]))
crl = cfa[indx - 1] * (dirwts[indx - 2][1] + dirwts[indx][1]) / (dirwts[indx - 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx - 2]))
crr = cfa[indx + 1] * (dirwts[indx + 2][1] + dirwts[indx][1]) / (dirwts[indx + 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx + 2]))
# G interpolated in vert/hor directions using Hamilton-Adams method
guha = min(clip_pt, cfa[indx - v1] + 0.5 * (cfa[indx] - cfa[indx - v2]))
gdha = min(clip_pt, cfa[indx + v1] + 0.5 * (cfa[indx] - cfa[indx + v2]))
glha = min(clip_pt, cfa[indx - 1] + 0.5 * (cfa[indx] - cfa[indx - 2]))
grha = min(clip_pt, cfa[indx + 1] + 0.5 * (cfa[indx] - cfa[indx + 2]))
# G interpolated in vert/hor directions using adaptive ratios
guar = cfa[indx] * cru if abs(1-cru) < arthresh else guha
gdar = cfa[indx] * crd if abs(1-crd) < arthresh else gdha
glar = cfa[indx] * crl if abs(1-crl) < arthresh else glha
grar = cfa[indx] * crr if abs(1-crr) < arthresh else grha
# adaptive weights for vertical/horizontal directions
hwt = dirwts[indx - 1][1] / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
vwt = dirwts[indx - v1][0] / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
                    # interpolated G via adaptive weights of cardinal evaluations
Gintvar = vwt * gdar + (1-vwt) * guar
Ginthar = hwt * grar + (1-hwt) * glar
Gintvha = vwt * gdha + (1-vwt) * guha
Ginthha = hwt * grha + (1-hwt) * glha
# interpolated color differences
vcd[indx] = sgn * (Gintvar-cfa[indx])
hcd[indx] = sgn * (Ginthar-cfa[indx])
vcdalt[indx] = sgn * (Gintvha-cfa[indx])
hcdalt[indx] = sgn * (Ginthha-cfa[indx])
if cfa[indx] > 0.8 * clip_pt or Gintvha > 0.8 * clip_pt or Ginthha > 0.8 * clip_pt:
                        # use HA if highlights are (nearly) clipped
guar = guha
gdar = gdha
glar = glha
grar = grha
vcd[indx] = vcdalt[indx]
hcd[indx] = hcdalt[indx]
# differences of interpolations in opposite directions
dgintv[indx] = min((guha - gdha) ** 2, (guar - gdar) ** 2)
dginth[indx] = min((glha - grha) ** 2, (glar - grar) ** 2)
for rr in range(4, rr1-4):
                for cc in range(4, cc1-4):
                    indx = rr*TS+cc
                    c = cfarray[rr, cc]
hcdvar = 3*(SQR(hcd[indx-2])+SQR(hcd[indx])+SQR(hcd[indx+2]))-SQR(hcd[indx-2]+hcd[indx]+hcd[indx+2])
hcdaltvar = 3*(SQR(hcdalt[indx-2])+SQR(hcdalt[indx])+SQR(hcdalt[indx+2]))-SQR(hcdalt[indx-2]+hcdalt[indx]+hcdalt[indx+2])
vcdvar = 3*(SQR(vcd[indx-v2])+SQR(vcd[indx])+SQR(vcd[indx+v2]))-SQR(vcd[indx-v2]+vcd[indx]+vcd[indx+v2])
vcdaltvar = 3*(SQR(vcdalt[indx-v2])+SQR(vcdalt[indx])+SQR(vcdalt[indx+v2]))-SQR(vcdalt[indx-v2]+vcdalt[indx]+vcdalt[indx+v2])
# choose the smallest variance; this yields a smoother interpolation
if hcdaltvar < hcdvar:
hcd[indx] = hcdalt[indx]
if vcdaltvar < vcdvar:
vcd[indx] = vcdalt[indx]
# bound the interpolation in regions of high saturation
# vertical and horizontal G interpolations
if c & 1: # G site
Ginth = -hcd[indx] + cfa[indx]
Gintv = -vcd[indx] + cfa[indx]
if hcd[indx] > 0:
if 3 * hcd[indx] > (Ginth + cfa[indx]):
hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
else:
hwt = 1 - 3 * hcd[indx] / (eps + Ginth + cfa[indx])
hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (-np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx])
if vcd[indx] > 0:
if 3 * vcd[indx] > (Gintv + cfa[indx]):
vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
else:
|
if Ginth > clip_pt:
hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
if Gintv > clip_pt:
vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
else: # R or B site
Ginth = hcd[indx] + cfa[indx]
Gintv = vcd[indx] + cfa[indx]
if hcd[indx] < 0:
if 3 * hcd[indx] < -(Ginth + cfa[indx]):
hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
else:
hwt = 1 + 3 * hcd[indx] / (eps + Ginth + cfa[indx])
hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx])
if vcd[indx] < 0:
if 3 * vcd[indx] < -(Gintv + cfa[indx]):
vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
else:
vwt = 1 + 3 * vcd[indx] / (eps + Gintv + cfa[indx])
vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx])
if Ginth > clip_pt:
hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
if Gintv > clip_pt:
vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
vcdsq[indx] = SQR(vcd[indx])
hcdsq[indx] = SQR(hcd[indx])
cddiffsq[indx] = SQR(vcd[indx]-hcd[indx])
for rr in range(6, rr1-6):
for cc in range(6+(cfarray[rr, 2]&1), cc1-6, 2):
indx = rr * TS + cc
# compute color difference variances in cardinal directions
Dgrbvvaru = 4*(vcdsq[indx]+vcdsq[indx-v1]+vcdsq[indx-v2]+vcdsq[indx-v3])-SQR(vcd[indx]+vcd[indx-v1]+vcd[indx-v2]+vcd[indx-v3])
Dgrbvvard = 4*(vcdsq[indx]+vcdsq[indx+v1]+vcdsq[indx+v2]+vcdsq[indx+v3])-SQR(vcd[indx]+vcd[indx+v1]+vcd[indx+v2]+vcd[indx+v3])
Dgrbhvarl = 4*(hcdsq[indx]+hcdsq[indx-1]+hcdsq[indx-2]+hcdsq[indx-3])-SQR(hcd[indx]+hcd[indx-1]+hcd[indx-2]+hcd[indx-3])
Dgrbhvarr = 4*(hcdsq[indx]+hcdsq[indx+1]+hcdsq[indx+2]+hcdsq[indx+3])-SQR(hcd[indx]+hcd[indx+1]+hcd[indx+2]+hcd[indx+3])
hwt = dirwts[indx-1][1]/(dirwts[indx-1][1]+dirwts[indx+1][1])
vwt = dirwts[indx-v1][0]/(dirwts[indx+v1][0]+dirwts[indx-v1][0])
vcdvar = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
hcdvar = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
# compute fluctuations in up/down and left/right interpolations of colors
Dgrbvvaru = (dgintv[indx])+(dgintv[indx-v1])+(dgintv[indx-v2])
Dgrbvvard = (dgintv[indx])+(dgintv[indx+v1])+(dgintv[indx+v2])
Dgrbhvarl = (dginth[indx])+(dginth[indx-1])+(dginth[indx-2])
Dgrbhvarr = (dginth[indx])+(dginth[indx+1])+(dginth[indx+2])
vcdvar1 = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
hcdvar1 = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
# determine adaptive weights for G interpolation
varwt=hcdvar/(vcdvar+hcdvar)
diffwt=hcdvar1/(vcdvar1+hcdvar1)
# if both agree on interpolation direction, choose the one with strongest directional discrimination;
# otherwise, choose the u/d and l/r difference fluctuation weights
if ((0.5 - varwt) * (0.5 - diffwt) > 0) and (abs(0.5 - diffwt) < abs(0.5 - varwt)):
hvwt[indx] = varwt
else:
hvwt[indx] = diffwt
# Nyquist test
for rr in range(6, rr1-6):
for cc in range(6 + (cfarray[rr, 2]&1), cc1 - 6, 2):
indx = rr * TS + cc
# nyquist texture test: ask if difference of vcd compared to hcd is larger or smaller than RGGB gradients
nyqtest = (gaussodd[0]*cddiffsq[indx] + gaussodd[1]*(cddiffsq[indx-m1]+cddiffsq[indx+p1] + cddiffsq[indx-p1]+cddiffsq[indx+m1]) + gaussodd[2]*(cddiffsq[indx-v2]+cddiffsq[indx-2]+ cddiffsq[indx+2]+cddiffsq[indx+v2]) + gaussodd[3]*(cddiffsq[indx-m2]+cddiffsq[indx+p2] + cddiffsq[indx-p2]+cddiffsq[indx+m2]))
nyqtest -= nyqthresh*(gaussgrad[0]*(delhsq[indx]+delvsq[indx])+gaussgrad[1]*(delhsq[indx-v1]+delvsq[indx-v1]+delhsq[indx+1]+delvsq[indx+1] + delhsq[indx-1]+delvsq[indx-1]+delhsq[indx+v1]+delvsq[indx+v1])+ gaussgrad[2]*(delhsq[indx-m1]+delvsq[indx-m1]+delhsq[indx+p1]+delvsq[indx+p1]+ delhsq[indx-p1]+delvsq[indx-p1]+delhsq[indx+m1]+delvsq[indx+m1])+ gaussgrad[3]*(delhsq[indx-v2]+delvsq[indx-v2]+delhsq[indx-2]+delvsq[indx-2]+ delhsq[indx+2]+delvsq[indx+2]+delhsq[indx+v2]+delvsq[indx+v2])+ gaussgrad[4]*(delhsq[indx-2*TS-1]+delvsq[indx-2*TS-1]+delhsq[indx-2*TS+1]+delvsq[indx-2*TS+1]+ delhsq[indx-TS-2]+delvsq[indx-TS-2]+delhsq[indx-TS+2]+delvsq[indx-TS+2]+ delhsq[indx+TS-2]+delvsq[indx+TS-2]+delhsq[indx+TS+2]+delvsq[indx-TS+2]+ delhsq[indx+2*TS-1]+delvsq[indx+2*TS-1]+delhsq[indx+2*TS+1]+delvsq[indx+2*TS+1])+ gaussgrad[5]*(delhsq[indx-m2]+delvsq[indx-m2]+delhsq[indx+p2]+delvsq[indx+p2]+ delhsq[indx-p2]+delvsq[indx-p2]+delhsq[indx+m2]+delvsq[indx+m2]))
if nyqtest > 0:
# nyquist=1 for nyquist region
nyquist[indx] = 1
for rr in range(8, rr1-8):
                for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
                    indx = rr * TS + cc
                    areawt=(nyquist[indx-v2]+nyquist[indx-m1]+nyquist[indx+p1]+nyquist[indx-2]+nyquist[indx]+nyquist[indx+2]+nyquist[indx-p1]+nyquist[indx+m1]+nyquist[indx+v2])
# if most of your neighbors are named Nyquist, it's likely that you're one too
nyquist[indx] = 1 if areawt > 4 else 0
# end of Nyquist test
# in areas of Nyquist texture, do area interpolation
for rr in range(8, rr1 - 8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
if nyquist[indx]:
# area interpolation
sumh = sumv = sumsqh = sumsqv = areawt = 0
for i in range(-6, 7, 2):
for j in range(-6, 7, 2):
indx1 = (rr + i) * TS + cc + j
if nyquist[indx1]:
sumh += cfa[indx1] - 0.5 * (cfa[indx1-1]+cfa[indx1+1])
sumv += cfa[indx1] - 0.5 * (cfa[indx1-v1]+cfa[indx1+v1])
sumsqh += 0.5 * (SQR(cfa[indx1]-cfa[indx1-1]) + SQR(cfa[indx1]-cfa[indx1+1]))
sumsqv += 0.5 * (SQR(cfa[indx1]-cfa[indx1-v1]) + SQR(cfa[indx1]-cfa[indx1+v1]))
areawt += 1
# horizontal and vertical color differences, and adaptive weight
hcdvar = epssq + max(0, areawt*sumsqh-sumh*sumh)
vcdvar = epssq + max(0, areawt*sumsqv-sumv*sumv)
hvwt[indx] = hcdvar / (vcdvar + hcdvar)
# end of area interpolation
# populate G at R/B sites
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
# first ask if one gets more directional discrimination from nearby B/R sites
hvwtalt = 0.25 * (hvwt[indx-m1] + hvwt[indx+p1] + hvwt[indx-p1] + hvwt[indx+m1])
vo = abs(0.5 - hvwt[indx])
ve = abs(0.5 - hvwtalt)
# a better result was obtained from the neighbors
if vo < ve:
                        hvwt[indx] = hvwtalt
# evaluate color differences
Dgrb[indx][0] = (hcd[indx]*(1-hvwt[indx]) + vcd[indx]*hvwt[indx])
# evaluate G
rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
# local curvature in G (preparation for nyquist refinement step)
if nyquist[indx]:
Dgrbh2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-1][1]+rgb[indx+1][1]))
Dgrbv2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-v1][1]+rgb[indx+v1][1]))
else:
Dgrbh2[indx] = Dgrbv2[indx] = 0
# end of standard interpolation
# refine Nyquist areas using G curvatures
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
if nyquist[indx]:
# local averages (over Nyquist pixels only) of G curvature squared
gvarh = epssq + (gquinc[0]*Dgrbh2[indx]+gquinc[1]*(Dgrbh2[indx-m1]+Dgrbh2[indx+p1]+Dgrbh2[indx-p1]+Dgrbh2[indx+m1])+gquinc[2]*(Dgrbh2[indx-v2]+Dgrbh2[indx-2]+Dgrbh2[indx+2]+Dgrbh2[indx+v2])+gquinc[3]*(Dgrbh2[indx-m2]+Dgrbh2[indx+p2]+Dgrbh2[indx-p2]+Dgrbh2[indx+m2]))
gvarv = epssq + (gquinc[0]*Dgrbv2[indx]+gquinc[1]*(Dgrbv2[indx-m1]+Dgrbv2[indx+p1]+Dgrbv2[indx-p1]+Dgrbv2[indx+m1])+gquinc[2]*(Dgrbv2[indx-v2]+Dgrbv2[indx-2]+Dgrbv2[indx+2]+Dgrbv2[indx+v2])+gquinc[3]*(Dgrbv2[indx-m2]+Dgrbv2[indx+p2]+Dgrbv2[indx-p2]+Dgrbv2[indx+m2]))
# use the results as weights for refined G interpolation
Dgrb[indx][0] = (hcd[indx]*gvarv + vcd[indx]*gvarh)/(gvarv+gvarh)
rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
# diagonal interpolation correction
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
rbvarp = epssq + (gausseven[0]*(Dgrbpsq1[indx-v1]+Dgrbpsq1[indx-1]+Dgrbpsq1[indx+1]+Dgrbpsq1[indx+v1]) + gausseven[1]*(Dgrbpsq1[indx-v2-1]+Dgrbpsq1[indx-v2+1]+Dgrbpsq1[indx-2-v1]+Dgrbpsq1[indx+2-v1]+ Dgrbpsq1[indx-2+v1]+Dgrbpsq1[indx+2+v1]+Dgrbpsq1[indx+v2-1]+Dgrbpsq1[indx+v2+1]))
rbvarm = epssq + (gausseven[0]*(Dgrbmsq1[indx-v1]+Dgrbmsq1[indx-1]+Dgrbmsq1[indx+1]+Dgrbmsq1[indx+v1]) + gausseven[1]*(Dgrbmsq1[indx-v2-1]+Dgrbmsq1[indx-v2+1]+Dgrbmsq1[indx-2-v1]+Dgrbmsq1[indx+2-v1]+ Dgrbmsq1[indx-2+v1]+Dgrbmsq1[indx+2+v1]+Dgrbmsq1[indx+v2-1]+Dgrbmsq1[indx+v2+1]))
# diagonal color ratios
crse=2*(cfa[indx+m1])/(eps+cfa[indx]+(cfa[indx+m2]))
crnw=2*(cfa[indx-m1])/(eps+cfa[indx]+(cfa[indx-m2]))
crne=2*(cfa[indx+p1])/(eps+cfa[indx]+(cfa[indx+p2]))
crsw=2*(cfa[indx-p1])/(eps+cfa[indx]+(cfa[indx-p2]))
# assign B/R at R/B sites
if abs(1 - crse) < arthresh:
rbse = cfa[indx] * crse
else:
rbse = cfa[indx + m1] + 0.5 * (cfa[indx] - cfa[indx + m2])
                    if abs(1 - crnw) < arthresh:
                        rbnw = cfa[indx] * crnw
                    else:
                        rbnw = cfa[indx - m1] + 0.5 * (cfa[indx] - cfa[indx - m2])
if abs(1 - crne) < arthresh:
rbne = cfa[indx] * crne
else:
                        rbne = cfa[indx + p1] + 0.5 * (cfa[indx] - cfa[indx + p2])
if abs(1 - crsw) < arthresh:
rbsw = cfa[indx] * crsw
else:
rbsw = (cfa[indx - p1]) + 0.5 * (cfa[indx] - cfa[indx - p2])
wtse= eps+delm[indx]+delm[indx+m1]+delm[indx+m2] # same as for wtu,wtd,wtl,wtr
wtnw= eps+delm[indx]+delm[indx-m1]+delm[indx-m2]
wtne= eps+delp[indx]+delp[indx+p1]+delp[indx+p2]
wtsw= eps+delp[indx]+delp[indx-p1]+delp[indx-p2]
rbm[indx] = (wtse*rbnw+wtnw*rbse)/(wtse+wtnw)
rbp[indx] = (wtne*rbsw+wtsw*rbne)/(wtne+wtsw)
pmwt[indx] = rbvarm/(rbvarp+rbvarm)
# bound the interpolation in regions of high saturation
if rbp[indx] < cfa[indx]:
if 2 * (rbp[indx]) < cfa[indx]:
rbp[indx] = np.median([rbp[indx] , cfa[indx - p1], cfa[indx + p1]])
else:
pwt = 2 * (cfa[indx] - rbp[indx]) / (eps + rbp[indx] + cfa[indx])
rbp[indx] = pwt * rbp[indx] + (1 - pwt) * np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
if rbm[indx] < cfa[indx]:
if 2 * (rbm[indx]) < cfa[indx]:
rbm[indx] = np.median([rbm[indx] , cfa[indx - m1], cfa[indx + m1]])
else:
mwt = 2 * (cfa[indx] - rbm[indx]) / (eps + rbm[indx] + cfa[indx])
rbm[indx] = mwt * rbm[indx] + (1 - mwt) * np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
if rbp[indx] > clip_pt:
rbp[indx] = np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
if rbm[indx] > clip_pt:
rbm[indx] = np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
for rr in range(10, rr1-10):
for cc in range(10 + (cfarray[rr, 2]&1), cc1-10, 2):
indx = rr * TS + cc
                    # first ask if one gets more directional discrimination from nearby B/R sites
pmwtalt = 0.25*(pmwt[indx-m1]+pmwt[indx+p1]+pmwt[indx-p1]+pmwt[indx+m1])
vo = abs(0.5-pmwt[indx])
ve = abs(0.5-pmwtalt)
if vo < ve:
pmwt[indx] = pmwtalt
rbint[indx] = 0.5*(cfa[indx] + rbm[indx]*(1-pmwt[indx]) + rbp[indx]*pmwt[indx])
for rr in range(12, rr1 - 12):
for cc in range(12 + (cfarray[rr, 2]&1), cc1 - 12, 2):
indx = rr * TS + cc
if abs(0.5 - pmwt[indx]) < abs(0.5 - hvwt[indx]):
continue
# now interpolate G vertically/horizontally using R+B values
# unfortunately, since G interpolation cannot be done diagonally this may lead to colour shifts
# colour ratios for G interpolation
cru = cfa[indx-v1]*2/(eps+rbint[indx]+rbint[indx-v2])
crd = cfa[indx+v1]*2/(eps+rbint[indx]+rbint[indx+v2])
crl = cfa[indx-1]*2/(eps+rbint[indx]+rbint[indx-2])
crr = cfa[indx+1]*2/(eps+rbint[indx]+rbint[indx+2])
# interpolated G via adaptive ratios or Hamilton-Adams in each cardinal direction
if abs(1 - cru) < arthresh:
gu = rbint[indx] * cru
else:
gu = cfa[indx - v1] + 0.5 * (rbint[indx] - rbint[(indx - v1)])
if abs(1 - crd) < arthresh:
gd = rbint[indx] * crd
else:
gd = cfa[indx + v1] + 0.5 * (rbint[indx] - rbint[(indx + v1)])
if abs(1 - crl) < arthresh:
gl = rbint[indx] * crl
else:
gl = cfa[indx - 1] + 0.5 * (rbint[indx] - rbint[(indx - 1)])
if abs(1 - crr) < arthresh:
gr = rbint[indx] * crr
else:
gr = cfa[indx + 1] + 0.5 * (rbint[indx] - rbint[(indx + 1)])
                    # interpolated G via adaptive weights of cardinal evaluations
Gintv = (dirwts[indx - v1][0] * gd + dirwts[indx + v1][0] * gu) / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
Ginth = (dirwts[indx - 1][1] * gr + dirwts[indx + 1][1] * gl) / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
# bound the interpolation in regions of high saturation
if Gintv < rbint[indx]:
if (2 * Gintv < rbint[indx]):
Gintv = np.median([Gintv , cfa[indx - v1], cfa[indx + v1]])
else:
vwt = 2 * (rbint[indx] - Gintv) / (eps + Gintv + rbint[indx])
Gintv = vwt * Gintv + (1 - vwt) * np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
if Ginth < rbint[indx]:
if 2 * Ginth < rbint[indx]:
Ginth = np.median([Ginth , cfa[indx - 1], cfa[indx + 1]])
else:
hwt = 2 * (rbint[indx] - Ginth) / (eps + Ginth + rbint[indx])
Ginth = hwt * Ginth + (1 - hwt) * np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
if Ginth > clip_pt:
Ginth = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
if Gintv > clip_pt:
Gintv = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
rgb[indx][1] = Ginth*(1-hvwt[indx]) + Gintv*hvwt[indx]
Dgrb[indx][0] = rgb[indx][1]-cfa[indx]
# end of diagonal interpolation correction
# fancy chrominance interpolation
# (ey,ex) is location of R site
for rr in range(13-ey, rr1-12, 2):
for cc in range(13-ex, cc1-12, 2):
indx = rr*TS+cc
Dgrb[indx][1]=Dgrb[indx][0] # split out G-B from G-R
Dgrb[indx][0]=0
for rr in range(12, rr1-12):
c = int(1- cfarray[rr, 12+(cfarray[rr,2]&1)]/2)
for cc in range(12+(cfarray[rr,2]&1), cc1-12, 2):
indx = rr * TS + cc
wtnw=1/(eps+abs(Dgrb[indx-m1][c]-Dgrb[indx+m1][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx-m3][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-m3][c]))
wtne=1/(eps+abs(Dgrb[indx+p1][c]-Dgrb[indx-p1][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx+p3][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+p3][c]))
wtsw=1/(eps+abs(Dgrb[indx-p1][c]-Dgrb[indx+p1][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+m3][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx-p3][c]))
wtse=1/(eps+abs(Dgrb[indx+m1][c]-Dgrb[indx-m1][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-p3][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx+m3][c]))
Dgrb[indx][c]=(wtnw*(1.325*Dgrb[indx-m1][c]-0.175*Dgrb[indx-m3][c]-0.075*Dgrb[indx-m1-2][c]-0.075*Dgrb[indx-m1-v2][c] )+ wtne*(1.325*Dgrb[indx+p1][c]-0.175*Dgrb[indx+p3][c]-0.075*Dgrb[indx+p1+2][c]-0.075*Dgrb[indx+p1+v2][c] )+ wtsw*(1.325*Dgrb[indx-p1][c]-0.175*Dgrb[indx-p3][c]-0.075*Dgrb[indx-p1-2][c]-0.075*Dgrb[indx-p1-v2][c] )+ wtse*(1.325*Dgrb[indx+m1][c]-0.175*Dgrb[indx+m3][c]-0.075*Dgrb[indx+m1+2][c]-0.075*Dgrb[indx+m1+v2][c] ))/(wtnw+wtne+wtsw+wtse)
for rr in range(12, rr1-12):
# c = int(cfarray[rr, 12+(cfarray[rr,1]&1)+1]/2)
                for cc in range(12+(cfarray[rr,1]&1), cc1-12, 2):
                    indx = rr * TS + cc
                    for c in range(2):
Dgrb[indx][c]=((hvwt[indx-v1])*Dgrb[indx-v1][c]+(1-hvwt[indx+1])*Dgrb[indx+1][c]+(1-hvwt[indx-1])*Dgrb[indx-1][c]+(hvwt[indx+v1])*Dgrb[indx+v1][c])/((hvwt[indx-v1])+(1-hvwt[indx+1])+(1-hvwt[indx-1])+(hvwt[indx+v1]))
for rr in range(12, rr1-12):
for cc in range(12, cc1-12):
indx = rr * TS + cc
rgb[indx][0]=(rgb[indx][1]-Dgrb[indx][0])
rgb[indx][2]=(rgb[indx][1]-Dgrb[indx][1])
# copy smoothed results back to image matrix
for rr in range(16, rr1-16):
row = rr + top
for cc in range(16, cc1-16):
col = cc + left
for c in range(3):
image[row, col, c] = int(rgb[rr*TS+cc, c] * 65535 + 0.5)
# end of main loop
return image
# Define some utility functions for demosaicing
# For AMAzE
def fc(cfa, r, c):
return cfa[r&1, c&1]
def intp(a, b, c):
return a * (b - c) + c
def SQR(x):
return x ** 2 | vwt = 1 - 3 * vcd[indx] / (eps + Gintv + cfa[indx])
vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (-np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]) |
testdb.go | package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
)
func checkErr(err error) {
if err != nil |
}
func main() {
db, err := sql.Open("mysql", "base:imkloKuLiqNMc6Cn@tcp(172.31.215.37:3306)/innotree_data_online?charset=utf8")
	checkErr(err)
	defer db.Close()
rows, err := db.Query("SELECT id,date,downNum,ftpStartDate FROM pdf_down_parse_record limit 3")
	checkErr(err)
	defer rows.Close()
for rows.Next() {
var id int
var date string
var downNum int
var ftpStartDate string
err = rows.Scan(&id, &date, &downNum, &ftpStartDate)
checkErr(err)
fmt.Println(id)
fmt.Println(date)
fmt.Println(downNum)
fmt.Println(ftpStartDate)
}
}
| {
panic(err)
} |
scales.js | /* scales.js
 * In this file we define data as objects to populate the FIM, SCIM & PAIQI scales.
 * Created by George Marzloff | [email protected]
*/
// FIM
// Based on https://www.cms.gov/Medicare/Medicare-Fee-for-Service-Payment/InpatientRehabFacPPS/downloads/irfpai-manualint.pdf
// SCIM
// Based on http://sci2.rickhanseninstitute.org/images/sci2/SCIM/toolkit/SCIM_Clinical_Form.pdf
//PAIQI
// https://www.cms.gov/Medicare/Quality-Initiatives-Patient-Assessment-Instruments/IRF-Quality-Reporting/Downloads/DRAFT-IRF-PAI-FOR-OCT-2016.pdf
var FIM_Scale = function () {
this.name = "FIM Instrument";
this.dressingChoices = [
new Choice("Complete Independence - safely dresses & undresses self obtaining clothes from drawers/closets, managing bra, front garment, zippers, buttons, snaps, dons prosthesis/orthosis.",7),
new Choice("Modified Independence - requires special adaptive closure such as a Velcro fastener or assisitve device, or takes more than a reasonable amount of time.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (application of orthoses or assistive/adaptive devices, setting out clothes",5),
new Choice("Minimal Contact Assistance - performs 75% or more of dressing tasks.",4),
new Choice("Moderate Assistance - performs 50%-74% of dressing tasks.",3),
new Choice("Maximal Assistance - performs 25-49% of dressing tasks.",2),
new Choice("Total Assistance - performs <25% of dressing tasks",1),
new Choice("Activity Does Not Occur - enter code 0 only for the admission assessment. The patient does not dress self and is not dressed by a helper.",0)
];
this.questions = [new Question(
"39A. Eating (using suitable utensils to bring food to the mouth and the ability to chew and swallow the food once the meal is presented).",
[
new Choice("Complete Independence - eats from a dish while managing variety of food consistencies, and drinks from a cup/glass with meal presented on table/tray. Opens containers, butters bread, cuts meat, pours liquids, uses a spoon or fork to bring food to mouth where it is chewed and swallowed.",7),
new Choice("Modified Independence - requires an adaptive or assistive device e.g. long straw spork or rocking knife, requires more than a reasonable time to eat, or requires modified food consistency. If s/he uses PPN/PEG, s/he self-feeds.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (application of orthoses or assistive/adaptive devices) or another person is required to open containers, butter bread, cut meat, or pour liquids.",5),
new Choice("Minimal Contact Assistance - performs 75% or more of eating tasks.",4),
new Choice("Moderate Assistance - performs 50%-74% of eating tasks.",3),
new Choice("Maximal Assistance - performs 25-49% of eating tasks.",2),
new Choice("Total Assistance - performs <25% of eating tasks or fed via PPN/PEG and does not self-administer",1),
new Choice("Activity Does Not Occur - enter code 0 only for the admission assessment. The patient does not eat and does not receive PPN/PEG feeds during the entire assessment time frame.",0)
],
"Self-Care"),
new Question(
"39B. Grooming (oral care, hair grooming, washing their hands face and either shaving their face or applying makeup).",
[
new Choice("Complete Independence - cleans teeth/dentures, combs/brushes hair, washes hands, face and either shaves the face or applies make-up. activity is performed safely.",7),
new Choice("Modified Independence - requires specialized equipment (including prosthesis or orthosis) to groom or takes more than a reasonable time, or there are safety considerations.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (application of orthoses or assistive/adaptive devices, setting out grooming equipment or initial preparation such as applying toothpaste or opening make-up containers",5),
new Choice("Minimal Contact Assistance - performs 75% or more of grooming tasks.",4),
new Choice("Moderate Assistance - performs 50%-74% of grooming tasks.",3),
new Choice("Maximal Assistance - performs 25-49% of grooming tasks.",2),
new Choice("Total Assistance - performs <25% of grooming tasks",1),
new Choice("Activity Does Not Occur - enter code 0 only for the admission assessment. The patient does not groom and is not groomed by a helper.",0)
],
"Self-Care"),
new Question(
"39C. Bathing (washing, rinsing and drying body from neck down in either tub/shower/sponge bath. Performs activity safely).",
[
new Choice("Complete Independence - safely bathes (washes, rinses and dries) the body.",7),
new Choice("Modified Independence - requires specialized equipment (including prosthesis or orthosis) to bathe or takes more than a reasonable time, or there are safety considerations.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (application of orthoses or assistive/adaptive devices, setting out bathing equipment or initial preparation of water and washing materials",5),
new Choice("Minimal Contact Assistance - performs 75% or more of bathing tasks.",4),
new Choice("Moderate Assistance - performs 50%-74% of bathing tasks.",3),
new Choice("Maximal Assistance - performs 25-49% of bathing tasks.",2),
new Choice("Total Assistance - performs <25% of bathing tasks",1),
new Choice("Activity Does Not Occur - enter code 0 only for the admission assessment. The patient does not bathe self and is not bathed by a helper.",0)
],
"Self-Care"),
new Question(
"39D. Dressing - Upper (safely dressing and applying/removing prosthesis/orthosis).",
this.dressingChoices,
"Self-Care"),
new Question(
"39E. Dressing - Lower (safely dressing and applying/removing prosthesis/orthosis).",
this.dressingChoices,
"Self-Care"),
new Question(
"39F. Toileting (safely maintaining perineal hygiene and adjusting clothing before and after using a toilet, commode, bedpan, or urinal.)",
[
new Choice("Complete Independence - safely cleanses self after voiding and bowel movements, safely adjusts clothing before/after using toilet commode, bedpan or urinal",7),
new Choice("Modified Independence - requires specialized equipment (including prosthesis or orthosis) during toileting or takes more than a reasonable time, or there are safety considerations.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (application of adaptive devices or opening packages",5),
new Choice("Minimal Contact Assistance - performs 75% or more of toileting tasks.",4),
new Choice("Moderate Assistance - performs 50%-74% of toileting tasks.",3),
new Choice("Maximal Assistance - performs 25-49% of toileting tasks.",2),
new Choice("Total Assistance - performs <25% of toileting tasks",1),
new Choice("Activity Does Not Occur - enter code 0 only for the admission assessment. The patient or helper does not perform toileting tasks.",0)
],
"Self-Care"),
new Question(
"39G. Bladder Management (safe use of equipment or agents)",
[
new Choice("Complete Independence - controls bladder completely and intentionally without equipment or devices and is never incontinent",7),
new Choice("Modified Independence - requires urinal, bedpan, catheter, absorbent pad, diaper, urinary collecting device or diversion, or uses meds for control. " +
"If cath, pt cleans, sterilizes and sets up equipment for irrigation without assistance. If device used, (s)he assembles/applies external catheter "+
"with drainage bags or an ileal appliance without help. also empties, puts on, removes, cleans leg bag or ileal bag. Has no accidents. ",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (placing or emptying) equipment to maintain satisfactory voiding pattern or external device in the past 3 days.",5),
new Choice("Minimal Contact Assistance - to maintain external device, and performs >75% of bladder tasks in past 3 days.",4),
new Choice("Moderate Assistance - to maintain external device, and performs 50-74% of bladder tasks in past 3 days.",3),
new Choice("Maximal Assistance - performs 25-49% of bladder management tasks.",2),
new Choice("Total Assistance - performs <25% of bladder management tasks",1)
],
"Self-Care"),
new Question(
"39H. Bowel Management (safe use of equipment or agents)",
[
new Choice("Complete Independence - controls bowel completely and intentionally without equipment or devices and is never incontinent",7),
new Choice("Modified Independence - requires bedpad, dig stim, stool softener, suppositories, laxatives, enemas or other meds on a regular basis. Never incontinent.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup of equipment to maintain satisfactory excretory pattern or ostomy in the past 3 days.",5),
new Choice("Minimal Contact Assistance - to maintain satisfactory excretory pattern by suppositories, enemas or an external device, performs >75% tasks in past 3 days.",4),
new Choice("Moderate Assistance - to maintain satisfactory excretory pattern by suppositories, enemas or an external device, and performs 50-74% of tasks in past 3 days.",3),
new Choice("Maximal Assistance - performs 25-49% of bowel management tasks in past 3 days.",2),
new Choice("Total Assistance - performs <25% of bowel management tasks in past 3 days",1)
],
"Self-Care"),
new Question(
"39I. Transfers: Bed, Chair, Wheelchair (includes all aspects transferring to/from items, or sit-to-stand if patient is ambulatory)",
[
new Choice("Complete Independence - if walking - safely sits/stands from regular chair. if in wheelchair, aproaches bed/chair, locks brakes, lifts foot rests, removes arm rest, standing pivots or sliding transfer w/o board and returns.",7),
new Choice("Modified Independence - requires adaptive or assistive device such as a sliding board, lift, grab bars, or special seat/chair/brace/crutches, or it takes a long time, or is done unsafely.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (positioning sliding board, moving foot rests, etc)",5),
new Choice("Minimal Contact Assistance - requires no more help than touching and performs >75% transferring tasks",4),
new Choice("Moderate Assistance - requires more help than touching and performs 50-74% transferring tasks",3),
new Choice("Maximal Assistance - performs 25-49% of transferring tasks.",2),
new Choice("Total Assistance - performs <25% of transferring tasks",1)
],
"Mobility"),
new Question(
"39J. Transfers: Toilet (includes all aspects transferring to/from items, or sit-to-stand if patient is ambulatory)",
[
new Choice("Complete Independence - if walking - safely sits/stands from standard toilet. if in wheelchair, approaches toilet, locks brakes, lifts foot rests, removes arm rest, standing pivots or sliding transfer w/o board and returns.",7),
new Choice("Modified Independence - requires adaptive or assistive device such as a sliding board, lift, grab bars, or special seat, or it takes a long time, or is done unsafely.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (positioning sliding board, moving foot rests, etc)",5),
new Choice("Minimal Contact Assistance - requires no more help than touching and performs >75% transferring tasks",4),
new Choice("Moderate Assistance - requires more help than touching and performs 50-74% transferring tasks",3),
new Choice("Maximal Assistance - performs 25-49% of transferring tasks.",2),
new Choice("Total Assistance - performs <25% of transferring tasks",1),
new Choice("Activity Does Not Occur - only for admission, and patient or helper does not transfer to toilet (e.g. bedpan or urinal use only)",1)
],
"Mobility"),
new Question(
"39K. Transfers: Tub (getting into and out of tub safely)",
[
new Choice("Complete Independence - if walking - safely approaches tub, gets into and out of it. if in wheelchair, approaches tub, locks brakes, lifts foot rests, removes arm rest, standing pivots or sliding transfer w/o board and returns.",7),
new Choice("Modified Independence - requires adaptive or assistive device such as a sliding board, lift, grab bars, or special seat, or it takes a long time, or is done unsafely.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (positioning sliding board, moving foot rests, etc)",5),
new Choice("Minimal Contact Assistance - requires no more help than touching and performs >75% transferring tasks",4),
new Choice("Moderate Assistance - requires more help than touching and performs 50-74% transferring tasks",3),
new Choice("Maximal Assistance - performs 25-49% of transferring tasks.",2),
new Choice("Total Assistance - performs <25% of transferring tasks",1),
new Choice("Activity Does Not Occur - patient or helper does not transfer to tub",1)
],
"Mobility"),
new Question(
"39L. Transfers: Shower (getting into and out of shower safely)",
[
new Choice("Complete Independence - if walking - safely approaches shower stall, gets into and out of it. if in wheelchair, approaches shower stall, locks brakes, lifts foot rests, removes arm rest, standing pivots or sliding transfer w/o board and returns.",7),
new Choice("Modified Independence - requires adaptive or assistive device such as a sliding board, lift, grab bars, or special seat, or it takes a long time, or is done unsafely.",6),
new Choice("Supervision or Setup - requires supervision (standing by, cuing or coaxing) or setup (positioning sliding board, moving foot rests, etc)",5),
new Choice("Minimal Contact Assistance - requires no more help than touching and performs >75% transferring tasks",4),
new Choice("Moderate Assistance - requires more help than touching and performs 50-74% transferring tasks",3),
new Choice("Maximal Assistance - performs 25-49% of transferring tasks.",2),
new Choice("Total Assistance - performs <25% of transferring tasks",1),
new Choice("Activity Does Not Occur - patient or helper does not transfer in/out of shower",1)
],
"Mobility"),
new Question(
"39K. Locomotion: Walk (safely walking on level surface once in a standing position)",
[
new Choice("Complete Independence - safely walks minimum of 150 feet without assistive devices.",7),
new Choice("Modified Independence - walks min 150 ft, but uses brace (orthosis) or prosthesis, adaptive shoes, cane crutches, or walkerette, or is done slowly, or unsafely",6),
new Choice("Exception (Household Locomotion) - walks 50-150 ft independently with/without device. done slowly or unsafely",5),
new Choice("Supervision - requires standby supervision, cuing, coaxing to min 150ft",5),
new Choice("Minimal Contact Assistance - performs >75% walking effort to go minimum of 150ft",4),
new Choice("Moderate Assistance - performs 50-74% to go minimum 150ft",3),
new Choice("Maximal Assistance - performs 25-49% of walking effort to go minimum 50 ft, requires 1 person assist.",2),
new Choice("Total Assistance - performs <25% of walking effort or requires two person assist, or walks to less than 50 ft",1),
new Choice("Activity Does Not Occur - patient does not walk, only used for admission",1)
],
"Mobility"),
new Question(
"39L. Locomotion: Wheelchair (safely won a level surface, once in a seated position)",
[
new Choice("Do not use if patient uses a wheelchair.",7),
new Choice("Modified Independence - operates manual/motorized wheelchair independently for 150+ ft, turns around, maneuvers chair to a table, bed toilet, negotiates 3% grade, maneuvers on rugs and over door sills.",6),
new Choice("Exception (Household Locomotion) - operates a manual or motorized wheelchair independently only short distances",5),
new Choice("Supervision - requires standby supervision, cuing, coaxing to min 150ft",5),
new Choice("Minimal Contact Assistance - performs >75% walking effort to go minimum of 150ft",4),
new Choice("Moderate Assistance - performs 50-74% to go minimum 150ft",3),
new Choice("Maximal Assistance - performs 25-49% of walking effort to go minimum 50 ft, requires 1 person assist.",2),
new Choice("Total Assistance - performs <25% of walking effort or requires two person assist, or walks to less than 50 ft",1),
new Choice("Activity Does Not Occur - patient does not use a wheelchair or not pushed in a wheelchair by helper. only used for admission",1)
],
"Mobility"),
new Question(
"39M. Locomotion: Stairs",
[
new Choice("Independence - Safely goes up and down at least 1 flight of stairs without depending on handrail or support",7),
new Choice("Modified Independence - Goes up and down at least 1 flight of stairs but requires side support, handrail, cane or portable supports, or takes long time, or safety issue",6),
new Choice("Exception (Household Ambulation) - Goes up/down 4-6 stairs independently w or w/o device. takes long time or safety issue", 5),
new Choice("Minimal Contact Assistance - patient performs >75% effort to go up and down one flight of stairs",4),
new Choice("Moderate Assistance - patient performs 50-74% effort to go up and down one flight of stairs",3),
new Choice("Maximal Assistance - patient performs 25-49% effort to go up and down 4-6 stairs, requires 1-person assistance",2),
new Choice("Total Assistance - patient performs <25% of effort or requires 2-person assist or goes up/down <4 stairs",1),
new Choice("Activity does not occur (does not go up/down stairs or helper does not carry subject up/down stairs.",0)
],
"Mobility"),
new Question(
"39N. Comprehension",
[
new Choice("Complete Independence - understands complex or abstract directions and coversation, and understands either spoken or written language (not necessarily English)",7),
new Choice("Modified Independence - understands readily or with only mild difficulty complex or abstract directions and conversation. does not require prompting, though s/he may require hearing/visual aid or other assistive device or extra time.",6),
new Choice("Standby Prompting - understands directions and conversation about basic daily needs >90% of the time, requires prompting (slowed speech, use of repetition, stressing particular words/phrases, pauses, visual or gestural cues <10% time",5),
new Choice("Minimal Prompting - understands directions and conversation about daily neesd 75-90% of the time",4),
new Choice("Moderate Prompting - understands directions and conversation about basic daily needs 50-74% of the time",3),
new Choice("Maximal Prompting - understands directions and conversation about basic daily needs 25-49% of the time. understands only simple commonly used spoken expressions (hello, how are you) or gestures (waving good-bye). requires prompting >1/2 the time",2),
new Choice("Total Assistance - understands directions and conversation about basic daily needs <25% of the time or does not understand simple commonly used spoken expressions (hello, how are you) or gestures (waving good-bye) or does not respond appropriately despite prompting",1)
],
"Cognition"),
new Question(
"39O. Expression",
[
new Choice("Complete Independence - Expresses complex or abstract ideas clearly and fluently (not necessarily in English)",7),
new Choice("Modified Independence - usually expresses complex or abstract ideas clearly or with mild difficulty. no prompting needed but may require augmentative communication system.",6),
new Choice("Standby Prompting - expresses basic daily needs and ideas >90% of the time. requires prompting <10% of time to be understood.",5),
new Choice("Minimal Prompting - expresses basic daily needs and ideas 75-90% of the time.",4),
new Choice("Moderate Prompting - expresses basic daily needs and ideas 50-74% of the time.",3),
new Choice("Maximal Prompting - expresses basic daily needs and ideas 25-49% of the time. uses single words or gestures and needs prompting >50% time",2),
new Choice("Total Assistance - expresses basic daily needs <25% of the time, or does not express basic daily needs appropriately despite prompting.",1)
],
"Cognition"),
new Question(
"39P. Social Interaction",
[
new Choice("Complete Independence - interacts appropriately with staff, other patients and family members, no medication for control.",7),
new Choice("Modified Independence - usually interacts appropriately with staff, other patients and family members, rarely loses control. does not require supervision but may require more reasonable amonut of time to adjust to social situations or medication for control.",6),
new Choice("Supervision - requires supervision only under stressful or unfamiliar conditions < 10% time. may require encouragement to initiate participation.",5),
new Choice("Minimal Direction - interacts appropriately 75-90% of the time.",4),
new Choice("Moderate Direction - interacts appropriately 50-74% of the time.",3),
new Choice("Maximal Direction - interacts appropriately 25-49% of the time, but may need restraint due to socially inappropriate behaviors.",2),
new Choice("Total Assistance - interacts appropriately <25% of time and may need restraint due to socially inappropriate behaviors",1)
],
"Cognition"),
new Question(
"39Q. Problem Solving",
[
new Choice("Complete Independence - consistently recognizes problems when present, makes appropriate decisions, initiates and carries out sequence of steps to solve complex problems until the task is completed, and self-corrects if errors are made.",7),
new Choice("Modified Independence - usually recognizes present probme, with only mild difficulty makes appropriate decisions, initiates and carries out sequence of steps to solve complex problems, or requires more than a reasonable time to make appropriate decisions or complex probmlems.",6),
new Choice("Supervision - requires supervision (E.g. cuing or coaxing) to solve less routine problems only under stressful or unfamiliar conditions but <10% of the time",5),
new Choice("Minimal Direction - solves routine problems 75-90% of the time",4),
new Choice("Moderate Direction - solves routine problems 50-74% of the time",3),
new Choice("Maximal Direction - solves routine problems 25-49% of the time. needs direction more than half time to initiate, plan, or complete simple daily activities and may need restraint for safety.",2),
new Choice("Total Assistance - solves routine problems <25% of the time. needs direction nearly all the time, or does not effectively solve problems, and may require constant 1-to-1 direction to complete simple daily tasks. may need safety restraint.",1)
],
"Cognition"),
new Question(
"39R. Memory",
[
new Choice("Complete Independence - recognizes people frequently encountered, remembers daily routines, and executes requests of others without need for repetition",7),
new Choice("Modified Independence - appears to have only mild difficulty recognizing people frequently encountered, remembering daily routines, and responding to requests of others. May use self-initiated or environmental cues, prompts or aids.",6),
new Choice("Supervision - requires prompting (cuing, repetition, reminders) only under stressful or unfamiliar conditions, but no more than 10% of the time",5),
new Choice("Minimal Prompting - recognizes 75-90% of the time",4),
new Choice("Moderate Prompting - recognizes 50-74% of the time",3),
new Choice("Maximal Prompting - recognizes 25-49% of the time, needs prompting more than half the time",2),
new Choice("Total Assistance - recognizes <25% of the time, or does not effectively recognize and remember",1)
],
"Cognition")
];
    this.userScores = Array(this.questions.length).fill(7); // initializes array to keep track of score for each question. 20 total questions
}
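// Illustrative helper (added sketch, not part of the original scale definitions):
// sums a scale's userScores into a single total, assuming only what the constructors
// above set up, i.e. that userScores holds one numeric value per question.
function exampleTotalScore(scale) {
    var total = 0;
    for (var i = 0; i < scale.userScores.length; i++) {
        total += scale.userScores[i]; // add the selected Choice value for each question
    }
    return total;
}
// e.g. exampleTotalScore(new FIM_Scale()) (with Question/Choice defined elsewhere in
// the project) sums the default score of 7 for every question.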
var SCIM_Scale = function () {
this.name = "Spinal Cord Independence Measure";
this.mobilityChoices = [
new Choice("Requires total assistance", 0),
new Choice("Needs electric wheelchair or partial assistance to operate manual wheelchair", 1),
new Choice("Moves independently in manual wheelchair", 2),
new Choice("Requires supervision while walking (with or without devices)", 3),
new Choice("Walks with a walking frame or crutches (swing)", 4),
new Choice("Walks with crutches or two canes (reciprocal walking)", 5),
new Choice("Walks with one cane", 6),
new Choice("Needs leg orthosis only", 7),
new Choice("Walks without walking aids", 8)
];
this.questions = [
new Question(
"1. Feeding (cutting, opening containers, pouring, bringing food to mouth, holding cup with fluid)",
[
new Choice("Needs parenteral, gastrostomy or fully assisted oral feeding", 0),
new Choice("Needs partial assistance for eating and/or drinking, or for wearing adaptive devices", 1),
new Choice("Eats independently; needs adaptive devices or assistance only for cutting food and/or pouring and/or opening containers", 2),
new Choice("Eats and drinks independently; does not require assistance or adaptive device", 3)
],
"Self-Care"),
new Question(
"2A. Bathing Upper Body (soaping, washing, drying body and head, manipulating water tap)",
[
new Choice("Requires total assistance", 0),
new Choice("Requires partial assistance", 1),
new Choice("Washes independently with adaptive devices or in a specific setting (e.g., bars, chair", 2),
new Choice("Washes independently; does not require adaptive devices or specific setting", 3)
],
"Self-Care"),
new Question(
"2B. Bathing Lower Body (soaping, washing, drying, manipulating water tap)",
[
new Choice("Requires total assistance", 0),
new Choice("Requires partial assistance", 1),
new Choice("Washes independently with adaptive devices or in a specific setting (e.g., bars, chair", 2),
new Choice("Washes independently; does not require adaptive devices or specific setting", 3)
],
"Self-Care"),
new Question(
"3A. Dressing Upper Body (clothes, shoes, permanent orthoses; dressing, wearing, undressing)",
[
new Choice("Requires total assistance", 0),
new Choice("Requires partial assistance with clothes without buttons, zippers or laces (cwobzl)", 1),
new Choice("Independent with cwobzl; requires adaptive devices and/or specific settings (adss)", 2),
new Choice("Independent with cwobzl; does not require adss; needs assistance or adss only for bzl.", 3),
new Choice("Dresses (any clothes) independently; does not require adaptive devices or specific setting", 4)
],
"Self-Care"),
new Question(
"3B. Dressing Lower Body (clothes, shoes, permanent orthoses; dressing, wearing, undressing)",
[
new Choice("Requires total assistance", 0),
new Choice("Requires partial assistance with clothes without buttons, zippers or laces (cwobzl)", 1),
new Choice("Independent with cwobzl; requires adaptive devices and/or specific settings (adss)", 2),
new Choice("Independent with (cwobzl) without adss; needs assistance or adss only for bzl.", 3),
new Choice("Dresses (any clothes) independently; does not require adaptive devices or specific setting", 4)
],
"Self-Care"),
new Question(
"4. Grooming (washing hands and face, brushing teeth, combing hair, shaving, applying makeup)",
[
new Choice("Requires total assistance", 0),
new Choice("Requires partial assistance", 1),
new Choice("Grooms independently with adaptive devices", 2),
new Choice("Grooms independently without adaptive devices", 3)
],
"Self-Care"),
new Question(
"5. Respiration",
[
new Choice("Requires tracheal tube (TT) and permanent or intermittent assisted ventilation (IAV)", 0),
new Choice("Breathes independently with TT; requires oxygen, much assistance in coughing or TT management", 2),
new Choice("Breathes independently with TT; requires little assistance in coughing or TT management", 4),
new Choice("Breathes independently without TT; requires oxygen, much assistance in coughing, a mask (e.g., peep) or IAV (bipap)", 6),
new Choice("Breathes independently without TT; requires little assistance or stimulation for coughing", 8),
new Choice("Breathes independently without assistance or device", 10),
],
"Respiration and Sphincter Management"),
new Question(
"6. Sphincter Management - Bladder",
[
new Choice("Indwelling catheter", 0),
new Choice("Residual urine volume (RUV) > 100cc; no regular catheterization or assisted intermittent catheterization", 3),
new Choice("RUV < 100cc or intermittent self-catheterization; needs assistance for applying drainage instrument", 6),
new Choice("Intermittent self-catheterization; uses external drainage instrument; does not need assistance for applying", 9),
new Choice("Intermittent self-catheterization; continent between catheterizations; does not use external drainage instrument", 11),
new Choice("RUV <100cc; needs only external urine drainage; no assistance is required for drainage", 13),
new Choice("RUV <100cc; continent; does not use external drainage instrument", 15),
],
"Respiration and Sphincter Management"),
new Question(
"7. Sphincter Management - Bowel",
[
new Choice("Irregular timing or very low frequency (less than once in 3 days) of bowel movements", 0),
new Choice("Regular timing, but requires assistance (e.g., for applying suppository); rare accidents (less than twice a month)", 5),
new Choice("Regular bowel movements, without assistance; rare accidents (less than twice a month)", 8),
new Choice("Regular bowel movements, without assistance; no accidents", 10)
],
"Respiration and Sphincter Management"),
new Question(
"8. Use of Toilet (perineal hygiene, adjustment of clothes before/after, use of napkins or diapers)",
[
new Choice("Requires total assistance", 0),
new Choice("Requires partial assistance; does not clean self", 1),
new Choice("Requires partial assistance; cleans self independently", 2),
new Choice("Uses toilet independently in all tasks but needs adaptive devices or special setting (e.g., bars)", 4),
new Choice("Uses toilet independently; does not require adaptive devices or special setting", 5)
],
"Respiration and Sphincter Management"),
new Question(
"9. Mobility in Bed and Action to Prevent Pressure Sores",
[
new Choice("Needs assistance in all activities: turning upper body in bed, turning lower body in bed, sitting up in bed, doing push-ups in wheelchair, with or without adaptive devices, but not with electric aids", 0),
new Choice("Performs one of the activities without assistance", 2),
new Choice("Performs two or three of the activities without assistance", 4),
new Choice("Performs all the bed mobility and pressure release activities independently", 6)
],
"Mobility (room and toilet)"),
new Question(
"10. Transfers: bed-wheelchair (locking wheelchair, lifting footrests, removing and adjusting arm rests, transferring, lifting feet)",
[
new Choice("Requires total assistance", 0),
new Choice("Needs partial assistance and/or supervision, and/or adaptive devices (e.g., sliding board)", 1),
new Choice("Independent (or does not require wheelchair)", 2)
],
"Mobility (room and toilet)"),
new Question(
"11. Transfers: wheelchair-toilet-tub (if uses toilet wheelchair: transfers to and from; if uses regular wheelchair: locking wheelchair, lifting footrests, removing and adjusting armrests, transferring, lifting feet)",
[
new Choice("Requires total assistance", 0),
new Choice("Needs partial assistance and/or supervision, and/or adaptive devices (e.g., grab-bars)", 1),
new Choice("Independent (or does not require wheelchair)", 2)
],
"Mobility (room and toilet)"),
new Question(
"12. Mobility Indoors",
this.mobilityChoices,
"Mobility (indoors and outdoors)"),
new Question(
"13. Mobility for Moderate Distances (10-100 meters)",
this.mobilityChoices,
"Mobility (indoors and outdoors)"),
new Question(
"14. Mobility Outdoors (more than 100 meters)",
this.mobilityChoices,
"Mobility (indoors and outdoors)"),
new Question(
"15. Stair Management",
[
new Choice("Unable to ascend or descend stairs", 0),
new Choice("Ascends and descends at least 3 steps with support or supervision of another person", 1),
new Choice("Ascends and descends at least 3 steps with support of handrail and/or crutch or cane", 2),
new Choice("Ascends and descends at least 3 steps without any support or supervision", 3)
],
"Mobility (indoors and outdoors)"),
new Question(
"16. Transfers - wheelchair-car (approach car, lock w/c, remove arm and footrests, transfers to/from car, brings w/c in/out of car)",
[
new Choice("Requires total assistance", 0),
new Choice("Needs partial assistance and/or supervision and/or adaptive devices", 1),
new Choice("Transfers independent; does not require adaptive devices (or does not require wheelchair)", 2)
],
"Mobility (indoors and outdoors)"),
new Question(
"17. Transfers - ground-wheelchair",
[
new Choice("Requires total assistance", 0),
new Choice("Transfers independent with or without adaptive devices (or does not require wheelchair)", 1)
],
"Mobility (indoors and outdoors)")
];
this.userScores = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]; // initializes array to keep track of score for each question. 19 total questions
this.sendReportByEmail = function(){
var bodyText = "";
for(var i=0; i< this.questions.length; i++){
var q = this.questions[i];
var choice = q.choiceFromScore(this.userScores[i]);
bodyText += q.title + "\n" + "(" + choice.value + ") " + choice.description + "\n\n";
}
prepareEmailandSend({
id_text: $$('#case_id').val(),
reportTitle: "SCIM III Report",
assessmentType: $$('input[name="timePoint"]:checked').val(),
totalScore: $$('#scimScore span').html(),
bodyText: bodyText
});
};
}
var Question = function(title,choices,category){
this.title = title;
this.choices = choices;
this.category = category;
};
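// Maps a numeric score back to the matching Choice object (used when building the email report body).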
Question.prototype.choiceFromScore = function(score){
if(score != null){
for(var k=0; k<this.choices.length; k++){
var c = this.choices[k];
if(c.value == score){
return c;
}
}
}else{
return {description: "error: score is null.", value: null};
}
};
var Choice = function(desc,value){
this.description = desc;
this.value = value;
};
// ******************* ****************** ******************* ******************* *******************
// ******************* ****************** ******************* ******************* *******************
// CMS PAI QI Assessment Class
// Reference: https://www.cms.gov/Medicare/Quality-Initiatives-Patient-Assessment-Instruments/IRF-Quality-Reporting/Downloads/DRAFT-IRF-PAI-FOR-OCT-2016.pdf
var PAIQI_Scale = function () {
this.categories = [
{
name: "Self-care",
items: [
new Activity("A. Eating", "The ability to use suitable utensils to bring food to the mouth and swallow food once the meal is presented on a table/tray. Includes modified food consistency."),
new Activity("B. Oral hygiene", "The ability to use suitable items to clean teeth. [Dentures (if applicable): The ability to remove and replace dentures from and to the mouth, and manage equipment for soaking and rinsing them.]"),
new Activity("C. Toileting hygiene", "The ability to maintain perineal hygiene, adjust clothes before and after using the toilet, commode, bedpan or urinal. If managing an ostomy, include wiping the opening but not managing equipment. "),
new Activity("E. Shower/bathe self", "The ability to bathe self in shower or tub, including washing, rinsing, and drying self. Does not include transferring in/out of tub/shower."),
new Activity("F. Upper body dressing", " The ability to put on and remove shirt or pajama top; includes buttoning, if applicable."),
new Activity("G. Lower body dressing", "The ability to dress and undress below the waist, including fasteners; does not include footwear."),
new Activity("H. Putting on/taking off footwear", "The ability to put on and take off socks and shoes or other footwear that is appropriate for safe mobility.")
]
},
{
name: "Mobility",
items: [
new Activity("A. Roll left and right", "The ability to roll from lying on back to left and right side, and return to lying on back."),
new Activity("B. Sit to lying", "The ability to move from sitting on side of bed to lying flat on the bed."),
new Activity("C. Lying to sitting on side of bed", "The ability to safely move from lying on the back to sitting on the side of the bed with feet flat on the floor, and with no back support."),
new Activity("D. Sit to stand", "The ability to safely come to a standing position from sitting in a chair or on the side of the bed."),
new Activity("E. Chair/bed-to-chair transfer", "The ability to safely transfer to and from a bed to a chair (or wheelchair)."),
new Activity("F. Toilet transfer", "The ability to safely get on and off a toilet or commode."),
new Activity("G. Car transfer", "The ability to transfer in and out of a car or van on the passenger side. Does not include the ability to open/close door or fasten seat belt."),
new Activity("I. Walk 10 feet", "Once standing, the ability to walk at least 10 feet in a room, corridor or similar space."),
new Activity("J. Walk 50 feet with two turns", "Once standing, the ability to walk at least 50 feet and make two turns. "),
new Activity("K. Walk 150 feet", "Once standing, the ability to walk at least 150 feet in a corridor or similar space."),
new Activity("L. Walking 10 feet on uneven surfaces", "The ability to walk 10 feet on uneven or sloping surfaces, such as grass or gravel."),
new Activity("M. 1 step (curb)", "The ability to step over a curb or up and down one step"),
new Activity("N. 4 steps", "The ability to go up and down four steps with or without a rail."),
new Activity("O. 12 steps", "The ability to go up and down 12 steps with or without a rail."),
new Activity("P. Picking up object", "The ability to bend/stoop from a standing position to pick up a small object, such as a spoon, from the floor."),
new Activity("R. Wheel 50 feet with two turns", "Once seated in wheelchair/scooter, the ability to wheel at least 50 feet and make two turns."),
new Activity("S. Wheel 150 feet", "Once seated in wheelchair/scooter, the ability to wheel at least 150 feet in a corridor or similar space.")
]
}
];
this.choices = [
new Choice("Independent - Patient completes the activity by him/herself with no assistance from a helper.",6),
new Choice("Setup or clean-up assistance - Helper sets up or cleans up; patient completes activity. Helper assists only prior to or following the activity.",5),
new Choice("Supervision or touching assistance - Helper provides verbal cues or touching/steadying assistance as patient completes activity. Assistance may be provided throughout the activity or intermittently.",4),
new Choice("Partial/moderate assistance - Helper does less than half the effort. Helper lifts, holds or supports trunk or limbs, but provides less than half the effort.",3),
new Choice("Substantial/maximal assistance - Helper does more than half the effort. Helper lifts or holds trunk or limbs and provides more than half the effort.",2),
new Choice("Dependent - Helper does ALL of the effort to complete activity. Or, requires 2-person assistance to complete",1)
];
const self = this; // cache workaround for 'this' scope conflict
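// Total number of assessment items across all categories, computed once via an IIFE.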
this.totalItems = function(){
var n = 0;
for(var i=0;i<self.categories.length;i++){
for(var j=0;j<self.categories[i].items.length;j++){ n++; }
}
return n;
}();
// initializes array to keep track of score for each question.
this.userScores = [];
for(var i=0;i<this.totalItems;i++){
this.userScores[i] = 6;
}
this.sendReportByEmail = function(){
var bodyText = "";
var totalItemCounter = 0;
for(var i=0; i<this.categories.length; i++){
bodyText += this.categories[i].name + "\n\n";
for(var j=0;j<this.categories[i].items.length;j++){
var activity = this.categories[i].items[j];
var choice = activity.choiceFromScore(this.userScores[totalItemCounter],this.choices);
bodyText += activity.title + "\n" + "(" + choice.value + ") " + choice.description + "\n\n";
totalItemCounter++;
}
}
prepareEmailandSend({
id_text: $$('#case_id').val(),
reportTitle: "PAI QI Report",
assessmentType: $$('input[name="timePoint"]:checked').val(),
totalScore: $$('#paiqiScore span').html(),
bodyText: bodyText
});
};
};
var Activity = function(title,description){
this.title = title;
this.description = description;
};
Activity.prototype.choiceFromScore = function(score,choices){
if(score != null){
for(var k=0; k<choices.length; k++){
var c = choices[k];
if(c.value == score){
return c;
}
}
}else{
return {description: "error: score is null.", value: null};
}
};
// Generic Email function used for all scales.
// Receives expected data points as a params object
function | (params) {
// replaces any undefined values with empty strings
var data = {
id_text: params.id_text || "",
reportTitle: params.reportTitle || "",
assessmentType: params.assessmentType || "",
totalScore: params.totalScore || "",
bodyText: params.bodyText || ""
};
var now = new Date();
var timestampString = (now.getMonth()+1) + "/" + now.getDate() + "/" + now.getFullYear();
var subj = data.id_text + " " + data.reportTitle;
var reportText = data.reportTitle + "\nDate: " + timestampString + "\n" +
"Case ID: " + data.id_text + "\n" +
"Assessment Type: " + data.assessmentType + "\n" +
"Total Score: " + data.totalScore + "\n\n";
reportText += data.bodyText;
location.href = 'mailto:?&subject=' + encodeURIComponent(subj) + '&body='+ encodeURIComponent(reportText);
} | prepareEmailandSend |
dropout.py | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer, Dropout
class SparseDropout(Layer):
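"""Dropout for tf.SparseTensor inputs: applies dropout to the stored values only and rebuilds the sparse tensor."""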
def | (self, p=0.5):
super().__init__()
self.p = p
def call(self, x, training=None):
if training is None:
training = K.learning_phase()
if self.p and training:
values = tf.nn.dropout(x.values, self.p)
return tf.SparseTensor(x.indices, values, x.dense_shape)
return x
class MixedDropout(Layer):
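"""Dispatches to SparseDropout for sparse inputs and to the standard Dropout layer otherwise."""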
def __init__(self, p=0.5):
super().__init__()
self.dense_dropout = Dropout(p)
self.sparse_dropout = SparseDropout(p)
def call(self, x):
if K.is_sparse(x):
return self.sparse_dropout(x)
else:
return self.dense_dropout(x)
| __init__ |
server.py | from flask_socketio import SocketIO
from flask import Flask, make_response, request, session
from flask import render_template, session, url_for, redirect
from threading import RLock
from threading import Thread
from utilslib import list_to_HTML_table
from time import sleep
from ClientStorage import Clients, User
from gameObjects import Game, GameContainter, Player, ChatMmsg
from random import shuffle
#Init server
app = Flask(__name__, template_folder='templates', static_folder='static')
app.config['SECRET_KEY'] = 'lskwod=91230?=)ASD?=)("")@'
socketio = SocketIO(app, async_mode='threading')
timerLock = RLock()
asyncLock = RLock()
clients = Clients()
games = GameContainter()
debugging = False
@app.route('/', methods = ['POST', 'GET'])
@app.route('/index', methods = ['POST', 'GET'])
def index():
verbose = (False or debugging)
error = request.args.get('error')
return make_response(render_template('makeGame.html', title = "Welcome", cool = 123, error = error))
@app.route('/gameRoom', methods = ['POST', 'GET'])
def gameRoom():
global games
verbose = (False or debugging)
argumentsMakeGame = ['name', 'gameName', 'nrOfRounds', 'time', 'newGame']
argumentsJoinGame = ['name', 'gameName', 'newGame']
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
return redirect(url_for('index') + '?error=No user. Refreshing')
if (not user.gameObject):
data = request.form
#MAKE A NEW GAME
if data['newGame'] == 'yes':
if verbose: print('In server:gameRoom() nrOfRounds set!')
for key in data.keys():
argumentsMakeGame.remove(key)
if argumentsMakeGame:
return redirect(url_for('index') + '?error=Not enough arguments when creating the game')
if verbose: print('In server:gameRoom() arguments needed for making a game are present')
#Creating player and game
game = games.add_Game(gameName=data['gameName'], nrOfRounds=data['nrOfRounds'], timePerRound=data['time'])
player = game.add_Player(name=data['name'], userObject=user)
if (not player):
return redirect(url_for('index') + '?error=Player name already exists in this game...')
if verbose: print('In server:gameRoom() game created with the name {} and user/player added'.format(game.gameName))
#Join an existing game
else:
data = request.form
if verbose: print('In server:gameRoom() joining a game!')
for key in data.keys():
argumentsJoinGame.remove(key)
if argumentsJoinGame:
return redirect(url_for('index') + '?error=Not enough arguments when joining the game')
if verbose: print('In server:gameRoom() Searching for game: {}'.format(data['gameName']))
#Check if game exists
game = games.find_Game_By_Name(data['gameName'], verbose)
if (not game):
if verbose: print('The game was not found')
return redirect(url_for('index') + '?error=Game not found')
#Check if name already taken
for player in game.players:
if player.name == data['name']:
return redirect(url_for('index') + '?error=Name already taken')
player = game.add_Player(name=data['name'], userObject=user)
if verbose: print('In server:gameRoom() Player joined game')
if verbose: print('In server:gameRoom() game created and user/player added')
sendMessageToGame(game, '{} joined the game'.format(data['name']))
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
else:
if verbose: print('User already in game')
error = None
return make_response(render_template('gameRoom.html', title = "Game Room", gameName = user.gameObject.gameName, error = error))
@app.route('/gameRoomContent')
def gameRoomContent():
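#Renders the partial HTML for the current stage of the user's game (lobby, round, supply, vote, end, summary)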
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if userNotComplete(user, verbose = (False or debugging)):
return 'ERROR: Something strange happened. Please leave game and rejoin'
game = user.gameObject
nrOfRounds = game.nrOfRounds
timePerRound = game.timePerRound
gameName = game.gameName
roundNr = game.currentRound
if (user.gameObject.get_Stage() == 'lobby'):
return render_template('lobbyContent.html',
gameName = gameName,
nrOfRounds = nrOfRounds,
timePerRound = timePerRound)
elif (user.gameObject.get_Stage() == 'roundStart'):
return render_template('roundContentStart.html',
timePerRound = timePerRound,
roundNr = roundNr,
nrOfRounds = nrOfRounds)
elif (user.gameObject.get_Stage() == 'roundSupply'):
game.spawnedThread = None
game.reset_Players_Ready()
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
print('GameContent:')
print(game.get_Search_Strings(user.playerObject))
return render_template('roundContentSupply.html',
nrOfPlayers = game.get_Nr_Of_Players(),
searchStrings = game.get_Search_Strings(user.playerObject),
nrOfEntries = game.nrOfEntry)
elif (user.gameObject.get_Stage() == 'roundVote'):
game.reset_Players_Ready()
return makeVoteContent(user)
elif (user.gameObject.get_Stage() == 'roundEnd'):
game.reset_Players_Ready()
return makeRoundEnd(user)
elif (user.gameObject.get_Stage() == 'gameSummary'):
game.reset_Players_Ready()
return render_template('gameContentSummary.html')
def makeVoteContent(user):
|
def makeRoundEnd(user):
game = user.gameObject
playerObject = user.playerObject
playersPoints = {}
for player in game.players:
playersPoints[player.name] = player.points
searchStrings = {}
for entry in game.entries:
searchStrings[entry.searchString] = {}
return render_template('roundContentEnd.html', playersPoints = playersPoints)
@app.route('/playerList')
def playerList():
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
verbose = (False or debugging)
if userNotComplete(user, verbose = (False or debugging)):
return redirect(url_for('index') + '?error=User not in game')
playerList = user.gameObject.get_Player_Names_And_Status()
if verbose: print('Got {} players'.format(len(playerList)))
return render_template('playerList.html', playerList = playerList)
@app.route('/chatContent')
def chatContent():
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if userNotComplete(user, verbose = (False or debugging)):
return redirect(url_for('index') + '?error=User not in game')
chat = user.gameObject.chatMessages
msgs = []
players = []
for msg in chat:
player, msg = msg.get_Player_And_Msg()
msgs.append(str(msg))
players.append(str(player))
if players:
players.reverse()
msgs.reverse()
return render_template('chat.html', players = players, chatMsg = msgs)
@app.route('/leave_Game')
def leaveGame():
verbose = (False or debugging)
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user')
return redirect(url_for('index'))
game = user.gameObject
game.remove_Player_By_User_Object(user)
name = user.playerObject.name
user.resetUser()
if len(game.players)<1:
games.removeGame(game=game, verbose = verbose)
else:
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
emitToGame(game = game, arg = ('client_warning',{'msg': name+' left the game'}), lock = timerLock)
print (len(games._games))
return redirect(url_for('index'))
@socketio.on('submit_entry')
def submitEntry(msg):
verbose = (False or debugging)
if verbose: print ('Entry received by the server')
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if verbose: print ('User retrieved')
if (not user):
if verbose: print('No user found when collecting the data')
return
if user.playerObject.entry:
if verbose: print('User already submitted.')
return
if verbose: print ('Setting entry for user')
user.gameObject.add_Entry(msg['searchString'], msg['suggestion'], user.playerObject)
if verbose: print('Got entry')
if user.gameObject.nrOfEntry >= user.gameObject.get_Nr_Of_Players():
emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock)
@socketio.on('submit_supply')
def submitSupply(data):
verbose = (False or debugging)
if verbose: print ('\n---------------------\nSupply received by the server')
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user found when collecting the data')
return
game = user.gameObject
if verbose: print ('The data received is: {}'.format(data))
if verbose: print ('player {} found'.format(user.playerObject.name))
if (not data):
return
if verbose: print('')
if verbose: print('The actual data:')
for key, value in data.items():
if verbose: print('Key: {} \t Value: {}'.format(key, value))
if value == '':
continue
game.entries[int(key)].add_Autocomplete(value, user.playerObject)
if verbose: print('')
game.nrOfSupply += 1
if verbose: print ('The game has received {} supplies\n---------------------\n'.format(game.nrOfSupply))
#All "supplies" are received
if user.gameObject.nrOfSupply >= user.gameObject.get_Nr_Of_Players():
if verbose: print ('We should now refresh the div content')
emitToGame(game = user.gameObject, arg = ('refresh_div_content', {'div': 'contentVote', 'cont': '/gameRoomContent'}), lock = timerLock)
#emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock)
if verbose and False:
print('')
for entry in game.entries:
print('-------------------------------------------')
print('The entry with the search string: \t {}\nHas the following autocompletes added:'.format(entry.searchString))
for supply in entry.otherAutocompletes:
print (supply.autoComplete)
print('-------------------------------------------')
print('')
@socketio.on('submit_favorite')
def submitFavorite(favorite):
print('The server received a favorite: {}'.format(favorite))
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
game = user.gameObject
autoComplete = game.get_Autocomlete_by_ID(favorite)
if (not autoComplete):
user.playerObject.points -= 1
return
user.playerObject.autocompleteVotedFor = autoComplete
if (autoComplete.isGoogle):
user.playerObject.points += 1
return
autoComplete.playerObject.points += 1
return
@socketio.on('toggle_ready')
def toggleReady(msg):
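#Flips the calling player's ready flag and advances the game stage once every player is ready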
verbose = (True or debugging)
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user found when toggling ready')
return
player = user.playerObject
if (not player):
if verbose: print('No player found for the user/client.')
return
player.ready = not player.ready
game = player.gameObject
#A game object will always exist if there is a playerObject
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
playersReady = game.all_Players_Ready()
if verbose: print ('STAGE:', game.get_Stage())
#Start round
if playersReady and game.gameStarted == False and not game.spawnedThread:
game.gameStarted = True
game.reset_Players_Ready()
emitToGame(game = game, arg = ('change_content', {'url':'/gameRoomContent'}), lock = timerLock)
emitToGame(game = game, arg = ('client_message', {'msg':'Game started. Have fun!'}), lock = timerLock)
#Start timer
game.spawnedThread = RoundTimer(int(game.timePerRound), user)
game.spawnedThread.start()
return
#End round
if playersReady and game.get_Stage() == 'roundStart':
if verbose: print ('Round ended by users')
user.gameObject.end_Stage()
game.reset_Players_Ready()
if verbose: print('Current stage of game is: {}'.format(user.gameObject.get_Stage()))
emitToGame(game = user.gameObject, arg = ('round_End', {}), lock = timerLock)
emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock)
return
#End supply
if playersReady and game.get_Stage() == 'roundSupply':
user.gameObject.end_Stage()
game.reset_Players_Ready()
emitToGame(game = user.gameObject, arg = ('supply_End', {'nrOfEntries': user.gameObject.nrOfEntry}), lock = timerLock)
emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock)
return
#End vote
if playersReady and game.get_Stage() == 'roundVote':
user.gameObject.end_Stage()
game.reset_Players_Ready()
emitToGame(game = user.gameObject, arg = ('vote_End', {}), lock = timerLock)
emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Vote ended'}), lock = timerLock)
return
class RoundTimer(Thread):
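"""Background thread that ends the current stage after timeToWait seconds, unless the round already ended or the game is gone."""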
def __init__(self, timeToWait, user):
Thread.__init__(self)
self.timeToWait = timeToWait
self.user = user
def run(self):
sleep(self.timeToWait)
if (not self.user.gameObject) or (self.user.gameObject.roundEnded):
return
self.user.gameObject.end_Stage()
emitToGame(game = self.user.gameObject, arg = ('round_End', {'url':'/gameRoomContent'}), lock = timerLock)
emitToGame(game = self.user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock)
return
@socketio.on('handle_chat')
def handleChat(msg):
#update_chat
verbose = (False or debugging)
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user')
return redirect(url_for('index'))
game = user.gameObject
if (not game):
if verbose: print('No game found when handling chat')
return
game.add_Chat_Msg(chatMsg=msg, playerName=user.playerObject.name)
emitToGame(game=game, arg=('update_chat',{}), lock=timerLock)
@socketio.on('connected')
def client_connect():
verbose = (False or debugging)
'''
I need to identify the user. If the user reloads, the session ID will change.
A unique user-key is provided for each new user, and the session ID is updated
when the user reconnects. The unique ID is stored in a cookie.
'''
if verbose: print('Someone connected with the IP: {}'.format(request.remote_addr))
uniqueID = request.cookies.get('uniqueID')
if verbose: print('\nUnique ID before update: {}'.format(uniqueID))
if uniqueID:
if verbose: print('Unique ID cookie found')
user = clients.find_User_By_uniqueID(uniqueID)
if user:
if verbose: print('User found')
if request.sid != user.sid:
user.sid = request.sid
if verbose: print('Updated the SID')
else:
user = clients.add_User(sid=request.sid)
if verbose: print('User created')
user.uniqueID = uniqueID
if verbose: print('Unique ID updated')
else:
if verbose: print('Made a new user')
user = clients.add_User(sid=request.sid)
if verbose: print('Emitting set_cookie to the client')
emit(arg=('set_cookie', {'name': 'uniqueID' , 'data': user.uniqueID}), uniqueID = None, lock = timerLock, user= user)
def sendMessageToGame(game, msg):
for player in game.players:
emit(arg = ('client_message', {'msg': msg}), uniqueID = None, lock = timerLock, user= player.userObject)
def emitToGame(arg, game, lock):
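#Broadcasts a single socket event tuple (event name, data) to every player in the game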
for player in game.players:
emit(arg = arg, uniqueID = None, lock = lock, user = player.userObject)
def emit(arg, uniqueID, lock, user = None):
'''
An emit method that requires a lock. Dunno if I need this...
TODO: Find out if I need the lock.
'''
verbose = (False or debugging)
with lock:
if verbose: print ('Did an emit')
if (not user):
userSID = clients.find_User_By_uniqueID(uniqueID).sid
else:
userSID = user.sid
socketio.emit(*arg, room = userSID)
def userNotComplete(user, verbose = (False or debugging)):
if (not user):
return True
if verbose:
print('\nUser name: {}'.format(user.name))
print('User gameObject pointer {}'.format(user.gameObject))
print('User playerObject pointer {}\n'.format(user.playerObject))
if ((not user.gameObject) or (not user.playerObject)):
return True
else:
return False
if __name__ == "__main__":
socketio.run(app, debug = False)
| game = user.gameObject
playerObject = user.playerObject
notReady = False
voteEntries = game.get_Vote_Entries(playerObject)
return render_template('roundContentVote.html',
notReady = notReady,
voteEntries = voteEntries) |
doridori.py | import cv2
import mediapipe as mp
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.spatial import distance
from scipy.signal import find_peaks
from celluloid import Camera
from tqdm import tqdm
class Doridori:
def __init__(self,filepath):
self.cap = cv2.VideoCapture(filepath)
self.total_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.df = np.array([])
self.distance_list = np.array([])
self.peaks = np.array([])
def detect_face(self):
frame_cnt = 0
nose_x = list()
nose_y = list()
nose_z = list()
mp_face_mesh = mp.solutions.face_mesh
with mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=1,
min_detection_confidence=0.5) as face_mesh:
while(self.cap.isOpened()):
ret, frame = self.cap.read()
if ret:
frame_cnt += 1
results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if results.multi_face_landmarks:
x, y, z = self.__getNose(results.multi_face_landmarks)
nose_x.append(x)
nose_y.append(y)
nose_z.append(z)
if frame_cnt >= self.total_frame:
print("============End Video============")
self.df = np.array([nose_x, nose_y, nose_z]).T
break
self.cap.release()
cv2.destroyAllWindows()
return self.df
def fit(self, data = np.array([]), threshold=0.004, min_peak_distance = 12, display_mode = True):
|
def save_video(self, filepath, display_frame = 100, frame_rate = 30.0, video_size=(25,8)):
fig, ax = plt.subplots(figsize=video_size)
camera = Camera(fig)
padding_nan = np.empty(display_frame)
padding_nan[:] = np.nan
distance_with_nan = np.concatenate([padding_nan, self.distance_list])
peaks_with_nan = np.concatenate([padding_nan, self.peaks])
for i in tqdm(range(display_frame, len(distance_with_nan))):
ax.plot(distance_with_nan[i-display_frame:i], c='blue')
ax.plot(peaks_with_nan[i-display_frame:i], 'ro')
camera.snap()
print(f"saving to {filepath}")
animation = camera.animate(interval=1000.0/frame_rate)
animation.save(filepath)
plt.close(fig)
def __getNose(self, landmarks):
x = 0
y = 0
z = 0
landmark = list(landmarks)
for mark in landmark:
x = mark.landmark[0].x
y = mark.landmark[0].y
z = mark.landmark[0].z
return x, y, z
| distance_list = list()
if data.size == 0:
df = self.df
else:
df = data
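# Euclidean distance between consecutive nose positions approximates the head movement per frame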
for i in range(1, len(df)):
distance_list.append(distance.euclidean(df[i-1,:], df[i,:]))
peaks_index = find_peaks(distance_list, distance=min_peak_distance)[0]
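# discard peaks whose movement is below the threshold (treated as noise)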
low_peak_index = list()
for i, j in enumerate (peaks_index):
if distance_list[j] < threshold:
low_peak_index.append(i)
peaks_index = np.delete(peaks_index, low_peak_index)
print(f"total_doridori_count : {len(peaks_index)}")
peaks = list()
for i, value in enumerate (distance_list):
if i in peaks_index:
peaks.append(value)
else:
peaks.append(np.nan)
if display_mode:
plt.figure(figsize=(25,8))
plt.plot(distance_list)
plt.plot(peaks, 'ro')
self.distance_list = distance_list
self.peaks = peaks
return len(peaks_index) |
scheduler.py | from rq_scheduler.scheduler import Scheduler
import rq
from redis import Redis
import os
import sys
import init_log
import logging
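# Registers the recurring jobs with rq-scheduler and runs the scheduler loop.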
def | ():
with rq.Connection(Redis.from_url(os.environ.get("REDIS_URL") or "redis://")):
q = rq.Queue()
scheduler = Scheduler(queue=q)
scheduler.cron(
cron_string="0 7 * * 2", # every tuesday at 7:00,
func="merge_all_geojson.merge_geojson",
timeout="20m",
)
scheduler.cron(
cron_string="0 0 * * *", # every day at 00:00,
func="cleanup.cleanup_old_resources",
)
scheduler.run()
def _run_task(task):
"""debug task to manually trigger a task"""
from datetime import timedelta
logging.info(f"scheduling task {task} in 1s", extra={"task_id": "scheduler"})
with rq.Connection(Redis.from_url(os.environ.get("REDIS_URL") or "redis://")):
q = rq.Queue()
scheduler = Scheduler(queue=q)
scheduler.enqueue_in(
timedelta(seconds=1), func=task, timeout="20m",
)
if __name__ == "__main__":
init_log.config_worker_log()
if len(sys.argv) > 1:
# run custom task for debug, like:
# `python scheduler.py merge_all_geojson.merge_geojson`
# or
# `python scheduler.py cleanup.cleanup_old_resources`
_run_task(sys.argv[1])
else:
_run_scheduler()
| _run_scheduler |
de.rs | use npm_package_json::{Bug, Package, Repository, RepositoryReference};
use std::str::FromStr;
#[test]
fn test_de_minimal() |
#[test]
fn test_de_default() {
let s = include_str!("./default.json");
let package = Package::from_str(s).unwrap();
let git_url = "https://github.com/<user>/my_package.git";
assert_eq!(package.name, "my_package");
assert_eq!(package.version, "1.0.0");
assert!(package.description.unwrap().is_empty());
assert_eq!(package.main.unwrap(), "index.js");
assert_eq!(
package.repository.unwrap(),
RepositoryReference::Full(Repository {
r#type: "git".to_string(),
url: git_url.to_string(),
..Default::default()
})
);
assert!(package.keywords.is_empty());
assert_eq!(package.license.unwrap(), "ISC");
assert_eq!(
package.bugs.unwrap(),
Bug {
email: None,
url: Some("https://github.com/<user>/my_package/issues".to_string())
}
);
assert_eq!(
package.homepage.unwrap(),
git_url.trim_end_matches(".git").to_string()
);
}
#[test]
fn test_de_single_str_bugs() {
let s = "{\"name\": \"my-awesome-package\",\"version\": \"1.0.0\",\"bugs\": \"https://example.com\"}";
let package = Package::from_str(s).unwrap();
//assert_eq!(package.is_err(), false);
}
| {
let s = include_str!("./minimal.json");
let package = Package::from_str(s).unwrap();
assert_eq!(package.name, "my-awesome-package");
assert_eq!(package.version, "1.0.0");
} |
append.rs | use crate::{cmd, Command};
use ql2::term::TermType;
pub trait Arg {
fn arg(self) -> cmd::Arg<()>;
}
impl Arg for cmd::Arg<()> {
fn arg(self) -> cmd::Arg<()> {
self
}
}
impl Arg for Command {
fn arg(self) -> cmd::Arg<()> |
}
| {
Self::new(TermType::Append).with_arg(self).into_arg()
} |
TorrentClient.py | #!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bram Cohen, Uoti Urpala, John Hoffman, and David Harrison
from __future__ import division
from BitTorrent.translation import _
import pdb
import sys
import os
from cStringIO import StringIO
import logging
from logging import ERROR
from time import strftime, sleep
import traceback
import BitTorrent.stackthreading as threading
from BitTorrent.defer import DeferredEvent
from BitTorrent import inject_main_logfile
from BitTorrent.MultiTorrent import Feedback, MultiTorrent
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import printHelp
from BitTorrent.zurllib import urlopen
from BitTorrent.prefs import Preferences
from BitTorrent import configfile
from BitTorrent import BTFailure
from BitTorrent import version
from BitTorrent import console, stderr_console
from BitTorrent import GetTorrent
from BitTorrent.RawServer_twisted import RawServer, task
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
from BitTorrent.platform import get_temp_dir
inject_main_logfile()
from Axon.Ipc import shutdown, producerFinished
from Axon.ThreadedComponent import threadedcomponent
from Axon.Component import component
from TorrentIPC import *
"""\
=================
TorrentClient - a BitTorrent Client
=================
This component is for downloading and uploading data using the peer-to-peer
BitTorrent protocol.
I should start by saying "DO NOT USE THIS COMPONENT YOURSELF".
This component wraps the Mainline (official) BitTorrent client, which
unfortunately is not thread-safe (at least with the latest version - 4.20).
If you run two instances of this client simultaneously, Python will die
with an exception, or if you're really unlucky, a segfault.
But despair not! There is a solution - use TorrentHandler instead.
TorrentHandlers will organise the sharing of a single TorrentClient amongst
themselves and expose exactly the same interface (except that the
tickInterval optional argument cannot be set) with the key advantage
that you can run as many of them as you want.
For a description of the interfaces of TorrentClient see TorrentHandler.py.
How does it work?
-----------------
TorrentClient is a threadedcomponent that uses the libraries of the Mainline
(official) BitTorrent client to provide BitTorrent functionality. As Mainline
was designed to block (use blocking function calls), it is incompatible
with the normal structure of an Axon component - it cannot yield regularly.
As such it uses a threadedcomponent, allowing it to block with impunity.
Each torrent is assigned a unique id (currently equal to the count of torrents
seen but don't rely on it). Inboxes are checked periodically (every tickInterval
seconds, where tickInterval is 5 by default)
TorrentClient currently cannot be shut down.
"""
class MakeshiftTorrent(object):
"""While a torrent is started, an instance of this class is used in place of
a real Torrent object (a class from Mainline) to store its metainfo"""
def __init__(self, metainfo):
super(MakeshiftTorrent, self).__init__()
self.metainfo = metainfo
class | (threadedcomponent):
"""\
TorrentClient([tickInterval]) -> component capable of downloading/sharing torrents.
Initialises the Mainline client.
Arguments:
- [tickInterval=5] -- the interval in seconds at which TorrentClient checks inboxes
Using threadedcomponent so we don't have to worry about blocking I/O or making
mainline yield periodically
"""
Inboxes = {
"inbox" : "Torrent IPC - add a torrent, stop a torrent etc.",
"control" : "NOT USED"
}
Outboxes = {
"outbox" : "Torrent IPC - status updates, completion, new torrent added etc.",
"signal" : "NOT USED"
}
def __init__(self, tickInterval = 5):
super(TorrentClient, self).__init__()
self.totaltorrents = 0
self.torrents = {}
self.torrentinfohashes = {}
self.tickInterval = tickInterval #seconds
def main(self):
"""\
Start the Mainline client and block forever listening for connections
"""
uiname = "bittorrent-console"
defaults = get_defaults(uiname)
config, args = configfile.parse_configuration_and_args(defaults, uiname)
config = Preferences().initWithDict(config)
data_dir = config['data_dir']
self.core_doneflag = DeferredEvent()
self.rawserver_doneflag = DeferredEvent()
rawserver = RawServer(config) #event and I/O scheduler
self.multitorrent = MultiTorrent(config, rawserver, data_dir) #class used to add, control and remove torrents
self.tick() #add periodic function call
rawserver.add_task(0, self.core_doneflag.addCallback, lambda r: rawserver.external_add_task(0, shutdown))
rawserver.listen_forever(self.rawserver_doneflag)
self.send(producerFinished(self), "signal")
print "TorrentClient has shutdown"
def startTorrent(self, metainfo, save_incomplete_as, save_as, torrentid):
"""startTorrent causes MultiTorrent to begin downloading a torrent eventually.
Use it instead of _start_torrent."""
self._create_torrent(metainfo, save_incomplete_as, save_as)
self.multitorrent.rawserver.add_task(1, self._start_torrent, metainfo, torrentid)
def _create_torrent(self, metainfo, save_incomplete_as, save_as):
if not self.multitorrent.torrent_known(metainfo.infohash):
df = self.multitorrent.create_torrent(metainfo, save_incomplete_as, save_as)
#except Exception, e:
# print e
# return False
def _start_torrent(self, metainfo, torrentid):
#try:
t = None
if self.multitorrent.torrent_known( metainfo.infohash ):
t = self.multitorrent.get_torrent(metainfo.infohash)
# HACK!! Rewrite when INITIALIZING state is available.
if t is None or not t.is_initialized():
#self.logger.debug( "Waiting for torrent to initialize." )
self.multitorrent.rawserver.add_task(3, self._start_torrent, metainfo, torrentid)
return
if not self.multitorrent.torrent_running(metainfo.infohash):
df = self.multitorrent.start_torrent(metainfo.infohash)
self.torrents[torrentid] = self.multitorrent.get_torrent(metainfo.infohash)
#yield df
#df.getResult() # raises exception if one occurred in yield.
# print e
# print "Failed to start torrent"
def decodeTorrent(self, data):
"""\
Converts bencoded raw metadata (as one would find in a .torrent file) into
a metainfo object (which one can then get the torrent's properties from).
"""
from BitTorrent.bencode import bdecode, bencode
metainfo = None
try:
b = bdecode(data)
metainfo = ConvertedMetainfo(b)
except Exception, e:
pass
return metainfo
def tick(self):
"""\
Called periodically... by itself (gets rawserver to call it back after a delay of
tickInterval seconds). Checks inboxes and sends a status-update message for every
active torrent.
"""
self.multitorrent.rawserver.add_task(self.tickInterval, self.tick)
#print "Tick"
while self.dataReady("inbox"):
temp = self.recv("inbox")
if isinstance(temp, TIPCCreateNewTorrent) or isinstance(temp, str):
if isinstance(temp, str):
metainfo = self.decodeTorrent(temp)
else:
metainfo = self.decodeTorrent(temp.rawmetainfo)
if metainfo != None:
savefolder = os.path.join("./",metainfo.name_fs)
existingTorrentId = self.torrentinfohashes.get(metainfo.infohash, 0)
if existingTorrentId != 0:
self.send(TIPCTorrentAlreadyDownloading(torrentid=existingTorrentId), "outbox")
else:
self.totaltorrents += 1
self.torrentinfohashes[metainfo.infohash] = self.totaltorrents
self.torrents[self.totaltorrents] = MakeshiftTorrent(metainfo)
self.startTorrent(metainfo, savefolder, savefolder, self.totaltorrents)
self.send(TIPCNewTorrentCreated(torrentid=self.totaltorrents, savefolder=savefolder), "outbox")
elif isinstance(temp, TIPCCloseTorrent):
torrent = self.torrents.get(temp.torrentid, None)
if torrent != None:
self.multitorrent.remove_torrent(torrent.metainfo.infohash)
del self.torrentinfohashes[torrent.metainfo.infohash]
del self.torrents[temp.torrentid]
for torrentid, torrent in self.torrents.items():
if not isinstance(torrent, MakeshiftTorrent):
self.send(TIPCTorrentStatusUpdate(torrentid=torrentid, statsdictionary=torrent.get_status()), "outbox")
while self.dataReady("control"):
temp = self.recv("control")
if isinstance(temp, shutdown):
print "TorrentClient trying to shutdown"
#cause us to shutdown
self.rawserver_doneflag.set()
self.core_doneflag.set()
#if self.torrent is not None:
# status = self.torrent.get_status(self.config['spew'])
# self.d.display(status)
class BasicTorrentExplainer(component):
"""\
BasicTorrentExplainer() -> component useful for debugging TorrentClient/TorrentPatron
This component converts each torrent IPC message it receives into a human readable
line of text.
"""
def main(self):
while 1:
yield 1
while self.dataReady("inbox"):
temp = self.recv("inbox")
#try:
self.send(temp.getText() + "\n", "outbox")
#except:
# pass
self.pause()
if __name__ == '__main__':
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import sys
sys.path.append("../Util/")
from TriggeredFileReader import TriggeredFileReader
from Axon.Component import component
# download a linux distro or whatever
# NOTE: Do not follow this example. It is used to illustrate/test the use of a TorrentClient component
# alone. TorrentPatron can and should be used in place of TorrentClient for user applications
# as it supports multiple instances (create two TorrentClients and see it all come falling down).
pipeline(
ConsoleReader(">>> ", ""),
TriggeredFileReader(),
TorrentClient(),
BasicTorrentExplainer(),
ConsoleEchoer(),
).run()
| TorrentClient |
utils.rs | use std::convert::TryFrom;
use once_cell::sync::Lazy;
use proc_macro2::TokenStream;
use quote::ToTokens;
use regex::Regex;
use std::ops::Deref;
use syn::{Attribute, Type};
use syn::{Error, Result};
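/// Return early from the calling function with a `syn::Error` built from a format string, optionally anchored to a span.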
macro_rules! syn_err {
($l:literal $(, $a:expr)*) => {
syn_err!(proc_macro2::Span::call_site(); $l $(, $a)*);
};
($s:expr; $l:literal $(, $a:expr)*) => {
return Err(syn::Error::new($s, format!($l $(, $a)*)));
};
}
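/// Generate `TryFrom<&syn::Attribute>` and `syn::parse::Parse` impls for an attribute struct, dispatching on the listed keys.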
macro_rules! impl_parse {
($i:ident ($input:ident, $out:ident) { $($k:pat => $e:expr),* $(,)? }) => {
impl std::convert::TryFrom<&syn::Attribute> for $i {
type Error = syn::Error;
fn try_from(attr: &syn::Attribute) -> syn::Result<Self> { attr.parse_args() }
}
impl syn::parse::Parse for $i {
fn parse($input: syn::parse::ParseStream) -> syn::Result<Self> {
let mut $out = $i::default();
loop {
let key: Ident = $input.call(syn::ext::IdentExt::parse_any)?;
match &*key.to_string() {
$($k => $e,)*
#[allow(unreachable_patterns)]
_ => syn_err!($input.span(); "unexpected attribute")
}
match $input.is_empty() {
true => break,
false => {
$input.parse::<syn::Token![,]>()?;
}
}
}
Ok($out)
}
}
};
}
/// Parse all `#[ts(..)]` attributes from the given slice.
pub fn | <'a, A>(attrs: &'a [Attribute]) -> Result<impl Iterator<Item = A>>
where
A: TryFrom<&'a Attribute, Error = Error>,
{
Ok(attrs
.iter()
.filter(|a| a.path.is_ident("ts"))
.map(A::try_from)
.collect::<Result<Vec<A>>>()?
.into_iter())
}
/// Parse all `#[serde(..)]` attributes from the given slice.
#[cfg(feature = "serde-compat")]
#[allow(unused)]
pub fn parse_serde_attrs<'a, A: TryFrom<&'a Attribute, Error = Error>>(
attrs: &'a [Attribute],
) -> impl Iterator<Item = A> {
attrs
.iter()
.filter(|a| a.path.is_ident("serde"))
.flat_map(|attr| match A::try_from(attr) {
Ok(attr) => Some(attr),
Err(_) => {
use quote::ToTokens;
warning::print_warning(
"failed to parse serde attribute",
format!("{}", attr.to_token_stream()),
"ts-rs failed to parse this attribute. It will be ignored.",
)
.unwrap();
None
}
})
.collect::<Vec<_>>()
.into_iter()
}
#[cfg(feature = "serde-compat")]
mod warning {
use std::fmt::Display;
use std::io::Write;
use termcolor::{BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
// Sadly, it is impossible to raise a warning in a proc macro.
// This function prints a message which looks like a compiler warning.
pub fn print_warning(
title: impl Display,
content: impl Display,
note: impl Display,
) -> std::io::Result<()> {
let make_color = |color: Color, bold: bool| {
let mut spec = ColorSpec::new();
spec.set_fg(Some(color)).set_bold(bold).set_intense(true);
spec
};
let yellow_bold = make_color(Color::Yellow, true);
let white_bold = make_color(Color::White, true);
let white = make_color(Color::White, false);
let blue = make_color(Color::Blue, true);
let writer = BufferWriter::stderr(ColorChoice::Auto);
let mut buffer = writer.buffer();
buffer.set_color(&yellow_bold)?;
write!(&mut buffer, "warning")?;
buffer.set_color(&white_bold)?;
writeln!(&mut buffer, ": {}", title)?;
buffer.set_color(&blue)?;
writeln!(&mut buffer, " | ")?;
write!(&mut buffer, " | ")?;
buffer.set_color(&white)?;
writeln!(&mut buffer, "{}", content)?;
buffer.set_color(&blue)?;
writeln!(&mut buffer, " | ")?;
write!(&mut buffer, " = ")?;
buffer.set_color(&white_bold)?;
write!(&mut buffer, "note: ")?;
buffer.set_color(&white)?;
writeln!(&mut buffer, "{}", note)?;
writer.print(&buffer)
}
}
static LIFETIME_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"'[^\s,>]+").unwrap());
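/// Textually replaces every named lifetime in the type's token stream with `'static`.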
pub fn convert_lifetime_to_static(ty: &Type) -> TokenStream {
let str = ty.to_token_stream().to_string();
LIFETIME_REGEX
.replace(str.as_str(), "'static")
.deref()
.parse()
.unwrap()
}
| parse_attrs |
issue-18425.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that codegen doesn't ICE when codegenning an array repeat
// expression with a count of 1 and a non-Copy element type.
// pretty-expanded FIXME #23616
fn | () {
let _ = [Box::new(1_usize); 1];
}
| main |
groupedDataframe.js | 'use strict';
exports.__esModule = true;
exports['default'] = undefined;
var _slicedToArray2 = require('babel-runtime/helpers/slicedToArray');
var _slicedToArray3 = _interopRequireDefault(_slicedToArray2);
var _entries = require('babel-runtime/core-js/object/entries');
var _entries2 = _interopRequireDefault(_entries);
var _objectWithoutProperties2 = require('babel-runtime/helpers/objectWithoutProperties');
var _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2);
var _defineProperty2 = require('babel-runtime/helpers/defineProperty');
var _defineProperty3 = _interopRequireDefault(_defineProperty2);
var _extends3 = require('babel-runtime/helpers/extends');
var _extends4 = _interopRequireDefault(_extends3);
var _stringify = require('babel-runtime/core-js/json/stringify');
var _stringify2 = _interopRequireDefault(_stringify);
var _toConsumableArray2 = require('babel-runtime/helpers/toConsumableArray');
var _toConsumableArray3 = _interopRequireDefault(_toConsumableArray2);
var _regenerator = require('babel-runtime/regenerator');
var _regenerator2 = _interopRequireDefault(_regenerator);
var _getIterator2 = require('babel-runtime/core-js/get-iterator'); |
var _iterator3 = _interopRequireDefault(_iterator2);
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
var _symbol = require('babel-runtime/core-js/symbol');
var _symbol2 = _interopRequireDefault(_symbol);
var _dataframe = require('./dataframe');
var _dataframe2 = _interopRequireDefault(_dataframe);
var _errors = require('./errors');
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
var __groups__ = (0, _symbol2['default'])('groups');
var GroupedDataFrame = function () {
function GroupedDataFrame(df) {
(0, _classCallCheck3['default'])(this, GroupedDataFrame);
if (!(df instanceof _dataframe2['default'])) throw new _errors.ArgumentTypeError(df, 'DataFrame');
for (var _len = arguments.length, columnNames = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
columnNames[_key - 1] = arguments[_key];
}
this[__groups__] = this._groupBy(df, columnNames);
this.df = df;
this.on = columnNames.length > 0 ? columnNames : df.listColumns();
}
(0, _createClass3['default'])(GroupedDataFrame, [{
key: _iterator3['default'],
value: _regenerator2['default'].mark(function value() {
var _iteratorNormalCompletion, _didIteratorError, _iteratorError, _iterator, _step, group;
return _regenerator2['default'].wrap(function value$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_iteratorNormalCompletion = true;
_didIteratorError = false;
_iteratorError = undefined;
_context.prev = 3;
_iterator = (0, _getIterator3['default'])(this[__groups__]);
case 5:
if (_iteratorNormalCompletion = (_step = _iterator.next()).done) {
_context.next = 12;
break;
}
group = _step.value;
_context.next = 9;
return group;
case 9:
_iteratorNormalCompletion = true;
_context.next = 5;
break;
case 12:
_context.next = 18;
break;
case 14:
_context.prev = 14;
_context.t0 = _context['catch'](3);
_didIteratorError = true;
_iteratorError = _context.t0;
case 18:
_context.prev = 18;
_context.prev = 19;
if (!_iteratorNormalCompletion && _iterator['return']) {
_iterator['return']();
}
case 21:
_context.prev = 21;
if (!_didIteratorError) {
_context.next = 24;
break;
}
throw _iteratorError;
case 24:
return _context.finish(21);
case 25:
return _context.finish(18);
case 26:
case 'end':
return _context.stop();
}
}
}, value, this, [[3, 14, 18, 26], [19,, 21, 25]]);
})
}, {
key: '_groupBy',
value: function _groupBy(df, columnNames) {
var hashedDF = df.withColumn('hash', function (row) {
return row.select.apply(row, (0, _toConsumableArray3['default'])(columnNames)).hash();
});
return hashedDF.distinct('hash').toArray('hash').map(function (hash) {
var _group$toCollection$;
var group = hashedDF.filter(function (row) {
return row.get('hash') === hash;
}).drop('hash');
return {
groupKey: (_group$toCollection$ = group.toCollection(true)[0]).select.apply(_group$toCollection$, (0, _toConsumableArray3['default'])(columnNames)).toDict(),
hash: hash,
group: group
};
}).filter(function (_ref) {
var group = _ref.group;
return group.count() > 0;
});
}
}, {
key: 'get',
value: function get(hash) {
return this.toCollection().find(function (group) {
return group.hash === hash;
});
}
}, {
key: 'toCollection',
value: function toCollection() {
return [].concat((0, _toConsumableArray3['default'])(this));
}
}, {
key: 'show',
value: function show() {
var quiet = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : false;
return [].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref2) {
var group = _ref2.group,
groupKey = _ref2.groupKey;
var groupLog = '--\n[' + (0, _stringify2['default'])(groupKey) + ']\n--';
if (!quiet) {
console.log(groupLog);
}
return groupLog + '\n' + group.show(10, quiet);
}).reduce(function (p, n) {
return p + '\n' + n;
});
}
}, {
key: 'listGroups',
value: function listGroups() {
return [].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref3) {
var groupKey = _ref3.groupKey;
return groupKey;
});
}
}, {
key: 'listHashs',
value: function listHashs() {
return [].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref4) {
var hash = _ref4.hash;
return hash;
});
}
}, {
key: 'map',
value: function map(func) {
var _ref6;
var mapped = [].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref5) {
var group = _ref5.group;
return group.map(func);
});
return this.df.__newInstance__((_ref6 = []).concat.apply(_ref6, (0, _toConsumableArray3['default'])(mapped.map(function (group) {
return group.toCollection();
}))), mapped[0].listColumns());
}
}, {
key: 'filter',
value: function filter(condition) {
var _ref8;
var mapped = [].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref7) {
var group = _ref7.group;
return group.filter(condition);
}).filter(function (group) {
return group.listColumns().length > 0;
});
return mapped.length === 0 ? [] : this.df.__newInstance__((_ref8 = []).concat.apply(_ref8, (0, _toConsumableArray3['default'])(mapped.map(function (group) {
return group.toCollection();
}))), mapped[0].listColumns());
}
}, {
key: 'chain',
value: function chain() {
var _ref10;
for (var _len2 = arguments.length, funcs = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
funcs[_key2] = arguments[_key2];
}
var mapped = [].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref9) {
var group = _ref9.group;
return group.chain.apply(group, funcs);
});
return this.df.__newInstance__((_ref10 = []).concat.apply(_ref10, (0, _toConsumableArray3['default'])(mapped.map(function (group) {
return group.toCollection();
}))), mapped[0].listColumns());
}
}, {
key: 'aggregate',
value: function aggregate(func) {
var columnName = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'aggregation';
return this.df.__newInstance__([].concat((0, _toConsumableArray3['default'])(this)).map(function (_ref11) {
var group = _ref11.group,
groupKey = _ref11.groupKey;
return (0, _extends4['default'])({}, groupKey, (0, _defineProperty3['default'])({}, columnName, func(group, groupKey)));
}), [].concat((0, _toConsumableArray3['default'])(this.on), [columnName]));
}
}, {
key: 'pivot',
value: function pivot(columnToPivot) {
var func = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : function (gdf) {
return gdf.count();
};
var columns = [].concat((0, _toConsumableArray3['default'])(this.on), (0, _toConsumableArray3['default'])(this.df.distinct(columnToPivot).toArray(columnToPivot)));
return this.df.__newInstance__(this.aggregate(function (group) {
return group.groupBy(columnToPivot).aggregate(function (gp, gk) {
return (0, _defineProperty3['default'])({}, gk[columnToPivot], func(gp, gk));
}).toArray('aggregation').reduce(function (p, n) {
return (0, _extends4['default'])({}, p, n);
}, {});
}).toCollection().map(function (_ref13) {
var aggregation = _ref13.aggregation,
rest = (0, _objectWithoutProperties3['default'])(_ref13, ['aggregation']);
return (0, _extends4['default'])({}, rest, aggregation);
}), columns);
}
}, {
key: 'melt',
value: function melt() {
var _this = this;
var variableColumnName = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 'variable';
var valueColumnName = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'value';
var columns = [].concat((0, _toConsumableArray3['default'])(this.on), [variableColumnName, valueColumnName]);
return this.df.__newInstance__(this.aggregate(function (group) {
return (0, _entries2['default'])(group.toDict()).reduce(function (tidy, _ref14) {
var _ref15 = (0, _slicedToArray3['default'])(_ref14, 2),
key = _ref15[0],
value = _ref15[1];
return [].concat((0, _toConsumableArray3['default'])(tidy), (0, _toConsumableArray3['default'])(value.reduce(function (p, n) {
var _ref16;
return !_this.on.includes(key) ? [].concat((0, _toConsumableArray3['default'])(p), [(_ref16 = {}, (0, _defineProperty3['default'])(_ref16, variableColumnName, key), (0, _defineProperty3['default'])(_ref16, valueColumnName, n), _ref16)]) : p;
}, [])));
}, []);
}).toCollection().reduce(function (p, _ref17) {
var aggregation = _ref17.aggregation,
rest = (0, _objectWithoutProperties3['default'])(_ref17, ['aggregation']);
return [].concat((0, _toConsumableArray3['default'])(p), (0, _toConsumableArray3['default'])(aggregation.map(function (x) {
return (0, _extends4['default'])({}, rest, x);
})));
}, []), columns);
}
}]);
return GroupedDataFrame;
}();
exports['default'] = GroupedDataFrame; |
var _getIterator3 = _interopRequireDefault(_getIterator2);
var _iterator2 = require('babel-runtime/core-js/symbol/iterator'); |
TwHistory.py | import calendar
import math
import pandas as pd
import time
import twstock
import requests
from datetime import datetime, timedelta
from dateutil import relativedelta
from db.Connection import session
from enum import Enum
from model.StockHistory import StockHistory
from sys import float_info
from talib import abstract
class HistoryType(Enum):
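    # Tuple layout matches HistoryTypeTo: (DB code, human label, horizon note).
    # 日 = day / 短線 = short-term, 週 = week / 中短線 = short-to-medium-term, 月 = month / 中長線 = medium-to-long-term.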
DAY = ("0", "日", "短線")
WEEK = ("1", "週", "中短線")
MONTH = ("2", "月", "中長線")
class HistoryTypeTo(Enum):
DB = 0
HUMAN = 1
EXPLAIN = 2
class TwHistory:
"""TwHistory class"""
dateFormatForTwStock = None
dateFormat = None
rsiDict = None
williamsDict = None
macdDict = None
bbandDict = None
def __init__(self):
self.dateFormatForTwStock = "%Y/%m/%d"
self.dateFormat = "%Y-%m-%d"
def transformStrToDateTimeForTwStock(self, targetStr):
return datetime.strptime(targetStr, self.dateFormatForTwStock)
def transformStrToDateTime(self, targetStr):
return datetime.strptime(targetStr, self.dateFormat)
def transformDateTimeToStr(self, date):
return date.strftime(self.dateFormat)
def retIfNaN(self, num):
if math.isnan(num):
return None
else:
return num
def createDataFrame(self, history):
df = pd.DataFrame([h.as_simple_dict() for h in history])
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
return df
def deleteHistory(self, code, type, startDate, endDate):
session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == type).\
filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\
filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\
delete()
session.commit()
def calculateRSI(self, df):
rsi = abstract.RSI(df, timeperiod=5)
self.rsiDict = {}
for index, number in rsi.iteritems():
self.rsiDict[self.transformDateTimeToStr(index)] = number
def calculateWilliams(self, df):
williams = abstract.WILLR(df, timeperiod=5)
self.williamsDict = {}
for index, number in williams.iteritems():
self.williamsDict[self.transformDateTimeToStr(index)] = number
def calculateMACD(self, df):
macd = abstract.MACD(df)
self.macdDict = {}
for index, row in macd.iterrows():
self.macdDict[self.transformDateTimeToStr(index)] = row
def calculateBBAND(self, df):
bband = abstract.BBANDS(df, timeperiod=22)
self.bbandDict = {}
for index, row in bband.iterrows():
self.bbandDict[self.transformDateTimeToStr(index)] = row
def updateHistoryTechnicalIndicator(self, history):
date = history.date
updateFlag = False
if history.rsi is None:
history.rsi = self.retIfNaN(self.rsiDict[date])
updateFlag = updateFlag or history.rsi is not None
if history.williams is None:
history.williams = self.retIfNaN(self.williamsDict[date])
updateFlag = updateFlag or history.williams is not None
if history.macd is None:
history.macd = self.retIfNaN(self.macdDict[date].macd)
updateFlag = updateFlag or history.macd is not None
if history.macdsignal is None:
history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)
updateFlag = updateFlag or history.macdsignal is not None
if history.macdhist is None:
history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)
updateFlag = updateFlag or history.macdhist is not None
if history.upperband is None:
history.upperband = self.retIfNaN(self.bbandDict[date].upperband)
updateFlag = updateFlag or history.upperband is not None
if history.middleband is None:
history.middleband = self.retIfNaN(self.bbandDict[date].middleband)
updateFlag = updateFlag or history.middleband is not None
if history.lowerband is None:
history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)
updateFlag = updateFlag or history.lowerband is not None
if updateFlag:
session.merge(history)
def dayHistory(self):
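        # Fetch daily price history for every stock/ETF from the FinMind API, resuming from the latest stored date.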
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type):
print("dayHistory code: " + k)
dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB)
history = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == dayType).\
order_by(StockHistory.date.desc()).\
first()
nowDate = datetime.now()
endDateStr = self.transformDateTimeToStr(nowDate)
startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date
self.finmindtrade(k, startDateStr, endDateStr, dayType)
def weekHistory(self):
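        # Roll stored daily rows up into weekly (Monday-anchored) OHLCV bars.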
today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
weekStart = today - timedelta(days=today.weekday())
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
print("weekHistory code: " + k)
latestHistoryWeek = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)
weekStartPast = startdate - timedelta(days=startdate.weekday())
weekEndPast = weekStartPast + timedelta(days=6)
while weekStartPast <= weekStart:
self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)
historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),
capacity=0, turnover=0, high=0, low=float_info.max, close=0)
firstFlag = True
for historyDay in session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\
filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\
order_by(StockHistory.date.asc()).\
all():
historyWeek.date = self.transformDateTimeToStr(weekStartPast)
historyWeek.close = historyDay.close
historyWeek.capacity += historyDay.capacity
historyWeek.turnover += historyDay.turnover
if firstFlag:
historyWeek.open = historyDay.open
firstFlag = False
historyWeek.high = max(historyWeek.high, historyDay.high)
historyWeek.low = min(historyWeek.low, historyDay.low)
if not firstFlag:
session.merge(historyWeek)
weekStartPast += timedelta(days=7)
weekEndPast += timedelta(days=7)
session.commit()
def monthHistory(self):
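        # Roll stored daily rows up into calendar-month OHLCV bars.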
today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
monthStart = today.replace(day=1)
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
print("monthHistory code: " + k)
latestHistoryMonth = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)
monthStartPast = startdate.replace(day=1)
monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
while monthStartPast <= monthStart:
self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)
historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),
capacity=0, turnover=0, high=0, low=float_info.max, close=0)
firstFlag = True
for historyDay in session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\
filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\
order_by(StockHistory.date.asc()).\
all():
historyMonth.date = self.transformDateTimeToStr(monthStartPast)
historyMonth.close = historyDay.close
historyMonth.capacity += historyDay.capacity
historyMonth.turnover += historyDay.turnover
if firstFlag:
historyMonth.open = historyDay.open
firstFlag = False
historyMonth.high = max(historyMonth.high, historyDay.high)
historyMonth.low = min(historyMonth.low, historyDay.low)
if not firstFlag:
session.merge(historyMonth)
monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)
monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
session.commit()
def technicalIndicator(self):
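        # Compute RSI, Williams %R, MACD and Bollinger Bands for every code and history type, then persist them.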
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
for historyType in HistoryType:
print("technicalIndicator code: " + k + ", type: " + self.translate(historyType, HistoryTypeTo.HUMAN))
historyList = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
order_by(StockHistory.date.asc()).\
all()
if len(historyList) == 0:
continue
df = self.createDataFrame(historyList)
self.calculateRSI(df)
self.calculateWilliams(df)
self.calculateMACD(df)
self.calculateBBAND(df)
for history in historyList:
self.updateHistoryTechnicalIndicator(history)
session.commit()
def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):
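        # Report RSI / Williams %R divergence on the latest bars, ranked by daily turnover.
        # Terms used in the output: 相鄰背離 = adjacent-bar divergence, 看空 = bearish, 看多 = bullish.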
turnoverDict = {}
nameDict = {}
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
history = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
turnoverDict[k] = history.turnover
nameDict[k] = v.name
rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}
print("按當日成交值由大至小排名,背離條件: rsi > " + str(highRsi) + " or rsi < " + str(lowRsi))
for rankIdx, code in enumerate(rankDict.keys()):
closePrice = None
divergeDict = {}
for historyType in HistoryType:
historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)
historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)
historyList = session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
filter(StockHistory.rsi.isnot(None)).\
order_by(StockHistory.date.desc()).\
limit(self.recentHistoryLimit(historyType)).\
all()
historyListLength = len(historyList)
if historyListLength > 0:
closePrice = historyList[0].close
if historyListLength > 1:
if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams and historyList[0].upperband is not None and historyList[0].high > historyList[0].upperband and historyList[0].close < historyList[0].open:
divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看空"] = "rsi up williams down"
elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams and historyList[0].lowerband is not None and historyList[0].low < historyList[0].lowerband and historyList[0].close > historyList[0].open:
divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看多"] = "rsi down williams up"
# if historyListLength > 2:
# highPeak = []
# lowPeak = []
# for i, history in enumerate(historyList):
# if i == 0 or i == historyListLength - 1:
# continue
# if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:
# highPeak.append(history)
# if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:
# lowPeak.append(history)
# if len(highPeak) == 2 and len(lowPeak) == 2:
# break
# if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):
# if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:
# divergeDict[historyTypeHuman + " 波峰背離 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi up williams down"
# elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:
# for low in lowPeak:
# if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:
# divergeDict[historyTypeHuman + " 波峰背離 反彈不過前高 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi down williams fast up"
# break
# if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):
# if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:
# divergeDict[historyTypeHuman + " 波谷背離 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi down williams up"
# elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:
# for high in highPeak:
# if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:
# divergeDict[historyTypeHuman + " 波谷背離 回測不過前低 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi up williams fast down"
# break
if len(divergeDict) > 0:
print("code: " + code + ", name: " + nameDict[code] + ", rank: " + str(rankIdx+1) + "/" + str(len(rankDict)) + ", close price: " + str(closePrice))
for k, v in divergeDict.items():
print(k + " => " + v)
print("")
print("========================================================================================")
def isStockOrETF(self, type):
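        # "股票" (stock) and "ETF" are the security types used by twstock.codes.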
return type == "股票" or type == "ETF"
def isHistoryExist(self, code):
return session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\
first() is not None
def isHighRsi(self, highRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi < highRsi:
return False
elif i == 2:
break
return True
def isLowRsi(self, lowRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi > lowRsi:
return False
elif i == 2:
| EK:
return 16
else:
return 6
def translate(self, historyType, historyTypeTo):
return historyType.value[historyTypeTo.value]
def finmindtrade(self, code, start, end, dayType):
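        # Download daily OHLCV rows for one stock from the FinMind API and merge them into StockHistory; the trailing sleep keeps within the API rate limit.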
url = "https://api.finmindtrade.com/api/v4/data"
parameter = {
"dataset": "TaiwanStockPrice",
"data_id": code,
"start_date": start,
"end_date": end,
"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0wOS0yMiAxMzo0MzoyOSIsInVzZXJfaWQiOiJzaGlueWxpbiIsImlwIjoiMjEwLjY0LjE4LjIifQ.0USWMl--2LhZ9W8nyEQncyyw3Jfm-hu5xzJrNOCuEUU"
}
resp = requests.get(url, params=parameter)
json = resp.json()
if json is not None:
            for data in json["data"]:
history = StockHistory(code=code, type=dayType, date=data["date"],
capacity=data["Trading_Volume"], turnover=data["Trading_money"],
open=data["open"], high=data["max"], low=data["min"], close=data["close"])
session.merge(history)
session.commit()
time.sleep(6.1)
twHistory = TwHistory()
twHistory.dayHistory()
twHistory.weekHistory()
twHistory.monthHistory()
twHistory.technicalIndicator()
twHistory.diverge(90, 10, -20, -80)
twHistory.diverge(80, 20, -20, -80)
twHistory.diverge(70, 30, -20, -80) | break
return True
def recentHistoryLimit(self, historyType):
if historyType == HistoryType.DAY:
return 40
elif historyType == HistoryType.WE |
main.py | # Flappy Bird made by Thuongton999
# Ez-ist mode
from settings import *
from objects import *
def birdCollision(bird, column):
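    # Axis-aligned bounding-box overlap test between the bird and a pipe column.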
return (
bird.positionX < column.positionX + column.WIDTH and
bird.positionX + bird.WIDTH > column.positionX and
bird.positionY < column.positionY + column.HEIGHT and
bird.positionY + bird.HEIGHT > column.positionY
)
window = Window()
bird = Bird()
environment = Environment()
columns = Columns(interface=window)
score = Score(interface=window)
def gameQuit():
os.sys.exit("You dont want to play this game? Fvck you!")
pygame.quit()
def gameStartScreen():
startGame = False
startButton = Button(
position_x=window.WIDTH//2,
position_y=window.HEIGHT//2,
button_width=150, button_height=90
)
startButton.positionX -= startButton.WIDTH//2
startButton.positionY -= startButton.HEIGHT//2
while not startGame:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
marginTop = 20
marginBottom = 10
titleRendered = bordered(
"Flappy Peter Teo",
gameDefaultSettings["DEFAULT_TITLE"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=5
)
header2Rendered = bordered(
"thuongton999 code this, ya :))",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["sun"],
ocolor=colors["white"],
opx=3
)
copyrightRendered = bordered(
"Copyright by thuongton999",
gameDefaultSettings["COPYRIGHT"],
gfcolor=colors["sun"],
ocolor=colors["white"],
opx=3
)
window.interface.blit(titleRendered, (window.WIDTH//2-titleRendered.get_width()//2, marginTop))
window.interface.blit(header2Rendered, (window.WIDTH//2-header2Rendered.get_width()//2, marginTop*2+titleRendered.get_height()))
window.interface.blit(
copyrightRendered,
(window.WIDTH//2-copyrightRendered.get_width()//2, window.HEIGHT-marginBottom-copyrightRendered.get_height())
)
window.interface.blit(startButton.buttonImage, (startButton.positionX, startButton.positionY))
mousePosX, mousePosY = pygame.mouse.get_pos()
mouseButtonPressed = pygame.mouse.get_pressed(3)
if startButton.onClick(mousePosX=mousePosX, mousePosY=mousePosY, clicked=mouseButtonPressed[0]):
startGame = True
break
pygame.display.update()
window.frame.tick(window.FPS)
while startGame:
bird.__init__()
columns.__init__(interface=window)
score.__init__(interface=window)
getReady()
gamePlay()
startGame = gameOver()
return startGame
def getReady():
ready = False
while not ready:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:
return
marginLeft = 30
getReadyTextRendered = bordered(
"Get ready? Tap or press any key",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["grey"],
ocolor=colors["white"],
opx=3
)
window.interface.blit(bird.birdRotatedImage, (bird.positionX, bird.positionY))
window.interface.blit(
getReadyTextRendered,
(
bird.positionX+bird.WIDTH+marginLeft,
bird.positionY+getReadyTextRendered.get_height()//2
)
)
pygame.display.update()
window.frame.tick(window.FPS)
def gamePlay():
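    # Main loop: scroll the background, move and recycle columns, apply gravity, update the score, and stop on collision or when the bird leaves the screen.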
while not bird.dead:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
environment.tapSound.play()
bird.positionY -= bird.speed if bird.positionY >= 0 else 0
bird.speed = bird.defaultSpeed
for topColumn, bottomColumn, passed in columns.columns:
topColumn.positionX -= window.speed
bottomColumn.positionX -= window.speed
window.interface.blit(topColumn.columnImage, (topColumn.positionX, -(topColumn.imageHeight - topColumn.HEIGHT)))
window.interface.blit(bottomColumn.columnImage, (bottomColumn.positionX, bottomColumn.positionY))
if birdCollision(bird, topColumn) or birdCollision(bird, bottomColumn):
bird.dead = True
break
if columns.columns[0][0].positionX + columns.columns[0][0].WIDTH < bird.positionX and not columns.columns[0][2]:
columns.columns[0][2] = True
score.points += 1
if columns.columns[0][0].positionX + columns.columns[0][0].WIDTH < 0:
columns.columns.pop(0)
columns.addNewColumn()
bird.positionY += bird.speed + 0.5*environment.gravity
bird.speed += environment.gravity
bird.birdRotatedImage = pygame.transform.rotate(bird.birdDefaultImage, -bird.speed*2)
bird.updateBirdSize()
window.interface.blit(bird.birdRotatedImage, (bird.positionX, bird.positionY))
score.render()
if not (0 <= bird.positionY <= window.HEIGHT - bird.HEIGHT):
bird.dead = True
pygame.display.update()
window.frame.tick(window.FPS)
def | ():
environment.gameOverSound.play()
scoreBoard = ScoreBoard(points=score.points, interface=window)
titleRendered = bordered(
"GAME OVER",
gameDefaultSettings["DEFAULT_TITLE"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=5
)
cakhiaRendered = bordered(
"You have been addicted xD",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=3
)
notificationRendered = bordered(
"Press SPACE to play again or ESC to go back to Menu",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["lemon_chiffon"],
ocolor=colors["sun"],
opx=3
)
titleDropDownSpeed = 6
titlePositionX = window.WIDTH//2-titleRendered.get_width()//2
titlePositionY = -titleRendered.get_height()
titleHeight = titleRendered.get_height()
marginBottom = 10
marginTop = 20
notificationPositionX = window.WIDTH//2-notificationRendered.get_width()//2
notificationPositionY = scoreBoard.positionY+scoreBoard.HEIGHT+marginTop
cakhiaPositionX = window.WIDTH//2-cakhiaRendered.get_width()//2
cakhiaPositionY = scoreBoard.positionY-marginBottom-cakhiaRendered.get_height()
playAgain = False
while not playAgain:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return True
elif event.key == pygame.K_ESCAPE:
return False
titlePositionY += titleDropDownSpeed if titlePositionY+titleHeight+marginBottom < cakhiaPositionY else 0
window.interface.blit(cakhiaRendered, (cakhiaPositionX, cakhiaPositionY))
window.interface.blit(notificationRendered, (notificationPositionX,notificationPositionY))
window.interface.blit(titleRendered, (titlePositionX, titlePositionY))
scoreBoard.renderScoreBoard()
pygame.display.update()
window.frame.tick(window.FPS)
return playAgain
if __name__ == "__main__":
os.system("cls")
home = True
while home:
gameStartScreen() | gameOver |
volumemode.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"path/filepath"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
volevents "k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
noProvisioner = "kubernetes.io/no-provisioner"
pvNamePrefix = "pv"
)
type volumeModeTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeModeTestSuite{}
// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
func InitVolumeModeTestSuite() TestSuite {
return &volumeModeTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeMode",
testPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
},
}
}
func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
cs clientset.Interface
ns *v1.Namespace
// genericVolumeTestResource contains pv, pvc, sc, etc., owns cleaning that up
genericVolumeTestResource
intreeOps opCounts
migratedOps opCounts
}
var (
dInfo = driver.GetDriverInfo()
l local
)
// No preconditions to test. Normally they would be in a BeforeEach here.
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumemode")
init := func() {
l = local{}
l.ns = f.Namespace
l.cs = f.ClientSet
// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
}
// manualInit initializes l.genericVolumeTestResource without creating the PV & PVC objects.
manualInit := func() {
init()
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
var (
scName string
pvSource *v1.PersistentVolumeSource
volumeNodeAffinity *v1.VolumeNodeAffinity
)
l.genericVolumeTestResource = genericVolumeTestResource{
driver: driver,
config: l.config,
pattern: pattern,
}
// Create volume for pre-provisioned volume tests
l.volume = CreateVolume(driver, l.config, pattern.VolType)
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
l.sc = storageClass
l.pv = e2epv.MakePersistentVolume(pvConfig)
l.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
if l.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
l.sc.VolumeBindingMode = &volBindMode
l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: dDriver.GetClaimSize(),
StorageClassName: &(l.sc.Name),
VolumeMode: &pattern.VolMode,
}, l.ns.Name)
}
default:
framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
}
}
cleanup := func() {
l.cleanupResource()
if l.driverCleanup != nil {
l.driverCleanup()
l.driverCleanup = nil
}
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
}
	// We register different tests depending on the driver
isBlockSupported := dInfo.Capabilities[CapBlock]
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() {
manualInit()
defer cleanup()
var err error
ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err, "Failed to create sc")
ginkgo.By("Creating pv and pvc")
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
framework.ExpectNoError(err, "Failed to create pv")
// Prebind pv
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err, "Failed to create pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc), "Failed to bind pv and pvc")
ginkgo.By("Creating pod")
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Setting node
pod.Spec.NodeName = l.config.ClientNodeName
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err, "Failed to create pod")
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod")
}()
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": l.ns.Name,
"reason": events.FailedMountVolume,
}.AsSelector().String()
msg := "Unable to attach or mount volumes"
err = common.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about FailedMountVolume")
}
// Check the pod is still not running
p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
})
}
case testpatterns.DynamicPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() {
manualInit()
defer cleanup()
var err error
ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err, "Failed to create sc")
ginkgo.By("Creating pv and pvc")
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err, "Failed to create pvc")
eventSelector := fields.Set{
"involvedObject.kind": "PersistentVolumeClaim",
"involvedObject.name": l.pvc.Name,
"involvedObject.namespace": l.ns.Name,
"reason": volevents.ProvisioningFailed,
}.AsSelector().String()
msg := "does not support block volume provisioning"
err = common.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.ClaimProvisionTimeout)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about provisioing failed")
}
// Check the pvc is still pending
pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(l.pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)")
framework.ExpectEqual(pvc.Status.Phase, v1.ClaimPending, "PVC phase isn't pending")
})
}
default:
framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
}
ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func() {
skipTestIfBlockNotSupported(driver)
init()
l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern)
defer cleanup()
ginkgo.By("Creating pod")
var err error
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Change volumeMounts to volumeDevices and the other way around
pod = swapVolumeMode(pod)
// Run the pod
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err, "Failed to create pod")
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod")
}()
ginkgo.By("Waiting for the pod to fail")
// Wait for an event that the pod is invalid.
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": l.ns.Name,
"reason": events.FailedMountVolume,
}.AsSelector().String()
var msg string
if pattern.VolMode == v1.PersistentVolumeBlock {
msg = "has volumeMode Block, but is specified in volumeMounts"
} else {
msg = "has volumeMode Filesystem, but is specified in volumeDevices"
}
err = common.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
// Events are unreliable, don't depend on them. They're used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about mismatched volume use")
}
// Check the pod is still not running
p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
})
ginkgo.It("should not mount / map unused volumes in a pod", func() {
if pattern.VolMode == v1.PersistentVolumeBlock {
skipTestIfBlockNotSupported(driver)
}
init()
l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern)
defer cleanup()
ginkgo.By("Creating pod")
var err error
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeDevices = nil
pod.Spec.Containers[i].VolumeMounts = nil
}
// Run the pod
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod))
}()
err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err)
// Reload the pod to get its node
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node")
node, err := l.cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Listing mounted volumes in the pod")
hostExec := utils.NewHostExec(f)
defer hostExec.Cleanup()
volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node)
framework.ExpectNoError(err)
driverInfo := driver.GetDriverInfo()
volumePlugin := driverInfo.InTreePluginName
if len(volumePlugin) == 0 {
// TODO: check if it's a CSI volume first
volumePlugin = "kubernetes.io/csi"
}
ginkgo.By(fmt.Sprintf("Checking that volume plugin %s is not used in pod directory", volumePlugin))
safeVolumePlugin := strings.ReplaceAll(volumePlugin, "/", "~")
for _, path := range volumePaths {
gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be mounted into pod directory", volumePlugin))
}
for _, path := range devicePaths {
gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be symlinked into pod directory", volumePlugin))
}
})
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
e2epv.PersistentVolumeConfig, e2epv.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: scName,
},
Provisioner: noProvisioner,
VolumeBindingMode: &volBindMode,
}
// PV
pvConfig := e2epv.PersistentVolumeConfig{
PVSource: pvSource,
NodeAffinity: volumeNodeAffinity,
NamePrefix: pvNamePrefix,
StorageClassName: scName,
VolumeMode: &volMode,
}
// PVC
pvcConfig := e2epv.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &scName,
VolumeMode: &volMode,
}
return scConfig, pvConfig, pvcConfig
}
// swapVolumeMode changes volumeMounts to volumeDevices and the other way around
func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod {
pod := podTemplate.DeepCopy()
for c := range pod.Spec.Containers {
container := &pod.Spec.Containers[c]
container.VolumeDevices = []v1.VolumeDevice{}
container.VolumeMounts = []v1.VolumeMount{}
// Change VolumeMounts to VolumeDevices
for _, volumeMount := range podTemplate.Spec.Containers[c].VolumeMounts {
container.VolumeDevices = append(container.VolumeDevices, v1.VolumeDevice{
Name: volumeMount.Name,
DevicePath: volumeMount.MountPath,
})
}
// Change VolumeDevices to VolumeMounts
for _, volumeDevice := range podTemplate.Spec.Containers[c].VolumeDevices {
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: volumeDevice.Name,
MountPath: volumeDevice.DevicePath,
})
}
}
return pod
}
// listPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
// /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
// Sample output:
// /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
// /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) {
mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")
mounts, err = listPodDirectory(h, mountPath, node)
if err != nil {
return nil, nil, err
}
devices, err = listPodDirectory(h, devicePath, node)
if err != nil {
return nil, nil, err
}
return mounts, devices, nil
}
func listPodDirectory(h utils.HostExec, path string, node *v1.Node) ([]string, error) | {
// Return no error if the directory does not exist (e.g. there are no block volumes used)
_, err := h.IssueCommandWithResult("test ! -d "+path, node)
if err == nil {
// The directory does not exist
return nil, nil
}
// The directory either exists or a real error happened (e.g. "access denied").
// Ignore the error, "find" will hit the error again and we report it there.
// Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
out, err := h.IssueCommandWithResult(cmd, node)
if err != nil {
return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err)
}
return strings.Split(out, "\n"), nil
} |
|
errHandler.js | var gulp = require('gulp')
var $ = require('gulp-load-plugins')() | $.util.beep()
    $.util.log($.util.colors.red('↓Look here, look here, an error occurred:'))
$.util.log(e)
return this
}
module.exports = errHandler |
function errHandler( e ) { |
setup.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, | import io
import os
import setuptools # type: ignore
version = '0.1.0'
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, 'README.rst')
with io.open(readme_filename, encoding='utf-8') as readme_file:
readme = readme_file.read()
setuptools.setup(
name='google-cloud-resourcemanager',
version=version,
long_description=readme,
packages=setuptools.PEP420PackageFinder.find(),
namespace_packages=('google', 'google.cloud'),
platforms='Posix; MacOS X; Windows',
include_package_data=True,
install_requires=(
'google-api-core[grpc] >= 1.27.0, < 3.0.0dev',
'libcst >= 0.2.5',
'proto-plus >= 1.15.0',
'packaging >= 14.3', 'grpc-google-iam-v1 >= 0.12.3, < 0.13dev', ),
python_requires='>=3.6',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
zip_safe=False,
) | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# |
finder.go | // Copyright 2017 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package subscription // import "miniflux.app/reader/subscription"
import (
"fmt"
"io"
"regexp"
"strings"
"miniflux.app/config"
"miniflux.app/errors"
"miniflux.app/http/client"
"miniflux.app/reader/browser"
"miniflux.app/reader/parser"
"miniflux.app/url"
"github.com/PuerkitoBio/goquery"
)
var (
errUnreadableDoc = "Unable to analyze this page: %v"
youtubeChannelRegex = regexp.MustCompile(`youtube\.com/channel/(.*)`)
youtubeVideoRegex = regexp.MustCompile(`youtube\.com/watch\?v=(.*)`)
)
// FindSubscriptions downloads and tries to find one or more subscriptions from a URL.
func FindSubscriptions(websiteURL, userAgent, cookie, username, password string, fetchViaProxy, allowSelfSignedCertificates bool) (Subscriptions, *errors.LocalizedError) {
websiteURL = findYoutubeChannelFeed(websiteURL)
websiteURL = parseYoutubeVideoPage(websiteURL)
clt := client.NewClientWithConfig(websiteURL, config.Opts)
clt.WithCredentials(username, password)
clt.WithUserAgent(userAgent)
clt.WithCookie(cookie)
clt.AllowSelfSignedCertificates = allowSelfSignedCertificates
if fetchViaProxy {
clt.WithProxy()
}
response, err := browser.Exec(clt)
if err != nil {
return nil, err
}
body := response.BodyAsString()
if format := parser.DetectFeedFormat(body); format != parser.FormatUnknown {
var subscriptions Subscriptions
subscriptions = append(subscriptions, &Subscription{
Title: response.EffectiveURL,
URL: response.EffectiveURL,
Type: format,
})
return subscriptions, nil
}
subscriptions, err := parseWebPage(response.EffectiveURL, strings.NewReader(body))
if err != nil || subscriptions != nil {
return subscriptions, err
}
return tryWellKnownUrls(websiteURL, userAgent, cookie, username, password)
}
func parseWebPage(websiteURL string, data io.Reader) (Subscriptions, *errors.LocalizedError) {
var subscriptions Subscriptions
queries := map[string]string{
"link[type='application/rss+xml']": "rss",
"link[type='application/atom+xml']": "atom",
"link[type='application/json']": "json",
}
doc, err := goquery.NewDocumentFromReader(data)
if err != nil {
return nil, errors.NewLocalizedError(errUnreadableDoc, err)
}
for query, kind := range queries {
doc.Find(query).Each(func(i int, s *goquery.Selection) {
subscription := new(Subscription)
subscription.Type = kind
if title, exists := s.Attr("title"); exists {
subscription.Title = title
} else {
subscription.Title = "Feed"
}
if feedURL, exists := s.Attr("href"); exists {
subscription.URL, _ = url.AbsoluteURL(websiteURL, feedURL)
}
if subscription.Title == "" {
subscription.Title = subscription.URL
}
if subscription.URL != "" {
subscriptions = append(subscriptions, subscription)
}
})
}
return subscriptions, nil
}
func findYoutubeChannelFeed(websiteURL string) string {
| func parseYoutubeVideoPage(websiteURL string) string {
if !youtubeVideoRegex.MatchString(websiteURL) {
return websiteURL
}
clt := client.NewClientWithConfig(websiteURL, config.Opts)
response, browserErr := browser.Exec(clt)
if browserErr != nil {
return websiteURL
}
doc, docErr := goquery.NewDocumentFromReader(response.Body)
if docErr != nil {
return websiteURL
}
if channelID, exists := doc.Find(`meta[itemprop="channelId"]`).First().Attr("content"); exists {
return fmt.Sprintf(`https://www.youtube.com/feeds/videos.xml?channel_id=%s`, channelID)
}
return websiteURL
}
func tryWellKnownUrls(websiteURL, userAgent, cookie, username, password string) (Subscriptions, *errors.LocalizedError) {
var subscriptions Subscriptions
knownURLs := map[string]string{
"/atom.xml": "atom",
"/feed.xml": "atom",
"/feed/": "atom",
"/rss.xml": "rss",
}
lastCharacter := websiteURL[len(websiteURL)-1:]
if lastCharacter == "/" {
websiteURL = websiteURL[:len(websiteURL)-1]
}
for knownURL, kind := range knownURLs {
fullURL, err := url.AbsoluteURL(websiteURL, knownURL)
if err != nil {
continue
}
clt := client.NewClientWithConfig(fullURL, config.Opts)
clt.WithCredentials(username, password)
clt.WithUserAgent(userAgent)
clt.WithCookie(cookie)
		// Some websites redirect unknown URLs to the home page.
		// As a result, each known URL would end up in the subscription list.
		// We don't want the user to choose between invalid feed URLs.
clt.WithoutRedirects()
response, err := clt.Get()
if err != nil {
continue
}
if response != nil && response.StatusCode == 200 {
subscription := new(Subscription)
subscription.Type = kind
subscription.Title = fullURL
subscription.URL = fullURL
if subscription.URL != "" {
subscriptions = append(subscriptions, subscription)
}
}
}
return subscriptions, nil
}
| matches := youtubeChannelRegex.FindStringSubmatch(websiteURL)
if len(matches) == 2 {
return fmt.Sprintf(`https://www.youtube.com/feeds/videos.xml?channel_id=%s`, matches[1])
}
return websiteURL
}
|
actions.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import {
IInteractiveWindowMapping,
InteractiveWindowMessages
} from '../../../client/datascience/interactive-common/interactiveWindowTypes';
import { IJupyterVariable, IJupyterVariablesRequest } from '../../../client/datascience/types';
import {
CommonAction,
CommonActionType,
CommonActionTypeMapping,
ICellAction,
ICodeAction,
ICodeCreatedAction,
IEditCellAction,
ILinkClickAction,
IOpenSettingsAction,
IScrollAction,
IShowDataViewerAction,
IVariableExplorerHeight
} from '../../interactive-common/redux/reducers/types';
import { IMonacoModelContentChangeEvent } from '../../react-common/monacoHelpers';
// This function isn't made common and isn't exported, to ensure it isn't used elsewhere.
function createIncomingActionWithPayload<
M extends IInteractiveWindowMapping & CommonActionTypeMapping,
K extends keyof M
>(type: K, data: M[K]): CommonAction<M[K]> {
// tslint:disable-next-line: no-any
return { type, payload: { data, messageDirection: 'incoming' } as any } as any;
}
// This function isn't made common and isn't exported, to ensure it isn't used elsewhere.
function createIncomingAction(type: CommonActionType | InteractiveWindowMessages): CommonAction {
return { type, payload: { messageDirection: 'incoming', data: undefined } };
}
// See https://react-redux.js.org/using-react-redux/connect-mapdispatch#defining-mapdispatchtoprops-as-an-object
export const actionCreators = {
focusInput: (): CommonAction => createIncomingAction(CommonActionType.FOCUS_INPUT),
restartKernel: (): CommonAction => createIncomingAction(CommonActionType.RESTART_KERNEL),
interruptKernel: (): CommonAction => createIncomingAction(CommonActionType.INTERRUPT_KERNEL),
deleteAllCells: (): CommonAction => createIncomingAction(InteractiveWindowMessages.DeleteAllCells),
deleteCell: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.DELETE_CELL, { cellId }),
undo: (): CommonAction => createIncomingAction(InteractiveWindowMessages.Undo),
redo: (): CommonAction => createIncomingAction(InteractiveWindowMessages.Redo),
linkClick: (href: string): CommonAction<ILinkClickAction> =>
createIncomingActionWithPayload(CommonActionType.LINK_CLICK, { href }),
showPlot: (imageHtml: string) => createIncomingActionWithPayload(InteractiveWindowMessages.ShowPlot, imageHtml),
toggleInputBlock: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.TOGGLE_INPUT_BLOCK, { cellId }),
gotoCell: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.GOTO_CELL, { cellId }),
copyCellCode: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.COPY_CELL_CODE, { cellId }),
gatherCell: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.GATHER_CELL, { cellId }),
gatherCellToScript: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.GATHER_CELL_TO_SCRIPT, { cellId }),
clickCell: (cellId: string): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.CLICK_CELL, { cellId }),
editCell: (cellId: string, e: IMonacoModelContentChangeEvent): CommonAction<IEditCellAction> =>
createIncomingActionWithPayload(CommonActionType.EDIT_CELL, {
cellId,
version: e.versionId,
modelId: e.model.id,
forward: e.forward,
reverse: e.reverse,
id: cellId,
code: e.model.getValue()
}),
submitInput: (code: string, cellId: string): CommonAction<ICodeAction> =>
createIncomingActionWithPayload(CommonActionType.SUBMIT_INPUT, { code, cellId }),
toggleVariableExplorer: (): CommonAction => createIncomingAction(CommonActionType.TOGGLE_VARIABLE_EXPLORER),
setVariableExplorerHeight: (containerHeight: number, gridHeight: number): CommonAction<IVariableExplorerHeight> =>
createIncomingActionWithPayload(CommonActionType.SET_VARIABLE_EXPLORER_HEIGHT, { containerHeight, gridHeight }),
expandAll: (): CommonAction => createIncomingAction(InteractiveWindowMessages.ExpandAll),
collapseAll: (): CommonAction => createIncomingAction(InteractiveWindowMessages.CollapseAll),
export: (): CommonAction => createIncomingAction(CommonActionType.EXPORT),
exportAs: (): CommonAction => createIncomingAction(CommonActionType.EXPORT_NOTEBOOK_AS),
showDataViewer: (variable: IJupyterVariable, columnSize: number): CommonAction<IShowDataViewerAction> =>
createIncomingActionWithPayload(CommonActionType.SHOW_DATA_VIEWER, { variable, columnSize }),
editorLoaded: (): CommonAction => createIncomingAction(CommonActionType.EDITOR_LOADED),
scroll: (isAtBottom: boolean): CommonAction<IScrollAction> =>
createIncomingActionWithPayload(CommonActionType.SCROLL, { isAtBottom }),
unfocus: (cellId: string | undefined): CommonAction<ICellAction> =>
createIncomingActionWithPayload(CommonActionType.UNFOCUS_CELL, { cellId }),
codeCreated: (cellId: string | undefined, modelId: string): CommonAction<ICodeCreatedAction> =>
createIncomingActionWithPayload(CommonActionType.CODE_CREATED, { cellId, modelId }),
editorUnmounted: (): CommonAction => createIncomingAction(CommonActionType.UNMOUNT),
selectKernel: (): CommonAction => createIncomingAction(InteractiveWindowMessages.SelectKernel),
selectServer: (): CommonAction => createIncomingAction(CommonActionType.SELECT_SERVER),
openSettings: (setting?: string): CommonAction<IOpenSettingsAction> =>
createIncomingActionWithPayload(CommonActionType.OPEN_SETTINGS, { setting }),
getVariableData: (
newExecutionCount: number,
startIndex: number = 0,
pageSize: number = 100
): CommonAction<IJupyterVariablesRequest> => | startIndex,
pageSize
}),
widgetFailed: (ex: Error): CommonAction<Error> =>
createIncomingActionWithPayload(CommonActionType.IPYWIDGET_RENDER_FAILURE, ex)
}; | createIncomingActionWithPayload(CommonActionType.GET_VARIABLE_DATA, {
executionCount: newExecutionCount,
sortColumn: 'name',
sortAscending: true, |
test_phase_change.py | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fluids.numerics import assert_close, assert_close1d
from chemicals.phase_change import *
from chemicals.phase_change import (Hvap_data_CRC, Hfus_data_CRC,
Hvap_data_Gharagheizi, Hsub_data_Gharagheizi,
Tb_data_Yaws, Tm_ON_data,
phase_change_data_Perrys2_150,
phase_change_data_Alibakhshi_Cs,
phase_change_data_VDI_PPDS_4)
from chemicals.miscdata import CRC_inorganic_data, CRC_organic_data
from chemicals.identifiers import check_CAS
def test_Watson():
Hvap = Watson(T=320, Hvap_ref=43908, T_ref=300.0, Tc=647.14)
assert_close(Hvap, 42928.990094915454, rtol=1e-12)
def test_Clapeyron():
Hvap = Clapeyron(294.0, 466.0, 5.55E6)
assert_close(Hvap, 26512.36357131963)
# Test at 1/2 bar, sZ=0.98
Hvap = Clapeyron(264.0, 466.0, 5.55E6, 0.98, 5E4)
assert_close(Hvap, 23370.947571814384)
def test_Pitzer():
Hvap = Pitzer(452, 645.6, 0.35017)
assert_close(Hvap, 36696.749078320056)
def test_SMK():
Hvap = SMK(553.15, 751.35, 0.302)
assert_close(Hvap, 39866.18999046232)
def test_MK():
# Problem in article for SMK function.
Hv1 = MK(553.15, 751.35, 0.302)
    # data in [1]_, should give 26.43 kJ/mol
Hv2 = MK(298.15, 469.69, 0.2507)
assert_close(Hv1, 38728.00667307733, rtol=1e-12)
assert_close(Hv2, 25940.988533726406, rtol=1e-12)
def test_Velasco():
Hv1 = Velasco(553.15, 751.35, 0.302)
Hv2 = Velasco(333.2, 476.0, 0.5559)
assert_close(Hv1, 39524.251054691274, rtol=1e-12)
assert_close(Hv2, 33299.428636069264, rtol=1e-12)
def test_Riedel():
# same problem as in Perry's examples
Hv1 = Riedel(294.0, 466.0, 5.55E6)
# Pyridine, 0.0% err vs. exp: 35090 J/mol; from Poling [2]_.
Hv2 = Riedel(388.4, 620.0, 56.3E5)
assert_close(Hv1, 26828.59040728512, rtol=1e-12)
assert_close(Hv2, 35089.80179000598, rtol=1e-12)
def test_Chen():
Hv1 = Chen(294.0, 466.0, 5.55E6)
assert_close(Hv1, 26705.902558030946)
def test_Liu():
Hv1 = Liu(294.0, 466.0, 5.55E6)
assert_close(Hv1, 26378.575260517395)
def test_Vetere():
Hv1 = Vetere(294.0, 466.0, 5.55E6)
assert_close(Hv1, 26363.43895706672)
def test_Hvap_CRC_data():
HvapTb_tot = Hvap_data_CRC['HvapTb'].sum()
assert_close(HvapTb_tot, 30251890.0)
Hvap298_tot = Hvap_data_CRC['Hvap298'].sum()
assert_close(Hvap298_tot, 29343710.0)
Tb_tot = Hvap_data_CRC['Tb'].sum()
assert_close(Tb_tot, 407502.95600000001)
assert Hvap_data_CRC.index.is_unique
assert Hvap_data_CRC.shape == (926, 5)
assert all([check_CAS(i) for i in list(Hvap_data_CRC.index)])
def test_Hfus_CRC_data():
Hfus_total = Hfus_data_CRC['Hfus'].sum()
assert_close(Hfus_total, 29131241)
assert Hfus_data_CRC.index.is_unique
assert Hfus_data_CRC.shape == (1112, 3)
assert all([check_CAS(i) for i in list(Hfus_data_CRC.index)])
def test_Hfus():
assert_close(Hfus('462-06-6', method='CRC'), 11310.0, rtol=1e-12)
assert_close(Hfus('462-06-6'), 11310.0, rtol=1e-12)
assert_close(Hfus(CASRN='75-07-0'), 2310.0)
assert Hfus(CASRN='75000-07-0') is None
assert Hfus_methods('7732-18-5') == ['CRC']
def test_Gharagheizi_Hvap_data():
# 51 CAS number DO NOT validate
Hvap298_tot = Hvap_data_Gharagheizi['Hvap298'].sum()
assert_close(Hvap298_tot, 173584900)
assert Hvap_data_Gharagheizi.index.is_unique
assert Hvap_data_Gharagheizi.shape == (2730, 2)
def | ():
tots = [Hsub_data_Gharagheizi[i].sum() for i in ['Hsub', 'error']]
assert_close(tots[0], 130537650)
assert_close(tots[1], 1522960.0)
assert Hsub_data_Gharagheizi.index.is_unique
assert Hsub_data_Gharagheizi.shape == (1241, 3)
def test_Yaws_Tb_data():
tot = Tb_data_Yaws.sum()
assert_close(tot, 6631287.51)
assert Tb_data_Yaws.index.is_unique
assert Tb_data_Yaws.shape == (13461, 1)
@pytest.mark.slow
def test_Yaws_Tb_CAS_valid():
assert all([check_CAS(i) for i in Tb_data_Yaws.index])
def test_Tm_ON_data():
tot = Tm_ON_data.sum()
assert_close(tot, 4059989.425)
assert Tm_ON_data.shape == (11549, 1)
assert Tm_ON_data.index.is_unique
@pytest.mark.slow
def test_Tm_ON_data_CAS_valid():
assert all([check_CAS(i) for i in Tm_ON_data.index])
def test_Perrys2_150_data():
# rtol=2E-4 for Tmin; only helium-4 needs a higher tolerance
# Everything hits 0 at Tmax except Difluoromethane, methane, and water;
# those needed their Tmax adjusted to their real Tc.
# C1 is divided by 1000, to give units of J/mol instead of J/kmol
# Terephthalic acid removed, was a constant value only.
assert all([check_CAS(i) for i in phase_change_data_Perrys2_150.index])
tots_calc = [phase_change_data_Perrys2_150[i].abs().sum() for i in [u'Tc', u'C1', u'C2', u'C3', u'C4', u'Tmin', u'Tmax']]
tots = [189407.42499999999, 18617223.739999998, 174.34494000000001, 112.51209900000001, 63.894040000000004, 70810.849999999991, 189407.005]
assert_close1d(tots_calc, tots)
assert phase_change_data_Perrys2_150.index.is_unique
assert phase_change_data_Perrys2_150.shape == (344, 8)
def test_Alibakhshi_Cs_data():
# Oops, a bunch of these now-lonely coefficients have an invalid CAS...
# assert all([check_CAS(i) for i in phase_change_data_Alibakhshi_Cs.index])
tots_calc = [phase_change_data_Alibakhshi_Cs[i].abs().sum() for i in [u'C']]
tots = [28154.361500000003]
assert_close1d(tots_calc, tots)
assert phase_change_data_Alibakhshi_Cs.index.is_unique
assert phase_change_data_Alibakhshi_Cs.shape == (1890, 2)
def test_VDI_PPDS_4_data():
"""I believe there are no errors here."""
assert all([check_CAS(i) for i in phase_change_data_VDI_PPDS_4.index])
tots_calc = [phase_change_data_VDI_PPDS_4[i].abs().sum() for i in [u'A', u'B', u'C', u'D', u'E', u'Tc', u'MW']]
tots = [1974.2929800000002, 2653.9399000000003, 2022.530649, 943.25633100000005, 3124.9258610000002, 150142.28, 27786.919999999998]
assert_close1d(tots_calc, tots)
assert phase_change_data_VDI_PPDS_4.index.is_unique
assert phase_change_data_VDI_PPDS_4.shape == (272, 8)
@pytest.mark.slow
@pytest.mark.fuzz
def test_Tb_all_values():
s1 = CRC_inorganic_data.index[CRC_inorganic_data['Tb'].notnull()]
s2 = CRC_organic_data.index[CRC_organic_data['Tb'].notnull()]
s3 = Tb_data_Yaws.index
tots = []
tots_exp = [639213.2310000042, 2280667.079999829, 6631287.510000873]
# These should match the sums of the respective series
for s, method in zip([s1, s2, s3], ['CRC_INORG', 'CRC_ORG', 'YAWS']):
tots.append(sum([Tb(i, method=method) for i in s]))
assert_close1d(tots, tots_exp, rtol=1e-11)
s = set(); s.update(s1); s.update(s2); s.update(s3)
assert len(s) == 13868
def test_Tb():
# CRC_inorg, CRC org, Yaws
Tbs_calc = Tb('993-50-0'), Tb('626-94-8'), Tb('7631-99-4')
Tbs = [399.15, 412.15, 653.15]
assert_close1d(Tbs, Tbs_calc)
hits = [Tb_methods(i) for i in ['993-50-0', '626-94-8', '7631-99-4']]
assert hits == [['CRC_INORG'], ['CRC_ORG'], ['YAWS']]
with pytest.raises(Exception):
Tb('993-50-0', method='BADMETHOD')
    assert Tb('9923443-50-0') is None
assert [] == Tb_methods('9923443-50-0')
w_methods = Tb_methods('7732-18-5')
assert w_methods == ['CRC_INORG', 'YAWS']
Tbs = [Tb('7732-18-5', method=i) for i in w_methods]
assert_close1d(Tbs, [373.124, 373.15])
@pytest.mark.slow
@pytest.mark.fuzz
def test_Tm_all_values():
s1 = CRC_inorganic_data.index[CRC_inorganic_data['Tm'].notnull()]
s2 = CRC_organic_data.index[CRC_organic_data['Tm'].notnull()]
s3 = Tm_ON_data.index
tots = []
tots_exp = [1543322.6125999668, 2571284.480399755, 4059989.4249993376]
# These should match the sums of the respective series
for s, method in zip([s1, s2, s3], ['CRC_INORG', 'CRC_ORG', 'OPEN_NTBKM']):
tots.append(sum([Tm(i, method=method) for i in s]))
assert_close1d(tots, tots_exp, rtol=1e-11)
s = set(); s.update(s1); s.update(s2); s.update(s3)
assert len(s) == 14723
def test_Tm():
# Open notebook, CRC organic, CRC inorg
Tms_calc = Tm('996-50-9'), Tm('999-78-0'), Tm('993-50-0')
Tms = [263.15, 191.15, 274.15]
assert_close1d(Tms, Tms_calc)
hits = [Tm_methods(i) for i in ['996-50-9', '999-78-0', '993-50-0']]
assert hits == [['OPEN_NTBKM'], ['CRC_ORG'], ['CRC_INORG']]
with pytest.raises(Exception):
Tm('993-50-0', method='BADMETHOD')
assert Tm('9923443-50-0') is None
assert [] == Tm_methods('9923443-50-0')
w_methods = Tm_methods('7732-18-5')
assert w_methods == ['OPEN_NTBKM', 'CRC_INORG']
Tms = [Tm('7732-18-5', method=i) for i in w_methods]
assert_close1d(Tms, [273.15, 273.15])
def test_Alibakhshi():
Hvap = Alibakhshi(T=320.0, Tc=647.14, C=-16.7171)
assert_close(Hvap, 41961.30490225752, rtol=1e-13)
def test_PPDS12():
Hvap = PPDS12(300.0, 591.75, 4.60584, 13.97224, -10.592315, 2.120205, 4.277128)
assert_close(Hvap, 37948.76862035927, rtol=1e-13)
| test_Gharagheizi_Hsub_data |
singleton.py | from typing import List, Tuple
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_spend import CoinSpend
from chia.types.condition_opcodes import ConditionOpcode
from chia.util.hash import std_hash
from chia.util.ints import uint64
from chia.wallet.puzzles.singleton_top_layer import (
SINGLETON_LAUNCHER,
SINGLETON_LAUNCHER_HASH,
P2_SINGLETON_MOD,
solution_for_singleton,
)
from cic.load_clvm import load_clvm
SINGLETON_MOD = load_clvm("singleton_top_layer_v1_1.clsp", package_or_requirement="cic.clsp.singleton")
solve_singleton = solution_for_singleton
# Return the puzzle reveal of a singleton with specific ID and innerpuz
def construct_singleton(launcher_id: bytes32, inner_puz: Program) -> Program:
return SINGLETON_MOD.curry(
(SINGLETON_MOD.get_tree_hash(), (launcher_id, SINGLETON_LAUNCHER_HASH)),
inner_puz,
)
def generate_launch_conditions_and_coin_spend(
coin: Coin,
inner_puzzle: Program,
amount: uint64,
) -> Tuple[List[Program], CoinSpend]:
if (amount % 2) == 0:
raise ValueError("Coin amount cannot be even. Subtract one mojo.")
launcher_coin = Coin(coin.name(), SINGLETON_LAUNCHER_HASH, amount)
curried_singleton: Program = construct_singleton(launcher_coin.name(), inner_puzzle)
launcher_solution = Program.to(
[
curried_singleton.get_tree_hash(),
amount,
[],
]
)
create_launcher = Program.to(
[
ConditionOpcode.CREATE_COIN,
SINGLETON_LAUNCHER_HASH,
amount,
],
)
assert_launcher_announcement = Program.to(
[
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT,
std_hash(launcher_coin.name() + launcher_solution.get_tree_hash()),
],
)
conditions = [create_launcher, assert_launcher_announcement]
launcher_coin_spend = CoinSpend(
launcher_coin, | SINGLETON_LAUNCHER,
launcher_solution,
)
return conditions, launcher_coin_spend
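    # Illustrative usage sketch (the coin/puzzle variable names here are
    # assumptions, not defined in this module):
    #   conditions, launcher_spend = generate_launch_conditions_and_coin_spend(
    #       origin_coin, inner_puzzle, uint64(1))
    # The returned conditions go into the origin coin's own spend (creating the
    # launcher and asserting its announcement), while launcher_spend is included
    # in the same spend bundle; the singleton amount must be odd, hence uint64(1).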
def construct_p2_singleton(launcher_id: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD.get_tree_hash(), launcher_id, SINGLETON_LAUNCHER_HASH)
def solve_p2_singleton(p2_singleton_coin: Coin, singleton_inner_puzhash: bytes32) -> Program:
return Program.to([singleton_inner_puzhash, p2_singleton_coin.name()]) | |
sri.dashboard.controller.DataCaptured.js | 'use strict'; | angular.module('sri.controller.DataCaptured', [])
.controller('DataCapturedCTRL', ['$scope', function( $scope ) {
$scope.truck = $scope.truck || {};
$scope.dataCaptured = $scope.dataCaptured || {};
$scope.dataCaptured.id = 'dataCaptured';
$scope.dataCaptured.title = 'Data Captured';
var currentTruck = $scope.truck.current;
$scope.dataCaptured.licensePlate = currentTruck.licensePlate;
$scope.dataCaptured.dotNumber = currentTruck.dotNumber;
$scope.dataCaptured.weight = currentTruck.weight;
$scope.$watch('truck', function( newValue, oldValue ) {
if( newValue !== oldValue ) {
currentTruck = $scope.truck.current;
$scope.dataCaptured.licensePlate = currentTruck.licensePlate;
$scope.dataCaptured.dotNumber = currentTruck.dotNumber;
$scope.dataCaptured.weight = currentTruck.weight;
}
}, true);
}]); | |
queue.js | class QueueNode {
constructor(val) {
this.val = val;
this.next = null;
}
}
class Queue {
constructor() {
this.first = null;
this.last = null;
}
push(val) {
if(this.last)
this.last.next = new QueueNode(val);
else {
this.last = new QueueNode(val);
this.first = this.last;
}
if(this.last.next)
this.last = this.last.next; |
front() {
if(this.first == null) {
return null;
}
let val = this.first.val;
        this.first = this.first.next;
        if(this.first == null)
            this.last = null; // queue is now empty, so reset the tail pointer too
        return val;
}
}
// let queue = new Queue();
// queue.push(10);
// queue.push(20);
// queue.push(30);
// console.log(queue);
// console.log("front: " + queue.front());
// console.log("front: " + queue.front());
// queue.push(40);
// console.log("front: " + queue.front());
// console.log("front: " + queue.front());
// console.log("front: " + queue.front()); | } |
service.go | package user
type UserUpdate struct {
User *User
Status string
}
type UserService interface {
GetUser(string) (*User, error)
PutUser(*User) (*User, error)
DeleteUser(string) error
GetUsersByCountry(string) ([]*User, error)
}
type Resources struct {
Repo Repository
UserChannel chan *UserUpdate
}
type service struct {
*Resources
}
func | (r *Resources) (*service, error) {
return &service{
Resources: r,
}, nil
}
| NewService |
augmentedrw.go | // Copyright 2016 Martin Angers. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package augmentedrw implements a middleware that replaces the standard
// http.ResponseWriter with one that records the Size and Status of the
// response. This is primarily useful for logging requests.
package augmentedrw
import (
"bufio"
"errors"
"net"
"net/http"
)
// Wrap returns a handler that calls h with an augmented http.ResponseWriter,
// that is, one that records the Size and Status code of the response.
func Wrap(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// do not create the augmented response writer if it already implements
// Size and Status.
if _, ok := w.(interface {
Size() int
Status() int
}); !ok {
w = &responseWriter{w, 0, 0}
}
h.ServeHTTP(w, r)
})
}
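// Illustrative usage sketch (the handler and middleware names below are
// assumptions, not part of this package): Wrap is typically installed
// outermost, so an inner logging middleware receives the augmented writer and
// can type-assert it after calling the next handler:
//
//	srv := augmentedrw.Wrap(logging(app))
//	// inside logging, after next.ServeHTTP(w, r):
//	//   if rw, ok := w.(interface{ Size() int; Status() int }); ok {
//	//       log.Printf("status=%d bytes=%d", rw.Status(), rw.Size())
//	//   }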
// responseWriter is an augmented response writer that keeps track
// of the response's status and body size.
type responseWriter struct {
http.ResponseWriter
size int
status int
}
func (w *responseWriter) Size() int {
return w.size
}
func (w *responseWriter) Status() int {
return w.status
}
func (w *responseWriter) WriteHeader(code int) {
if w.status == 0 {
w.status = code
}
w.ResponseWriter.WriteHeader(code)
}
func (w *responseWriter) Write(b []byte) (int, error) {
if w.status == 0 {
w.status = 200
}
n, err := w.ResponseWriter.Write(b)
w.size += n
return n, err
}
func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hj, ok := w.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("hijack is not supported")
}
return hj.Hijack()
}
func (w *responseWriter) CloseNotify() <-chan bool {
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (w *responseWriter) Flush() {
f, ok := w.ResponseWriter.(http.Flusher)
if ok |
}
| {
f.Flush()
} |
gp.py | import os
import pickle
# from matplotlib import pyplot as plt
import numpy as np
from . import AGD_decomposer
from . import gradient_descent
from . import ioHDF5
class GaussianDecomposer(object):
def __init__(self, filename=None, phase="one"):
if filename:
temp = pickle.load(open(filename, "rb"))
self.p = temp.p
else:
self.p = {
"alpha1": None,
"alpha2": None,
"alpha_em": None,
"alpha1_em": None,
"alpha2_em": None,
"training_results": None,
"phase": "one",
"SNR2_thresh": 5.0,
"SNR_thresh": 5.0,
"SNR_em": 5.0,
"wiggle": 0.1,
"drop_width": 10,
"min_dv": 0,
"deblend": True,
"mode": "python",
"BLFrac": 0.1,
"verbose": False,
# "plot": False,
"perform_final_fit": True,
}
def load_training_data(self, filename):
self.p["training_data"] = pickle.load(open(filename, "rb"))
def load_hdf5_data(self, filename):
return ioHDF5.fromHDF5(filename)
def dump_hdf5_data(self, data, filename):
ioHDF5.toHDF5(data, filename)
def train(
self,
alpha1_initial=None,
alpha2_initial=None,
# plot=False,
verbose=False,
mode="python",
learning_rate=0.9,
eps=0.25,
MAD=0.1,
):
""" Solve for optimal values of alpha1 (and alpha2) using training data """
if ((self.p["phase"] == "one") and (not alpha1_initial)) or (
(self.p["phase"] == "two")
            and ((not alpha1_initial) or (not alpha2_initial))
):
print("Must choose initial guesses.")
print("e.g., train(alpha1_initial=1.0, alpha2_initial=100.)")
return
if not self.p["training_data"]:
print("Must first load training data.")
print('e.g., load_training_data("training_data.pickle")')
return
print("Training...")
(
self.p["alpha1"],
self.p["alpha2"],
self.p["training_results"],
) = gradient_descent.train(
alpha1_initial=alpha1_initial,
alpha2_initial=alpha2_initial,
training_data=self.p["training_data"],
phase=self.p["phase"],
SNR_thresh=self.p["SNR_thresh"],
SNR2_thresh=self.p["SNR2_thresh"],
# plot=plot,
eps=eps,
verbose=verbose,
mode=mode,
learning_rate=learning_rate,
MAD=MAD,
)
def decompose(self, xdata, ydata, edata):
""" Decompose a single spectrum using current parameters """
if (self.p["phase"] == "one") and (not self.p["alpha1"]):
print("phase = one, and alpha1 is unset")
return
if (self.p["phase"] == "two") and (
(not self.p["alpha1"]) or (not self.p["alpha2"])
):
print("phase = two, and either alpha1 or alpha2 is unset")
return
if self.p["mode"] != "conv":
a1 = 10 ** self.p["alpha1"]
a2 = 10 ** self.p["alpha2"] if self.p["phase"] == "two" else None
else:
a1 = self.p["alpha1"]
a2 = self.p["alpha2"] if self.p["phase"] == "two" else None
status, results = AGD_decomposer.AGD(
xdata,
ydata,
edata,
alpha1=a1,
alpha2=a2,
phase=self.p["phase"],
mode=self.p["mode"],
verbose=self.p["verbose"],
SNR_thresh=self.p["SNR_thresh"],
BLFrac=self.p["BLFrac"],
SNR2_thresh=self.p["SNR2_thresh"],
deblend=self.p["deblend"],
perform_final_fit=self.p["perform_final_fit"],
# plot=self.p["plot"],
)
return results
def decompose_double(self, xdata, ydata, xdata_em, ydata_em, edata, edata_em):
""" Decompose an absorption and emission pair simultaneously """
if (self.p["phase"] == "one") and (not self.p["alpha1"]):
print("phase = one, and alpha1 is unset")
return
if (self.p["phase"] == "two") and (
(not self.p["alpha1"]) or (not self.p["alpha2"])
):
print("phase = two, and either alpha1 or alpha2 is unset")
return
if self.p["mode"] != "conv":
a1 = 10 ** self.p["alpha1"]
a2 = 10 ** self.p["alpha2"] if self.p["phase"] == "two" else None
aem = 10 ** self.p["alpha_em"]
wgle = self.p["wiggle"]
dw = self.p["drop_width"]
mdv = self.p["min_dv"]
else:
a1 = self.p["alpha1"]
a2 = self.p["alpha2"] if self.p["phase"] == "two" else None
aem = self.p["alpha_em"]
status, results = AGD_decomposer.AGD_double(
xdata,
ydata,
xdata_em,
ydata_em,
edata,
edata_em,
# scale=self.p["scale"],
alpha1=a1,
alpha2=a2,
alpha_em=aem,
wiggle=self.p["wiggle"],
drop_width=self.p["drop_width"],
min_dv=self.p["min_dv"],
phase=self.p["phase"],
mode=self.p["mode"],
verbose=self.p["verbose"],
SNR_thresh=self.p["SNR_thresh"],
BLFrac=self.p["BLFrac"],
SNR2_thresh=self.p["SNR2_thresh"],
SNR_em=self.p["SNR_em"],
deblend=self.p["deblend"],
perform_final_fit=self.p["perform_final_fit"],
# plot=self.p["plot"],
)
return results
def status(self):
""" Return current values of parameters """
print("Current Parameters:")
print("---" * 10)
for index, key in enumerate(self.p):
if key in [
"data_list",
"errors",
"x_values",
"amplitudes",
"fwhms",
"means",
"amplitudes_fit",
"fwhms_fit",
"means_fit",
]:
print("len({0}) = {1}".format(key, len(self.p[key])))
else:
print(key, " = ", self.p[key])
def set(self, key, value):
if key in self.p:
self.p[key] = value
else:
print("Given key does not exist.")
def | (self, filename, clobber=False):
""" Save the current decomposer object, and all
associated parameters to a python pickle file."""
if os.path.isfile(filename):
if clobber:
os.remove(filename)
else:
print("File exists: ", filename)
return
pickle.dump(self, open(filename, "wb"))
def batch_decomposition(self, science_data_path, ilist=None):
""" Science data sould be AGD format
ilist is either None or an integer list"""
# Dump information to hard drive to allow multiprocessing
pickle.dump(
[self, science_data_path, ilist], open("batchdecomp_temp.pickle", "wb")
)
from . import batch_decomposition
batch_decomposition.init()
result_list = batch_decomposition.func()
print("SUCCESS")
new_keys = [
"index_fit",
"amplitudes_fit",
"fwhms_fit",
"means_fit",
"index_initial",
"amplitudes_initial",
"fwhms_initial",
"means_initial",
"amplitudes_fit_err",
"fwhms_fit_err",
"means_fit_err",
"best_fit_rchi2",
"amplitudes_fit_em",
"fwhms_fit_em",
"means_fit_em",
"means_fit_err_em",
"amplitudes_fit_err_em",
"fwhms_fit_err_em",
"fit_labels",
]
output_data = dict((key, []) for key in new_keys)
for i, result in enumerate(result_list):
# print(result.keys())
# print(result)
# Save best-fit parameters
ncomps = result["N_components"]
amps = result["best_fit_parameters"][0:ncomps] if ncomps > 0 else []
fwhms = (
result["best_fit_parameters"][ncomps : 2 * ncomps] if ncomps > 0 else []
)
offsets = (
result["best_fit_parameters"][2 * ncomps : 3 * ncomps]
if ncomps > 0
else []
)
output_data["amplitudes_fit"].append(amps)
output_data["fwhms_fit"].append(fwhms)
output_data["means_fit"].append(offsets)
output_data["index_fit"].append([i for j in range(ncomps)])
# Save initial guesses if something was found
ncomps_initial = len(result["initial_parameters"]) // 3
amps_initial = (
result["initial_parameters"][0:ncomps_initial]
if ncomps_initial > 0
else []
)
fwhms_initial = (
result["initial_parameters"][ncomps_initial : 2 * ncomps_initial]
if ncomps_initial > 0
else []
)
offsets_initial = (
result["initial_parameters"][2 * ncomps_initial : 3 * ncomps_initial]
if ncomps_initial > 0
else []
)
output_data["means_initial"].append(offsets_initial)
output_data["fwhms_initial"].append(fwhms_initial)
output_data["amplitudes_initial"].append(amps_initial)
output_data["index_initial"].append([i for j in range(ncomps_initial)])
# Final fit errors
rchi2 = [result["rchi2"]] if "rchi2" in result else None
amps_err = result["best_fit_errors"][0:ncomps] if ncomps_initial > 0 else []
fwhms_err = (
result["best_fit_errors"][ncomps : 2 * ncomps]
if ncomps_initial > 0
else []
)
offsets_err = (
result["best_fit_errors"][2 * ncomps : 3 * ncomps]
if ncomps_initial > 0
else []
)
output_data["best_fit_rchi2"].append(rchi2)
output_data["means_fit_err"].append(offsets_err)
output_data["fwhms_fit_err"].append(fwhms_err)
output_data["amplitudes_fit_err"].append(amps_err)
if self.p["alpha_em"] is not None:
ncomps = (
len(result["best_fit_parameters_em"]) // 3
if "best_fit_parameters_em" in result
else 0
)
# print("to save:", ncomps)
amps = (
result["best_fit_parameters_em"][0:ncomps]
if "best_fit_parameters_em" in result
else []
)
fwhms = (
result["best_fit_parameters_em"][ncomps : 2 * ncomps]
if "best_fit_parameters_em" in result
else []
)
offsets = (
result["best_fit_parameters_em"][2 * ncomps : 3 * ncomps]
if "best_fit_parameters_em" in result
else []
)
fit_labels = (
result["fit_labels"] if "best_fit_parameters_em" in result else []
)
output_data["amplitudes_fit_em"].append(amps)
output_data["fwhms_fit_em"].append(fwhms)
output_data["means_fit_em"].append(offsets)
output_data["fit_labels"].append(fit_labels)
amps_err = (
result["best_fit_errors_em"][0:ncomps]
if "best_fit_parameters_em" in result
else []
)
fwhms_err = (
result["best_fit_errors_em"][ncomps : 2 * ncomps]
if "best_fit_parameters_em" in result
else []
)
offsets_err = (
result["best_fit_errors_em"][2 * ncomps : 3 * ncomps]
if "best_fit_parameters_em" in result
else []
)
output_data["means_fit_err_em"].append(offsets_err)
output_data["fwhms_fit_err_em"].append(fwhms_err)
output_data["amplitudes_fit_err_em"].append(amps_err)
print("100 finished.%")
return output_data
# def plot_components(
# self,
# data,
# index,
# xlabel="x",
# ylabel="y",
# xlim=None,
# ylim=None,
# guesses=False,
# plot_true=False,
# ):
# # Extract info from data (must contain 'fit' categories)
# x = data["x_values"][index]
# y = data["data_list"][index]
#
# fwhms = data["fwhms_fit"][index]
# amps = data["amplitudes_fit"][index]
# means = data["means_fit"][index]
#
# fwhms_guess = data["fwhms_initial"][index]
# amps_guess = data["amplitudes_initial"][index]
# means_guess = data["means_initial"][index]
#
# ncomps = len(amps)
#
# if "amplitudes" in data:
# fwhms_true = data["fwhms"][index]
# amps_true = data["amplitudes"][index]
# means_true = data["means"][index]
#
# plt.plot(x, y, "-k", label="data", lw=1.5)
#
# # Plot fitted, components
# sum_fit = x * 0.0
# for i, amp in enumerate(amps):
# model = amp * np.exp(-(x - means[i]) ** 2 / 2.0 / (fwhms[i] / 2.355) ** 2)
# model_guess = amps_guess[i] * np.exp(
# -(x - means_guess[i]) ** 2 / 2.0 / (fwhms_guess[i] / 2.355) ** 2
# )
# sum_fit = sum_fit + model
# plt.plot(x, model, "-g", lw=0.5)
# if guesses:
# plt.plot(x, model_guess, "--g", lw=1)
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16)
# if xlim:
# plt.xlim(*xlim)
# if ylim:
# plt.ylim(*ylim)
# plt.plot(x, sum_fit, "-g", lw=1.0, label="Fit")
#
# # If available, plot True components
# sum_true = x * 0.0
# if ("amplitudes" in data) and plot_true:
# for i, amp in enumerate(amps_true):
# model_true = amps_true[i] * np.exp(
# -(x - means_true[i]) ** 2 / 2.0 / (fwhms_true[i] / 2.355) ** 2
# )
# sum_true = sum_true + model_true
# plt.plot(x, model_true, "-r", lw=0.5)
# plt.plot(x, sum_true, "-r", lw=1.0, label="True")
#
# plt.title("index = {0}, ncomps = {1}".format(index, ncomps), fontsize=16)
# plt.legend(loc=0)
# plt.legend(loc=1)
# plt.show()
| save_state |
proposal_layer_tf.py | # -*- coding:utf-8 -*-
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
import yaml
from .generate_anchors import generate_anchors
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes
from ..fast_rcnn.nms_wrapper import nms
# <<<< obsolete
DEBUG = False
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info, cfg_key, _feat_stride = [16,], anchor_scales = [16,]):
| max_ratio) & (rs >= min_ratio))[0]
return keep
| """
Parameters
----------
rpn_cls_prob_reshape: (1 , H , W , Ax2) outputs of RPN, prob of bg or fg
NOTICE: the old version is ordered by (1, H, W, 2, A) !!!!
rpn_bbox_pred: (1 , H , W , Ax4), rgs boxes output of RPN
im_info: a list of [image_height, image_width, scale_ratios]
cfg_key: 'TRAIN' or 'TEST'
_feat_stride: the downsampling ratio of feature map to the original input image
anchor_scales: the scales to the basic_anchor (basic anchor is [16, 16])
----------
Returns
----------
rpn_rois : (1 x H x W x A, 5) e.g. [0, x1, y1, x2, y2]
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
#layer_params = yaml.load(self.param_str_)
"""
    _anchors = generate_anchors(scales=np.array(anchor_scales))  # generate the 9 base anchors
    _num_anchors = _anchors.shape[0]  # 9 anchors
    im_info = im_info[0]  # height, width and scale factor of the original image
assert rpn_cls_prob_reshape.shape[0] == 1, \
'Only single item batches are supported'
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N  # 12000; max number of candidate boxes kept before NMS
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N  # 2000; max number of boxes kept after NMS
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH  # NMS threshold, 0.7
    min_size = cfg[cfg_key].RPN_MIN_SIZE  # minimum candidate box size, currently 16; both height and width must exceed it
    # TODO: revisit this minimum size later, maybe lower it to 8?
    height, width = rpn_cls_prob_reshape.shape[1:3]  # height and width of the feature map
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
# (1, H, W, A)
scores = np.reshape(np.reshape(rpn_cls_prob_reshape, [1, height, width, _num_anchors, 2])[:,:,:,:,1],
[1, height, width, _num_anchors])
    # extract the object scores (the non-object scores are not needed)
    # and reshape to 1*H*W*9
# TODO: NOTICE: the old version is ordered by (1, H, W, 2, A) !!!!
# TODO: if you use the old trained model, VGGnet_fast_rcnn_iter_70000.ckpt, uncomment this line
# scores = rpn_cls_prob_reshape[:,:,:,_num_anchors:]
    bbox_deltas = rpn_bbox_pred  # the network predictions are relative offsets and still need to be converted into real image coordinates
#im_info = bottom[2].data[0, :]
if DEBUG:
print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
print 'scale: {}'.format(im_info[2])
# 1. Generate proposals from bbox deltas and shifted anchors
if DEBUG:
print 'score map size: {}'.format(scores.shape)
# Enumerate all shifts
    # As in anchor_target_layer_tf, generate the anchor shifts and from them obtain all anchors over the whole image
shift_x = np.arange(0, width) * _feat_stride
shift_y = np.arange(0, height) * _feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = _num_anchors
K = shifts.shape[0]
anchors = _anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = anchors.reshape((K * A, 4))  # these are now all anchors over the whole image
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# bbox deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = bbox_deltas.reshape((-1, 4)) #(HxWxA, 4)
# Same story for the scores:
scores = scores.reshape((-1, 1))
# Convert anchors into proposals via bbox transformations
    proposals = bbox_transform_inv(anchors, bbox_deltas)  # apply the inverse transform to get real image coordinates for the boxes
    # 2. clip predicted boxes to image
    proposals = clip_boxes(proposals, im_info[:2])  # clip every proposal so nothing extends beyond the image boundary
    # 3. remove predicted boxes with either height or width < threshold
    # (NOTE: convert min_size to input image scale stored in im_info[2])
    keep = _filter_boxes(proposals, min_size * im_info[2])  # drop proposals smaller than the minimum size
    proposals = proposals[keep, :]  # keep the remaining proposals
scores = scores[keep]
bbox_deltas=bbox_deltas[keep,:]
# # remove irregular boxes, too fat too tall
# keep = _filter_irregular_boxes(proposals)
# proposals = proposals[keep, :]
# scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
    order = scores.ravel().argsort()[::-1]  # sort proposals by score from highest to lowest
    if pre_nms_topN > 0:  # keep at most 12000 proposals going into NMS
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
bbox_deltas=bbox_deltas[order,:]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
    keep = nms(np.hstack((proposals, scores)), nms_thresh)  # run NMS, keeping at most 2000 proposals
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
bbox_deltas=bbox_deltas[keep,:]
# Output rois blob
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
blob = np.hstack((scores.astype(np.float32, copy=False), proposals.astype(np.float32, copy=False)))
'''
if cfg_key == 'TEST':
blob = np.hstack((scores.astype(np.float32, copy=False), proposals.astype(np.float32, copy=False)))
else:
        batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32) # the final rois to return
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
'''
return blob,bbox_deltas
#top[0].reshape(*(blob.shape))
#top[0].data[...] = blob
# [Optional] output scores blob
#if len(top) > 1:
# top[1].reshape(*(scores.shape))
# top[1].data[...] = scores
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
def _filter_irregular_boxes(boxes, min_ratio = 0.2, max_ratio = 5):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
rs = ws / hs
keep = np.where((rs <= |
advanced_example.py | # coding: utf-8
import json
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
try: |
print('Loading data...')
# load or create your dataset
df_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
num_train, num_feature = X_train.shape
# create dataset for lightgbm
# if you want to re-use data, remember to set free_raw_data=False
lgb_train = lgb.Dataset(X_train, y_train,
weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
weight=W_test, free_raw_data=False)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# generate feature names
feature_name = ['feature_' + str(col) for col in range(num_feature)]
print('Starting training...')
# feature_name and categorical_feature
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train, # eval training data
feature_name=feature_name,
categorical_feature=[21])
print('Finished first 10 rounds...')
# check feature name
print('7th feature name is:', lgb_train.feature_name[6])
print('Saving model...')
# save model to file
gbm.save_model('model.txt')
print('Dumping model to JSON...')
# dump model to JSON (and save to file)
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
# feature names
print('Feature names:', gbm.feature_name())
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
print('Loading model to predict...')
# load model to predict
bst = lgb.Booster(model_file='model.txt')
# can only predict with the best iteration (or the saving iteration)
y_pred = bst.predict(X_test)
# eval with loaded model
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
print('Dumping and loading model with pickle...')
# dump model with pickle
with open('model.pkl', 'wb') as fout:
pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
pkl_bst = pickle.load(fin)
# can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print("The rmse of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
# continue training
# init_model accepts:
# 1. model file name
# 2. Booster()
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model='model.txt',
valid_sets=lgb_eval)
print('Finished 10 - 20 rounds with model file...')
# decay learning rates
# learning_rates accepts:
# 1. list/tuple with length = num_boost_round
# 2. function(curr_iter)
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter),
valid_sets=lgb_eval)
print('Finished 20 - 30 rounds with decay learning rates...')
# change other parameters during training
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
valid_sets=lgb_eval,
callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])
print('Finished 30 - 40 rounds with changing bagging_fraction...')
# self-defined objective function
# f(preds: array, train_data: Dataset) -> grad: array, hess: array
# log likelihood loss
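# (Derivation sketch: with p = sigmoid(margin) and the negative log likelihood
#  -[y*log(p) + (1-y)*log(1-p)], the gradient w.r.t. the margin is p - y and the
#  hessian is p*(1-p), which is exactly what the function below returns.)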
def loglikelihood(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
grad = preds - labels
hess = preds * (1. - preds)
return grad, hess
# self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# binary error
# NOTE: when you use a customized loss function, the default prediction value is the raw margin
# This may make built-in evaluation metrics calculate wrong results
# For example, when doing log likelihood loss, the prediction is the score before the logistic transformation
# Keep this in mind when you use the customization
def binary_error(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'error', np.mean(labels != (preds > 0.5)), False
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=binary_error,
valid_sets=lgb_eval)
print('Finished 40 - 50 rounds with self-defined objective function and eval metric...')
# another self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# accuracy
# NOTE: when you use a customized loss function, the default prediction value is the raw margin
# This may make built-in evaluation metrics calculate wrong results
# For example, when doing log likelihood loss, the prediction is the score before the logistic transformation
# Keep this in mind when you use the customization
def accuracy(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'accuracy', np.mean(labels == (preds > 0.5)), True
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=lambda preds, train_data: [binary_error(preds, train_data),
accuracy(preds, train_data)],
valid_sets=lgb_eval)
print('Finished 50 - 60 rounds with self-defined objective function '
'and multiple self-defined eval metrics...')
print('Starting a new training job...')
# callback
def reset_metrics():
def callback(env):
lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)
if env.iteration - env.begin_iteration == 5:
print('Add a new valid dataset at iteration 5...')
env.model.add_valid(lgb_eval_new, 'new_valid')
callback.before_iteration = True
callback.order = 0
return callback
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
callbacks=[reset_metrics()])
print('Finished first 10 rounds with callback function...') | import cPickle as pickle
except BaseException:
import pickle |
sub.rs | use super::add::handle_add_op;
use crate::{object::Array, Evaluator, Object, RuntimeErrorKind};
/// This calls the add op under the hood
/// We negate the RHS and send it to the add op
pub fn handle_sub_op(
left: Object,
right: Object,
evaluator: &mut Evaluator,
) -> Result<Object, RuntimeErrorKind> | {
let negated_right = match right {
Object::Null => {
return Err(RuntimeErrorKind::UnstructuredError {
message: "cannot do an operation with the null object".to_string(),
})
}
Object::Arithmetic(arith) => Object::Arithmetic(-&arith),
Object::Constants(c) => Object::Constants(-c),
Object::Linear(linear) => Object::Linear(-&linear),
Object::Integer(_rhs_integer) => {
let left_int = left.integer();
match left_int {
Some(left_int) => return Ok(Object::Integer(left_int.sub(right, evaluator)?)),
None => {
return Err(RuntimeErrorKind::UnstructuredError {
message: "rhs is an integer, however the lhs is not".to_string(),
})
}
}
}
Object::Array(_right_arr) => {
let left_arr = left.array();
match left_arr {
Some(left_arr) => {
return Ok(Object::Array(Array::sub(left_arr, _right_arr, evaluator)?))
}
None => {
return Err(RuntimeErrorKind::UnstructuredError {
message: "rhs is an integer, however the lhs is not".to_string(),
})
}
}
}
};
handle_add_op(left, negated_right, evaluator)
} |
|
base_events.py | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
# Never happens if peer disconnects after sending the whole content
# Thus disconnection is always an exception from user perspective
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return ()
return tuple(trsock.TransportSocket(s) for s in self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except exceptions.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback: |
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
"""Schedule the shutdown of the default executor."""
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
self._check_running()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
self._check_running()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
# Only check when the default executor is being used
self._check_default_executor()
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(
thread_name_prefix='asyncio'
)
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
"""Create, bind and connect one socket."""
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else: # all bind attempts failed
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
'host and port was not specified and no sock specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
# Disallowing AF_UNIX in this method, breaks backwards
# compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
sending the file until EOF is reached. File position is updated on
return or also in case of error in which case file.tell()
can be used to figure out the number of bytes
which were sent.
        fallback set to True makes asyncio manually read and send
the file when the platform does not support the sendfile syscall
(e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
"""Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
# have a chance to get called before "ssl_protocol.connection_made()".
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=_unset, reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if sock.type != socket.SOCK_DGRAM:
raise ValueError(
f'A UDP Socket was expected, got {sock!r}')
if (local_addr or remote_addr or
family or proto or flags or
reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_address=reuse_address, reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
raise ValueError(
f'socket modifier keyword arguments can not be used '
f'when sock is specified. ({problems})')
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
for addr in (local_addr, remote_addr):
if addr is not None and not isinstance(addr, str):
raise TypeError('string is expected')
if local_addr and local_addr[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
os.remove(local_addr)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX '
'socket %r: %r',
local_addr, err)
addr_pairs_info = (((family, proto),
(local_addr, remote_addr)), )
else:
# join address by (family, protocol)
addr_infos = {} # Using order preserving dict
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = await self._ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
# bpo-37228
if reuse_address is not _unset:
if reuse_address:
raise ValueError("Passing `reuse_address=True` is no "
"longer supported, as the usage of "
"SO_REUSEPORT in UDP poses a significant "
"security concern.")
else:
warnings.warn("The *reuse_address* parameter has been "
"deprecated as of 3.5.10 and is scheduled "
"for removal in 3.11.", DeprecationWarning,
stacklevel=2)
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
if not allow_broadcast:
await self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
async def create_server(
self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None,
ssl_handshake_timeout=None,
start_serving=True):
"""Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and ssl is None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.abc.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = await tasks.gather(*fs)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower())) from None
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
sockets = [sock]
for sock in sockets:
sock.setblocking(False)
server = Server(self, sockets, protocol_factory,
ssl, backlog, ssl_handshake_timeout)
if start_serving:
server._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0)
if self._debug:
logger.info("%r is serving", server)
return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context and
self._current_handle is not None and
self._current_handle._source_traceback):
context['handle_traceback'] = \
self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append(f'{key}: {value}')
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
        New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled) | del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name) |
run_turicreate.py | import os
os.environ["OMP_NUM_THREADS"] = "10"
import sys
import pandas as pd
import numpy as np
import turicreate as tc
for i in range(1, 14):
print("running batch %d" % i)
batch = pd.read_csv("batches/batch_%d_train.dat" % i)
test_users = pd.read_csv("batches/batch_%d_test.dat" % i)
model = tc.ranking_factorization_recommender.create(
tc.SFrame(batch),
'user',
'item',
num_factors=10,
verbose=True,
solver='ials',
max_iterations=50,
ials_confidence_scaling_factor=30
) | results = model.recommend(users=test_users.user.values, k=100, exclude_known=True, verbose=False)
results.to_dataframe()[['user', 'item', 'rank']].to_csv('batches/batch_%d_predictions.dat' % i, sep=' ', header=False, index=False) | |
kv.go | // Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package loader
import (
"bufio"
"bytes"
"fmt"
"os"
"path"
"strings"
"unicode"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/irairdon/kustomize/v3/pkg/ifc"
"github.com/irairdon/kustomize/v3/pkg/types"
)
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
func (fl *fileLoader) Validator() ifc.Validator {
return fl.validator
}
func (fl *fileLoader) LoadKvPairs(
args types.GeneratorArgs) (all []types.Pair, err error) {
pairs, err := fl.keyValuesFromEnvFiles(args.EnvSources)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf(
"env source files: %v",
args.EnvSources))
}
all = append(all, pairs...)
pairs, err = keyValuesFromLiteralSources(args.LiteralSources)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf(
"literal sources %v", args.LiteralSources))
}
all = append(all, pairs...)
pairs, err = fl.keyValuesFromFileSources(args.FileSources)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf(
"file sources: %v", args.FileSources))
}
return append(all, pairs...), nil
}
func keyValuesFromLiteralSources(sources []string) ([]types.Pair, error) {
var kvs []types.Pair
for _, s := range sources {
k, v, err := parseLiteralSource(s)
if err != nil {
return nil, err
}
kvs = append(kvs, types.Pair{Key: k, Value: v})
}
return kvs, nil
}
func (fl *fileLoader) keyValuesFromFileSources(sources []string) ([]types.Pair, error) {
var kvs []types.Pair
for _, s := range sources {
k, fPath, err := parseFileSource(s)
if err != nil {
return nil, err
}
content, err := fl.Load(fPath) | }
return kvs, nil
}
func (fl *fileLoader) keyValuesFromEnvFiles(paths []string) ([]types.Pair, error) {
var kvs []types.Pair
for _, p := range paths {
content, err := fl.Load(p)
if err != nil {
return nil, err
}
more, err := fl.keyValuesFromLines(content)
if err != nil {
return nil, err
}
kvs = append(kvs, more...)
}
return kvs, nil
}
// keyValuesFromLines parses the given content into a list of key-value pairs.
func (fl *fileLoader) keyValuesFromLines(content []byte) ([]types.Pair, error) {
var kvs []types.Pair
scanner := bufio.NewScanner(bytes.NewReader(content))
currentLine := 0
for scanner.Scan() {
// Process the current line, retrieving a key/value pair if
// possible.
scannedBytes := scanner.Bytes()
kv, err := fl.keyValuesFromLine(scannedBytes, currentLine)
if err != nil {
return nil, err
}
currentLine++
if len(kv.Key) == 0 {
// no key means line was empty or a comment
continue
}
kvs = append(kvs, kv)
}
return kvs, nil
}
// keyValuesFromLine returns a kv with blank key if the line is empty or a comment.
// The value will be retrieved from the environment if necessary.
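// For example (illustrative, not from the original source): the line `FOO=bar`
// yields {Key: "FOO", Value: "bar"}, while a bare `FOO` yields
// {Key: "FOO", Value: os.Getenv("FOO")}.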
func (fl *fileLoader) keyValuesFromLine(line []byte, currentLine int) (types.Pair, error) {
kv := types.Pair{}
if !utf8.Valid(line) {
return kv, fmt.Errorf("line %d has invalid utf8 bytes : %v", line, string(line))
}
// We trim UTF8 BOM from the first line of the file but no others
if currentLine == 0 {
line = bytes.TrimPrefix(line, utf8bom)
}
// trim the line from all leading whitespace first
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
// If the line is empty or a comment, we return a blank key/value pair.
if len(line) == 0 || line[0] == '#' {
return kv, nil
}
data := strings.SplitN(string(line), "=", 2)
key := data[0]
if err := fl.validator.IsEnvVarName(key); err != nil {
return kv, err
}
if len(data) == 2 {
kv.Value = data[1]
} else {
// No value (no `=` in the line) is a signal to obtain the value
// from the environment.
kv.Value = os.Getenv(key)
}
kv.Key = key
return kv, nil
}
// parseFileSource parses the source given.
//
// Acceptable formats include:
// 1. source-path: the basename will become the key name
// 2. source-name=source-path: the source-name will become the key name and
// source-path is the path to the key file.
//
// Key names cannot include '='.
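// Illustrative examples (hypothetical inputs):
//
//	parseFileSource("config.txt")             // -> key "config.txt", path "config.txt"
//	parseFileSource("app-config=cfg/app.txt") // -> key "app-config", path "cfg/app.txt"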
func parseFileSource(source string) (keyName, filePath string, err error) {
numSeparators := strings.Count(source, "=")
switch {
case numSeparators == 0:
return path.Base(source), source, nil
case numSeparators == 1 && strings.HasPrefix(source, "="):
return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "="))
case numSeparators == 1 && strings.HasSuffix(source, "="):
return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "="))
case numSeparators > 1:
return "", "", errors.New("key names or file paths cannot contain '='")
default:
components := strings.Split(source, "=")
return components[0], components[1], nil
}
}
// parseLiteralSource parses the source key=val pair into its component pieces.
// This functionality is distinguished from strings.SplitN(source, "=", 2) since
// it returns an error in the case of empty keys, values, or a missing equals sign.
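// Illustrative examples (hypothetical inputs):
//
//	parseLiteralSource("FOO=bar")   // -> ("FOO", "bar")
//	parseLiteralSource(`KEY="a=b"`) // -> ("KEY", "a=b"), surrounding quotes are trimmed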
func parseLiteralSource(source string) (keyName, value string, err error) {
// leading equal is invalid
if strings.Index(source, "=") == 0 {
return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
}
// split after the first equal (so values can have the = character)
items := strings.SplitN(source, "=", 2)
if len(items) != 2 {
return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
}
return items[0], strings.Trim(items[1], "\"'"), nil
} | if err != nil {
return nil, err
}
kvs = append(kvs, types.Pair{Key: k, Value: string(content)}) |
Token.py | #!/usr/bin/python
#-*- coding:utf-8 -*-
"""
Author: AsherYang
Email: [email protected]
Date: 2017/7/24
"""
class Token():
    @property
    def access_token(self):
        return self._access_token
    @access_token.setter
    def access_token(self, value):
        self._access_token = value
    @property
    def expire_in(self):
        return self._expire_in
    @expire_in.setter
    def expire_in(self, value):
        self._expire_in = value
@property
def | (self):
        return self._update_time
    @update_time.setter
    def update_time(self, value):
        self._update_time = value
| update_time |
environment.ts | /**
* @license
* Copyright Akveo. All Rights Reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*/
// The file contents for the current environment will overwrite these during build.
// The build system defaults to the dev environment which uses `environment.ts`, but if you do
// `ng build --env=prod` then `environment.prod.ts` will be used instead.
// The list of which env maps to which file can be found in `.angular-cli.json`.
export const environment = {
production: false, | apiUrl: 'http://localhost:8080',
}; |
|
array_queue.rs | //! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
//!
//! Source:
//! - <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::sync::atomic::{self, AtomicUsize, Ordering};
use crossbeam_utils::{Backoff, CachePadded};
/// A slot in a queue.
struct Slot<T> {
/// The current stamp.
///
/// If the stamp equals the tail, this node will be next written to. If it equals head + 1,
/// this node will be next read from.
stamp: AtomicUsize,
/// The value in this slot.
value: UnsafeCell<MaybeUninit<T>>,
}
/// A bounded multi-producer multi-consumer queue.
///
/// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed
/// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an
/// element into a full queue will fail. Having a buffer allocated upfront makes this queue a bit
/// faster than [`SegQueue`].
///
/// [`SegQueue`]: super::SegQueue
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(2);
///
/// assert_eq!(q.push('a'), Ok(()));
/// assert_eq!(q.push('b'), Ok(()));
/// assert_eq!(q.push('c'), Err('c'));
/// assert_eq!(q.pop(), Some('a'));
/// ```
pub struct ArrayQueue<T> {
/// The head of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
/// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
///
/// Elements are popped from the head of the queue.
head: CachePadded<AtomicUsize>,
/// The tail of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
/// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
///
/// Elements are pushed into the tail of the queue.
tail: CachePadded<AtomicUsize>,
/// The buffer holding slots.
buffer: Vec<Slot<T>>,
/// The queue capacity.
cap: usize,
/// A stamp with the value of `{ lap: 1, index: 0 }`.
one_lap: usize,
/// Indicates that dropping an `ArrayQueue<T>` may drop elements of type `T`.
_marker: PhantomData<T>,
}
unsafe impl<T: Send> Sync for ArrayQueue<T> {}
unsafe impl<T: Send> Send for ArrayQueue<T> {}
impl<T> ArrayQueue<T> {
/// Creates a new bounded queue with the given capacity.
///
/// # Panics
///
/// Panics if the capacity is zero.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::<i32>::new(100);
/// ```
pub fn new(cap: usize) -> ArrayQueue<T> {
assert!(cap > 0, "capacity must be non-zero");
// Head is initialized to `{ lap: 0, index: 0 }`.
// Tail is initialized to `{ lap: 0, index: 0 }`.
let head = 0;
let tail = 0;
// Allocate a buffer of `cap` slots initialized
// with stamps.
        let buffer: Vec<Slot<T>> = (0..cap)
            .map(|i| {
                // Set the stamp to `{ lap: 0, index: i }`.
                Slot {
                    stamp: AtomicUsize::new(i),
                    value: UnsafeCell::new(MaybeUninit::uninit()),
                }
            })
            .collect();
// One lap is the smallest power of two greater than `cap`.
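        // Worked example (illustrative): with cap = 6 this gives one_lap = 8, so a
        // stamp of 13 decodes to index 13 & (8 - 1) == 5 with lap bits
        // 13 & !(8 - 1) == 8, i.e. one full lap past the initial state.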
let one_lap = (cap + 1).next_power_of_two();
ArrayQueue {
buffer,
cap,
one_lap,
head: CachePadded::new(AtomicUsize::new(head)),
tail: CachePadded::new(AtomicUsize::new(tail)),
_marker: PhantomData,
}
}
/// Attempts to push an element into the queue.
///
/// If the queue is full, the element is returned back as an error.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(1);
///
/// assert_eq!(q.push(10), Ok(()));
/// assert_eq!(q.push(20), Err(20));
/// ```
pub fn push(&self, value: T) -> Result<(), T> {
let backoff = Backoff::new();
let mut tail = self.tail.load(Ordering::Relaxed);
loop {
// Deconstruct the tail.
let index = tail & (self.one_lap - 1);
let lap = tail & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { self.buffer.get_unchecked(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the tail and the stamp match, we may attempt to push.
if tail == stamp {
let new_tail = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, index: index + 1 }`.
tail + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the tail.
match self.tail.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Write the value into the slot and update the stamp.
unsafe {
slot.value.get().write(MaybeUninit::new(value));
}
slot.stamp.store(tail + 1, Ordering::Release);
return Ok(());
}
Err(t) => {
tail = t;
backoff.spin();
}
}
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
atomic::fence(Ordering::SeqCst);
let head = self.head.load(Ordering::Relaxed);
// If the head lags one lap behind the tail as well...
if head.wrapping_add(self.one_lap) == tail {
// ...then the queue is full.
return Err(value);
}
backoff.spin();
tail = self.tail.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
tail = self.tail.load(Ordering::Relaxed);
}
}
}
/// Attempts to pop an element from the queue.
///
/// If the queue is empty, `None` is returned.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(1);
/// assert_eq!(q.push(10), Ok(()));
///
/// assert_eq!(q.pop(), Some(10));
/// assert!(q.pop().is_none());
/// ```
pub fn pop(&self) -> Option<T> {
let backoff = Backoff::new();
let mut head = self.head.load(Ordering::Relaxed);
loop {
// Deconstruct the head.
let index = head & (self.one_lap - 1);
let lap = head & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { self.buffer.get_unchecked(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
            // If the stamp is ahead of the head by 1, we may attempt to pop.
if head + 1 == stamp {
let new = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the head.
match self.head.compare_exchange_weak(
head,
new,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Read the value from the slot and update the stamp.
let msg = unsafe { slot.value.get().read().assume_init() };
slot.stamp
.store(head.wrapping_add(self.one_lap), Ordering::Release);
return Some(msg);
}
Err(h) => {
head = h;
backoff.spin();
}
}
} else if stamp == head {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.load(Ordering::Relaxed);
// If the tail equals the head, that means the channel is empty.
if tail == head {
return None;
}
backoff.spin();
head = self.head.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
head = self.head.load(Ordering::Relaxed);
}
}
}
/// Returns the capacity of the queue.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::<i32>::new(100);
///
/// assert_eq!(q.capacity(), 100);
/// ```
pub fn capacity(&self) -> usize {
self.cap
}
/// Returns `true` if the queue is empty.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(100);
///
/// assert!(q.is_empty());
/// q.push(1).unwrap();
/// assert!(!q.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
let head = self.head.load(Ordering::SeqCst);
let tail = self.tail.load(Ordering::SeqCst);
        // Is the tail equal to the head?
//
// Note: If the head changes just before we load the tail, that means there was a moment
// when the channel was not empty, so it is safe to just return `false`.
tail == head
}
/// Returns `true` if the queue is full.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(1);
///
/// assert!(!q.is_full());
/// q.push(1).unwrap();
/// assert!(q.is_full());
/// ```
pub fn is_full(&self) -> bool {
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// Is the head lagging one lap behind tail?
//
// Note: If the tail changes just before we load the head, that means there was a moment
// when the queue was not full, so it is safe to just return `false`.
head.wrapping_add(self.one_lap) == tail
}
/// Returns the number of elements in the queue.
///
/// # Examples
///
/// ```
/// use mco::std::queue::array_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(100);
/// assert_eq!(q.len(), 0);
///
/// q.push(10).unwrap();
/// assert_eq!(q.len(), 1);
///
/// q.push(20).unwrap();
/// assert_eq!(q.len(), 2);
/// ```
pub fn len(&self) -> usize {
loop {
// Load the tail, then load the head.
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// If the tail didn't change, we've got consistent values to work with.
if self.tail.load(Ordering::SeqCst) == tail {
let hix = head & (self.one_lap - 1);
let tix = tail & (self.one_lap - 1);
return if hix < tix {
tix - hix
} else if hix > tix {
self.cap - hix + tix
} else if tail == head {
0
} else {
self.cap
};
}
}
}
}
impl<T> Drop for ArrayQueue<T> {
fn drop(&mut self) {
// Get the index of the head.
let hix = self.head.load(Ordering::Relaxed) & (self.one_lap - 1);
// Loop over all slots that hold a message and drop them.
for i in 0..self.len() {
// Compute the index of the next slot holding a message.
let index = if hix + i < self.cap {
hix + i
} else {
hix + i - self.cap
};
unsafe {
let p = {
let slot = &mut self.buffer.get_unchecked(index);
let value = &mut *slot.value.get();
value.as_mut_ptr()
};
p.drop_in_place();
}
}
}
}
impl<T> fmt::Debug for ArrayQueue<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("ArrayQueue { .. }")
}
}
#[derive(Debug)]
pub struct IntoIter<T> {
value: ArrayQueue<T>,
}
impl<T> Iterator for IntoIter<T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> |
}
#[cfg(test)]
mod test {
use crate::std::queue::array_queue::ArrayQueue;
#[test]
fn test_insert() {
let q = ArrayQueue::new(2);
        q.push(1).unwrap();
        q.push(2).unwrap();
        assert!(q.push(3).is_err()); // capacity is 2, so the third push is rejected
assert_eq!(q.len(), 2);
assert_eq!(q.pop().unwrap(), 1);
assert_eq!(q.pop().unwrap(), 2);
}
} | {
let value = &mut self.value;
let head = *value.head.get_mut();
if value.head.get_mut() != value.tail.get_mut() {
let index = head & (value.one_lap - 1);
let lap = head & !(value.one_lap - 1);
// SAFETY: We have mutable access to this, so we can read without
// worrying about concurrency. Furthermore, we know this is
// initialized because it is the value pointed at by `value.head`
// and this is a non-empty queue.
let val = unsafe {
debug_assert!(index < value.buffer.len());
let slot = value.buffer.get_unchecked_mut(index);
slot.value.get().read().assume_init()
};
let new = if index + 1 < value.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
lap.wrapping_add(value.one_lap)
};
*value.head.get_mut() = new;
Option::Some(val)
} else {
Option::None
}
} |
virtio.rs | // Copyright © 2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Virtio related errors
#[derive(Debug)]
pub enum Error {
UnsupportedDevice,
LegacyOnly,
FeatureNegotiationFailed,
QueueTooSmall,
}
/// Trait to allow separation of transport from block driver | fn get_status(&self) -> u32;
fn set_status(&self, status: u32);
fn add_status(&self, status: u32);
fn reset(&self);
fn get_features(&self) -> u64;
fn set_features(&self, features: u64);
fn set_queue(&self, queue: u16);
fn get_queue_max_size(&self) -> u16;
fn set_queue_size(&self, queue_size: u16);
fn set_descriptors_address(&self, address: u64);
fn set_avail_ring(&self, address: u64);
fn set_used_ring(&self, address: u64);
fn set_queue_enable(&self);
fn notify_queue(&self, queue: u16);
fn read_device_config(&self, offset: u64) -> u32;
} | pub trait VirtioTransport {
fn init(&mut self, device_type: u32) -> Result<(), Error>; |
sequence_generator.py | import pandas as pd
from itertools import groupby
from operator import itemgetter
class | :
def __init__(self, csvfile, jsThreshold):
self.datafile = csvfile
self.jsThreshold = jsThreshold
"""
Convert the input csv file into dataframe
"""
def _csv2df(self):
return pd.read_csv(self.datafile, dtype={'item_id':int, 'user_id':str})
"""
Generate database by selecting the non-null sequences satisfying the js-distance threshold
"""
def generate_db(self):
db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'js_distance']].sort_values(by=['item_id','rev_timestamp'])
filter = db.loc[db['js_distance'] >= self.jsThreshold][['item_id', 'user_id', 'edit_type']]
return filter[filter.user_id.notnull()]
def generate_dev_db(self, dev):
db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'prediction', 'js_distance']].sort_values(by=['item_id', 'rev_timestamp'])
filter = db.loc[(db['js_distance']>=self.jsThreshold) & (db['prediction']==dev)][['item_id', 'user_id', 'edit_type']]
return filter[filter.user_id.notnull()]
"""
Generate the sequence database by integrating all edits conducted upon one article in a list, where
the serial edits from the same editor are collapsed into one sub-list
Args:
csv file of scheme: article_id : int
editor_id : int
edit_type : string
Return:
A list of list [[a], [b]], where a and b are collapsed edit types
"""
def generate_sequence(self):
db = self.generate_db()
df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list})
result = df.groupby(['item_id']).agg({'edit_type': list})
tmp = []
for ls in result.values.tolist():
tmp.append(ls[0])
return tmp
def generate_dev_sequence(self, dev):
db = self.generate_dev_db(dev=dev)
df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list})
return df.values.tolist() | SequenceGenerator |
security_score_aggregation.go | // Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Cloud Guard APIs
//
// A description of the Cloud Guard APIs
//
package cloudguard
| // SecurityScoreAggregation Provides the dimensions and their corresponding count value.
type SecurityScoreAggregation struct {
// The key-value pairs of dimensions and their names.
DimensionsMap map[string]string `mandatory:"true" json:"dimensionsMap"`
// The security rating with given dimension/s
SecurityRating SecurityRatingEnum `mandatory:"true" json:"securityRating"`
// The security score with given dimension/s
SecurityScore *int `mandatory:"true" json:"securityScore"`
}
func (m SecurityScoreAggregation) String() string {
return common.PointerString(m)
} | import (
"github.com/oracle/oci-go-sdk/v49/common"
)
|
create_completed_audit_page.py | from datetime import datetime
from kivy.app import App
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import Screen
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from cilantro_audit import globals
from cilantro_audit.constants import PROD_DB
from cilantro_audit.constants import VIEW_AUDIT_TEMPLATES
from cilantro_audit.constants import ANSWER_MODULE_DISPLACEMENT
from cilantro_audit.audit_template import AuditTemplate
from cilantro_audit.answer_module import AnswerModule
from cilantro_audit.completed_audit import Answer
from cilantro_audit.completed_audit import Response
from cilantro_audit.completed_audit import CompletedAuditBuilder
from cilantro_audit.create_audit_template_page import ErrorPop
from mongoengine import connect
connect(PROD_DB)
class CreateCompletedAuditPage(Screen):
stack_list = ObjectProperty()
title_label = ObjectProperty()
auditor_name = ObjectProperty()
scrolling_panel = ObjectProperty()
audit_title = StringProperty()
questions = []
# Populates the questions/answers of a completed audit
def populate_page(self, audit_title):
self.audit_title = audit_title
completed_audit = AuditTemplate.objects().filter(title=self.audit_title).first()
for question in completed_audit.questions:
a_temp = AnswerModule()
a_temp.question = question
a_temp.question_text = question.text
self.stack_list.add_widget(a_temp)
self.questions.append(a_temp)
self.stack_list.height += ANSWER_MODULE_DISPLACEMENT
self.scrolling_panel.scroll_y = 1
self.auditor_name.text = ''
# Popup for the back button
def back_pop(self):
show = ConfirmationPop()
# YES consequences (stack order)
show.yes.bind(on_release=lambda _: show.dismiss())
show.yes.bind(on_release=lambda _: self.clear_page())
show.yes.bind(on_release=lambda _: self.switch_back())
# NO consequences
show.no.bind(on_release=lambda _: show.dismiss())
show.open()
# Popup for the submit button
def submit_pop(self):
error_message = self.is_filled_out()
# No missing fields (ready to submit)
if error_message == "":
show = ConfirmationPop()
# YES consequences (stack order)
show.yes.bind(on_release=lambda _: show.dismiss())
show.yes.bind(on_release=lambda _: self.clear_page())
show.yes.bind(on_release=lambda _: self.switch_back())
show.yes.bind(on_release=lambda _: self.submit_audit())
# NO consequences
show.no.bind(on_release=lambda _: show.dismiss())
show.open()
# Some fields were missing
else:
show = ErrorPop()
show.error_message.text = error_message
show.open()
# Saves a completely filled audit to the database
def | (self):
completed_audit = CompletedAuditBuilder()
completed_audit.with_title(self.audit_title)
# The object returned from the .kv is a TextField, with a member text
completed_audit.with_auditor(self.auditor_name.text)
completed_audit.with_datetime(datetime.utcnow())
for a in self.questions:
if a.other_comments.text:
temp_answer = Answer(text=a.question.text,
severity=self.get_question_severity(a),
response=a.response,
comment=a.other_comments.text)
else:
temp_answer = Answer(text=a.question.text,
severity=self.get_question_severity(a),
response=a.response)
completed_audit.with_answer(temp_answer)
# Save audit
completed_audit.build().save()
# Update audit locked status
AuditTemplate.objects().filter(title=self.audit_title).update(upsert=False, multi=True, locked=True)
def switch_back(self):
globals.screen_manager.get_screen(VIEW_AUDIT_TEMPLATES).populate_page()
globals.screen_manager.current = VIEW_AUDIT_TEMPLATES
# Empties the stack list and question list so the user can leave the page early without leftover state
def clear_page(self):
self.audit_title = ""
for question in self.questions:
self.stack_list.remove_widget(question)
self.stack_list.height -= ANSWER_MODULE_DISPLACEMENT  # mirror the height added per question in populate_page
self.questions.clear()
# Check whether the audit has been filled out before submitting it
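# Returns an empty string when the audit is complete, otherwise an error message for a missing field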
def is_filled_out(self):
for child in self.questions:
child.no_answer_flag.opacity = 0
child.no_comment_flag.opacity = 0
error_message = ""
# Check if 'auditor name' is entered.
if not self.auditor_name.text:
error_message = "Please enter your name."
for question in self.questions:
# Check if all questions are answered
if question.response is None:
error_message = "Must respond to all questions."
question.no_answer_flag.opacity = 1
# Check if 'other' responses have comments.
elif question.other_has_comments() is False:
error_message = "Answers with 'Other' must have comments."
question.no_comment_flag.opacity = 1
return error_message
# Return the associated severity with question's response
def get_question_severity(self, question):
if question.response == Response.yes():
return question.question.yes
elif question.response == Response.no():
return question.question.no
return question.question.other
class ConfirmationPop(Popup):
yes = ObjectProperty(None)
no = ObjectProperty(None)
class TestApp(App):
def build(self):
return CreateCompletedAuditPage()
if __name__ == "__main__":
TestApp().run()
| submit_audit |
binance.py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.precise import Precise
class binance(Exchange):
def describe(self):
return self.deep_extend(super(binance, self).describe(), {
'id': 'binance',
'name': 'Binance',
'countries': ['JP', 'MT'], # Japan, Malta
'rateLimit': 50,
'certified': True,
'pro': True,
# new metainfo interface
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': None,
'createOrder': True,
'fetchBalance': True,
'fetchBorrowRate': True,
'fetchBorrowRates': False,
'fetchBidsAsks': True,
'fetchClosedOrders': 'emulated',
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchIsolatedPositions': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchTransactions': False,
'fetchTransfers': True,
'fetchWithdrawals': True,
'setLeverage': True,
'setMarginMode': True,
'setPositionMode': True,
'addMargin': True,
'reduceMargin': True,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/29604020-d5483cdc-87ee-11e7-94c7-d1a8d9169293.jpg',
'test': {
'dapiPublic': 'https://testnet.binancefuture.com/dapi/v1',
'dapiPrivate': 'https://testnet.binancefuture.com/dapi/v1',
'fapiPublic': 'https://testnet.binancefuture.com/fapi/v1',
'fapiPrivate': 'https://testnet.binancefuture.com/fapi/v1',
'fapiPrivateV2': 'https://testnet.binancefuture.com/fapi/v2',
'public': 'https://testnet.binance.vision/api/v3',
'private': 'https://testnet.binance.vision/api/v3',
'v1': 'https://testnet.binance.vision/api/v1',
},
'api': {
'wapi': 'https://api.binance.com/wapi/v3',
'sapi': 'https://api.binance.com/sapi/v1',
'dapiPublic': 'https://dapi.binance.com/dapi/v1',
'dapiPrivate': 'https://dapi.binance.com/dapi/v1',
'dapiPrivateV2': 'https://dapi.binance.com/dapi/v2',
'dapiData': 'https://dapi.binance.com/futures/data',
'fapiPublic': 'https://fapi.binance.com/fapi/v1',
'fapiPrivate': 'https://fapi.binance.com/fapi/v1',
'fapiData': 'https://fapi.binance.com/futures/data',
'fapiPrivateV2': 'https://fapi.binance.com/fapi/v2',
'public': 'https://api.binance.com/api/v3',
'private': 'https://api.binance.com/api/v3',
'v1': 'https://api.binance.com/api/v1',
},
'www': 'https://www.binance.com',
# 'referral': {
# 'url': 'https://www.binance.com/en/register?ref=BLEJC98C',
# 'discount': 0.2,
# },
'doc': [
'https://binance-docs.github.io/apidocs/spot/en',
],
'api_management': 'https://www.binance.com/en/usercenter/settings/api-management',
'fees': 'https://www.binance.com/en/fee/schedule',
},
'depth': 1,
'api': {
# the API structure below will need 3-layer apidefs
'sapi': {
'get': {
'accountSnapshot': 1,
'system/status': 1,
# these endpoints require self.apiKey
'margin/asset': 1,
'margin/pair': 1,
'margin/allAssets': 1,
'margin/allPairs': 1,
'margin/priceIndex': 1,
# these endpoints require self.apiKey + self.secret
'asset/assetDividend': 1,
'asset/dribblet': 1,
'asset/transfer': 1,
'asset/assetDetail': 1,
'asset/tradeFee': 1,
'asset/get-funding-asset': 1,
'margin/loan': 1,
'margin/repay': 1,
'margin/account': 1,
'margin/transfer': 1,
'margin/interestHistory': 1,
'margin/forceLiquidationRec': 1,
'margin/order': 1,
'margin/openOrders': 1,
'margin/allOrders': 1,
'margin/myTrades': 1,
'margin/maxBorrowable': 5,
'margin/maxTransferable': 5,
'margin/isolated/transfer': 1,
'margin/isolated/account': 1,
'margin/isolated/pair': 1,
'margin/isolated/allPairs': 1,
'margin/isolated/accountLimit': 1,
'margin/interestRateHistory': 1,
'margin/orderList': 2,
'margin/allOrderList': 10,
'margin/openOrderList': 3,
'loan/income': 1,
'fiat/orders': 1,
'fiat/payments': 1,
'futures/transfer': 5,
'futures/loan/borrow/history': 1,
'futures/loan/repay/history': 1,
'futures/loan/wallet': 1,
'futures/loan/configs': 1,
'futures/loan/calcAdjustLevel': 1,
'futures/loan/calcMaxAdjustAmount': 1,
'futures/loan/adjustCollateral/history': 1,
'futures/loan/liquidationHistory': 1,
# https://binance-docs.github.io/apidocs/spot/en/#withdraw-sapi
'capital/config/getall': 1, # get networks for withdrawing USDT ERC20 vs USDT Omni
'capital/deposit/address': 1,
'capital/deposit/hisrec': 1,
'capital/deposit/subAddress': 1,
'capital/deposit/subHisrec': 1,
'capital/withdraw/history': 1,
'account/status': 1,
'account/apiTradingStatus': 1,
'account/apiRestrictions/ipRestriction': 1,
'bnbBurn': 1,
'sub-account/assets': 1,
'sub-account/futures/account': 1,
'sub-account/futures/accountSummary': 1,
'sub-account/futures/positionRisk': 1,
'sub-account/futures/internalTransfer': 1,
'sub-account/list': 1,
'sub-account/margin/account': 1,
'sub-account/margin/accountSummary': 1,
'sub-account/spotSummary': 5,
'sub-account/status': 1,
'sub-account/sub/transfer/history': 1,
'sub-account/transfer/subUserHistory': 1,
'sub-account/universalTransfer': 1,
# lending endpoints
'lending/daily/product/list': 1,
'lending/daily/userLeftQuota': 1,
'lending/daily/userRedemptionQuota': 1,
'lending/daily/token/position': 1,
'lending/union/account': 1,
'lending/union/purchaseRecord': 1,
'lending/union/redemptionRecord': 1,
'lending/union/interestHistory': 1,
'lending/project/list': 1,
'lending/project/position/list': 1,
# mining endpoints
'mining/pub/algoList': 1,
'mining/pub/coinList': 1,
'mining/worker/detail': 5,
'mining/worker/list': 5,
'mining/payment/list': 5,
'mining/statistics/user/status': 5,
'mining/statistics/user/list': 5,
# liquid swap endpoints
'bswap/pools': 1,
'bswap/liquidity': {'cost': 1, 'noPoolId': 10},
'bswap/liquidityOps': 2,
'bswap/quote': 2,
'bswap/swap': 1,
'bswap/poolConfigure': 1,
'bswap/addLiquidityPreview': 1,
'bswap/removeLiquidityPreview': 1,
# leveraged token endpoints
'blvt/tokenInfo': 1,
'blvt/subscribe/record': 1,
'blvt/redeem/record': 1,
'blvt/userLimit': 1,
# broker api
'apiReferral/ifNewUser': 1,
'apiReferral/customization': 1,
'apiReferral/userCustomization': 1,
'apiReferral/rebate/recentRecord': 1,
'apiReferral/rebate/historicalRecord': 1,
'apiReferral/kickback/recentRecord': 1,
'apiReferral/kickback/historicalRecord': 1,
# brokerage API
'broker/subAccountApi': 1,
'broker/subAccount': 1,
'broker/subAccountApi/commission/futures': 1,
'broker/subAccountApi/commission/coinFutures': 1,
'broker/info': 1,
'broker/transfer': 1,
'broker/transfer/futures': 1,
'broker/rebate/recentRecord': 1,
'broker/rebate/historicalRecord': 1,
'broker/subAccount/bnbBurn/status': 1,
'broker/subAccount/depositHist': 1,
'broker/subAccount/spotSummary': 1,
'broker/subAccount/marginSummary': 1,
'broker/subAccount/futuresSummary': 1,
'broker/rebate/futures/recentRecord': 1,
'broker/subAccountApi/ipRestriction': 1,
'broker/universalTransfer': 1,
# v2 not supported yet
# GET /sapi/v2/broker/subAccount/futuresSummary
'account/apiRestrictions': 1,
# subaccounts
'managed-subaccount/asset': 1,
# c2c / p2p
'c2c/orderMatch/listUserOrderHistory': 1,
},
'post': {
'asset/dust': 1,
'asset/transfer': 1,
'asset/get-funding-asset': 1,
'account/disableFastWithdrawSwitch': 1,
'account/enableFastWithdrawSwitch': 1,
'account/apiRestrictions/ipRestriction': 1,
'account/apiRestrictions/ipRestriction/ipList': 1,
'capital/withdraw/apply': 1,
'margin/transfer': 1,
'margin/loan': 1,
'margin/repay': 1,
'margin/order': 4,
'margin/order/oco': 1,
'margin/isolated/create': 1,
'margin/isolated/transfer': 1,
'margin/isolated/account': 1,
'bnbBurn': 1,
'sub-account/margin/transfer': 1,
'sub-account/margin/enable': 1,
# 'sub-account/margin/enable': 1,
'sub-account/futures/enable': 1,
'sub-account/futures/transfer': 1,
'sub-account/futures/internalTransfer': 1,
'sub-account/transfer/subToSub': 1,
'sub-account/transfer/subToMaster': 1,
'sub-account/universalTransfer': 1,
'managed-subaccount/deposit': 1,
'managed-subaccount/withdraw': 1,
'userDataStream': 1,
'userDataStream/isolated': 1,
'futures/transfer': 1,
'futures/loan/borrow': 20,
'futures/loan/repay': 20,
'futures/loan/adjustCollateral': 20,
# lending
'lending/customizedFixed/purchase': 1,
'lending/daily/purchase': 1,
'lending/daily/redeem': 1,
# liquid swap endpoints
'bswap/liquidityAdd': 2,
'bswap/liquidityRemove': 2,
'bswap/swap': 2,
# leveraged token endpoints
'blvt/subscribe': 1,
'blvt/redeem': 1,
# brokerage API
'apiReferral/customization': 1,
'apiReferral/userCustomization': 1,
'apiReferral/rebate/historicalRecord': 1,
'apiReferral/kickback/historicalRecord': 1,
'broker/subAccount': 1,
'broker/subAccount/margin': 1,
'broker/subAccount/futures': 1,
'broker/subAccountApi': 1,
'broker/subAccountApi/permission': 1,
'broker/subAccountApi/commission': 1,
'broker/subAccountApi/commission/futures': 1,
'broker/subAccountApi/commission/coinFutures': 1,
'broker/transfer': 1,
'broker/transfer/futures': 1,
'broker/rebate/historicalRecord': 1,
'broker/subAccount/bnbBurn/spot': 1,
'broker/subAccount/bnbBurn/marginInterest': 1,
'broker/subAccount/blvt': 1,
'broker/subAccountApi/ipRestriction': 1,
'broker/subAccountApi/ipRestriction/ipList': 1,
'broker/universalTransfer': 1,
'broker/subAccountApi/permission/universalTransfer': 1,
'broker/subAccountApi/permission/vanillaOptions': 1,
},
'put': {
'userDataStream': 1,
'userDataStream/isolated': 1,
},
'delete': {
'account/apiRestrictions/ipRestriction/ipList': 1,
'margin/openOrders': 1,
'margin/order': 1,
'margin/orderList': 1,
'margin/isolated/account': 1,
'userDataStream': 1,
'userDataStream/isolated': 1,
# brokerage API
'broker/subAccountApi': 1,
'broker/subAccountApi/ipRestriction/ipList': 1,
},
},
# deprecated
'wapi': {
'post': {
'withdraw': 1,
'sub-account/transfer': 1,
},
'get': {
'depositHistory': 1,
'withdrawHistory': 1,
'depositAddress': 1,
'accountStatus': 1,
'systemStatus': 1,
'apiTradingStatus': 1,
'userAssetDribbletLog': 1,
'tradeFee': 1,
'assetDetail': 1,
'sub-account/list': 1,
'sub-account/transfer/history': 1,
'sub-account/assets': 1,
},
},
'dapiPublic': {
'get': {
'ping': 1,
'time': 1,
'exchangeInfo': 1,
'depth': {'cost': 2, 'byLimit': [[50, 2], [100, 5], [500, 10], [1000, 20]]},
'trades': 1,
'historicalTrades': 20,
'aggTrades': 20,
'premiumIndex': 10,
'fundingRate': 1,
'klines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'continuousKlines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'indexPriceKlines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'markPriceKlines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'ticker/24hr': {'cost': 1, 'noSymbol': 40},
'ticker/price': {'cost': 1, 'noSymbol': 2},
'ticker/bookTicker': {'cost': 1, 'noSymbol': 2},
'openInterest': 1,
},
},
'dapiData': {
'get': {
'openInterestHist': 1,
'topLongShortAccountRatio': 1,
'topLongShortPositionRatio': 1,
'globalLongShortAccountRatio': 1,
'takerBuySellVol': 1,
'basis': 1,
},
},
'dapiPrivate': {
'get': {
'positionSide/dual': 30,
'order': 1,
'openOrder': 1,
'openOrders': {'cost': 1, 'noSymbol': 5},
'allOrders': {'cost': 20, 'noSymbol': 40},
'balance': 1,
'account': 5,
'positionMargin/history': 1,
'positionRisk': 1,
'userTrades': {'cost': 20, 'noSymbol': 40},
'income': 20,
'leverageBracket': 1,
'forceOrders': {'cost': 20, 'noSymbol': 50},
'adlQuantile': 5,
},
'post': {
'positionSide/dual': 1,
'order': 4,
'batchOrders': 5,
'countdownCancelAll': 10,
'leverage': 1,
'marginType': 1,
'positionMargin': 1,
'listenKey': 1,
},
'put': {
'listenKey': 1,
},
'delete': {
'order': 1,
'allOpenOrders': 1,
'batchOrders': 5,
'listenKey': 1,
},
},
'dapiPrivateV2': {
'get': {
'leverageBracket': 1,
},
},
'fapiPublic': {
'get': {
'ping': 1,
'time': 1,
'exchangeInfo': 1,
'depth': {'cost': 2, 'byLimit': [[50, 2], [100, 5], [500, 10], [1000, 20]]},
'trades': 1,
'historicalTrades': 20,
'aggTrades': 20,
'klines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'continuousKlines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'markPriceKlines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'indexPriceKlines': {'cost': 1, 'byLimit': [[99, 1], [499, 2], [1000, 5], [10000, 10]]},
'fundingRate': 1,
'premiumIndex': 1,
'ticker/24hr': {'cost': 1, 'noSymbol': 40},
'ticker/price': {'cost': 1, 'noSymbol': 2},
'ticker/bookTicker': {'cost': 1, 'noSymbol': 2},
'openInterest': 1,
'indexInfo': 1,
'apiTradingStatus': {'cost': 1, 'noSymbol': 10},
'lvtKlines': 1,
},
},
'fapiData': {
'get': {
'openInterestHist': 1,
'topLongShortAccountRatio': 1,
'topLongShortPositionRatio': 1,
'globalLongShortAccountRatio': 1,
'takerlongshortRatio': 1,
},
},
'fapiPrivate': {
'get': {
'forceOrders': {'cost': 20, 'noSymbol': 50},
'allOrders': 5,
'openOrder': 1,
'openOrders': 1,
'order': 1,
'account': 5,
'balance': 5,
'leverageBracket': 1,
'positionMargin/history': 1,
'positionRisk': 5,
'positionSide/dual': 30,
'userTrades': 5,
'income': 30,
'commissionRate': 20,
'apiTradingStatus': 1,
'multiAssetsMargin': 30,
# broker endpoints
'apiReferral/ifNewUser': 1,
'apiReferral/customization': 1,
'apiReferral/userCustomization': 1,
'apiReferral/traderNum': 1,
'apiReferral/overview': 1,
'apiReferral/tradeVol': 1,
'apiReferral/rebateVol': 1,
'apiReferral/traderSummary': 1,
'adlQuantile': 5,
},
'post': {
'batchOrders': 5,
'positionSide/dual': 1,
'positionMargin': 1,
'marginType': 1,
'order': 4,
'leverage': 1,
'listenKey': 1,
'countdownCancelAll': 10,
'multiAssetsMargin': 1,
# broker endpoints
'apiReferral/customization': 1,
'apiReferral/userCustomization': 1,
},
'put': {
'listenKey': 1,
},
'delete': {
'batchOrders': 1,
'order': 1,
'allOpenOrders': 1,
'listenKey': 1,
},
},
'fapiPrivateV2': {
'get': {
'account': 1,
'balance': 1,
'positionRisk': 1,
},
},
'public': {
'get': {
'ping': 1,
'time': 1,
'depth': {'cost': 1, 'byLimit': [[100, 1], [500, 5], [1000, 10], [5000, 50]]},
'trades': 1,
'aggTrades': 1,
'historicalTrades': 5,
'klines': 1,
'ticker/24hr': {'cost': 1, 'noSymbol': 40},
'ticker/price': {'cost': 1, 'noSymbol': 2},
'ticker/bookTicker': {'cost': 1, 'noSymbol': 2},
'exchangeInfo': 10,
},
'put': {
'userDataStream': 1,
},
'post': {
'userDataStream': 1,
},
'delete': {
'userDataStream': 1,
},
},
'private': {
'get': {
'allOrderList': 10, # oco
'openOrderList': 3, # oco
'orderList': 2, # oco
'order': 2,
'openOrders': {'cost': 3, 'noSymbol': 40},
'allOrders': 10,
'account': 10,
'myTrades': 10,
'rateLimit/order': 20,
},
'post': {
'order/oco': 1,
'order': 4,
'order/test': 1,
},
'delete': {
'openOrders': 1, # added on 2020-04-25 for canceling all open orders per symbol
'orderList': 1, # oco
'order': 1,
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'taker': self.parse_number('0.001'),
'maker': self.parse_number('0.001'),
},
'future': {
'trading': {
'feeSide': 'quote',
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.000400'),
'maker': self.parse_number('0.000200'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.000400')],
[self.parse_number('250'), self.parse_number('0.000400')],
[self.parse_number('2500'), self.parse_number('0.000350')],
[self.parse_number('7500'), self.parse_number('0.000320')],
[self.parse_number('22500'), self.parse_number('0.000300')],
[self.parse_number('50000'), self.parse_number('0.000270')],
[self.parse_number('100000'), self.parse_number('0.000250')],
[self.parse_number('200000'), self.parse_number('0.000220')],
[self.parse_number('400000'), self.parse_number('0.000200')],
[self.parse_number('750000'), self.parse_number('0.000170')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.000200')],
[self.parse_number('250'), self.parse_number('0.000160')],
[self.parse_number('2500'), self.parse_number('0.000140')],
[self.parse_number('7500'), self.parse_number('0.000120')],
[self.parse_number('22500'), self.parse_number('0.000100')],
[self.parse_number('50000'), self.parse_number('0.000080')],
[self.parse_number('100000'), self.parse_number('0.000060')],
[self.parse_number('200000'), self.parse_number('0.000040')],
[self.parse_number('400000'), self.parse_number('0.000020')],
[self.parse_number('750000'), self.parse_number('0')],
],
},
},
},
'delivery': {
'trading': {
'feeSide': 'base',
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.000500'),
'maker': self.parse_number('0.000100'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.000500')],
[self.parse_number('250'), self.parse_number('0.000450')],
[self.parse_number('2500'), self.parse_number('0.000400')],
[self.parse_number('7500'), self.parse_number('0.000300')],
[self.parse_number('22500'), self.parse_number('0.000250')],
[self.parse_number('50000'), self.parse_number('0.000240')],
[self.parse_number('100000'), self.parse_number('0.000240')],
[self.parse_number('200000'), self.parse_number('0.000240')],
[self.parse_number('400000'), self.parse_number('0.000240')],
[self.parse_number('750000'), self.parse_number('0.000240')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.000100')],
[self.parse_number('250'), self.parse_number('0.000080')],
[self.parse_number('2500'), self.parse_number('0.000050')],
[self.parse_number('7500'), self.parse_number('0.000030')],
[self.parse_number('22500'), self.parse_number('0')],
[self.parse_number('50000'), self.parse_number('-0.000050')],
[self.parse_number('100000'), self.parse_number('-0.000060')],
[self.parse_number('200000'), self.parse_number('-0.000070')],
[self.parse_number('400000'), self.parse_number('-0.000080')],
[self.parse_number('750000'), self.parse_number('-0.000090')],
],
},
},
},
},
'commonCurrencies': {
'BCC': 'BCC', # kept for backward-compatibility https://github.com/ccxt/ccxt/issues/4848
'YOYO': 'YOYOW',
},
# exchange-specific options
'options': {
'fetchCurrencies': True, # self is a private call and it requires API keys
# 'fetchTradesMethod': 'publicGetAggTrades', # publicGetTrades, publicGetHistoricalTrades
'defaultTimeInForce': 'GTC', # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
'defaultType': 'spot', # 'spot', 'future', 'margin', 'delivery'
'hasAlreadyAuthenticatedSuccessfully': False,
'warnOnFetchOpenOrdersWithoutSymbol': True,
'fetchPositions': 'positionRisk', # or 'account'
'recvWindow': 5 * 1000, # 5 sec, binance default
'timeDifference': 0, # the difference between system clock and Binance clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
'newOrderRespType': {
'market': 'FULL', # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
'limit': 'FULL', # we change it from 'ACK' by default to 'FULL'(returns immediately if limit is not hit)
},
'quoteOrderQty': True, # whether market orders support amounts in quote currency
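# Illustrative usage sketch (not part of the generated file): these defaults can be
# overridden when instantiating the exchange, e.g.
#     exchange = binance({'options': {'defaultType': 'future', 'adjustForTimeDifference': True}})
# and tweaked afterwards, e.g. exchange.options['recvWindow'] = 10 * 1000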
'broker': {
'spot': 'x-R4BD3S82',
'margin': 'x-R4BD3S82',
'future': 'x-xcKtGhcu',
'delivery': 'x-xcKtGhcu',
},
'accountsByType': {
'main': 'MAIN',
'spot': 'MAIN',
'funding': 'FUNDING',
'margin': 'MARGIN',
'future': 'UMFUTURE',
'delivery': 'CMFUTURE',
'mining': 'MINING',
},
'typesByAccount': {
'MAIN': 'spot',
'FUNDING': 'funding',
'MARGIN': 'margin',
'UMFUTURE': 'future',
'CMFUTURE': 'delivery',
'MINING': 'mining',
},
'networks': {
'ERC20': 'ETH',
'TRC20': 'TRX',
'BEP2': 'BNB',
'BEP20': 'BSC',
'OMNI': 'OMNI',
'EOS': 'EOS',
'SPL': 'SOL',
},
'reverseNetworks': {
'tronscan.org': 'TRC20',
'etherscan.io': 'ERC20',
'bscscan.com': 'BSC',
'explorer.binance.org': 'BEP2',
'bithomp.com': 'XRP',
'bloks.io': 'EOS',
'stellar.expert': 'XLM',
'blockchair.com/bitcoin': 'BTC',
'blockchair.com/bitcoin-cash': 'BCH',
'blockchair.com/ecash': 'XEC',
'explorer.litecoin.net': 'LTC',
'explorer.avax.network': 'AVAX',
'solscan.io': 'SOL',
'polkadot.subscan.io': 'DOT',
'dashboard.internetcomputer.org': 'ICP',
'explorer.chiliz.com': 'CHZ',
'cardanoscan.io': 'ADA',
'mainnet.theoan.com': 'AION',
'algoexplorer.io': 'ALGO',
'explorer.ambrosus.com': 'AMB',
'viewblock.io/zilliqa': 'ZIL',
'viewblock.io/arweave': 'AR',
'explorer.ark.io': 'ARK',
'atomscan.com': 'ATOM',
'www.mintscan.io': 'CTK',
'explorer.bitcoindiamond.org': 'BCD',
'btgexplorer.com': 'BTG',
'bts.ai': 'BTS',
'explorer.celo.org': 'CELO',
'explorer.nervos.org': 'CKB',
'cerebro.cortexlabs.ai': 'CTXC',
'chainz.cryptoid.info': 'VIA',
'explorer.dcrdata.org': 'DCR',
'digiexplorer.info': 'DGB',
'dock.subscan.io': 'DOCK',
'dogechain.info': 'DOGE',
'explorer.elrond.com': 'EGLD',
'blockscout.com': 'ETC',
'explore-fetchhub.fetch.ai': 'FET',
'filfox.info': 'FIL',
'fio.bloks.io': 'FIO',
'explorer.firo.org': 'FIRO',
'neoscan.io': 'NEO',
'ftmscan.com': 'FTM',
'explorer.gochain.io': 'GO',
'block.gxb.io': 'GXS',
'hash-hash.info': 'HBAR',
'www.hiveblockexplorer.com': 'HIVE',
'explorer.helium.com': 'HNT',
'tracker.icon.foundation': 'ICX',
'www.iostabc.com': 'IOST',
'explorer.iota.org': 'IOTA',
'iotexscan.io': 'IOTX',
'irishub.iobscan.io': 'IRIS',
'kava.mintscan.io': 'KAVA',
'scope.klaytn.com': 'KLAY',
'kmdexplorer.io': 'KMD',
'kusama.subscan.io': 'KSM',
'explorer.lto.network': 'LTO',
'polygonscan.com': 'POLYGON',
'explorer.ont.io': 'ONT',
'minaexplorer.com': 'MINA',
'nanolooker.com': 'NANO',
'explorer.nebulas.io': 'NAS',
'explorer.nbs.plus': 'NBS',
'explorer.nebl.io': 'NEBL',
'nulscan.io': 'NULS',
'nxscan.com': 'NXS',
'explorer.harmony.one': 'ONE',
'explorer.poa.network': 'POA',
'qtum.info': 'QTUM',
'explorer.rsk.co': 'RSK',
'www.oasisscan.com': 'ROSE',
'ravencoin.network': 'RVN',
'sc.tokenview.com': 'SC',
'secretnodes.com': 'SCRT',
'explorer.skycoin.com': 'SKY',
'steemscan.com': 'STEEM',
'explorer.stacks.co': 'STX',
'www.thetascan.io': 'THETA',
'scan.tomochain.com': 'TOMO',
'explore.vechain.org': 'VET',
'explorer.vite.net': 'VITE',
'www.wanscan.org': 'WAN',
'wavesexplorer.com': 'WAVES',
'wax.eosx.io': 'WAXP',
'waltonchain.pro': 'WTC',
'chain.nem.ninja': 'XEM',
'verge-blockchain.info': 'XVG',
'explorer.yoyow.org': 'YOYOW',
'explorer.zcha.in': 'ZEC',
'explorer.zensystem.io': 'ZEN',
},
'impliedNetworks': {
'ETH': {'ERC20': 'ETH'},
'TRX': {'TRC20': 'TRX'},
},
'legalMoney': {
'MXN': True,
'UGX': True,
'SEK': True,
'CHF': True,
'VND': True,
'AED': True,
'DKK': True,
'KZT': True,
'HUF': True,
'PEN': True,
'PHP': True,
'USD': True,
'TRY': True,
'EUR': True,
'NGN': True,
'PLN': True,
'BRL': True,
'ZAR': True,
'KES': True,
'ARS': True,
'RUB': True,
'AUD': True,
'NOK': True,
'CZK': True,
'GBP': True,
'UAH': True,
'GHS': True,
'HKD': True,
'CAD': True,
'INR': True,
'JPY': True,
'NZD': True,
},
},
# https://binance-docs.github.io/apidocs/spot/en/#error-codes-2
'exceptions': {
'exact': {
'System is under maintenance.': OnMaintenance, # {"code":1,"msg":"System is under maintenance."}
'System abnormality': ExchangeError, # {"code":-1000,"msg":"System abnormality"}
'You are not authorized to execute self request.': PermissionDenied, # {"msg":"You are not authorized to execute self request."}
'API key does not exist': AuthenticationError,
'Order would trigger immediately.': OrderImmediatelyFillable,
'Stop price would trigger immediately.': OrderImmediatelyFillable, # {"code":-2010,"msg":"Stop price would trigger immediately."}
'Order would immediately match and take.': OrderImmediatelyFillable, # {"code":-2010,"msg":"Order would immediately match and take."}
'Account has insufficient balance for requested action.': InsufficientFunds,
'Rest API trading is not enabled.': ExchangeNotAvailable,
"You don't have permission.": PermissionDenied, # {"msg":"You don't have permission.","success":false}
'Market is closed.': ExchangeNotAvailable, # {"code":-1013,"msg":"Market is closed."}
'Too many requests. Please try again later.': DDoSProtection, # {"msg":"Too many requests. Please try again later.","success":false}
'-1000': ExchangeNotAvailable, # {"code":-1000,"msg":"An unknown error occured while processing the request."}
'-1001': ExchangeNotAvailable, # 'Internal error; unable to process your request. Please try again.'
'-1002': AuthenticationError, # 'You are not authorized to execute self request.'
'-1003': RateLimitExceeded, # {"code":-1003,"msg":"Too much request weight used, current limit is 1200 request weight per 1 MINUTE. Please use the websocket for live updates to avoid polling the API."}
'-1013': InvalidOrder, # createOrder -> 'invalid quantity'/'invalid price'/MIN_NOTIONAL
'-1015': RateLimitExceeded, # 'Too many new orders; current limit is %s orders per %s.'
'-1016': ExchangeNotAvailable, # 'This service is no longer available.',
'-1020': BadRequest, # 'This operation is not supported.'
'-1021': InvalidNonce, # 'your time is ahead of server'
'-1022': AuthenticationError, # {"code":-1022,"msg":"Signature for self request is not valid."}
'-1100': BadRequest, # createOrder(symbol, 1, asdf) -> 'Illegal characters found in parameter 'price'
'-1101': BadRequest, # Too many parameters; expected %s and received %s.
'-1102': BadRequest, # Param %s or %s must be sent, but both were empty
'-1103': BadRequest, # An unknown parameter was sent.
'-1104': BadRequest, # Not all sent parameters were read, read 8 parameters but was sent 9
'-1105': BadRequest, # Parameter %s was empty.
'-1106': BadRequest, # Parameter %s sent when not required.
'-1111': BadRequest, # Precision is over the maximum defined for self asset.
'-1112': InvalidOrder, # No orders on book for symbol.
'-1114': BadRequest, # TimeInForce parameter sent when not required.
'-1115': BadRequest, # Invalid timeInForce.
'-1116': BadRequest, # Invalid orderType.
'-1117': BadRequest, # Invalid side.
'-1118': BadRequest, # New client order ID was empty.
'-1119': BadRequest, # Original client order ID was empty.
'-1120': BadRequest, # Invalid interval.
'-1121': BadSymbol, # Invalid symbol.
'-1125': AuthenticationError, # This listenKey does not exist.
'-1127': BadRequest, # More than %s hours between startTime and endTime.
'-1128': BadRequest, # {"code":-1128,"msg":"Combination of optional parameters invalid."}
'-1130': BadRequest, # Data sent for parameter %s is not valid.
'-1131': BadRequest, # recvWindow must be less than 60000
'-2008': AuthenticationError, # {"code":-2008,"msg":"Invalid Api-Key ID."}
'-2010': ExchangeError, # generic error code for createOrder -> 'Account has insufficient balance for requested action.', {"code":-2010,"msg":"Rest API trading is not enabled."}, etc...
'-2011': OrderNotFound, # cancelOrder(1, 'BTC/USDT') -> 'UNKNOWN_ORDER'
'-2013': OrderNotFound, # fetchOrder(1, 'BTC/USDT') -> 'Order does not exist'
'-2014': AuthenticationError, # {"code":-2014, "msg": "API-key format invalid."}
'-2015': AuthenticationError, # "Invalid API-key, IP, or permissions for action."
'-2019': InsufficientFunds, # {"code":-2019,"msg":"Margin is insufficient."}
'-3005': InsufficientFunds, # {"code":-3005,"msg":"Transferring out not allowed. Transfer out amount exceeds max amount."}
'-3006': InsufficientFunds, # {"code":-3006,"msg":"Your borrow amount has exceed maximum borrow amount."}
'-3008': InsufficientFunds, # {"code":-3008,"msg":"Borrow not allowed. Your borrow amount has exceed maximum borrow amount."}
'-3010': ExchangeError, # {"code":-3010,"msg":"Repay not allowed. Repay amount exceeds borrow amount."}
'-3015': ExchangeError, # {"code":-3015,"msg":"Repay amount exceeds borrow amount."}
'-3022': AccountSuspended, # You account's trading is banned.
'-4028': BadRequest, # {"code":-4028,"msg":"Leverage 100 is not valid"}
'-3020': InsufficientFunds, # {"code":-3020,"msg":"Transfer out amount exceeds max amount."}
'-3041': InsufficientFunds, # {"code":-3041,"msg":"Balance is not enough"}
'-5013': InsufficientFunds, # Asset transfer failed: insufficient balance"
'-11008': InsufficientFunds, # {"code":-11008,"msg":"Exceeding the account's maximum borrowable limit."}
'-4051': InsufficientFunds, # {"code":-4051,"msg":"Isolated balance insufficient."}
},
'broad': {
'has no operation privilege': PermissionDenied,
'MAX_POSITION': InvalidOrder, # {"code":-2010,"msg":"Filter failure: MAX_POSITION"}
},
},
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['quote'], self.precisionMode, self.paddingMode)
def currency_to_precision(self, currency, fee):
# info is available in currencies only if the user has configured his api keys
if self.safe_value(self.currencies[currency], 'precision') is not None:
return self.decimal_to_precision(fee, TRUNCATE, self.currencies[currency]['precision'], self.precisionMode, self.paddingMode)
else:
return self.number_to_string(fee)
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
def fetch_time(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTime', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'publicGetTime'
if type == 'future':
method = 'fapiPublicGetTime'
elif type == 'delivery':
method = 'dapiPublicGetTime'
response = getattr(self, method)(query)
return self.safe_integer(response, 'serverTime')
def load_time_difference(self, params={}):
serverTime = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
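# Worked example of the adjustment above (illustrative values): if the local clock reads
# 1650000500000 ms and the server returns 1650000000000 ms, options['timeDifference']
# becomes 500000, so nonce() == milliseconds() - 500000, i.e. signed requests carry a
# timestamp aligned with Binance server time.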
def fetch_currencies(self, params={}):
fetchCurrenciesEnabled = self.safe_value(self.options, 'fetchCurrencies')
if not fetchCurrenciesEnabled:
return None
# self endpoint requires authentication
# while fetchCurrencies is a public API method by design
# therefore we check the keys here
# and fallback to generating the currencies from the markets
if not self.check_required_credentials(False):
return None
# sandbox/testnet does not support sapi endpoints
apiBackup = self.safe_string(self.urls, 'apiBackup')
if apiBackup is not None:
return None
response = self.sapiGetCapitalConfigGetall(params)
result = {}
for i in range(0, len(response)):
#
# {
# coin: 'LINK',
# depositAllEnable: True,
# withdrawAllEnable: True,
# name: 'ChainLink',
# free: '0.06168',
# locked: '0',
# freeze: '0',
# withdrawing: '0',
# ipoing: '0',
# ipoable: '0',
# storage: '0',
# isLegalMoney: False,
# trading: True,
# networkList: [
# {
# network: 'BNB',
# coin: 'LINK',
# withdrawIntegerMultiple: '0',
# isDefault: False,
# depositEnable: True,
# withdrawEnable: True,
# depositDesc: '',
# withdrawDesc: '',
# specialTips: 'Both a MEMO and an Address are required to successfully deposit your LINK BEP2 tokens to Binance.',
# name: 'BEP2',
# resetAddressStatus: False,
# addressRegex: '^(bnb1)[0-9a-z]{38}$',
# memoRegex: '^[0-9A-Za-z\\-_]{1,120}$',
# withdrawFee: '0.002',
# withdrawMin: '0.01',
# withdrawMax: '9999999',
# minConfirm: 1,
# unLockConfirm: 0
# },
# {
# network: 'BSC',
# coin: 'LINK',
# withdrawIntegerMultiple: '0.00000001',
# isDefault: False,
# depositEnable: True,
# withdrawEnable: True,
# depositDesc: '',
# withdrawDesc: '',
# specialTips: '',
# name: 'BEP20(BSC)',
# resetAddressStatus: False,
# addressRegex: '^(0x)[0-9A-Fa-f]{40}$',
# memoRegex: '',
# withdrawFee: '0.005',
# withdrawMin: '0.01',
# withdrawMax: '9999999',
# minConfirm: 15,
# unLockConfirm: 0
# },
# {
# network: 'ETH',
# coin: 'LINK',
# withdrawIntegerMultiple: '0.00000001',
# isDefault: True,
# depositEnable: True,
# withdrawEnable: True,
# depositDesc: '',
# withdrawDesc: '',
# name: 'ERC20',
# resetAddressStatus: False,
# addressRegex: '^(0x)[0-9A-Fa-f]{40}$',
# memoRegex: '',
# withdrawFee: '0.34',
# withdrawMin: '0.68',
# withdrawMax: '0',
# minConfirm: 12,
# unLockConfirm: 0
# }
# ]
# }
#
entry = response[i]
id = self.safe_string(entry, 'coin')
name = self.safe_string(entry, 'name')
code = self.safe_currency_code(id)
precision = None
# start False so the flags reflect whether at least one network enables deposits/withdrawals
isWithdrawEnabled = False
isDepositEnabled = False
networkList = self.safe_value(entry, 'networkList', [])
fees = {}
fee = None
for j in range(0, len(networkList)):
networkItem = networkList[j]
network = self.safe_string(networkItem, 'network')
# name = self.safe_string(networkItem, 'name')
withdrawFee = self.safe_number(networkItem, 'withdrawFee')
depositEnable = self.safe_value(networkItem, 'depositEnable')
withdrawEnable = self.safe_value(networkItem, 'withdrawEnable')
isDepositEnabled = isDepositEnabled or depositEnable
isWithdrawEnabled = isWithdrawEnabled or withdrawEnable
fees[network] = withdrawFee
isDefault = self.safe_value(networkItem, 'isDefault')
if isDefault or fee is None:
fee = withdrawFee
trading = self.safe_value(entry, 'trading')
active = (isWithdrawEnabled and isDepositEnabled and trading)
result[code] = {
'id': id,
'name': name,
'code': code,
'precision': precision,
'info': entry,
'active': active,
'networks': networkList,
'fee': fee,
'fees': fees,
'limits': self.limits,
}
return result
def fetch_markets(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
if (type != 'spot') and (type != 'future') and (type != 'margin') and (type != 'delivery'):
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to 'spot', 'margin', 'delivery' or 'future'") # eslint-disable-line quotes
method = 'publicGetExchangeInfo'
if type == 'future':
method = 'fapiPublicGetExchangeInfo'
elif type == 'delivery':
method = 'dapiPublicGetExchangeInfo'
response = getattr(self, method)(query)
#
# spot / margin
#
# {
# "timezone":"UTC",
# "serverTime":1575416692969,
# "rateLimits":[
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":1200},
# {"rateLimitType":"ORDERS","interval":"SECOND","intervalNum":10,"limit":100},
# {"rateLimitType":"ORDERS","interval":"DAY","intervalNum":1,"limit":200000}
# ],
# "exchangeFilters":[],
# "symbols":[
# {
# "symbol":"ETHBTC",
# "status":"TRADING",
# "baseAsset":"ETH",
# "baseAssetPrecision":8,
# "quoteAsset":"BTC",
# "quotePrecision":8,
# "baseCommissionPrecision":8,
# "quoteCommissionPrecision":8,
# "orderTypes":["LIMIT","LIMIT_MAKER","MARKET","STOP_LOSS_LIMIT","TAKE_PROFIT_LIMIT"],
# "icebergAllowed":true,
# "ocoAllowed":true,
# "quoteOrderQtyMarketAllowed":true,
# "isSpotTradingAllowed":true,
# "isMarginTradingAllowed":true,
# "filters":[
# {"filterType":"PRICE_FILTER","minPrice":"0.00000100","maxPrice":"100000.00000000","tickSize":"0.00000100"},
# {"filterType":"PERCENT_PRICE","multiplierUp":"5","multiplierDown":"0.2","avgPriceMins":5},
# {"filterType":"LOT_SIZE","minQty":"0.00100000","maxQty":"100000.00000000","stepSize":"0.00100000"},
# {"filterType":"MIN_NOTIONAL","minNotional":"0.00010000","applyToMarket":true,"avgPriceMins":5},
# {"filterType":"ICEBERG_PARTS","limit":10},
# {"filterType":"MARKET_LOT_SIZE","minQty":"0.00000000","maxQty":"63100.00000000","stepSize":"0.00000000"},
# {"filterType":"MAX_NUM_ALGO_ORDERS","maxNumAlgoOrders":5}
# ]
# },
# ],
# }
#
# futures/usdt-margined(fapi)
#
# {
# "timezone":"UTC",
# "serverTime":1575417244353,
# "rateLimits":[
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":1200},
# {"rateLimitType":"ORDERS","interval":"MINUTE","intervalNum":1,"limit":1200}
# ],
# "exchangeFilters":[],
# "symbols":[
# {
# "symbol":"BTCUSDT",
# "status":"TRADING",
# "maintMarginPercent":"2.5000",
# "requiredMarginPercent":"5.0000",
# "baseAsset":"BTC",
# "quoteAsset":"USDT",
# "pricePrecision":2,
# "quantityPrecision":3,
# "baseAssetPrecision":8,
# "quotePrecision":8,
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.01"},
# {"stepSize":"0.001","filterType":"LOT_SIZE","maxQty":"1000","minQty":"0.001"},
# {"stepSize":"0.001","filterType":"MARKET_LOT_SIZE","maxQty":"1000","minQty":"0.001"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.8500","multiplierUp":"1.1500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes":["LIMIT","MARKET","STOP"],
# "timeInForce":["GTC","IOC","FOK","GTX"]
# }
# ]
# }
#
# delivery/coin-margined(dapi)
#
# {
# "timezone": "UTC",
# "serverTime": 1597667052958,
# "rateLimits": [
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":6000},
# {"rateLimitType":"ORDERS","interval":"MINUTE","intervalNum":1,"limit":6000}
# ],
# "exchangeFilters": [],
# "symbols": [
# {
# "symbol": "BTCUSD_200925",
# "pair": "BTCUSD",
# "contractType": "CURRENT_QUARTER",
# "deliveryDate": 1601020800000,
# "onboardDate": 1590739200000,
# "contractStatus": "TRADING",
# "contractSize": 100,
# "marginAsset": "BTC",
# "maintMarginPercent": "2.5000",
# "requiredMarginPercent": "5.0000",
# "baseAsset": "BTC",
# "quoteAsset": "USD",
# "pricePrecision": 1,
# "quantityPrecision": 0,
# "baseAssetPrecision": 8,
# "quotePrecision": 8,
# "equalQtyPrecision": 4,
# "filters": [
# {"minPrice":"0.1","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.1"},
# {"stepSize":"1","filterType":"LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"stepSize":"0","filterType":"MARKET_LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.9500","multiplierUp":"1.0500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes": ["LIMIT","MARKET","STOP","STOP_MARKET","TAKE_PROFIT","TAKE_PROFIT_MARKET","TRAILING_STOP_MARKET"],
# "timeInForce": ["GTC","IOC","FOK","GTX"]
# },
# {
# "symbol": "BTCUSD_PERP",
# "pair": "BTCUSD",
# "contractType": "PERPETUAL",
# "deliveryDate": 4133404800000,
# "onboardDate": 1596006000000,
# "contractStatus": "TRADING",
# "contractSize": 100,
# "marginAsset": "BTC",
# "maintMarginPercent": "2.5000",
# "requiredMarginPercent": "5.0000",
# "baseAsset": "BTC",
# "quoteAsset": "USD",
# "pricePrecision": 1,
# "quantityPrecision": 0,
# "baseAssetPrecision": 8,
# "quotePrecision": 8,
# "equalQtyPrecision": 4,
# "filters": [
# {"minPrice":"0.1","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.1"},
# {"stepSize":"1","filterType":"LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"stepSize":"1","filterType":"MARKET_LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.8500","multiplierUp":"1.1500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes": ["LIMIT","MARKET","STOP","STOP_MARKET","TAKE_PROFIT","TAKE_PROFIT_MARKET","TRAILING_STOP_MARKET"],
# "timeInForce": ["GTC","IOC","FOK","GTX"]
# }
# ]
# }
#
if self.options['adjustForTimeDifference']:
self.load_time_difference()
markets = self.safe_value(response, 'symbols', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
spot = (type == 'spot')
future = (type == 'future')
delivery = (type == 'delivery')
id = self.safe_string(market, 'symbol')
lowercaseId = self.safe_string_lower(market, 'symbol')
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
contractType = self.safe_string(market, 'contractType')
idSymbol = (future or delivery) and (contractType != 'PERPETUAL')
symbol = None
expiry = None
if idSymbol:
symbol = id
expiry = self.safe_integer(market, 'deliveryDate')
else:
symbol = base + '/' + quote
filters = self.safe_value(market, 'filters', [])
filtersByType = self.index_by(filters, 'filterType')
precision = {
'base': self.safe_integer(market, 'baseAssetPrecision'),
'quote': self.safe_integer(market, 'quotePrecision'),
'amount': self.safe_integer(market, 'quantityPrecision'),
'price': self.safe_integer(market, 'pricePrecision'),
}
status = self.safe_string_2(market, 'status', 'contractStatus')
active = (status == 'TRADING')
margin = self.safe_value(market, 'isMarginTradingAllowed', False)
contractSize = None
fees = self.fees
if future or delivery:
contractSize = self.safe_string(market, 'contractSize', '1')
fees = self.fees[type]
maker = fees['trading']['maker']
taker = fees['trading']['taker']
settleId = self.safe_string(market, 'marginAsset')
settle = self.safe_currency_code(settleId)
entry = {
'id': id,
'lowercaseId': lowercaseId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'spot': spot,
'type': type,
'margin': margin,
'future': future,
'delivery': delivery,
'linear': future,
'inverse': delivery,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'settleId': settleId,
'settle': settle,
'active': active,
'precision': precision,
'contractSize': contractSize,
'maker': maker,
'taker': taker,
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
}
if 'PRICE_FILTER' in filtersByType:
filter = self.safe_value(filtersByType, 'PRICE_FILTER', {})
tickSize = self.safe_string(filter, 'tickSize')
entry['precision']['price'] = self.precision_from_string(tickSize)
# PRICE_FILTER reports zero values for maxPrice
# since they updated filter types in November 2018
# https://github.com/ccxt/ccxt/issues/4286
# therefore limits['price']['max'] doesn't have any meaningful value except None
entry['limits']['price'] = {
'min': self.safe_number(filter, 'minPrice'),
'max': self.safe_number(filter, 'maxPrice'),
}
if 'LOT_SIZE' in filtersByType:
filter = self.safe_value(filtersByType, 'LOT_SIZE', {})
stepSize = self.safe_string(filter, 'stepSize')
entry['precision']['amount'] = self.precision_from_string(stepSize)
entry['limits']['amount'] = {
'min': self.safe_number(filter, 'minQty'),
'max': self.safe_number(filter, 'maxQty'),
}
if 'MARKET_LOT_SIZE' in filtersByType:
filter = self.safe_value(filtersByType, 'MARKET_LOT_SIZE', {})
entry['limits']['market'] = {
'min': self.safe_number(filter, 'minQty'),
'max': self.safe_number(filter, 'maxQty'),
}
if 'MIN_NOTIONAL' in filtersByType:
filter = self.safe_value(filtersByType, 'MIN_NOTIONAL', {})
entry['limits']['cost']['min'] = self.safe_number_2(filter, 'minNotional', 'notional')
result.append(entry)
return result
def fetch_balance(self, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetAccount'
if type == 'future':
options = self.safe_value(self.options, type, {})
fetchBalanceOptions = self.safe_value(options, 'fetchBalance', {})
method = self.safe_string(fetchBalanceOptions, 'method', 'fapiPrivateV2GetAccount')
elif type == 'delivery':
options = self.safe_value(self.options, type, {})
fetchBalanceOptions = self.safe_value(options, 'fetchBalance', {})
method = self.safe_string(fetchBalanceOptions, 'method', 'dapiPrivateGetAccount')
elif type == 'margin':
method = 'sapiGetMarginAccount'
elif type == 'savings':
method = 'sapiGetLendingUnionAccount'
elif type == 'funding':
method = 'sapiPostAssetGetFundingAsset'
query = self.omit(params, 'type')
response = getattr(self, method)(query)
#
# spot
#
# {
# makerCommission: 10,
# takerCommission: 10,
# buyerCommission: 0,
# sellerCommission: 0,
# canTrade: True,
# canWithdraw: True,
# canDeposit: True,
# updateTime: 1575357359602,
# accountType: "MARGIN",
# balances: [
# {asset: "BTC", free: "0.00219821", locked: "0.00000000" },
# ]
# }
#
# margin
#
# {
# "borrowEnabled":true,
# "marginLevel":"999.00000000",
# "totalAssetOfBtc":"0.00000000",
# "totalLiabilityOfBtc":"0.00000000",
# "totalNetAssetOfBtc":"0.00000000",
# "tradeEnabled":true,
# "transferEnabled":true,
# "userAssets":[
# {"asset":"MATIC","borrowed":"0.00000000","free":"0.00000000","interest":"0.00000000","locked":"0.00000000","netAsset":"0.00000000"},
# {"asset":"VET","borrowed":"0.00000000","free":"0.00000000","interest":"0.00000000","locked":"0.00000000","netAsset":"0.00000000"},
# {"asset":"USDT","borrowed":"0.00000000","free":"0.00000000","interest":"0.00000000","locked":"0.00000000","netAsset":"0.00000000"}
# ],
# }
#
# futures(fapi)
#
# fapiPrivateGetAccount
#
# {
# "feeTier":0,
# "canTrade":true,
# "canDeposit":true,
# "canWithdraw":true,
# "updateTime":0,
# "totalInitialMargin":"0.00000000",
# "totalMaintMargin":"0.00000000",
# "totalWalletBalance":"4.54000000",
# "totalUnrealizedProfit":"0.00000000",
# "totalMarginBalance":"4.54000000",
# "totalPositionInitialMargin":"0.00000000",
# "totalOpenOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"4.54000000",
# "assets":[
# {
# "asset":"USDT",
# "walletBalance":"4.54000000",
# "unrealizedProfit":"0.00000000",
# "marginBalance":"4.54000000",
# "maintMargin":"0.00000000",
# "initialMargin":"0.00000000",
# "positionInitialMargin":"0.00000000",
# "openOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"4.54000000"
# }
# ],
# "positions":[
# {
# "symbol":"BTCUSDT",
# "initialMargin":"0.00000",
# "maintMargin":"0.00000",
# "unrealizedProfit":"0.00000000",
# "positionInitialMargin":"0.00000",
# "openOrderInitialMargin":"0.00000"
# }
# ]
# }
#
# fapiPrivateV2GetAccount
#
# {
# "feeTier":0,
# "canTrade":true,
# "canDeposit":true,
# "canWithdraw":true,
# "updateTime":0,
# "totalInitialMargin":"0.00000000",
# "totalMaintMargin":"0.00000000",
# "totalWalletBalance":"0.00000000",
# "totalUnrealizedProfit":"0.00000000",
# "totalMarginBalance":"0.00000000",
# "totalPositionInitialMargin":"0.00000000",
# "totalOpenOrderInitialMargin":"0.00000000",
# "totalCrossWalletBalance":"0.00000000",
# "totalCrossUnPnl":"0.00000000",
# "availableBalance":"0.00000000",
# "maxWithdrawAmount":"0.00000000",
# "assets":[
# {
# "asset":"BNB",
# "walletBalance":"0.01000000",
# "unrealizedProfit":"0.00000000",
# "marginBalance":"0.01000000",
# "maintMargin":"0.00000000",
# "initialMargin":"0.00000000",
# "positionInitialMargin":"0.00000000",
# "openOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"0.01000000",
# "crossWalletBalance":"0.01000000",
# "crossUnPnl":"0.00000000",
# "availableBalance":"0.01000000"
# }
# ],
# "positions":[
# {
# "symbol":"BTCUSDT",
# "initialMargin":"0",
# "maintMargin":"0",
# "unrealizedProfit":"0.00000000",
# "positionInitialMargin":"0",
# "openOrderInitialMargin":"0",
# "leverage":"20",
# "isolated":false,
# "entryPrice":"0.00000",
# "maxNotional":"5000000",
# "positionSide":"BOTH"
# },
# ]
# }
#
# fapiPrivateV2GetBalance
#
# [
# {
# "accountAlias":"FzFzXquXXqoC",
# "asset":"BNB",
# "balance":"0.01000000",
# "crossWalletBalance":"0.01000000",
# "crossUnPnl":"0.00000000",
# "availableBalance":"0.01000000",
# "maxWithdrawAmount":"0.01000000"
# }
# ]
#
# savings
#
# {
# "totalAmountInBTC": "0.3172",
# "totalAmountInUSDT": "10000",
# "totalFixedAmountInBTC": "0.3172",
# "totalFixedAmountInUSDT": "10000",
# "totalFlexibleInBTC": "0",
# "totalFlexibleInUSDT": "0",
# "positionAmountVos": [
# {
# "asset": "USDT",
# "amount": "10000",
# "amountInBTC": "0.3172",
# "amountInUSDT": "10000"
# },
# {
# "asset": "BUSD",
# "amount": "0",
# "amountInBTC": "0",
# "amountInUSDT": "0"
# }
# ]
# }
#
# binance pay
#
# [
# {
# "asset": "BUSD",
# "free": "1129.83",
# "locked": "0",
# "freeze": "0",
# "withdrawing": "0"
# }
# ]
#
result = {
'info': response,
}
timestamp = None
if (type == 'spot') or (type == 'margin'):
timestamp = self.safe_integer(response, 'updateTime')
balances = self.safe_value_2(response, 'balances', 'userAssets', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'free')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
elif type == 'savings':
positionAmountVos = self.safe_value(response, 'positionAmountVos')
for i in range(0, len(positionAmountVos)):
entry = positionAmountVos[i]
currencyId = self.safe_string(entry, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
usedAndTotal = self.safe_string(entry, 'amount')
account['total'] = usedAndTotal
account['used'] = usedAndTotal
result[code] = account
elif type == 'funding':
for i in range(0, len(response)):
entry = response[i]
account = self.account()
currencyId = self.safe_string(entry, 'asset')
code = self.safe_currency_code(currencyId)
account['free'] = self.safe_string(entry, 'free')
frozen = self.safe_string(entry, 'freeze')
withdrawing = self.safe_string(entry, 'withdrawing')
locked = self.safe_string(entry, 'locked')
account['used'] = Precise.string_add(frozen, Precise.string_add(locked, withdrawing))
result[code] = account
else:
balances = response
if not isinstance(response, list):
balances = self.safe_value(response, 'assets', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'availableBalance')
account['used'] = self.safe_string(balance, 'initialMargin')
account['total'] = self.safe_string_2(balance, 'marginBalance', 'balance')
result[code] = account
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
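# parse_balance fills in the remaining free/used/total fields and returns the standard ccxt
# balance structure: {'info': ..., 'timestamp': ..., 'datetime': ..., CODE: {'free', 'used', 'total'}, ...}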
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 100, max 5000, see https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book
method = 'publicGetDepth'
if market['linear']:
method = 'fapiPublicGetDepth'
elif market['inverse']:
method = 'dapiPublicGetDepth'
response = getattr(self, method)(self.extend(request, params))
#
# future
#
# {
# "lastUpdateId":333598053905,
# "E":1618631511986,
# "T":1618631511964,
# "bids":[
# ["2493.56","20.189"],
# ["2493.54","1.000"],
# ["2493.51","0.005"],["2493.37","0.280"],["2493.31","0.865"],["2493.30","0.514"],["2493.29","2.309"],["2493.25","1.500"],["2493.23","0.012"],["2493.22","7.240"],["2493.21","3.349"],["2493.20","2.030"],["2493.19","58.118"],["2493.18","174.836"],["2493.17","14.436"],["2493.12","2.000"],["2493.09","3.232"],["2493.08","2.010"],["2493.07","2.000"],["2493.06","2.000"],["2493.05","2.684"],["2493.04","2.000"],["2493.03","2.000"],["2493.02","5.000"],["2493.01","2.000"],["2493.00","1.035"],["2492.99","8.546"],["2492.98","4.012"],["2492.96","40.937"],["2492.95","40.595"],["2492.94","21.051"],["2492.92","4.012"],["2492.91","0.200"],["2492.85","2.000"],["2492.83","24.929"],["2492.81","50.000"],["2492.80","0.030"],["2492.76","0.264"],["2492.73","32.098"],["2492.71","32.664"],["2492.70","4.228"],["2492.65","1.230"],["2492.61","5.598"],["2492.60","34.786"],["2492.58","10.393"],["2492.54","4.543"],["2492.50","0.400"],["2492.49","0.600"],["2492.48","4.941"],["2492.45","1.207"],["2492.43","4.878"],["2492.40","4.762"],["2492.39","36.489"],["2492.37","3.000"],["2492.36","4.882"],["2492.33","28.117"],["2492.29","0.490"],["2492.28","76.365"],["2492.27","0.200"],["2492.23","3.804"],["2492.22","1.000"],["2492.19","20.011"],["2492.17","13.500"],["2492.16","4.058"],["2492.14","35.673"],["2492.13","1.915"],["2492.12","76.896"],["2492.10","8.050"],["2492.01","16.615"],["2492.00","10.335"],["2491.95","5.880"],["2491.93","10.000"],["2491.92","3.916"],["2491.90","0.795"],["2491.87","22.000"],["2491.85","1.260"],["2491.84","4.014"],["2491.83","6.668"],["2491.73","0.855"],["2491.72","7.572"],["2491.71","7.000"],["2491.68","3.916"],["2491.66","2.500"],["2491.64","4.945"],["2491.63","2.302"],["2491.62","4.012"],["2491.61","16.170"],["2491.60","0.793"],["2491.59","0.403"],["2491.57","17.445"],["2491.56","88.177"],["2491.53","10.000"],["2491.47","0.013"],["2491.45","0.157"],["2491.44","11.733"],["2491.39","3.593"],["2491.38","3.570"],["2491.36","28.077"],["2491.35","0.808"],["2491.30","0.065"],["2491.29","4.880"],["2491.27","22.000"],["2491.24","9.021"],["2491.23","68.393"],["2491.22","0.050"],["2491.21","1.316"],["2491.20","4.000"],["2491.19","0.108"],["2491.18","0.498"],["2491.17","5.000"],["2491.14","10.000"],["2491.13","0.383"],["2491.12","125.959"],["2491.10","0.870"],["2491.08","10.518"],["2491.05","54.743"],["2491.01","7.980"],["2490.96","3.916"],["2490.95","0.135"],["2490.91","0.140"],["2490.89","8.424"],["2490.88","5.930"],["2490.84","1.208"],["2490.83","2.005"],["2490.82","5.517"],["2490.81","73.707"],["2490.80","1.042"],["2490.79","9.626"],["2490.72","3.916"],["2490.70","0.148"],["2490.69","0.403"],["2490.68","0.012"],["2490.67","21.887"],["2490.66","0.008"],["2490.64","11.500"],["2490.61","0.005"],["2490.58","68.175"],["2490.55","0.218"],["2490.54","14.132"],["2490.53","5.157"],["2490.50","0.018"],["2490.49","9.216"],["2490.48","3.979"],["2490.47","1.884"],["2490.44","0.003"],["2490.36","14.132"],["2490.35","2.008"],["2490.34","0.200"],["2490.33","0.015"],["2490.30","0.065"],["2490.29","5.500"],["2490.28","24.203"],["2490.26","4.373"],["2490.25","0.026"],["2490.24","4.000"],["2490.23","177.628"],["2490.22","14.132"],["2490.21","0.181"],["2490.20","0.645"],["2490.19","9.024"],["2490.18","0.108"],["2490.17","0.085"],["2490.16","0.077"],["2490.14","0.275"],["2490.10","0.080"],["2490.07","0.015"],["2490.04","6.056"],["2490.00","6.796"],["2489.98","0.005"],["2489.97","0.258"],["2489.96","10.084"],["2489.95","1.202"],["2489.91","10.121"],["2489.90","10.084"],["2489.88","0.040"],["2489.87","0.004"],["2489.85","0.003"],["2
489.76","3.916"],["2489.73","10.084"],["2489.71","0.272"],["2489.70","12.834"],["2489.67","0.403"],["2489.66","0.362"],["2489.64","0.738"],["2489.63","193.236"],["2489.62","14.152"],["2489.61","0.157"],["2489.59","4.011"],["2489.57","0.015"],["2489.55","0.046"],["2489.52","3.921"],["2489.51","0.005"],["2489.45","80.000"],["2489.44","0.649"],["2489.43","10.088"],["2489.39","0.009"],["2489.37","14.132"],["2489.35","72.262"],["2489.34","10.084"],["2489.33","14.136"],["2489.32","23.953"],["2489.30","0.065"],["2489.28","8.136"],["2489.24","8.022"],["2489.19","14.132"],["2489.18","0.085"],["2489.17","0.108"],["2489.14","10.084"],["2489.13","3.142"],["2489.12","77.827"],["2489.11","10.084"],["2489.10","0.080"],["2489.09","50.024"],["2489.04","3.916"],["2489.03","0.008"],["2489.01","10.084"],["2488.99","0.135"],["2488.98","0.187"],["2488.96","0.324"],["2488.92","0.064"],["2488.85","16.056"],["2488.83","14.132"],["2488.80","3.916"],["2488.79","10.084"],["2488.77","4.414"],["2488.76","0.005"],["2488.75","13.685"],["2488.73","0.020"],["2488.69","0.157"],["2488.60","80.000"],["2488.58","10.164"],["2488.57","0.004"],["2488.56","3.933"],["2488.54","3.311"],["2488.51","12.814"],["2488.50","80.099"],["2488.48","0.684"],["2488.44","0.024"],["2488.42","68.180"],["2488.39","4.412"],["2488.38","26.138"],["2488.34","44.134"],["2488.32","8.014"],["2488.30","0.065"],["2488.29","0.009"],["2488.27","4.513"],["2488.26","4.222"],["2488.25","80.000"],["2488.23","0.007"],["2488.22","0.281"],["2488.19","0.100"],["2488.18","80.100"],["2488.17","80.000"],["2488.16","8.197"],["2488.15","79.184"],["2488.13","0.025"],["2488.11","0.050"],["2488.10","0.080"],["2488.08","3.919"],["2488.04","40.103"],["2488.03","0.120"],["2488.02","0.008"],["2488.01","0.140"],["2488.00","0.406"],["2487.99","0.384"],["2487.98","0.060"],["2487.96","8.010"],["2487.94","0.246"],["2487.93","0.020"],["2487.91","0.136"],["2487.87","0.403"],["2487.84","17.910"],["2487.81","0.005"],["2487.80","0.073"],["2487.74","36.000"],["2487.73","3.225"],["2487.72","0.018"],["2487.71","0.319"],["2487.70","0.006"],["2487.66","0.003"],["2487.64","0.003"],["2487.63","0.008"],["2487.62","0.040"],["2487.60","3.916"],["2487.54","0.805"],["2487.52","0.022"],["2487.51","0.003"],["2487.50","0.051"],["2487.49","6.081"],["2487.47","80.015"],["2487.46","4.735"],["2487.45","30.000"],["2487.41","0.096"],["2487.40","0.078"],["2487.39","0.103"],["2487.37","2.279"],["2487.36","8.152"],["2487.35","2.145"],["2487.32","12.816"],["2487.31","10.023"],["2487.30","0.157"],["2487.27","0.005"],["2487.26","4.010"],["2487.25","0.008"],["2487.24","0.003"],["2487.23","0.014"],["2487.20","0.085"],["2487.17","0.011"],["2487.14","3.217"],["2487.12","3.916"],["2487.11","0.300"],["2487.10","0.088"],["2487.08","10.097"],["2487.07","1.467"],["2487.04","0.600"],["2487.01","18.363"],["2487.00","0.292"],["2486.99","0.014"],["2486.98","0.144"],["2486.97","0.443"],["2486.92","0.005"],["2486.91","0.016"],["2486.89","3.364"],["2486.88","4.166"],["2486.84","24.306"],["2486.83","0.181"],["2486.81","0.015"],["2486.80","0.082"],["2486.79","0.007"],["2486.76","0.011"],["2486.74","0.050"],["2486.73","0.782"],["2486.72","0.004"],["2486.69","0.003"],["2486.68","8.018"],["2486.66","10.004"],["2486.65","40.391"],["2486.64","3.916"],["2486.61","0.489"],["2486.60","0.196"],["2486.57","0.396"],["2486.55","4.015"],["2486.51","3.000"],["2486.50","0.003"],["2486.48","0.005"],["2486.47","0.010"],["2486.45","4.011"],["2486.44","0.602"],["2486.43","0.566"],["2486.42","3.140"],["2486.40","3.958"],["2486.39","0.003"],["2486.34","0
.010"],["2486.31","6.281"],["2486.27","0.005"],["2486.26","0.004"],["2486.23","10.088"],["2486.22","0.015"],["2486.17","0.030"],["2486.16","3.916"],["2486.15","0.020"],["2486.13","13.130"],["2486.12","82.414"],["2486.11","0.244"],["2486.10","0.132"],["2486.08","0.720"],["2486.06","0.385"],["2486.01","0.004"],["2486.00","2.359"],["2485.99","154.159"],["2485.98","20.054"],["2485.96","1.000"],["2485.95","0.190"],["2485.92","4.463"],["2485.90","1.557"],["2485.87","0.402"],["2485.85","0.114"],["2485.81","0.900"],["2485.76","4.700"],["2485.75","0.300"],["2485.74","0.196"],["2485.73","4.010"],["2485.72","0.323"],["2485.70","0.263"],["2485.69","0.261"],["2485.68","3.688"],["2485.67","0.005"],["2485.64","1.216"],["2485.63","0.005"],["2485.62","0.015"],["2485.61","0.033"],["2485.60","0.004"],["2485.58","2.012"],["2485.56","0.020"],["2485.54","0.699"],["2485.52","0.003"],["2485.51","1.830"],["2485.48","5.964"],["2485.47","0.015"],["2485.44","7.251"],["2485.43","0.006"],["2485.42","0.644"],["2485.40","8.026"],["2485.38","0.489"],["2485.36","0.014"],["2485.35","0.005"],["2485.31","1.507"],["2485.30","2.107"],["2485.29","0.039"],["2485.28","0.642"],["2485.26","1.990"],["2485.25","4.996"],["2485.23","0.003"],["2485.22","0.277"],["2485.21","0.121"],["2485.20","3.952"],["2485.18","0.006"],["2485.17","0.043"],["2485.15","4.008"],["2485.14","4.434"],["2485.13","1.003"],["2485.05","0.204"],["2485.04","0.254"],["2485.02","5.000"],["2485.01","0.050"],["2485.00","80.821"],["2484.96","3.941"],["2484.95","10.023"],["2484.94","13.935"],["2484.92","0.059"],["2484.90","150.000"],["2484.89","0.004"],["2484.88","150.127"],["2484.87","0.004"],["2484.85","0.100"],["2484.83","0.006"],["2484.82","0.030"],["2484.81","1.246"],["2484.80","0.003"],["2484.79","0.045"],["2484.77","0.003"],["2484.74","0.036"],["2484.72","3.919"],["2484.70","0.134"],["2484.68","1.111"],["2484.66","76.955"],["2484.60","2.580"],["2484.59","31.432"],["2484.58","1.468"],["2484.55","1.153"],["2484.54","0.265"],["2484.53","20.024"],["2484.51","1.047"],["2484.50","0.818"],["2484.49","0.022"],["2484.48","3.887"],["2484.46","0.048"],["2484.45","0.224"],["2484.44","0.174"],["2484.43","223.079"],["2484.42","0.014"],["2484.41","1.115"],["2484.39","26.090"],["2484.38","0.066"],["2484.37","0.121"],["2484.34","0.255"],["2484.33","23.968"],["2484.29","0.085"],["2484.27","1.128"],["2484.26","1.456"],["2484.24","3.916"],["2484.23","28.126"],["2484.22","1.329"],["2484.19","2.015"],["2484.18","0.263"],["2484.15","15.489"],["2484.14","1.135"],["2484.13","0.572"],["2484.12","8.032"],["2484.11","0.021"],["2484.09","0.059"],["2484.08","0.038"],["2484.07","0.147"],["2484.05","24.156"],["2484.04","0.008"],["2484.01","1.184"],["2484.00","4.641"],["2483.99","0.006"],["2483.97","0.294"],["2483.96","0.424"],["2483.94","3.660"],["2483.93","2.067"],["2483.92","0.008"],["2483.89","0.141"],["2483.88","1.089"],
# ["2483.87","110.000"],["2483.85","4.018"],["2483.81","150.077"],["2483.80","0.003"],["2483.77","0.020"]
# ],
# "asks":[
# ["2493.57","0.877"],
# ["2493.62","0.063"],
# ["2493.71","12.054"],
# ]
# }
timestamp = self.safe_integer(response, 'T')
orderbook = self.parse_order_book(response, symbol, timestamp)
orderbook['nonce'] = self.safe_integer(response, 'lastUpdateId')
return orderbook
def parse_ticker(self, ticker, market=None):
#
# {
# symbol: 'ETHBTC',
# priceChange: '0.00068700',
# priceChangePercent: '2.075',
# weightedAvgPrice: '0.03342681',
# prevClosePrice: '0.03310300',
# lastPrice: '0.03378900',
# lastQty: '0.07700000',
# bidPrice: '0.03378900',
# bidQty: '7.16800000',
# askPrice: '0.03379000',
# askQty: '24.00000000',
# openPrice: '0.03310200',
# highPrice: '0.03388900',
# lowPrice: '0.03306900',
# volume: '205478.41000000',
# quoteVolume: '6868.48826294',
# openTime: 1601469986932,
# closeTime: 1601556386932,
# firstId: 196098772,
# lastId: 196186315,
# count: 87544
# }
#
# coinm
# {
# baseVolume: '214549.95171161',
# closeTime: '1621965286847',
# count: '1283779',
# firstId: '152560106',
# highPrice: '39938.3',
# lastId: '153843955',
# lastPrice: '37993.4',
# lastQty: '1',
# lowPrice: '36457.2',
# openPrice: '37783.4',
# openTime: '1621878840000',
# pair: 'BTCUSD',
# priceChange: '210.0',
# priceChangePercent: '0.556',
# symbol: 'BTCUSD_PERP',
# volume: '81990451',
# weightedAvgPrice: '38215.08713747'
# }
#
timestamp = self.safe_integer(ticker, 'closeTime')
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'lastPrice')
isCoinm = ('baseVolume' in ticker)
baseVolume = None
quoteVolume = None
if isCoinm:
baseVolume = self.safe_number(ticker, 'baseVolume')
quoteVolume = self.safe_number(ticker, 'volume')
else:
baseVolume = self.safe_number(ticker, 'volume')
quoteVolume = self.safe_number(ticker, 'quoteVolume')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'highPrice'),
'low': self.safe_number(ticker, 'lowPrice'),
'bid': self.safe_number(ticker, 'bidPrice'),
'bidVolume': self.safe_number(ticker, 'bidQty'),
'ask': self.safe_number(ticker, 'askPrice'),
'askVolume': self.safe_number(ticker, 'askQty'),
'vwap': self.safe_number(ticker, 'weightedAvgPrice'),
'open': self.safe_number(ticker, 'openPrice'),
'close': last,
'last': last,
'previousClose': self.safe_number(ticker, 'prevClosePrice'), # previous day close
'change': self.safe_number(ticker, 'priceChange'),
'percentage': self.safe_number(ticker, 'priceChangePercent'),
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_status(self, params={}):
response = self.sapiGetSystemStatus(params)
status = self.safe_string(response, 'status')
if status is not None:
status = 'ok' if (status == '0') else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'publicGetTicker24hr'
if market['linear']:
method = 'fapiPublicGetTicker24hr'
elif market['inverse']:
method = 'dapiPublicGetTicker24hr'
response = getattr(self, method)(self.extend(request, params))
if isinstance(response, list):
firstTicker = self.safe_value(response, 0, {})
return self.parse_ticker(firstTicker, market)
return self.parse_ticker(response, market)
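#
# A minimal usage sketch for fetch_ticker, assuming this class is the ccxt binance exchange
# exposed as ccxt.binance(the instance name and symbol below are illustrative only);
# the method routes to fapi/dapi automatically for linear/inverse markets:
#
#     import ccxt
#     exchange = ccxt.binance()
#     ticker = exchange.fetch_ticker('ETH/BTC')
#     print(ticker['last'], ticker['baseVolume'], ticker['quoteVolume'])
#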
def fetch_bids_asks(self, symbols=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBidsAsks', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = None
if type == 'future':
method = 'fapiPublicGetTickerBookTicker'
elif type == 'delivery':
method = 'dapiPublicGetTickerBookTicker'
else:
method = 'publicGetTickerBookTicker'
response = getattr(self, method)(query)
return self.parse_tickers(response, symbols)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
defaultMethod = None
if type == 'future':
defaultMethod = 'fapiPublicGetTicker24hr'
elif type == 'delivery':
defaultMethod = 'dapiPublicGetTicker24hr'
else:
defaultMethod = 'publicGetTicker24hr'
method = self.safe_string(self.options, 'fetchTickersMethod', defaultMethod)
response = getattr(self, method)(query)
return self.parse_tickers(response, symbols)
def parse_ohlcv(self, ohlcv, market=None):
# when api method = publicGetKlines or fapiPublicGetKlines or dapiPublicGetKlines
# [
# 1591478520000, # open time
# "0.02501300", # open
# "0.02501800", # high
# "0.02500000", # low
# "0.02500000", # close
# "22.19000000", # volume
# 1591478579999, # close time
# "0.55490906", # quote asset volume
# 40, # number of trades
# "10.92900000", # taker buy base asset volume
# "0.27336462", # taker buy quote asset volume
# "0" # ignore
# ]
#
# when api method = fapiPublicGetMarkPriceKlines or fapiPublicGetIndexPriceKlines
# [
# [
# 1591256460000, # Open time
# "9653.29201333", # Open
# "9654.56401333", # High
# "9653.07367333", # Low
# "9653.07367333", # Close(or latest price)
# "0", # Ignore
# 1591256519999, # Close time
# "0", # Ignore
# 60, # Number of basic data
# "0", # Ignore
# "0", # Ignore
# "0" # Ignore
# ]
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
# binance docs say that the default limit is 500, max 1500 for futures, max 1000 for spot markets
# in practice, a time range wider than 500 candles does not work correctly
defaultLimit = 500
maxLimit = 1500
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
limit = defaultLimit if (limit is None) else min(limit, maxLimit)
request = {
'interval': self.timeframes[timeframe],
'limit': limit,
}
if price == 'index':
request['pair'] = market['id'] # Index price takes this argument instead of symbol
else:
request['symbol'] = market['id']
# duration = self.parse_timeframe(timeframe)
if since is not None:
request['startTime'] = since
#
# It didn't work before without the endTime
# https://github.com/ccxt/ccxt/issues/8454
#
# if since > 0:
# endTime = self.sum(since, limit * duration * 1000 - 1)
# now = self.milliseconds()
# request['endTime'] = min(now, endTime)
# }
method = 'publicGetKlines'
if price == 'mark':
if market['inverse']:
method = 'dapiPublicGetMarkPriceKlines'
else:
method = 'fapiPublicGetMarkPriceKlines'
elif price == 'index':
if market['inverse']:
method = 'dapiPublicGetIndexPriceKlines'
else:
method = 'fapiPublicGetIndexPriceKlines'
elif market['linear']:
method = 'fapiPublicGetKlines'
elif market['inverse']:
method = 'dapiPublicGetKlines'
response = getattr(self, method)(self.extend(request, params))
#
# [
# [1591478520000,"0.02501300","0.02501800","0.02500000","0.02500000","22.19000000",1591478579999,"0.55490906",40,"10.92900000","0.27336462","0"],
# [1591478580000,"0.02499600","0.02500900","0.02499400","0.02500300","21.34700000",1591478639999,"0.53370468",24,"7.53800000","0.18850725","0"],
# [1591478640000,"0.02500800","0.02501100","0.02500300","0.02500800","154.14200000",1591478699999,"3.85405839",97,"5.32300000","0.13312641","0"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
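#
# A minimal usage sketch for fetch_ohlcv(illustrative values, assuming ccxt.binance);
# limit is capped at 1500 as noted above, and each candle follows the parse_ohlcv layout:
#
#     import ccxt
#     exchange = ccxt.binance()
#     since = exchange.parse8601('2021-06-01T00:00:00Z')
#     candles = exchange.fetch_ohlcv('ETH/BTC', '1m', since, 500)
#     # each candle is [timestamp, open, high, low, close, volume]
#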
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_trade(self, trade, market=None):
if 'isDustTrade' in trade:
return self.parse_dust_trade(trade, market)
#
# aggregate trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
#
# {
# "a": 26129, # Aggregate tradeId
# "p": "0.01633102", # Price
# "q": "4.70443515", # Quantity
# "f": 27781, # First tradeId
# "l": 27781, # Last tradeId
# "T": 1498793709153, # Timestamp
# "m": True, # Was the buyer the maker?
# "M": True # Was the trade the best price match?
# }
#
# recent public trades and old public trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#old-trade-lookup-market_data
#
# {
# "id": 28457,
# "price": "4.00000100",
# "qty": "12.00000000",
# "time": 1499865549590,
# "isBuyerMaker": True,
# "isBestMatch": True
# }
#
# private trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data
#
# {
# "symbol": "BNBBTC",
# "id": 28457,
# "orderId": 100234,
# "price": "4.00000100",
# "qty": "12.00000000",
# "commission": "10.10000000",
# "commissionAsset": "BNB",
# "time": 1499865549590,
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
#
# futures trades
# https://binance-docs.github.io/apidocs/futures/en/#account-trade-list-user_data
#
# {
# "accountId": 20,
# "buyer": False,
# "commission": "-0.07819010",
# "commissionAsset": "USDT",
# "counterPartyId": 653,
# "id": 698759,
# "maker": False,
# "orderId": 25851813,
# "price": "7819.01",
# "qty": "0.002",
# "quoteQty": "0.01563",
# "realizedPnl": "-0.91539999",
# "side": "SELL",
# "symbol": "BTCUSDT",
# "time": 1569514978020
# }
# {
# "symbol": "BTCUSDT",
# "id": 477128891,
# "orderId": 13809777875,
# "side": "SELL",
# "price": "38479.55",
# "qty": "0.001",
# "realizedPnl": "-0.00009534",
# "marginAsset": "USDT",
# "quoteQty": "38.47955",
# "commission": "-0.00076959",
# "commissionAsset": "USDT",
# "time": 1612733566708,
# "positionSide": "BOTH",
# "maker": True,
# "buyer": False
# }
#
# {respType: FULL}
#
# {
# "price": "4000.00000000",
# "qty": "1.00000000",
# "commission": "4.00000000",
# "commissionAsset": "USDT",
# "tradeId": "1234",
# }
#
timestamp = self.safe_integer_2(trade, 'T', 'time')
price = self.safe_string_2(trade, 'p', 'price')
amount = self.safe_string_2(trade, 'q', 'qty')
cost = self.safe_string_2(trade, 'quoteQty', 'baseQty') # inverse futures
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
id = self.safe_string_2(trade, 't', 'a')
id = self.safe_string_2(trade, 'id', 'tradeId', id)
side = None
orderId = self.safe_string(trade, 'orderId')
if 'm' in trade:
side = 'sell' if trade['m'] else 'buy' # this is reversed intentionally
elif 'isBuyerMaker' in trade:
side = 'sell' if trade['isBuyerMaker'] else 'buy'
elif 'side' in trade:
side = self.safe_string_lower(trade, 'side')
else:
if 'isBuyer' in trade:
side = 'buy' if trade['isBuyer'] else 'sell' # this is a true side
fee = None
if 'commission' in trade:
fee = {
'cost': self.safe_string(trade, 'commission'),
'currency': self.safe_currency_code(self.safe_string(trade, 'commissionAsset')),
}
takerOrMaker = None
if 'isMaker' in trade:
takerOrMaker = 'maker' if trade['isMaker'] else 'taker'
if 'maker' in trade:
takerOrMaker = 'maker' if trade['maker'] else 'taker'
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'fromId': 123, # ID to get aggregate trades from INCLUSIVE.
# 'startTime': 456, # Timestamp in ms to get aggregate trades from INCLUSIVE.
# 'endTime': 789, # Timestamp in ms to get aggregate trades until INCLUSIVE.
# 'limit': 500, # default = 500, maximum = 1000
}
defaultType = self.safe_string_2(self.options, 'fetchTrades', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
defaultMethod = None
if type == 'future':
defaultMethod = 'fapiPublicGetAggTrades'
elif type == 'delivery':
defaultMethod = 'dapiPublicGetAggTrades'
else:
defaultMethod = 'publicGetAggTrades'
method = self.safe_string(self.options, 'fetchTradesMethod', defaultMethod)
if method == 'publicGetAggTrades':
if since is not None:
request['startTime'] = since
# https://github.com/ccxt/ccxt/issues/6400
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
request['endTime'] = self.sum(since, 3600000)
if type == 'future':
method = 'fapiPublicGetAggTrades'
elif type == 'delivery':
method = 'dapiPublicGetAggTrades'
elif method == 'publicGetHistoricalTrades':
if type == 'future':
method = 'fapiPublicGetHistoricalTrades'
elif type == 'delivery':
method = 'dapiPublicGetHistoricalTrades'
if limit is not None:
request['limit'] = limit # default = 500, maximum = 1000
#
# Caveats:
# - the default limit(500) applies only if no other parameters are set; trades up
#   to the maximum limit may be returned to satisfy other parameters
# - if both a limit and a time window are set and the time window contains more
#   trades than the limit, then the last trades from the window are returned
# - the 'tradeId' accepted and returned by this method is the "aggregate" trade id,
#   which is different from the actual trade id
# - setting both fromId and a time window results in an error
response = getattr(self, method)(self.extend(request, query))
#
# aggregate trades
#
# [
# {
# "a": 26129, # Aggregate tradeId
# "p": "0.01633102", # Price
# "q": "4.70443515", # Quantity
# "f": 27781, # First tradeId
# "l": 27781, # Last tradeId
# "T": 1498793709153, # Timestamp
# "m": True, # Was the buyer the maker?
# "M": True # Was the trade the best price match?
# }
# ]
#
# recent public trades and historical public trades
#
# [
# {
# "id": 28457,
# "price": "4.00000100",
# "qty": "12.00000000",
# "time": 1499865549590,
# "isBuyerMaker": True,
# "isBestMatch": True
# }
# ]
#
return self.parse_trades(response, market, since, limit)
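#
# A minimal usage sketch for fetch_trades(illustrative, assuming ccxt.binance); note that
# when `since` is given, the default aggTrades endpoint is queried with a one-hour endTime
# window(see above), so paginate by advancing `since`:
#
#     import ccxt
#     exchange = ccxt.binance()
#     since = exchange.parse8601('2021-06-01T00:00:00Z')
#     trades = exchange.fetch_trades('ETH/BTC', since, 1000)
#     # each trade has 'id', 'price', 'amount', 'side', 'timestamp', see parse_trade above
#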
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'PARTIALLY_FILLED': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
'PENDING_CANCEL': 'canceling', # currently unused
'REJECTED': 'rejected',
'EXPIRED': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# spot
#
# {
# "symbol": "LTCBTC",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "0.0",
# "cummulativeQuoteQty": "0.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "icebergQty": "0.0",
# "time": 1499827319559,
# "updateTime": 1499827319559,
# "isWorking": True
# }
#
# futures
#
# {
# "symbol": "BTCUSDT",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "1.0",
# "cumQuote": "10.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "updateTime": 1499827319559
# }
#
# createOrder with {"newOrderRespType": "FULL"}
#
# {
# "symbol": "BTCUSDT",
# "orderId": 5403233939,
# "orderListId": -1,
# "clientOrderId": "x-R4BD3S825e669e75b6c14f69a2c43e",
# "transactTime": 1617151923742,
# "price": "0.00000000",
# "origQty": "0.00050000",
# "executedQty": "0.00050000",
# "cummulativeQuoteQty": "29.47081500",
# "status": "FILLED",
# "timeInForce": "GTC",
# "type": "MARKET",
# "side": "BUY",
# "fills": [
# {
# "price": "58941.63000000",
# "qty": "0.00050000",
# "commission": "0.00007050",
# "commissionAsset": "BNB",
# "tradeId": 737466631
# }
# ]
# }
#
# delivery
#
# {
# "orderId": "18742727411",
# "symbol": "ETHUSD_PERP",
# "pair": "ETHUSD",
# "status": "FILLED",
# "clientOrderId": "x-xcKtGhcu3e2d1503fdd543b3b02419",
# "price": "0",
# "avgPrice": "4522.14",
# "origQty": "1",
# "executedQty": "1",
# "cumBase": "0.00221134",
# "timeInForce": "GTC",
# "type": "MARKET",
# "reduceOnly": False,
# "closePosition": False,
# "side": "SELL",
# "positionSide": "BOTH",
# "stopPrice": "0",
# "workingType": "CONTRACT_PRICE",
# "priceProtect": False,
# "origType": "MARKET",
# "time": "1636061952660",
# "updateTime": "1636061952660"
# }
#
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
filled = self.safe_string(order, 'executedQty', '0')
timestamp = None
lastTradeTimestamp = None
if 'time' in order:
timestamp = self.safe_integer(order, 'time')
elif 'transactTime' in order:
timestamp = self.safe_integer(order, 'transactTime')
elif 'updateTime' in order:
if status == 'open':
if Precise.string_gt(filled, '0'):
lastTradeTimestamp = self.safe_integer(order, 'updateTime')
else:
timestamp = self.safe_integer(order, 'updateTime')
average = self.safe_string(order, 'avgPrice')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'origQty')
# - Spot/Margin market: cummulativeQuoteQty
# - Futures market: cumQuote.
# Note this is not the actual cost, since Binance futures uses leverage to calculate margins.
cost = self.safe_string_2(order, 'cummulativeQuoteQty', 'cumQuote')
cost = self.safe_string(order, 'cumBase', cost)
id = self.safe_string(order, 'orderId')
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
fills = self.safe_value(order, 'fills', [])
clientOrderId = self.safe_string(order, 'clientOrderId')
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = (type == 'limit_maker') or (timeInForce == 'GTX')
if type == 'limit_maker':
type = 'limit'
stopPriceString = self.safe_string(order, 'stopPrice')
stopPrice = self.parse_number(self.omit_zero(stopPriceString))
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': None,
'trades': fills,
}, market)
def create_reduce_only_order(self, symbol, type, side, amount, price=None, params={}):
request = {
'reduceOnly': True,
}
return self.create_order(symbol, type, side, amount, price, self.extend(request, params))
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'createOrder', 'defaultType', 'spot')
orderType = self.safe_string(params, 'type', defaultType)
clientOrderId = self.safe_string_2(params, 'newClientOrderId', 'clientOrderId')
postOnly = self.safe_value(params, 'postOnly', False)
params = self.omit(params, ['type', 'newClientOrderId', 'clientOrderId', 'postOnly'])
reduceOnly = self.safe_value(params, 'reduceOnly')
if reduceOnly is not None:
if (orderType != 'future') and (orderType != 'delivery'):
raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + orderType + ' orders, reduceOnly orders are supported for futures and perpetuals only')
method = 'privatePostOrder'
if orderType == 'future':
method = 'fapiPrivatePostOrder'
elif orderType == 'delivery':
method = 'dapiPrivatePostOrder'
elif orderType == 'margin':
method = 'sapiPostMarginOrder'
# the next 5 lines are added to support testing orders
if market['spot']:
test = self.safe_value(params, 'test', False)
if test:
method += 'Test'
params = self.omit(params, 'test')
# only supported for spot/margin api(all margin markets are spot markets)
if postOnly:
type = 'LIMIT_MAKER'
uppercaseType = type.upper()
validOrderTypes = self.safe_value(market['info'], 'orderTypes')
if not self.in_array(uppercaseType, validOrderTypes):
raise InvalidOrder(self.id + ' ' + type + ' is not a valid order type in market ' + symbol)
request = {
'symbol': market['id'],
'type': uppercaseType,
'side': side.upper(),
}
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker')
if broker is not None:
brokerId = self.safe_string(broker, orderType)
if brokerId is not None:
request['newClientOrderId'] = brokerId + self.uuid22()
else:
request['newClientOrderId'] = clientOrderId
if (orderType == 'spot') or (orderType == 'margin'):
request['newOrderRespType'] = self.safe_value(self.options['newOrderRespType'], type, 'RESULT') # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
else:
# delivery and future
request['newOrderRespType'] = 'RESULT' # "ACK", "RESULT", default "ACK"
# additional required fields depending on the order type
timeInForceIsRequired = False
priceIsRequired = False
stopPriceIsRequired = False
quantityIsRequired = False
#
# spot/margin
#
# LIMIT timeInForce, quantity, price
# MARKET quantity or quoteOrderQty
# STOP_LOSS quantity, stopPrice
# STOP_LOSS_LIMIT timeInForce, quantity, price, stopPrice
# TAKE_PROFIT quantity, stopPrice
# TAKE_PROFIT_LIMIT timeInForce, quantity, price, stopPrice
# LIMIT_MAKER quantity, price
#
# futures
#
# LIMIT timeInForce, quantity, price
# MARKET quantity
# STOP/TAKE_PROFIT quantity, price, stopPrice
# STOP_MARKET stopPrice
# TAKE_PROFIT_MARKET stopPrice
# TRAILING_STOP_MARKET callbackRate
#
if uppercaseType == 'MARKET':
quoteOrderQty = self.safe_value(self.options, 'quoteOrderQty', False)
if quoteOrderQty:
quoteOrderQty = self.safe_number(params, 'quoteOrderQty')
precision = market['precision']['price']
if quoteOrderQty is not None:
request['quoteOrderQty'] = self.decimal_to_precision(quoteOrderQty, TRUNCATE, precision, self.precisionMode)
params = self.omit(params, 'quoteOrderQty')
elif price is not None:
request['quoteOrderQty'] = self.decimal_to_precision(amount * price, TRUNCATE, precision, self.precisionMode)
else:
quantityIsRequired = True
else:
quantityIsRequired = True
elif uppercaseType == 'LIMIT':
priceIsRequired = True
timeInForceIsRequired = True
quantityIsRequired = True
elif (uppercaseType == 'STOP_LOSS') or (uppercaseType == 'TAKE_PROFIT'):
stopPriceIsRequired = True
quantityIsRequired = True
if market['linear'] or market['inverse']:
priceIsRequired = True
elif (uppercaseType == 'STOP_LOSS_LIMIT') or (uppercaseType == 'TAKE_PROFIT_LIMIT'):
quantityIsRequired = True
stopPriceIsRequired = True
priceIsRequired = True
timeInForceIsRequired = True
elif uppercaseType == 'LIMIT_MAKER':
priceIsRequired = True
quantityIsRequired = True
elif uppercaseType == 'STOP':
quantityIsRequired = True
stopPriceIsRequired = True
priceIsRequired = True
elif (uppercaseType == 'STOP_MARKET') or (uppercaseType == 'TAKE_PROFIT_MARKET'):
closePosition = self.safe_value(params, 'closePosition')
if closePosition is None:
quantityIsRequired = True
stopPriceIsRequired = True
elif uppercaseType == 'TRAILING_STOP_MARKET':
quantityIsRequired = True
callbackRate = self.safe_number(params, 'callbackRate')
if callbackRate is None:
raise InvalidOrder(self.id + ' createOrder() requires a callbackRate extra param for a ' + type + ' order')
if quantityIsRequired:
request['quantity'] = self.amount_to_precision(symbol, amount)
if priceIsRequired:
if price is None:
raise InvalidOrder(self.id + ' createOrder() requires a price argument for a ' + type + ' order')
request['price'] = self.price_to_precision(symbol, price)
if timeInForceIsRequired:
request['timeInForce'] = self.options['defaultTimeInForce'] # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
if stopPriceIsRequired:
stopPrice = self.safe_number(params, 'stopPrice')
if stopPrice is None:
raise InvalidOrder(self.id + ' createOrder() requires a stopPrice extra param for a ' + type + ' order')
else:
params = self.omit(params, 'stopPrice')
request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
response = getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
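#
# A minimal usage sketch for create_order(illustrative values, assuming ccxt.binance with
# apiKey/secret configured): a plain spot limit order, and a stop-loss-limit order passing
# stopPrice through params as handled above:
#
#     exchange = ccxt.binance({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#     order = exchange.create_order('ETH/BTC', 'limit', 'buy', 0.1, 0.03)
#     stop = exchange.create_order('ETH/BTC', 'STOP_LOSS_LIMIT', 'sell', 0.1, 0.028, {'stopPrice': 0.029})
#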
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetOrder'
if type == 'future':
method = 'fapiPrivateGetOrder'
elif type == 'delivery':
method = 'dapiPrivateGetOrder'
elif type == 'margin':
method = 'sapiGetMarginOrder'
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
if clientOrderId is not None:
request['origClientOrderId'] = clientOrderId
else:
request['orderId'] = id
query = self.omit(params, ['type', 'clientOrderId', 'origClientOrderId'])
response = getattr(self, method)(self.extend(request, query))
return self.parse_order(response, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetAllOrders'
if type == 'future':
method = 'fapiPrivateGetAllOrders'
elif type == 'delivery':
method = 'dapiPrivateGetAllOrders'
elif type == 'margin':
method = 'sapiGetMarginAllOrders'
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "LTCBTC",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "0.0",
# "cummulativeQuoteQty": "0.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "icebergQty": "0.0",
# "time": 1499827319559,
# "updateTime": 1499827319559,
# "isWorking": True
# }
# ]
#
# futures
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "1.0",
# "cumQuote": "10.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "updateTime": 1499827319559
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
query = None
type = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
elif self.options['warnOnFetchOpenOrdersWithoutSymbol']:
symbols = self.symbols
numSymbols = len(symbols)
fetchOpenOrdersRateLimit = int(numSymbols / 2)
raise ExchangeError(self.id + ' fetchOpenOrders WARNING: fetching open orders without specifying a symbol is rate-limited to one call per ' + str(fetchOpenOrdersRateLimit) + ' seconds. Do not call this method frequently to avoid ban. Set ' + self.id + '.options["warnOnFetchOpenOrdersWithoutSymbol"] = False to suppress this warning message.')
else:
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateGetOpenOrders'
if type == 'future':
method = 'fapiPrivateGetOpenOrders'
elif type == 'delivery':
method = 'dapiPrivateGetOpenOrders'
elif type == 'margin':
method = 'sapiGetMarginOpenOrders'
response = getattr(self, method)(self.extend(request, query))
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
# https://github.com/ccxt/ccxt/issues/6507
origClientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
request = {
'symbol': market['id'],
# 'orderId': id,
# 'origClientOrderId': id,
}
if origClientOrderId is None:
request['orderId'] = id
else:
request['origClientOrderId'] = origClientOrderId
method = 'privateDeleteOrder'
if type == 'future':
method = 'fapiPrivateDeleteOrder'
elif type == 'delivery':
method = 'dapiPrivateDeleteOrder'
elif type == 'margin':
method = 'sapiDeleteMarginOrder'
query = self.omit(params, ['type', 'origClientOrderId', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, query))
return self.parse_order(response, market)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
defaultType = self.safe_string_2(self.options, 'cancelAllOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateDeleteOpenOrders'
if type == 'margin':
method = 'sapiDeleteMarginOpenOrders'
elif type == 'future':
method = 'fapiPrivateDeleteAllOpenOrders'
elif type == 'delivery':
method = 'dapiPrivateDeleteAllOpenOrders'
response = getattr(self, method)(self.extend(request, query))
if isinstance(response, list):
return self.parse_orders(response, market)
else:
return response
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = None
if type == 'spot':
method = 'privateGetMyTrades'
elif type == 'margin':
method = 'sapiGetMarginMyTrades'
elif type == 'future':
method = 'fapiPrivateGetUserTrades'
elif type == 'delivery':
method = 'dapiPrivateGetUserTrades'
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot trade
#
# [
# {
# "symbol": "BNBBTC",
# "id": 28457,
# "orderId": 100234,
# "price": "4.00000100",
# "qty": "12.00000000",
# "commission": "10.10000000",
# "commissionAsset": "BNB",
# "time": 1499865549590,
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True,
# }
# ]
#
# futures trade
#
# [
# {
# "accountId": 20,
# "buyer": False,
# "commission": "-0.07819010",
# "commissionAsset": "USDT",
# "counterPartyId": 653,
# "id": 698759,
# "maker": False,
# "orderId": 25851813,
# "price": "7819.01",
# "qty": "0.002",
# "quoteQty": "0.01563",
# "realizedPnl": "-0.91539999",
# "side": "SELL",
# "symbol": "BTCUSDT",
# "time": 1569514978020
# }
# ]
#
return self.parse_trades(response, market, since, limit)
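#
# A minimal usage sketch for fetch_my_trades(illustrative, requires apiKey/secret);
# pass {'type': 'future'} in params to query USDT-margined futures fills instead of spot:
#
#     my_trades = exchange.fetch_my_trades('ETH/BTC', None, 10)
#     futures_fills = exchange.fetch_my_trades('BTC/USDT', None, 10, {'type': 'future'})
#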
def fetch_my_dust_trades(self, symbol=None, since=None, limit=None, params={}):
#
# Binance provides an opportunity to trade insignificant(i.e. non-tradable and non-withdrawable)
# token leftovers(of any asset) into `BNB` coin, which in turn can be used to pay trading fees.
# The corresponding trades history is called the `Dust Log` and can be requested via the following end-point:
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
#
self.load_markets()
request = {}
if since is not None:
request['startTime'] = since
request['endTime'] = self.sum(since, 7776000000)
response = self.sapiGetAssetDribblet(self.extend(request, params))
# {
# "total": "4",
# "userAssetDribblets": [
# {
# "operateTime": "1627575731000",
# "totalServiceChargeAmount": "0.00001453",
# "totalTransferedAmount": "0.00072693",
# "transId": "70899815863",
# "userAssetDribbletDetails": [
# {
# "fromAsset": "LTC",
# "amount": "0.000006",
# "transferedAmount": "0.00000267",
# "serviceChargeAmount": "0.00000005",
# "operateTime": "1627575731000",
# "transId": "70899815863"
# },
# {
# "fromAsset": "GBP",
# "amount": "0.15949157",
# "transferedAmount": "0.00072426",
# "serviceChargeAmount": "0.00001448",
# "operateTime": "1627575731000",
# "transId": "70899815863"
# }
# ]
# },
# ]
# }
results = self.safe_value(response, 'userAssetDribblets', [])
rows = self.safe_integer(response, 'total', 0)
data = []
for i in range(0, rows):
logs = self.safe_value(results[i], 'userAssetDribbletDetails', [])
for j in range(0, len(logs)):
logs[j]['isDustTrade'] = True
data.append(logs[j])
trades = self.parse_trades(data, None, since, limit)
return self.filter_by_since_limit(trades, since, limit)
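#
# A minimal usage sketch for fetch_my_dust_trades(illustrative, requires apiKey/secret);
# it returns past small-balance conversions to BNB, parsed by parse_dust_trade below:
#
#     dust_trades = exchange.fetch_my_dust_trades()
#     for t in dust_trades:
#         print(t['symbol'], t['side'], t['amount'], t['cost'])
#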
def parse_dust_trade(self, trade, market=None):
#
# {
# "fromAsset": "USDT",
# "amount": "0.009669",
# "transferedAmount": "0.00002992",
# "serviceChargeAmount": "0.00000059",
# "operateTime": "1628076010000",
# "transId": "71416578712",
# "isDustTrade": True
# }
#
orderId = self.safe_string(trade, 'transId')
timestamp = self.safe_integer(trade, 'operateTime')
currencyId = self.safe_string(trade, 'fromAsset')
tradedCurrency = self.safe_currency_code(currencyId)
bnb = self.currency('BNB')
earnedCurrency = bnb['code']
applicantSymbol = earnedCurrency + '/' + tradedCurrency
tradedCurrencyIsQuote = False
if applicantSymbol in self.markets:
tradedCurrencyIsQuote = True
feeCostString = self.safe_string(trade, 'serviceChargeAmount')
fee = {
'currency': earnedCurrency,
'cost': self.parse_number(feeCostString),
}
symbol = None
amountString = None
costString = None
side = None
if tradedCurrencyIsQuote:
symbol = applicantSymbol
amountString = self.safe_string(trade, 'transferedAmount')
costString = self.safe_string(trade, 'amount')
side = 'buy'
else:
symbol = tradedCurrency + '/' + earnedCurrency
amountString = self.safe_string(trade, 'amount')
costString = self.safe_string(trade, 'transferedAmount')
side = 'sell'
priceString = None
if costString is not None:
if amountString:
priceString = Precise.string_div(costString, amountString)
id = None
amount = self.parse_number(amountString)
price = self.parse_number(priceString)
cost = self.parse_number(costString)
type = None
takerOrMaker = None
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'amount': amount,
'price': price,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
response = None
request = {}
legalMoney = self.safe_value(self.options, 'legalMoney', {})
if code in legalMoney:
if code is not None:
currency = self.currency(code)
request['transactionType'] = 0
if since is not None:
request['beginTime'] = since
raw = self.sapiGetFiatOrders(self.extend(request, params))
response = self.safe_value(raw, 'data')
# {
# "code": "000000",
# "message": "success",
# "data": [
# {
# "orderNo": "25ced37075c1470ba8939d0df2316e23",
# "fiatCurrency": "EUR",
# "indicatedAmount": "15.00",
# "amount": "15.00",
# "totalFee": "0.00",
# "method": "card",
# "status": "Failed",
# "createTime": 1627501026000,
# "updateTime": 1627501027000
# }
# ],
# "total": 1,
# "success": True
# }
else:
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['startTime'] = since
# max 3 months range https://github.com/ccxt/ccxt/issues/6495
request['endTime'] = self.sum(since, 7776000000)
if limit is not None:
request['limit'] = limit
response = self.sapiGetCapitalDepositHisrec(self.extend(request, params))
# [
# {
# "amount": "0.01844487",
# "coin": "BCH",
# "network": "BCH",
# "status": 1,
# "address": "1NYxAJhW2281HK1KtJeaENBqHeygA88FzR",
# "addressTag": "",
# "txId": "bafc5902504d6504a00b7d0306a41154cbf1d1b767ab70f3bc226327362588af",
# "insertTime": 1610784980000,
# "transferType": 0,
# "confirmTimes": "2/2"
# },
# {
# "amount": "4500",
# "coin": "USDT",
# "network": "BSC",
# "status": 1,
# "address": "0xc9c923c87347ca0f3451d6d308ce84f691b9f501",
# "addressTag": "",
# "txId": "Internal transfer 51376627901",
# "insertTime": 1618394381000,
# "transferType": 1,
# "confirmTimes": "1/15"
# }
# ]
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
legalMoney = self.safe_value(self.options, 'legalMoney', {})
request = {}
response = None
currency = None
if code in legalMoney:
if code is not None:
currency = self.currency(code)
request['transactionType'] = 1
if since is not None:
request['beginTime'] = since
raw = self.sapiGetFiatOrders(self.extend(request, params))
response = self.safe_value(raw, 'data')
# {
# "code": "000000",
# "message": "success",
# "data": [
# {
# "orderNo": "CJW706452266115170304",
# "fiatCurrency": "GBP",
# "indicatedAmount": "10001.50",
# "amount": "100.00",
# "totalFee": "1.50",
# "method": "bank transfer",
# "status": "Successful",
# "createTime": 1620037745000,
# "updateTime": 1620038480000
# },
# {
# "orderNo": "CJW706287492781891584",
# "fiatCurrency": "GBP",
# "indicatedAmount": "10001.50",
# "amount": "100.00",
# "totalFee": "1.50",
# "method": "bank transfer",
# "status": "Successful",
# "createTime": 1619998460000,
# "updateTime": 1619998823000
# }
# ],
# "total": 39,
# "success": True
# }
else:
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['startTime'] = since
# max 3 months range https://github.com/ccxt/ccxt/issues/6495
request['endTime'] = self.sum(since, 7776000000)
if limit is not None:
request['limit'] = limit
response = self.sapiGetCapitalWithdrawHistory(self.extend(request, params))
# [
# {
# "id": "69e53ad305124b96b43668ceab158a18",
# "amount": "28.75",
# "transactionFee": "0.25",
# "coin": "XRP",
# "status": 6,
# "address": "r3T75fuLjX51mmfb5Sk1kMNuhBgBPJsjza",
# "addressTag": "101286922",
# "txId": "19A5B24ED0B697E4F0E9CD09FCB007170A605BC93C9280B9E6379C5E6EF0F65A",
# "applyTime": "2021-04-15 12:09:16",
# "network": "XRP",
# "transferType": 0
# },
# {
# "id": "9a67628b16ba4988ae20d329333f16bc",
# "amount": "20",
# "transactionFee": "20",
# "coin": "USDT",
# "status": 6,
# "address": "0x0AB991497116f7F5532a4c2f4f7B1784488628e1",
# "txId": "0x77fbf2cf2c85b552f0fd31fd2e56dc95c08adae031d96f3717d8b17e1aea3e46",
# "applyTime": "2021-04-15 12:06:53",
# "network": "ETH",
# "transferType": 0
# },
# {
# "id": "a7cdc0afbfa44a48bd225c9ece958fe2",
# "amount": "51",
# "transactionFee": "1",
# "coin": "USDT",
# "status": 6,
# "address": "TYDmtuWL8bsyjvcauUTerpfYyVhFtBjqyo",
# "txId": "168a75112bce6ceb4823c66726ad47620ad332e69fe92d9cb8ceb76023f9a028",
# "applyTime": "2021-04-13 12:46:59",
# "network": "TRX",
# "transferType": 0
# }
# ]
return self.parse_transactions(response, currency, since, limit)
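#
# A minimal usage sketch for fetch_deposits / fetch_withdrawals(illustrative, requires
# apiKey/secret); when `since` is given, the code above caps the window at ~90 days:
#
#     deposits = exchange.fetch_deposits('USDT')
#     withdrawals = exchange.fetch_withdrawals('USDT', exchange.parse8601('2021-04-01T00:00:00Z'))
#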
def parse_transaction_status_by_type(self, status, type=None):
statusesByType = {
'deposit': {
'0': 'pending',
'1': 'ok',
# Fiat
# Processing, Failed, Successful, Finished, Refunding, Refunded, Refund Failed, Order Partial credit Stopped
'Processing': 'pending',
'Failed': 'failed',
'Successful': 'ok',
'Refunding': 'canceled',
'Refunded': 'canceled',
'Refund Failed': 'failed',
},
'withdrawal': {
'0': 'pending', # Email Sent
'1': 'canceled', # Cancelled(different from 1 = ok in deposits)
'2': 'pending', # Awaiting Approval
'3': 'failed', # Rejected
'4': 'pending', # Processing
'5': 'failed', # Failure
'6': 'ok', # Completed
# Fiat
# Processing, Failed, Successful, Finished, Refunding, Refunded, Refund Failed, Order Partial credit Stopped
'Processing': 'pending',
'Failed': 'failed',
'Successful': 'ok',
'Refunding': 'canceled',
'Refunded': 'canceled',
'Refund Failed': 'failed',
},
}
statuses = self.safe_value(statusesByType, type, {})
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "amount": "4500",
# "coin": "USDT",
# "network": "BSC",
# "status": 1,
# "address": "0xc9c923c87347ca0f3451d6d308ce84f691b9f501",
# "addressTag": "",
# "txId": "Internal transfer 51376627901",
# "insertTime": 1618394381000,
# "transferType": 1,
# "confirmTimes": "1/15"
# }
#
# fetchWithdrawals
#
# {
# "id": "69e53ad305124b96b43668ceab158a18",
# "amount": "28.75",
# "transactionFee": "0.25",
# "coin": "XRP",
# "status": 6,
# "address": "r3T75fuLjX51mmfb5Sk1kMNuhBgBPJsjza",
# "addressTag": "101286922",
# "txId": "19A5B24ED0B697E4F0E9CD09FCB007170A605BC93C9280B9E6379C5E6EF0F65A",
# "applyTime": "2021-04-15 12:09:16",
# "network": "XRP",
# "transferType": 0
# }
#
# fiat transaction
# withdraw
# {
# "orderNo": "CJW684897551397171200",
# "fiatCurrency": "GBP",
# "indicatedAmount": "29.99",
# "amount": "28.49",
# "totalFee": "1.50",
# "method": "bank transfer",
# "status": "Successful",
# "createTime": 1614898701000,
# "updateTime": 1614898820000
# }
#
# deposit
# {
# "orderNo": "25ced37075c1470ba8939d0df2316e23",
# "fiatCurrency": "EUR",
# "indicatedAmount": "15.00",
# "amount": "15.00",
# "totalFee": "0.00",
# "method": "card",
# "status": "Failed",
# "createTime": "1627501026000",
# "updateTime": "1627501027000"
# }
#
id = self.safe_string_2(transaction, 'id', 'orderNo')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'addressTag') # set but unused
if tag is not None:
if len(tag) < 1:
tag = None
txid = self.safe_string(transaction, 'txId')
if (txid is not None) and (txid.find('Internal transfer ') >= 0):
txid = txid[18:]
currencyId = self.safe_string_2(transaction, 'coin', 'fiatCurrency')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
insertTime = self.safe_integer_2(transaction, 'insertTime', 'createTime')
applyTime = self.parse8601(self.safe_string(transaction, 'applyTime'))
type = self.safe_string(transaction, 'type')
if type is None:
if (insertTime is not None) and (applyTime is None):
type = 'deposit'
timestamp = insertTime
elif (insertTime is None) and (applyTime is not None):
type = 'withdrawal'
timestamp = applyTime
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'status'), type)
amount = self.safe_number(transaction, 'amount')
feeCost = self.safe_number_2(transaction, 'transactionFee', 'totalFee')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
updated = self.safe_integer_2(transaction, 'successTime', 'updateTime')
internal = self.safe_integer(transaction, 'transferType', False)
internal = True if internal else False
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': tag,
'tagTo': tag,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'internal': internal,
'fee': fee,
}
def parse_transfer_status(self, status):
statuses = {
'CONFIRMED': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transfer(self, transfer, currency=None):
#
# transfer
#
# {
# "tranId":13526853623
# }
#
# fetchTransfers
#
# {
# timestamp: 1614640878000,
# asset: 'USDT',
# amount: '25',
# type: 'MAIN_UMFUTURE',
# status: 'CONFIRMED',
# tranId: 43000126248
# }
#
id = self.safe_string(transfer, 'tranId')
currencyId = self.safe_string(transfer, 'asset')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(transfer, 'amount')
type = self.safe_string(transfer, 'type')
fromAccount = None
toAccount = None
typesByAccount = self.safe_value(self.options, 'typesByAccount', {})
if type is not None:
parts = type.split('_')
fromAccount = self.safe_value(parts, 0)
toAccount = self.safe_value(parts, 1)
fromAccount = self.safe_string(typesByAccount, fromAccount, fromAccount)
toAccount = self.safe_string(typesByAccount, toAccount, toAccount)
timestamp = self.safe_integer(transfer, 'timestamp')
status = self.parse_transfer_status(self.safe_string(transfer, 'status'))
return {
'info': transfer,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': code,
'amount': amount,
'fromAccount': fromAccount,
'toAccount': toAccount,
'status': status,
}
def parse_income(self, income, market=None):
#
# {
# "symbol": "ETHUSDT",
# "incomeType": "FUNDING_FEE",
# "income": "0.00134317",
# "asset": "USDT",
# "time": "1621584000000",
# "info": "FUNDING_FEE",
# "tranId": "4480321991774044580",
# "tradeId": ""
# }
#
marketId = self.safe_string(income, 'symbol')
symbol = self.safe_symbol(marketId, market)
amount = self.safe_number(income, 'income')
currencyId = self.safe_string(income, 'asset')
code = self.safe_currency_code(currencyId)
id = self.safe_string(income, 'tranId')
timestamp = self.safe_integer(income, 'time')
return {
'info': income,
'symbol': symbol,
'code': code,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': id,
'amount': amount,
}
def parse_incomes(self, incomes, market=None, since=None, limit=None):
result = []
for i in range(0, len(incomes)):
entry = incomes[i]
parsed = self.parse_income(entry, market)
result.append(parsed)
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_since_limit(sorted, since, limit)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
self.load_markets()
currency = self.currency(code)
type = self.safe_string(params, 'type')
if type is None:
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromAccount = fromAccount.lower()
toAccount = toAccount.lower()
fromId = self.safe_string(accountsByType, fromAccount)
toId = self.safe_string(accountsByType, toAccount)
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
type = fromId + '_' + toId
request = {
'asset': currency['id'],
'amount': self.currency_to_precision(code, amount),
'type': type,
}
response = self.sapiPostAssetTransfer(self.extend(request, params))
#
# {
# "tranId":13526853623
# }
#
transfer = self.parse_transfer(response, currency)
return self.extend(transfer, {
'amount': amount,
'currency': code,
'fromAccount': fromAccount,
'toAccount': toAccount,
})
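#
# A minimal usage sketch for transfer(illustrative, requires apiKey/secret); the account
# names are resolved through self.options['accountsByType'] as shown above, so 'spot' to
# 'future' becomes a MAIN_UMFUTURE transfer:
#
#     result = exchange.transfer('USDT', 25, 'spot', 'future')
#     print(result['id'])
#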
def fetch_transfers(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
defaultType = self.safe_string_2(self.options, 'fetchTransfers', 'defaultType', 'spot')
fromAccount = self.safe_string(params, 'fromAccount', defaultType)
defaultTo = 'spot' if (fromAccount == 'future') else 'future'
toAccount = self.safe_string(params, 'toAccount', defaultTo)
type = self.safe_string(params, 'type')
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount)
toId = self.safe_string(accountsByType, toAccount)
if type is None:
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount parameter must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount parameter must be one of ' + ', '.join(keys))
type = fromId + '_' + toId
request = {
'type': type,
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['size'] = limit
response = self.sapiGetAssetTransfer(self.extend(request, params))
#
# {
# total: 3,
# rows: [
# {
# timestamp: 1614640878000,
# asset: 'USDT',
# amount: '25',
# type: 'MAIN_UMFUTURE',
# status: 'CONFIRMED',
# tranId: 43000126248
# },
# ]
# }
#
rows = self.safe_value(response, 'rows', [])
return self.parse_transfers(rows, currency, since, limit)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
# 'network': 'ETH', # 'BSC', 'XMR', you can get network and isDefault in networkList in the response of sapiGetCapitalConfigDetail
}
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network') # this line allows the user to specify either ERC20 or ETH
network = self.safe_string(networks, network, network) # handle ERC20>ETH alias
if network is not None:
request['network'] = network
params = self.omit(params, 'network')
# has support for the 'network' parameter
# https://binance-docs.github.io/apidocs/spot/en/#deposit-address-supporting-network-user_data
response = self.sapiGetCapitalDepositAddress(self.extend(request, params))
#
# {
# currency: 'XRP',
# address: 'rEb8TK3gBgk5auZkwc6sHnwrGVJH8DuaLh',
# tag: '108618262',
# info: {
# coin: 'XRP',
# address: 'rEb8TK3gBgk5auZkwc6sHnwrGVJH8DuaLh',
# tag: '108618262',
# url: 'https://bithomp.com/explorer/rEb8TK3gBgk5auZkwc6sHnwrGVJH8DuaLh'
# }
# }
#
address = self.safe_string(response, 'address')
url = self.safe_string(response, 'url')
impliedNetwork = None
if url is not None:
reverseNetworks = self.safe_value(self.options, 'reverseNetworks', {})
parts = url.split('/')
topLevel = self.safe_string(parts, 2)
if (topLevel == 'blockchair.com') or (topLevel == 'viewblock.io'):
subLevel = self.safe_string(parts, 3)
if subLevel is not None:
topLevel = topLevel + '/' + subLevel
impliedNetwork = self.safe_string(reverseNetworks, topLevel)
impliedNetworks = self.safe_value(self.options, 'impliedNetworks', {
'ETH': {'ERC20': 'ETH'},
'TRX': {'TRC20': 'TRX'},
})
if code in impliedNetworks:
conversion = self.safe_value(impliedNetworks, code, {})
impliedNetwork = self.safe_string(conversion, impliedNetwork, impliedNetwork)
tag = self.safe_string(response, 'tag', '')
if len(tag) == 0:
tag = None
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': impliedNetwork,
'info': response,
}
def fetch_funding_fees(self, codes=None, params={}):
self.load_markets()
response = self.sapiGetCapitalConfigGetall(params)
#
# [
# {
# coin: 'BAT',
# depositAllEnable: True,
# withdrawAllEnable: True,
# name: 'Basic Attention Token',
# free: '0',
# locked: '0',
# freeze: '0',
# withdrawing: '0',
# ipoing: '0',
# ipoable: '0',
# storage: '0',
# isLegalMoney: False,
# trading: True,
# networkList: [
# {
# network: 'BNB',
# coin: 'BAT',
# withdrawIntegerMultiple: '0.00000001',
# isDefault: False,
# depositEnable: True,
# withdrawEnable: True,
# depositDesc: '',
# withdrawDesc: '',
#                 specialTips: 'The name of this asset is Basic Attention Token(BAT). Both a MEMO and an Address are required to successfully deposit your BEP2 tokens to Binance.',
# name: 'BEP2',
# resetAddressStatus: False,
# addressRegex: '^(bnb1)[0-9a-z]{38}$',
# memoRegex: '^[0-9A-Za-z\\-_]{1,120}$',
# withdrawFee: '0.27',
# withdrawMin: '0.54',
# withdrawMax: '10000000000',
# minConfirm: '1',
# unLockConfirm: '0'
# },
# {
# network: 'BSC',
# coin: 'BAT',
# withdrawIntegerMultiple: '0.00000001',
# isDefault: False,
# depositEnable: True,
# withdrawEnable: True,
# depositDesc: '',
# withdrawDesc: '',
#                 specialTips: 'The name of this asset is Basic Attention Token. Please ensure you are depositing Basic Attention Token(BAT) tokens under the contract address ending in 9766e.',
# name: 'BEP20(BSC)',
# resetAddressStatus: False,
# addressRegex: '^(0x)[0-9A-Fa-f]{40}$',
# memoRegex: '',
# withdrawFee: '0.27',
# withdrawMin: '0.54',
# withdrawMax: '10000000000',
# minConfirm: '15',
# unLockConfirm: '0'
# },
# {
# network: 'ETH',
# coin: 'BAT',
# withdrawIntegerMultiple: '0.00000001',
# isDefault: True,
# depositEnable: True,
# withdrawEnable: True,
# depositDesc: '',
# withdrawDesc: '',
#                 specialTips: 'The name of this asset is Basic Attention Token. Please ensure you are depositing Basic Attention Token(BAT) tokens under the contract address ending in 887ef.',
# name: 'ERC20',
# resetAddressStatus: False,
# addressRegex: '^(0x)[0-9A-Fa-f]{40}$',
# memoRegex: '',
# withdrawFee: '27',
# withdrawMin: '54',
# withdrawMax: '10000000000',
# minConfirm: '12',
# unLockConfirm: '0'
# }
# ]
# }
# ]
#
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'coin')
code = self.safe_currency_code(currencyId)
networkList = self.safe_value(entry, 'networkList')
withdrawFees[code] = {}
for j in range(0, len(networkList)):
networkEntry = networkList[j]
networkId = self.safe_string(networkEntry, 'network')
networkCode = self.safe_currency_code(networkId)
fee = self.safe_number(networkEntry, 'withdrawFee')
withdrawFees[code][networkCode] = fee
return {
'withdraw': withdrawFees,
'deposit': {},
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'address': address,
'amount': amount,
# https://binance-docs.github.io/apidocs/spot/en/#withdraw-sapi
# issue sapiGetCapitalConfigGetall() to get networks for withdrawing USDT ERC20 vs USDT Omni
# 'network': 'ETH', # 'BTC', 'TRX', etc, optional
}
if tag is not None:
request['addressTag'] = tag
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network') # this line allows the user to specify either ERC20 or ETH
network = self.safe_string(networks, network, network) # handle ERC20>ETH alias
if network is not None:
request['network'] = network
params = self.omit(params, 'network')
response = self.sapiPostCapitalWithdrawApply(self.extend(request, params))
# {id: '9a67628b16ba4988ae20d329333f16bc'}
return {
'info': response,
'id': self.safe_string(response, 'id'),
}
def parse_trading_fee(self, fee, market=None):
#
# {
# "symbol": "ADABNB",
# "makerCommission": 0.001,
# "takerCommission": 0.001
# }
#
marketId = self.safe_string(fee, 'symbol')
symbol = self.safe_symbol(marketId)
return {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(fee, 'makerCommission'),
'taker': self.safe_number(fee, 'takerCommission'),
}
def fetch_trading_fee(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.sapiGetAssetTradeFee(self.extend(request, params))
#
# [
# {
# "symbol": "BTCUSDT",
# "makerCommission": "0.001",
# "takerCommission": "0.001"
# }
# ]
#
first = self.safe_value(response, 0, {})
return self.parse_trading_fee(first)
def fetch_trading_fees(self, params={}):
self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchTradingFees', 'defaultType', 'future')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
if (type == 'spot') or (type == 'margin'):
method = 'sapiGetAssetTradeFee'
elif type == 'future':
method = 'fapiPrivateGetAccount'
elif type == 'delivery':
method = 'dapiPrivateGetAccount'
response = getattr(self, method)(query)
#
# sapi / spot
#
# [
# {
# "symbol": "ZRXBNB",
# "makerCommission": "0.001",
# "takerCommission": "0.001"
# },
# {
# "symbol": "ZRXBTC",
# "makerCommission": "0.001",
# "takerCommission": "0.001"
# },
# ]
#
# fapi / future / linear
#
# {
# "feeTier": 0, # account commisssion tier
# "canTrade": True, # if can trade
# "canDeposit": True, # if can transfer in asset
# "canWithdraw": True, # if can transfer out asset
# "updateTime": 0,
# "totalInitialMargin": "0.00000000", # total initial margin required with current mark price(useless with isolated positions), only for USDT asset
# "totalMaintMargin": "0.00000000", # total maintenance margin required, only for USDT asset
# "totalWalletBalance": "23.72469206", # total wallet balance, only for USDT asset
# "totalUnrealizedProfit": "0.00000000", # total unrealized profit, only for USDT asset
# "totalMarginBalance": "23.72469206", # total margin balance, only for USDT asset
# "totalPositionInitialMargin": "0.00000000", # initial margin required for positions with current mark price, only for USDT asset
# "totalOpenOrderInitialMargin": "0.00000000", # initial margin required for open orders with current mark price, only for USDT asset
# "totalCrossWalletBalance": "23.72469206", # crossed wallet balance, only for USDT asset
# "totalCrossUnPnl": "0.00000000", # unrealized profit of crossed positions, only for USDT asset
# "availableBalance": "23.72469206", # available balance, only for USDT asset
# "maxWithdrawAmount": "23.72469206" # maximum amount for transfer out, only for USDT asset
# ...
# }
#
# dapi / delivery / inverse
#
# {
# "canDeposit": True,
# "canTrade": True,
# "canWithdraw": True,
# "feeTier": 2,
# "updateTime": 0
# }
#
if (type == 'spot') or (type == 'margin'):
#
# [
# {
# "symbol": "ZRXBNB",
# "makerCommission": "0.001",
# "takerCommission": "0.001"
# },
# {
# "symbol": "ZRXBTC",
# "makerCommission": "0.001",
# "takerCommission": "0.001"
# },
# ]
#
result = {}
for i in range(0, len(response)):
fee = self.parse_trading_fee(response[i])
symbol = fee['symbol']
result[symbol] = fee
return result
elif type == 'future':
#
# {
# "feeTier": 0, # account commisssion tier
# "canTrade": True, # if can trade
# "canDeposit": True, # if can transfer in asset
# "canWithdraw": True, # if can transfer out asset
# "updateTime": 0,
# "totalInitialMargin": "0.00000000", # total initial margin required with current mark price(useless with isolated positions), only for USDT asset
# "totalMaintMargin": "0.00000000", # total maintenance margin required, only for USDT asset
# "totalWalletBalance": "23.72469206", # total wallet balance, only for USDT asset
# "totalUnrealizedProfit": "0.00000000", # total unrealized profit, only for USDT asset
# "totalMarginBalance": "23.72469206", # total margin balance, only for USDT asset
# "totalPositionInitialMargin": "0.00000000", # initial margin required for positions with current mark price, only for USDT asset
# "totalOpenOrderInitialMargin": "0.00000000", # initial margin required for open orders with current mark price, only for USDT asset
# "totalCrossWalletBalance": "23.72469206", # crossed wallet balance, only for USDT asset
# "totalCrossUnPnl": "0.00000000", # unrealized profit of crossed positions, only for USDT asset
# "availableBalance": "23.72469206", # available balance, only for USDT asset
# "maxWithdrawAmount": "23.72469206" # maximum amount for transfer out, only for USDT asset
# ...
# }
#
symbols = list(self.markets.keys())
result = {}
feeTier = self.safe_integer(response, 'feeTier')
feeTiers = self.fees[type]['trading']['tiers']
maker = feeTiers['maker'][feeTier][1]
taker = feeTiers['taker'][feeTier][1]
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = {
'info': {
'feeTier': feeTier,
},
'symbol': symbol,
'maker': maker,
'taker': taker,
}
return result
elif type == 'delivery':
#
# {
# "canDeposit": True,
# "canTrade": True,
# "canWithdraw": True,
# "feeTier": 2,
# "updateTime": 0
# }
#
symbols = list(self.markets.keys())
result = {}
feeTier = self.safe_integer(response, 'feeTier')
feeTiers = self.fees[type]['trading']['tiers']
maker = feeTiers['maker'][feeTier][1]
taker = feeTiers['taker'][feeTier][1]
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = {
'info': {
'feeTier': feeTier,
},
'symbol': symbol,
'maker': maker,
'taker': taker,
}
return result
def futures_transfer(self, code, amount, type, params={}):
if (type < 1) or (type > 4):
raise ArgumentsRequired(self.id + ' type must be between 1 and 4')
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': amount,
'type': type,
}
response = self.sapiPostFuturesTransfer(self.extend(request, params))
#
# {
# "tranId": 100000001
# }
#
return self.parse_transfer(response, currency)
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = None
if market['linear']:
method = 'fapiPublicGetPremiumIndex'
elif market['inverse']:
method = 'dapiPublicGetPremiumIndex'
else:
raise NotSupported(self.id + ' fetchFundingRate() supports linear and inverse contracts only')
response = getattr(self, method)(self.extend(request, params))
if market['inverse']:
response = response[0]
#
# {
# "symbol": "BTCUSDT",
# "markPrice": "45802.81129892",
# "indexPrice": "45745.47701915",
# "estimatedSettlePrice": "45133.91753671",
# "lastFundingRate": "0.00063521",
# "interestRate": "0.00010000",
# "nextFundingTime": "1621267200000",
# "time": "1621252344001"
# }
#
return self.parse_funding_rate(response, market)
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
#
# Gets a history of funding rates with their timestamps
# (param) symbol: Future currency pair(e.g. "BTC/USDT")
# (param) limit: maximum number of data points returned
# (param) since: Unix timestamp in milliseconds for the time of the earliest requested funding rate
# (param) params: Object containing more params for the request
#    - until: Unix timestamp in milliseconds for the time of the latest requested funding rate
# return: [{symbol, fundingRate, timestamp}]
#
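# Illustrative usage only - the symbol, limit and timestamps below are hypothetical, not from the exchange:
#
#     exchange.fetch_funding_rate_history('BTC/USDT', 1621252344001, 100, {'until': 1621267200000})
#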
self.load_markets()
request = {}
method = None
defaultType = self.safe_string_2(self.options, 'fetchFundingRateHistory', 'defaultType', 'future')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
if type == 'future':
method = 'fapiPublicGetFundingRate'
elif type == 'delivery':
method = 'dapiPublicGetFundingRate'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if market['linear']:
method = 'fapiPublicGetFundingRate'
elif market['inverse']:
method = 'dapiPublicGetFundingRate'
if method is None:
raise NotSupported(self.id + ' fetchFundingRateHistory() not supported for ' + type + ' markets')
if since is not None:
request['startTime'] = since
till = self.safe_integer(params, 'till') # unified in milliseconds
endTime = self.safe_string(params, 'endTime', till) # exchange-specific in milliseconds
params = self.omit(params, ['endTime', 'till'])
if endTime is not None:
request['endTime'] = endTime
if limit is not None:
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# {
# "symbol": "BTCUSDT",
# "fundingRate": "0.00063521",
# "fundingTime": "1621267200000",
# }
#
rates = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_integer(entry, 'fundingTime')
rates.append({
'info': entry,
'symbol': self.safe_symbol(self.safe_string(entry, 'symbol')),
'fundingRate': self.safe_number(entry, 'fundingRate'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
def fetch_funding_rates(self, symbols=None, params={}):
self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchFundingRates', 'defaultType', 'future')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
if type == 'future':
method = 'fapiPublicGetPremiumIndex'
elif type == 'delivery':
method = 'dapiPublicGetPremiumIndex'
else:
raise NotSupported(self.id + ' fetchFundingRates() supports linear and inverse contracts only')
response = getattr(self, method)(query)
result = []
for i in range(0, len(response)):
entry = response[i]
parsed = self.parse_funding_rate(entry)
result.append(parsed)
return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, premiumIndex, market=None):
# ensure it matches with https://www.binance.com/en/futures/funding-history/0
#
# {
# "symbol": "BTCUSDT",
# "markPrice": "45802.81129892",
# "indexPrice": "45745.47701915",
# "estimatedSettlePrice": "45133.91753671",
# "lastFundingRate": "0.00063521",
# "interestRate": "0.00010000",
# "nextFundingTime": "1621267200000",
# "time": "1621252344001"
# }
#
timestamp = self.safe_integer(premiumIndex, 'time')
marketId = self.safe_string(premiumIndex, 'symbol')
symbol = self.safe_symbol(marketId, market)
markPrice = self.safe_number(premiumIndex, 'markPrice')
indexPrice = self.safe_number(premiumIndex, 'indexPrice')
interestRate = self.safe_number(premiumIndex, 'interestRate')
estimatedSettlePrice = self.safe_number(premiumIndex, 'estimatedSettlePrice')
nextFundingRate = self.safe_number(premiumIndex, 'lastFundingRate')
nextFundingTime = self.safe_integer(premiumIndex, 'nextFundingTime')
previousFundingTime = nextFundingTime - (8 * 3600000)
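# the funding interval on these contracts is 8 hours, so the previous funding time is inferred
# rather than reported, e.g.(illustrative): nextFundingTime 1621267200000 - 28800000 -> 1621238400000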
return {
'info': premiumIndex,
'symbol': symbol,
'markPrice': markPrice,
'indexPrice': indexPrice,
'interestRate': interestRate,
'estimatedSettlePrice': estimatedSettlePrice,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'previousFundingRate': None,
'nextFundingRate': nextFundingRate,
'previousFundingTimestamp': previousFundingTime, # subtract 8 hours
'nextFundingTimestamp': nextFundingTime,
'previousFundingDatetime': self.iso8601(previousFundingTime),
'nextFundingDatetime': self.iso8601(nextFundingTime),
}
def parse_account_positions(self, account):
positions = self.safe_value(account, 'positions')
assets = self.safe_value(account, 'assets')
balances = {}
for i in range(0, len(assets)):
entry = assets[i]
currencyId = self.safe_string(entry, 'asset')
code = self.safe_currency_code(currencyId)
crossWalletBalance = self.safe_string(entry, 'crossWalletBalance')
crossUnPnl = self.safe_string(entry, 'crossUnPnl')
balances[code] = {
'crossMargin': Precise.string_add(crossWalletBalance, crossUnPnl),
'crossWalletBalance': crossWalletBalance,
}
result = []
for i in range(0, len(positions)):
position = positions[i]
marketId = self.safe_string(position, 'symbol')
market = self.safe_market(marketId)
code = market['quote'] if (self.options['defaultType'] == 'future') else market['base']
# sometimes not all the codes are correctly returned...
if code in balances:
parsed = self.parse_account_position(self.extend(position, {
'crossMargin': balances[code]['crossMargin'],
'crossWalletBalance': balances[code]['crossWalletBalance'],
}), market)
result.append(parsed)
return result
def parse_account_position(self, position, market=None):
#
# usdm
# {
# "symbol": "BTCBUSD",
# "initialMargin": "0",
# "maintMargin": "0",
# "unrealizedProfit": "0.00000000",
# "positionInitialMargin": "0",
# "openOrderInitialMargin": "0",
# "leverage": "20",
# "isolated": False,
# "entryPrice": "0.0000",
# "maxNotional": "100000",
# "positionSide": "BOTH",
# "positionAmt": "0.000",
# "notional": "0",
# "isolatedWallet": "0",
# "updateTime": "0",
# "crossMargin": "100.93634809",
# }
#
# coinm
# {
# "symbol": "BTCUSD_210625",
# "initialMargin": "0.00024393",
# "maintMargin": "0.00002439",
# "unrealizedProfit": "-0.00000163",
# "positionInitialMargin": "0.00024393",
# "openOrderInitialMargin": "0",
# "leverage": "10",
# "isolated": False,
# "positionSide": "BOTH",
# "entryPrice": "41021.20000069",
# "maxQty": "100",
# "notionalValue": "0.00243939",
# "isolatedWallet": "0",
# "crossMargin": "0.314"
# "crossWalletBalance": "34",
# }
#
marketId = self.safe_string(position, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
leverageString = self.safe_string(position, 'leverage')
leverage = int(leverageString)
initialMarginString = self.safe_string(position, 'initialMargin')
initialMargin = self.parse_number(initialMarginString)
initialMarginPercentageString = Precise.string_div('1', leverageString, 8)
rational = (1000 % leverage) == 0
if not rational:
initialMarginPercentageString = Precise.string_div(Precise.string_add(initialMarginPercentageString, '1e-8'), '1', 8)
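# the two lines above round 1/leverage up when it is not exactly representable,
# e.g.(illustrative): leverage 3 -> '0.33333333', (1000 % 3) != 0, so 1e-8 is added -> '0.33333334'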
usdm = ('notional' in position)
maintenanceMarginString = self.safe_string(position, 'maintMargin')
maintenanceMargin = self.parse_number(maintenanceMarginString)
entryPriceString = self.safe_string(position, 'entryPrice')
entryPrice = self.parse_number(entryPriceString)
notionalString = self.safe_string_2(position, 'notional', 'notionalValue')
notionalStringAbs = Precise.string_abs(notionalString)
notionalFloat = float(notionalString)
notionalFloatAbs = float(notionalStringAbs)
notional = self.parse_number(Precise.string_abs(notionalString))
contractsString = self.safe_string(position, 'positionAmt')
contractsStringAbs = Precise.string_abs(contractsString)
if contractsString is None:
entryNotional = Precise.string_mul(Precise.string_mul(leverageString, initialMarginString), entryPriceString)
contractsString = Precise.string_div(entryNotional, market['contractSize'])
contractsStringAbs = Precise.string_div(Precise.string_add(contractsString, '0.5'), '1', 0)
contracts = self.parse_number(contractsStringAbs)
leverageBrackets = self.safe_value(self.options, 'leverageBrackets', {})
leverageBracket = self.safe_value(leverageBrackets, symbol, [])
maintenanceMarginPercentageString = None
for i in range(0, len(leverageBracket)):
bracket = leverageBracket[i]
if notionalFloatAbs < bracket[0]:
break
maintenanceMarginPercentageString = bracket[1]
maintenanceMarginPercentage = self.parse_number(maintenanceMarginPercentageString)
unrealizedPnlString = self.safe_string(position, 'unrealizedProfit')
unrealizedPnl = self.parse_number(unrealizedPnlString)
timestamp = self.safe_integer(position, 'updateTime')
if timestamp == 0:
timestamp = None
isolated = self.safe_value(position, 'isolated')
marginType = None
collateralString = None
walletBalance = None
if isolated:
marginType = 'isolated'
walletBalance = self.safe_string(position, 'isolatedWallet')
collateralString = Precise.string_add(walletBalance, unrealizedPnlString)
else:
marginType = 'cross'
walletBalance = self.safe_string(position, 'crossWalletBalance')
collateralString = self.safe_string(position, 'crossMargin')
collateral = self.parse_number(collateralString)
marginRatio = None
side = None
percentage = None
liquidationPriceStringRaw = None
liquidationPrice = None
if notionalFloat == 0.0:
entryPrice = None
else:
side = 'short' if (notionalFloat < 0) else 'long'
marginRatio = self.parse_number(Precise.string_div(Precise.string_add(Precise.string_div(maintenanceMarginString, collateralString), '5e-5'), '1', 4))
percentage = self.parse_number(Precise.string_mul(Precise.string_div(unrealizedPnlString, initialMarginString, 4), '100'))
if usdm:
# calculate liquidation price
#
# liquidationPrice = (walletBalance / (contracts * (±1 + mmp))) + (±entryPrice / (±1 + mmp))
#
# mmp = maintenanceMarginPercentage
# where ± is negative for long and positive for short
# TODO: calculate liquidation price for coinm contracts
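# worked example with made-up numbers(not exchange data): a long position with
# entryPrice 40000, contracts 0.01, walletBalance 100 and mmp 0.004 gives
# 100 / (0.01 * (-1 + 0.004)) + (-40000 / (-1 + 0.004)) = -10040.16 + 40160.64 ~ 30120.48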
onePlusMaintenanceMarginPercentageString = None
entryPriceSignString = entryPriceString
if side == 'short':
onePlusMaintenanceMarginPercentageString = Precise.string_add('1', maintenanceMarginPercentageString)
else:
onePlusMaintenanceMarginPercentageString = Precise.string_add('-1', maintenanceMarginPercentageString)
entryPriceSignString = Precise.string_mul('-1', entryPriceSignString)
leftSide = Precise.string_div(walletBalance, Precise.string_mul(contractsStringAbs, onePlusMaintenanceMarginPercentageString))
rightSide = Precise.string_div(entryPriceSignString, onePlusMaintenanceMarginPercentageString)
liquidationPriceStringRaw = Precise.string_add(leftSide, rightSide)
else:
# calculate liquidation price
#
# liquidationPrice = (contracts * contractSize(±1 - mmp)) / (±1/entryPrice * contracts * contractSize - walletBalance)
#
onePlusMaintenanceMarginPercentageString = None
entryPriceSignString = entryPriceString
if side == 'short':
onePlusMaintenanceMarginPercentageString = Precise.string_sub('1', maintenanceMarginPercentageString)
else:
onePlusMaintenanceMarginPercentageString = Precise.string_sub('-1', maintenanceMarginPercentageString)
entryPriceSignString = Precise.string_mul('-1', entryPriceSignString)
size = Precise.string_mul(contractsStringAbs, market['contractSize'])
leftSide = Precise.string_mul(size, onePlusMaintenanceMarginPercentageString)
rightSide = Precise.string_sub(Precise.string_mul(Precise.string_div('1', entryPriceSignString), size), walletBalance)
liquidationPriceStringRaw = Precise.string_div(leftSide, rightSide)
pricePrecision = market['precision']['price']
pricePrecisionPlusOne = pricePrecision + 1
pricePrecisionPlusOneString = str(pricePrecisionPlusOne)
# round half up
rounder = Precise('5e-' + pricePrecisionPlusOneString)
rounderString = str(rounder)
liquidationPriceRoundedString = Precise.string_add(rounderString, liquidationPriceStringRaw)
truncatedLiquidationPrice = Precise.string_div(liquidationPriceRoundedString, '1', pricePrecision)
if truncatedLiquidationPrice[0] == '-':
# user cannot be liquidated
# since he has more collateral than the size of the position
truncatedLiquidationPrice = None
liquidationPrice = self.parse_number(truncatedLiquidationPrice)
positionSide = self.safe_string(position, 'positionSide')
hedged = positionSide != 'BOTH'
return {
'info': position,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'initialMargin': initialMargin,
'initialMarginPercentage': self.parse_number(initialMarginPercentageString),
'maintenanceMargin': maintenanceMargin,
'maintenanceMarginPercentage': maintenanceMarginPercentage,
'entryPrice': entryPrice,
'notional': notional,
'leverage': self.parse_number(leverageString),
'unrealizedPnl': unrealizedPnl,
'contracts': contracts,
'contractSize': self.parse_number(market['contractSize']),
'marginRatio': marginRatio,
'liquidationPrice': liquidationPrice,
'markPrice': None,
'collateral': collateral,
'marginType': marginType,
'side': side,
'hedged': hedged,
'percentage': percentage,
}
def parse_position_risk(self, position, market=None):
#
# usdm
# {
# "symbol": "BTCUSDT",
# "positionAmt": "0.001",
# "entryPrice": "43578.07000",
# "markPrice": "43532.30000000",
# "unRealizedProfit": "-0.04577000",
# "liquidationPrice": "21841.24993976",
# "leverage": "2",
# "maxNotionalValue": "300000000",
# "marginType": "isolated",
# "isolatedMargin": "21.77841506",
# "isAutoAddMargin": "false",
# "positionSide": "BOTH",
# "notional": "43.53230000",
# "isolatedWallet": "21.82418506",
# "updateTime": "1621358023886"
# }
#
# coinm
# {
# "symbol": "BTCUSD_PERP",
# "positionAmt": "2",
# "entryPrice": "37643.10000021",
# "markPrice": "38103.05510455",
# "unRealizedProfit": "0.00006413",
# "liquidationPrice": "25119.97445760",
# "leverage": "2",
# "maxQty": "1500",
# "marginType": "isolated",
# "isolatedMargin": "0.00274471",
# "isAutoAddMargin": "false",
# "positionSide": "BOTH",
# "notionalValue": "0.00524892",
# "isolatedWallet": "0.00268058"
# }
#
marketId = self.safe_string(position, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
leverageBrackets = self.safe_value(self.options, 'leverageBrackets', {})
leverageBracket = self.safe_value(leverageBrackets, symbol, [])
notionalString = self.safe_string_2(position, 'notional', 'notionalValue')
notionalStringAbs = Precise.string_abs(notionalString)
notionalFloatAbs = float(notionalStringAbs)
notionalFloat = float(notionalString)
maintenanceMarginPercentageString = None
for i in range(0, len(leverageBracket)):
bracket = leverageBracket[i]
if notionalFloatAbs < bracket[0]:
break
maintenanceMarginPercentageString = bracket[1]
notional = self.parse_number(notionalStringAbs)
contractsAbs = Precise.string_abs(self.safe_string(position, 'positionAmt'))
contracts = self.parse_number(contractsAbs)
unrealizedPnlString = self.safe_string(position, 'unRealizedProfit')
unrealizedPnl = self.parse_number(unrealizedPnlString)
leverageString = self.safe_string(position, 'leverage')
leverage = int(leverageString)
liquidationPriceString = self.omit_zero(self.safe_string(position, 'liquidationPrice'))
liquidationPrice = self.parse_number(liquidationPriceString)
collateralString = None
marginType = self.safe_string(position, 'marginType')
side = None
if notionalFloat > 0:
side = 'long'
elif notionalFloat < 0:
side = 'short'
entryPriceString = self.safe_string(position, 'entryPrice')
entryPrice = self.parse_number(entryPriceString)
if marginType == 'cross':
# calculate collateral
if market['linear']:
# walletBalance = (liquidationPrice * (±1 + mmp) ± entryPrice) * contracts
onePlusMaintenanceMarginPercentageString = None
entryPriceSignString = entryPriceString
if side == 'short':
onePlusMaintenanceMarginPercentageString = Precise.string_add('1', maintenanceMarginPercentageString)
entryPriceSignString = Precise.string_mul('-1', entryPriceSignString)
else:
onePlusMaintenanceMarginPercentageString = Precise.string_add('-1', maintenanceMarginPercentageString)
inner = Precise.string_mul(liquidationPriceString, onePlusMaintenanceMarginPercentageString)
leftSide = Precise.string_add(inner, entryPriceSignString)
collateralString = Precise.string_div(Precise.string_mul(leftSide, contractsAbs), '1', market['precision']['quote'])
else:
# walletBalance = (contracts * contractSize) * (±1/entryPrice - (±1 - mmp) / liquidationPrice)
onePlusMaintenanceMarginPercentageString = None
entryPriceSignString = entryPriceString
if side == 'short':
onePlusMaintenanceMarginPercentageString = Precise.string_sub('1', maintenanceMarginPercentageString)
else:
onePlusMaintenanceMarginPercentageString = Precise.string_sub('-1', maintenanceMarginPercentageString)
entryPriceSignString = Precise.string_mul('-1', entryPriceSignString)
leftSide = Precise.string_mul(contractsAbs, market['contractSize'])
rightSide = Precise.string_sub(Precise.string_div('1', entryPriceSignString), Precise.string_div(onePlusMaintenanceMarginPercentageString, liquidationPriceString))
collateralString = Precise.string_div(Precise.string_mul(leftSide, rightSide), '1', market['precision']['base'])
else:
collateralString = self.safe_string(position, 'isolatedMargin')
collateralString = '0' if (collateralString is None) else collateralString
collateralFloat = float(collateralString)
collateral = self.parse_number(collateralString)
markPrice = self.parse_number(self.omit_zero(self.safe_string(position, 'markPrice')))
timestamp = self.safe_integer(position, 'updateTime')
if timestamp == 0:
timestamp = None
maintenanceMarginPercentage = self.parse_number(maintenanceMarginPercentageString)
maintenanceMarginString = Precise.string_mul(maintenanceMarginPercentageString, notionalStringAbs)
maintenanceMargin = self.parse_number(maintenanceMarginString)
initialMarginPercentageString = Precise.string_div('1', leverageString, 8)
rational = (1000 % leverage) == 0
if not rational:
initialMarginPercentageString = Precise.string_add(initialMarginPercentageString, '1e-8')
initialMarginString = Precise.string_div(Precise.string_mul(notionalStringAbs, initialMarginPercentageString), '1', 8)
initialMargin = self.parse_number(initialMarginString)
marginRatio = None
percentage = None
if collateralFloat != 0.0:
marginRatio = self.parse_number(Precise.string_div(Precise.string_add(Precise.string_div(maintenanceMarginString, collateralString), '5e-5'), '1', 4))
percentage = self.parse_number(Precise.string_mul(Precise.string_div(unrealizedPnlString, initialMarginString, 4), '100'))
positionSide = self.safe_string(position, 'positionSide')
hedged = positionSide != 'BOTH'
return {
'info': position,
'symbol': symbol,
'contracts': contracts,
'contractSize': self.parse_number(market['contractSize']),
'unrealizedPnl': unrealizedPnl,
'leverage': self.parse_number(leverageString),
'liquidationPrice': liquidationPrice,
'collateral': collateral,
'notional': notional,
'markPrice': markPrice,
'entryPrice': entryPrice,
'timestamp': timestamp,
'initialMargin': initialMargin,
'initialMarginPercentage': self.parse_number(initialMarginPercentageString),
'maintenanceMargin': maintenanceMargin,
'maintenanceMarginPercentage': maintenanceMarginPercentage,
'marginRatio': marginRatio,
'datetime': self.iso8601(timestamp),
'marginType': marginType,
'side': side,
'hedged': hedged,
'percentage': percentage,
}
def load_leverage_brackets(self, reload=False, params={}):
self.load_markets()
# by default cache the leverage bracket
# it contains useful stuff like the maintenance margin and initial margin for positions
leverageBrackets = self.safe_value(self.options, 'leverageBrackets')
if (leverageBrackets is None) or (reload):
method = None
defaultType = self.safe_string(self.options, 'defaultType', 'future')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
if type == 'future':
method = 'fapiPrivateGetLeverageBracket'
elif type == 'delivery':
method = 'dapiPrivateV2GetLeverageBracket'
else:
raise NotSupported(self.id + ' loadLeverageBrackets() supports linear and inverse contracts only')
response = getattr(self, method)(query)
self.options['leverageBrackets'] = {}
for i in range(0, len(response)):
entry = response[i]
marketId = self.safe_string(entry, 'symbol')
symbol = self.safe_symbol(marketId)
brackets = self.safe_value(entry, 'brackets')
result = []
for j in range(0, len(brackets)):
bracket = brackets[j]
# we use floats here internally on purpose
floorValue = self.safe_float_2(bracket, 'notionalFloor', 'qtyFloor')
maintenanceMarginPercentage = self.safe_string(bracket, 'maintMarginRatio')
result.append([floorValue, maintenanceMarginPercentage])
self.options['leverageBrackets'][symbol] = result
return self.options['leverageBrackets']
def fetch_positions(self, symbols=None, params={}):
defaultMethod = self.safe_string(self.options, 'fetchPositions', 'positionRisk')
if defaultMethod == 'positionRisk':
return self.fetch_positions_risk(symbols, params)
elif defaultMethod == 'account':
return self.fetch_account_positions(symbols, params)
else:
raise NotSupported(self.id + '.options["fetchPositions"] = "' + defaultMethod + '" is invalid, please choose between "account" and "positionRisk"')
def fetch_account_positions(self, symbols=None, params={}):
if symbols is not None:
if not isinstance(symbols, list):
raise ArgumentsRequired(self.id + ' fetchPositions requires an array argument for symbols')
self.load_markets()
self.load_leverage_brackets()
method = None
defaultType = self.safe_string(self.options, 'defaultType', 'future')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
if type == 'future':
method = 'fapiPrivateGetAccount'
elif type == 'delivery':
method = 'dapiPrivateGetAccount'
else:
raise NotSupported(self.id + ' fetchPositions() supports linear and inverse contracts only')
account = getattr(self, method)(query)
result = self.parse_account_positions(account)
return self.filter_by_array(result, 'symbol', symbols, False)
def fetch_positions_risk(self, symbols=None, params={}):
if symbols is not None:
if not isinstance(symbols, list):
raise ArgumentsRequired(self.id + ' fetchPositions requires an array argument for symbols')
self.load_markets()
self.load_leverage_brackets()
request = {}
method = None
defaultType = 'future'
defaultType = self.safe_string(self.options, 'defaultType', defaultType)
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
if (type == 'future') or (type == 'linear'):
method = 'fapiPrivateGetPositionRisk'
elif (type == 'delivery') or (type == 'inverse'):
method = 'dapiPrivateGetPositionRisk'
else:
raise NotSupported(self.id + ' fetchPositionsRisk() supports linear and inverse contracts only')
response = getattr(self, method)(self.extend(request, params))
result = []
for i in range(0, len(response)):
parsed = self.parse_position_risk(response[i])
result.append(parsed)
return self.filter_by_array(result, 'symbol', symbols, False)
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
method = None
defaultType = 'future'
request = {
'incomeType': 'FUNDING_FEE', # "TRANSFER","WELCOME_BONUS", "REALIZED_PNL","FUNDING_FEE", "COMMISSION" and "INSURANCE_CLEAR"
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if market['linear']:
defaultType = 'future'
elif market['inverse']:
defaultType = 'delivery'
else:
raise NotSupported(self.id + ' fetchFundingHistory() supports linear and inverse contracts only')
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
defaultType = self.safe_string_2(self.options, 'fetchFundingHistory', 'defaultType', defaultType)
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
if (type == 'future') or (type == 'linear'):
method = 'fapiPrivateGetIncome'
elif (type == 'delivery') or (type == 'inverse'):
method = 'dapiPrivateGetIncome'
else:
raise NotSupported(self.id + ' fetchFundingHistory() supports linear and inverse contracts only')
response = getattr(self, method)(self.extend(request, params))
return self.parse_incomes(response, market, since, limit)
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
if (leverage < 1) or (leverage > 125):
raise BadRequest(self.id + ' leverage should be between 1 and 125')
self.load_markets()
market = self.market(symbol)
method = None
if market['linear']:
method = 'fapiPrivatePostLeverage'
elif market['inverse']:
method = 'dapiPrivatePostLeverage'
else:
raise NotSupported(self.id + ' setLeverage() supports linear and inverse contracts only')
request = {
'symbol': market['id'],
'leverage': leverage,
}
return getattr(self, method)(self.extend(request, params))
def set_margin_mode(self, marginType, symbol=None, params={}):
#
# {"code": -4048 , "msg": "Margin type cannot be changed if there exists position."}
#
# or
#
# {"code": 200, "msg": "success"}
#
marginType = marginType.upper()
if (marginType != 'ISOLATED') and (marginType != 'CROSSED'):
raise BadRequest(self.id + ' marginType must be either isolated or crossed')
self.load_markets()
market = self.market(symbol)
method = None
if market['linear']:
method = 'fapiPrivatePostMarginType'
elif market['inverse']:
method = 'dapiPrivatePostMarginType'
else:
raise NotSupported(self.id + ' setMarginMode() supports linear and inverse contracts only')
request = {
'symbol': market['id'],
'marginType': marginType,
}
return getattr(self, method)(self.extend(request, params))
def set_position_mode(self, hedged, symbol=None, params={}):
defaultType = self.safe_string(self.options, 'defaultType', 'future')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, ['type'])
dualSidePosition = None
if hedged:
dualSidePosition = 'true'
else:
dualSidePosition = 'false'
request = {
'dualSidePosition': dualSidePosition,
}
method = None
if type == 'delivery':
method = 'dapiPrivatePostPositionSideDual'
else:
# default to future
method = 'fapiPrivatePostPositionSideDual'
#
# {
# "code": 200,
# "msg": "success"
# }
#
return getattr(self, method)(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
if not (api in self.urls['api']):
raise NotSupported(self.id + ' does not have a testnet/sandbox URL for ' + api + ' endpoints')
url = self.urls['api'][api]
url += '/' + path
if api == 'wapi':
url += '.html'
if path == 'historicalTrades':
if self.apiKey:
headers = {
'X-MBX-APIKEY': self.apiKey,
}
else:
raise AuthenticationError(self.id + ' historicalTrades endpoint requires `apiKey` credential')
userDataStream = (path == 'userDataStream') or (path == 'listenKey')
if userDataStream:
if self.apiKey:
# v1 special case for userDataStream
headers = {
'X-MBX-APIKEY': self.apiKey,
'Content-Type': 'application/x-www-form-urlencoded',
}
if method != 'GET':
body = self.urlencode(params)
else:
raise AuthenticationError(self.id + ' userDataStream endpoint requires `apiKey` credential')
elif (api == 'private') or (api == 'sapi') or (api == 'wapi' and path != 'systemStatus') or (api == 'dapiPrivate') or (api == 'dapiPrivateV2') or (api == 'fapiPrivate') or (api == 'fapiPrivateV2'):
self.check_required_credentials()
query = None
recvWindow = self.safe_integer(self.options, 'recvWindow', 5000)
if (api == 'sapi') and (path == 'asset/dust'):
query = self.urlencode_with_array_repeat(self.extend({
'timestamp': self.nonce(),
'recvWindow': recvWindow,
}, params))
elif (path == 'batchOrders') or (path.find('sub-account') >= 0):
query = self.rawencode(self.extend({
'timestamp': self.nonce(),
'recvWindow': recvWindow,
}, params))
else:
query = self.urlencode(self.extend({
'timestamp': self.nonce(),
'recvWindow': recvWindow,
}, params))
signature = self.hmac(self.encode(query), self.encode(self.secret))
query += '&' + 'signature=' + signature
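# the signed query sent to the exchange looks roughly like the following(values are illustrative, not real credentials):
# "timestamp=1621267200000&recvWindow=5000&symbol=BTCUSDT&signature=<hex-encoded HMAC-SHA256 of the query>"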
headers = {
'X-MBX-APIKEY': self.apiKey,
}
if (method == 'GET') or (method == 'DELETE') or (api == 'wapi'):
url += '?' + query
else:
body = query
headers['Content-Type'] = 'application/x-www-form-urlencoded'
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 418) or (code == 429):
raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body)
# error response in a form: {"code": -1013, "msg": "Invalid quantity."}
# the following block contains legacy checks against message patterns in the "msg" property
# will switch to "code" checks eventually, once we know all of them
if code >= 400:
if body.find('Price * QTY is zero or less') >= 0:
raise InvalidOrder(self.id + ' order cost = amount * price is zero or less ' + body)
if body.find('LOT_SIZE') >= 0:
raise InvalidOrder(self.id + ' order amount should be evenly divisible by lot size ' + body)
if body.find('PRICE_FILTER') >= 0:
raise InvalidOrder(self.id + ' order price is invalid, i.e. exceeds allowed price precision, exceeds min price or max price limits or is invalid float value in general, use self.price_to_precision(symbol, amount) ' + body)
if response is None:
return # fallback to default error handler
# check success value for wapi endpoints
# response in format {'msg': 'The coin does not exist.', 'success': True/false}
success = self.safe_value(response, 'success', True)
if not success:
message = self.safe_string(response, 'msg')
parsedMessage = None
if message is not None:
try:
parsedMessage = json.loads(message)
except Exception as e:
# do nothing
parsedMessage = None
if parsedMessage is not None:
response = parsedMessage
message = self.safe_string(response, 'msg')
if message is not None:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, self.id + ' ' + message)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, self.id + ' ' + message)
# checks against error codes
error = self.safe_string(response, 'code')
if error is not None:
# https://github.com/ccxt/ccxt/issues/6501
# https://github.com/ccxt/ccxt/issues/7742
if (error == '200') or Precise.string_equals(error, '0'):
return
# a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
# although the message is very confusing, it is raised by Binance
# on a temporary ban: the API key is valid, but disabled for a while
if (error == '-2015') and self.options['hasAlreadyAuthenticatedSuccessfully']:
raise DDoSProtection(self.id + ' temporary banned: ' + body)
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
raise ExchangeError(feedback)
if not success:
raise ExchangeError(self.id + ' ' + body)
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noSymbol' in config) and not ('symbol' in params):
return config['noSymbol']
elif ('noPoolId' in config) and not ('poolId' in params):
return config['noPoolId']
elif ('byLimit' in config) and ('limit' in params):
limit = params['limit']
byLimit = config['byLimit']
for i in range(0, len(byLimit)):
entry = byLimit[i]
if limit <= entry[0]:
return entry[1]
return self.safe_integer(config, 'cost', 1)
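# example of the byLimit rule with illustrative values(not actual exchange rate-limit tiers):
# config = {'byLimit': [[100, 1], [500, 5]]} and params = {'limit': 200} returns 5,
# because 200 is above the first tier(100) and within the second tier(500)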
def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
response = self.fetch2(path, api, method, params, headers, body, config, context)
# a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
if (api == 'private') or (api == 'wapi'):
self.options['hasAlreadyAuthenticatedSuccessfully'] = True
return response
def modify_margin_helper(self, symbol, amount, addOrReduce, params={}):
# used to modify isolated positions
defaultType = self.safe_string(self.options, 'defaultType', 'future')
if defaultType == 'spot':
defaultType = 'future'
type = self.safe_string(params, 'type', defaultType)
if (type == 'margin') or (type == 'spot'):
raise NotSupported(self.id + ' add / reduce margin only supported with type future or delivery')
self.load_markets()
market = self.market(symbol)
request = {
'type': addOrReduce,
'symbol': market['id'],
'amount': amount,
}
method = None
code = None
if type == 'future':
method = 'fapiPrivatePostPositionMargin'
code = market['quote']
else:
method = 'dapiPrivatePostPositionMargin'
code = market['base']
response = getattr(self, method)(self.extend(request, params))
#
# {
# "code": 200,
# "msg": "Successfully modify position margin.",
# "amount": 0.001,
# "type": 1
# }
#
rawType = self.safe_integer(response, 'type')
resultType = 'add' if (rawType == 1) else 'reduce'
resultAmount = self.safe_number(response, 'amount')
errorCode = self.safe_string(response, 'code')
status = 'ok' if (errorCode == '200') else 'failed'
return {
'info': response,
'type': resultType,
'amount': resultAmount,
'code': code,
'symbol': market['symbol'],
'status': status,
}
def reduce_margin(self, symbol, amount, params={}):
return self.modify_margin_helper(symbol, amount, 2, params)
def add_margin(self, symbol, amount, params={}):
return self.modify_margin_helper(symbol, amount, 1, params)
def fetch_borrow_rate(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
# 'vipLevel': self.safe_integer(params, 'vipLevel'),
}
response = self.sapiGetMarginInterestRateHistory(self.extend(request, params))
#
# [
# {
# "asset": "USDT",
# "timestamp": 1638230400000,
# "dailyInterestRate": "0.0006",
# "vipLevel": 0
# },
# ...
# ]
#
rate = self.safe_value(response, 0)
timestamp = self.safe_number(rate, 'timestamp')
return {
'currency': code,
'rate': self.safe_number(rate, 'dailyInterestRate'),
'period': 86400000,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': response,
}
| parse_income |
About.style.tsx | import styled from 'styled-components';
import { COLORS } from 'common';
import { TeamList } from 'screens/About/About';
import { COLOR_MAP } from 'common/colors';
export const Wrapper = styled.div`
background-color: white;
min-height: calc(100vh - 64px);
`;
export const Content = styled.div`
max-width: 900px;
margin: auto;
padding: 1rem 0 3rem;
@media (max-width: 932px) {
padding: 1rem;
}
`;
export const Header = styled.div`
background-color: ${COLORS.LIGHTGRAY};
`;
export const TextContent = styled.div`
max-width: 600px;
`;
export const ActiveAlumniButtonContainer = styled.div`
display: flex;
width: 180px;
border: 1px solid #e0e0e0;
margin-bottom: 2rem;
border-radius: 4px;
font-size: 15px;
`;
export const ActiveAlumniButton = styled.div<{
teamList?: TeamList;
}>`
width: 50%;
display: flex;
justify-content: center;
cursor: pointer;
font-family: 'Roboto', 'Helvetica', 'Arial', sans-serif;
padding: 0.1rem;
color: #828282;
line-height: 1;
padding: 14px 0 12px;
&:first-child {
border-right: 1px solid #e0e0e0;
border-bottom: ${({ teamList }) =>
teamList === TeamList.Active && `2px solid ${COLOR_MAP.GREEN.BASE}`};
border-bottom-left-radius: ${({ teamList }) =>
teamList === TeamList.Active && `3px`};
color: ${({ teamList }) => teamList === TeamList.Active && `black`};
font-weight: ${({ teamList }) => teamList === TeamList.Active && `bold`};
} | teamList === TeamList.Alumni && `2px solid ${COLOR_MAP.GREEN.BASE}`};
border-bottom-right-radius: ${({ teamList }) =>
teamList === TeamList.Alumni && `3px`};
color: ${({ teamList }) => teamList === TeamList.Alumni && `black`};
font-weight: ${({ teamList }) => teamList === TeamList.Alumni && `bold`};
}
`; |
&:last-child {
border-bottom: ${({ teamList }) => |
xcmp.rs | //! Setup of XCMP for parachain to allow cross chain transfers and other operations.
//! Very similar to https://github.com/galacticcouncil/Basilisk-node/blob/master/runtime/basilisk/src/xcm.rs
#![allow(unused_imports)] // allow until v2 xcm is released (instead of creating 2 runtimes)
use super::*; // recursive dependency onto runtime
use codec::{Decode, Encode};
use common::{xcmp::*, PriceConverter};
use composable_traits::{
defi::Ratio,
oracle::MinimalOracle,
xcm::assets::{RemoteAssetRegistryInspect, XcmAssetLocation},
};
use cumulus_primitives_core::{IsSystem, ParaId};
use frame_support::{
construct_runtime, ensure, log, parameter_types,
traits::{
Contains, Everything, KeyOwnerProofSystem, Nothing, OriginTrait, Randomness, StorageInfo,
},
weights::{
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
DispatchClass, IdentityFee, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients,
WeightToFeePolynomial,
},
PalletId, RuntimeDebug,
};
use orml_traits::{
location::{AbsoluteReserveProvider, RelativeReserveProvider, Reserve},
parameter_type_with_key, MultiCurrency,
};
use orml_xcm_support::{
DepositToAlternative, IsNativeConcrete, MultiCurrencyAdapter, MultiNativeAsset, OnDepositFail,
};
use pallet_xcm::XcmPassthrough;
use polkadot_parachain::primitives::Sibling;
use primitives::currency::WellKnownCurrency;
use sp_api::impl_runtime_apis;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::{
traits::{AccountIdLookup, BlakeTwo256, Convert, ConvertInto, Zero},
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, DispatchError,
};
use sp_std::{marker::PhantomData, prelude::*};
use xcm::latest::{prelude::*, Error};
use xcm_builder::{
AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom,
AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, EnsureXcmOrigin, FixedWeightBounds,
LocationInverter, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative,
SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
SovereignSignedViaLocation, TakeRevenue, TakeWeightCredit,
};
use xcm_executor::{
traits::{
ConvertOrigin, DropAssets, FilterAssetLocation, ShouldExecute, TransactAsset, WeightTrader,
},
Assets, Config, XcmExecutor,
};
parameter_types! {
pub KsmLocation: MultiLocation = MultiLocation::parent();
pub const RelayNetwork: NetworkId = NetworkId::Kusama;
pub RelayOrigin: Origin = cumulus_pallet_xcm::Origin::Relay.into();
pub Ancestry: MultiLocation = Parachain(ParachainInfo::parachain_id().into()).into();
}
pub type Barrier = (
XcmpDebug,
//DebugAllowUnpaidExecutionFrom<WellKnownsChains>,
// Expected responses are OK.
AllowKnownQueryResponses<RelayerXcm>,
// Subscriptions for version tracking are OK.
AllowSubscriptionsFrom<Everything>,
AllowTopLevelPaidExecutionFrom<Everything>,
TakeWeightCredit,
);
pub type LocalOriginToLocation = SignedToAccountId32<Origin, AccountId, RelayNetwork>;
/// The means for routing XCM messages which are not for local execution into the right message
/// queues.
pub type XcmRouter = (
// Two routers - use UMP to communicate with the relay chain:
cumulus_primitives_utility::ParentAsUmp<ParachainSystem, ()>,
// ..and XCMP to communicate with the sibling chains.
XcmpQueue,
);
/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used
/// when determining ownership of accounts for asset transacting and when attempting to use XCM
/// `Transact` in order to determine the dispatch Origin.
pub type LocationToAccountId = (
// The parent (Relay-chain) origin converts to the parent `AccountId`.
ParentIsPreset<AccountId>,
// Sibling parachain origins convert to AccountId via the `ParaId::into`.
SiblingParachainConvertsVia<Sibling, AccountId>,
// Straight up local `AccountId32` origins just alias directly to `AccountId`.
AccountId32Aliases<RelayNetwork, AccountId>,
);
/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance,
/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can
/// bias the kind of local `Origin` it will become.
pub type XcmOriginToTransactDispatchOrigin = (
// Sovereign account converter; this attempts to derive an `AccountId` from the origin location
// using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for
// foreign chains who want to have a local sovereign account on this chain which they control.
SovereignSignedViaLocation<LocationToAccountId, Origin>,
// Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when
// recognised.
RelayChainAsNative<RelayOrigin, Origin>,
// Native converter for sibling Parachains; will convert to a `SiblingPara` origin when
// recognised.
SiblingParachainAsNative<cumulus_pallet_xcm::Origin, Origin>,
// Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a
// transaction from the Root origin.
xcm_builder::ParentAsSuperuser<Origin>,
// Native signed account converter; this just converts an `AccountId32` origin into a normal
// `Origin::Signed` origin of the same 32-byte value.
SignedAccountId32AsNative<RelayNetwork, Origin>,
// Xcm origins can be represented natively under the Xcm pallet's Xcm origin.
XcmPassthrough<Origin>,
);
pub struct StaticAssetsMap;
pub mod parachains {
pub mod karura {
pub const ID: u32 = 3000;
pub const KUSD_KEY: &[u8] = &[0, 129];
}
}
impl XcmpAssets for StaticAssetsMap {
fn remote_to_local(location: MultiLocation) -> Option<CurrencyId> {
match location {
MultiLocation { parents: 1, interior: X2(Parachain(para_id), GeneralKey(key)) } =>
match (para_id, &key[..]) {
(parachains::karura::ID, parachains::karura::KUSD_KEY) =>
Some(CurrencyId::kUSD),
_ => None,
},
_ => None,
}
}
}
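// For example, per the mapping above, the Karura kUSD location
// `(parents: 1, X2(Parachain(3000), GeneralKey([0, 129])))` resolves to `CurrencyId::kUSD`,
// while any unmapped location falls through to `None`.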
pub type LocalAssetTransactor = MultiCurrencyAdapter<
crate::Assets,
UnknownTokens,
IsNativeConcrete<CurrencyId, AssetsIdConverter>,
AccountId,
LocationToAccountId,
CurrencyId,
AssetsIdConverter,
DepositToAlternative<TreasuryAccount, Tokens, CurrencyId, AccountId, Balance>,
>;
pub struct RelayReserveFromParachain;
impl FilterAssetLocation for RelayReserveFromParachain {
fn filter_asset_location(asset: &MultiAsset, origin: &MultiLocation) -> bool {
		// NOTE: In Acala there is no such thing
		// if the asset is KSM and is sent from some parachain, then allow it
AbsoluteReserveProvider::reserve(asset) == Some(MultiLocation::parent()) &&
matches!(origin, MultiLocation { parents: 1, interior: X1(Parachain(_)) })
}
}
type IsReserveAssetLocationFilter =
(DebugMultiNativeAsset, MultiNativeAsset<AbsoluteReserveProvider>, RelayReserveFromParachain);
type AssetsIdConverter =
CurrencyIdConvert<AssetsRegistry, CurrencyId, ParachainInfo, StaticAssetsMap>;
pub type Trader = TransactionFeePoolTrader<
AssetsIdConverter,
PriceConverter<AssetsRegistry>,
ToTreasury<AssetsIdConverter, crate::Assets, TreasuryAccount>,
WeightToFee,
>;
pub struct CaptureDropAssets<
Treasury: TakeRevenue,
PriceConverter: MinimalOracle,
AssetConverter: Convert<MultiLocation, Option<CurrencyId>>,
>(PhantomData<(Treasury, PriceConverter, AssetConverter)>);
/// If an asset was put into the Holding Register of the XCM VM but nothing was done with it,
/// or if it was too small to pay for weight,
/// it will get here.
/// If the asset location and origin are known, it is put into the treasury,
/// else it is trapped (hashed) so it can be returned on request once it becomes known.
impl<
Treasury: TakeRevenue,
PriceConverter: MinimalOracle,
AssetConverter: Convert<MultiLocation, Option<CurrencyId>>,
> DropAssets for CaptureDropAssets<Treasury, PriceConverter, AssetConverter>
{
fn drop_assets(origin: &MultiLocation, assets: Assets) -> Weight {
let multi_assets: Vec<MultiAsset> = assets.into();
let mut can_return_on_request = vec![];
log::info!(target : "xcmp", "drop_assets");
let mut weight = Weight::zero();
for asset in multi_assets {
if let MultiAsset { id: Concrete(location), fun: Fungible(_amount) } = asset.clone() {
if let Some(_converted) = AssetConverter::convert(location) {
Treasury::take_revenue(asset);
} else {
can_return_on_request.push(asset);
}
} else {
can_return_on_request.push(asset);
}
}
if !can_return_on_request.is_empty() {
weight += RelayerXcm::drop_assets(origin, can_return_on_request.into());
}
weight
}
}
pub type CaptureAssetTrap = CaptureDropAssets<
ToTreasury<AssetsIdConverter, crate::Assets, TreasuryAccount>,
PriceConverter<AssetsRegistry>,
AssetsIdConverter,
>;
pub struct XcmConfig;
impl xcm_executor::Config for XcmConfig {
type Call = Call;
type XcmSender = XcmRouter;
type AssetTransactor = LocalAssetTransactor;
type OriginConverter = XcmOriginToTransactDispatchOrigin;
type IsReserve = IsReserveAssetLocationFilter;
type IsTeleporter = (); // <- should be enough to allow teleportation of PICA
type LocationInverter = LocationInverter<Ancestry>;
type Barrier = Barrier;
type Weigher = FixedWeightBounds<UnitWeightCost, Call, MaxInstructions>;
type Trader = Trader;
type ResponseHandler = RelayerXcm;
type SubscriptionService = RelayerXcm;
type AssetClaims = RelayerXcm;
type AssetTrap = CaptureAssetTrap;
}
parameter_types! {
pub SelfLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(ParachainInfo::parachain_id().into())));
	// safe starting value: transfer only 1 asset per message (as in Acala)
pub const MaxAssetsForTransfer: usize = 1;
}
parameter_type_with_key! {
pub ParachainMinFee: |location: MultiLocation| -> Balance {
#[allow(clippy::match_ref_pats)] // false positive
#[allow(clippy::match_single_binding)]
match (location.parents, location.first_interior()) {
// relay KSM
(1, None) => 400_000_000_000,
			// if the amount is not enough, it will be trapped by the target chain or discarded as spam, so we bear that risk
			// we use the Acala team's XTokens, which is opinionated - it PANICs in case of a zero fee
(1, Some(Parachain(id))) => {
let location = XcmAssetLocation::new(location.clone());
AssetsRegistry::min_xcm_fee(ParaId::from(*id), location).unwrap_or(u128::MAX)
},
_ => u128::MAX,
}
};
}
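// For example, per the table above, a transfer whose destination fee asset is relay KSM
// (parents: 1, no interior) must carry at least 400_000_000_000; for a sibling parachain the
// minimum comes from AssetsRegistry::min_xcm_fee, and unknown locations get u128::MAX,
// which effectively disables the transfer.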
impl orml_xtokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type CurrencyId = CurrencyId;
type CurrencyIdConvert = AssetsIdConverter;
type AccountIdToMultiLocation = AccountIdToMultiLocation;
type SelfLocation = SelfLocation;
type XcmExecutor = XcmExecutor<XcmConfig>;
type Weigher = FixedWeightBounds<UnitWeightCost, Call, MaxInstructions>;
type BaseXcmWeight = BaseXcmWeight;
type LocationInverter = LocationInverter<Ancestry>;
type MaxAssetsForTransfer = MaxAssetsForTransfer;
type MinXcmFee = ParachainMinFee;
type MultiLocationsFilter = Everything;
type ReserveProvider = AbsoluteReserveProvider;
}
impl orml_unknown_tokens::Config for Runtime {
type Event = Event;
}
// setup mirrors Acala; the max instructions value seems reasonable, and for weight we may
// consider settling in our own PICA
parameter_types! {
// One XCM operation is 200_000_000 weight, cross-chain transfer ~= 2x of transfer.
pub const UnitWeightCost: Weight = 200_000_000;
pub const MaxInstructions: u32 = 100;
}
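// Rough arithmetic, assuming FixedWeightBounds simply charges UnitWeightCost per instruction:
// a 4-instruction message would be weighed at 4 * 200_000_000 = 800_000_000, well below the
// MaxInstructions cap of 100 instructions.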
impl pallet_xcm::Config for Runtime {
type Event = Event;
type SendXcmOrigin = EnsureXcmOrigin<Origin, LocalOriginToLocation>;
type XcmRouter = XcmRouter;
type ExecuteXcmOrigin = EnsureXcmOrigin<Origin, LocalOriginToLocation>;
/// https://medium.com/kusama-network/kusamas-governance-thwarts-would-be-attacker-9023180f6fb
type XcmExecuteFilter = Nothing;
type XcmExecutor = XcmExecutor<XcmConfig>;
type XcmTeleportFilter = Everything;
type XcmReserveTransferFilter = Everything;
type LocationInverter = LocationInverter<Ancestry>;
type Weigher = FixedWeightBounds<UnitWeightCost, Call, MaxInstructions>;
type Origin = Origin;
type Call = Call;
const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100;
type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion;
}
/// cumulus is the default implementation of the queue integrated with the Polkadot and Kusama runtimes
impl cumulus_pallet_xcm::Config for Runtime {
type Event = Event;
type XcmExecutor = XcmExecutor<XcmConfig>;
}
pub struct | <Origin>(PhantomData<Origin>);
impl<Origin: OriginTrait> ConvertOrigin<Origin> for SystemParachainAsSuperuser<Origin> {
fn convert_origin(
origin: impl Into<MultiLocation>,
kind: OriginKind,
) -> Result<Origin, MultiLocation> {
let origin = origin.into();
if kind == OriginKind::Superuser &&
matches!(
origin,
MultiLocation {
parents: 1,
interior: X1(Parachain(id)),
} if ParaId::from(id).is_system(),
) {
Ok(Origin::root())
} else {
Err(origin)
}
}
}
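// Illustrative example (illustrative ids, not from this runtime): a Superuser-kind origin from
// MultiLocation { parents: 1, interior: X1(Parachain(1000)) } converts to Origin::root(),
// since ParaId::is_system() holds for ids in the system range (Statemine's 1000, for instance);
// the same location with a public id such as 2000 is returned unchanged as Err.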
impl cumulus_pallet_xcmp_queue::Config for Runtime {
type Event = Event;
type XcmExecutor = XcmExecutor<XcmConfig>;
type VersionWrapper = RelayerXcm;
type ChannelInfo = ParachainSystem;
type ControllerOrigin = EnsureRootOrHalfCouncil;
type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
// NOTE: we could consider an allowance for some chains (see the Acala test-port PRs)
type ExecuteOverweightOrigin = EnsureRootOrHalfCouncil;
type WeightInfo = cumulus_pallet_xcmp_queue::weights::SubstrateWeight<Self>;
}
impl cumulus_pallet_dmp_queue::Config for Runtime {
type Event = Event;
type XcmExecutor = XcmExecutor<XcmConfig>;
type ExecuteOverweightOrigin = EnsureRootOrHalfCouncil;
}
| SystemParachainAsSuperuser |
cr.rs | use proc_macro::TokenStream;
use quote::{format_ident, quote, ToTokens};
use syn::{Fields, PathSegment, Type, TypePath};
use crate::to_snake_name;
// #[allow(non_upper_case_globals)]
// const TypeName_CArray: &str = "CArray";
const TYPE_NAME_C_CHAR: &str = "c_char";
pub fn dl_cr(type_name: &str, fields: &Fields) -> TokenStream {
const NAME: &str = "";//CExtrinsicContext,CInitParameters
let r_name = {
let mut str = type_name.to_owned();
str.remove(0);
format_ident!("{}",str)
};
//test
if type_name == NAME {
println!("===gen impl dl_cr start: {}", type_name);
}
let mut to_c_quote = Vec::new();
let mut ptr_rust_quote = Vec::new();
for field in fields.iter() {
if let Some(ident) = field.ident.as_ref() {
let c_field_name = ident;
let r_field_name = format_ident!("{}",to_snake_name(&c_field_name.to_string()));
//test
if type_name == NAME {
println!("field name: {}:", c_field_name.to_string());
}
if let Type::Ptr(t) = &field.ty {
let type_stream = if let Type::Path(TypePath { path, .. }) = t.elem.as_ref() {
//test
if type_name == NAME {
println!("ptr: {}:", t.elem.to_token_stream());
println!("ptr -path: {}", path.to_token_stream().to_string());
}
if let Some(PathSegment { ident, .. }) = path.segments.last() {
// if ident.to_string().as_str() == TypeName_CArray {
Some(ident.to_token_stream())
} else {
None
}
} else {
None
};
let type_stream = type_stream.expect(&format!("can not find the type of field {}::{}\nfield type: \n{:?}", type_name, c_field_name, field));
match type_stream.to_string().as_str() {
// fields of type *mut c_char
TYPE_NAME_C_CHAR => {
to_c_quote.push(quote! {
c.#c_field_name = to_c_char(&r.#r_field_name)
});
ptr_rust_quote.push(quote! {
r.#r_field_name = to_str(c.#c_field_name).to_owned()
});
}
"" => {
panic!("dl_cr can not find the type of field {} -- {} -- not TypePath,\nfield type: \n{:?}", type_name, c_field_name, field);
}
_ => {
let c_type = type_stream;
//test
if type_name == NAME {
println!("field type : {}", c_type);
}
to_c_quote.push(quote! {
c.#c_field_name = #c_type::to_c_ptr(&r.#r_field_name)
}); | });
}
}
} else {
to_c_quote.push(quote! {
//c.#c_field_name = r.#r_field_name
c.#c_field_name.assignment_c(&r.#r_field_name);
});
ptr_rust_quote.push(quote! {
// r.#r_field_name = c.#c_field_name
c.#c_field_name.assignment_r(&mut r.#r_field_name);
});
}
}
}
let c_name = format_ident!("{}",type_name);
let gen = quote! {
impl CR<#c_name,#r_name> for #c_name {
fn to_c(r: &#r_name) -> #c_name {
let mut c = #c_name::default();
#(#to_c_quote;)*
c
}
fn to_c_ptr(r: &#r_name) -> *mut #c_name {
Box::into_raw(Box::new(#c_name::to_c(r)))
}
fn to_rust(c: &#c_name) -> #r_name {
let mut r = #r_name::default();
#(#ptr_rust_quote;)*
r
}
fn ptr_rust(c: *mut #c_name) -> #r_name {
#c_name::to_rust(unsafe { &*c })
}
}
};
if cfg!(feature = "print_macro") {
println!("............gen impl dl_cr {}:", c_name);
println!("{}", gen);
// let _ = rustfmt::run(rustfmt::Input::Text(gen.to_string()), &rustfmt::config::Config::default());
}
//test
if type_name == NAME {
println!("===gen impl dl_cr end: {}, {}", type_name, r_name);
// let _ = rustfmt::run(rustfmt::Input::Text(gen.to_string()), &rustfmt::config::Config::default());
println!("{}", gen);
}
gen.into()
}
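// Illustrative sketch of the generated code (hypothetical types, not from this crate):
// for a C struct `CFoo { name: *mut c_char, age: u32 }` paired with the Rust struct `Foo`,
// the macro above would emit roughly:
//
//     impl CR<CFoo, Foo> for CFoo {
//         fn to_c(r: &Foo) -> CFoo {
//             let mut c = CFoo::default();
//             c.name = to_c_char(&r.name);
//             c.age.assignment_c(&r.age);
//             c
//         }
//         fn to_rust(c: &CFoo) -> Foo {
//             let mut r = Foo::default();
//             r.name = to_str(c.name).to_owned();
//             c.age.assignment_r(&mut r.age);
//             r
//         }
//         // plus the to_c_ptr/ptr_rust boxing helpers shown in the quote! template above
//     }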
#[cfg(test)]
mod tests {
#[test]
fn dl_cr_test() {
//can not test the fn
}
} | ptr_rust_quote.push(quote! {
r.#r_field_name = #c_type::ptr_rust(c.#c_field_name) |
serde.rs | use crate::{DashMap, DashSet};
use core::fmt;
use core::hash::{Hash, BuildHasher};
use core::marker::PhantomData;
use serde::de::{Deserialize, MapAccess, SeqAccess, Visitor};
use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer};
use serde::Deserializer;
pub struct DashMapVisitor<K, V, S> {
marker: PhantomData<fn() -> DashMap<K, V, S>>,
}
impl<K, V, S> DashMapVisitor<K, V, S>
where
K: Eq + Hash,
S: BuildHasher + Clone,
{
fn new() -> Self {
DashMapVisitor {
marker: PhantomData,
}
}
}
impl<'de, K, V, S> Visitor<'de> for DashMapVisitor<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Clone + Default,
{
type Value = DashMap<K, V, S>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a DashMap")
}
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let map = DashMap::with_capacity_and_hasher(access.size_hint().unwrap_or(0), Default::default());
while let Some((key, value)) = access.next_entry()? {
map.insert(key, value);
}
Ok(map)
}
}
impl<'de, K, V, S> Deserialize<'de> for DashMap<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Clone + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
|
}
impl<K, V, H> Serialize for DashMap<K, V, H>
where
K: Serialize + Eq + Hash,
V: Serialize,
H: BuildHasher + Clone,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
for ref_multi in self.iter() {
map.serialize_entry(ref_multi.key(), ref_multi.value())?;
}
map.end()
}
}
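// Usage sketch (assumes the serde feature is enabled and the `serde_json` crate is available):
//     let m: DashMap<String, u32> = DashMap::new();
//     m.insert("a".to_string(), 1);
//     let json = serde_json::to_string(&m).unwrap();                  // e.g. {"a":1}
//     let back: DashMap<String, u32> = serde_json::from_str(&json).unwrap();
// The DashSet impls below support the same round trip through a JSON array.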
pub struct DashSetVisitor<K, S> {
marker: PhantomData<fn() -> DashSet<K, S>>,
}
impl<K, S> DashSetVisitor<K, S>
where
K: Eq + Hash,
S: BuildHasher + Clone,
{
fn new() -> Self {
DashSetVisitor {
marker: PhantomData,
}
}
}
impl<'de, K, S> Visitor<'de> for DashSetVisitor<K, S>
where
K: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Clone + Default,
{
type Value = DashSet<K, S>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a DashSet")
}
fn visit_seq<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
let map = DashSet::with_capacity_and_hasher(access.size_hint().unwrap_or(0), Default::default());
while let Some(key) = access.next_element()? {
map.insert(key);
}
Ok(map)
}
}
impl<'de, K, S> Deserialize<'de> for DashSet<K, S>
where
K: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Clone + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_seq(DashSetVisitor::<K, S>::new())
}
}
impl<K, H> Serialize for DashSet<K, H>
where
K: Serialize + Eq + Hash,
H: BuildHasher + Clone,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for ref_multi in self.iter() {
seq.serialize_element(ref_multi.key())?;
}
seq.end()
}
}
| {
deserializer.deserialize_map(DashMapVisitor::<K, V, S>::new())
} |
fs.go | /*
Copyright 2020-2021 The UnDistro authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"embed"
"io/fs"
"net/http"
"os"
"path"
)
//go:embed clustertemplates/*
var FS embed.FS
//go:generate helm package -u ../../charts/metallb -d chart
//go:generate helm package -u ../../charts/cert-manager -d chart
//go:generate helm package -u ../../charts/cluster-api -d chart
//go:generate helm package -u ../../charts/ingress-nginx -d chart
//go:generate helm package -u ../../charts/undistro -d chart
//go:generate helm package -u ../../charts/undistro-aws -d chart
//go:embed chart/*
var ChartFS embed.FS
//go:embed frontend/*
var frontFS embed.FS
//go:embed apps/*
var AppsFS embed.FS
//go:embed defaultarch/*
var DefaultArchFS embed.FS
//go:embed policies/disallow-add-capabilities.yaml
//go:embed policies/disallow-default-namespace.yaml
//go:embed policies/disallow-delete-kyverno.yaml
//go:embed policies/disallow-host-namespace.yaml
//go:embed policies/disallow-host-path.yaml
//go:embed policies/disallow-host-port.yaml
//go:embed policies/disallow-latest-tag.yaml
//go:embed policies/require-resources.yaml
//go:embed policies/network-policy.yaml
var PoliciesFS embed.FS
type fsFunc func(name string) (fs.File, error)
func (f fsFunc) Open(name string) (fs.File, error) {
return f(name)
}
// ReactHandler returns an http.Handler that will serve files from the frontFS embed.FS.
// When locating a file, it will strip the given prefix from the request and prepend the | // root to the filesystem lookup: typical prefix might be "" and root would be frontend.
func ReactHandler(prefix, root string) http.Handler {
handler := fsFunc(func(name string) (fs.File, error) {
assetPath := path.Join(root, name)
// If we can't find the asset, return the default index.html content
f, err := frontFS.Open(assetPath)
if os.IsNotExist(err) {
return frontFS.Open("frontend/index.html")
}
// Otherwise assume this is a legitimate request routed correctly
return f, err
})
return http.StripPrefix(prefix, http.FileServer(http.FS(handler)))
} | |
polyint.py | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.misc import factorial
from scipy.lib.six.moves import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"]
class KroghInterpolator(object):
"""
The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points,
optionally with specified derivatives at those points.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
Parameters
----------
xi : array_like, length N
Known x-coordinates
yi : array_like, N by R
Known y-coordinates, interpreted as vectors of length R,
or scalars if R=1. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
"""
def __init__(self, xi, yi):
"""Construct an interpolator passing through the specified points
The polynomial passes through all the pairs (xi,yi). One may additionally
specify a number of derivatives at each point xi; this is done by
repeating the value xi and specifying the derivatives as successive
yi values.
Parameters
----------
xi : array-like, length N
known x-coordinates
yi : array-like, N by R
known y-coordinates, interpreted as vectors of length R,
or scalars if R=1. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> KroghInterpolator([0,0,1],[0,2,0])
This constructs the quadratic 2*X**2-2*X. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
self.xi = np.asarray(xi)
self.yi = np.asarray(yi)
if len(self.yi.shape)==1:
self.vector_valued = False
self.yi = self.yi[:,np.newaxis]
elif len(self.yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
self.vector_valued = True
n = len(xi)
self.n = n
nn, r = self.yi.shape
if nn!=n:
raise ValueError("%d x values provided and %d y values; must be equal" % (n, nn))
self.r = r
c = np.zeros((n+1,r))
c[0] = yi[0]
Vk = np.zeros((n,r))
for k in xrange(1,n):
s = 0
while s<=k and xi[k-s]==xi[k]:
s += 1
s -= 1
Vk[0] = yi[k]/float(factorial(s))
for i in xrange(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s==0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def __call__(self,x):
"""Evaluate the polynomial at the point x
Parameters
----------
x : scalar or array-like of length N
Returns
-------
y : scalar, array of length R, array of length N, or array of length N by R
If x is a scalar, returns either a vector or a scalar depending on
whether the interpolator is vector-valued or scalar-valued.
If x is a vector, returns a vector of values.
"""
if _isscalar(x):
scalar = True
m = 1
else:
scalar = False
m = len(x)
x = np.asarray(x)
n = self.n
pi = 1
p = np.zeros((m,self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w = x - self.xi[k-1]
pi = w*pi
p = p + np.multiply.outer(pi,self.c[k])
if not self.vector_valued:
if scalar:
return p[0,0]
else:
return p[:,0]
else:
if scalar:
return p[0]
else:
return p
def derivatives(self,x,der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : scalar or array_like of length N
Point or points at which to evaluate the derivatives
der : None or integer
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be der by N by R. If x is a scalar,
the middle dimension will be dropped; if R is 1 then the
last dimension will be dropped.
Examples
--------
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
if _isscalar(x):
scalar = True
m = 1
else:
scalar = False
m = len(x)
x = np.asarray(x)
n = self.n
r = self.r
if der is None:
der = self.n
dern = min(self.n,der)
pi = np.zeros((n,m))
w = np.zeros((n,m))
pi[0] = 1
p = np.zeros((m,self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1]*pi[k-1]
p += np.multiply.outer(pi[k],self.c[k])
cn = np.zeros((max(der,n+1),m,r))
cn[:n+1,...] += self.c[:n+1,np.newaxis,:]
cn[0] = p
for k in xrange(1,n):
for i in xrange(1,n-k+1):
pi[i] = w[k+i-1]*pi[i-1]+pi[i]
cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
cn[k]*=factorial(k)
cn[n,...] = 0
if not self.vector_valued:
if scalar:
return cn[:der,0,0]
else:
return cn[:der,:,0]
else:
if scalar:
return cn[:der,0]
else:
return cn[:der]
def derivative(self,x,der):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : scalar or array_like of length N
Point or points at which to evaluate the derivatives
der : None or integer
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be N by R. If x is a scalar,
the middle dimension will be dropped; if R is 1 then the
last dimension will be dropped.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
return self.derivatives(x,der=der+1)[der]
def krogh_interpolate(xi,yi,x,der=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
optionally with specified derivatives at those points.
Evaluates the polynomial or some of its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on Krogh 1970, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation"
The polynomial passes through all the pairs (xi,yi). One may additionally
specify a number of derivatives at each point xi; this is done by
repeating the value xi and specifying the derivatives as successive
yi values.
Parameters
----------
xi : array_like, length N
known x-coordinates
yi : array_like, N by R
known y-coordinates, interpreted as vectors of length R,
or scalars if R=1
x : scalar or array_like of length N
Point or points at which to evaluate the derivatives
der : integer or list
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be the number of derivatives by N by R.
If x is a scalar, the middle dimension will be dropped; if
the yi are scalars then the last dimension will be dropped.
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
"""
P = KroghInterpolator(xi, yi)
if der==0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of x values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None
The order of the polynomial to be used in the fitting; f will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
"""
if order is None:
order=degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
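# Usage sketch (illustrative values): fit a degree-3 Taylor polynomial of exp around 0,
# sampling over a window of width `scale`:
#     p = approximate_taylor_polynomial(np.exp, 0, degree=3, scale=1.0)
#     p(0.1)  # approximately np.exp(0.1), since the result is translated to the origin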
class BarycentricInterpolator(object):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None):
"""Construct an object capable of interpolating functions sampled at xi
The values yi need to be provided before the function is evaluated,
but none of the preprocessing depends on them, so rapid updates
are possible.
Parameters
----------
xi : array-like of length N
The x coordinates of the points the polynomial should pass through
yi : array-like N by R or None
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued. If None the y values
will be supplied later.
"""
self.n = len(xi)
self.xi = np.asarray(xi)
if yi is not None and len(yi)!=len(self.xi):
raise ValueError("yi dimensions do not match xi dimensions")
self.set_yi(yi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in xrange(1,self.n):
self.wi[:j]*=(self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi**=-1
def set_yi(self, yi):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like N by R
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued. If None the y values
will be supplied later.
"""
if yi is None:
self.yi = None
return
yi = np.asarray(yi)
if len(yi.shape)==1:
self.vector_valued = False
yi = yi[:,np.newaxis]
elif len(yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
self.vector_valued = True
n, r = yi.shape
if n!=len(self.xi):
raise ValueError("yi dimensions do not match xi dimensions")
self.yi = yi
self.r = r
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like of length N1
The x coordinates of the points the polynomial should pass through
yi : array_like N1 by R or None
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued. If None the y values
will be supplied later. The yi should be specified if and only if
the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = np.asarray(yi)
if len(yi.shape)==1:
if self.vector_valued:
raise ValueError("Cannot extend dimension %d y vectors with scalars" % self.r)
yi = yi[:,np.newaxis]
elif len(yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
n, r = yi.shape
if r!=self.r:
raise ValueError("Cannot extend dimension %d y vectors with dimension %d y vectors" % (self.r, r))
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi**=-1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in xrange(old_n,self.n):
self.wi[:j]*=(self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi**=-1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : scalar or array-like of length M
Returns
-------
y : scalar or array-like of length R or length M or M by R
The shape of y depends on the shape of x and whether the
interpolator is vector-valued or scalar-valued.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by M, where N is the degree of the polynomial.
"""
scalar = _isscalar(x)
x = np.atleast_1d(x)
c = np.subtract.outer(x,self.xi)
z = c==0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[:,np.newaxis]
i, j = np.nonzero(z)
p[i] = self.yi[j]
if not self.vector_valued:
if scalar:
return p[0,0]
else:
return p[:,0]
else:
if scalar:
return p[0]
else:
return p
def barycentric_interpolate(xi, yi, x):
"""
Convenience function for polynomial interpolation
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
Parameters
----------
xi : array_like of length N
The x coordinates of the points the polynomial should pass through
yi : array_like N by R
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued.
x : scalar or array_like of length M
Returns
-------
y : scalar or array_like of length R or length M or M by R
The shape of y depends on the shape of x and whether the
interpolator is vector-valued or scalar-valued.
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class BarycentricInterpolator.
This is what this function uses internally.
"""
return BarycentricInterpolator(xi, yi)(x)
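# Usage sketch (illustrative values):
#     xi = np.linspace(0, 3, 7)
#     y = barycentric_interpolate(xi, np.sin(xi), 0.3)  # close to np.sin(0.3)
# For repeated evaluation with the same xi, build a BarycentricInterpolator once instead.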
class PiecewisePolynomial(object):
"""Piecewise polynomial curve specified by points and derivatives
This class represents a curve that is a piecewise polynomial. It
passes through a list of points and has specified derivatives at
each point. The degree of the polynomial may vary from segment to
segment, as may the number of derivatives available. The degree
should not exceed about thirty.
Appending points to the end of the curve is efficient.
"""
def __init__(self, xi, yi, orders=None, direction=None):
"""Construct a piecewise polynomial
Parameters
----------
xi : array-like of length N
a sorted list of x-coordinates
yi : list of lists of length N
yi[i] is the list of derivatives known at xi[i]
orders : list of integers, or integer
a list of polynomial orders, or a single universal order
direction : {None, 1, -1}
indicates whether the xi are increasing or decreasing
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two xi
Notes
-----
If orders is None, or orders[i] is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If orders[i] is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
"""
yi0 = np.asarray(yi[0])
if len(yi0.shape)==2:
self.vector_valued = True
self.r = yi0.shape[1]
elif len(yi0.shape)==1:
self.vector_valued = False
self.r = 1
else:
raise ValueError("Each derivative must be a vector, not a higher-rank array")
self.xi = [xi[0]]
self.yi = [yi0]
self.n = 1
self.direction = direction
self.orders = []
self.polynomials = []
self.extend(xi[1:],yi[1:],orders)
def _make_polynomial(self,x1,y1,x2,y2,order,direction):
"""Construct the interpolating polynomial object
Deduces the number of derivatives to match at each end
from order and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
"""
n = order+1
n1 = min(n//2,len(y1))
n2 = min(n-n1,len(y2))
n1 = min(n-n2,len(y1))
if n1+n2!=n:
raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with length y1 or y2.")
xi = np.zeros(n)
if self.vector_valued:
yi = np.zeros((n,self.r))
else:
yi = np.zeros((n,))
xi[:n1] = x1
yi[:n1] = y1[:n1]
xi[n1:] = x2
yi[n1:] = y2[:n2]
return KroghInterpolator(xi,yi)
def append(self, xi, yi, order=None):
"""
Append a single point with derivatives to the PiecewisePolynomial
Parameters
----------
xi : float
yi : array_like
yi is the list of derivatives known at xi
order : integer or None
a polynomial order, or instructions to use the highest
possible order
"""
yi = np.asarray(yi)
if self.vector_valued:
if (len(yi.shape)!=2 or yi.shape[1]!=self.r):
raise ValueError("Each derivative must be a vector of length %d" % self.r)
else:
if len(yi.shape)!=1:
raise ValueError("Each derivative must be a scalar")
if self.direction is None:
self.direction = np.sign(xi-self.xi[-1])
elif (xi-self.xi[-1])*self.direction < 0:
raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi))
self.xi.append(xi)
self.yi.append(yi)
if order is None:
n1 = len(self.yi[-2])
n2 = len(self.yi[-1])
n = n1+n2
order = n-1
self.orders.append(order)
self.polynomials.append(self._make_polynomial(
self.xi[-2], self.yi[-2],
self.xi[-1], self.yi[-1],
order, self.direction))
self.n += 1
def extend(self, xi, yi, orders=None):
"""
Extend the PiecewisePolynomial by a list of points
Parameters
----------
xi : array_like of length N1
a sorted list of x-coordinates
yi : list of lists of length N1
yi[i] is the list of derivatives known at xi[i]
orders : list of integers, or integer
a list of polynomial orders, or a single universal order
direction : {None, 1, -1}
indicates whether the xi are increasing or decreasing
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two xi
"""
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[i],orders)
else:
self.append(xi[i],yi[i],orders[i])
def __call__(self, x):
"""Evaluate the piecewise polynomial
Parameters
----------
x : scalar or array-like of length N
Returns
-------
y : scalar or array-like of length R or length N or N by R
"""
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos](x)
else:
x = np.asarray(x)
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
if self.vector_valued:
y = np.zeros((m,self.r))
else:
y = np.zeros(m)
for i in xrange(self.n-1):
c = pos==i
y[c] = self.polynomials[i](x[c])
return y
def derivative(self, x, der):
"""
Evaluate a derivative of the piecewise polynomial
Parameters
----------
x : scalar or array_like of length N
der : integer
which single derivative to extract
Returns
-------
y : scalar or array_like of length R or length N or N by R
Notes
-----
This currently computes (using self.derivatives()) all derivatives
of the curve segment containing each x but returns only one.
"""
return self.derivatives(x,der=der+1)[der]
|
Parameters
----------
x : scalar or array_like of length N
der : integer
how many derivatives (including the function value as
0th derivative) to extract
Returns
-------
y : array_like of shape der by R or der by N or der by N by R
"""
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos].derivatives(x,der=der)
else:
x = np.asarray(x)
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
if self.vector_valued:
y = np.zeros((der,m,self.r))
else:
y = np.zeros((der,m))
for i in xrange(self.n-1):
c = pos==i
y[:,c] = self.polynomials[i].derivatives(x[c],der=der)
return y
def piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0):
"""
Convenience function for piecewise polynomial interpolation
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : list of lists
yi[i] is the list of derivatives known at xi[i]. Of length N.
x : scalar or array_like
Of length M.
orders : int or list of ints
a list of polynomial orders, or a single universal order
der : int
Which single derivative to extract.
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
Notes
-----
If orders is None, or orders[i] is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If orders[i] is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
Construction of these piecewise polynomials can be an expensive process;
if you repeatedly evaluate the same polynomial, consider using the class
PiecewisePolynomial (which is what this function does).
"""
P = PiecewisePolynomial(xi, yi, orders)
if der==0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
def _edge_case(m0, d1):
return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))
def _find_derivatives(x, y):
# Determine the derivatives d_k at the points y_k using the PCHIP algorithm:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
smk = np.sign(mk)
condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0))
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0/whmean[~condition]
# For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
# one of d_1 or m_0 is 0, then choose d_0 = 0
dk[0] = _edge_case(mk[0],dk[1])
dk[-1] = _edge_case(mk[-1],dk[-2])
return dk
def pchip(x, y):
"""PCHIP 1-d monotonic cubic interpolation
x and y are arrays of values used to approximate some function f, with
``y = f(x)``. This factory function returns a callable object whose
``__call__`` method uses monotonic cubic interpolation to find the value
of new points.
Parameters
----------
x : array
A 1D array of monotonically increasing real values. x cannot
include duplicate values (otherwise f is overspecified)
y : array
A 1-D array of real values. y's length along the interpolation
axis must be equal to the length of x.
Assumes x is sorted in monotonic order (e.g. ``x[1] > x[0]``).
Returns
-------
pchip : PiecewisePolynomial instance
The result of the interpolation.
"""
derivs = _find_derivatives(x,y)
return PiecewisePolynomial(x, list(zip(y, derivs)), orders=3, direction=None) | def derivatives(self, x, der):
"""
Evaluate a derivative of the piecewise polynomial |
trace_test.go | package stackdriver
import (
"bytes"
"encoding/json"
"reflect"
"testing"
"github.com/kr/pretty"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
)
func TestTrace(t *testing.T) {
var out bytes.Buffer
logger := logrus.New()
logger.Out = &out
logger.Formatter = NewFormatter(
WithService("test"),
WithVersion("0.1"),
)
logger.WithField(KeyTrace, "my-trace").WithField(KeySpanID, "my-span").Info("my log entry")
var got map[string]interface{}
json.Unmarshal(out.Bytes(), &got)
want := map[string]interface{}{
"severity": "INFO",
"message": "my log entry",
"context": map[string]interface{}{},
"serviceContext": map[string]interface{}{
"service": "test",
"version": "0.1",
},
"logging.googleapis.com/trace": "my-trace",
"logging.googleapis.com/spanId": "my-span",
}
require.True(t, reflect.DeepEqual(got, want), "unexpected output = %# v; \n want = %# v; \n diff: %# v", pretty.Formatter(got), pretty.Formatter(want), pretty.Diff(got, want))
}
func TestTraceWithProjectID(t *testing.T) | {
var out bytes.Buffer
logger := logrus.New()
logger.Out = &out
logger.Formatter = NewFormatter(
WithService("test"),
WithVersion("0.1"),
WithProjectID("my-project"),
)
logger.WithField(KeyTrace, "my-trace").WithField(KeySpanID, "my-span").Info("my log entry")
var got map[string]interface{}
json.Unmarshal(out.Bytes(), &got)
want := map[string]interface{}{
"severity": "INFO",
"message": "my log entry",
"context": map[string]interface{}{},
"serviceContext": map[string]interface{}{
"service": "test",
"version": "0.1",
},
"trace_id": "my-trace",
"logging.googleapis.com/trace": "projects/my-project/traces/my-trace",
"logging.googleapis.com/spanId": "my-span",
}
require.True(t, reflect.DeepEqual(got, want), "unexpected output = %# v; \n want = %# v; \n diff: %# v", pretty.Formatter(got), pretty.Formatter(want), pretty.Diff(got, want))
} |
|
ssh_deploy.py | # -*- coding: utf-8 -*-
import logging
import os
import re
import time
from collections import OrderedDict
LOG = logging.getLogger(__name__)
def md5sum_command(directory='.', find_type='f', match='', not_match=''):
return ' '.join([i for i in [
'find', directory,
('-type %s' % find_type) if find_type else '',
'-regextype posix-extended' if match or not_match else '',
('-regex %s' % match) if match else '',
('! -regex "%s"' % not_match) if not_match else '',
"""-print0 | xargs -0 md5sum | awk '{printf "%-50s %s\\n", $2, $1}' | sort"""
] if i])
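# For illustration, with the default arguments the helper above builds the pipeline
#     find . -type f -print0 | xargs -0 md5sum | awk '{printf "%-50s %s\n", $2, $1}' | sort
# while md5sum_command('.', 'f', not_match=r'.*\.pyc$') additionally inserts
#     -regextype posix-extended ! -regex ".*\.pyc$"
# before -print0.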
def check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs):
title = re.sub('[^a-zA-Z0-9]', '-', local_path) + '.' + time.strftime('%Y%m%d-%H%I%S')
cmd_md5sum = md5sum_command(*md5sum_args, **md5sum_kwargs)
# ---------- get md5sum ----------
# locally
command = 'cd ' + local_path + '; ' + cmd_md5sum
LOG.info('local command: %s', command)
content = os.popen(command).read()
with open('/tmp/%s.a.txt' % title, 'w') as _file:
_file.write(content)
local_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in content.splitlines()])
# remotely
command = 'cd ' + remote_path + '; ' + cmd_md5sum
LOG.info('remote command: %s', command)
code, out, err = chain.execute('cd ' + remote_path + '; ' + cmd_md5sum, buff_size=1024000)
out = out.decode('utf-8')
with open('/tmp/%s.b.txt' % title, 'w') as _file:
_file.write(out)
remote_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in out.splitlines()])
# ---------- compare result ----------
LOG.info('*' * 50)
LOG.info('')
is_synced = True
for _file in local_sums:
if _file not in remote_sums:
is_synced = False
LOG.info(u'🐈 [LOCAL] ' + _file)
continue
if local_sums[_file] != remote_sums[_file]:
is_synced = False
LOG.info(u'🐍 [DIFF] ' + _file)
continue
# LOG.info('[SAME] ' + _file + ' ignore it')
for _file in remote_sums:
if _file not in local_sums:
is_synced = False
LOG.info(u'🐦 [REMOTE] ' + _file)
if is_synced:
LOG.info(u'㊗️ ㊗️ ㊗️ Perfect!!! ㊗️ ㊗️ ㊗️'.center(44))
LOG.info('')
LOG.info('*' * 50)
def sftp_download(chain, files_will_transferred):
for remote_path, local_path in files_will_transferred:
try:
chain.use().download(remote_path, local_path)
except Exception as error:
LOG.warning(error)
def download_files(chain, local_path, remote_path, files=None):
# download specified files
if not files:
LOG.debug('Download, but no file specified, over!')
return
move_tasks = [(os.path.join(remote_path, path), os.path.join(local_path, path)) for path in files]
sftp_download(chain, move_tasks)
def sftp_upload(chain, files_will_transferred):
""" SFTP upload
Args:
chain: object of SSHChain
files_will_transferred: list[tuple]
"""
LOG.info(files_will_transferred)
for local_path, remote_path in files_will_transferred:
chain.use().upload(local_path, remote_path)
def upload_files(chain, local_path, remote_path, files=None, ignore_patterns=None):
"""Upload local files or directory, can ignore some files by pattern
Args:
chain:
local_path:
remote_path:
files:
ignore_patterns:
"""
files = files or []
ignore_patterns = ignore_patterns or []
re_ignore = re.compile('(%s)' % (')|('.join(ignore_patterns))) if ignore_patterns else ''
move_tasks = []
for path in files:
fullpath = os.path.join(local_path, path)
if not os.path.exists(fullpath):
LOG.error('The file need uploaded not found: %s', fullpath)
exit()
if os.path.isfile(fullpath):
move_tasks.append((fullpath, os.path.join(remote_path, path)))
continue
assert os.path.isdir(fullpath)
for root, dirs, _files in os.walk(fullpath):
for _file in _files:
_fullpath = os.path.join(root, _file)
if re_ignore and re_ignore.search(_fullpath):
continue
relpath = os.path.relpath(_fullpath, local_path)
move_tasks.append((_fullpath, os.path.join(remote_path, relpath)))
sftp_upload(chain, move_tasks)
def file_sync(chain, local_path, remote_path,
files_upload=None, ignore_patterns=None, # upload arguments
files_download=None): # download arguments
if files_download:
download_files(chain, local_path, remote_path, files_download)
if files_upload:
upload_files(chain, local_path, remote_path, files_upload, ignore_patterns)
ACTIONS = 'check', 'sync', 'all',
def main(chain, local_path, remote_path, action='check',
files_upload=None, ignore_patterns=None, files_download=None,
*md5sum_args, **md5sum_kwargs):
"""
Args:
chain: object of SSHChain
local_path: str, absolute path
remote_path: str, absolute path
action: str
files_upload: list of files to upload
ignore_patterns
files_download: list of files to download
md5sum_args:
md5sum_kwargs: like: directory='.', find_type='f', match='', not_match=''
"""
if action not in ACTIONS:
return
def _file_sync():
file_sync(chain, local_path, remote_path, files_upload, ignore_patterns, files_download)
def _check_sum():
check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs)
if action == 'sync':
_file_sync()
return
if action == 'check':
_check_sum()
return
| ()
| _file_sync()
_check_sum |
codigomate.js | import React from 'react';
import Layout from '../../layout/Layout';
import SetCrumbs from '../../config/SetCrumbs';
const Codigomate = () => {
return (
<Layout> | <SetCrumbs second="Acerca" third="Codigo Mate" />
Codigo Mate
</Layout>
)
}
export default Codigomate; | |
__init__.py | #!/usr/bin/env python3
from flask import Flask, render_template
import flask_site.model as model
def create_app(test_config=None):
"""Create and configure the app.
Parameters
----------
test_config - Defaults to None, but can be used to set
up config for testing.
Returns
-------
Returns the app.
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev'
)
if test_config is None:
# Load the instance config, if it exists, when not testing.
app.config.from_pyfile('config.py', silent=True)
else:
#Load the test config if passed in.
app.config.from_mapping(test_config)
@app.route('/')
def index():
return 'Index Page'
@app.route('/hello')
def hello():
return 'Hello, World'
@app.route('/webdev')
def | ():
mod = model.Model()
real = mod.get_tweet()
fake = mod.get_fake()
return render_template('index.html', real=real, fake=fake)
return app
| webdev |
reward.go | package executor
import (
"bytes"
"github.com/PhenixChain/devchain/types"
pt "github.com/PhenixChain/devchain/plugin/dapp/paracross/types"
)
// reward handles the mining reward: it implements the basic distribution strategy first and will be refactored later as needed
func (a *action) reward(nodeStatus *pt.ParacrossNodeStatus, stat *pt.ParacrossHeightStatus) (*types.Receipt, error) {
//fetch the mining-related config; note that this is the consensus height, not the transaction height
coinReward := types.MGInt("mver.consensus.paracross.coinReward", nodeStatus.Height) * types.Coin
fundReward := types.MGInt("mver.consensus.paracross.coinDevFund", nodeStatus.Height) * types.Coin
fundAddr := types.MGStr("mver.consensus.fundKeyAddr", nodeStatus.Height)
minerAddrs := getMiners(stat.Details, nodeStatus.BlockHash)
//per-miner unit reward
minerUnit := coinReward / int64(len(minerAddrs))
receipt := &types.Receipt{Ty: types.ExecOk}
if minerUnit > 0 {
//if the reward does not split evenly, the remainder goes to the development fund
fundReward += coinReward % minerUnit
for _, addr := range minerAddrs {
rep, err := a.coinsAccount.ExecDeposit(addr, a.execaddr, minerUnit)
if err != nil {
clog.Error("paracross miner reward deposit err", "height", nodeStatus.Height,
"execAddr", a.execaddr, "minerAddr", addr, "amount", minerUnit, "err", err)
return nil, err
}
receipt = mergeReceipt(receipt, rep)
}
}
if fundReward > 0 {
rep, err := a.coinsAccount.ExecDeposit(fundAddr, a.execaddr, fundReward)
if err != nil {
clog.Error("paracross fund reward deposit err", "height", nodeStatus.Height,
"execAddr", a.execaddr, "fundAddr", fundAddr, "amount", fundReward, "err", err)
return nil, err
}
receipt = mergeReceipt(receipt, rep)
}
return receipt, nil
}
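// Worked example of the split above (illustrative numbers): with coinReward = 18 and 4 miners,
// minerUnit = 18/4 = 4, the remainder 18 % 4 = 2 is added to fundReward, and each miner
// receives 4, so the full 18 is accounted for.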
// getMiners returns the addresses of the miners that submitted a consensus message for the given block hash
func getMiners(detail *pt.ParacrossStatusDetails, blockHash []byte) []string {
addrs := make([]string, 0)
for i, hash := range detail.BlockHash {
if bytes.Equal(hash, blockHash) {
addrs = app | , detail.Addrs[i])
}
}
return addrs
}
// mergeReceipt merges the KVs and logs of two receipts into one
func mergeReceipt(receipt1, receipt2 *types.Receipt) *types.Receipt {
if receipt2 != nil {
receipt1.KV = append(receipt1.KV, receipt2.KV...)
receipt1.Logs = append(receipt1.Logs, receipt2.Logs...)
}
return receipt1
}
| end(addrs |
schema.go | package openapi3
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
"regexp"
"strconv"
"unicode/utf16"
"github.com/calmisland/go-openapi/jsoninfo"
)
var (
// SchemaErrorDetailsDisabled disables printing of details about schema errors.
SchemaErrorDetailsDisabled = false
errSchema = errors.New("Input does not match the schema")
ErrSchemaInputNaN = errors.New("NaN is not allowed")
ErrSchemaInputInf = errors.New("Inf is not allowed")
)
// Float64Ptr is a helper for defining OpenAPI schemas.
func Float64Ptr(value float64) *float64 {
return &value
}
// BoolPtr is a helper for defining OpenAPI schemas.
func BoolPtr(value bool) *bool {
return &value
}
// Int64Ptr is a helper for defining OpenAPI schemas.
func Int64Ptr(value int64) *int64 {
return &value
}
// Uint64Ptr is a helper for defining OpenAPI schemas.
func Uint64Ptr(value uint64) *uint64 {
return &value
}
// Schema is specified by OpenAPI/Swagger 3.0 standard.
type Schema struct {
ExtensionProps
OneOf []*SchemaRef `json:"oneOf,omitempty" yaml:"oneOf,omitempty"`
AnyOf []*SchemaRef `json:"anyOf,omitempty" yaml:"anyOf,omitempty"`
AllOf []*SchemaRef `json:"allOf,omitempty" yaml:"allOf,omitempty"`
Not *SchemaRef `json:"not,omitempty" yaml:"not,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Title string `json:"title,omitempty" yaml:"title,omitempty"`
Format string `json:"format,omitempty" yaml:"format,omitempty"`
Description string `json:"description,omitempty" yaml:"description,omitempty"`
Enum []interface{} `json:"enum,omitempty" yaml:"enum,omitempty"`
Default interface{} `json:"default,omitempty" yaml:"default,omitempty"`
Example interface{} `json:"example,omitempty" yaml:"example,omitempty"`
ExternalDocs interface{} `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"`
// Object-related, here for struct compactness
AdditionalPropertiesAllowed *bool `json:"-" multijson:"additionalProperties,omitempty" yaml:"-"`
// Array-related, here for struct compactness
UniqueItems bool `json:"uniqueItems,omitempty" yaml:"uniqueItems,omitempty"`
// Number-related, here for struct compactness
ExclusiveMin bool `json:"exclusiveMinimum,omitempty" yaml:"exclusiveMinimum,omitempty"`
ExclusiveMax bool `json:"exclusiveMaximum,omitempty" yaml:"exclusiveMaximum,omitempty"`
// Properties
Nullable bool `json:"nullable,omitempty" yaml:"nullable,omitempty"`
ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
WriteOnly bool `json:"writeOnly,omitempty" yaml:"writeOnly,omitempty"`
XML interface{} `json:"xml,omitempty" yaml:"xml,omitempty"`
// Number
Min *float64 `json:"minimum,omitempty" yaml:"minimum,omitempty"`
Max *float64 `json:"maximum,omitempty" yaml:"maximum,omitempty"`
MultipleOf *float64 `json:"multipleOf,omitempty" yaml:"multipleOf,omitempty"`
// String
MinLength uint64 `json:"minLength,omitempty" yaml:"minLength,omitempty"`
MaxLength *uint64 `json:"maxLength,omitempty" yaml:"maxLength,omitempty"`
Pattern string `json:"pattern,omitempty" yaml:"pattern,omitempty"`
compiledPattern *compiledPattern
// Array
MinItems uint64 `json:"minItems,omitempty" yaml:"minItems,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty" yaml:"maxItems,omitempty"`
Items *SchemaRef `json:"items,omitempty" yaml:"items,omitempty"`
// Object
Required []string `json:"required,omitempty" yaml:"required,omitempty"`
Properties map[string]*SchemaRef `json:"properties,omitempty" yaml:"properties,omitempty"`
MinProps uint64 `json:"minProperties,omitempty" yaml:"minProperties,omitempty"`
MaxProps *uint64 `json:"maxProperties,omitempty" yaml:"maxProperties,omitempty"`
AdditionalProperties *SchemaRef `json:"-" multijson:"additionalProperties,omitempty" yaml:"-"`
Discriminator *Discriminator `json:"discriminator,omitempty" yaml:"discriminator,omitempty"`
PatternProperties string `json:"patternProperties,omitempty" yaml:"patternProperties,omitempty"`
compiledPatternProperties *compiledPattern
}
func NewSchema() *Schema {
return &Schema{}
}
func (schema *Schema) MarshalJSON() ([]byte, error) {
return jsoninfo.MarshalStrictStruct(schema)
}
func (schema *Schema) UnmarshalJSON(data []byte) error {
return jsoninfo.UnmarshalStrictStruct(data, schema)
}
func (schema *Schema) NewRef() *SchemaRef {
return &SchemaRef{ |
func NewOneOfSchema(schemas ...*Schema) *Schema {
refs := make([]*SchemaRef, len(schemas))
for i, schema := range schemas {
refs[i] = &SchemaRef{Value: schema}
}
return &Schema{
OneOf: refs,
}
}
func NewAnyOfSchema(schemas ...*Schema) *Schema {
refs := make([]*SchemaRef, len(schemas))
for i, schema := range schemas {
refs[i] = &SchemaRef{Value: schema}
}
return &Schema{
AnyOf: refs,
}
}
func NewAllOfSchema(schemas ...*Schema) *Schema {
refs := make([]*SchemaRef, len(schemas))
for i, schema := range schemas {
refs[i] = &SchemaRef{Value: schema}
}
return &Schema{
AllOf: refs,
}
}
func NewBoolSchema() *Schema {
return &Schema{
Type: "boolean",
}
}
func NewFloat64Schema() *Schema {
return &Schema{
Type: "number",
}
}
func NewIntegerSchema() *Schema {
return &Schema{
Type: "integer",
}
}
func NewInt32Schema() *Schema {
return &Schema{
Type: "integer",
Format: "int32",
}
}
func NewInt64Schema() *Schema {
return &Schema{
Type: "integer",
Format: "int64",
}
}
func NewStringSchema() *Schema {
return &Schema{
Type: "string",
}
}
func NewDateTimeSchema() *Schema {
return &Schema{
Type: "string",
Format: "date-time",
}
}
func NewUuidSchema() *Schema {
return &Schema{
Type: "string",
Format: "uuid",
}
}
func NewBytesSchema() *Schema {
return &Schema{
Type: "string",
Format: "byte",
}
}
func NewArraySchema() *Schema {
return &Schema{
Type: "array",
}
}
func NewObjectSchema() *Schema {
return &Schema{
Type: "object",
Properties: make(map[string]*SchemaRef),
}
}
type compiledPattern struct {
Regexp *regexp.Regexp
ErrReason string
}
func (schema *Schema) WithNullable() *Schema {
schema.Nullable = true
return schema
}
func (schema *Schema) WithMin(value float64) *Schema {
schema.Min = &value
return schema
}
func (schema *Schema) WithMax(value float64) *Schema {
schema.Max = &value
return schema
}
func (schema *Schema) WithExclusiveMin(value bool) *Schema {
schema.ExclusiveMin = value
return schema
}
func (schema *Schema) WithExclusiveMax(value bool) *Schema {
schema.ExclusiveMax = value
return schema
}
func (schema *Schema) WithEnum(values ...interface{}) *Schema {
schema.Enum = values
return schema
}
func (schema *Schema) WithFormat(value string) *Schema {
schema.Format = value
return schema
}
func (schema *Schema) WithLength(i int64) *Schema {
n := uint64(i)
schema.MinLength = n
schema.MaxLength = &n
return schema
}
func (schema *Schema) WithMinLength(i int64) *Schema {
n := uint64(i)
schema.MinLength = n
return schema
}
func (schema *Schema) WithMaxLength(i int64) *Schema {
n := uint64(i)
schema.MaxLength = &n
return schema
}
func (schema *Schema) WithLengthDecodedBase64(i int64) *Schema {
n := uint64(i)
v := (n*8 + 5) / 6
schema.MinLength = v
schema.MaxLength = &v
return schema
}
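// Note: (n*8 + 5) / 6 computes the unpadded base64-encoded length of n decoded bytes,
// e.g. n=3 gives 4 characters and n=16 gives 22.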
func (schema *Schema) WithMinLengthDecodedBase64(i int64) *Schema {
n := uint64(i)
schema.MinLength = (n*8 + 5) / 6
return schema
}
func (schema *Schema) WithMaxLengthDecodedBase64(i int64) *Schema {
n := uint64(i)
v := (n*8 + 5) / 6
schema.MaxLength = &v
return schema
}
func (schema *Schema) WithPattern(pattern string) *Schema {
schema.Pattern = pattern
return schema
}
func (schema *Schema) WithItems(value *Schema) *Schema {
schema.Items = &SchemaRef{
Value: value,
}
return schema
}
func (schema *Schema) WithMinItems(i int64) *Schema {
n := uint64(i)
schema.MinItems = n
return schema
}
func (schema *Schema) WithMaxItems(i int64) *Schema {
n := uint64(i)
schema.MaxItems = &n
return schema
}
func (schema *Schema) WithUniqueItems(unique bool) *Schema {
schema.UniqueItems = unique
return schema
}
func (schema *Schema) WithProperty(name string, propertySchema *Schema) *Schema {
return schema.WithPropertyRef(name, &SchemaRef{
Value: propertySchema,
})
}
func (schema *Schema) WithPropertyRef(name string, ref *SchemaRef) *Schema {
properties := schema.Properties
if properties == nil {
properties = make(map[string]*SchemaRef)
schema.Properties = properties
}
properties[name] = ref
return schema
}
func (schema *Schema) WithProperties(properties map[string]*Schema) *Schema {
result := make(map[string]*SchemaRef, len(properties))
for k, v := range properties {
result[k] = &SchemaRef{
Value: v,
}
}
schema.Properties = result
return schema
}
func (schema *Schema) WithMinProperties(i int64) *Schema {
n := uint64(i)
schema.MinProps = n
return schema
}
func (schema *Schema) WithMaxProperties(i int64) *Schema {
n := uint64(i)
schema.MaxProps = &n
return schema
}
func (schema *Schema) WithAnyAdditionalProperties() *Schema {
schema.AdditionalProperties = nil
t := true
schema.AdditionalPropertiesAllowed = &t
return schema
}
func (schema *Schema) WithAdditionalProperties(v *Schema) *Schema {
if v == nil {
schema.AdditionalProperties = nil
} else {
schema.AdditionalProperties = &SchemaRef{
Value: v,
}
}
return schema
}
func (schema *Schema) IsEmpty() bool {
if schema.Type != "" || schema.Format != "" || len(schema.Enum) != 0 ||
schema.UniqueItems || schema.ExclusiveMin || schema.ExclusiveMax ||
!schema.Nullable ||
schema.Min != nil || schema.Max != nil || schema.MultipleOf != nil ||
schema.MinLength != 0 || schema.MaxLength != nil || schema.Pattern != "" ||
schema.MinItems != 0 || schema.MaxItems != nil ||
len(schema.Required) != 0 ||
schema.MinProps != 0 || schema.MaxProps != nil {
return false
}
if n := schema.Not; n != nil && !n.Value.IsEmpty() {
return false
}
if ap := schema.AdditionalProperties; ap != nil && !ap.Value.IsEmpty() {
return false
}
if apa := schema.AdditionalPropertiesAllowed; apa != nil && !*apa {
return false
}
if items := schema.Items; items != nil && !items.Value.IsEmpty() {
return false
}
for _, s := range schema.Properties {
if !s.Value.IsEmpty() {
return false
}
}
for _, s := range schema.OneOf {
if !s.Value.IsEmpty() {
return false
}
}
for _, s := range schema.AnyOf {
if !s.Value.IsEmpty() {
return false
}
}
for _, s := range schema.AllOf {
if !s.Value.IsEmpty() {
return false
}
}
return true
}
func (schema *Schema) Validate(c context.Context) error {
return schema.validate(c, make([]*Schema, 2))
}
func (schema *Schema) validate(c context.Context, stack []*Schema) (err error) {
for _, existing := range stack {
if existing == schema {
return
}
}
stack = append(stack, schema)
for _, item := range schema.OneOf {
v := item.Value
if v == nil {
return foundUnresolvedRef(item.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
for _, item := range schema.AnyOf {
v := item.Value
if v == nil {
return foundUnresolvedRef(item.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
for _, item := range schema.AllOf {
v := item.Value
if v == nil {
return foundUnresolvedRef(item.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
if ref := schema.Not; ref != nil {
v := ref.Value
if v == nil {
return foundUnresolvedRef(ref.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
schemaType := schema.Type
switch schemaType {
case "boolean":
case "number":
if format := schema.Format; len(format) > 0 {
switch format {
case "float", "double":
default:
return unsupportedFormat(format)
}
}
case "integer":
if format := schema.Format; len(format) > 0 {
switch format {
case "int32", "int64":
default:
return unsupportedFormat(format)
}
}
case "string":
if format := schema.Format; len(format) > 0 {
switch format {
// Supported by OpenAPIv3.0.1:
case "byte", "binary", "date", "date-time", "password":
// In JSON Draft-07 (not validated yet though):
case "regex":
case "time", "email", "idn-email":
case "hostname", "idn-hostname", "ipv4", "ipv6":
case "uri", "uri-reference", "iri", "iri-reference", "uri-template":
case "json-pointer", "relative-json-pointer":
default:
// Try to check for custom defined formats
if _, ok := SchemaStringFormats[format]; !ok {
return unsupportedFormat(format)
}
}
}
case "array":
if schema.Items == nil {
return errors.New("When schema type is 'array', schema 'items' must be non-null")
}
case "object":
default:
return fmt.Errorf("Unsupported 'type' value '%s'", schemaType)
}
if ref := schema.Items; ref != nil {
v := ref.Value
if v == nil {
return foundUnresolvedRef(ref.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
for _, ref := range schema.Properties {
v := ref.Value
if v == nil {
return foundUnresolvedRef(ref.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
if ref := schema.AdditionalProperties; ref != nil {
v := ref.Value
if v == nil {
return foundUnresolvedRef(ref.Ref)
}
if err = v.validate(c, stack); err != nil {
return
}
}
return
}
func (schema *Schema) IsMatching(value interface{}) bool {
return schema.visitJSON(value, true) == nil
}
func (schema *Schema) IsMatchingJSONBoolean(value bool) bool {
return schema.visitJSON(value, true) == nil
}
func (schema *Schema) IsMatchingJSONNumber(value float64) bool {
return schema.visitJSON(value, true) == nil
}
func (schema *Schema) IsMatchingJSONString(value string) bool {
return schema.visitJSON(value, true) == nil
}
func (schema *Schema) IsMatchingJSONArray(value []interface{}) bool {
return schema.visitJSON(value, true) == nil
}
func (schema *Schema) IsMatchingJSONObject(value map[string]interface{}) bool {
return schema.visitJSON(value, true) == nil
}
func (schema *Schema) VisitJSON(value interface{}) error {
return schema.visitJSON(value, false)
}
func (schema *Schema) visitJSON(value interface{}, fast bool) (err error) {
switch value := value.(type) {
case float64:
if math.IsNaN(value) {
return ErrSchemaInputNaN
}
if math.IsInf(value, 0) {
return ErrSchemaInputInf
}
}
if schema.IsEmpty() {
return
}
if err = schema.visitSetOperations(value, fast); err != nil {
return
}
switch value := value.(type) {
case nil:
return schema.visitJSONNull(fast)
case bool:
return schema.visitJSONBoolean(value, fast)
case float64:
return schema.visitJSONNumber(value, fast)
case string:
return schema.visitJSONString(value, fast)
case []interface{}:
return schema.visitJSONArray(value, fast)
case map[string]interface{}:
return schema.visitJSONObject(value, fast)
default:
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "type",
Reason: fmt.Sprintf("Not a JSON value: %T", value),
}
}
}
func (schema *Schema) visitSetOperations(value interface{}, fast bool) (err error) {
if enum := schema.Enum; len(enum) != 0 {
for _, v := range enum {
if value == v {
return
}
}
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "enum",
Reason: "JSON value is not one of the allowed values",
}
}
if ref := schema.Not; ref != nil {
v := ref.Value
if v == nil {
return foundUnresolvedRef(ref.Ref)
}
if err := v.visitJSON(value, true); err == nil {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "not",
}
}
}
if v := schema.OneOf; len(v) > 0 {
ok := 0
for _, item := range v {
v := item.Value
if v == nil {
return foundUnresolvedRef(item.Ref)
}
if err := v.visitJSON(value, true); err == nil {
ok++
}
}
if ok != 1 {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "oneOf",
}
}
}
if v := schema.AnyOf; len(v) > 0 {
ok := false
for _, item := range v {
v := item.Value
if v == nil {
return foundUnresolvedRef(item.Ref)
}
if err := v.visitJSON(value, true); err == nil {
ok = true
break
}
}
if !ok {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "anyOf",
}
}
}
for _, item := range schema.AllOf {
v := item.Value
if v == nil {
return foundUnresolvedRef(item.Ref)
}
if err := v.visitJSON(value, false); err != nil {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "allOf",
Origin: err,
}
}
}
return
}
func (schema *Schema) visitJSONNull(fast bool) (err error) {
if schema.Nullable {
return
}
if fast {
return errSchema
}
return &SchemaError{
Value: nil,
Schema: schema,
SchemaField: "nullable",
Reason: "Value is not nullable",
}
}
func (schema *Schema) VisitJSONBoolean(value bool) error {
return schema.visitJSONBoolean(value, false)
}
func (schema *Schema) visitJSONBoolean(value bool, fast bool) (err error) {
if schemaType := schema.Type; schemaType != "" && schemaType != "boolean" {
return schema.expectedType("boolean", fast)
}
return
}
func (schema *Schema) VisitJSONNumber(value float64) error {
return schema.visitJSONNumber(value, false)
}
func (schema *Schema) visitJSONNumber(value float64, fast bool) (err error) {
schemaType := schema.Type
if schemaType == "integer" {
if bigFloat := big.NewFloat(value); !bigFloat.IsInt() {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "type",
Reason: "Value must be an integer",
}
}
} else if schemaType != "" && schemaType != "number" {
return schema.expectedType("number, integer", fast)
}
// "exclusiveMinimum"
if v := schema.ExclusiveMin; v && !(*schema.Min < value) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "exclusiveMinimum",
Reason: fmt.Sprintf("Number must be more than %g", *schema.Min),
}
}
// "exclusiveMaximum"
if v := schema.ExclusiveMax; v && !(*schema.Max > value) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "exclusiveMaximum",
Reason: fmt.Sprintf("Number must be less than %g", *schema.Max),
}
}
// "minimum"
if v := schema.Min; v != nil && !(*v <= value) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "minimum",
Reason: fmt.Sprintf("Number must be at least %g", *v),
}
}
// "maximum"
if v := schema.Max; v != nil && !(*v >= value) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "maximum",
Reason: fmt.Sprintf("Number must be at most %g", *v),
}
}
// "multipleOf"
if v := schema.MultipleOf; v != nil {
// "A numeric instance is valid only if division by this keyword's
// value results in an integer."
if bigFloat := big.NewFloat(value / *v); !bigFloat.IsInt() {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "multipleOf",
}
}
}
return
}
func (schema *Schema) VisitJSONString(value string) error {
return schema.visitJSONString(value, false)
}
func (schema *Schema) visitJSONString(value string, fast bool) (err error) {
if schemaType := schema.Type; schemaType != "" && schemaType != "string" {
return schema.expectedType("string", fast)
}
// "minLength" and "maxLength"
minLength := schema.MinLength
maxLength := schema.MaxLength
if minLength != 0 || maxLength != nil {
// JSON schema string lengths are UTF-16, not UTF-8!
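// (characters outside the Basic Multilingual Plane, e.g. most emoji, count as two UTF-16 code units)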
length := int64(len(utf16.Encode([]rune(value))))
if minLength != 0 && length < int64(minLength) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "minLength",
Reason: fmt.Sprintf("Minimum string length is %d", minLength),
}
}
if maxLength != nil && length > int64(*maxLength) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "maxLength",
Reason: fmt.Sprintf("Maximum string length is %d", *maxLength),
}
}
}
// "format" and "pattern"
cp := schema.compiledPattern
if cp == nil {
pattern := schema.Pattern
if v := schema.Pattern; len(v) > 0 {
// Pattern
re, err := regexp.Compile(v)
if err != nil {
return fmt.Errorf("Error while compiling regular expression '%s': %v", pattern, err)
}
cp = &compiledPattern{
Regexp: re,
ErrReason: "JSON string doesn't match the regular expression '" + v + "'",
}
schema.compiledPattern = cp
} else if v := schema.Format; len(v) > 0 {
// No pattern, but does have a format
re := SchemaStringFormats[v]
if re != nil {
cp = &compiledPattern{
Regexp: re,
ErrReason: "JSON string doesn't match the format '" + v + " (regular expression `" + re.String() + "`)'",
}
schema.compiledPattern = cp
}
}
}
if cp != nil {
if !cp.Regexp.MatchString(value) {
field := "format"
if schema.Pattern != "" {
field = "pattern"
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: field,
Reason: cp.ErrReason,
}
}
}
return
}
func (schema *Schema) VisitJSONArray(value []interface{}) error {
return schema.visitJSONArray(value, false)
}
func (schema *Schema) visitJSONArray(value []interface{}, fast bool) (err error) {
if schemaType := schema.Type; schemaType != "" && schemaType != "array" {
return schema.expectedType("array", fast)
}
lenValue := int64(len(value))
// "minItems"
if v := schema.MinItems; v != 0 && lenValue < int64(v) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "minItems",
Reason: fmt.Sprintf("Minimum number of items is %d", v),
}
}
// "maxItems"
if v := schema.MaxItems; v != nil && lenValue > int64(*v) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "maxItems",
Reason: fmt.Sprintf("Maximum number of items is %d", *v),
}
}
// "uniqueItems"
if v := schema.UniqueItems; v && !isSliceOfUniqueItems(value) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "uniqueItems",
Reason: "Duplicate items found",
}
}
// "items"
if itemSchemaRef := schema.Items; itemSchemaRef != nil {
itemSchema := itemSchemaRef.Value
if itemSchema == nil {
return foundUnresolvedRef(itemSchemaRef.Ref)
}
for i, item := range value {
if err := itemSchema.VisitJSON(item); err != nil {
return markSchemaErrorIndex(err, i)
}
}
}
return
}
func (schema *Schema) VisitJSONObject(value map[string]interface{}) error {
return schema.visitJSONObject(value, false)
}
func (schema *Schema) visitJSONObject(value map[string]interface{}, fast bool) (err error) {
if schemaType := schema.Type; schemaType != "" && schemaType != "object" {
return schema.expectedType("object", fast)
}
// "properties"
properties := schema.Properties
lenValue := int64(len(value))
// "minProperties"
if v := schema.MinProps; v != 0 && lenValue < int64(v) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "minProperties",
Reason: fmt.Sprintf("There must be at least %d properties", v),
}
}
// "maxProperties"
if v := schema.MaxProps; v != nil && lenValue > int64(*v) {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "maxProperties",
Reason: fmt.Sprintf("There must be at most %d properties", *v),
}
}
// "patternProperties"
var cp *compiledPattern
patternProperties := schema.PatternProperties
if len(patternProperties) > 0 {
cp = schema.compiledPatternProperties
if cp == nil {
re, err := regexp.Compile(patternProperties)
if err != nil {
return fmt.Errorf("Error while compiling regular expression '%s': %v", patternProperties, err)
}
cp = &compiledPattern{
Regexp: re,
ErrReason: "JSON property doesn't match the regular expression '" + patternProperties + "'",
}
schema.compiledPatternProperties = cp
}
}
// "additionalProperties"
var additionalProperties *Schema
if ref := schema.AdditionalProperties; ref != nil {
additionalProperties = ref.Value
}
for k, v := range value {
if properties != nil {
propertyRef := properties[k]
if propertyRef != nil {
p := propertyRef.Value
if p == nil {
return foundUnresolvedRef(propertyRef.Ref)
}
if err := p.VisitJSON(v); err != nil {
if fast {
return errSchema
}
return markSchemaErrorKey(err, k)
}
continue
}
}
allowed := schema.AdditionalPropertiesAllowed
if additionalProperties != nil || allowed == nil || (allowed != nil && *allowed) {
if cp != nil {
if !cp.Regexp.MatchString(k) {
return &SchemaError{
Schema: schema,
SchemaField: "patternProperties",
Reason: cp.ErrReason,
}
}
}
if additionalProperties != nil {
if err := additionalProperties.VisitJSON(v); err != nil {
if fast {
return errSchema
}
return markSchemaErrorKey(err, k)
}
}
continue
}
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "properties",
Reason: fmt.Sprintf("Property '%s' is unsupported", k),
}
}
for _, k := range schema.Required {
if _, ok := value[k]; !ok {
if fast {
return errSchema
}
return &SchemaError{
Value: value,
Schema: schema,
SchemaField: "required",
Reason: fmt.Sprintf("Property '%s' is missing", k),
}
}
}
return
}
func (schema *Schema) expectedType(typ string, fast bool) error {
if fast {
return errSchema
}
return &SchemaError{
Value: typ,
Schema: schema,
SchemaField: "type",
Reason: "Field must be set to " + schema.Type + " or not be present",
}
}
type SchemaError struct {
Value interface{}
reversePath []string
Schema *Schema
SchemaField string
Reason string
Origin error
}
func markSchemaErrorKey(err error, key string) error {
if v, ok := err.(*SchemaError); ok {
v.reversePath = append(v.reversePath, key)
return v
}
return err
}
func markSchemaErrorIndex(err error, index int) error {
if v, ok := err.(*SchemaError); ok {
v.reversePath = append(v.reversePath, strconv.FormatInt(int64(index), 10))
return v
}
return err
}
func (err *SchemaError) JSONPointer() []string {
reversePath := err.reversePath
path := make([]string, len(reversePath))
for i := range path {
path[i] = reversePath[len(path)-1-i]
}
return path
}
func (err *SchemaError) Error() string {
if err.Origin != nil {
return err.Origin.Error()
}
buf := bytes.NewBuffer(make([]byte, 0, 256))
if len(err.reversePath) > 0 {
buf.WriteString(`Error at "`)
reversePath := err.reversePath
for i := len(reversePath) - 1; i >= 0; i-- {
buf.WriteByte('/')
buf.WriteString(reversePath[i])
}
buf.WriteString(`":`)
}
reason := err.Reason
if reason == "" {
buf.WriteString(`Doesn't match schema "`)
buf.WriteString(err.SchemaField)
buf.WriteString(`"`)
} else {
buf.WriteString(reason)
}
if !SchemaErrorDetailsDisabled {
buf.WriteString("\nSchema:\n ")
encoder := json.NewEncoder(buf)
encoder.SetIndent(" ", " ")
if err := encoder.Encode(err.Schema); err != nil {
panic(err)
}
buf.WriteString("\nValue:\n ")
if err := encoder.Encode(err.Value); err != nil {
panic(err)
}
}
return buf.String()
}
func isSliceOfUniqueItems(xs []interface{}) bool {
s := len(xs)
m := make(map[interface{}]struct{}, s)
for _, x := range xs {
m[x] = struct{}{}
}
return s == len(m)
}
func unsupportedFormat(format string) error {
return fmt.Errorf("Unsupported 'format' value '%s'", format)
} | Value: schema,
}
} |
__main__.py | import os
import argparse
import heapq
import pandas as pd
import pickle as pkl
from embedding_utils import EmbeddingLoader
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.model_selection._search import BaseSearchCV
def print_cv_result(result, n):
|
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, help="parent dir to load embeddings")
parser.add_argument("--output", required=True, help="parent dir to dump search results")
# uses python reflection to dynamically load model
parser.add_argument("--classifier", required=True,
help="classifier to use, must be existent under model/, such as model/KNN.py")
parser.add_argument("--corpus", default="title", help="title, text, or concatenated")
parser.add_argument("--embedding", default="d2v",
help="embeddings model to use, must be one of [d2v, nd2v, onehot], default is d2v")
parser.add_argument("--n_iter", default=100, type=int, help="number of trials to run during cross-validation. "
"default=100. This is NOT epochs to train d2v")
parser.add_argument("--n_jobs", default=1, type=int, help="number of cpu workers to run in parallel")
parser.add_argument("--cv", default=5, type=int, help="number of folds for cross-validation, default=5")
# hyperparameters for doc2vec
parser.add_argument("--vec_size", default=300, type=int,
help="size of vectors, default is 300, recommended to be left untouched")
parser.add_argument("--win_size", default=13, type=int,
help="window size, used if model is d2v, default = 13")
parser.add_argument("--min_count", default=5, type=int,
help="min count for inclusion in dict, used if model is d2v, default = 5")
parser.add_argument("--dm", action="store_true",
help="whether to use DM or DBOW, used if model is d2v, default is DBOW")
parser.add_argument("--epochs", default=100, type=int,
help="number of epochs to train the model for, used if model is d2v, default = 100. This is "
"NOT the epochs for RandomizedSearch")
# hyperparameters for naive doc2vec
parser.add_argument("--normalizer", default=None,
help="normalizer for naive doc2vec, either l2 or mean, default is None")
# hyperparameters for one-hot
parser.add_argument("--scorer", default="count",
help="scorer function for one-hot, either tfidf or count, default is count")
opt = parser.parse_args()
print(opt)
loader = EmbeddingLoader(opt.input)
# filename is saved for dumping CV results later
if opt.embedding == "d2v":
filename = loader.get_d2v_filename(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,
min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)
embeddings = loader.get_d2v(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,
min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)
elif opt.embedding == "nd2v":
filename = loader.get_nd2v_filename(corpus=opt.corpus, normalizer=opt.normalizer)
embeddings = loader.get_nd2v(corpus=opt.corpus, normalizer=opt.normalizer)
elif opt.embedding == "onehot":
filename = loader.get_onehot_filename(corpus=opt.corpus, scorer=opt.scorer, normalize=opt.normalizer is not None)
embeddings = loader.get_onehot(corpus=opt.corpus, scorer=opt.scorer, normalize=opt.normalizer is not None)
else:
print("unrecognized embedding method: {}; proceed with d2v as fall back".format(opt.embedding))
filename = loader.get_d2v_filename(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,
min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)
embeddings = loader.get_d2v(corpus=opt.corpus, vec_size=opt.vec_size, win_size=opt.win_size,
min_count=opt.min_count, dm=opt.dm, epochs=opt.epochs)
labels = loader.get_label()
seed = 0
embeddings_train, embeddings_test, labels_train, labels_test = \
train_test_split(embeddings, labels, test_size=0.25, random_state=seed, stratify=labels)
# import the target file
try:
module = __import__("model." + opt.classifier)
module = getattr(module, opt.classifier)
except ModuleNotFoundError as e:
print("There is no such file, double check that you have a `model/{}.py`".format(opt.classifier))
print("If you have checked and the problem persist, make sure to run this script from ROOTDIR instead of "
"ROOTDIR/model, your command should look like `python -m model ...`")
raise e
print("Successfully imported module {}".format(module))
# get the model from the target file
try:
model = getattr(module, "model")
except AttributeError as e:
print("There is no `model` attribute in `model/{}.py`".format(opt.classifier))
print("Make sure to include a variable named `model` in your file")
raise e
print("Successfully obtained model {}".format(model))
# get the hyperparameters to be trained
try:
param_dist = getattr(module, "param_dist")
except AttributeError as e:
print("There is no `param_dist` attribute in `model/{}.py`".format(opt.classifier))
print("Make sure to include a variable named `param_dist` in your file")
raise e
print("Successfully obtained param_dist {}".format(param_dist))
verbose = opt.cv * opt.n_iter
searcher = RandomizedSearchCV(model, param_distributions=param_dist, n_iter=opt.n_iter, scoring='f1', cv=opt.cv,
verbose=verbose, random_state=seed, error_score=0, return_train_score=False,
n_jobs=opt.n_jobs)
searcher.fit(embeddings_train, labels_train)
print("best: {}\n{}\n{}\n{}".format(searcher.best_index_, searcher.best_score_, searcher.best_estimator_,
searcher.best_params_))
# The following line is meant for floydhub renderer to grep
print('{"metric": "highest_val", "value": %f}' % searcher.best_score_)
results = pd.DataFrame(searcher.cv_results_)
filename_classifier = opt.classifier
dump_filename = "{}-{}".format(opt.classifier, filename)
with open(os.path.join(opt.output, dump_filename), "wb") as f:
pkl.dump(results, f)
print_cv_result(results, n=-1)
# uses all training samples to refit the model
searcher.best_estimator_.fit(embeddings_train, labels_train)
test_score = searcher.best_estimator_.score(embeddings_test, labels_test)
print("Final test score of the best performing model: {}".format(test_score))
# The following line is meant for floydhub renderer to grep
print('{"metric": "test", "value": %f}' % test_score)
| if isinstance(result, BaseSearchCV):
result = result.cv_results_
scores = result['mean_test_score']
params = result['params']
if n < 0:
n = len(scores)
print("Cross Validation result in descending order: (totalling {} trials)".format(n))
for rank, candidate, in enumerate(heapq.nlargest(n, zip(scores, params), key=lambda tup: tup[0])):
print("rank {}, score = {}\n hyperparams = {}".format(rank + 1, *candidate)) |
control.rs | #![cfg_attr(debug_assertions, allow(dead_code, unused_imports))]
use crate::common::{
await_actor_count, await_provider_count, HTTPSRV_OCI, KVCOUNTER_OCI, NATS_OCI, REDIS_OCI,
};
use ::wasmcloud_control_interface::Client;
use log::info;
use std::collections::HashMap;
use wasmcloud_actor_http_server::{deserialize, serialize};
use std::io::Read;
use std::time::Duration;
use wascap::prelude::KeyPair;
use wasmcloud_host::{Actor, HostBuilder};
use wasmcloud_host::{NativeCapability, Result};
// NOTE: this test does verify a number of error and edge cases, so when it is
// running -properly- you will see warnings and errors in the output log
pub(crate) async fn basics() -> Result<()> {
// Ensure that we're not accidentally using the replication feature on KV cache
::std::env::remove_var("KVCACHE_NATS_URL");
let nc = nats::asynk::connect("0.0.0.0:4222").await?;
let nc3 = nats::asynk::connect("0.0.0.0:4222").await?;
let h = HostBuilder::new()
.with_namespace("controlbasics")
.with_rpc_client(nc3)
.with_control_client(nc)
.oci_allow_latest()
.with_label("testing", "test-one")
.build();
h.start().await?;
let hid = h.id();
let nc2 = nats::asynk::connect("0.0.0.0:4222").await?;
let ctl_client = Client::new(
nc2,
Some("controlbasics".to_string()),
Duration::from_secs(20),
);
// Cannot stop a non-existent actor
assert!(ctl_client
.stop_actor(&hid, KVCOUNTER_OCI)
.await?
.failure
.is_some());
// Cannot stop a non-existent provider
assert!(ctl_client
.stop_provider(&hid, "fooref", "default", "wasmcloud:testing")
.await?
.failure
.is_some());
let a_ack = ctl_client.start_actor(&hid, KVCOUNTER_OCI).await?;
await_actor_count(&h, 1, Duration::from_millis(50), 20).await?;
println!("Received ACK from host {}", a_ack.host_id);
let claims = ctl_client.get_claims().await?;
assert_eq!(1, claims.claims.len());
assert!(a_ack.failure.is_none());
let a_ack2 = ctl_client.start_actor(&hid, KVCOUNTER_OCI).await?;
assert!(a_ack2.failure.is_some()); // cannot start the same actor twice
assert_eq!(
a_ack2.failure.unwrap(),
format!(
"Actor with image ref '{}' is already running on this host",
KVCOUNTER_OCI
)
);
let stop_ack = ctl_client.stop_actor(&hid, KVCOUNTER_OCI).await?;
assert!(stop_ack.failure.is_none());
await_actor_count(&h, 0, Duration::from_millis(50), 20).await?;
let _ = ctl_client.start_actor(&hid, KVCOUNTER_OCI).await?;
let redis_ack = ctl_client.start_provider(&hid, REDIS_OCI, None).await?;
await_provider_count(&h, 3, Duration::from_millis(50), 20).await?;
println!("Redis {:?} started", redis_ack);
actix_rt::time::sleep(Duration::from_millis(500)).await;
// Stop and re-start a provider
assert!(ctl_client
.stop_provider(&hid, REDIS_OCI, "default", "wasmcloud:keyvalue")
.await?
.failure
.is_none());
await_provider_count(&h, 2, Duration::from_millis(50), 20).await?;
actix_rt::time::sleep(Duration::from_secs(1)).await;
assert!(ctl_client
.start_provider(&hid, REDIS_OCI, None)
.await?
.failure
.is_none());
await_provider_count(&h, 3, Duration::from_millis(50), 20).await?;
actix_rt::time::sleep(Duration::from_secs(1)).await;
let nats_ack = ctl_client.start_provider(&hid, NATS_OCI, None).await?;
await_provider_count(&h, 4, Duration::from_millis(50), 200).await?;
println!("NATS {:?} started", nats_ack);
let http_ack = ctl_client.start_provider(&hid, HTTPSRV_OCI, None).await?;
await_provider_count(&h, 5, Duration::from_millis(50), 10).await?;
println!("HTTP Server {:?} started", http_ack);
let http_ack2 = ctl_client.start_provider(&hid, HTTPSRV_OCI, None).await?;
assert!(http_ack2.failure.is_some());
assert_eq!(
http_ack2.failure.unwrap(),
format!(
"Provider with image ref '{}' is already running on this host.",
HTTPSRV_OCI
)
);
let hosts = ctl_client.get_hosts(Duration::from_secs(1)).await?;
assert_eq!(hosts.len(), 1);
assert_eq!(hosts[0].id, hid);
let inv = ctl_client.get_host_inventory(&hosts[0].id).await?;
println!("Got host inventory: {:?}", inv);
assert_eq!(1, inv.actors.len());
assert_eq!(inv.actors[0].image_ref, Some(KVCOUNTER_OCI.to_string()));
assert_eq!(inv.actors[0].name, Some("Key Value Counter".to_string()));
assert_eq!(inv.actors[0].revision, 2);
assert_eq!(4, inv.labels.len()); // 3 built-in labels plus the custom "testing" label
assert_eq!(inv.host_id, hosts[0].id);
h.stop().await;
Ok(())
}
pub(crate) async fn live_update() -> Result<()> {
::std::env::set_var("KVCACHE_NATS_URL", "0.0.0.0:4222");
const NS: &str = "liveupdate";
const PORT: u32 = 5251;
let host = HostBuilder::new()
.with_namespace(NS)
.enable_live_updates() // Need this or we can't update
.build();
host.start().await?;
let a = Actor::from_file("./tests/modules/echo_r0.wasm")?;
let a_id = a.public_key();
host.start_actor(a).await?;
await_actor_count(&host, 1, Duration::from_millis(50), 20).await?;
let arc = crate::common::par_from_file("./tests/modules/httpserver.par.gz")?;
let websrv = NativeCapability::from_archive(&arc, None)?;
let websrv_id = arc.claims().unwrap().subject;
let mut webvalues: HashMap<String, String> = HashMap::new();
webvalues.insert("PORT".to_string(), format!("{}", PORT));
host.start_native_capability(websrv).await?;
await_provider_count(&host, 2, Duration::from_millis(50), 3).await?;
host.set_link(&a_id, "wasmcloud:httpserver", None, websrv_id, webvalues)
.await?;
actix_rt::time::sleep(Duration::from_secs(1)).await; // Give web server time to start
let url = format!("http://localhost:{}/foo/bar", PORT);
let resp = reqwest::get(&url).await?;
assert!(resp.status().is_success());
assert_eq!(0, get_revision(&resp.text().await?));
let bytes = {
let mut f = std::fs::File::open("./tests/modules/echo_r1.wasm")?;
let mut buf = Vec::new();
f.read_to_end(&mut buf)?;
buf
};
host.update_actor(&a_id, None, &bytes).await?;
let resp = reqwest::get(&url).await?;
assert!(resp.status().is_success());
assert_eq!(1, get_revision(&resp.text().await?));
Ok(())
}
/// Ensures a live update of an actor places the new OCI reference in the lattice cache
pub(crate) async fn multiple_ocirefs() -> Result<()> {
// Ensure that we're not accidentally using the replication feature on KV cache
::std::env::remove_var("KVCACHE_NATS_URL");
const NS: &str = "liveupdate_ctl";
const ECHO_0_2_0: &str = "wasmcloud.azurecr.io/echo:0.2.0";
const ECHO_0_2_1: &str = "wasmcloud.azurecr.io/echo:0.2.1";
const ECHO_PKEY: &str = "MBCFOPM6JW2APJLXJD3Z5O4CN7CPYJ2B4FTKLJUR5YR5MITIU7HD3WD5";
const MAX_RETRY: u8 = 60;
let nc = nats::asynk::connect("0.0.0.0:4222").await?;
let nc3 = nats::asynk::connect("0.0.0.0:4222").await?;
let h = HostBuilder::new()
.with_namespace(NS)
.with_rpc_client(nc3)
.with_control_client(nc)
.oci_allow_latest()
.enable_live_updates()
.with_label("testing", "multiple-ocirefs")
.build();
h.start().await?;
let hid = h.id();
let nc2 = nats::asynk::connect("0.0.0.0:4222").await?;
let ctl_client = Client::new(nc2, Some(NS.to_string()), Duration::from_secs(20));
let hosts = ctl_client.get_hosts(Duration::from_secs(1)).await?;
assert_eq!(hosts.len(), 1);
assert_eq!(hosts[0].id, hid);
ctl_client
.start_actor(&hid, "wasmcloud.azurecr.io/echo:0.2.0")
.await?;
for _ in 0..MAX_RETRY {
let inv = ctl_client.get_host_inventory(&hosts[0].id).await?;
if !inv.actors.is_empty() && inv.actors[0].image_ref == Some(ECHO_0_2_0.to_string()) {
assert_eq!(inv.actors[0].image_ref, Some(ECHO_0_2_0.to_string()));
assert_eq!(inv.actors[0].name, Some("Echo".to_string()));
assert_eq!(inv.actors[0].revision, 1);
info!("Successfully found echo 0.2.0 in inventory");
break;
}
actix_rt::time::sleep(Duration::from_millis(500)).await;
}
ctl_client
.update_actor(&hid, ECHO_PKEY, "wasmcloud.azurecr.io/echo:0.2.1")
.await?;
for _ in 0..MAX_RETRY {
let inv = ctl_client.get_host_inventory(&hosts[0].id).await?;
if !inv.actors.is_empty() && inv.actors[0].image_ref == Some(ECHO_0_2_1.to_string()) {
assert_eq!(inv.actors[0].image_ref, Some(ECHO_0_2_1.to_string()));
assert_eq!(inv.actors[0].name, Some("Echo".to_string()));
assert_eq!(inv.actors[0].revision, 2);
info!("Successfully found echo 0.2.1 in inventory");
break;
}
actix_rt::time::sleep(Duration::from_millis(500)).await;
}
// Ensure oci references exist over control interface
let inv = ctl_client.get_host_inventory(&hosts[0].id).await?;
assert_eq!(1, inv.actors.len());
assert_eq!(inv.actors[0].image_ref, Some(ECHO_0_2_1.to_string()));
assert_eq!(inv.actors[0].name, Some("Echo".to_string()));
assert_eq!(inv.actors[0].revision, 2);
assert_eq!(4, inv.labels.len()); // 3 built-in labels plus the custom "testing" label
assert_eq!(inv.host_id, hosts[0].id);
// Ensure oci references exist on host API
let oci_refs = h.oci_references().await?;
assert!(oci_refs.contains_key(ECHO_0_2_0));
assert_eq!(oci_refs.get(ECHO_0_2_0).unwrap(), ECHO_PKEY);
assert!(oci_refs.contains_key(ECHO_0_2_1));
assert_eq!(oci_refs.get(ECHO_0_2_1).unwrap(), ECHO_PKEY);
h.stop().await;
Ok(())
}
fn | (body: &str) -> u64 {
println!("{}", body);
let v: serde_json::Value = serde_json::from_str(body).unwrap();
v["revision"].as_u64().unwrap()
}
pub(crate) async fn calltest() -> Result<()> {
// Ensure that we're not accidentally using the replication feature on KV cache
::std::env::remove_var("KVCACHE_NATS_URL");
let nc = nats::asynk::connect("0.0.0.0:4222").await?;
let nc3 = nats::asynk::connect("0.0.0.0:4222").await?;
let h = HostBuilder::new()
.with_namespace("calltest")
.with_control_client(nc)
.with_rpc_client(nc3)
.build();
h.start().await?;
let a = Actor::from_file("./tests/modules/echo.wasm")?;
let a_id = a.public_key();
h.start_actor(a).await?;
await_actor_count(&h, 1, Duration::from_millis(50), 20).await?;
actix_rt::time::sleep(Duration::from_millis(600)).await;
let nc2 = nats::asynk::connect("0.0.0.0:4222").await?;
let ctl_client = Client::new(nc2, Some("calltest".to_string()), Duration::from_secs(20));
let req = wasmcloud_actor_http_server::Request {
header: HashMap::new(),
method: "GET".to_string(),
path: "".to_string(),
query_string: "".to_string(),
body: b"NARF".to_vec(),
};
let inv_r = ctl_client
.call_actor(&a_id, "HandleRequest", &serialize(&req)?)
.await?;
let http_r: wasmcloud_actor_http_server::Response = deserialize(&inv_r.msg)?;
assert_eq!(inv_r.error, None);
assert_eq!(
std::str::from_utf8(&http_r.body)?,
r#"{"method":"GET","path":"","query_string":"","headers":{},"body":[78,65,82,70]}"#
);
assert_eq!(http_r.status, "OK".to_string());
assert_eq!(http_r.status_code, 200);
h.stop().await;
actix_rt::time::sleep(Duration::from_millis(900)).await;
ctl_client.stop_actor(&h.id(), &a_id).await?;
actix_rt::time::sleep(Duration::from_millis(300)).await;
let inv_r = ctl_client
.call_actor(&a_id, "HandleRequest", &serialize(&req)?)
.await;
println!("{:?}", inv_r);
// we should not be able to invoke an actor that we stopped
assert!(inv_r.is_err());
Ok(())
}
pub(crate) async fn auctions() -> Result<()> {
// Auctions tests require that the hosts are at the very least
// sharing the same lattice data.
// Set the default kvcache provider to enable NATS-based replication
// by supplying a NATS URL.
::std::env::set_var("KVCACHE_NATS_URL", "0.0.0.0:4222");
let nc = nats::asynk::connect("0.0.0.0:4222").await?;
let h = HostBuilder::new()
.with_namespace("auctions")
.with_control_client(nc)
.oci_allow_latest()
.with_label("kv-friendly", "yes")
.with_label("web-friendly", "no")
.build();
h.start().await?;
let hid = h.id();
let nc2 = nats::asynk::connect("0.0.0.0:4222").await?;
let nc3 = nats::asynk::connect("0.0.0.0:4222").await?;
let ctl_client = Client::new(nc2, Some("auctions".to_string()), Duration::from_secs(20));
let h2 = HostBuilder::new()
.with_namespace("auctions")
.with_control_client(nc3)
.oci_allow_latest()
.with_label("web-friendly", "yes")
.build();
h2.start().await?;
let hid2 = h2.id();
actix_rt::time::sleep(Duration::from_secs(2)).await;
// auction with no requirements
let kvack = ctl_client
.perform_actor_auction(KVCOUNTER_OCI, HashMap::new(), Duration::from_secs(5))
.await?;
assert_eq!(2, kvack.len());
// auction the KV counter with a constraint
let kvack = ctl_client
.perform_actor_auction(KVCOUNTER_OCI, kvrequirements(), Duration::from_secs(5))
.await?;
assert_eq!(1, kvack.len());
assert_eq!(kvack[0].host_id, hid);
// start it and re-attempt an auction
let _ = ctl_client.start_actor(&hid, KVCOUNTER_OCI).await?;
await_actor_count(&h, 1, Duration::from_millis(50), 20).await?;
actix_rt::time::sleep(Duration::from_secs(1)).await;
let kvack = ctl_client
.perform_actor_auction(KVCOUNTER_OCI, kvrequirements(), Duration::from_secs(5))
.await?;
// Should be no viable candidates now
assert_eq!(0, kvack.len());
// find a place for the web server
let httpack = ctl_client
.perform_provider_auction(
HTTPSRV_OCI,
"default",
webrequirements(),
Duration::from_secs(2),
)
.await?;
assert_eq!(1, httpack.len());
assert_eq!(httpack[0].host_id, hid2);
// start web server on host 2
let _http_ack = ctl_client
.start_provider(&httpack[0].host_id, HTTPSRV_OCI, None)
.await?;
await_provider_count(&h2, 3, Duration::from_millis(50), 10).await?;
actix_rt::time::sleep(Duration::from_millis(500)).await;
// should be no candidates now
let httpack = ctl_client
.perform_provider_auction(
HTTPSRV_OCI,
"default",
webrequirements(),
Duration::from_secs(1),
)
.await?;
assert_eq!(0, httpack.len());
h.stop().await;
h2.stop().await;
actix_rt::time::sleep(Duration::from_millis(300)).await;
Ok(())
}
fn kvrequirements() -> HashMap<String, String> {
let mut hm = HashMap::new();
hm.insert("kv-friendly".to_string(), "yes".to_string());
hm
}
fn webrequirements() -> HashMap<String, String> {
let mut hm = HashMap::new();
hm.insert("web-friendly".to_string(), "yes".to_string());
hm
}
| get_revision |
main.ts | import { NestFactory } from '@nestjs/core';
import { Logger } from '@nestjs/common';
import { AppModule } from './app.module';
async function bootstrap() {
const logger = new Logger('bootstrap');
const app = await NestFactory.create(AppModule);
await app.listen(3000); | bootstrap(); |
logger.log('Application is listening on port 3000');
} |
augmented_pickle.py | """
Suppose you have some input data sources `data_in` on which you apply some process `F` parameterized by `args`:
data_out = F(data_in, args)
You want to serialize `data_out`, but also don't want to lose `args`,
to preserve the exact setup that generated the output data.
Now suppose you want to inspect `args` for a particular `data_out`:
- Saving both `{"data": data_out, "args": args}` may not be a viable solution,
as unpickling it loads `data_out` fully into memory even when you only need `args`.
- Saving `data_out` and `args` separately necessitates extra care to keep them tied together.
Solution: define a simple data format -- *augmented pickle*
<metadata>
<body (actual data)>
Pickle both objects, but read body on-demand:
res = read_augmented_pickle("./data.apkl", get_body=True)
# get metadata (body is not loaded)
meta = next(res)
# query the generator again to get body (data)
data = next(res)
"""
import pickle
from os import PathLike
from typing import Any, Iterable, Union
def | (
metadata: Any,
body: Any,
path: Union[str, PathLike],
) -> None:
"""Write an augmented pickle file containing `metadata` and `body`."""
with open(path, "wb") as fp:
pickle.dump(metadata, fp)
pickle.dump(body, fp)
def read_augmented_pickle(
path: Union[str, PathLike],
get_body: bool,
) -> Iterable[Any]:
"""Read an augmented pickle file containing `metadata` and `body`.
Returns a generator that can be queried on-demand using `next`.
If `get_body` is False, only `metadata` is yielded.
"""
with open(path, "rb") as fp:
metadata = pickle.load(fp)
yield metadata
if not get_body:
return
body = pickle.load(fp)
yield body
| write_augmented_pickle |
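# Usage sketch (not part of the module): write a small augmented pickle, then
# read only the metadata without unpickling the body. The path and payloads
# below are hypothetical.
write_augmented_pickle(
    metadata={"args": {"win_size": 13, "epochs": 100}},
    body=list(range(1000)),
    path="example.apkl",
)
reader = read_augmented_pickle("example.apkl", get_body=False)
print(next(reader))  # only the metadata object is unpickled here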
wrap.go | package pkging
import (
"fmt"
"os"
"path/filepath"
"github.com/rhomber/pkger/here"
)
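// Layers the `with` Pkger on top of `parent`: read-style calls (Parse, Open,
// Stat, ...) try `with` first and fall back to `parent` on error, while
// MkdirAll, Walk, Remove and RemoveAll are applied to both.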
func | (parent, with Pkger) Pkger {
return withPkger{
base: with,
parent: parent,
}
}
type withPkger struct {
base Pkger
parent Pkger
}
func (w withPkger) String() string {
if w.parent == nil {
return fmt.Sprintf("%T", w.base)
}
return fmt.Sprintf("%T > %T", w.base, w.parent)
}
func (w withPkger) Parse(p string) (here.Path, error) {
pt, err := w.base.Parse(p)
if err != nil {
if w.parent != nil {
return w.parent.Parse(p)
}
return pt, err
}
return pt, nil
}
func (w withPkger) Current() (here.Info, error) {
pt, err := w.base.Current()
if err != nil {
if w.parent != nil {
return w.parent.Current()
}
return pt, err
}
return pt, nil
}
func (w withPkger) Info(p string) (here.Info, error) {
pt, err := w.base.Info(p)
if err != nil {
if w.parent != nil {
return w.parent.Info(p)
}
return pt, err
}
return pt, nil
}
// Create creates the named file with mode 0666 (before umask) - It's actually 0644, truncating it if it already exists. If successful, methods on the returned File can be used for I/O; the associated file descriptor has mode O_RDWR.
func (w withPkger) Create(p string) (File, error) {
pt, err := w.base.Create(p)
if err != nil {
if w.parent != nil {
return w.parent.Create(p)
}
return pt, err
}
return pt, nil
}
// MkdirAll creates a directory named path, along with any necessary parents, and returns nil, or else returns an error. The permission bits perm (before umask) are used for all directories that MkdirAll creates. If path is already a directory, MkdirAll does nothing and returns nil.
func (w withPkger) MkdirAll(p string, perm os.FileMode) error {
err := w.base.MkdirAll(p, perm)
if err != nil {
return err
}
if w.parent != nil {
return w.parent.MkdirAll(p, perm)
}
return nil
}
// Open opens the named file for reading. If successful, methods on the returned file can be used for reading; the associated file descriptor has mode O_RDONLY.
func (w withPkger) Open(p string) (File, error) {
pt, err := w.base.Open(p)
if err != nil {
if w.parent != nil {
return w.parent.Open(p)
}
return pt, err
}
return pt, nil
}
// Stat returns a FileInfo describing the named file.
func (w withPkger) Stat(p string) (os.FileInfo, error) {
pt, err := w.base.Stat(p)
if err != nil {
if w.parent != nil {
return w.parent.Stat(p)
}
return pt, err
}
return pt, nil
}
// Walk walks the file tree rooted at root, calling walkFn for each file or directory in the tree, including root. All errors that arise visiting files and directories are filtered by walkFn. The files are walked in lexical order, which makes the output deterministic but means that for very large directories Walk can be inefficient. Walk does not follow symbolic links. - That is from the standard library. I know. Their grammar teachers can not be happy with them right now.
func (w withPkger) Walk(p string, wf filepath.WalkFunc) error {
err := w.base.Walk(p, wf)
if err != nil {
return err
}
if w.parent != nil {
return w.parent.Walk(p, wf)
}
return nil
}
// Remove removes the named file or (empty) directory.
func (w withPkger) Remove(p string) error {
err := w.base.Remove(p)
if err != nil {
return err
}
if w.parent != nil {
return w.parent.Remove(p)
}
return nil
}
// RemoveAll removes path and any children it contains. It removes everything it can but returns the first error it encounters. If the path does not exist, RemoveAll returns nil (no error).
func (w withPkger) RemoveAll(p string) error {
err := w.base.RemoveAll(p)
if err != nil {
return err
}
if w.parent != nil {
return w.parent.RemoveAll(p)
}
return nil
}
| Wrap |
pprof.go | // Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
import (
"runtime"
"runtime/pprof"
"runtime/trace"
"time"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/urpc"
)
const (
// DefaultBlockProfileRate is the default profiling rate for block
// profiles.
//
// The default here is 10%, which will record a stacktrace 10% of the
// time when blocking occurs. Since these events should not be super
// frequent, we expect this to achieve a reasonable balance between
// collecting the data we need and imposing a high performance cost
// (e.g. skewing even the CPU profile).
DefaultBlockProfileRate = 10
// DefaultMutexProfileRate is the default profiling rate for mutex
// profiles. Like the block rate above, we use a default rate of 10%
// for the same reasons.
DefaultMutexProfileRate = 10
)
// Profile includes profile-related RPC stubs. It provides a way to
// control the built-in runtime profiling facilities.
//
// The profile object must be instantiated via NewProfile.
type Profile struct {
// kernel is the kernel under profile. It's immutable.
kernel *kernel.Kernel
// cpuMu protects CPU profiling.
cpuMu sync.Mutex
// blockMu protects block profiling.
blockMu sync.Mutex
// mutexMu protects mutex profiling.
mutexMu sync.Mutex
// traceMu protects trace profiling.
traceMu sync.Mutex
// done is closed when profiling is done.
done chan struct{}
}
// NewProfile returns a new Profile object.
func NewProfile(k *kernel.Kernel) *Profile |
// Stop implements urpc.Stopper.Stop.
func (p *Profile) Stop() {
close(p.done)
}
// CPUProfileOpts contains options specifically for CPU profiles.
type CPUProfileOpts struct {
// FilePayload is the destination for the profiling output.
urpc.FilePayload
// Duration is the duration of the profile.
Duration time.Duration `json:"duration"`
}
// CPU is an RPC stub which collects a CPU profile.
func (p *Profile) CPU(o *CPUProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
return nil // Allowed.
}
output := o.FilePayload.Files[0]
defer output.Close()
p.cpuMu.Lock()
defer p.cpuMu.Unlock()
// Returns an error if profiling is already started.
if err := pprof.StartCPUProfile(output); err != nil {
return err
}
defer pprof.StopCPUProfile()
// Collect the profile.
select {
case <-time.After(o.Duration):
case <-p.done:
}
return nil
}
// HeapProfileOpts contains options specifically for heap profiles.
type HeapProfileOpts struct {
// FilePayload is the destination for the profiling output.
urpc.FilePayload
// Delay is the sleep time, similar to Duration. It may not
// affect the data collected, however, since the heap profile
// reflects only the memory that is still live when it is collected.
Delay time.Duration `json:"delay"`
}
// Heap generates a heap profile.
func (p *Profile) Heap(o *HeapProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
return nil // Allowed.
}
output := o.FilePayload.Files[0]
defer output.Close()
// Wait for the given delay.
select {
case <-time.After(o.Delay):
case <-p.done:
}
// Get up-to-date statistics.
runtime.GC()
// Write the given profile.
return pprof.WriteHeapProfile(output)
}
// GoroutineProfileOpts contains options specifically for goroutine profiles.
type GoroutineProfileOpts struct {
// FilePayload is the destination for the profiling output.
urpc.FilePayload
}
// Goroutine dumps out the stack trace for all running goroutines.
func (p *Profile) Goroutine(o *GoroutineProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
return nil // Allowed.
}
output := o.FilePayload.Files[0]
defer output.Close()
return pprof.Lookup("goroutine").WriteTo(output, 2)
}
// BlockProfileOpts contains options specifically for block profiles.
type BlockProfileOpts struct {
// FilePayload is the destination for the profiling output.
urpc.FilePayload
// Duration is the duration of the profile.
Duration time.Duration `json:"duration"`
// Rate is the block profile rate.
Rate int `json:"rate"`
}
// Block dumps a blocking profile.
func (p *Profile) Block(o *BlockProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
return nil // Allowed.
}
output := o.FilePayload.Files[0]
defer output.Close()
p.blockMu.Lock()
defer p.blockMu.Unlock()
// Always set the rate. We then wait to collect a profile at this rate,
// and disable when we're done.
rate := DefaultBlockProfileRate
if o.Rate != 0 {
rate = o.Rate
}
runtime.SetBlockProfileRate(rate)
defer runtime.SetBlockProfileRate(0)
// Collect the profile.
select {
case <-time.After(o.Duration):
case <-p.done:
}
return pprof.Lookup("block").WriteTo(output, 0)
}
// MutexProfileOpts contains options specifically for mutex profiles.
type MutexProfileOpts struct {
// FilePayload is the destination for the profiling output.
urpc.FilePayload
// Duration is the duration of the profile.
Duration time.Duration `json:"duration"`
// Fraction is the mutex profile fraction.
Fraction int `json:"fraction"`
}
// Mutex dumps a mutex profile.
func (p *Profile) Mutex(o *MutexProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
return nil // Allowed.
}
output := o.FilePayload.Files[0]
defer output.Close()
p.mutexMu.Lock()
defer p.mutexMu.Unlock()
// Always set the fraction.
fraction := DefaultMutexProfileRate
if o.Fraction != 0 {
fraction = o.Fraction
}
runtime.SetMutexProfileFraction(fraction)
defer runtime.SetMutexProfileFraction(0)
// Collect the profile.
select {
case <-time.After(o.Duration):
case <-p.done:
}
return pprof.Lookup("mutex").WriteTo(output, 0)
}
// TraceProfileOpts contains options specifically for traces.
type TraceProfileOpts struct {
// FilePayload is the destination for the profiling output.
urpc.FilePayload
// Duration is the duration of the profile.
Duration time.Duration `json:"duration"`
}
// Trace is an RPC stub which starts collection of an execution trace.
func (p *Profile) Trace(o *TraceProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
return nil // Allowed.
}
output, err := fd.NewFromFile(o.FilePayload.Files[0])
if err != nil {
return err
}
defer output.Close()
p.traceMu.Lock()
defer p.traceMu.Unlock()
// Returns an error if profiling is already started.
if err := trace.Start(output); err != nil {
output.Close()
return err
}
defer trace.Stop()
// Ensure all trace contexts are registered.
p.kernel.RebuildTraceContexts()
// Wait for the trace.
select {
case <-time.After(o.Duration):
case <-p.done:
}
// Similarly to the case above, if tasks have not ended traces, we will
// lose information. Thus we need to rebuild the tasks in order to have
// complete information. This will not lose information if multiple
// traces are overlapping.
p.kernel.RebuildTraceContexts()
return nil
}
| {
return &Profile{
kernel: k,
done: make(chan struct{}),
}
} |
test_random_walker.py | import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
# older versions of scipy raise a warning with new NumPy because they use
# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.
SCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\A\Z'
PYAMG_EXPECTED_WARNING = 'pyamg|\A\Z'
PYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING
def make_2d_syntheticdata(lx, ly=None):
if ly is None:
ly = lx
np.random.seed(1234)
data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (
0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))
data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5] = 1
seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2
return data, seeds
def make_3d_syntheticdata(lx, ly=None, lz=None):
if ly is None:
ly = lx
if lz is None:
lz = lx
np.random.seed(1234)
data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l,
lz // 2 - small_l:lz // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1,
lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
# make a hole
hole_size = np.max([1, small_l // 8])
data[lx // 2 - small_l,
ly // 2 - hole_size:ly // 2 + hole_size,
lz // 2 - hole_size:lz // 2 + hole_size] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5, lz // 5] = 1
seeds[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 2 - small_l // 4] = 2
return data, seeds
def test_2d_bf():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
# Now test with more than two labels
labels[55, 80] = 3
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert len(full_prob_bf) == 3
assert data.shape == labels.shape
def test_2d_cg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_cg = random_walker(data, labels, beta=90, mode='cg')
assert (labels_cg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
full_prob = random_walker(data, labels, beta=90, mode='cg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg
def test_2d_cg_mg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
with expected_warnings([expected]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings([expected]):
full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_types():
|
def test_reorder_labels():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[labels == 2] = 4
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_bf
def test_2d_inactive():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[10:20, 10:20] = -1
labels[46:50, 33:38] = -2
labels = random_walker(data, labels, beta=90)
assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d_inactive():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
old_labels = np.copy(labels)
labels[5:25, 26:29, 26:29] = -1
after_labels = np.copy(labels)
with expected_warnings(['"cg" mode|CObject type' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels, old_labels, after_labels
def test_multispectral_2d():
lx, ly = 70, 100
data, labels = make_2d_syntheticdata(lx, ly)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_multispectral_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_spacing_0():
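    # The `spacing` kwarg compensates for anisotropic voxel sizes: the volume
    # is shrunk along Z, and spacing=(1., 1., 0.5) restores isotropy.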
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
# Rescale `data` along Z axis
data_aniso = np.zeros((n, n, n // 2))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n, n // 2),
mode='constant',
anti_aliasing=False)
# Generate new labels
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 4 - small_l // 8] = 2
# Test with `spacing` kwarg
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 1., 0.5))
assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
def test_spacing_1():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
# Rescale `data` along Y axis
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
data_aniso = np.zeros((n, n * 2, n))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n * 2, n),
mode='constant',
anti_aliasing=False)
# Generate new labels
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly - small_l // 2,
lz // 2 - small_l // 4] = 2
# Test with `spacing` kwarg
# First, anisotropic along Y
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 2., 1.))
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
# Rescale `data` along X axis
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
data_aniso = np.zeros((n, n * 2, n))
for i in range(data.shape[1]):
data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
mode='constant',
anti_aliasing=False)
# Generate new labels
small_l = int(lx // 5)
labels_aniso2 = np.zeros_like(data_aniso)
labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
labels_aniso2[lx - small_l // 2,
ly // 2 + small_l // 4,
lz // 2 - small_l // 4] = 2
# Anisotropic along X
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso2 = random_walker(data_aniso,
labels_aniso2,
mode='cg', spacing=(2., 1., 1.))
assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
def test_trivial_cases():
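    # A fully labeled image should be returned unchanged, with a warning.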
# When all voxels are labeled
img = np.ones((10, 10))
labels = np.ones((10, 10))
with expected_warnings(["Returning provided labels"]):
pass_through = random_walker(img, labels)
np.testing.assert_array_equal(pass_through, labels)
# When all voxels are labeled AND return_full_prob is True
labels[:, :5] = 3
expected = np.concatenate(((labels == 1)[..., np.newaxis],
(labels == 3)[..., np.newaxis]), axis=2)
with expected_warnings(["Returning provided labels"]):
test = random_walker(img, labels, return_full_prob=True)
np.testing.assert_array_equal(test, expected)
def test_length2_spacing():
# If this passes without raising an exception (warnings OK), the new
# spacing code is working properly.
np.random.seed(42)
img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
labels = np.zeros((10, 10), dtype=np.uint8)
labels[2, 4] = 1
labels[6, 8] = 4
random_walker(img, labels, spacing=(1., 2.))
def test_bad_inputs():
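    # Invalid dimensionality, spacing length, and mode should raise ValueError.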
# Too few dimensions
img = np.ones(10)
labels = np.arange(10)
with testing.raises(ValueError):
random_walker(img, labels)
with testing.raises(ValueError):
random_walker(img, labels, multichannel=True)
# Too many dimensions
np.random.seed(42)
img = np.random.normal(size=(3, 3, 3, 3, 3))
labels = np.arange(3 ** 5).reshape(img.shape)
with testing.raises(ValueError):
random_walker(img, labels)
with testing.raises(ValueError):
random_walker(img, labels, multichannel=True)
# Spacing incorrect length
img = np.random.normal(size=(10, 10))
labels = np.zeros((10, 10))
labels[2, 4] = 2
labels[6, 8] = 5
with testing.raises(ValueError):
random_walker(img, labels, spacing=(1,))
# Invalid mode
img = np.random.normal(size=(10, 10))
labels = np.zeros((10, 10))
with testing.raises(ValueError):
random_walker(img, labels, mode='bad')
def test_isolated_seeds():
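    # Seeds disconnected from any unlabeled pixel must keep their label
    # without raising an error.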
np.random.seed(0)
a = np.random.random((7, 7))
mask = - np.ones(a.shape)
# This pixel is an isolated seed
mask[1, 1] = 1
# Unlabeled pixels
mask[3:, 3:] = 0
# Seeds connected to unlabeled pixels
mask[4, 4] = 2
mask[6, 6] = 1
# Test that no error is raised, and that labels of isolated seeds are OK
res = random_walker(a, mask)
assert res[1, 1] == 1
res = random_walker(a, mask, return_full_prob=True)
assert res[0, 1, 1] == 1
assert res[1, 1, 1] == 0