Code fill-in-the-middle samples: file_name (string, 3–137 chars), prefix (string, 0–918k chars), suffix (string, 0–962k chars), middle (string, 0–812k chars). Concatenating prefix + middle + suffix recovers each source file, as in the reassembled samples below.
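A minimal Python sketch of that reassembly; the row literal is a hypothetical miniature, not an actual record:

# Hypothetical miniature record; real rows hold full file contents.
row = {
    "file_name": "example.js",
    "prefix": "function ",
    "middle": "resolve",
    "suffix": "(dir) {\n  return dir\n}",
}

def reassemble(r):
    # A fill-in-the-middle sample is the three spans concatenated in
    # source order: prefix, then the held-out middle, then the suffix.
    return r["prefix"] + r["middle"] + r["suffix"]

print(reassemble(row))  # function resolve(dir) { ... }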
webpack.base.conf.js

'use strict'
const path = require('path')
const utils = require('./utils')
const config = require('../config')
const vueLoaderConfig = require('./vue-loader.conf')
function resolve (dir) {
return path.join(__dirname, '..', dir)
}
const createLintingRule = () => ({
test: /\.(js|vue)$/,
loader: 'eslint-loader',
enforce: 'pre',
include: [resolve('src'), resolve('test')],
options: {
formatter: require('eslint-friendly-formatter'),
emitWarning: !config.dev.showEslintErrorsInOverlay
}
})
module.exports = {
context: path.resolve(__dirname, '../'),
entry: {
app: ['babel-polyfill', './src/main.js']
},
output: {
path: config.build.assetsRoot,
filename: '[name].js',
publicPath: process.env.NODE_ENV === 'production'
? config.build.assetsPublicPath
: config.dev.assetsPublicPath
},
resolve: {
extensions: ['.js', '.vue', '.json'],
alias: {
'vue$': 'vue/dist/vue.esm.js',
'@': resolve('src'),
}
},
module: {
rules: [
...(config.dev.useEslint ? [createLintingRule()] : []),
{
test: /\.vue$/,
loader: 'vue-loader',
options: vueLoaderConfig
},
{
test: /\.js$/,
loader: 'babel-loader',
include: [resolve('src'), resolve('test'), resolve('node_modules/webpack-dev-server/client')]
},
{
test: /\.(png|jpe?g|gif|svg)(\?.*)?$/,
loader: 'url-loader',
options: {
limit: 10000,
name: utils.assetsPath('img/[name].[hash:7].[ext]')
}
},
{
test: /\.scss$/,
        use: ['style-loader', 'css-loader', 'sass-loader']
},
{
test: /\.(mp4|webm|ogg|mp3|wav|flac|aac)(\?.*)?$/,
loader: 'url-loader',
options: {
limit: 10000,
name: utils.assetsPath('media/[name].[hash:7].[ext]')
}
},
{
test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/,
loader: 'url-loader',
options: {
limit: 10000,
name: utils.assetsPath('fonts/[name].[hash:7].[ext]')
}
}
]
},
node: {
// prevent webpack from injecting useless setImmediate polyfill because Vue
// source contains it (although only uses it if it's native).
setImmediate: false,
// prevent webpack from injecting mocks to Node native modules
// that does not make sense for the client
dgram: 'empty',
fs: 'empty',
net: 'empty',
tls: 'empty',
child_process: 'empty'
}
}
test_githubhandler.py

import json
import pytest
from buildtrigger.test.githubmock import get_github_trigger
from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException,
InvalidPayloadException)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict
@pytest.fixture
def github_trigger():
return get_github_trigger()
@pytest.mark.parametrize('payload, expected_error, expected_message', [
('{"zen": true}', SkipRequestException, ""),
('{}', InvalidPayloadException, "Missing 'repository' on request"),
('{"repository": "foo"}', InvalidPayloadException, "Missing 'owner' on repository"),
# Valid payload:
('''{
"repository": {
"owner": {
"name": "someguy"
},
"name": "somerepo",
"ssh_url": "someurl"
},
"ref": "refs/tags/foo",
"head_commit": {
"id": "11d6fbc",
"url": "http://some/url",
"message": "some message",
"timestamp": "NOW"
}
}''', None, None),
# Skip message:
('''{
"repository": {
"owner": {
"name": "someguy"
},
"name": "somerepo",
"ssh_url": "someurl"
},
"ref": "refs/tags/foo",
"head_commit": {
"id": "11d6fbc",
"url": "http://some/url",
"message": "[skip build]",
"timestamp": "NOW"
}
}''', SkipRequestException, ''),
])
def test_handle_trigger_request(github_trigger, payload, expected_error, expected_message):
def get_payload():
return json.loads(payload)
request = AttrDict(dict(get_json=get_payload))
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
github_trigger.handle_trigger_request(request)
assert str(ipe.value) == expected_message
else:
assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild)
@pytest.mark.parametrize('dockerfile_path, contents', [
('/Dockerfile', 'hello world'),
('somesubdir/Dockerfile', 'hi universe'),
('unknownpath', None),
])
def test_load_dockerfile_contents(dockerfile_path, contents):
trigger = get_github_trigger(dockerfile_path)
assert trigger.load_dockerfile_contents() == contents
@pytest.mark.parametrize('username, expected_response', [
('unknownuser', None),
('knownuser', {'html_url': 'https://bitbucket.org/knownuser', 'avatar_url': 'avatarurl'}),
])
def test_lookup_user(username, expected_response, github_trigger):
assert github_trigger.lookup_user(username) == expected_response
def test_list_build_subdirs(github_trigger):
assert github_trigger.list_build_subdirs() == ['Dockerfile', 'somesubdir/Dockerfile']
def test_list_build_source_namespaces(github_trigger):
namespaces_expected = [
{
'personal': True,
'score': 1,
'avatar_url': 'avatarurl',
'id': 'knownuser',
'title': 'knownuser',
'url': 'https://bitbucket.org/knownuser',
},
{
'score': 0,
'title': 'someorg',
'personal': False,
'url': '',
'avatar_url': 'avatarurl',
'id': 'someorg'
}
]
found = github_trigger.list_build_source_namespaces()
found.sort()
namespaces_expected.sort()
assert found == namespaces_expected
main.rs

mod executor;
fn main() {
println!(
"{:?}",
executor::Executor::new("tes\nhht", None, Vec::<u8>::new(), Vec::<u8>::new())
);
}
field.go

package log
import (
"fmt"
"strings"
"time"
"go.uber.org/zap"
)
// FieldAid is the application's unique identifier.
func FieldAid(value string) Field {
return String("aid", value)
}
// FieldMod is the module name.
func FieldMod(value string) Field {
value = strings.Replace(value, " ", ".", -1)
return String("mod", value)
}
// FieldAddr is the address of a dependency instance. For MySQL, with dsn = "root:juno@tcp(127.0.0.1:3306)/juno?charset=utf8", addr is "127.0.0.1:3306".
func FieldAddr(value string) Field {
return String("addr", value)
}
// FieldAddrAny ...
func FieldAddrAny(value interface{}) Field {
return Any("addr", value)
}
// FieldName ...
func FieldName(value string) Field {
return String("name", value)
}
// FieldType ...
func FieldType(value string) Field {
return String("type", value)
}
// FieldCode ...
func FieldCode(value int32) Field {
return Int32("code", value)
}
// FieldCost is the elapsed time.
func FieldCost(value time.Duration) Field {
return String("cost", fmt.Sprintf("%.3f", float64(value.Round(time.Microsecond))/float64(time.Millisecond)))
}
// FieldKey ...
func FieldKey(value string) Field {
return String("key", value)
}
// FieldKeyAny ...
func FieldKeyAny(value interface{}) Field {
return Any("key", value)
}
// FieldValue ...
func FieldValue(value string) Field {
return String("value", value)
}
// FieldValueAny ...
func FieldValueAny(value interface{}) Field {
return Any("value", value)
}
// FieldErrKind ...
func FieldErrKind(value string) Field {
return String("errKind", value)
}
// FieldErr ...
func FieldErr(err error) Field {
return zap.Error(err)
}
// FieldStringErr ...
func FieldStringErr(err string) Field {
return String("err", err)
}
// FieldExtMessage ...
func FieldExtMessage(vals ...interface{}) Field {
return zap.Any("ext", vals)
}
// FieldStack ...
func FieldStack(value []byte) Field {
return ByteString("stack", value)
}
// FieldMethod ...
func FieldMethod(value string) Field {
	return String("method", value)
}
// FieldEvent ...
func FieldEvent(value string) Field {
return String("event", value)
}
delegate.py

import sys
import subprocess
import yaml
if len(sys.argv) != 4:
print('usage: delegate.py [val1_stake] [val2_stake] [val3_stake]')
exit(0)
# Load config
confFile = open('./conf.yml')
conf = yaml.safe_load(confFile)
def delegate_cmd(valNumber, amount):
    cmd = ["spnd", "tx", "staking", "delegate"]
    cmd.append(conf['validator_addresses'][valNumber])
    stake = amount + conf['staking_denom']
    cmd.append(stake)
    cmd.append('--from')
    cmd.append(conf['validator_names'][valNumber])
    cmd.append('--chain-id')
    cmd.append(conf['chain_id'])
    cmd.append('-y')
    return cmd
# Perform delegation
for s in sys.argv[1:]:
if not s.isnumeric():
print(s + ' must be a number')
exit(1)
i = 0
for s in sys.argv[1:]:
if int(s) > 0:
print(i)
cmd = delegate_cmd(i, s)
print('running: ' + " ".join(cmd))
subprocess.run(cmd, check=True)
i += 1
print()
print()
print('delegation performed, to show validator set:')
print('spnd q tendermint-validator-set')
print()
print('to show consensus state')
print('spnd q ibc client self-consensus-state')
test_supervisor.rs

extern crate actix;
extern crate futures;
extern crate tokio;
extern crate tokio_timer;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use actix::prelude::*;
use futures::{future, Future};
use tokio_timer::Delay;
struct Die;
impl Message for Die {
type Result = ();
}
struct MyActor(Arc<AtomicUsize>, Arc<AtomicUsize>, Arc<AtomicUsize>);
impl Actor for MyActor {
type Context = Context<Self>;
fn started(&mut self, _: &mut Context<MyActor>) {
self.0.fetch_add(1, Ordering::Relaxed);
}
}
impl actix::Supervised for MyActor {
fn restarting(&mut self, _: &mut actix::Context<MyActor>) {
self.1.fetch_add(1, Ordering::Relaxed);
}
}
impl actix::Handler<Die> for MyActor {
type Result = ();
fn handle(&mut self, _: Die, ctx: &mut actix::Context<MyActor>) {
self.2.fetch_add(1, Ordering::Relaxed);
ctx.stop();
}
}
#[test]
fn test_supervisor_restart() {
let starts = Arc::new(AtomicUsize::new(0));
let restarts = Arc::new(AtomicUsize::new(0));
let messages = Arc::new(AtomicUsize::new(0));
let starts2 = Arc::clone(&starts);
let restarts2 = Arc::clone(&restarts);
let messages2 = Arc::clone(&messages);
let addr = Arc::new(Mutex::new(None));
let addr2 = Arc::clone(&addr);
System::run(move || {
let addr =
actix::Supervisor::start(move |_| MyActor(starts2, restarts2, messages2));
addr.do_send(Die);
addr.do_send(Die);
*addr2.lock().unwrap() = Some(addr);
tokio::spawn(Delay::new(Instant::now() + Duration::new(0, 100_000)).then(
|_| {
Arbiter::system().do_send(actix::msgs::SystemExit(0));
future::result(Ok(()))
},
));
});
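    // Two Die messages are handled (messages == 2); each stop triggers a
    // supervisor restart (restarts == 2), and started() runs once initially
    // plus once per restart (starts == 3).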
assert_eq!(starts.load(Ordering::Relaxed), 3);
assert_eq!(restarts.load(Ordering::Relaxed), 2);
assert_eq!(messages.load(Ordering::Relaxed), 2);
}
edgeDetection.py

# Perform edge detection using Roberts cross-gradient and Sobel operators over an image
import cv2
import math
import numpy as np
def robertCrossGradient(image):
#Objective: Performing Robert Cross Gradient Edge Detection over an Image
#Input: Original Image
#Output: Resultant Image
#Robert Cross Operator
# x 0 1
# -1 0
# y 1 0
# 0 -1
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale
resultant_image = image.copy()
for i in range(0,image.shape[0]-1):
for j in range(0,image.shape[1]-1):
            # Cast to int to avoid uint8 wrap-around before squaring
            gx = int(image[i, j+1]) - int(image[i+1, j])
            gy = int(image[i, j]) - int(image[i+1, j+1])
            resultant_image[i, j] = min(255, int(math.sqrt(gx*gx + gy*gy)))
return resultant_image
def sobelOperator(image):
#Objective: Performing Sobel Edge Detection over an Image
#Input: Original Image
#Output: Resultant Image
#Sobel Operator
# x -1 -2 -1
# 0 0 0
# 1 2 1
#y -1 0 1
# -2 0 2
# -1 0 1
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale
resultant_image = image.copy()
#Applying Padding
rows,cols = image.shape
image = np.insert(image,0,0,axis=0) #top
image = np.insert(image,rows+1,0,axis=0) #bottom
image = np.insert(image,0,0,axis=1) #left
image = np.insert(image,cols+1,0,axis=1) #right
for i in range(1, image.shape[0]-1):
for j in range(1, image.shape[1]-1):
            # Cast to int to avoid uint8 wrap-around; the last fx term is the
            # top-right neighbour image[i-1, j+1] per the kernel above
            fx = (int(image[i+1, j-1]) + 2*int(image[i+1, j]) + int(image[i+1, j+1])
                  - int(image[i-1, j-1]) - 2*int(image[i-1, j]) - int(image[i-1, j+1]))
            fy = (int(image[i-1, j+1]) + 2*int(image[i, j+1]) + int(image[i+1, j+1])
                  - int(image[i-1, j-1]) - 2*int(image[i, j-1]) - int(image[i+1, j-1]))
            resultant_image[i-1, j-1] = min(255, int(math.sqrt(fx*fx + fy*fy)))
return resultant_image
img = cv2.imread('image5.jpg')
output = sobelOperator(img)
cv2.imshow('image',output)
cv2.waitKey(0)
cv2.destroyAllWindows()
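For comparison, a vectorized sketch of the same Sobel response using cv2.filter2D, assuming the same BGR input; BORDER_CONSTANT approximates the explicit zero padding in the loop version above:

import cv2
import numpy as np

def sobel_vectorized(image):
    # Same kernels as the comment block in sobelOperator, applied via
    # filter2D instead of explicit Python loops.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kx = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float64)
    ky = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float64)
    fx = cv2.filter2D(gray, cv2.CV_64F, kx, borderType=cv2.BORDER_CONSTANT)
    fy = cv2.filter2D(gray, cv2.CV_64F, ky, borderType=cv2.BORDER_CONSTANT)
    magnitude = np.sqrt(fx * fx + fy * fy)
    return np.clip(magnitude, 0, 255).astype(np.uint8)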
log_config.go

package log
var (
trueValue = true
)
// NewConfig returns a new config instance, initialized with default values
func NewConfig() *Config {
return (&Config{}).InitDefaults()
}
type Config struct {
// Level is the log level. Default: normal
Level string `json:"level"`
// Handler specifies the log handler to use. Default: json
Handler string `json:"formatter"`
// File specifies the log file settings. Default: nil (log to stdout)
File *LumberjackConfig `json:"file,omitempty"`
// Include go routine ID as 'gid' in logged fields
GoRoutineID *bool `json:"go_routine_id,omitempty"`
// Named contains the configuration of named loggers.
// Any nested "Named" elements are ignored.
	Named map[string]*Config `json:"named,omitempty"`
}

func (c *Config) InitDefaults() *Config {
	c.Level = "normal"
c.Handler = "json"
c.GoRoutineID = &trueValue
return c
}
// Stdout is a LumberjackConfig with an empty Filename that leads to logging to
// stdout.
var Stdout = &LumberjackConfig{}
type LumberjackConfig struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime"`
// Compress determines if the rotated log files should be compressed
// using gzip. The default is not to perform compression.
Compress bool `json:"compress"`
}

heuristic_test.go

package heuristic
import (
"math/big"
"reflect"
"testing"
"github.com/mmcloughlin/addchain/alg/algtest"
"github.com/mmcloughlin/addchain/internal/bigints"
)
// References:
//
// [hehcc:exp] Christophe Doche. Exponentiation. Handbook of Elliptic and Hyperelliptic Curve
// Cryptography, chapter 9. 2006.
// http://koclab.cs.ucsb.edu/teaching/ecc/eccPapers/Doche-ch09.pdf
func TestAlgorithms(t *testing.T) {
heuristics := []Heuristic{
UseFirst(Halving{}, DeltaLargest{}),
UseFirst(Halving{}, Approximation{}),
}
for _, heuristic := range heuristics {
suite := algtest.SequenceAlgorithmSuite{
Algorithm: NewAlgorithm(heuristic),
AcceptsLargeInputs: true,
}
t.Run(suite.Algorithm.String(), suite.Tests())
}
}
func TestHalving(t *testing.T) {
cases := []struct {
F []*big.Int
Target *big.Int
Expect []*big.Int
}{
// Example from [hehcc:exp], page 163.
{
F: bigints.Int64s(14),
Target: big.NewInt(382),
Expect: bigints.Int64s(14, 23, 46, 92, 184, 368),
},
// Simple powers of two case.
{
F: bigints.Int64s(1, 2),
Target: big.NewInt(8),
Expect: bigints.Int64s(2, 4),
},
}
h := Halving{}
for _, c := range cases {
if got := h.Suggest(c.F, c.Target); !reflect.DeepEqual(c.Expect, got) {
t.Errorf("Suggest(%v, %v) = %v; expect %v", c.F, c.Target, got, c.Expect)
}
}
} |
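The first case mirrors the halving example from [hehcc:exp]; a Python sketch of the arithmetic behind the expected suggestion, assuming the heuristic factors target - f into q·2^k and proposes q and its doublings:

# With f = 14 on hand, write 382 - 14 = 368 = 23 * 2**4; inserting 23 and
# its doublings 46, 92, 184, 368 lets 382 be formed as 368 + 14.
f, target, k = 14, 382, 4
q = (target - f) >> k
assert (target - f) == q << k and q == 23
chain = [q << i for i in range(k + 1)]  # [23, 46, 92, 184, 368]
assert chain[-1] + f == target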
powerpc64.rs

// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME:
// Alignment of 128 bit types is not currently handled, this will
// need to be fixed when PowerPC vector support is added.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
#[derive(Debug, Clone, Copy, PartialEq)]
enum ABI {
ELFv1, // original ABI used for powerpc64 (big-endian)
ELFv2, // newer ABI used for powerpc64le and musl (both endians)
}
use self::ABI::*;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// ELFv1 only passes one-member aggregates transparently.
// ELFv2 passes up to eight uniquely addressable members.
if (abi == ELFv1 && arg.layout.size > unit.size)
|| arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
RegKind::Vector => arg.layout.size.bits() == 128
};
if valid_unit {
Some(Uniform {
unit,
total: arg.layout.size
})
} else {
None
}
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
}
// The ELFv1 ABI doesn't return aggregates in registers
if abi == ELFv1 {
ret.make_indirect();
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret, abi) {
ret.cast_to(uniform);
return;
}
let size = ret.layout.size;
let bits = size.bits();
if bits <= 128 {
let unit = if cx.data_layout().endian == Endian::Big {
Reg { kind: RegKind::Integer, size }
} else if bits <= 8 {
Reg::i8()
} else if bits <= 16 {
Reg::i16()
} else if bits <= 32 {
Reg::i32()
} else {
Reg::i64()
};
ret.cast_to(Uniform {
unit,
total: size
});
return;
}
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg, abi) {
arg.cast_to(uniform);
return;
}
let size = arg.layout.size;
let (unit, total) = if size.bits() <= 64 {
// Aggregates smaller than a doubleword should appear in
// the least-significant bits of the parameter doubleword.
(Reg { kind: RegKind::Integer, size }, size)
} else {
// Aggregates larger than a doubleword should be padded
// at the tail to fill out a whole number of doublewords.
let align = Align::from_bits(64, 64).unwrap();
(Reg::i64(), size.abi_align(align))
};
arg.cast_to(Uniform {
unit,
total
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
let abi = if cx.target_spec().target_env == "musl" {
ELFv2
} else {
match cx.data_layout().endian {
Endian::Big => ELFv1,
Endian::Little => ELFv2
}
};
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, abi);
}
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg, abi);
}
}

atn_config.rs

use std::fmt::{Debug, Error, Formatter};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use murmur3::murmur3_32::MurmurHasher;
use crate::atn_config::ATNConfigType::LexerATNConfig;
use crate::atn_state::{ATNState, ATNStateRef, ATNStateType};
use crate::dfa::ScopeExt;
use crate::lexer_action_executor::LexerActionExecutor;
use crate::prediction_context::PredictionContext;
use crate::semantic_context::SemanticContext;
#[derive(Clone)]
pub struct ATNConfig {
precedence_filter_suppressed: bool,
//todo since ATNState is immutable when we started working with ATNConfigs
// looks like it is possible to have usual reference here
state: ATNStateRef,
alt: isize,
//todo maybe option is unnecessary and PredictionContext::EMPTY would be enough
//another todo check arena alloc
context: Option<Arc<PredictionContext>>,
pub semantic_context: Box<SemanticContext>,
pub reaches_into_outer_context: isize,
pub(crate) config_type: ATNConfigType,
}
impl Eq for ATNConfig {}
impl PartialEq for ATNConfig {
fn eq(&self, other: &Self) -> bool {
self.get_state() == other.get_state()
&& self.get_alt() == other.get_alt()
&& (Arc::ptr_eq(self.get_context().unwrap(), other.get_context().unwrap())
|| self.get_context() == other.get_context())
&& self.get_type() == other.get_type()
&& self.semantic_context == other.semantic_context
&& self.precedence_filter_suppressed == other.precedence_filter_suppressed
}
}
impl Hash for ATNConfig {
fn hash<H: Hasher>(&self, state: &mut H) {
state.write_i32(self.get_state() as i32);
state.write_i32(self.get_alt() as i32);
match self.get_context() {
None => state.write_i32(0),
Some(c) => c.hash(state),
}
self.semantic_context.hash(state);
if let LexerATNConfig {
lexer_action_executor,
passed_through_non_greedy_decision,
} = &self.config_type
{
state.write_i32(if *passed_through_non_greedy_decision {
1
} else {
0
});
match lexer_action_executor {
None => state.write_i32(0),
Some(ex) => ex.hash(state),
}
}
}
}
impl Debug for ATNConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
f.write_fmt(format_args!(
"({},{},[{}]",
self.state,
self.alt,
self.context.as_deref().unwrap()
))?;
if self.reaches_into_outer_context > 0 {
f.write_fmt(format_args!(",up={}", self.reaches_into_outer_context))?;
}
f.write_str(")")
}
}
#[derive(Eq, PartialEq, Clone, Debug)]
pub(crate) enum ATNConfigType {
BaseATNConfig,
LexerATNConfig {
lexer_action_executor: Option<Box<LexerActionExecutor>>,
passed_through_non_greedy_decision: bool,
},
}
impl ATNConfig {
pub(crate) fn get_lexer_executor(&self) -> Option<&LexerActionExecutor> {
match &self.config_type {
ATNConfigType::BaseATNConfig => None,
ATNConfigType::LexerATNConfig {
lexer_action_executor,
..
} => lexer_action_executor.as_deref(),
}
}
pub fn default_hash(&self) -> u64 {
MurmurHasher::default().convert_with(|mut x| {
self.hash(&mut x);
x.finish()
})
}
pub fn new(
state: ATNStateRef,
alt: isize,
context: Option<Arc<PredictionContext>>,
) -> ATNConfig {
ATNConfig {
precedence_filter_suppressed: false,
state,
alt,
context,
semantic_context: Box::new(SemanticContext::NONE),
reaches_into_outer_context: 0,
config_type: ATNConfigType::BaseATNConfig,
}
}
pub fn new_with_semantic(
state: ATNStateRef,
alt: isize,
context: Option<Arc<PredictionContext>>,
semantic_context: Box<SemanticContext>,
    ) -> ATNConfig {
        let mut new = Self::new(state, alt, context);
        new.semantic_context = semantic_context;
        new
    }
pub fn new_lexer_atnconfig6(
_state: ATNStateRef,
_alt: isize,
_context: Arc<PredictionContext>,
) -> ATNConfig {
let mut atnconfig = ATNConfig::new(_state, _alt, Some(_context));
atnconfig.config_type = ATNConfigType::LexerATNConfig {
lexer_action_executor: None,
passed_through_non_greedy_decision: false,
};
atnconfig
}
pub fn cloned_with_new_semantic(
&self,
target: &dyn ATNState,
ctx: Box<SemanticContext>,
) -> ATNConfig {
let mut new = self.cloned(target);
new.semantic_context = ctx;
new
}
pub fn cloned(&self, target: &dyn ATNState) -> ATNConfig {
// println!("depth {}",PredictionContext::size(self.context.as_deref()));
let mut new = self.clone();
new.state = target.get_state_number();
if let ATNConfigType::LexerATNConfig {
passed_through_non_greedy_decision,
..
} = &mut new.config_type
{
*passed_through_non_greedy_decision = check_non_greedy_decision(self, target);
}
new
}
pub fn cloned_with_new_ctx(
&self,
target: &dyn ATNState,
ctx: Option<Arc<PredictionContext>>,
) -> ATNConfig {
let mut new = self.cloned(target);
new.context = ctx;
new
}
pub(crate) fn cloned_with_new_exec(
&self,
target: &dyn ATNState,
exec: Option<LexerActionExecutor>,
) -> ATNConfig {
let mut new = self.cloned(target);
if let ATNConfigType::LexerATNConfig {
lexer_action_executor,
passed_through_non_greedy_decision: _,
} = &mut new.config_type
{
*lexer_action_executor = exec.map(Box::new);
// *passed_through_non_greedy_decision = check_non_greedy_decision(self, target);
}
new
}
pub fn get_state(&self) -> ATNStateRef { self.state }
pub fn get_alt(&self) -> isize { self.alt }
pub(crate) fn get_type(&self) -> &ATNConfigType { &self.config_type }
pub fn get_context(&self) -> Option<&Arc<PredictionContext>> { self.context.as_ref() }
pub fn take_context(&mut self) -> Arc<PredictionContext> { self.context.take().unwrap() }
pub fn set_context(&mut self, _v: Arc<PredictionContext>) { self.context = Some(_v); }
pub fn get_reaches_into_outer_context(&self) -> isize { self.reaches_into_outer_context }
pub fn set_reaches_into_outer_context(&mut self, _v: isize) {
self.reaches_into_outer_context = _v
}
pub fn is_precedence_filter_suppressed(&self) -> bool { self.precedence_filter_suppressed }
pub fn set_precedence_filter_suppressed(&mut self, _v: bool) {
self.precedence_filter_suppressed = _v;
}
}
fn check_non_greedy_decision(source: &ATNConfig, target: &dyn ATNState) -> bool {
if let LexerATNConfig {
passed_through_non_greedy_decision: true,
..
} = source.get_type()
{
return true;
}
if let ATNStateType::DecisionState {
nongreedy: true, ..
} = target.get_state_type()
{
return true;
}
false
}
code_258_test.go

package _201910
import "testing"
func Test_addDigits(t *testing.T) {
tests := []struct {
name string
num int
want int
}{
{"1", 38, 2},
{"2", 11111, 5},
{"3", 9999999, 9},
{"4", 1, 1},
{"5", 0, 0},
{"6", 2, 2},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := addDigits(tt.num); got != tt.want {
t.Errorf("addDigits() = %v, want %v", got, tt.want)
}
})
}
} |
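These cases describe the digital root of num; a Python sketch of the iterative definition alongside the O(1) congruence shortcut (the Go addDigits under test is not included in this sample):

def add_digits(num):
    # Iterative definition: repeatedly sum decimal digits until one remains.
    while num >= 10:
        num = sum(int(d) for d in str(num))
    return num

def add_digits_const(num):
    # O(1) digital-root shortcut via congruence modulo 9.
    return 0 if num == 0 else 1 + (num - 1) % 9

assert all(add_digits(n) == add_digits_const(n) for n in (38, 11111, 9999999, 1, 0, 2))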
search-piece.module.ts

import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { Routes, RouterModule } from '@angular/router';
import { IonicModule } from '@ionic/angular';
import { SearchPiecePage } from './search-piece.page';
const routes: Routes = [
{
    path: '',
    component: SearchPiecePage
  }
];
@NgModule({
imports: [
CommonModule,
FormsModule,
IonicModule,
RouterModule.forChild(routes)
],
declarations: [SearchPiecePage]
})
export class SearchPiecePageModule {}

views.py

from flask import render_template, redirect, url_for, flash, request
from . import auth
from flask_login import login_user,logout_user,login_required
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user,form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "Login"
    return render_template('auth/login.html', form=form, title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,firstname= form.firstname.data,lastname= form.lastname.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to one minute pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
    return render_template('auth/register.html', form=form, title=title)
@auth.route('/logout')
@login_required
def logout():
    logout_user()
    return redirect(url_for("main.index"))
|
termbox-utils.go

package main
import (
"github.com/mattn/go-runewidth"
"github.com/nsf/termbox-go"
"github.com/simulatedsimian/go_sandbox/geom"
//"unicode/utf8"
)
func printAt(x, y int, s string, fg, bg termbox.Attribute) {
for _, r := range s {
termbox.SetCell(x, y, r, fg, bg)
w := runewidth.RuneWidth(r)
if w == 0 || (w == 2 && runewidth.IsAmbiguousWidth(r)) {
w = 1
}
x += w
}
}
func printAtDef(x, y int, s string) {
printAt(x, y, s, termbox.ColorDefault, termbox.ColorDefault)
}
func clearRect(rect geom.Rectangle, c rune, fg, bg termbox.Attribute) {
w, h := termbox.Size()
sz := geom.RectangleFromSize(geom.Coord{w, h})
toClear, ok := geom.RectangleIntersection(rect, sz)
if ok {
for y := toClear.Min.Y; y < toClear.Max.Y; y++ {
for x := toClear.Min.X; x < toClear.Max.X; x++ {
				termbox.SetCell(x, y, c, fg, bg)
}
}
}
}
func clearRectDef(rect geom.Rectangle) {
clearRect(rect, '.', termbox.ColorDefault, termbox.ColorDefault)
}
observatory_management_service.py

#!/usr/bin/env python
"""Service managing marine facility sites and deployments"""
import string
import time
import logging
from collections import defaultdict
from pyon.core.governance import ORG_MANAGER_ROLE, DATA_OPERATOR, OBSERVATORY_OPERATOR, INSTRUMENT_OPERATOR, GovernanceHeaderValues, has_org_role
from ooi.logging import log
from pyon.core.exception import NotFound, BadRequest, Inconsistent
from pyon.public import CFG, IonObject, RT, PRED, LCS, LCE, OT
from pyon.ion.resource import ExtendedResourceContainer
from ion.services.dm.utility.test.parameter_helper import ParameterHelper
from ion.services.dm.utility.granule import RecordDictionaryTool
from ion.services.sa.instrument.status_builder import AgentStatusBuilder
from ion.services.sa.observatory.deployment_activator import DeploymentPlanner
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from ion.services.sa.observatory.observatory_util import ObservatoryUtil
from ion.services.sa.observatory.asset_tracking import AssetTracking
from ion.services.sa.observatory.deployment_util import DeploymentUtil
from ion.services.sa.product.data_product_management_service import DataProductManagementService
from ion.processes.event.device_state import DeviceStateManager
from ion.util.geo_utils import GeoUtils
from ion.util.related_resources_crawler import RelatedResourcesCrawler
from ion.util.datastore.resources import ResourceRegistryUtil
from interface.services.sa.iobservatory_management_service import BaseObservatoryManagementService
from interface.objects import OrgTypeEnum, ComputedValueAvailability, ComputedIntValue, ComputedListValue, ComputedDictValue, AggregateStatusType, DeviceStatusType, TemporalBounds, DatasetWindow
from interface.objects import MarineFacilityOrgExtension, NegotiationStatusEnum, NegotiationTypeEnum, ProposalOriginatorEnum, GeospatialBounds
from datetime import datetime
import calendar
INSTRUMENT_OPERATOR_ROLE = 'INSTRUMENT_OPERATOR'
OBSERVATORY_OPERATOR_ROLE = 'OBSERVATORY_OPERATOR'
DATA_OPERATOR_ROLE = 'DATA_OPERATOR'
STATUS_UNKNOWN = {1:1, 2:1, 3:1, 4:1}
class ObservatoryManagementService(BaseObservatoryManagementService):
def on_init(self):
self.override_clients(self.clients)
self.agent_status_builder = AgentStatusBuilder(process=self)
self.HIERARCHY_DEPTH = {RT.InstrumentSite: 3,
RT.PlatformSite: 2,
RT.Subsite: 1,
RT.Observatory: 0,
}
self.HIERARCHY_LOOKUP = [RT.Observatory,
RT.Subsite,
RT.PlatformSite,
RT.InstrumentSite]
#todo: add lcs methods for these??
# # set up all of the policy interceptions
# if self.container and self.container.governance_controller:
# reg_precondition = self.container.governance_controller.register_process_operation_precondition
# reg_precondition(self, 'execute_observatory_lifecycle',
# self.RR2.policy_fn_lcs_precondition("observatory_id"))
# reg_precondition(self, 'execute_subsite_lifecycle',
# self.RR2.policy_fn_lcs_precondition("subsite_id"))
# reg_precondition(self, 'execute_platform_site_lifecycle',
# self.RR2.policy_fn_lcs_precondition("platform_site_id"))
# reg_precondition(self, 'execute_instrument_site_lifecycle',
# self.RR2.policy_fn_lcs_precondition("instrument_site_id"))
def override_clients(self, new_clients):
"""
Replaces the service clients with a new set of them... and makes sure they go to the right places
"""
self.RR2 = EnhancedResourceRegistryClient(new_clients.resource_registry)
#shortcut names for the import sub-services
if hasattr(new_clients, "resource_registry"):
self.RR = new_clients.resource_registry
if hasattr(new_clients, "instrument_management"):
self.IMS = new_clients.instrument_management
if hasattr(new_clients, "data_process_management"):
self.PRMS = new_clients.data_process_management
def _calc_geospatial_point_center(self, site):
siteTypes = [RT.Site, RT.Subsite, RT.Observatory, RT.PlatformSite, RT.InstrumentSite]
if site and site.type_ in siteTypes:
# if the geospatial_bounds is set then calculate the geospatial_point_center
for constraint in site.constraint_list:
if constraint.type_ == OT.GeospatialBounds:
site.geospatial_point_center = GeoUtils.calc_geospatial_point_center(constraint)
##########################################################################
#
# CRUD OPS
#
##########################################################################
def create_marine_facility(self, org=None):
"""Create an Org (domain of authority) that realizes a marine facility. This Org will have
set up roles for a marine facility. Shared resources, such as a device can only be
registered in one marine facility Org, and additionally in many virtual observatory Orgs. The
        marine facility operators will have more extensive permissions and will supersede virtual
        observatory commands.
@param org Org
@retval org_id str
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
log.debug("ObservatoryManagementService.create_marine_facility(): %s", org)
# create the org
org.org_type = OrgTypeEnum.MARINE_FACILITY
org_id = self.clients.org_management.create_org(org)
#Instantiate initial set of User Roles for this marine facility
instrument_operator_role = IonObject(RT.UserRole,
governance_name=INSTRUMENT_OPERATOR_ROLE,
name='Facility Operator', #previously Instrument Operator
description='Operate and post events related to Facility Platforms and Instruments')
self.clients.org_management.add_user_role(org_id, instrument_operator_role)
observatory_operator_role = IonObject(RT.UserRole,
governance_name=OBSERVATORY_OPERATOR_ROLE,
name='Facility Manager', # previously Observatory Operator
description='Change Facility configuration, post Site-related events')
self.clients.org_management.add_user_role(org_id, observatory_operator_role)
data_operator_role = IonObject(RT.UserRole,
governance_name=DATA_OPERATOR_ROLE,
name='Facility Data Operator', # previously Data Operator
description='Manipulate and post events related to Facility Data products')
self.clients.org_management.add_user_role(org_id, data_operator_role)
return org_id
def create_virtual_observatory(self, org=None):
"""Create an Org (domain of authority) that realizes a virtual observatory. This Org will have
set up roles for a virtual observatory. Shared resources, such as a device can only be
registered in one marine facility Org, and additionally in many virtual observatory Orgs. The
        marine facility operators will have more extensive permissions and will supersede virtual
        observatory commands.
@param org Org
@retval org_id str
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
log.debug("ObservatoryManagementService.create_virtual_observatory(): %s", org)
# create the org
org.org_type = OrgTypeEnum.VIRTUAL_OBSERVATORY
org_id = self.clients.org_management.create_org(org)
return org_id
def create_observatory(self, observatory=None, org_id=""):
"""Create a Observatory resource. An observatory is coupled
with one Org. The Org is created and associated as part of this call.
@param observatory Observatory
@retval observatory_id str
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(observatory)
# create the marine facility
observatory_id = self.RR2.create(observatory, RT.Observatory)
if org_id:
self.assign_resource_to_observatory_org(observatory_id, org_id)
return observatory_id
def read_observatory(self, observatory_id=''):
"""Read a Observatory resource
@param observatory_id str
@retval observatory Observatory
@throws NotFound object with specified id does not exist
"""
return self.RR2.read(observatory_id, RT.Observatory)
def update_observatory(self, observatory=None):
"""Update a Observatory resource
@param observatory Observatory
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(observatory)
return self.RR2.update(observatory, RT.Observatory)
def delete_observatory(self, observatory_id=''):
"""Delete a Observatory resource
@param observatory_id str
@throws NotFound object with specified id does not exist
"""
return self.RR2.lcs_delete(observatory_id, RT.Observatory)
def force_delete_observatory(self, observatory_id=''):
return self.RR2.force_delete(observatory_id, RT.Observatory)
def create_subsite(self, subsite=None, parent_id=''):
"""Create a Subsite resource. A subsite is a frame of reference within an observatory. Its parent is
either the observatory or another subsite.
@param subsite Subsite
@param parent_id str
@retval subsite_id str
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(subsite)
subsite_id = self.RR2.create(subsite, RT.Subsite)
if parent_id:
self.assign_site_to_site(subsite_id, parent_id)
return subsite_id
def read_subsite(self, subsite_id=''):
"""Read a Subsite resource
@param subsite_id str
@retval subsite Subsite
@throws NotFound object with specified id does not exist
"""
return self.RR2.read(subsite_id, RT.Subsite)
def update_subsite(self, subsite=None):
"""Update a Subsite resource
@param subsite Subsite
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(subsite)
return self.RR2.update(subsite, RT.Subsite)
def delete_subsite(self, subsite_id=''):
"""Delete a subsite resource, removes assocations to parents
@param subsite_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.lcs_delete(subsite_id, RT.Subsite)
def force_delete_subsite(self, subsite_id=''):
self.RR2.force_delete(subsite_id, RT.Subsite)
def create_platform_site(self, platform_site=None, parent_id=''):
"""Create a PlatformSite resource. A platform_site is a frame of reference within an observatory. Its parent is
either the observatory or another platform_site.
@param platform_site PlatformSite
@param parent_id str
@retval platform_site_id str
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(platform_site)
platform_site_id = self.RR2.create(platform_site, RT.PlatformSite)
if parent_id:
self.RR2.assign_site_to_one_site_with_has_site(platform_site_id, parent_id)
return platform_site_id
def read_platform_site(self, platform_site_id=''):
"""Read a PlatformSite resource
@param platform_site_id str
@retval platform_site PlatformSite
@throws NotFound object with specified id does not exist
"""
return self.RR2.read(platform_site_id, RT.PlatformSite)
def update_platform_site(self, platform_site=None):
"""Update a PlatformSite resource
@param platform_site PlatformSite
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(platform_site)
return self.RR2.update(platform_site, RT.PlatformSite)
def delete_platform_site(self, platform_site_id=''):
"""Delete a PlatformSite resource, removes assocations to parents
@param platform_site_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.lcs_delete(platform_site_id, RT.PlatformSite)
def force_delete_platform_site(self, platform_site_id=''):
self.RR2.force_delete(platform_site_id, RT.PlatformSite)
def create_instrument_site(self, instrument_site=None, parent_id=''):
"""Create a InstrumentSite resource. A instrument_site is a frame of reference within an observatory. Its parent is
either the observatory or another instrument_site.
@param instrument_site InstrumentSite
@param parent_id str
@retval instrument_site_id str
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(instrument_site)
instrument_site_id = self.RR2.create(instrument_site, RT.InstrumentSite)
if parent_id:
self.RR2.assign_site_to_one_site_with_has_site(instrument_site_id, parent_id)
return instrument_site_id
def read_instrument_site(self, instrument_site_id=''):
"""Read a InstrumentSite resource
@param instrument_site_id str
@retval instrument_site InstrumentSite
@throws NotFound object with specified id does not exist
"""
return self.RR2.read(instrument_site_id, RT.InstrumentSite)
def update_instrument_site(self, instrument_site=None):
"""Update a InstrumentSite resource
@param instrument_site InstrumentSite
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
self._calc_geospatial_point_center(instrument_site)
return self.RR2.update(instrument_site, RT.InstrumentSite)
def delete_instrument_site(self, instrument_site_id=''):
"""Delete a InstrumentSite resource, removes assocations to parents
@param instrument_site_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.lcs_delete(instrument_site_id, RT.InstrumentSite)
def force_delete_instrument_site(self, instrument_site_id=''):
self.RR2.force_delete(instrument_site_id, RT.InstrumentSite)
def create_deployment(self, deployment=None, site_id="", device_id=""):
"""
Create a Deployment resource. Represents a (possibly open-ended) time interval
grouping one or more resources within a given context, such as an instrument
deployment on a platform at an observatory site.
"""
deployment_id = self.RR2.create(deployment, RT.Deployment)
#Verify that site and device exist, add links if they do
if site_id:
site_obj = self.RR2.read(site_id)
if site_obj:
self.assign_site_to_deployment(site_id=site_id, deployment_id=deployment_id)
if device_id:
device_obj = self.RR2.read(device_id)
if device_obj:
self.assign_device_to_deployment(device_id=device_id, deployment_id=deployment_id)
return deployment_id
def update_deployment(self, deployment=None):
# Overwrite Deployment object
self.RR2.update(deployment, RT.Deployment)
def read_deployment(self, deployment_id=''):
deployment_obj = self.RR2.read(deployment_id, RT.Deployment)
return deployment_obj
def delete_deployment(self, deployment_id=''):
"""
Delete a Deployment resource
"""
self.RR2.lcs_delete(deployment_id, RT.Deployment)
def force_delete_deployment(self, deployment_id=''):
self.RR2.force_delete(deployment_id, RT.Deployment)
############################
#
# ASSOCIATIONS
#
############################
def assign_site_to_site(self, child_site_id='', parent_site_id=''):
"""Connects a child site (any subtype) to a parent site (any subtype)
@param child_site_id str
@param parent_site_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.assign_site_to_site_with_has_site(child_site_id, parent_site_id)
def unassign_site_from_site(self, child_site_id='', parent_site_id=''):
"""Disconnects a child site (any subtype) from a parent site (any subtype)
@param child_site_id str
@param parent_site_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.unassign_site_from_site_with_has_site(child_site_id, parent_site_id)
def assign_device_to_site(self, device_id='', site_id=''):
"""Connects a device (any type) to a site (any subtype)
@param device_id str
@param site_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.assign_device_to_site_with_has_device(device_id, site_id)
def unassign_device_from_site(self, device_id='', site_id=''):
"""Disconnects a device (any type) from a site (any subtype)
@param device_id str
@param site_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.unassign_device_from_site_with_has_device(device_id, site_id)
def _update_device_add_geo_add_temporal(self, device_id='', site_id='', deployment_obj=''):
"""Assigns to device:
temporal extent from deployment
geo location from site
@param device_id str
@param site_id str
@param deployment_obj Deployment
@throws NotFound object with specified id does not exist
"""
device_obj = self.RR.read(device_id)
site_obj = self.RR.read(site_id)
for constraint in site_obj.constraint_list:
if constraint.type_ == OT.GeospatialBounds:
device_obj.geospatial_bounds = GeoUtils.calc_geo_bounds_for_geo_bounds_list(
[device_obj.geospatial_bounds, constraint])
for constraint in deployment_obj.constraint_list:
if constraint.type_ == OT.TemporalBounds:
device_obj.temporal_bounds = GeoUtils.calc_temp_bounds_for_temp_bounds_list(
[device_obj.temporal_bounds, constraint])
self.RR.update(device_obj)
def _update_device_remove_geo_update_temporal(self, device_id='', temporal_constraint=None):
"""Remove the geo location and update temporal extent (end) from the device
@param device_id str
@param site_id str
@throws NotFound object with specified id does not exist
"""
device_obj = self.RR.read(device_id)
bounds = GeospatialBounds(geospatial_latitude_limit_north=float(0),
geospatial_latitude_limit_south=float(0),
geospatial_longitude_limit_west=float(0),
geospatial_longitude_limit_east=float(0),
geospatial_vertical_min=float(0),
geospatial_vertical_max=float(0))
device_obj.geospatial_bounds = bounds
if temporal_constraint:
device_obj.temporal_bounds.end_datetime = GeoUtils.calc_temp_bounds_for_temp_bounds_list(
[device_obj.temporal_bounds, temporal_constraint])
self.RR.update(device_obj)
def _get_bounds_from_object(self, obj=''):
temporal = None
geographic = None
for constraint in obj.constraint_list:
if constraint.type_ == OT.TemporalBounds:
temporal = constraint
if constraint.type_ == OT.GeospatialBounds:
geographic = constraint
return temporal, geographic
def assign_device_to_network_parent(self, child_device_id='', parent_device_id=''):
"""Connects a device (any type) to parent in the RSN network
@param child_device_id str
@param parent_device_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.assign_device_to_one_device_with_has_network_parent(parent_device_id, child_device_id)
def unassign_device_from_network_parent(self, child_device_id='', parent_device_id=''):
"""Disconnects a child device (any type) from parent in the RSN network
@param child_device_id str
@param parent_device_id str
@throws NotFound object with specified id does not exist
"""
self.RR2.unassign_device_from_device_with_has_network_parent(parent_device_id, child_device_id)
def assign_instrument_model_to_instrument_site(self, instrument_model_id='', instrument_site_id=''):
self.RR2.assign_instrument_model_to_instrument_site_with_has_model(instrument_model_id, instrument_site_id)
def unassign_instrument_model_from_instrument_site(self, instrument_model_id='', instrument_site_id=''):
self.RR2.unassign_instrument_model_from_instrument_site_with_has_model(instrument_model_id, instrument_site_id)
def assign_platform_model_to_platform_site(self, platform_model_id='', platform_site_id=''):
self.RR2.assign_platform_model_to_platform_site_with_has_model(platform_model_id, platform_site_id)
def unassign_platform_model_from_platform_site(self, platform_model_id='', platform_site_id=''):
self.RR2.unassign_platform_model_from_platform_site_with_has_model(platform_model_id, platform_site_id)
def assign_resource_to_observatory_org(self, resource_id='', org_id=''):
if not org_id:
raise BadRequest("Org id not given")
if not resource_id:
raise BadRequest("Resource id not given")
#log.trace("assign_resource_to_observatory_org: org_id=%s, resource_id=%s ", org_id, resource_id)
self.clients.org_management.share_resource(org_id, resource_id)
def unassign_resource_from_observatory_org(self, resource_id='', org_id=''):
if not org_id:
raise BadRequest("Org id not given")
if not resource_id:
raise BadRequest("Resource id not given")
self.clients.org_management.unshare_resource(org_id, resource_id)
##########################################################################
#
# DEPLOYMENTS
#
##########################################################################
def _get_deployment_assocs(self, deployment_id):
res_ids, assocs = self.RR.find_subjects(predicate=PRED.hasDeployment, object=deployment_id, id_only=True)
assoc_by_type = dict(Site=[], Device=[])
for a in assocs:
if a.st not in assoc_by_type:
assoc_by_type[a.st] = []
assoc_by_type[a.st].append(a)
if a.st.endswith("Device"):
assoc_by_type["Device"].append(a)
if a.st.endswith("Site"):
assoc_by_type["Site"].append(a)
return assoc_by_type
def assign_device_to_deployment(self, device_id='', deployment_id=''):
device = self.RR.read(device_id)
dep_assocs = self._get_deployment_assocs(deployment_id)
if dep_assocs["Device"]:
raise BadRequest("Deployment %s - Cannot have more than 1 Device" % deployment_id)
if device.type_ == RT.InstrumentDevice:
self.RR2.assign_deployment_to_instrument_device_with_has_deployment(deployment_id, device_id)
if dep_assocs["Site"] and dep_assocs["Site"][0].st != RT.InstrumentSite:
raise BadRequest("Deployment %s - Device %s (%s) incompatible with associated Site %s (%s)" % (
deployment_id, device_id, device.type_, dep_assocs["Site"][0].s, dep_assocs["Site"][0].st))
elif device.type_ == RT.PlatformDevice:
self.RR2.assign_deployment_to_platform_device_with_has_deployment(deployment_id, device_id)
if dep_assocs["Site"] and dep_assocs["Site"][0].st != RT.PlatformSite:
raise BadRequest("Deployment %s - Device %s (%s) incompatible with associated Site %s (%s)" % (
deployment_id, device_id, device.type_, dep_assocs["Site"][0].s, dep_assocs["Site"][0].st))
else:
raise BadRequest("Illegal resource type to assign to Deployment: %s" % device.type_)
def unassign_device_from_deployment(self, device_id='', deployment_id=''):
device = self.RR.read(device_id)
if device.type_ == RT.InstrumentDevice:
self.RR2.unassign_deployment_from_instrument_device_with_has_deployment(deployment_id, device_id)
elif device.type_ == RT.PlatformDevice:
self.RR2.unassign_deployment_from_platform_device_with_has_deployment(deployment_id, device_id)
else:
raise BadRequest("Illegal resource type to assign to Deployment: %s" % device.type_)
def assign_site_to_deployment(self, site_id='', deployment_id=''):
site = self.RR.read(site_id)
dep_assocs = self._get_deployment_assocs(deployment_id)
if dep_assocs["Site"]:
raise BadRequest("Deployment %s - Cannot have more than 1 Site" % deployment_id)
if site.type_ == RT.InstrumentSite:
self.RR2.assign_deployment_to_instrument_site_with_has_deployment(deployment_id, site_id)
if dep_assocs["Device"] and dep_assocs["Device"][0].st != RT.InstrumentDevice:
raise BadRequest("Deployment %s - Site %s (%s) incompatible with associated Device %s (%s)" % (
deployment_id, site_id, site.type_, dep_assocs["Device"][0].s, dep_assocs["Device"][0].st))
elif site.type_ == RT.PlatformSite:
self.RR2.assign_deployment_to_platform_site_with_has_deployment(deployment_id, site_id)
if dep_assocs["Device"] and dep_assocs["Device"][0].st != RT.PlatformDevice:
raise BadRequest("Deployment %s - Site %s (%s) incompatible with associated Device %s (%s)" % (
deployment_id, site_id, site.type_, dep_assocs["Device"][0].s, dep_assocs["Device"][0].st))
else:
raise BadRequest("Illegal resource type to assign to Deployment: %s" % site.type_)
def unassign_site_from_deployment(self, site_id='', deployment_id=''):
site = self.RR.read(site_id)
if site.type_ == RT.InstrumentSite:
self.RR2.unassign_deployment_from_instrument_site_with_has_deployment(deployment_id, site_id)
elif site.type_ == RT.PlatformSite:
self.RR2.unassign_deployment_from_platform_site_with_has_deployment(deployment_id, site_id)
else:
raise BadRequest("Illegal resource type to assign to Deployment: %s" % site.type_)
def activate_deployment(self, deployment_id='', activate_subscriptions=False):
"""
Make the devices on this deployment the primary devices for the sites
"""
dep_util = DeploymentUtil(self.container)
# Verify that the deployment exists
deployment_obj = self.RR2.read(deployment_id)
log.info("Activating deployment %s '%s'", deployment_id, deployment_obj.name)
# Find an existing primary deployment
dep_site_id, dep_dev_id = dep_util.get_deployment_relations(deployment_id)
active_dep = dep_util.get_site_primary_deployment(dep_site_id)
if active_dep and active_dep._id == deployment_id:
raise BadRequest("Deployment %s already active for site %s" % (deployment_id, dep_site_id))
self.deploy_planner = DeploymentPlanner(self.clients)
pairs_to_remove, pairs_to_add = self.deploy_planner.prepare_activation(deployment_obj)
log.debug("activate_deployment pairs_to_add: %s", pairs_to_add)
log.debug("activate_deployment pairs_to_remove: %s", pairs_to_remove)
if not pairs_to_add:
log.warning('No Site and Device pairs were added to activate this deployment')
temp_constraint = dep_util.get_temporal_constraint(deployment_obj)
# process any removals
for site_id, device_id in pairs_to_remove:
log.info("Unassigning hasDevice; device '%s' from site '%s'", device_id, site_id)
self.unassign_device_from_site(device_id, site_id)
log.info("Removing geo and updating temporal attrs for device '%s'", device_id)
self._update_device_remove_geo_update_temporal(device_id, temp_constraint)
# Sever the connection between dev/site and the primary deployment
assocs = self.clients.resource_registry.find_associations(device_id, PRED.hasPrimaryDeployment, deployment_id)
for assoc in assocs:
self.RR.delete_association(assoc)
assocs = self.clients.resource_registry.find_associations(site_id, PRED.hasPrimaryDeployment, deployment_id)
for assoc in assocs:
self.RR.delete_association(assoc)
# process the additions
for site_id, device_id in pairs_to_add:
log.info("Setting primary device '%s' for site '%s'", device_id, site_id)
self.assign_device_to_site(device_id, site_id)
log.info("Adding geo and updating temporal attrs for device '%s'", device_id)
self._update_device_add_geo_add_temporal(device_id, site_id, deployment_obj)
site_obj = self.RR2.read(site_id)
dev_obj = self.RR2.read(device_id)
# Make this deployment Primary for every device and site
self.RR.create_association(subject=device_id, predicate=PRED.hasPrimaryDeployment, object=deployment_id, assoc_type=RT.Deployment)
self.RR.create_association(subject=site_id, predicate=PRED.hasPrimaryDeployment, object=deployment_id, assoc_type=RT.Deployment)
# Add a withinDeployment association from Device to Deployment
# so the entire history of a Device can be found.
self.RR.create_association(subject=device_id, predicate=PRED.withinDeployment, object=deployment_id, assoc_type=RT.Deployment)
sdps, _ = self.RR.find_objects(subject=site_id, predicate=PRED.hasOutputProduct, object_type=RT.DataProduct, id_only=False)
sdps_ids = [s._id for s in sdps] # Get a list of Site Data Product IDs
sdps_streams, _ = self.RR.find_objects_mult(subjects=sdps_ids, predicate=PRED.hasStream, id_only=False)
dpds, _ = self.RR.find_objects(subject=device_id, predicate=PRED.hasOutputProduct, object_type=RT.DataProduct, id_only=False)
dps_ids = [d._id for d in dpds] # Get a list of device data product ids
dps_streams, _ = self.RR.find_objects_mult(subjects=dps_ids, predicate=PRED.hasStream, id_only=False)
# Match SDPs to DDPs to get dataset_id and update the dataset_windows.
if not sdps_ids and log.isEnabledFor(logging.DEBUG):
log.debug("Not updating data_windows on Site '%s'... no SiteDataProducts were found." % site_id)
for sdp in sdps:
if not sdp.ingest_stream_name:
log.warning("Unable to pair site data product %s without an ingest stream name", sdp.name)
continue # Ingest stream name isn't defined
for dpd in dpds:
# breakpoint(locals(), globals())
if sdp.ingest_stream_name == dpd.ingest_stream_name:
# Update the window list in the resource
site_dataset_id = self.RR2.find_object(sdp._id, PRED.hasDataset, id_only=True)
device_dataset_id = self.RR2.find_object(dpd._id, PRED.hasDataset, id_only=True)
bounds = TemporalBounds(start_datetime=temp_constraint.start_datetime, end_datetime=str(calendar.timegm(datetime(2038,1,1).utctimetuple())))
window = DatasetWindow(dataset_id=device_dataset_id, bounds=bounds)
sdp.dataset_windows.append(window)
self.clients.data_product_management.update_data_product(sdp)
# TODO: Once coverages support None for open intervals on complex datasets, we'll change this.
# In the meantime, 2038 is pretty far out, and the world will end shortly after, so
# it's good enough for an arbitrary point in the future.
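# 2208988800 is the number of seconds between the NTP epoch (1900-01-01) and
# the Unix epoch (1970-01-01); adding it converts Unix timestamps to the NTP
# timestamps the dataset layer expects here.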
start = int(temp_constraint.start_datetime) + 2208988800
end = calendar.timegm(datetime(2038,1,1).utctimetuple()) + 2208988800
self.clients.dataset_management.add_dataset_window_to_complex(device_dataset_id, (start, end), site_dataset_id)
dp_params = self.clients.data_product_management.get_data_product_parameters(dpd._id, id_only=False)
for param in dp_params:
if 'lat' in param.name and param.parameter_type == 'sparse':
# Update sparse lat/lon data with site lat/lon
site_obj = self.RR.read(site_id)
# Search for GeospatialBounds bbox constraint
for constraint in site_obj.constraint_list:
if constraint.type_ == OT.GeospatialBounds:
# Get the midpoint of the site geospatial bounds
mid_point = GeoUtils.calc_geospatial_point_center(constraint)
# Create granule using midpoint
stream_def_id, _ = self.RR.find_objects(subject=dpd, predicate=PRED.hasStreamDefinition, id_only=True)
rdt = RecordDictionaryTool(stream_definition_id=stream_def_id[0])
rdt['time'] = [start]
rdt['lat'] = [mid_point['lat']]
rdt['lon'] = [mid_point['lon']]
ParameterHelper.publish_rdt_to_data_product(dpd, rdt)
if deployment_obj.lcstate != LCS.DEPLOYED:
self.RR.execute_lifecycle_transition(deployment_id, LCE.DEPLOY)
else:
log.warn("Deployment %s was already DEPLOYED when activated", deployment_obj._id)
if active_dep:
log.info("activate_deployment(): Deactivating prior Deployment %s at site %s" % (active_dep._id, dep_site_id))
# Set Deployment end date
olddep_tc = dep_util.get_temporal_constraint(active_dep)
newdep_tc = dep_util.get_temporal_constraint(deployment_obj)
if float(olddep_tc.end_datetime) > float(newdep_tc.start_datetime):
# Set to new deployment start date
dep_util.set_temporal_constraint(active_dep, end_time=newdep_tc.start_datetime)
self.RR.update(active_dep)
# Change LCS
if active_dep.lcstate == LCS.DEPLOYED:
self.RR.execute_lifecycle_transition(active_dep._id, LCE.INTEGRATE)
else:
log.warn("Prior Deployment %s was not in DEPLOYED lcstate", active_dep._id)
def deactivate_deployment(self, deployment_id=''):
"""Remove the primary device designation for the deployed devices at the sites
@param deployment_id str
@throws NotFound object with specified id does not exist
@throws BadRequest if devices can not be undeployed
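Example (illustrative; the deployment id is hypothetical):
observatory_management.deactivate_deployment(deployment_id='dep_0001')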
"""
#Verify that the deployment exists
deployment_obj = self.RR2.read(deployment_id)
dep_util = DeploymentUtil(self.container)
if deployment_obj.lcstate != LCS.DEPLOYED:
log.warn("deactivate_deployment(): Deployment %s is not DEPLOYED" % deployment_id)
# raise BadRequest("This deployment is not active")
# get all associated components
self.deploy_planner = DeploymentPlanner(self.clients)
site_ids, device_ids = self.deploy_planner.get_deployment_sites_devices(deployment_obj)
dep_util.set_temporal_constraint(deployment_obj, end_time=DeploymentUtil.DATE_NOW)
self.RR.update(deployment_obj)
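# The deployment's temporal constraint now ends at DATE_NOW; that end date is
# what closes the matching dataset_windows on the site data products below.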
temp_constraint = dep_util.get_temporal_constraint(deployment_obj)
# delete only associations where both site and device have passed the filter
for s in site_ids:
dataset_ids = []
ds, _ = self.RR.find_objects(s, PRED.hasDevice, id_only=True)
for d in ds:
if d in device_ids:
a = self.RR.get_association(s, PRED.hasDevice, d)
self.RR.delete_association(a)
log.info("Removing geo and updating temporal attrs for device '%s'", d)
self._update_device_remove_geo_update_temporal(d, temp_constraint)
try:
self.RR.execute_lifecycle_transition(d, LCE.INTEGRATE)
except BadRequest:
log.warn("Could not set device %s lcstate to INTEGRATED", d)
primary_d = self.RR.find_associations(subject=d, predicate=PRED.hasPrimaryDeployment, object=deployment_id)
if primary_d:
self.RR.delete_association(primary_d[0])
primary_s = self.RR.find_associations(subject=s, predicate=PRED.hasPrimaryDeployment, object=deployment_id)
if primary_s:
self.RR.delete_association(primary_s[0])
# Get Dataset IDs for a Device
dps, _ = self.RR.find_objects(subject=d, predicate=PRED.hasOutputProduct, id_only=True)
dataset_ids, _ = self.RR.find_objects_mult(subjects=dps, predicate=PRED.hasDataset, id_only=True)
dataset_ids = list(set(dataset_ids))
# Get the Deployment time bounds as datetime objects
temporal, geographic = self._get_bounds_from_object(obj=deployment_obj)
# Set the ending of the appropriate dataset_windows. Have to search by dataset_id because we are
# not creating any new resources for the dataset_window logic!
site_dps, _ = self.RR.find_objects(s, PRED.hasOutputProduct, id_only=True)
for dp in site_dps:
site_data_product = self.RR.read(dp)
# This assumes that dataset_windows is ALWAYS kept IN ORDER (ascending).
# There should NEVER be a situation where two dataset_windows are missing
# an 'ending' value. If there is, a prior deployment wasn't deactivated
# properly.
for window in site_data_product.dataset_windows:
if window.dataset_id in dataset_ids:
# Set up the tuples of start and stops
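# Times are shifted from the Unix epoch to the NTP epoch (1900) by adding
# 2208988800 seconds, mirroring the conversion done at activation time.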
old_start = int(window.bounds.start_datetime) + 2208988800
old_end = int(window.bounds.end_datetime) + 2208988800
new_start = old_start
new_end = int(temporal.end_datetime) + 2208988800
# Update the data product resource
window.bounds.end_datetime = temporal.end_datetime
site_dataset_id = self.RR2.find_object(site_data_product._id, PRED.hasDataset, id_only=True)
device_dataset_id = window.dataset_id
# Update the dataset
self.clients.dataset_management.update_dataset_window_for_complex(device_dataset_id, (old_start, old_end), (new_start, new_end), site_dataset_id)
break
self.clients.data_product_management.update_data_product(site_data_product)
# This should set the deployment resource to retired, but the current RR
# retire logic removes all associations first, which breaks this flow.
# Until that is fixed we switch back to INTEGRATE instead.
#self.RR.execute_lifecycle_transition(deployment_id, LCE.RETIRE)
# mark deployment as no longer deployed (INTEGRATED seems appropriate)
if deployment_obj.lcstate == LCS.DEPLOYED:
self.RR.execute_lifecycle_transition(deployment_id, LCE.INTEGRATE)
else:
log.warn("Deployment %s was not in DEPLOYED lcstate", deployment_id)
def prepare_deployment_support(self, deployment_id=''):
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(deployment_id, OT.DeploymentPrepareSupport)
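# NOTE: the "$(...)" tokens below are placeholders, not literal values; the
# assumption is that the caller (e.g. the UI or service gateway) substitutes
# them with actual resource ids/objects before dispatching each request.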
#Fill out service request information for creating a deployment
extended_resource_handler.set_service_requests(resource_data.create_request, 'observatory_management',
'create_deployment', { "deployment": "$(deployment)" })
#Fill out service request information for updating a deployment
extended_resource_handler.set_service_requests(resource_data.update_request, 'observatory_management',
'update_deployment', { "deployment": "$(deployment)" })
#Fill out service request information for assigning an InstrumentDevice
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentDevice'].assign_request, 'observatory_management',
'assign_device_to_deployment', {"device_id": "$(instrument_device_id)",
"deployment_id": deployment_id })
#Fill out service request information for assigning a PlatformDevice
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformDevice'].assign_request, 'observatory_management',
'assign_device_to_deployment', {"device_id": "$(platform_device_id)",
"deployment_id": deployment_id })
#Fill out service request information for unassigning an InstrumentDevice
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentDevice'].unassign_request, 'observatory_management',
'unassign_device_from_deployment', {"device_id": "$(instrument_device_id)",
"deployment_id": deployment_id })
#Fill out service request information for unassigning a PlatformDevice
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformDevice'].unassign_request, 'observatory_management',
'unassign_device_from_deployment', {"device_id": "$(platform_device_id)",
"deployment_id": deployment_id })
#Fill out service request information for assigning an InstrumentSite
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentSite'].assign_request, 'observatory_management',
'assign_site_to_deployment', {"site_id": "$(instrument_site_id)",
"deployment_id": deployment_id })
#Fill out service request information for assigning a PlatformSite
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformSite'].assign_request, 'observatory_management',
'assign_site_to_deployment', {"site_id": "$(platform_site_id)",
"deployment_id": deployment_id })
#Fill out service request information for unassigning an InstrumentSite
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasInstrumentSite'].unassign_request, 'observatory_management',
'unassign_site_from_deployment', {"site_id": "$(instrument_site_id)",
"deployment_id": deployment_id })
#Fill out service request information for unassigning a PlatformSite
extended_resource_handler.set_service_requests(resource_data.associations['DeploymentHasPlatformSite'].unassign_request, 'observatory_management',
'unassign_site_from_deployment', {"site_id": "$(platform_site_id)",
"deployment_id": deployment_id })
return resource_data
##########################################################################
#
# FIND OPS
#
##########################################################################
def find_org_by_observatory(self, observatory_id=''):
"""
"""
orgs,_ = self.RR.find_subjects(RT.Org, PRED.hasResource, observatory_id, id_only=False)
return orgs
def find_related_frames_of_reference(self, input_resource_id='', output_resource_type_list=None):
# use the related resources crawler
finder = RelatedResourcesCrawler()
# generate the partial function (cached association list)
get_assns = finder.generate_related_resources_partial(self.RR, [PRED.hasSite])
# run 2 searches allowing all site-based resource types: one down (subj-obj), one up (obj-subj)
full_crawllist = [RT.InstrumentSite, RT.PlatformSite, RT.Subsite, RT.Observatory]
search_down = get_assns({PRED.hasSite: (True, False)}, full_crawllist)
search_up = get_assns({PRED.hasSite: (False, True)}, full_crawllist)
# the searches return a list of association objects, so compile all the ids by extracting them
retval_ids = set([])
# we want only those IDs that are not the input resource id
for a in search_down(input_resource_id, -1) + search_up(input_resource_id, -1):
if a.o not in retval_ids and a.o != input_resource_id:
retval_ids.add(a.o)
if a.s not in retval_ids and a.s != input_resource_id:
retval_ids.add(a.s)
log.trace("converting retrieved ids to objects = %s" % retval_ids)
#initialize the dict
retval = dict((restype, []) for restype in output_resource_type_list)
#workaround for read_mult problem
all_res = []
if retval_ids: all_res = self.RR.read_mult(list(retval_ids))
# put resources in the slot based on their type
for resource in all_res:
typename = type(resource).__name__
if typename in output_resource_type_list:
retval[typename].append(resource)
# display a count of how many resources we retrieved
log.debug("got these resources: %s", dict([(k, len(v)) for k, v in retval.iteritems()]))
return retval
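# Example use of find_related_frames_of_reference (illustrative; the id is
# hypothetical, and this assumes RT.* values are the type-name strings used
# as keys in the returned dict):
#   related = self.find_related_frames_of_reference(some_site_id, [RT.Observatory, RT.Subsite])
#   # -> {'Observatory': [obs_obj, ...], 'Subsite': [subsite_obj, ...]}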
def find_related_sites(self, parent_resource_id='', exclude_site_types=None, include_parents=False,
include_devices=False, id_only=False):
if not parent_resource_id:
raise BadRequest("Must provide a parent parent_resource_id")
exclude_site_types = exclude_site_types or []
if not isinstance(exclude_site_types, list):
raise BadRequest("exclude_site_types mut be a list, is: %s" % type(exclude_site_types))
parent_resource = self.RR.read(parent_resource_id)
org_id, site_id = None, None
if parent_resource.type_ == RT.Org:
org_id = parent_resource_id
elif RT.Site in parent_resource._get_extends():
site_id = parent_resource_id
else:
raise BadRequest("Illegal parent_resource_id type. Expected Org/Site, given:%s" % parent_resource.type_)
RR2 = EnhancedResourceRegistryClient(self.RR)
RR2.cache_resources(RT.Observatory)
RR2.cache_resources(RT.PlatformSite)
RR2.cache_resources(RT.InstrumentSite)
if include_devices:
RR2.cache_resources(RT.PlatformDevice)
RR2.cache_resources(RT.InstrumentDevice)
outil = ObservatoryUtil(self, enhanced_rr=RR2)
site_resources, site_children = outil.get_child_sites(site_id, org_id,
exclude_types=exclude_site_types, include_parents=include_parents, id_only=id_only)
site_devices, device_resources = None, None
if include_devices:
site_devices = outil.get_device_relations(site_children.keys())
device_list = list({tup[1] for key,dev_list in site_devices.iteritems() if dev_list for tup in dev_list})
device_resources = RR2.read_mult(device_list)
# HACK:
dev_by_id = {dev._id: dev for dev in device_resources}
site_resources.update(dev_by_id)
return site_resources, site_children, site_devices, device_resources
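# Return shape sketch for find_related_sites, inferred from the code above:
#   site_resources   - dict: site id -> site resource (devices included when include_devices)
#   site_children    - dict: parent site id -> list of child site ids
#   site_devices     - dict: site id -> device relation tuples, or None when include_devices is False
#   device_resources - list of device resources, or None when include_devices is False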
def get_sites_devices_status(self, parent_resource_ids=None, include_sites=False, include_devices=False, include_status=False):
if not parent_resource_ids:
raise BadRequest("Must provide a parent parent_resource_id")
result_dict = {}
RR2 = EnhancedResourceRegistryClient(self.RR)
RR2.cache_resources(RT.Observatory)
RR2.cache_resources(RT.PlatformSite)
RR2.cache_resources(RT.InstrumentSite)
RR2.cache_resources(RT.PlatformDevice)
RR2.cache_resources(RT.InstrumentDevice)
outil = ObservatoryUtil(self, enhanced_rr=RR2, device_status_mgr=DeviceStateManager())
parent_resource_objs = RR2.read_mult(parent_resource_ids)
res_by_id = dict(zip(parent_resource_ids, parent_resource_objs))
# Loop thru all the provided site ids and create the result structure
for parent_resource_id in parent_resource_ids:
parent_resource = res_by_id[parent_resource_id]
org_id, site_id = None, None
if parent_resource.type_ == RT.Org:
org_id = parent_resource_id
elif RT.Site in parent_resource._get_extends():
site_id = parent_resource_id
site_result_dict = {}
site_resources, site_children = outil.get_child_sites(site_id, org_id, include_parents=True, id_only=False)
if include_sites:
site_result_dict["site_resources"] = site_resources
site_result_dict["site_children"] = site_children
all_device_statuses = {}
if include_devices or include_status:
RR2.cache_predicate(PRED.hasSite)
RR2.cache_predicate(PRED.hasDevice)
all_device_statuses = outil.get_status_roll_ups(parent_resource_id)
if include_status:
# TODO: grab the master status table to pass into the get_status_roll_ups calc
log.debug('get_sites_devices_status site master_status_table: %s ', all_device_statuses)
site_result_dict["site_status"] = all_device_statuses
#create the aggregate_status for each device and site
log.debug("calculate site aggregate status")
site_status = [all_device_statuses.get(x,{}).get('agg',DeviceStatusType.STATUS_UNKNOWN) for x in site_children.keys()]
site_status_dict = dict(zip(site_children.keys(), site_status))
log.debug('get_sites_devices_status site_status_dict: %s ', site_status_dict)
site_result_dict["site_aggregate_status"] = site_status_dict
if include_devices:
log.debug("calculate device aggregate status")
inst_status = [all_device_statuses.get(x,{}).get('agg',DeviceStatusType.STATUS_UNKNOWN) for x in all_device_statuses.keys()]
device_agg_status_dict = dict(zip(all_device_statuses.keys(), inst_status))
log.debug('get_sites_devices_status device_agg_status_dict: %s ', device_agg_status_dict)
site_result_dict["device_aggregate_status"] = device_agg_status_dict
result_dict[parent_resource_id] = site_result_dict
return result_dict
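# Per-parent result shape, as assembled above:
#   {"site_resources": ..., "site_children": ...,       # when include_sites
#    "site_status": ..., "site_aggregate_status": ...,  # when include_status
#    "device_aggregate_status": ...}                    # when include_devices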
def find_site_data_products(self, parent_resource_id='', include_sites=False, include_devices=False,
include_data_products=False):
if not parent_resource_id:
raise BadRequest("Must provide a parent parent_resource_id")
outil = ObservatoryUtil(self)
res_dict = outil.get_site_data_products(parent_resource_id, include_sites=include_sites,
include_devices=include_devices,
include_data_products=include_data_products)
return res_dict
# -------------------------------------------------------------------------
# Marine Asset Management RESOURCES (start)
# -------------------------------------------------------------------------
# AssetType
def create_asset_type(self, asset_type=None):
"""Create a AssetType resource.
@param asset_type RT.AssetType
@retval asset_type_id str
@throws: BadRequest 'asset_type object is empty'
"""
if not asset_type:
raise BadRequest('asset_type object is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
asset_type_id = at.create_asset_type(asset_type)
return asset_type_id
def read_asset_type(self, asset_type_id=''):
"""Read an AssetType resource.
@param asset_type_id str
@retval asset_type RT.AssetType
@throws: BadRequest 'asset_type_id parameter is empty'
"""
if not asset_type_id:
raise BadRequest('asset_type_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
asset_type = at.read_asset_type(asset_type_id)
return asset_type
def update_asset_type(self, asset_type=None):
"""Update an AssetType resource.
@param asset_type RT.AssetType
@throws: BadRequest 'asset_type object is empty'
"""
if not asset_type:
raise BadRequest('asset_type object is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
obj = at.update_asset_type(asset_type)
return obj
def delete_asset_type(self, asset_type_id=''):
"""Delete an AssetType resource.
@param asset_type_id str
@throws: BadRequest 'asset_type_id parameter is empty'
"""
if not asset_type_id:
raise BadRequest('asset_type_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.delete_asset_type(asset_type_id)
def force_delete_asset_type(self, asset_type_id=''):
"""Force delete an AssetType resource
@param asset_type_id str
@throws: BadRequest 'asset_type_id parameter is empty'
"""
if not asset_type_id:
raise BadRequest('asset_type_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.force_delete_asset_type(asset_type_id)
def update_attribute_specifications(self, resource_id='', spec_dict=None):
""" Update attribute_specifications of resource using spec_dict provided.
@param resource_id str # id of RT.Asset or RT.EventDurationType
@param spec_dict {} # dict of attribute specification(s)
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'spec_dict parameter is empty'
@throws Inconsistent unable to process resource of this type
"""
# TODO NOTE: Must abide by state restriction model
# Updating attribute_specification is dependent on state (i.e., if in integrated or deployed state,
# updates are not permitted unless the operator has privileges to do so).
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not spec_dict:
raise BadRequest('spec_dict parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.update_attribute_specifications(resource_id, spec_dict)
def delete_attribute_specification(self, resource_id='', attr_spec_names=None):
"""Delete attribute_specifications in list of attr_spec_names and return the
TypeResource attribute_specifications dictionary for resource_id.
@param resource_id str # id of RT.Asset or RT.EventDurationType
@param attr_spec_names [] # list of attribute specification name(s)
@retval r_obj {} # dictionary of attribute specification(s)
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'attr_spec_names parameter is empty'
"""
# TODO NOTE: Must abide by state restriction model
# Delete attribute_specifications in list of attr_spec_names and return the
# TypeResource attribute_specifications dictionary for resource_id.
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not attr_spec_names:
raise BadRequest('attr_spec_names parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
r_obj = at.delete_attribute_specification(resource_id, attr_spec_names)
return r_obj
#
# Asset
#
def create_asset(self, asset=None, asset_type_id=''):
"""Create an Asset resource. If alt_ids provided verify well formed and unique
in namespace RT.Asset. An Asset is coupled with an AssetType. The AssetType is
created and associated within this call if asset_type_id provided.
@param asset RT.Asset
@param asset_type_id str # optional
@retval asset_id str
@throws BadRequest 'asset object is empty'
@throws Inconsistent 'multiple alt_ids not permitted for Asset resources'
@throws Inconsistent 'malformed alt_ids provided for Asset; required format \'Asset:asset_name\''
@throws BadRequest 'resource instance already exists (\'Asset\') with this altid: %s'
@throws Inconsistent 'Invalid asset object'
"""
if not asset:
raise BadRequest('asset object is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
asset_id = at.create_asset(asset, asset_type_id)
return asset_id
def read_asset(self, asset_id=''):
"""Read an Asset resource
@param asset_id str
@retval asset RT.Asset
@throws BadRequest 'asset_id parameter is empty'
"""
if not asset_id:
raise BadRequest('asset_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
obj = at.read_asset(asset_id)
return obj
def update_asset(self, asset=None):
"""Update an Asset resource. Ensure alt_ids value (if provided) is well formed and
unique in namespace. The asset object provided shall have asset_attrs defined and shall also have
an association (PRED.implementsAssetType) defined or method shall fail. asset.asset_attrs and
the association are required to perform validation and constraint checks prior to update.
@param asset RT.Asset
@throws BadRequest 'asset object is empty'
@throws NotFound '_id is empty'
@throws BadRequest 'asset (id=%s) does not have association (PRED.implementsAssetType) defined'
@throws BadRequest 'asset (id=%s) has more than one association (PRED.implementsAssetType) defined'
@throws BadRequest 'asset type (id: \'%s\') does not have attribute_specifications'
@throws BadRequest 'asset_update requires asset_attrs to be provided'
@throws BadRequest 'attribute (\'%s\') not found in AssetType (id=\'%s\') AttributeSpecification '
@throws BadRequest 'update_asset: altid returned: %s; instance using current_altid_exists: %s'
@throws BadRequest (numerous error messages from lower methods inside update_asset)
@throws BadRequest 'update_asset failed'
"""
try:
if not asset:
raise BadRequest('asset object is empty')
if not asset._id:
raise NotFound('_id is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.update_asset(asset)
except BadRequest, Arguments:
raise BadRequest('update_asset: %s' % Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound('update_asset: %s' % Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent('update_asset: %s' % Arguments.get_error_message())
except:
raise BadRequest('update_asset failed')
return
def delete_asset(self, asset_id=''):
"""Delete an Asset resource
@param asset_id str
@throws BadRequest 'asset_id parameter is empty'
"""
if not asset_id:
raise BadRequest('asset_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.delete_asset(asset_id)
return
def force_delete_asset(self, asset_id=''):
""" Force delete an Asset resource
@param asset_id str
@throws BadRequest 'asset_id parameter is empty'
"""
if not asset_id:
raise BadRequest('asset_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.force_delete_asset(asset_id)
def get_asset_extension(self, asset_id='', ext_associations=None, ext_exclude=None, user_id=''):
"""Returns an AssetExtension object containing additional related information
@param asset_id str
@param ext_associations dict
@param ext_exclude list
@param user_id str
@retval extended_asset AssetExtension
@throws BadRequest 'asset_id parameter is empty'
"""
if not asset_id:
raise BadRequest('asset_id parameter is empty')
extended_resource_handler = ExtendedResourceContainer(self)
extended_asset = extended_resource_handler.create_extended_resource_container(
extended_resource_type=OT.AssetExtension,
resource_id=asset_id,
computed_resource_type=OT.BaseComputedAttributes,
ext_associations=ext_associations,
ext_exclude=ext_exclude,
user_id=user_id)
from ion.util.extresource import strip_resource_extension, get_matchers, matcher_UserInfo, matcher_MarineAsset,\
matcher_DataProduct, matcher_DeviceModel, matcher_Device
matchers = get_matchers([matcher_MarineAsset, matcher_UserInfo])
strip_resource_extension(extended_asset, matchers=matchers)
return extended_asset
def prepare_asset_support(self, asset_id=''):
"""Asset prepare support for UI (create, update).
@param asset_id str
@retval resource_data resource_schema
"""
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(asset_id, OT.AssetPrepareSupport)
#Fill out service request information for creating an asset
extended_resource_handler.set_service_requests(resource_data.create_request, 'observatory_management',
'create_asset', { "asset": "$(asset)" })
#Fill out service request information for updating an asset
extended_resource_handler.set_service_requests(resource_data.update_request, 'observatory_management',
'update_asset', { "asset": "$(asset)" })
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assign event to asset (LocationEvent, OperabilityEvent, VerificationEvent, IntegrationEvent)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#Fill out service request information for assigning an EventDuration to Asset (LocationEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasLocationEvent'].assign_request,
'observatory_management', 'assign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#Fill out service request information for assigning an EventDuration to Asset (OperabilityEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasOperabilityEvent'].assign_request,
'observatory_management', 'assign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#Fill out service request information for assigning an EventDuration to Asset (VerificationEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasVerificationEvent'].assign_request,
'observatory_management', 'assign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#Fill out service request information for assigning an EventDuration to Asset (IntegrationEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasAssemblyEvent'].assign_request,
'observatory_management', 'assign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# unassign event from asset (LocationEvent, OperabilityEvent, VerificationEvent, IntegrationEvent)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#Fill out service request information for unassigning an EventDuration from Asset (LocationEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasLocationEvent'].unassign_request,
'observatory_management', 'unassign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#Fill out service request information for unassigning an EventDuration from Asset (OperabilityEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasOperabilityEvent'].unassign_request,
'observatory_management', 'unassign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#Fill out service request information for unassigning an EventDuration from Asset (VerificationEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasVerificationEvent'].unassign_request,
'observatory_management', 'unassign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
#Fill out service request information for unassigning an EventDuration from Asset (IntegrationEvent)
extended_resource_handler.set_service_requests(resource_data.associations['AssetHasAssemblyEvent'].unassign_request,
'observatory_management', 'unassign_event_duration_to_asset',
{"event_duration_id": "$(event_duration_id)", "asset_id": asset_id })
return resource_data
def assign_asset_type_to_asset(self, asset_type_id='',asset_id=''):
""" Link an Asset to an AssetType
@param asset_type_id str
@param asset_id str
@throws BadRequest 'asset_type_id parameter is empty'
@throws BadRequest 'asset_id parameter is empty'
"""
if not asset_type_id:
raise BadRequest('asset_type_id parameter is empty')
if not asset_id:
raise BadRequest('asset_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.assign_asset_type_to_asset(asset_type_id, asset_id)
def unassign_asset_type_from_asset(self, asset_type_id='', asset_id=''):
"""Remove link of Asset from AssetType.
@param asset_type_id str
@param asset_id str
@throws BadRequest 'asset_type_id parameter is empty'
@throws BadRequest 'asset_id parameter is empty'
"""
if not asset_type_id:
raise BadRequest('asset_type_id parameter is empty')
if not asset_id:
raise BadRequest('asset_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.unassign_asset_type_from_asset(asset_type_id, asset_id)
#
# EventDurationType
#
def create_event_duration_type(self, event_duration_type=None):
"""Create a EventDurationType resource.
@param event_duration_type RT.EventDurationType
@retval event_duration_type_id str
@throws: BadRequest 'event_duration_type parameter is empty'
"""
if not event_duration_type:
raise BadRequest('event_duration_type parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
event_duration_type = at.create_event_duration_type(event_duration_type)
return event_duration_type
def read_event_duration_type(self, event_duration_type_id=''):
"""Read an EventDurationType resource.
@param event_duration_type_id str
@retval event_duration_type RT.EventDurationType
@throws: BadRequest 'event_duration_type_id parameter is empty'
"""
if not event_duration_type_id:
raise BadRequest('event_duration_type_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
event_duration_type = at.read_event_duration_type(event_duration_type_id)
return event_duration_type
def update_event_duration_type(self, event_duration_type=None):
"""Update an EventDurationType resource.
@param event_duration_type RT.EventDurationType
@throws: BadRequest 'event_duration_type parameter is empty'
"""
if not event_duration_type:
raise BadRequest('event_duration_type parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.update_event_duration_type(event_duration_type)
return
def delete_event_duration_type(self, event_duration_type_id=''):
"""Delete an EventDurationType resource.
@param event_duration_type_id str
@throws: BadRequest 'event_duration_type_id parameter is empty'
"""
if not event_duration_type_id:
raise BadRequest('event_duration_type_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.delete_event_duration_type(event_duration_type_id)
return
def force_delete_event_duration_type(self, event_duration_type_id=''):
"""Force delete an EventDurationType resource.
@param event_duration_type_id str
@throws: BadRequest 'event_duration_type_id parameter is empty'
"""
if not event_duration_type_id:
raise BadRequest('event_duration_type_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.force_delete_event_duration_type(event_duration_type_id)
#
# EventDuration
#
def create_event_duration(self, event_duration=None, event_duration_type_id=''):
"""Create a EventDuration resource.
An EventDuration is created and is coupled with an EventDurationType if
the optional event_duration_type_id is provided.
@param event_duration RT.EventDuration
@param event_duration_type_id str # optional
@retval event_duration_id str
@throws BadRequest 'event_duration parameter is empty'
@throws Inconsistent 'multiple alt_ids not permitted for EventDuration resources'
@throws Inconsistent 'malformed EventDuration.alt_ids provided; required format empty or \'EventDuration:event_name\''
@throws Inconsistent 'invalid namespace (%s) provided for EventDuration resource'
@throws BadRequest 'resource instance already exists (\'EventDuration\') with this altid: %s'
@throws Inconsistent 'Invalid event_duration object'
"""
if not event_duration:
raise BadRequest('event_duration parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
event_duration_id = at.create_event_duration(event_duration, event_duration_type_id)
return event_duration_id
def read_event_duration(self, event_duration_id=''):
"""Read an EventDuration resource.
@param event_duration_id str
@retval event_duration RT.EventDuration
@throws BadRequest 'event_duration_id parameter is empty'
"""
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
event_duration = at.read_event_duration(event_duration_id)
return event_duration
def update_event_duration(self, event_duration=None):
"""Update an EventDuration resource and ensure alt_ids value (if provided) is well formed and
unique in namespace. The event_duration object provided shall have event_duration_attrs
defined and shall also have an association (PRED.implementsEventDurationType) defined or
method shall fail. event_duration.event_duration_attrs and the association are required
to perform validation and constraint checks prior to update.
@param event_duration RT.EventDuration
@throws BadRequest 'event_duration parameter is empty'
@throws NotFound '_id is empty'
@throws BadRequest 'event_duration (id=%s) does not have association (PRED.implementsEventDurationType) defined'
@throws BadRequest 'event_duration (id=%s) has more than one association (PRED.implementsEventDurationType) defined'
@throws BadRequest 'event_duration_update requires event_duration_attrs to be provided'
@throws BadRequest 'event_duration_update: altid returned: %s and current_altid_exists: %s'
@throws BadRequest 'update_event_duration failed'
"""
try:
if not event_duration:
raise BadRequest('event_duration parameter is empty')
if not event_duration._id:
raise NotFound('_id is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.update_event_duration(event_duration)
except BadRequest, Arguments:
raise BadRequest('update_event_duration: %s' % Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound('update_event_duration: %s' % Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent('update_event_duration: %s' % Arguments.get_error_message())
except:
raise BadRequest('update_event_duration failed')
return
def delete_event_duration(self, event_duration_id=''):
"""Delete an EventDuration resource.
@param event_duration_id str
@throws BadRequest 'event_duration_id parameter is empty'
"""
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.delete_event_duration(event_duration_id)
return
def force_delete_event_duration(self, event_duration_id=''):
""" Force delete an EventDuration resource.
@param event_duration_id str
@throws BadRequest 'event_duration_id parameter is empty'
"""
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.force_delete_event_duration(event_duration_id)
def assign_event_duration_type_to_event_duration(self, event_duration_type_id='', event_duration_id=''):
""" Link an EventDuration to an EventDurationType.
@param event_duration_type_id str
@param event_duration_id str
@throws BadRequest 'event_duration_type_id parameter is empty'
@throws BadRequest 'event_duration_id parameter is empty'
"""
if not event_duration_type_id:
raise BadRequest('event_duration_type_id parameter is empty')
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.assign_event_duration_type_to_event_duration(event_duration_type_id, event_duration_id)
def unassign_event_duration_type_from_event_duration(self, event_duration_type_id='', event_duration_id=''):
"""Remove link of EventDuration from EventDurationType.
@param event_duration_type_id str
@param event_duration_id str
@throws BadRequest 'event_duration_type_id parameter is empty'
@throws BadRequest 'event_duration_id parameter is empty'
"""
if not event_duration_type_id:
raise BadRequest('event_duration_type_id parameter is empty')
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.unassign_event_duration_type_from_event_duration(event_duration_type_id, event_duration_id)
def get_event_duration_extension(self, event_duration_id='', ext_associations=None, ext_exclude=None, user_id=''):
"""Returns an EventDurationExtension object containing additional related information
@param event_duration_id str
@param ext_associations dict
@param ext_exclude list
@param user_id str
@retval extended_event_duration EventDurationExtension
@throws BadRequest 'event_duration_id parameter is empty'
"""
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
extended_resource_handler = ExtendedResourceContainer(self)
extended_event_duration = extended_resource_handler.create_extended_resource_container(
extended_resource_type=OT.EventDurationExtension,
resource_id=event_duration_id,
computed_resource_type=OT.BaseComputedAttributes,
ext_associations=ext_associations,
ext_exclude=ext_exclude,
user_id=user_id)
from ion.util.extresource import strip_resource_extension, get_matchers, matcher_UserInfo, matcher_MarineAsset, \
matcher_DataProduct, matcher_DeviceModel, matcher_Device
matchers = get_matchers([matcher_MarineAsset, matcher_UserInfo])
strip_resource_extension(extended_event_duration, matchers=matchers)
return extended_event_duration
def prepare_event_duration_support(self, event_duration_id=''):
"""EventDuration prepare support for UI (create, update).
@param event_duration_id str
@retval resource_data resource_schema
"""
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(event_duration_id, OT.EventDurationPrepareSupport)
#Fill out service request information for creating an event duration
extended_resource_handler.set_service_requests(resource_data.create_request, 'observatory_management',
'create_event_duration', { "event_duration": "$(event_duration)" })
#Fill out service request information for updating an event duration
extended_resource_handler.set_service_requests(resource_data.update_request, 'observatory_management',
'update_event_duration', { "event_duration": "$(event_duration)" })
"""
#Fill out service request information for assigning an EventDurationType from EventDuration
extended_resource_handler.set_service_requests(resource_data.associations['EventDurationHasEventDurationType'].assign_request, 'observatory_management',
'assign_event_duration_type_from_event_duration', {"event_duration_type_id": "$(event_duration_type_id)",
"event_duration_id": event_duration_id })
#Fill out service request information for unassigning an EventDurationType from EventDuration
extended_resource_handler.set_service_requests(resource_data.associations['EventDurationHasEventDurationType'].unassign_request, 'observatory_management',
'unassign_event_duration_type_from_event_duration', {"event_duration_type_id": "$(event_duration_type_id)",
"event_duration_id": event_duration_id })
"""
return resource_data
def assign_event_duration_to_asset(self, event_duration_id='', asset_id=''):
""" Link an EventDuration to an Asset.
@param event_duration_id str
@param asset_id str
@throws BadRequest 'event_duration_id parameter is empty'
@throws BadRequest 'asset_id parameter is empty'
@throws NotFound 'asset instance not found'
@throws Inconsistent 'this event duration has multiple event duration types'
@throws BadRequest 'this event duration does not have associated event duration type'
@throws BadRequest 'unknown EventCategoryEnum value for association category'
@throws BadRequest 'an association (%s) already exists, cannot assign more than one association of the same type'
@throws BadRequest 'unknown association category predicate (Event to Asset)'
@throws BadRequest 'failed to assign association (%s)'
"""
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
if not asset_id:
raise BadRequest('asset_id parameter is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.assign_event_duration_to_asset(event_duration_id, asset_id)
except BadRequest, Arguments:
raise BadRequest(Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound(Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent(Arguments.get_error_message())
except:
raise BadRequest('failed to assign association event duration to asset')
def unassign_event_duration_to_asset(self, event_duration_id='', asset_id=''):
"""Remove link of EventDuration from Asset.
@param event_duration_id str
@param asset_id str
@throws BadRequest 'event_duration_id parameter is empty'
@throws BadRequest 'asset_id parameter is empty'
@throws Inconsistent 'this event duration implements multiple event duration types'
@throws BadRequest 'this event duration does not have associated event duration type'
@throws Inconsistent 'this event duration has multiple associations with asset'
@throws BadRequest 'this event duration is not associated with asset'
"""
if not event_duration_id:
raise BadRequest('event_duration_id parameter is empty')
if not asset_id:
raise BadRequest('asset_id parameter is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.unassign_event_duration_to_asset(event_duration_id, asset_id)
except BadRequest, Arguments:
raise BadRequest(Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound(Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent(Arguments.get_error_message())
except:
raise BadRequest('failed to unassign association (event duration from asset)')
#
# Asset associations to resource
# (not used; remove here AND from observatory_management_service.yml)
#
def assign_asset_to_resource(self, asset_id='',resource_id=''):
# Link an asset to a resource (deprecate)
#@param asset_id str
#@param resource_id str
#@throws NotFound object with specified id does not exist
#@throws BadRequest if object with specified id does not have _id or _rev attribute
#
if not asset_id:
raise BadRequest('asset_id parameter is empty')
if not resource_id:
raise BadRequest('resource_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.assign_asset_to_resource(asset_id, resource_id)
def unassign_asset_from_resource(self, asset_id='', resource_id=''):
#Remove link of asset from resource. (deprecate)
#@param asset_id str
#@param resource_id str
#@throws BadRequest if object with specified id does not have _id or _rev attribute
#
if not asset_id:
raise BadRequest('asset_id parameter is empty')
if not resource_id:
raise BadRequest('resource_id parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.unassign_asset_from_resource(asset_id, resource_id)
#
# CodeSpace
#
def create_code_space(self, code_space=None):
"""Create a CodeSpace resource.
@param code_space RT.CodeSpace
@retval id str
@throws: BadRequest 'code_space object is empty'
@throws: Inconsistent 'invalid code_space object'
"""
if not code_space:
raise BadRequest('code_space object is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
id = at.create_code_space(code_space)
except BadRequest, Arguments:
raise BadRequest(Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent(Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound(Arguments.get_error_message())
except:
raise Inconsistent('invalid code_space object')
return id
def read_code_space(self, resource_id=''):
"""Read an CodeSpace resource.
@param resource_id str
@retval code_space RT.CodeSpace
@throws BadRequest 'resource_id parameter is empty'
@throws NotFound 'object with specified id does not exist'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
obj = at.read_code_space(resource_id)
except:
raise NotFound('object with specified id does not exist.')
return obj
def update_code_space(self, code_space=None):
"""Update an CodeSpace resource.
@param code_space RT.CodeSpace
@throws BadRequest 'code_space object is empty'
"""
if not code_space:
raise BadRequest('code_space object is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
obj = at.update_code_space(code_space)
return obj
def delete_code_space(self, resource_id=''):
"""Delete a CodeSpace resource.
@param resource_id str
@throws BadRequest 'resource_id parameter is empty'
@throws NotFound 'object with specified id does not exist.'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.delete_code_space(resource_id)
except:
raise NotFound('object with specified id does not exist.')
return
def force_delete_code_space(self, resource_id=''):
""" Force delete a CodeSpace resource.
@param resource_id str
@throws BadRequest 'resource_id parameter is empty'
@throws NotFound 'object with specified id does not exist.'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.force_delete_code_space(resource_id)
except:
raise NotFound('object with specified id does not exist.')
return
def read_codesets_by_name(self, resource_id='', names=None):
"""Read CodeSpace (id=resource_id) for list of codeset name(s); return list of CodeSets.
@param resource_id str
@param names []
@throws: BadRequest 'resource_id parameter is empty'
@throws: BadRequest 'names parameter is empty'
@throws NotFound 'object with specified resource_id (type RT.CodeSpace) does not exist'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not names:
raise BadRequest('names parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
codesets = at.read_codesets_by_name(resource_id, names)
return codesets
def read_codes_by_name(self, resource_id='', names=None, id_only=False):
"""Read CodeSpace with resource_id and for list of Code name(s); return list of Codes.
@param resource_id str
@param names []
@param id_only bool # optional
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'names parameter is empty'
@throws NotFound 'object with specified resource_id (type RT.CodeSpace) does not exist'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not names:
raise BadRequest('names parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
codes = at.read_codes_by_name(resource_id, names, id_only)
return codes
def update_codes(self, resource_id='', codes=None):
"""Read CodeSpace with resource_id, update Codes identified in dictionary of codes.
@param resource_id str
@param codes {}
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'codes parameter is empty'
@throws NotFound 'object with specified resource_id and type=RT.CodeSpace does not exist'
@throws NotFound 'code not found in CodeSpace (with id=resource_id).'
@throws NotFound 'code provided for update with empty name.'
@throws NotFound 'codes not found in CodeSpace (with id=resource_id).'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not codes:
raise BadRequest('codes parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.update_codes(resource_id, codes)
def update_codesets(self, resource_id='', codesets=None):
"""Read CodeSpace, with resource_id, and update codesets as identified in
the dictionary codesets.
@param resource_id str
@param codesets {}
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'codesets parameter is empty'
@throws NotFound 'object with specified resource_id and type=RT.CodeSpace does not exist'
@throws NotFound 'CodeSet not found in CodeSpace.'
@throws NotFound 'CodeSet provided for update with empty name.'
@throws NotFound 'CodeSpace codesets is empty.'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not codesets:
raise BadRequest('codesets parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
at.update_codesets(resource_id, codesets)
def delete_codes(self, resource_id='', names=None):
"""Delete Codes (identified in names list) from CodeSpace; return list of Codes in CodeSpace.
Check if code is used by code_set; if so, remove code fom code_set, update code_set and then
delete the code.
@param resource_id str
@param names []
@retval codes_list []
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'names parameter is empty'
@throws NotFound 'object with resource_id and type RT.CodeSpace does not exist'
"""
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not names:
raise BadRequest('names parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
codes_list = at.delete_codes(resource_id, names)
return codes_list
def delete_codesets(self, resource_id='', names=None):
"""Delete CodeSets identified in list names; return list of CodeSets in CodeSpace.
@param resource_id str
@param names []
@retval codeset_list []
@throws BadRequest 'resource_id parameter is empty'
@throws BadRequest 'names parameter is empty'
@throws NotFound 'object with resource_id and type RT.CodeSpace does not exist'
"""
# TODO: return value scheduled to change
if not resource_id:
raise BadRequest('resource_id parameter is empty')
if not names:
raise BadRequest('names parameter is empty')
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
codeset_list = at.delete_codesets(resource_id, names)
return codeset_list
############################
#
# START - Services for Marine Asset Management
#
############################
def declare_asset_tracking_resources(self, content='', content_type='', content_encoding=''):
"""Read content which defines asset management resources, instantiate resources;
return dictionary of resource ids by category of resource type.
@param content encoded blob # binascii.b2a_hex(content)
@param content_type file_descriptor.mimetype # file descriptor type
@param content_encoding 'b2a_hex' # encoding (set to binascii.b2a_hex)
@retval response {} # dict of resource ids by category of resource type
@throws BadRequest 'content parameter is empty'
@throws BadRequest 'declare_asset_tracking_resources error'
@throws BadRequest (from _process_xls)
@throws NotFound (from _process_xls)
@throws Inconsistent (from _process_xls)
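Example (illustrative; the file name and mimetype are assumptions):
import binascii
with open('assets.xls', 'rb') as f:
content = binascii.b2a_hex(f.read())
response = observatory_management.declare_asset_tracking_resources(content,
content_type='application/vnd.ms-excel', content_encoding='b2a_hex')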
"""
if not content:
raise BadRequest('content parameter is empty')
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
response = at._process_xls(content, content_type, content_encoding)
except BadRequest, Arguments:
raise BadRequest(Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound(Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent(Arguments.get_error_message())
except:
raise BadRequest('declare_asset_tracking_resources error')
return response
def asset_tracking_report(self):
"""Query system instances of marine tracking resources (CodeSpaces,Codes,CodeSets, Assets, AssetTypes, EventDurations,
EventDurationTypes) produce xls workbook and return encoded content.
@retval content binascii.b2a_hex(xls workbook)
@throws BadRequest 'asset tracking report failed to produce xls'
@throws BadRequest (from _download_xls)
@throws NotFound (from _download_xls)
@throws Inconsistent (from _download_xls)
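Example (illustrative):
import binascii
content = observatory_management.asset_tracking_report()
with open('report.xls', 'wb') as f:
f.write(binascii.a2b_hex(content))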
"""
try:
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
content = at._download_xls()
except BadRequest, Arguments:
raise BadRequest(Arguments.get_error_message())
except NotFound, Arguments:
raise NotFound(Arguments.get_error_message())
except Inconsistent, Arguments:
raise Inconsistent(Arguments.get_error_message())
except:
raise BadRequest('asset tracking report failed to produce xls')
return content
# Deprecate - helper picklists for altids (Asset and Event[Duration]s)
def get_altids(self, res_type=''):
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
picklist = at.get_altids(res_type)
return picklist
# helper picklists for altids (Asset and Event[Duration]s)
def get_assets_picklist(self, id_only=''):
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
picklist = at.get_picklist(RT.Asset, id_only)
return picklist
def get_events_picklist(self, id_only=''):
at = AssetTracking(self,container=self.container, enhanced_rr=self.RR2, rr=self.RR, node=self.container.node)
picklist = at.get_picklist(RT.EventDuration, id_only)
return picklist
# -------------------------------------------------------------------------
# Marine Asset Management RESOURCES (end)
# -------------------------------------------------------------------------
############################
#
# EXTENDED RESOURCES
#
############################
# TODO: Route every incoming site-extension call through this operation
def get_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
site_extension = None
# Decide what to do based on the site type
site_obj = self.RR2.read(site_id)
site_type = site_obj._get_type()
        if site_type == RT.InstrumentSite:
            site_extension = self._get_instrument_site_extension(site_id, ext_associations, ext_exclude, user_id)
        elif site_type in (RT.Observatory, RT.Subsite, RT.PlatformSite):
            site_extension = self._get_platform_site_extension(site_id, ext_associations, ext_exclude, user_id)
else:
raise BadRequest("Unknown site type '%s' for site %s" % (site_type, site_id))
from ion.util.extresource import strip_resource_extension, get_matchers, matcher_DataProduct, matcher_DeviceModel, \
matcher_Device, matcher_UserInfo
matchers = get_matchers([matcher_DataProduct, matcher_DeviceModel, matcher_Device, matcher_UserInfo])
strip_resource_extension(site_extension, matchers=matchers)
return site_extension
# TODO: Redundant, remove operation and use get_site_extension
def get_observatory_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)
# TODO: Redundant, remove operation and use get_site_extension
def get_platform_station_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)
# TODO: Redundant, remove operation and use get_site_extension
def get_platform_assembly_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)
# TODO: Redundant, remove operation and use get_site_extension
def get_platform_component_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)
# TODO: Redundant, remove operation and use get_site_extension
def get_instrument_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
return self.get_site_extension(site_id, ext_associations, ext_exclude, user_id)
def _get_site_device(self, site_id, device_relations):
site_devices = [tup[1] for tup in device_relations.get(site_id, []) if tup[2] in (RT.InstrumentDevice, RT.PlatformDevice)]
if len(site_devices) > 1:
log.error("Inconsistent: Site %s has multiple devices: %s", site_id, site_devices)
if not site_devices:
return None
return site_devices[0]
def _get_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
"""Returns a site extension object containing common information, plus some helper objects
@param site_id str
@param ext_associations dict
@param ext_exclude list
@retval TBD
@throws BadRequest A parameter is missing
@throws NotFound An object with the specified observatory_id does not exist
"""
try:
if not site_id:
raise BadRequest("The site_id parameter is empty")
extended_resource_handler = ExtendedResourceContainer(self)
extended_site = extended_resource_handler.create_extended_resource_container(
extended_resource_type=OT.SiteExtension,
resource_id=site_id,
computed_resource_type=OT.SiteComputedAttributes,
ext_associations=ext_associations,
ext_exclude=ext_exclude,
user_id=user_id)
RR2 = EnhancedResourceRegistryClient(self.clients.resource_registry)
outil = ObservatoryUtil(self, enhanced_rr=RR2, device_status_mgr=DeviceStateManager())
# Find all subsites and devices
site_resources, site_children = outil.get_child_sites(parent_site_id=site_id, include_parents=False, id_only=False)
site_ids = site_resources.keys() + [site_id] # IDs of this site and all child sites
device_relations = outil.get_device_relations(site_ids)
        # Set parent and immediate child sites
        parent_site_ids = [a.s for a in RR2.filter_cached_associations(PRED.hasSite, lambda a: a.o == site_id)]
if parent_site_ids:
extended_site.parent_site = RR2.read(parent_site_ids[0])
else:
extended_site.parent_site = None
extended_site.sites = [site_resources[ch_id] for ch_id in site_children[site_id]] if site_children.get(site_id, None) is not None else []
# Set all nested child devices, remove any dups
instrument_device_ids = list( set( [tup[1] for (parent,dlst) in device_relations.iteritems() for tup in dlst if tup[2] == RT.InstrumentDevice] ) )
platform_device_ids = list( set( [tup[1] for (parent,dlst) in device_relations.iteritems() for tup in dlst if tup[2] == RT.PlatformDevice] ) )
device_ids = list(set(instrument_device_ids + platform_device_ids))
device_objs = self.RR2.read_mult(device_ids)
devices_by_id = dict(zip(device_ids, device_objs))
extended_site.instrument_devices = [devices_by_id[did] for did in instrument_device_ids]
extended_site.platform_devices = [devices_by_id[did] for did in platform_device_ids]
# Set primary device at immediate child sites
extended_site.sites_devices = []
for ch_site in extended_site.sites:
device_id = self._get_site_device(ch_site._id, device_relations)
extended_site.sites_devices.append(devices_by_id.get(device_id, None))
extended_site.portal_instruments = extended_site.sites_devices # ALIAS
# Set deployments
RR2.cache_predicate(PRED.hasDeployment)
deployment_assocs = RR2.filter_cached_associations(PRED.hasDeployment, lambda a: a.s in site_ids)
deployment_ids = [a.o for a in deployment_assocs]
deployment_objs = RR2.read_mult(list(set(deployment_ids)))
extended_site.deployments = deployment_objs
# Get current active deployment. May be site or parent sites
dep_util = DeploymentUtil(self.container)
extended_site.deployment = dep_util.get_active_deployment(site_id, is_site=True, rr2=RR2)
# Set data products
RR2.cache_predicate(PRED.hasSource)
dataproduct_assocs = RR2.filter_cached_associations(PRED.hasSource, lambda a: a.o in site_ids)
dataproduct_ids = [a.s for a in dataproduct_assocs]
dataproduct_objs = RR2.read_mult(list(set(dataproduct_ids)))
extended_site.data_products = dataproduct_objs
log.debug("Building list of model objs")
# Build a lookup for device models via hasModel predicates.
# lookup is a 2d associative array of [subject type][subject id] -> object id
RR2.cache_predicate(PRED.hasModel)
lookup = {rt : {} for rt in [RT.InstrumentDevice, RT.PlatformDevice]}
for a in RR2.filter_cached_associations(PRED.hasModel, lambda assn: assn.st in lookup):
lookup[a.st][a.s] = a.o
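        # e.g. lookup == {RT.InstrumentDevice: {'<device_id>': '<model_id>'},
        #                 RT.PlatformDevice:   {'<device_id>': '<model_id>'}}
        # (placeholder ids shown for illustration only)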
def retrieve_model_objs(rsrc_list, object_type):
# rsrc_list is devices that need models looked up. object_type is the resource type (a device)
# not all devices have models (represented as None), which kills read_mult. so, extract the models ids,
# look up all the model ids, then create the proper output
model_list = [lookup[object_type].get(r._id) for r in rsrc_list]
model_uniq = list(set([m for m in model_list if m is not None]))
model_objs = self.RR2.read_mult(model_uniq)
model_dict = dict(zip(model_uniq, model_objs))
return [model_dict.get(m) for m in model_list]
extended_site.instrument_models = retrieve_model_objs(extended_site.instrument_devices, RT.InstrumentDevice)
extended_site.platform_models = retrieve_model_objs(extended_site.platform_devices, RT.PlatformDevice)
primary_device_id = self._get_site_device(site_id, device_relations)
# Filtered subsites by type/alt type
def fs(resource_type, filter_fn):
both = lambda s: ((resource_type == s._get_type()) and filter_fn(s))
return filter(both, site_resources.values())
extended_site.platform_station_sites = fs(RT.PlatformSite, lambda s: s.alt_resource_type == "StationSite")
extended_site.platform_component_sites = fs(RT.PlatformSite, lambda s: s.alt_resource_type == "PlatformComponentSite")
extended_site.platform_assembly_sites = fs(RT.PlatformSite, lambda s: s.alt_resource_type == "PlatformAssemblySite")
extended_site.instrument_sites = fs(RT.InstrumentSite, lambda _: True)
context = dict(
extended_site=extended_site,
enhanced_RR=RR2,
site_device_id=primary_device_id,
site_resources=site_resources,
site_children=site_children,
device_relations=device_relations,
outil=outil
)
return context
except:
log.error('_get_site_extension failed', exc_info=True)
raise
def _get_platform_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
"""Creates a SiteExtension and status for platforms and higher level sites"""
log.debug("_get_platform_site_extension")
context = self._get_site_extension(site_id, ext_associations, ext_exclude, user_id)
extended_site, RR2, platform_device_id, site_resources, site_children, device_relations, outil = \
context["extended_site"], context["enhanced_RR"], context["site_device_id"], \
context["site_resources"], context["site_children"], context["device_relations"], context["outil"]
statuses = outil.get_status_roll_ups(site_id, include_structure=True)
portal_status = []
if extended_site.portal_instruments:
for x in extended_site.portal_instruments:
if x:
portal_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
portal_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.portal_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=portal_status)
else:
extended_site.computed.portal_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
site_status = []
if extended_site.sites:
for x in extended_site.sites:
if x:
site_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
site_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.site_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=site_status)
else:
extended_site.computed.site_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
# create the list of station status from the overall status list
subset_status = []
            for site in extended_site.platform_station_sites:
                if site not in extended_site.sites:
                    log.error("Platform Site does not exist in the full list of sites. id: %s", site._id)
                    continue
                idx = extended_site.sites.index(site)
                subset_status.append(site_status[idx])
extended_site.computed.station_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=subset_status)
inst_status = []
if extended_site.instrument_devices:
for x in extended_site.instrument_devices:
if x:
inst_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
inst_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=inst_status)
else:
extended_site.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
plat_status = []
if extended_site.platform_devices:
for x in extended_site.platform_devices:
if x:
plat_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
plat_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=plat_status)
else:
extended_site.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
comms_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_COMMS,DeviceStatusType.STATUS_UNKNOWN)
power_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_POWER,DeviceStatusType.STATUS_UNKNOWN)
data_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_DATA,DeviceStatusType.STATUS_UNKNOWN)
location_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_LOCATION,DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.communications_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=comms_rollup)
        extended_site.computed.data_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=data_rollup)
        extended_site.computed.location_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=location_rollup)
        extended_site.computed.power_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=power_rollup)
dep_util = DeploymentUtil(self.container)
extended_site.deployment_info = dep_util.describe_deployments(extended_site.deployments,
status_map=statuses)
return extended_site
def _get_instrument_site_extension(self, site_id='', ext_associations=None, ext_exclude=None, user_id=''):
"""Creates a SiteExtension and status for instruments"""
context = self._get_site_extension(site_id, ext_associations, ext_exclude, user_id)
extended_site, RR2, inst_device_id, site_resources, site_children, device_relations, outil = \
context["extended_site"], context["enhanced_RR"], context["site_device_id"], \
context["site_resources"], context["site_children"], context["device_relations"], context["outil"]
statuses = outil.get_status_roll_ups(site_id, include_structure=True)
comms_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_COMMS,DeviceStatusType.STATUS_UNKNOWN)
power_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_POWER,DeviceStatusType.STATUS_UNKNOWN)
data_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_DATA,DeviceStatusType.STATUS_UNKNOWN)
location_rollup = statuses.get(site_id,{}).get(AggregateStatusType.AGGREGATE_LOCATION,DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.communications_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=comms_rollup)
        extended_site.computed.data_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=data_rollup)
        extended_site.computed.location_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=location_rollup)
        extended_site.computed.power_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=power_rollup)
instrument_status = []
if extended_site.instrument_devices:
for x in extended_site.instrument_devices:
if x:
instrument_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
instrument_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_site.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=instrument_status)
else:
extended_site.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
extended_site.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=[])
extended_site.computed.site_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=[])
extended_site.computed.portal_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=[])
dep_util = DeploymentUtil(self.container)
extended_site.deployment_info = dep_util.describe_deployments(extended_site.deployments,
status_map=statuses)
return extended_site
def get_deployment_extension(self, deployment_id='', ext_associations=None, ext_exclude=None, user_id=''):
if not deployment_id:
raise BadRequest("The deployment_id parameter is empty")
extended_resource_handler = ExtendedResourceContainer(self)
extended_deployment = extended_resource_handler.create_extended_resource_container(
extended_resource_type=OT.DeploymentExtension,
resource_id=deployment_id,
computed_resource_type=OT.DeploymentComputedAttributes,
ext_associations=ext_associations,
ext_exclude=ext_exclude,
user_id=user_id)
if not extended_deployment.device or not extended_deployment.site \
or not hasattr(extended_deployment.device, '_id') \
or not hasattr(extended_deployment.site, '_id'):
return extended_deployment
#raise Inconsistent('deployment %s should be associated with a device and a site' % deployment_id)
log.debug('have device: %r\nand site: %r', extended_deployment.device.__dict__, extended_deployment.site.__dict__)
RR2 = EnhancedResourceRegistryClient(self.clients.resource_registry)
finder = RelatedResourcesCrawler()
get_assns = finder.generate_related_resources_partial(RR2, [PRED.hasDevice])
# search from PlatformDevice to subplatform or InstrumentDevice
search_down = get_assns({PRED.hasDevice: (True, False)}, [RT.InstrumentDevice, RT.PlatformDevice])
# collect ids of devices below deployment target
platform_device_ids = set()
instrument_device_ids = set()
# make sure main device in deployment is in the list
if extended_deployment.device.type_==RT.InstrumentDevice:
instrument_device_ids.add(extended_deployment.device._id)
else:
platform_device_ids.add(extended_deployment.device._id)
for a in search_down(extended_deployment.device._id, -1):
if a.o != extended_deployment.device._id:
if a.ot == RT.InstrumentDevice:
instrument_device_ids.add(a.o)
else: # a.ot == RT.PlatformDevice:
platform_device_ids.add(a.o)
# get sites (portals)
extended_deployment.computed.portals = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=[])
subsite_ids = set()
device_by_site = { extended_deployment.site._id: extended_deployment.device._id }
for did in platform_device_ids:
related_sites = RR2.find_platform_site_ids_by_platform_device_using_has_device(did)
for sid in related_sites:
subsite_ids.add(sid)
device_by_site[sid] = did
for did in instrument_device_ids:
related_sites = RR2.find_instrument_site_ids_by_instrument_device_using_has_device(did)
for sid in related_sites:
subsite_ids.add(sid)
device_by_site[sid] = did
# sort the objects into the lists to be displayed
ids = list(platform_device_ids|instrument_device_ids|subsite_ids)
device_by_id = { extended_deployment.device._id: extended_deployment.device }
objs = self.RR.read_mult(ids)
for obj in objs:
if obj.type_==RT.InstrumentDevice:
extended_deployment.instrument_devices.append(obj)
elif obj.type_==RT.PlatformDevice:
extended_deployment.platform_devices.append(obj)
else: # InstrumentSite or PlatformSite
extended_deployment.computed.portals.value.append(obj)
# get associated models for all devices
devices = list(platform_device_ids|instrument_device_ids)
assocs = self.RR.find_associations(anyside=list(devices), id_only=False)
        ## WORKAROUND: find_associations doesn't support anyside + predicate,
        ## so fetch by anyside and filter for the hasModel predicate afterwards
        assocs = [a for a in assocs if a.p == PRED.hasModel]
model_id_by_device = { a.s: a.o for a in assocs }
model_ids = set( [ a.o for a in assocs ])
models = self.RR.read_mult( list(model_ids) )
model_by_id = { o._id: o for o in models }
extended_deployment.instrument_models = [ model_by_id[model_id_by_device[d._id]] for d in extended_deployment.instrument_devices ]
extended_deployment.platform_models = [ model_by_id[model_id_by_device[d._id]] for d in extended_deployment.platform_devices ]
for p in extended_deployment.computed.portals.value:
if p._id in device_by_site and device_by_site[p._id] in device_by_id:
extended_deployment.portal_instruments.append( device_by_id[device_by_site[p._id]] )
# TODO -- all status values
#
#status: !ComputedIntValue
## combined list of sites and their status
##@ResourceType=InstrumentSite,PlatformSite
#portal_status: !ComputedListValue
## status of device lists
#instrument_status: !ComputedListValue
#platform_status: !ComputedListValue
from ion.util.extresource import strip_resource_extension, get_matchers, matcher_DataProduct, matcher_DeviceModel, \
matcher_Device, matcher_UserInfo
matchers = get_matchers([matcher_DataProduct, matcher_DeviceModel, matcher_Device, matcher_UserInfo])
strip_resource_extension(extended_deployment, matchers=matchers)
return extended_deployment
#-----------------------------------------------
# COMPUTED RESOURCES
#-----------------------------------------------
def get_marine_facility_extension(self, org_id='', ext_associations=None, ext_exclude=None, user_id=''):
"""Returns an MarineFacilityOrgExtension object containing additional related information
@param org_id str
@param ext_associations dict
@param ext_exclude list
@retval observatory ObservatoryExtension
@throws BadRequest A parameter is missing
@throws NotFound An object with the specified observatory_id does not exist
"""
if not org_id:
raise BadRequest("The org_id parameter is empty")
extended_resource_handler = ExtendedResourceContainer(self)
extended_org = extended_resource_handler.create_extended_resource_container(
extended_resource_type=OT.MarineFacilityOrgExtension,
resource_id=org_id,
computed_resource_type=OT.MarineFacilityOrgComputedAttributes,
ext_associations=ext_associations,
ext_exclude=ext_exclude,
user_id=user_id,
negotiation_status=NegotiationStatusEnum.OPEN)
RR2 = EnhancedResourceRegistryClient(self.RR)
RR2.cache_predicate(PRED.hasModel)
RR2.cache_predicate(PRED.hasDevice)
outil = ObservatoryUtil(self, enhanced_rr=RR2, device_status_mgr=DeviceStateManager())
#Fill out service request information for requesting data products
extended_org.data_products_request.service_name = 'resource_registry'
extended_org.data_products_request.service_operation = 'find_objects'
extended_org.data_products_request.request_parameters = {
'subject': org_id,
'predicate': 'hasResource',
'object_type': 'DataProduct',
'id_only': False,
'limit': 10,
'skip': 0
}
#Fill out service request information for requesting marine tracking resources - Assets
extended_org.assets_request.service_name = 'resource_registry'
extended_org.assets_request.service_operation = 'find_objects'
extended_org.assets_request.request_parameters = {
'subject': org_id,
'predicate': 'hasResource',
'object_type': 'Asset',
'id_only': False,
'limit': 10,
'skip': 0
}
#Fill out service request information for requesting marine tracking resources - AssetTypes
extended_org.asset_types_request.service_name = 'resource_registry'
extended_org.asset_types_request.service_operation = 'find_objects'
extended_org.asset_types_request.request_parameters = {
'subject': org_id,
'predicate': 'hasResource',
'object_type': 'AssetType',
'id_only': False,
'limit': 10,
'skip': 0
}
#Fill out service request information for requesting marine tracking resources - EventDuration
extended_org.event_durations_request.service_name = 'resource_registry'
extended_org.event_durations_request.service_operation = 'find_objects'
extended_org.event_durations_request.request_parameters = {
'subject': org_id,
'predicate': 'hasResource',
'object_type': 'EventDuration',
'id_only': False,
'limit': 10,
'skip': 0
}
#Fill out service request information for requesting marine tracking resources - EventDurationTypes
extended_org.event_duration_types_request.service_name = 'resource_registry'
extended_org.event_duration_types_request.service_operation = 'find_objects'
extended_org.event_duration_types_request.request_parameters = {
'subject': org_id,
'predicate': 'hasResource',
'object_type': 'EventDurationType',
'id_only': False,
'limit': 10,
'skip': 0
}
# extended object contains list of member ActorIdentity, so need to change to user info
rr_util = ResourceRegistryUtil(self.container)
        extended_org.members = rr_util.get_actor_users(extended_org.members)
        # Convert Negotiations to OrgUserNegotiationRequest
        extended_org.open_requests = self._convert_negotiations_to_requests(extended_org, extended_org.open_requests)
        extended_org.closed_requests = self._convert_negotiations_to_requests(extended_org, extended_org.closed_requests)
# lookup all hasModel predicates
# lookup is a 2d associative array of [subject type][subject id] -> object id (model)
lookup = dict([(rt, {}) for rt in [RT.InstrumentDevice, RT.PlatformDevice]])
for a in RR2.filter_cached_associations(PRED.hasModel, lambda assn: assn.st in lookup):
if a.st in lookup:
lookup[a.st][a.s] = a.o
def retrieve_model_objs(rsrc_list, object_type):
# rsrc_list is devices that need models looked up. object_type is the resource type (a device)
# not all devices have models (represented as None), which kills read_mult. so, extract the models ids,
# look up all the model ids, then create the proper output
model_list = [lookup[object_type].get(r._id) for r in rsrc_list]
model_uniq = list(set([m for m in model_list if m is not None]))
model_objs = self.clients.resource_registry.read_mult(model_uniq)
model_dict = dict(zip(model_uniq, model_objs))
return [model_dict.get(m) for m in model_list]
extended_org.instrument_models = retrieve_model_objs(extended_org.instruments, RT.InstrumentDevice)
extended_org.platform_models = retrieve_model_objs(extended_org.platforms, RT.PlatformDevice)
statuses = outil.get_status_roll_ups(org_id, include_structure=True)
site_status = []
if extended_org.sites:
for x in extended_org.sites:
if x:
site_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
site_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_org.computed.site_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=site_status)
else:
extended_org.computed.site_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
inst_status = []
if extended_org.instruments:
for x in extended_org.instruments:
if x:
inst_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
inst_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_org.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=inst_status)
else:
extended_org.computed.instrument_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
plat_status = []
if extended_org.platforms:
for x in extended_org.platforms:
if x:
plat_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
plat_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_org.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=plat_status)
else:
extended_org.computed.platform_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
subset = []
for site in extended_org.station_sites:
if site.alt_resource_type=='StationSite':
subset.append(site)
extended_org.station_sites = subset
station_status = []
if extended_org.station_sites:
for x in extended_org.station_sites:
if x:
station_status.append(statuses.get(x._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN))
else:
station_status.append(DeviceStatusType.STATUS_UNKNOWN)
extended_org.computed.station_status = ComputedListValue(status=ComputedValueAvailability.PROVIDED, value=station_status)
else:
extended_org.computed.station_status = ComputedListValue(status=ComputedValueAvailability.NOTAVAILABLE)
comms_rollup = statuses.get(org_id,{}).get(AggregateStatusType.AGGREGATE_COMMS,DeviceStatusType.STATUS_UNKNOWN)
power_rollup = statuses.get(org_id,{}).get(AggregateStatusType.AGGREGATE_POWER,DeviceStatusType.STATUS_UNKNOWN)
data_rollup = statuses.get(org_id,{}).get(AggregateStatusType.AGGREGATE_DATA,DeviceStatusType.STATUS_UNKNOWN)
location_rollup = statuses.get(org_id,{}).get(AggregateStatusType.AGGREGATE_LOCATION,DeviceStatusType.STATUS_UNKNOWN)
extended_org.computed.communications_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=comms_rollup)
        extended_org.computed.data_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=data_rollup)
        extended_org.computed.location_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=location_rollup)
        extended_org.computed.power_status_roll_up = ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=power_rollup)
dep_util = DeploymentUtil(self.container)
extended_org.deployment_info = dep_util.describe_deployments(extended_org.deployments,
status_map=statuses)
from ion.util.extresource import strip_resource_extension, get_matchers, matcher_DataProduct, matcher_DeviceModel, \
matcher_Device, matcher_UserInfo, matcher_MarineAsset
matchers = get_matchers([matcher_DataProduct, matcher_DeviceModel, matcher_Device, matcher_UserInfo, matcher_MarineAsset])
strip_resource_extension(extended_org, matchers=matchers)
return extended_org
def _get_root_platforms(self, RR2, platform_device_list):
        # get all relevant association objects
filter_fn = lambda a: a.o in platform_device_list
# get child -> parent dict
lookup = dict([(a.o, a.s) for a in RR2.filter_cached_associations(PRED.hasDevice, filter_fn)])
# root platforms have no parent, or a parent that's not in our list
return [r for r in platform_device_list if (r not in lookup or (lookup[r] not in platform_device_list))]
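    # e.g. with hasDevice edges A->B and B->C and platform_device_list=[B, C],
    # only B is returned: C's parent (B) is in the list, while B's parent (A) is not.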
# return a table of device statuses for all given device ids
def _get_master_status_table(self, RR2, site_tree_ids):
platformdevice_tree_ids = []
for s in site_tree_ids:
platformdevice_tree_ids += RR2.find_objects(s, PRED.hasDevice, RT.PlatformDevice, True)
plat_roots = self._get_root_platforms(RR2, platformdevice_tree_ids)
# build id -> aggstatus lookup table
master_status_table = {}
for plat_root_id in plat_roots:
agg_status, _ = self.agent_status_builder.get_cumulative_status_dict(plat_root_id)
            if agg_status is None:
log.warn("Can't get agg status for platform %s, ignoring", plat_root_id)
else:
for k, v in agg_status.iteritems():
master_status_table[k] = v
return master_status_table
# based on ALL the site ids in this tree, return a site rollup list corresponding to each site in the site_id_list
def _get_site_rollup_list(self, RR2, master_status_table, site_id_list):
# get rollup for each site
master_status_rollup_list = []
for s in site_id_list:
#_, underlings = self.outil.get_child_sites(parent_site_id=s, id_only=True)
master_status_rollup_list.append(self.agent_status_builder._crush_status_dict(
self._get_site_rollup_dict(RR2, master_status_table, s)))
return master_status_rollup_list
    # based on the master status table, return a site rollup dict for the given site_id
def _get_site_rollup_dict(self, RR2, master_status_table, site_id):
outil = ObservatoryUtil(self, enhanced_rr=RR2)
attr1, underlings = outil.get_child_sites(parent_site_id=site_id, id_only=True)
def collect_all_children(site_id, child_site_struct, child_list):
#walk the tree of site children and put all site ids (all the way down the hierarchy) into one list
children = child_site_struct.get(site_id, [])
for child in children:
child_list.append(child)
#see if this child has children
more_children = child_site_struct.get(child, [])
if more_children:
collect_all_children(child, child_site_struct, child_list)
log.debug('collect_all_children child_list: %s', child_list)
child_list = list( set(child_list ) )
return child_list
        all_site_ids = [site_id]
        all_site_ids = collect_all_children(site_id, underlings, all_site_ids)
        site_aggregate = {}
#all_site_ids = underlings.keys()
all_device_ids = []
for s in all_site_ids:
all_device_ids += RR2.find_objects(s, PRED.hasDevice, RT.PlatformDevice, True)
all_device_ids += RR2.find_objects(s, PRED.hasDevice, RT.InstrumentDevice, True)
log.debug("Calculating cumulative rollup values for all_device_ids = %s", all_device_ids)
for k, v in AggregateStatusType._str_map.iteritems():
aggtype_list = [master_status_table.get(d, {}).get(k, DeviceStatusType.STATUS_UNKNOWN) for d in all_device_ids]
log.debug("aggtype_list for %s is %s", v, zip(all_device_ids, aggtype_list))
site_aggregate[k] = self.agent_status_builder._crush_status_list(aggtype_list)
return site_aggregate
def _get_platform_rollup_list(self, RR2, master_status_table, platform_id_list):
finder = RelatedResourcesCrawler()
get_assns = finder.generate_related_resources_partial(RR2, [PRED.hasDevice])
full_crawllist = [RT.InstrumentDevice, RT.PlatformDevice]
search_down = get_assns({PRED.hasDevice: (True, False)}, full_crawllist)
# get rollup for each platform device
master_status_rollup_list = []
for p in platform_id_list:
# the searches return a list of association objects, so compile all the ids by extracting them
underlings = set([])
            # collect this platform and every device below it
for a in search_down(p, -1):
underlings.add(a.o)
underlings.add(p)
master_status_rollup_list.append(self.agent_status_builder._crush_status_list(
[self.agent_status_builder._crush_status_dict(master_status_table.get(k, {})) for k in underlings]
))
return master_status_rollup_list
def _convert_negotiations_to_requests(self, extended_marine_facility=None, negotiations=None):
assert isinstance(extended_marine_facility, MarineFacilityOrgExtension)
assert isinstance(negotiations, list)
#Get all associations for user info
assoc_list = self.clients.resource_registry.find_associations(predicate=PRED.hasInfo, id_only=False)
ret_list = []
followup_list = defaultdict(list)
for neg in negotiations:
request = IonObject(OT.OrgUserNegotiationRequest, ts_updated=neg.ts_updated, negotiation_id=neg._id,
negotiation_type=NegotiationTypeEnum._str_map[neg.negotiation_type],
negotiation_status=NegotiationStatusEnum._str_map[neg.negotiation_status],
originator=ProposalOriginatorEnum._str_map[neg.proposals[-1].originator],
request_type=neg.proposals[-1].type_,
description=neg.description, reason=neg.reason,
org_id=neg.proposals[-1].provider)
# since this is a proxy for the Negotiation object, simulate its id to help the UI deal with it
request._id = neg._id
actor_assoc = [ a for a in assoc_list if a.s == neg.proposals[-1].consumer ]
if actor_assoc:
member_assoc = [ m for m in extended_marine_facility.members if m._id == actor_assoc[0].o ]
if member_assoc:
request.user_id = member_assoc[0]._id
request.name = member_assoc[0].name
else:
followup_list[actor_assoc[0].o].append(request)
ret_list.append(request)
# assign names/user_ids to any requests that weren't in the members list, likely enroll requests
if len(followup_list):
user_infos = self.clients.resource_registry.read_mult(followup_list.keys())
udict = {}
for u in user_infos:
udict[u._id] = u
for k, v in followup_list.iteritems():
for request in v:
request.user_id = k
request.name = udict[k].name
return ret_list
def check_deployment_activation_policy(self, process, message, headers):
try:
gov_values = GovernanceHeaderValues(headers=headers, process=process, resource_id_required=False)
except Inconsistent, ex:
return False, ex.message
resource_id = message.get("deployment_id", None)
if not resource_id:
return False, '%s(%s) has been denied - no deployment_id argument provided' % (process.name, gov_values.op)
# Allow actor to activate/deactivate deployment in an org where the actor has the appropriate role
orgs,_ = self.clients.resource_registry.find_subjects(subject_type=RT.Org, predicate=PRED.hasResource, object=resource_id, id_only=False)
for org in orgs:
if (has_org_role(gov_values.actor_roles, org.org_governance_name, [ORG_MANAGER_ROLE, OBSERVATORY_OPERATOR])):
                log.debug("returning true: %s", gov_values.actor_roles)
return True, ''
        return False, '%s(%s) has been denied since the user is not a member in any org to which the deployment id %s belongs' % (process.name, gov_values.op, resource_id)
document.py | """
Base class for objects that are backed by database documents.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.serial as etas
class Document(object):
"""Base class for objects that are associated with
:class:`fiftyone.core.dataset.Dataset` instances and are backed by
documents in database collections.
Args:
dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which the
document belongs
"""
def __init__(self, dataset=None):
self._dataset = dataset
def __dir__(self):
return super().__dir__() + list(self.field_names)
def __getattr__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
if name != "_doc":
return self._doc.get_field(name)
else:
raise
def __setattr__(self, name, value):
if name.startswith("_") or (
hasattr(self, name) and not self._doc.has_field(name)
):
super().__setattr__(name, value)
else:
try:
self._secure_media(name, value)
except AttributeError:
pass
self._doc.__setattr__(name, value)
def __delattr__(self, name):
try:
self.__delitem__(name)
except KeyError:
super().__delattr__(name)
def __delitem__(self, field_name):
try:
self.clear_field(field_name)
except ValueError as e:
raise KeyError(e.args[0])
def __copy__(self):
return self.copy()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._doc == other._doc
@property
def id(self):
"""The ID of the document, or ``None`` if it has not been added to the
database.
"""
return str(self._doc.id) if self._in_db else None
@property
def _id(self):
"""The ObjectId of the document, or ``None`` if it has not been added
to the database.
"""
return self._doc.id if self._in_db else None
@property
def ingest_time(self):
"""The time the document was added to the database, or ``None`` if it
has not been added to the database.
"""
return self._doc.ingest_time
@property
    def in_dataset(self):
        """Whether the document has been added to a dataset."""
        return self.dataset is not None
    @property
def dataset(self):
"""The dataset to which this document belongs, or ``None`` if it has
not been added to a dataset.
"""
return self._dataset
@property
def field_names(self):
"""An ordered tuple of the names of the fields of this document."""
return self._doc.field_names
@property
def _in_db(self):
"""Whether the underlying :class:`fiftyone.core.odm.Document` has
been inserted into the database.
"""
return self._doc.in_db
@property
def _skip_iter_field_names(self):
"""A tuple of names of fields to skip when :meth:`iter_fields` is
called.
"""
return tuple()
def _get_field_names(self, include_private=False):
"""Returns an ordered tuple of field names of this document.
Args:
include_private (False): whether to include private fields
Returns:
a tuple of field names
"""
return self._doc._get_field_names(include_private=include_private)
def get_field(self, field_name):
"""Gets the value of a field of the document.
Args:
field_name: the field name
Returns:
the field value
Raises:
AttributeError: if the field does not exist
"""
return self._doc.get_field(field_name)
def set_field(self, field_name, value, create=True):
"""Sets the value of a field of the document.
Args:
field_name: the field name
value: the field value
create (True): whether to create the field if it does not exist
Raises:
ValueError: if ``field_name`` is not an allowed field name or does
not exist and ``create == False``
"""
if field_name.startswith("_"):
raise ValueError(
"Invalid field name: '%s'. Field names cannot start with '_'"
% field_name
)
self._doc.set_field(field_name, value, create=create)
def update_fields(self, fields_dict, create=True):
"""Sets the dictionary of fields on the document.
Args:
fields_dict: a dict mapping field names to values
create (True): whether to create fields if they do not exist
"""
for field_name, value in fields_dict.items():
self.set_field(field_name, value, create=create)
def clear_field(self, field_name):
"""Clears the value of a field of the document.
Args:
field_name: the name of the field to clear
Raises:
ValueError: if the field does not exist
"""
self._doc.clear_field(field_name)
def iter_fields(self):
"""Returns an iterator over the ``(name, value)`` pairs of the fields
of the document.
Private fields are omitted.
Returns:
an iterator that emits ``(name, value)`` tuples
"""
field_names = tuple(
f for f in self.field_names if f not in self._skip_iter_field_names
)
for field_name in field_names:
yield field_name, self.get_field(field_name)
def merge(self, document, overwrite=True):
"""Merges the fields of the document into this document.
``None``-valued fields are always omitted.
Args:
document: a :class:`Document` of the same type
overwrite (True): whether to overwrite existing fields. Note that
existing fields whose values are ``None`` are always
overwritten
"""
existing_field_names = self.field_names
for field_name, value in document.iter_fields():
if value is None:
continue
if (
not overwrite
and (field_name in existing_field_names)
and (self[field_name] is not None)
):
continue
self.set_field(field_name, value)
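    # Illustrative merge semantics, where ``src`` and ``dst`` are hypothetical
    # documents of the same type:
    #
    #     dst.merge(src, overwrite=False)  # only fills dst fields that are None
    #     dst.merge(src)                   # src's non-None fields overwrite dst's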
def copy(self):
"""Returns a deep copy of the document that has not been added to the
database.
Returns:
a :class:`Document`
"""
kwargs = {k: deepcopy(v) for k, v in self.iter_fields()}
return self.__class__(**kwargs)
def to_dict(self):
"""Serializes the document to a JSON dictionary.
Sample IDs and private fields are excluded in this representation.
Returns:
a JSON dict
"""
d = self._doc.to_dict(extended=True)
return {k: v for k, v in d.items() if not k.startswith("_")}
def to_json(self, pretty_print=False):
"""Serializes the document to a JSON string.
Sample IDs and private fields are excluded in this representation.
Args:
pretty_print (False): whether to render the JSON in human readable
format with newlines and indentations
Returns:
a JSON string
"""
return etas.json_to_str(self.to_dict(), pretty_print=pretty_print)
def to_mongo_dict(self):
"""Serializes the document to a BSON dictionary equivalent to the
representation that would be stored in the database.
Returns:
a BSON dict
"""
return self._doc.to_dict(extended=False)
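    # Illustrative contrast between the two serializations (field values assumed):
    #
    #     doc.to_dict()        # extended JSON dict with private "_" keys removed
    #     doc.to_mongo_dict()  # raw BSON dict as stored, private fields included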
def save(self):
"""Saves the document to the database."""
self._doc.save()
def reload(self):
"""Reloads the document from the database."""
self._doc.reload()
def _delete(self):
"""Deletes the document from the database."""
self._doc.delete()
@classmethod
def from_dict(cls, d):
"""Loads the document from a JSON dictionary.
The returned document will not belong to a dataset.
        Args:
            d: a JSON dict
        Returns:
            a :class:`Document`
"""
doc = cls._NO_COLL_CLS.from_dict(d, extended=True)
return cls.from_doc(doc)
@classmethod
def from_json(cls, s):
"""Loads the document from a JSON string.
Args:
s: the JSON string
Returns:
a :class:`Document`
"""
        doc = cls._NO_COLL_CLS.from_json(s)
return cls.from_doc(doc)
@classmethod
def _rename_field(cls, collection_name, field_name, new_field_name):
"""Renames any field values for in-memory document instances that
belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
field_name: the name of the field to rename
new_field_name: the new field name
"""
for document in cls._instances[collection_name].values():
data = document._doc._data
data[new_field_name] = data.pop(field_name, None)
@classmethod
def _purge_field(cls, collection_name, field_name):
"""Removes values for the given field from all in-memory document
instances that belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
field_name: the name of the field to purge
"""
for document in cls._instances[collection_name].values():
document._doc._data.pop(field_name, None)
@classmethod
def _reload_docs(cls, collection_name):
"""Reloads the backing documents for all in-memory document instances
that belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
"""
for document in cls._instances[collection_name].values():
document.reload()
def _set_backing_doc(self, doc, dataset=None):
"""Sets the backing doc for the document.
Args:
doc: a :class:`fiftyone.core.odm.SampleDocument`
dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which
the document belongs, if any
"""
# Ensure the doc is saved to the database
if not doc.id:
doc.save()
self._doc = doc
# Save weak reference
dataset_instances = self._instances[doc.collection_name]
if self.id not in dataset_instances:
dataset_instances[self.id] = self
self._dataset = dataset
@classmethod
    def _reset_backing_docs(cls, collection_name, doc_ids):
        """Resets the backing documents for the given document IDs.
Args:
collection_name: the name of the MongoDB collection
doc_ids: a list of document IDs
"""
dataset_instances = cls._instances[collection_name]
for doc_id in doc_ids:
document = dataset_instances.pop(doc_id, None)
if document is not None:
document._reset_backing_doc()
@classmethod
def _reset_all_backing_docs(cls, collection_name):
"""Resets the backing documents for all documents in the collection.
Args:
collection_name: the name of the MongoDB collection
"""
if collection_name not in cls._instances:
return
dataset_instances = cls._instances.pop(collection_name)
for document in dataset_instances.values():
document._reset_backing_doc()
def _reset_backing_doc(self):
self._doc = self.copy()._doc
        self._dataset = None
task.controller.ts | import * as Boom from 'boom';
import * as Hapi from 'hapi';
import { BaseController } from '../../shared';
import { Task, ITask } from '../../models';
export class TaskController extends BaseController {
constructor() {
super('Task not found');
}
public getTaskById(request: Hapi.Request, reply: Hapi.IReply) {
const id = request.params['id'];
Task.findById(id).then(task => {
      if (!task) {
        return reply(Boom.notFound('Task does not exist.'));
      }
      reply(task);
}).catch(error => {
reply(Boom.badImplementation(error));
});
}
public getTasks(request: Hapi.Request, reply: Hapi.IReply) {
Task.getTasks().then(tasks => {
reply(tasks);
}).catch(error => {
reply(Boom.badImplementation(error));
});
}
public createTask(request: Hapi.Request, reply: Hapi.IReply) {
let task: ITask = {
id: '',
name: request.payload.name,
description: request.payload.description
};
Task.addTask(task).then(task => {
reply(task);
}).catch(error => {
reply(Boom.badData(error));
});
}
public updateTask(request: Hapi.Request, reply: Hapi.IReply) {
let task: ITask = {
id: request.params['id'],
name: request.payload.name,
description: request.payload.description
};
Task.updateTask(task).then(task => {
reply(task);
}).catch(error => {
reply(Boom.badData(error));
});
}
public deleteTask(request: Hapi.Request, reply: Hapi.IReply) {
const id = request.params['id'];
Task.deleteTask(id).then(() => {
reply(true);
}).catch(error => {
reply(Boom.badData(error));
});
}
}
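// Illustrative route wiring (a sketch, not part of this module), assuming a
// Hapi v16-style server to match the reply(...) callback interface used above:
//
//   server.route({
//     method: 'GET',
//     path: '/tasks/{id}',
//     handler: (req, reply) => taskController.getTaskById(req, reply)
//   });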
export const taskController: TaskController = new TaskController();
.eslintrc.js | module.exports = {
    "env": {
        "browser": true,
        "es2020": true,
        "node": true,
},
"extends": [
"eslint:recommended",
"plugin:vue/essential",
"@nuxtjs",
"plugin:nuxt/recommended"
],
"parserOptions": {
"ecmaVersion": 11,
"sourceType": "module"
},
"plugins": [
"vue"
],
"rules": {
"indent": [
"error",
"tab"
],
"linebreak-style": [
"error",
"unix"
],
"quotes": [
"error",
"double"
],
"semi": [
"error",
"always"
],
"vue/no-use-v-if-with-v-for": ["warn"],
        "comma-dangle": ["error", {
            "arrays": "always",
            "objects": "always",
            "imports": "never",
            "exports": "never",
            "functions": "never"
        }],
        "vue/html-indent": ["error", "tab"],
        "no-tabs": 0,
        "array-bracket-spacing": ["error", "always"],
        "comma-style": ["error", "last"],
        "comma-spacing": ["error", { "before": false, "after": true }],
        "space-before-function-paren": ["error", "never"]
}
};
saved_model.rs | // This file is generated by rust-protobuf 2.17.0. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![rustfmt::skip]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `tensorflow/core/protobuf/saved_model.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
#[derive(PartialEq,Clone,Default)]
pub struct SavedModel {
// message fields
pub saved_model_schema_version: i64,
pub meta_graphs: ::protobuf::RepeatedField<super::meta_graph::MetaGraphDef>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a SavedModel {
fn default() -> &'a SavedModel {
<SavedModel as ::protobuf::Message>::default_instance()
}
}
impl SavedModel {
pub fn new() -> SavedModel {
::std::default::Default::default()
}
// int64 saved_model_schema_version = 1;
pub fn get_saved_model_schema_version(&self) -> i64 {
self.saved_model_schema_version
}
pub fn clear_saved_model_schema_version(&mut self) {
self.saved_model_schema_version = 0;
}
// Param is passed by value, moved
pub fn set_saved_model_schema_version(&mut self, v: i64) {
self.saved_model_schema_version = v;
}
// repeated .tensorflow.MetaGraphDef meta_graphs = 2;
pub fn get_meta_graphs(&self) -> &[super::meta_graph::MetaGraphDef] {
&self.meta_graphs
}
pub fn clear_meta_graphs(&mut self) {
self.meta_graphs.clear();
}
// Param is passed by value, moved
pub fn set_meta_graphs(&mut self, v: ::protobuf::RepeatedField<super::meta_graph::MetaGraphDef>) {
self.meta_graphs = v;
}
// Mutable pointer to the field.
pub fn mut_meta_graphs(&mut self) -> &mut ::protobuf::RepeatedField<super::meta_graph::MetaGraphDef> {
&mut self.meta_graphs
}
// Take field
pub fn take_meta_graphs(&mut self) -> ::protobuf::RepeatedField<super::meta_graph::MetaGraphDef> {
::std::mem::replace(&mut self.meta_graphs, ::protobuf::RepeatedField::new())
}
}
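// Illustrative use of the generated message (a sketch; `model_bytes` is an
// assumed buffer holding the contents of a `saved_model.pb` file):
//
//     let model: SavedModel = ::protobuf::parse_from_bytes(&model_bytes)?;
//     let version = model.get_saved_model_schema_version();
//     for mg in model.get_meta_graphs() { /* inspect each MetaGraphDef */ }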
impl ::protobuf::Message for SavedModel {
fn is_initialized(&self) -> bool {
for v in &self.meta_graphs {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
let tmp = is.read_int64()?;
self.saved_model_schema_version = tmp;
},
2 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.meta_graphs)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.saved_model_schema_version != 0 {
my_size += ::protobuf::rt::value_size(1, self.saved_model_schema_version, ::protobuf::wire_format::WireTypeVarint);
}
for value in &self.meta_graphs {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.saved_model_schema_version != 0 {
os.write_int64(1, self.saved_model_schema_version)?;
}
for v in &self.meta_graphs {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> SavedModel {
SavedModel::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"saved_model_schema_version",
|m: &SavedModel| { &m.saved_model_schema_version },
|m: &mut SavedModel| { &mut m.saved_model_schema_version },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::meta_graph::MetaGraphDef>>(
"meta_graphs",
|m: &SavedModel| { &m.meta_graphs },
|m: &mut SavedModel| { &mut m.meta_graphs },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<SavedModel>(
"SavedModel",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static SavedModel {
static instance: ::protobuf::rt::LazyV2<SavedModel> = ::protobuf::rt::LazyV2::INIT;
instance.get(SavedModel::new)
}
}
impl ::protobuf::Clear for SavedModel {
fn clear(&mut self) {
self.saved_model_schema_version = 0;
self.meta_graphs.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for SavedModel {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for SavedModel {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n*tensorflow/core/protobuf/saved_model.proto\x12\ntensorflow\x1a)tensor\
flow/core/protobuf/meta_graph.proto\"\x84\x01\n\nSavedModel\x12;\n\x1asa\
ved_model_schema_version\x18\x01\x20\x01(\x03R\x17savedModelSchemaVersio\
n\x129\n\x0bmeta_graphs\x18\x02\x20\x03(\x0b2\x18.tensorflow.MetaGraphDe\
fR\nmetaGraphsBo\n\x18org.tensorflow.frameworkB\x10SavedModelProtosP\x01\
Z<github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf\xf8\x01\
\x01b\x06proto3\
";
static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
cluster.go | // Copyright (c) nano Authors. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package cluster
import (
"context"
"fmt"
"sync"
"github.com/aura-studio/nano/cluster/clusterpb"
"github.com/aura-studio/nano/log"
)
// cluster represents a nano cluster, which contains a bunch of nano nodes
// and each of them provides a group of different services. All service requests
// from clients are sent to the gate first and then forwarded to the appropriate node.
type cluster struct {
	// For small clusters a plain slice is sufficient
currentNode *Node
rpcClient *rpcClient
mu sync.RWMutex
	members     []*Member // the current node is not included in members
}
func newCluster(currentNode *Node) *cluster {
return &cluster{currentNode: currentNode}
}
// Register implements the MasterServer gRPC service
func (c *cluster) Register(_ context.Context, req *clusterpb.RegisterRequest) (*clusterpb.RegisterResponse, error) {
if req.MemberInfo == nil {
return nil, ErrInvalidRegisterReq
}
var index = -1
resp := &clusterpb.RegisterResponse{}
for i, m := range c.members {
if m.memberInfo.ServiceAddr == req.MemberInfo.ServiceAddr {
index = i
break
}
}
if index >= 0 {
		log.Warnf("Address %s already registered; it will be unregistered before re-registering", req.MemberInfo.ServiceAddr)
		// Notify registered nodes to update their remote services
delMember := &clusterpb.DelMemberRequest{ServiceAddr: req.MemberInfo.ServiceAddr}
for _, m := range c.members {
if m.MemberInfo().ServiceAddr == c.currentNode.MemberAddr {
continue
}
pool, err := c.rpcClient.getConnPool(m.memberInfo.ServiceAddr)
if err != nil {
return nil, err
}
client := clusterpb.NewMemberClient(pool.Get())
_, err = client.DelMember(context.Background(), delMember)
if err != nil {
log.Warnln("Delete member failed", err)
}
}
		log.Infoln("Existing peer unregistered from cluster", req.MemberInfo.ServiceAddr)
		// Remove the member's services from the current node
c.currentNode.handler.delMember(req.MemberInfo.ServiceAddr)
c.mu.Lock()
if index == len(c.members)-1 {
c.members = c.members[:index]
} else {
c.members = append(c.members[:index], c.members[index+1:]...)
}
c.mu.Unlock()
}
	// Notify registered nodes to update remote services
newMember := &clusterpb.NewMemberRequest{MemberInfo: req.MemberInfo}
for _, m := range c.members {
resp.Members = append(resp.Members, m.memberInfo)
if m.isMaster {
continue
}
pool, err := c.rpcClient.getConnPool(m.memberInfo.ServiceAddr)
if err != nil {
return nil, err
}
client := clusterpb.NewMemberClient(pool.Get())
_, err = client.NewMember(context.Background(), newMember)
if err != nil {
log.Warnln("New member failed", err)
}
}
log.Infoln("New peer register to cluster", req.MemberInfo.ServiceAddr)
c.currentNode.handler.addMember(req.MemberInfo)
c.mu.Lock()
defer c.mu.Unlock()
c.members = append(c.members, &Member{isMaster: false, memberInfo: req.MemberInfo})
if c.currentNode.MasterPersist != nil {
var memberInfos []*clusterpb.MemberInfo
for _, member := range c.members {
if member.isMaster {
continue
}
memberInfos = append(memberInfos, member.MemberInfo())
}
if err := c.currentNode.MasterPersist.Set(memberInfos); err != nil {
return nil, err
}
}
return resp, nil
}
// Unregister implements the MasterServer gRPC service
func (c *cluster) Unregister(_ context.Context, req *clusterpb.UnregisterRequest) (*clusterpb.UnregisterResponse, error) {
if req.ServiceAddr == "" {
return nil, ErrInvalidRegisterReq
}
var index = -1
resp := &clusterpb.UnregisterResponse{}
for i, m := range c.members {
if m.memberInfo.ServiceAddr == req.ServiceAddr {
index = i
break
}
}
if index < 0 {
return nil, fmt.Errorf("address %s has not registered", req.ServiceAddr)
}
// Notify registered node to update remote services
delMember := &clusterpb.DelMemberRequest{ServiceAddr: req.ServiceAddr}
for _, m := range c.members {
if m.MemberInfo().ServiceAddr == c.currentNode.MemberAddr {
continue
}
pool, err := c.rpcClient.getConnPool(m.memberInfo.ServiceAddr)
if err != nil {
return nil, err
}
client := clusterpb.NewMemberClient(pool.Get())
_, err = client.DelMember(context.Background(), delMember)
if err != nil {
log.Warnln("Delete member failed", err)
}
}
log.Infoln("Exists peer unregister to cluster", req.ServiceAddr)
// Register services to current node
c.currentNode.handler.delMember(req.ServiceAddr)
c.mu.Lock()
defer c.mu.Unlock()
if index == len(c.members)-1 {
c.members = c.members[:index]
} else {
c.members = append(c.members[:index], c.members[index+1:]...)
}
if c.currentNode.MasterPersist != nil {
var memberInfos []*clusterpb.MemberInfo
for _, member := range c.members {
if member.isMaster {
continue
}
memberInfos = append(memberInfos, member.MemberInfo())
}
if err := c.currentNode.MasterPersist.Set(memberInfos); err != nil {
return nil, err
}
}
return resp, nil
}
func (c *cluster) setRPCClient(client *rpcClient) {
c.rpcClient = client
}
func (c *cluster) remoteAddrs() []string {
var addrs []string
c.mu.RLock()
for _, m := range c.members {
addrs = append(addrs, m.memberInfo.ServiceAddr)
}
c.mu.RUnlock()
return addrs
}
func (c *cluster) initMembers(members []*clusterpb.MemberInfo) {
c.mu.Lock()
for _, info := range members {
c.members = append(c.members, &Member{
memberInfo: info,
})
}
c.mu.Unlock()
}
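// addMember updates the member record in place when the address is already
// known; otherwise it appends a new member entry.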
func (c *cluster) addMember(info *clusterpb.MemberInfo) {
c.mu.Lock()
var found bool
for _, member := range c.members {
if member.memberInfo.ServiceAddr == info.ServiceAddr {
member.memberInfo = info
found = true
break
}
}
if !found {
c.members = append(c.members, &Member{ | c.mu.Unlock()
}
func (c *cluster) delMember(addr string) {
c.mu.Lock()
var index = -1
for i, member := range c.members {
if member.memberInfo.ServiceAddr == addr {
index = i
break
}
}
if index != -1 {
if index == len(c.members)-1 {
c.members = c.members[:index]
} else {
c.members = append(c.members[:index], c.members[index+1:]...)
}
}
c.mu.Unlock()
} | memberInfo: info,
})
} |
gan_run.py | # Copyright (c) maiot GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software | # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from zenml.core.datasources.image_datasource import ImageDatasource
from zenml.core.pipelines.training_pipeline import TrainingPipeline
from zenml.core.steps.split.categorical_domain_split_step import \
CategoricalDomainSplit
from zenml.core.repo.repo import Repository
from examples.gan.gan_functions import CycleGANTrainer
from examples.gan.preprocessing import GANPreprocessor
repo: Repository = Repository().get_instance()
gan_pipeline = TrainingPipeline(name="whynotletitfly",
enable_cache=False)
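# Register the image datasource once; on later runs, fall back to the copy already in the repository.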
try:
ds = ImageDatasource(name="gan_images",
base_path="/Users/nicholasjunge/workspaces/maiot/ce_project/images_mini")
except Exception:
ds = repo.get_datasource_by_name('gan_images')
gan_pipeline.add_datasource(ds)
gan_pipeline.add_split(CategoricalDomainSplit(categorical_column="label",
split_map={"train": [0],
"eval": [1]}))
gan_pipeline.add_preprocesser(GANPreprocessor())
# gan_pipeline.add_preprocesser(transform_step)
gan_pipeline.add_trainer(CycleGANTrainer(epochs=5))
gan_pipeline.run() | |
test_tag.py | """Test cltk.tag."""
import os
import shutil
import unittest
from cltk.corpus.utils.importer import CorpusImporter
from cltk.utils import get_cltk_data_dir  # assumed location; the tests below call get_cltk_data_dir()
from cltk.stem.latin.j_v import JVReplacer
from cltk.tag import ner
from cltk.tag.ner import NamedEntityReplacer
from cltk.tag.pos import POSTag
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904
"""Class for unittest"""
def setUp(self):
"""Clone Greek models in order to test pull function and other model
tests later.
"""
corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/greek/model/greek_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('latin')
corpus_importer.import_corpus('latin_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/latin/model/latin_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('french')
corpus_importer.import_corpus('french_data_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/french/text/french_data_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter("old_norse")
corpus_importer.import_corpus("old_norse_models_cltk")
file_rel = os.path.join(get_cltk_data_dir() + '/old_norse/model/old_norse_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('middle_low_german')
corpus_importer.import_corpus('middle_low_german_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/middle_low_german/model/middle_low_german_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('old_english')
corpus_importer.import_corpus('old_english_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/old_english/model/old_english_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
def test_pos_unigram_greek(self):
"""Test tagging Greek POS with unigram tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_unigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_bigram_greek(self):
"""Test tagging Greek POS with bigram tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_bigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_trigram_greek(self):
"""Test tagging Greek POS with trigram tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_trigram(' | ger.tag_ngram_123_backoff('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_tnt_tagger_greek(self):
"""Test tagging Greek POS with TnT tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_tnt('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_unigram_latin(self):
"""Test tagging Latin POS with unigram tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_unigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_bigram_latin(self):
"""Test tagging Latin POS with bigram tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_bigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_trigram_latin(self):
"""Test tagging Latin POS with trigram tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_trigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_latin(self):
"""Test tagging Latin POS with a 1-, 2-, and 3-gram backoff tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_ngram_123_backoff('Gallia est omnis divisa in partes tres') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_tnt_tagger_latin(self):
"""Test tagging Latin POS with TnT tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_tnt('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_crf_tagger_latin(self):
"""Test tagging Latin POS with CRF tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_crf('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
    def test_check_latest_latin_names_file(self):
        """Test _check_latest_data() with models already downloaded."""
ner._check_latest_data('latin')
names_path = os.path.normpath(get_cltk_data_dir() + '/latin/model/latin_models_cltk/ner/proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_check_latest_latin(self):
"""Test _check_latest_data()"""
path = get_cltk_data_dir() + '/latin/model/latin_models_cltk'
#p = get_cltk_data_dir() + '/latin/model/latin_models_cltk/ner/proper_names.txt'
names_dir = os.path.expanduser(path)
shutil.rmtree(names_dir, ignore_errors=True)
ner._check_latest_data('latin')
names_path = os.path.join(names_dir, 'ner', 'proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_tag_ner_str_list_latin(self):
"""Test make_ner(), str, list."""
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
tokens = ner.tag_ner('latin', input_text=text_str_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_latin(self):
"""Test make_ner(), list, list."""
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
tokens = ner.tag_ner('latin', input_text=text_list_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), ('Sirius', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_latin(self):
"""Test make_ner(), list, str."""
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
text = ner.tag_ner('latin', input_text=text_list_iu, output_type=str)
target = ' ut Uenus/Entity Sirius/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_latin(self):
"""Test make_ner(), str, str."""
        text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
        jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
text = ner.tag_ner('latin', input_text=text_str_iu, output_type=str)
target = ' ut Uenus/Entity, ut Sirius/Entity, ut Spica/Entity, ut aliae quae primae dicuntur esse mangitudinis.'
self.assertEqual(text, target)
def test_tag_ner_str_list_greek(self):
"""Test make_ner(), str, list."""
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
tokens = ner.tag_ner('greek', input_text=text_str, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity'), ('Κάππαρος', 'Entity'), ('Πρωτογενείας', 'Entity'), ('Διονυσιάδες', 'Entity'), ('τὴν',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_greek(self):
"""Test make_ner(), list, list."""
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
tokens = ner.tag_ner('greek', input_text=text_list, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_greek(self):
"""Test make_ner(), list, str."""
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
text = ner.tag_ner('greek', input_text=text_list, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_greek(self):
"""Test make_ner(), str, str."""
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
text = ner.tag_ner('greek', input_text=text_str, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity Κάππαρος/Entity Πρωτογενείας/Entity Διονυσιάδες/Entity τὴν'
self.assertEqual(text, target)
def test_tag_ner_str_list_french(self):
"""Test make_ner(), str, list."""
text_str = """Berte fu mere Charlemaine, qui pukis tint France et tot le Maine."""
ner_replacer = NamedEntityReplacer()
tokens = ner_replacer.tag_ner_fr(input_text=text_str, output_type=list)
target = [[('Berte', 'entity', 'CHI')], ('fu',), ('mere',), [('Charlemaine', 'entity', 'CHI')], (',',), ('qui',), ('pukis',),
('tint',), [('France', 'entity', 'LOC')], ('et',), ('tot',), ('le',), [('Maine', 'entity', 'LOC')], ('.',)]
self.assertEqual(tokens, target)
def test_pos_tnt_tagger_old_norse(self):
"""Test tagging Old Norse POS with TnT tagger."""
tagger = POSTag('old_norse')
tagged = tagger.tag_tnt('Hlióðs bið ek allar.')
print(tagged)
self.assertTrue(tagged)
def test_pos_ngram12_tagger_middle_low_german(self):
""" Test MOG POS 12-backoff tagger"""
tagger = POSTag('middle_low_german')
tagged = tagger.tag_ngram_12_backoff('Jck Johannes preister verwarer vnde voirs tender des Juncfrouwen kloisters to Mariendale')
self.assertTrue(tagged)
def test_pos_unigram_old_english(self):
"""Test tagging Old English POS with unigram tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_unigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_bigram_old_english(self):
"""Test tagging Old English POS with bigram tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_bigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_trigram_old_english(self):
"""Test tagging old_english POS with trigram tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_trigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_old_english(self):
"""Test tagging Old English POS with a 1-, 2-, and 3-gram backoff tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_ngram_123_backoff('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_crf_tagger_old_english(self):
"""Test tagging Old English POS with CRF tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_crf('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_perceptron_tagger_old_english(self):
"""Test tagging Old English POS with Perceptron tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_perceptron('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
if __name__ == '__main__':
unittest.main()
| θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_ngram123_tagger_greek(self):
"""Test tagging Greek POS with a 1-, 2-, and 3-gram backoff tagger."""
tagger = POSTag('greek')
tagged = tag |
mod.rs | #[cfg(any(feature = "google-cloud-orchestration-airflow-service-v1"))]
pub mod v1;
#[cfg(any(feature = "google-cloud-orchestration-airflow-service-v1beta1"))] | pub mod v1beta1; |
|
hash.spec.js | /* eslint import/no-extraneous-dependencies: ["error", {"optionalDependencies": false}] */
/* global describe, it, before, after */
const array = require('stream-array');
const File = require('gulp-util').File;
const gulp = require('gulp');
const assert = require('stream-assert');
const path = require('path');
const fs = require('fs');
const rimraf = require('rimraf');
// Assumed entry point of the gulp-version plugin under test; adjust the path if it lives elsewhere.
const version = require('..');
require('should');
require('mocha');
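// Build an in-memory vinyl File for each input string so fixtures can be piped
// through the plugin without touching the filesystem.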
const file = (...files) => {
files = files.map(stream => {
return new File({
cwd: process.cwd(),
base: process.cwd(),
      path: process.cwd() + '/' + stream.toString() + '.js',
      contents: Buffer.from(stream)
});
});
return array(files);
};
describe('gulp-version.hash', () => {
  // Create a scratch output directory; we'll delete it when we're done.
before(done => {
fs.mkdir('./output', done);
});
  // Delete the scratch output directory.
after(done => {
rimraf('output', done);
});
it('it should read from stream and add a hash', done => {
file('first')
.pipe(version.hash())
.pipe(assert.first((d) => {
path.basename(d.path.toString()).should.eql('first.e0996a37.js');
}))
.pipe(assert.end(done));
});
it('it should read multiple from stream and add a hash', done => {
file('first', 'second')
.pipe(version.hash())
.pipe(assert.first((d) => {
path.basename(d.path.toString()).should.eql('first.e0996a37.js');
}))
.pipe(assert.second((d) => {
path.basename(d.path.toString()).should.eql('second.352f7829.js');
}))
.pipe(assert.end(done));
});
it('should read from filesystem, add hash and save to correct destination', done => {
| gulp.src('./fixtures/first.js')
.pipe(version.hash())
.pipe(gulp.dest('./output/'))
.pipe(assert.first((d) => {
d.path.toString().should.eql(`${__dirname}/output/first.7fff6f72.js`);
}))
.pipe(assert.end(done));
});
it('should read multiple from filesystem, add hash and save to correct destination', done => {
gulp.src(['./fixtures/first.js', './fixtures/second.js'])
.pipe(version.hash())
.pipe(gulp.dest('./output/'))
.pipe(assert.first((d) => {
d.path.toString().should.eql(`${__dirname}/output/first.7fff6f72.js`);
}))
.pipe(assert.second((d) => {
d.path.toString().should.eql(`${__dirname}/output/second.343854a0.js`);
}))
.pipe(assert.end(done));
});
}); | |
workermap.rs | use std::ops::{Deref, DerefMut};
use crate::common::Map;
use crate::server::worker::Worker;
use crate::WorkerId;
#[derive(Default, Debug)]
pub struct WorkerMap {
workers: Map<WorkerId, Worker>,
}
impl WorkerMap {
#[inline]
pub fn get_worker(&self, worker_id: WorkerId) -> &Worker {
&self.workers[&worker_id]
}
#[inline]
pub fn get_worker_mut(&mut self, worker_id: WorkerId) -> &mut Worker {
self.workers.get_mut(&worker_id).expect("Worker not found")
}
}
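// Deref/DerefMut expose the underlying map, so callers can use iteration,
// `get`, `contains_key`, etc. on a WorkerMap without re-exported wrappers.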
impl Deref for WorkerMap {
type Target = Map<WorkerId, Worker>;
#[inline]
fn | (&self) -> &Self::Target {
&self.workers
}
}
impl DerefMut for WorkerMap {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.workers
}
}
| deref |
regenerate_system_token.go | /*
*
* Copyright 2020, 2021 ZUP IT SERVICOS EM TECNOLOGIA E INOVACAO SA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package systoken
import (
"github.com/ZupIT/charlescd/gate/internal/logging"
"github.com/ZupIT/charlescd/gate/internal/repository"
"github.com/google/uuid"
"strings"
)
type RegenerateSystemToken interface {
Execute(id uuid.UUID) (string, error)
}
type regenerateSystemToken struct {
systemTokenRepository repository.SystemTokenRepository
}
func NewRegenerateSystemToken(repository repository.SystemTokenRepository) RegenerateSystemToken |
func (r regenerateSystemToken) Execute(id uuid.UUID) (string, error) {
systemToken, err := r.systemTokenRepository.FindByID(id)
if err != nil {
return "", logging.WithOperation(err, "RevokeSystemToken.Execute")
}
if systemToken.Revoked {
return "", logging.NewError("Cannot update revoked tokens", logging.CustomError{}, logging.BusinessError, nil)
}
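	// Issue a fresh opaque token: a random UUID with the dashes stripped.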
systemToken.Token = strings.ReplaceAll(uuid.New().String(), "-", "")
err = r.systemTokenRepository.Update(systemToken)
if err != nil {
return "", logging.WithOperation(err, "RevokeSystemToken.Execute")
}
return systemToken.Token, nil
}
| {
return regenerateSystemToken{
systemTokenRepository: repository,
}
} |
q02.rs | //-----------------------------------------------------
// Setup.
use aoc::Day;
static INPUT: &str = include_str!("data/q02.data");
// static INPUT : &'static str = "ULL
// RRDDD
// LURDL
// UUUUD";
type Key = [usize; 2];
type Keypad = Vec<Vec<char>>;
type KeypadRef<'a> = &'a [Vec<char>];
#[derive(Debug)]
enum Direction {
Up,
Left,
Down,
Right,
}
fn get(keypad: KeypadRef, key: Key) -> char {
keypad[key[0]][key[1]]
}
impl Direction {
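    // Move one key in the given direction, staying put when the target cell is
    // blank (' '), which marks positions outside the keypad.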
fn shift(&self, key: Key, keypad: KeypadRef) -> Key {
match *self {
Direction::Up => {
if get(keypad, [key[0] - 1, key[1]]) == ' ' {
key
} else {
[key[0] - 1, key[1]]
}
}
Direction::Left => {
if get(keypad, [key[0], key[1] - 1]) == ' ' {
key
} else {
[key[0], key[1] - 1]
}
}
Direction::Down => {
if get(keypad, [key[0] + 1, key[1]]) == ' ' {
key
} else {
[key[0] + 1, key[1]]
}
}
Direction::Right => {
if get(keypad, [key[0], key[1] + 1]) == ' ' {
key
} else {
[key[0], key[1] + 1]
}
}
}
}
}
use std::str::FromStr;
impl FromStr for Direction {
type Err = ();
fn from_str(s: &str) -> Result<Direction, ()> {
match s {
"U" => Ok(Direction::Up),
"L" => Ok(Direction::Left),
"D" => Ok(Direction::Down),
"R" => Ok(Direction::Right),
_ => Err(()),
}
}
}
fn handle_direction(key: Key, keypad: KeypadRef, next: char) -> Key {
let direction: Direction = next.to_string().parse().unwrap();
// println!("{:?}, {:?}", direction, direction.shift(key));
direction.shift(key, keypad)
}
fn parse_line(key: &mut Key, keypad: KeypadRef, line: &str) {
for direction in line.chars() {
*key = handle_direction(*key, keypad, direction);
}
print!("{}", get(keypad, *key));
}
//-----------------------------------------------------
// Questions.
pub struct Q;
impl Day for Q {
fn number(&self) -> String |
fn a(&self) {
print!("{}A: ", self.number());
let keypad: Keypad = vec![
vec![' ', ' ', ' ', ' ', ' '],
vec![' ', '1', '2', '3', ' '],
vec![' ', '4', '5', '6', ' '],
vec![' ', '7', '8', '9', ' '],
vec![' ', ' ', ' ', ' ', ' '],
];
let mut key: Key = [2, 2];
print!("Result = ");
for line in INPUT.lines() {
parse_line(&mut key, &keypad, line);
}
println!();
}
fn b(&self) {
print!("{}B: ", self.number());
let keypad: Keypad = vec![
vec![' ', ' ', ' ', ' ', ' ', ' ', ' '],
vec![' ', ' ', ' ', '1', ' ', ' ', ' '],
vec![' ', ' ', '2', '3', '4', ' ', ' '],
vec![' ', '5', '6', '7', '8', '9', ' '],
vec![' ', ' ', 'A', 'B', 'C', ' ', ' '],
vec![' ', ' ', ' ', 'D', ' ', ' ', ' '],
vec![' ', ' ', ' ', ' ', ' ', ' ', ' '],
];
let mut key: Key = [3, 1];
print!("Result = ");
for line in INPUT.lines() {
parse_line(&mut key, &keypad, line);
}
println!();
}
}
| {
String::from("2")
} |
text_parser_test.go | // Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/catper/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"fmt"
"math"
"testing"
. "github.com/catper/protobuf/proto"
proto3pb "github.com/catper/protobuf/proto/proto3_proto"
. "github.com/catper/protobuf/proto/test_proto"
)
type UnmarshalTextTest struct {
in string
err string // if "", no error expected
out *MyMessage
}
func buildExtStructTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
SetExtension(msg, E_Ext_More, &Ext{
Data: String("Hello, world!"),
})
return UnmarshalTextTest{in: text, out: msg}
}
func buildExtDataTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
SetExtension(msg, E_Ext_Number, Int32(1729))
return UnmarshalTextTest{in: text, out: msg}
}
func buildExtRepStringTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
panic(err)
}
return UnmarshalTextTest{in: text, out: msg}
}
var unMarshalTextTests = []UnmarshalTextTest{
// Basic
{
in: " count:42\n name:\"Dave\" ",
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
},
},
// Empty quoted string
{
in: `count:42 name:""`,
out: &MyMessage{
Count: Int32(42),
Name: String(""),
},
},
// Quoted string concatenation with double quotes
{
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
// Quoted string concatenation with single quotes
{
in: "count:42 name: 'My name is '\n'elsewhere'",
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
// Quoted string concatenations with mixed quotes
{
in: "count:42 name: 'My name is '\n\"elsewhere\"",
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
{
in: "count:42 name: \"My name is \"\n'elsewhere'",
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
// Quoted string with escaped apostrophe
{
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
out: &MyMessage{
Count: Int32(42),
Name: String("HOLIDAY - New Year's Day"),
},
},
// Quoted string with single quote
{
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
out: &MyMessage{
Count: Int32(42),
Name: String(`Roger "The Ramster" Ramjet`),
},
},
// Quoted string with all the accepted special characters from the C++ test
{
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
out: &MyMessage{
Count: Int32(42),
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
},
},
// Quoted string with quoted backslash
{
in: `count:42 name: "\\'xyz"`,
out: &MyMessage{
Count: Int32(42),
Name: String(`\'xyz`),
},
},
// Quoted string with UTF-8 bytes.
{
in: "count:42 name: '\303\277\302\201\x00\xAB\xCD\xEF'",
out: &MyMessage{
Count: Int32(42),
Name: String("\303\277\302\201\x00\xAB\xCD\xEF"),
},
},
// Quoted string with unicode escapes.
{
in: `count: 42 name: "\u0047\U00000047\uffff\U0010ffff"`,
out: &MyMessage{
Count: Int32(42),
Name: String("GG\uffff\U0010ffff"),
},
},
// Bad quoted string
{
in: `inner: < host: "\0" >` + "\n",
err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
},
// Bad \u escape
{
in: `count: 42 name: "\u000"`,
err: `line 1.16: invalid quoted string "\u000": \u requires 4 following digits`,
},
// Bad \U escape
{
in: `count: 42 name: "\U0000000"`,
err: `line 1.16: invalid quoted string "\U0000000": \U requires 8 following digits`,
},
	// Bad \x escape
{
in: `count: 42 name: "\xxx"`,
err: `line 1.16: invalid quoted string "\xxx": \xxx contains non-hexadecimal digits`,
},
// Number too large for int64
{
in: "count: 1 others { key: 123456789012345678901 }",
err: "line 1.23: invalid int64: 123456789012345678901",
},
// Number too large for int32
{
in: "count: 1234567890123",
err: "line 1.7: invalid int32: 1234567890123",
},
// Number in hexadecimal
{
in: "count: 0x2beef",
out: &MyMessage{
Count: Int32(0x2beef),
},
},
// Number in octal
{
in: "count: 024601",
out: &MyMessage{
Count: Int32(024601),
},
},
// Floating point number with "f" suffix
{
in: "count: 4 others:< weight: 17.0f >",
out: &MyMessage{
Count: Int32(4),
Others: []*OtherMessage{
{
Weight: Float32(17),
},
},
},
},
// Floating point positive infinity
{
in: "count: 4 bigfloat: inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(1)),
},
},
// Floating point negative infinity
{
in: "count: 4 bigfloat: -inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(-1)),
},
},
// Number too large for float32
{
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
},
// Number posing as a quoted string
{
in: `inner: < host: 12 >` + "\n",
err: `line 1.15: invalid string: 12`,
},
// Quoted string posing as int32
{
in: `count: "12"`,
err: `line 1.7: invalid int32: "12"`,
},
// Quoted string posing a float32
{
in: `others:< weight: "17.4" >`,
err: `line 1.17: invalid float32: "17.4"`,
},
// unclosed bracket doesn't cause infinite loop
{
in: `[`,
err: `line 1.0: unclosed type_url or extension name`,
},
// Enum
{
in: `count:42 bikeshed: BLUE`,
out: &MyMessage{
Count: Int32(42),
Bikeshed: MyMessage_BLUE.Enum(),
},
},
// Repeated field
{
in: `count:42 pet: "horsey" pet:"bunny"`,
out: &MyMessage{
Count: Int32(42),
Pet: []string{"horsey", "bunny"},
},
},
// Repeated field with list notation
{
in: `count:42 pet: ["horsey", "bunny"]`,
out: &MyMessage{
Count: Int32(42),
Pet: []string{"horsey", "bunny"},
},
},
// Repeated message with/without colon and <>/{}
{
in: `count:42 others:{} others{} others:<> others:{}`,
out: &MyMessage{
Count: Int32(42),
Others: []*OtherMessage{
{},
{},
{},
{},
},
},
},
// Missing colon for inner message
{
in: `count:42 inner < host: "cauchy.syd" >`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("cauchy.syd"),
},
},
},
// Missing colon for string field
{
in: `name "Dave"`,
err: `line 1.5: expected ':', found "\"Dave\""`,
},
// Missing colon for int32 field
{
in: `count 42`,
err: `line 1.6: expected ':', found "42"`,
},
// Missing required field
{
in: `name: "Pawel"`,
err: fmt.Sprintf(`proto: required field "%T.count" not set`, MyMessage{}),
out: &MyMessage{
Name: String("Pawel"),
},
},
// Missing required field in a required submessage
{
in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
err: fmt.Sprintf(`proto: required field "%T.host" not set`, InnerMessage{}),
out: &MyMessage{
Count: Int32(42),
WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
},
},
// Repeated non-repeated field
{
in: `name: "Rob" name: "Russ"`,
err: `line 1.12: non-repeated field "name" was repeated`,
},
// Group
{
in: `count: 17 SomeGroup { group_field: 12 }`,
out: &MyMessage{
Count: Int32(17),
Somegroup: &MyMessage_SomeGroup{
GroupField: Int32(12),
},
},
},
// Semicolon between fields
{
in: `count:3;name:"Calvin"`,
out: &MyMessage{
Count: Int32(3),
Name: String("Calvin"),
},
},
// Comma between fields
{
in: `count:4,name:"Ezekiel"`,
out: &MyMessage{
Count: Int32(4),
Name: String("Ezekiel"),
},
},
// Boolean false
{
in: `count:42 inner { host: "example.com" connected: false }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(false),
},
},
},
// Boolean true
{
in: `count:42 inner { host: "example.com" connected: true }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(true),
},
},
},
// Boolean 0
{
in: `count:42 inner { host: "example.com" connected: 0 }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(false),
},
},
},
// Boolean 1
{
in: `count:42 inner { host: "example.com" connected: 1 }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(true),
},
},
},
// Boolean f
{
in: `count:42 inner { host: "example.com" connected: f }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(false),
},
},
},
// Boolean t
{
in: `count:42 inner { host: "example.com" connected: t }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(true),
},
},
},
// Boolean False
{
in: `count:42 inner { host: "example.com" connected: False }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(false),
},
},
},
// Boolean True
{
in: `count:42 inner { host: "example.com" connected: True }`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("example.com"),
Connected: Bool(true),
},
},
},
// Extension
buildExtStructTest(`count: 42 [test_proto.Ext.more]:<data:"Hello, world!" >`),
buildExtStructTest(`count: 42 [test_proto.Ext.more] {data:"Hello, world!"}`),
buildExtDataTest(`count: 42 [test_proto.Ext.text]:"Hello, world!" [test_proto.Ext.number]:1729`),
buildExtRepStringTest(`count: 42 [test_proto.greeting]:"bula" [test_proto.greeting]:"hola"`),
// Big all-in-one
{
in: "count:42 # Meaning\n" +
`name:"Dave" ` +
`quote:"\"I didn't want to go.\"" ` +
`pet:"bunny" ` +
`pet:"kitty" ` +
`pet:"horsey" ` +
`inner:<` +
` host:"footrest.syd" ` +
` port:7001 ` +
` connected:true ` +
`> ` +
`others:<` +
` key:3735928559 ` +
` value:"\x01A\a\f" ` +
`> ` +
`others:<` +
" weight:58.9 # Atomic weight of Co\n" +
` inner:<` +
` host:"lesha.mtv" ` +
` port:8002 ` +
` >` +
`>`,
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
Quote: String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &InnerMessage{
Host: String("footrest.syd"),
Port: Int32(7001),
Connected: Bool(true),
},
Others: []*OtherMessage{
{
Key: Int64(3735928559),
Value: []byte{0x1, 'A', '\a', '\f'},
},
{
Weight: Float32(58.9),
Inner: &InnerMessage{
Host: String("lesha.mtv"),
Port: Int32(8002),
},
},
},
},
},
}
func TestUnmarshalText(t *testing.T) {
for i, test := range unMarshalTextTests {
pb := new(MyMessage)
err := UnmarshalText(test.in, pb)
if test.err == "" {
// We don't expect failure.
if err != nil {
t.Errorf("Test %d: Unexpected error: %v", i, err)
} else if !Equal(pb, test.out) {
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
i, pb, test.out)
}
} else {
// We do expect failure.
if err == nil {
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
} else if err.Error() != test.err {
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
i, err.Error(), test.err)
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !Equal(pb, test.out) {
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
i, pb, test.out)
}
}
}
}
func TestUnmarshalTextCustomMessage(t *testing.T) {
msg := &textMessage{}
if err := UnmarshalText("custom", msg); err != nil {
t.Errorf("Unexpected error from custom unmarshal: %v", err)
}
if UnmarshalText("not custom", msg) == nil {
t.Errorf("Didn't get expected error from custom unmarshal")
}
}
// Regression test; this caused a panic.
func TestRepeatedEnum(t *testing.T) {
pb := new(RepeatedEnum)
if err := UnmarshalText("color: RED", pb); err != nil {
t.Fatal(err)
}
exp := &RepeatedEnum{
Color: []RepeatedEnum_Color{RepeatedEnum_RED},
}
if !Equal(pb, exp) {
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
}
}
func TestProto3TextParsing(t *testing.T) {
m := new(proto3pb.Message)
const in = `name: "Wallace" true_scotsman: true`
want := &proto3pb.Message{
Name: "Wallace",
TrueScotsman: true,
}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) |
}
func TestMapParsing(t *testing.T) {
m := new(MessageWithMap)
const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
`msg_mapping:<value:<f: 5.0>>` + // omitted key
`msg_mapping:<key:1>` + // omitted value
`byte_mapping:<key:true value:"so be it">` +
`byte_mapping:<>` // omitted key and value
want := &MessageWithMap{
NameMapping: map[int32]string{
1: "Beatles",
1234: "Feist",
},
MsgMapping: map[int64]*FloatingPoint{
-4: {F: Float64(2.0)},
-2: {F: Float64(4.0)},
0: {F: Float64(5.0)},
1: nil,
},
ByteMapping: map[bool][]byte{
false: nil,
true: []byte("so be it"),
},
}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) {
t.Errorf("\n got %v\nwant %v", m, want)
}
}
func TestOneofParsing(t *testing.T) {
const in = `name:"Shrek"`
m := new(Communique)
want := &Communique{Union: &Communique_Name{"Shrek"}}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) {
t.Errorf("\n got %v\nwant %v", m, want)
}
const inOverwrite = `name:"Shrek" number:42`
m = new(Communique)
testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
if err := UnmarshalText(inOverwrite, m); err == nil {
t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
} else if err.Error() != testErr {
t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
err.Error(), testErr)
}
}
var benchInput string
func init() {
benchInput = "count: 4\n"
for i := 0; i < 1000; i++ {
benchInput += "pet: \"fido\"\n"
}
// Check it is valid input.
pb := new(MyMessage)
err := UnmarshalText(benchInput, pb)
if err != nil {
panic("Bad benchmark input: " + err.Error())
}
}
func BenchmarkUnmarshalText(b *testing.B) {
pb := new(MyMessage)
for i := 0; i < b.N; i++ {
UnmarshalText(benchInput, pb)
}
b.SetBytes(int64(len(benchInput)))
}
| {
t.Errorf("\n got %v\nwant %v", m, want)
} |
test_distillation.py | # flake8: noqa
import os
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import ToTensor
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS
class DistilRunner(dl.Runner):
def handle_batch(self, batch):
x, y = batch
self.model["teacher"].eval() # let's manually set teacher model to eval mode
with torch.no_grad():
t_logits = self.model["teacher"](x)
s_logits = self.model["student"](x)
self.batch = {
"t_logits": t_logits,
"s_logits": s_logits,
"targets": y,
"s_logprobs": F.log_softmax(s_logits, dim=-1),
"t_probs": F.softmax(t_logits, dim=-1),
}
def train_experiment(device, engine=None):
with TemporaryDirectory() as logdir:
teacher = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
model = {"teacher": teacher, "student": student}
criterion = {"cls": nn.CrossEntropyLoss(), "kl": nn.KLDivLoss(reduction="batchmean")}
optimizer = optim.Adam(student.parameters(), lr=0.02)
loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
),
}
runner = DistilRunner()
# model training
runner.train(
engine=engine or dl.DeviceEngine(device),
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
logdir=logdir,
verbose=False,
callbacks=[
dl.AccuracyCallback(
input_key="t_logits", target_key="targets", num_classes=2, prefix="teacher_"
),
dl.AccuracyCallback(
input_key="s_logits", target_key="targets", num_classes=2, prefix="student_"
),
dl.CriterionCallback(
input_key="s_logits",
target_key="targets",
metric_key="cls_loss",
criterion_key="cls",
),
dl.CriterionCallback(
input_key="s_logprobs",
target_key="t_probs",
metric_key="kl_div_loss",
criterion_key="kl",
),
dl.MetricAggregationCallback(
metric_key="loss", metrics=["kl_div_loss", "cls_loss"], mode="mean"
),
dl.OptimizerCallback(metric_key="loss", model_key="student"),
dl.CheckpointCallback(
logdir=logdir,
loader_key="valid",
metric_key="loss",
minimize=True,
save_n_best=3,
),
],
)
# Torch
def test_distillation_on_cpu():
train_experiment("cpu")
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_distillation_on_torch_cuda0():
train_experiment("cuda:0")
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_cuda1():
train_experiment("cuda:1")
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_dp():
train_experiment(None, dl.DataParallelEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_ddp():
train_experiment(None, dl.DistributedDataParallelEngine())
# AMP
@mark.skipif(
not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason="No CUDA or AMP found",
)
def test_distillation_on_amp():
train_experiment(None, dl.AMPEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_dp():
train_experiment(None, dl.DataParallelAMPEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_ddp():
|
# APEX
@mark.skipif(
not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason="No CUDA or Apex found",
)
def test_distillation_on_apex():
train_experiment(None, dl.APEXEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
reason="No CUDA>=2 or Apex found",
)
def test_distillation_on_apex_dp():
train_experiment(None, dl.DataParallelAPEXEngine())
# @mark.skipif(
# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
# reason="No CUDA>=2 or Apex found",
# )
# def test_distillation_on_apex_ddp():
# train_experiment(None, dl.DistributedDataParallelApexEngine())
| train_experiment(None, dl.DistributedDataParallelAMPEngine()) |
cli.py | import hashlib
import datetime
import json
import uuid
from hashlib import sha256
from sys import version_info as pyVersion
from binascii import hexlify, unhexlify
from wallet import *
from func.send_message import send_message
from func.send_coin import send_coin
from func.node_connection import *
from lib.mixlib import *
import pickle
from blockchain.blockchain_main import get_blockchain, create_blockchain, sendme_full_chain
from lib.settings import the_settings
def show_menu():
print(banner_maker(sc_name="Mix Blockchain Network",description="This is an open source blockchain network project. It exists for people to build and use their own blockchain networks. Or to join the network created by others.",author="Onur Atakan ULUSOY",email="[email protected]") + \
menu_space() + \
menu_maker(menu_number="cbc",menu_text="Create Blockchain")+ \
menu_maker(menu_number="cw",menu_text="Create Wallet")+ \
menu_space() + \
menu_maker(menu_number="sm",menu_text="Send Message")+ \
menu_maker(menu_number="sc",menu_text="Send Coin")+ \
menu_space() + \
menu_maker(menu_number="gb",menu_text="Get Balance")+ \
menu_space() + \
menu_maker(menu_number="ndstart",menu_text="Node Start")+ \
menu_maker(menu_number="ndstop",menu_text="Node Stop")+ \
menu_maker(menu_number="ndconnect",menu_text="Node Connect")+ \
menu_maker(menu_number="ndconnectmix_blockchain_network",menu_text="Node Connect from mix_blockchain_network-DB")+ \
menu_space() + \
menu_maker(menu_number="testmodeon",menu_text="Test mode ON")+ \
menu_maker(menu_number="testmodeoff",menu_text="Test mode OF")+ \
menu_maker(menu_number="debugmodeon",menu_text="Debug mode ON")+ \
menu_maker(menu_number="debugmodeoff",menu_text="Debug mode OF")+ \
menu_space() + \
menu_maker(menu_number="getfullnodelist",menu_text="Get Full Node List")+ \
menu_maker(menu_number="getfullchain",menu_text="Get Full Chain")+ \
quit_menu_maker(mode="main")
)
def menu():
while True:
show_menu()
choices_input = question_maker(mode="main")
if choices_input == "cbc":
create_blockchain()
if choices_input == "cw":
Wallet_Create()
if choices_input == "sm":
send_message(input("Message: "),input("Please write receiver adress: "))
if choices_input == "sc":
send_coin(input("Coin Amount: "),input("Please write receiver adress: "))
if choices_input == "gb":
print(get_blockchain().getBalance(Wallet_Import(0,0)))
if choices_input == "help":
show_menu()
if choices_input == "ndstart":
ndstart(int(input("port: ")))
if choices_input == "ndstop":
ndstop()
if choices_input == "ndconnect":
ndconnect(str(input("node ip: ")),int(input("node port: ")))
if choices_input == "ndconnectmix_blockchain_network":
ndconnectmix_blockchain_network()
if choices_input == "testmodeon":
the_settings().test_mode(True)
if choices_input == "testmodeoff":
the_settings().test_mode(False)
if choices_input == "debugmodeon":
the_settings().debug_mode(True)
if choices_input == "debugmodeoff":
the_settings().debug_mode(False)
if choices_input == "getfullnodelist":
sendme_full_node_list()
if choices_input == "getfullchain":
sendme_full_chain()
if choices_input == "0":
exit()
def start():
|
if __name__ == '__main__':
start() | menu() |
helper.go | package helper
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"io/ioutil"
"net/http"
"strings"
"github.com/micro/cli/v2"
"github.com/micro/go-micro/metadata"
)
func ACMEHosts(ctx *cli.Context) []string {
var hosts []string | }
return hosts
}
func RequestToContext(r *http.Request) context.Context {
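	// Copy the incoming HTTP headers into go-micro metadata so downstream
	// handlers can read them from the request context.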
ctx := context.Background()
md := make(metadata.Metadata)
for k, v := range r.Header {
md[k] = strings.Join(v, ",")
}
return metadata.NewContext(ctx, md)
}
func TLSConfig(ctx *cli.Context) (*tls.Config, error) {
cert := ctx.String("tls_cert_file")
key := ctx.String("tls_key_file")
ca := ctx.String("tls_client_ca_file")
if len(cert) > 0 && len(key) > 0 {
certs, err := tls.LoadX509KeyPair(cert, key)
if err != nil {
return nil, err
}
if len(ca) > 0 {
caCert, err := ioutil.ReadFile(ca)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
return &tls.Config{
Certificates: []tls.Certificate{certs},
ClientCAs: caCertPool,
ClientAuth: tls.RequireAndVerifyClientCert,
NextProtos: []string{"h2", "http/1.1"},
}, nil
}
return &tls.Config{
Certificates: []tls.Certificate{certs}, NextProtos: []string{"h2", "http/1.1"},
}, nil
}
return nil, errors.New("TLS certificate and key files not specified")
}
func ServeCORS(w http.ResponseWriter, r *http.Request) {
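	// set writes a header only when it is not already present; the v declared
	// in the if statement shadows the parameter just for the existence check.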
set := func(w http.ResponseWriter, k, v string) {
if v := w.Header().Get(k); len(v) > 0 {
return
}
w.Header().Set(k, v)
}
if origin := r.Header.Get("Origin"); len(origin) > 0 {
set(w, "Access-Control-Allow-Origin", origin)
} else {
set(w, "Access-Control-Allow-Origin", "*")
}
set(w, "Access-Control-Allow-Methods", "POST, PATCH, GET, OPTIONS, PUT, DELETE")
set(w, "Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
} | for _, host := range strings.Split(ctx.String("acme_hosts"), ",") {
if len(host) > 0 {
hosts = append(hosts, host)
} |
controller.py | import inspect
from collections.abc import Callable
from inspect import Parameter
from typing import Optional, TypeVar, Union
from fastapi import APIRouter, Depends
from starlette.routing import Route, WebSocketRoute
from .types import InitializedError
from .utils import make_cls_accept_cls_annotated_deps
T = TypeVar("T")
def controller(
router: APIRouter, *, version: Optional[float] = None
) -> Callable[[type[T]], type[T]]:
"""
Factory function that returns a decorator converting the decorated class into a controller class.
The first positional argument (typically `self`) to all methods decorated as endpoints using the provided router
will be populated with a controller instance via FastAPI's dependency-injection system.
"""
def decorator(cls: type[T]) -> type[T]:
return _controller(cls, router, version=version)
return decorator
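# A minimal usage sketch (the router prefix and ItemsController below are
# hypothetical, not part of this module):
#
#   router = APIRouter(prefix="/items")
#
#   @controller(router)
#   class ItemsController:
#       @router.get("/")
#       def list_items(self) -> list[str]:
#           return []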
def _controller(
cls: type[T], router: APIRouter, *, version: Optional[float] = None | """
Decorator that converts the decorated class into a controller class.
Replace all methods of class `cls` decorated as endpoints of router `router` with
function calls that will properly inject an instance of class `cls`.
"""
if getattr(cls, "__fastapi_controller__", False):
raise InitializedError(cls)
setattr(cls, "__fastapi_controller__", cls.__name__)
setattr(cls, "__version__", version)
setattr(cls, "router", router)
cls = make_cls_accept_cls_annotated_deps(cls)
internal_router = APIRouter()
function_members = inspect.getmembers(cls, inspect.isfunction)
function_set = set(func for _, func in function_members)
routes = [
route
for route in router.routes
if isinstance(route, (Route, WebSocketRoute)) and route.endpoint in function_set
]
for route in routes:
router.routes.remove(route)
_update_controller_route_endpoint_signature(cls, route)
route.path = route.path.removeprefix(router.prefix)
internal_router.routes.append(route)
router.include_router(internal_router)
return cls
def _update_controller_route_endpoint_signature(
cls: type[T], route: Union[Route, WebSocketRoute]
) -> None:
"""
Fix a controller route endpoint signature to ensure FastAPI injects dependencies properly.
"""
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_params = list(old_signature.parameters.values())
old_1st_param = old_params[0]
new_1st_param = old_1st_param.replace(default=Depends(cls))
new_params = [new_1st_param] + [
param.replace(kind=Parameter.KEYWORD_ONLY) for param in old_params[1:]
]
new_signature = old_signature.replace(parameters=new_params)
setattr(route.endpoint, "__signature__", new_signature) | ) -> type[T]: |
Menu.tsx | import * as React from 'react';
import {
Platform,
StyleProp,
StyleSheet,
Animated,
BackHandler,
Dimensions,
Easing,
I18nManager,
LayoutRectangle,
TouchableWithoutFeedback,
View,
ViewStyle,
ScrollView,
findNodeHandle,
NativeEventSubscription,
} from 'react-native';
import { withTheme } from '../../core/theming';
import type { $Omit } from '../../types';
import Portal from '../Portal/Portal';
import Surface from '../Surface';
import MenuItem from './MenuItem';
import { APPROX_STATUSBAR_HEIGHT } from '../../constants';
import { addEventListener } from '../../utils/addEventListener';
type Props = {
/**
* Whether the Menu is currently visible.
*/
visible: boolean;
/**
* The anchor to open the menu from. In most cases, it will be a button that opens the menu.
*/
anchor: React.ReactNode | { x: number; y: number };
/**
* Extra margin to add at the top of the menu to account for translucent status bar on Android.
* If you are using Expo, we assume translucent status bar and set a height for status bar automatically.
   * Pass `0` or a custom value to customize it.
* This is automatically handled on iOS.
*/
statusBarHeight?: number;
/**
* Callback called when Menu is dismissed. The `visible` prop needs to be updated when this is called.
*/
onDismiss: () => void;
/**
* Accessibility label for the overlay. This is read by the screen reader when the user taps outside the menu.
*/
overlayAccessibilityLabel?: string;
/**
* Content of the `Menu`.
*/
children: React.ReactNode;
/**
* Style of menu's inner content.
*/
contentStyle?: StyleProp<ViewStyle>;
style?: StyleProp<ViewStyle>;
/**
* @optional
*/
theme: ReactNativePaper.Theme;
};
type Layout = $Omit<$Omit<LayoutRectangle, 'x'>, 'y'>;
type State = {
rendered: boolean;
top: number;
left: number;
menuLayout: Layout;
anchorLayout: Layout;
opacityAnimation: Animated.Value;
scaleAnimation: Animated.ValueXY;
};
// Minimum padding between the edge of the screen and the menu
const SCREEN_INDENT = 0;
// From https://material.io/design/motion/speed.html#duration
const ANIMATION_DURATION = 250;
// From the 'Standard easing' section of https://material.io/design/motion/speed.html#easing
const EASING = Easing.bezier(0.4, 0, 0.2, 1);
/**
* Menus display a list of choices on temporary elevated surfaces. Their placement varies based on the element that opens them.
*
* <div class="screenshots">
* <img class="medium" src="screenshots/menu-1.png" />
* <img class="medium" src="screenshots/menu-2.png" />
* </div>
*
* ## Usage
* ```js
* import * as React from 'react';
* import { View } from 'react-native';
* import { Button, Menu, Divider, Provider } from 'react-native-paper';
*
* const MyComponent = () => {
* const [visible, setVisible] = React.useState(false);
*
* const openMenu = () => setVisible(true);
*
* const closeMenu = () => setVisible(false);
*
* return (
* <Provider>
* <View
* style={{
* paddingTop: 50,
* flexDirection: 'row',
* justifyContent: 'center',
* }}>
* <Menu
* visible={visible}
* onDismiss={closeMenu}
* anchor={<Button onPress={openMenu}>Show menu</Button>}>
* <Menu.Item onPress={() => {}} title="Item 1" />
* <Menu.Item onPress={() => {}} title="Item 2" />
* <Divider />
* <Menu.Item onPress={() => {}} title="Item 3" />
* </Menu>
* </View>
* </Provider>
* );
* };
*
* export default MyComponent;
* ```
*/
class Menu extends React.Component<Props, State> {
// @component ./MenuItem.tsx
static Item = MenuItem;
static defaultProps = {
statusBarHeight: APPROX_STATUSBAR_HEIGHT,
overlayAccessibilityLabel: 'Close menu',
};
static getDerivedStateFromProps(nextProps: Props, prevState: State) {
if (nextProps.visible && !prevState.rendered) {
return { rendered: true };
}
return null;
}
state = {
rendered: this.props.visible,
top: 0,
left: 0,
menuLayout: { width: 0, height: 0 },
anchorLayout: { width: 0, height: 0 },
opacityAnimation: new Animated.Value(0),
scaleAnimation: new Animated.ValueXY({ x: 0, y: 0 }),
};
componentDidUpdate(prevProps: Props) {
if (prevProps.visible !== this.props.visible) {
this.updateVisibility();
}
}
componentWillUnmount() {
this.removeListeners();
}
private anchor?: View | null = null;
private menu?: View | null = null;
private backHandlerSubscription: NativeEventSubscription | undefined;
private dimensionsSubscription: NativeEventSubscription | undefined;
private isCoordinate = (anchor: any): anchor is { x: number; y: number } =>
!React.isValidElement(anchor) &&
typeof anchor?.x === 'number' &&
typeof anchor?.y === 'number';
private measureMenuLayout = () =>
new Promise<LayoutRectangle>((resolve) => {
if (this.menu) {
this.menu.measureInWindow((x, y, width, height) => {
resolve({ x, y, width, height });
});
}
});
private measureAnchorLayout = () =>
new Promise<LayoutRectangle>((resolve) => {
const { anchor } = this.props;
if (this.isCoordinate(anchor)) {
resolve({ x: anchor.x, y: anchor.y, width: 0, height: 0 });
return;
}
if (this.anchor) {
this.anchor.measureInWindow((x, y, width, height) => {
resolve({ x, y, width, height });
});
}
});
private updateVisibility = async () => {
// Menu is rendered in Portal, which updates items asynchronously
// We need to do the same here so that the ref is up-to-date
await Promise.resolve();
if (this.props.visible) {
this.show();
} else {
this.hide();
}
};
private isBrowser = () => Platform.OS === 'web' && 'document' in global;
private focusFirstDOMNode = (el: View | null | undefined) => {
if (el && this.isBrowser()) {
// When in the browser, we want to focus the first focusable item on toggle
// For example, when menu is shown, focus the first item in the menu
// And when menu is dismissed, send focus back to the button to resume tabbing
const node: any = findNodeHandle(el);
const focusableNode = node.querySelector(
// This is a rough list of selectors that can be focused
'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])'
);
focusableNode?.focus();
}
};
private handleDismiss = () => {
if (this.props.visible) {
this.props.onDismiss();
}
return true;
};
private handleKeypress = (e: KeyboardEvent) => {
if (e.key === 'Escape') {
this.props.onDismiss();
}
};
private attachListeners = () => {
this.backHandlerSubscription = addEventListener(
BackHandler,
'hardwareBackPress',
this.handleDismiss
);
this.dimensionsSubscription = addEventListener(
Dimensions,
'change',
this.handleDismiss
);
this.isBrowser() && document.addEventListener('keyup', this.handleKeypress);
};
private removeListeners = () => {
this.backHandlerSubscription?.remove();
this.dimensionsSubscription?.remove();
this.isBrowser() &&
document.removeEventListener('keyup', this.handleKeypress);
};
private show = async () => {
const windowLayout = Dimensions.get('window');
const [menuLayout, anchorLayout] = await Promise.all([
this.measureMenuLayout(),
this.measureAnchorLayout(),
]);
// When visible is true on the first render, the native views may not be
// rendered yet, and the measureMenuLayout/measureAnchorLayout functions
// can return wrong values, e.g. { x: 0, y: 0, width: 0, height: 0 },
// so we have to wait until the views are ready and re-run this
// function to show the menu
if (
!windowLayout.width ||
!windowLayout.height ||
!menuLayout.width ||
!menuLayout.height ||
(!anchorLayout.width && !this.isCoordinate(this.props.anchor)) ||
(!anchorLayout.height && !this.isCoordinate(this.props.anchor))
) {
requestAnimationFrame(this.show);
return;
}
this.setState(
() => ({
left: anchorLayout.x,
top: anchorLayout.y,
anchorLayout: {
height: anchorLayout.height,
width: anchorLayout.width,
},
menuLayout: {
width: menuLayout.width,
height: menuLayout.height,
},
}),
() => {
this.attachListeners();
const { animation } = this.props.theme;
Animated.parallel([
Animated.timing(this.state.scaleAnimation, {
toValue: { x: menuLayout.width, y: menuLayout.height },
duration: ANIMATION_DURATION * animation.scale,
easing: EASING,
useNativeDriver: true,
}),
Animated.timing(this.state.opacityAnimation, {
toValue: 1,
duration: ANIMATION_DURATION * animation.scale,
easing: EASING,
useNativeDriver: true,
}),
]).start(({ finished }) => {
if (finished) {
this.focusFirstDOMNode(this.menu);
}
});
}
);
};
private hide = () => {
this.removeListeners();
const { animation } = this.props.theme;
Animated.timing(this.state.opacityAnimation, {
toValue: 0,
duration: ANIMATION_DURATION * animation.scale,
easing: EASING,
useNativeDriver: true,
}).start(({ finished }) => {
if (finished) {
this.setState({ menuLayout: { width: 0, height: 0 }, rendered: false });
this.state.scaleAnimation.setValue({ x: 0, y: 0 });
this.focusFirstDOMNode(this.anchor);
}
});
};
render() {
const {
visible,
anchor,
contentStyle,
style,
children,
theme,
statusBarHeight,
onDismiss,
overlayAccessibilityLabel,
} = this.props;
const {
rendered,
menuLayout,
anchorLayout,
opacityAnimation,
scaleAnimation,
} = this.state;
let { left, top } = this.state;
// On Android the measure function is off by the status bar height (~24), so compensate for it here
const additionalVerticalValue = Platform.select({
android: statusBarHeight,
default: 0,
});
const scaleTransforms = [
{
scaleX: scaleAnimation.x.interpolate({
inputRange: [0, menuLayout.width],
outputRange: [0, 1],
}),
},
{
scaleY: scaleAnimation.y.interpolate({
inputRange: [0, menuLayout.height],
outputRange: [0, 1],
}),
},
];
const windowLayout = Dimensions.get('window');
// We need to translate the menu while animating its scale to imitate a transform origin for the scale animation
const positionTransforms = [];
// Check if the menu fits horizontally; if not, align it to the right.
if (left <= windowLayout.width - menuLayout.width - SCREEN_INDENT) {
positionTransforms.push({
translateX: scaleAnimation.x.interpolate({
inputRange: [0, menuLayout.width],
outputRange: [-(menuLayout.width / 2), 0],
}),
});
| left = SCREEN_INDENT;
}
} else {
positionTransforms.push({
translateX: scaleAnimation.x.interpolate({
inputRange: [0, menuLayout.width],
outputRange: [menuLayout.width / 2, 0],
}),
});
left += anchorLayout.width - menuLayout.width;
const right = left + menuLayout.width;
// Check if menu position has enough space from right side
if (right > windowLayout.width - SCREEN_INDENT) {
left = windowLayout.width - SCREEN_INDENT - menuLayout.width;
}
}
// If the menu is larger than available vertical space,
// calculate the height of scrollable view
let scrollableMenuHeight = 0;
// Check if the menu should be scrollable
if (
// Check if the menu overflows from bottom side
top >=
windowLayout.height -
menuLayout.height -
SCREEN_INDENT -
additionalVerticalValue &&
// And bottom side of the screen has more space than top side
top <= windowLayout.height - top
) {
// Scrollable menu should be below the anchor (expands downwards)
scrollableMenuHeight =
windowLayout.height - top - SCREEN_INDENT - additionalVerticalValue;
} else if (
// Check if the menu overflows from bottom side
top >=
windowLayout.height -
menuLayout.height -
SCREEN_INDENT -
additionalVerticalValue &&
// And top side of the screen has more space than bottom side
top >= windowLayout.height - top &&
// And menu overflows from top side
top <=
menuLayout.height -
anchorLayout.height +
SCREEN_INDENT -
additionalVerticalValue
) {
// Scrollable menu should be above the anchor (expands upwards)
scrollableMenuHeight =
top + anchorLayout.height - SCREEN_INDENT + additionalVerticalValue;
}
// Scrollable menu max height
scrollableMenuHeight =
scrollableMenuHeight > windowLayout.height - 2 * SCREEN_INDENT
? windowLayout.height - 2 * SCREEN_INDENT
: scrollableMenuHeight;
// Menu is typically positioned below the element that generates it
// So first check if it fits below the anchor (expands downwards)
if (
// Check if menu fits vertically
top <=
windowLayout.height -
menuLayout.height -
SCREEN_INDENT -
additionalVerticalValue ||
// Or if the menu overflows from bottom side
(top >=
windowLayout.height -
menuLayout.height -
SCREEN_INDENT -
additionalVerticalValue &&
// And bottom side of the screen has more space than top side
top <= windowLayout.height - top)
) {
positionTransforms.push({
translateY: scaleAnimation.y.interpolate({
inputRange: [0, menuLayout.height],
outputRange: [-((scrollableMenuHeight || menuLayout.height) / 2), 0],
}),
});
// Check if menu position has enough space from top side
if (top < SCREEN_INDENT) {
top = SCREEN_INDENT;
}
} else {
positionTransforms.push({
translateY: scaleAnimation.y.interpolate({
inputRange: [0, menuLayout.height],
outputRange: [(scrollableMenuHeight || menuLayout.height) / 2, 0],
}),
});
top += anchorLayout.height - (scrollableMenuHeight || menuLayout.height);
const bottom =
top +
(scrollableMenuHeight || menuLayout.height) +
additionalVerticalValue;
// Check if menu position has enough space from bottom side
if (bottom > windowLayout.height - SCREEN_INDENT) {
top =
scrollableMenuHeight === windowLayout.height - 2 * SCREEN_INDENT
? -SCREEN_INDENT * 2
: windowLayout.height -
menuLayout.height -
SCREEN_INDENT -
additionalVerticalValue;
}
}
const shadowMenuContainerStyle = {
opacity: opacityAnimation,
transform: scaleTransforms,
borderRadius: theme.roundness,
...(scrollableMenuHeight ? { height: scrollableMenuHeight } : {}),
};
const positionStyle = {
top: this.isCoordinate(anchor) ? top : top + additionalVerticalValue,
...(I18nManager.isRTL ? { right: left } : { left }),
};
return (
<View
ref={(ref) => {
this.anchor = ref;
}}
collapsable={false}
>
{this.isCoordinate(anchor) ? null : anchor}
{rendered ? (
<Portal>
<TouchableWithoutFeedback
accessibilityLabel={overlayAccessibilityLabel}
accessibilityRole="button"
onPress={onDismiss}
>
<View
style={[
StyleSheet.absoluteFill,
{ backgroundColor: theme.colors.backdrop },
]}
/>
</TouchableWithoutFeedback>
<View
ref={(ref) => {
this.menu = ref;
}}
collapsable={false}
accessibilityViewIsModal={visible}
style={[styles.wrapper, positionStyle, style]}
pointerEvents={visible ? 'box-none' : 'none'}
onAccessibilityEscape={onDismiss}
>
<Animated.View style={{ transform: positionTransforms }}>
<Surface
style={
[
styles.shadowMenuContainer,
shadowMenuContainerStyle,
contentStyle,
] as StyleProp<ViewStyle>
}
>
{(scrollableMenuHeight && (
<ScrollView>{children}</ScrollView>
)) || <React.Fragment>{children}</React.Fragment>}
</Surface>
</Animated.View>
</View>
</Portal>
) : null}
</View>
);
}
}
const styles = StyleSheet.create({
wrapper: {
position: 'absolute',
},
shadowMenuContainer: {
opacity: 0,
paddingVertical: 8,
elevation: 8,
},
});
export default withTheme(Menu); | // Check if menu position has enough space from left side
if (left < SCREEN_INDENT) { |
PyAlgoFusion.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import talib
from pyalgotrade import strategy, plotter
from pyalgotrade.broker.backtesting import TradePercentage, Broker
from pyalgotrade.broker import Order
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.broker.slippage import NoSlippage, VolumeShareSlippage
from pyalgotrade.stratanalyzer import returns, trades
from pyalgotrade.talibext import indicator
from pyalgotrade.optimizer import server, local
import itertools
from sklearn import preprocessing, svm, cross_validation, metrics, pipeline, grid_search
from scipy.stats import sem
from WeekDataPrepare import readWSDFile, readWSDIndexFile, prepareData, optimizeSVM
def readAndReWriteCSV(baseDir, instrument, startYear, yearNum=1):
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir + instrument + '/wsd_' + instrument + '_' + str(startYear + i) + '.csv',
index_col=0, sep='\t', usecols=[0, 2, 3, 4, 5, 6, 14], header=None,
skiprows=1, names=['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close'],
parse_dates=True, date_parser=dateparse)
if i == 0:
df = tempDF
else:
df = df.append(tempDF)
pathName = None
resultDF = None
if yearNum==1:
pathName = baseDir+str(instrument)+'_'+str(startYear)+'.csv'
resultDF = df[str(startYear)]
else:
pathName = baseDir+str(instrument)+'_'+str(startYear)+'_'+str(startYear+yearNum-1)+'.csv'
resultDF = df[str(startYear):str(startYear+yearNum-1)]
resultDF.to_csv(pathName)
return pathName, resultDF
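# Example (hypothetical base directory): merge the 2014 and 2015 WSD files for
# the CSI 500 index into one Yahoo-style CSV that yahoofeed can read later on:
#     pathName, frame = readAndReWriteCSV('/data/', '000905.SH', 2014, yearNum=2)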
'''
Compute the return ratio
'''
def returnRatio(V, C=100000.0):
return V/C-1.0
'''
Compute return ratios (multi-period)
'''
def returnRatioArr1(VArr, C=100000.0):
arr = []
for v in VArr: arr.append(v/C-1.0)
return arr
def returnRatioArr(VArr, C=100000.0):
arr = []
for v in VArr:
arr.append(v / C - 1.0)
C = v
return arr
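# Example: returnRatioArr([110000.0, 121000.0], C=100000.0) gives roughly
# [0.1, 0.1], since each element is the return relative to the previous
# period's portfolio value.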
'''
Compute the annualized return ratio (multi-period)
'''
def annualizedReturnRatio(returnRatioArr, T=250.0, D=250.0):
import math
tmp = 1
for r in returnRatioArr: tmp *= (r+1)
return math.pow(tmp, D/T)-1
'''
Compute the annualized return ratio (single period)
'''
def annualizedReturnRatioSingle(portfolio, C=100000.0, T=250.0, D=250.0):
import math
return math.pow(portfolio/C, D/T) - 1
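# Example: a portfolio of 121000.0 after two years of trading (T=500.0,
# D=250.0) gives annualizedReturnRatioSingle(121000.0, T=500.0) ~= 0.1,
# because sqrt(1.21) - 1 = 0.1.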
'''Vote on the classification results
'''
def vote_clf(svm, rf, sgd):
up = 0; down = 0
for i in (svm, rf, sgd):
if i==1: up += 1
else: down += 1
if up>down: return 1
else: return -1
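# Example: vote_clf(1, -1, 1) returns 1, because two of the three
# classifiers predict "up".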
baseDir = '/Users/eugene/Downloads/Data/'
# baseDir = '/Users/eugene/Downloads/marketQuotationData/'
# CSI 300, SSE 50, CSI 500
instruments = ['000300.SH', '000016.SH', '000905.SH']
instrument = instruments[2]
initCapital = 100000000.0 # 100 million
startYear = 2015; yearNum = 1
# startYear = 2014; yearNum = 2
df = readWSDFile(baseDir, instrument, startYear, yearNum)
print 'Day count:', len(df)
# print df.head(5)
dfi = readWSDIndexFile(baseDir, instrument, startYear, yearNum)
X, y, actionDates = prepareData(df, dfi)
print np.shape(X)
normalizer = preprocessing.Normalizer().fit(X) # fit does nothing
X_norm = normalizer.transform(X)
# gamma, C, score = optimizeSVM(X_norm, y, kFolds=10); print 'gamma=',gamma, 'C=',C, 'score=',score
# clf = svm.SVC(kernel='rbf', gamma=32, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=32, C=128)
# clf = svm.SVC(kernel='rbf', gamma=128, C=2)
# clf = svm.SVC(kernel='rbf', gamma=512, C=0.5)
# clf = svm.SVC(kernel='rbf', gamma=2, C=128)
clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
from EnsembleTest import optimizeEnsemble
from AdaboostSGDTest import optimizeAdaBoostSGD
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier
from sklearn.linear_model import SGDClassifier
# clf_rf = RandomForestClassifier(max_depth=None, min_samples_split=1, max_features=7)
# n_estimators, score = optimizeEnsemble(X_norm, y, clf=clf_rf, kFolds=10); print 'n_estimators=',n_estimators, 'score=',score
# clf_rf = RandomForestClassifier(max_depth=None, min_samples_split=1, max_features=7, n_estimators=n_estimators)
# alpha, n_estimators_, score = optimizeAdaBoostSGD(X_norm, y, kFolds=10); print 'alpha',alpha, 'n_estimators=',n_estimators_, 'score=',score
# clf_sgd = AdaBoostClassifier(base_estimator=SGDClassifier(loss='log', n_iter=5, alpha=alpha), n_estimators=n_estimators_)
clf_rf = RandomForestClassifier(n_estimators=200)
clf_sgd = AdaBoostClassifier(base_estimator=SGDClassifier(loss='log', alpha=0.1), n_estimators=200)
pathName, df = readAndReWriteCSV(baseDir, instrument, startYear=startYear, yearNum=yearNum)
print pathName
# print df.sample(3)
feed = yahoofeed.Feed()
feed.addBarsFromCSV(instrument, pathName)
class SVMStrategy(strategy.BacktestingStrategy):
def __init__(self, feed, win=10):
super(SVMStrategy, self).__init__(feed)
self.__instrument = instrument
self.__position = None
self.getBroker().setCash(initCapital)
self.getBroker().setCommission(TradePercentage(0.003))
self.getBroker().setAllowNegativeCash(True)
self.getBroker().getFillStrategy().setVolumeLimit(1)
self.getBroker().getFillStrategy().setSlippageModel(VolumeShareSlippage(priceImpact=0.0))
self.__closeDataSeries = feed[instrument].getCloseDataSeries()
self.df = df
self.closeArr = []
self.portfolios = []
self.buys = []
self.sells = []
self.clf = clf
self.clf_rf = clf_rf
self.clf_sgd = clf_sgd
self.X_norm = X_norm
self.y = y
self.actionDates = actionDates
self.win = win
# print 'week count:', len(y)
self.weekCount = 1
self.dayCount = 0
self.errorCount = 0
self.rightCount = 0
def getDF(self):
return self.df
def getBuys(self):
return self.buys
def getSells(self):
return self.sells
def getCorrectness(self):
return self.rightCount*1.0/(self.errorCount+self.rightCount)
def onEnterOk(self, position):
# execInfo = position.getEntryOrder().getExecutionInfo()
# self.info("%s BUY %.0f shares at %.3f, commission=%.3f, PnL=%.3f" %
# (execInfo.getDateTime().date(), execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission(), position.getPnL()))
pass
def onEnterCanceled(self, position):
self.__position = None
def onExitOk(self, position):
# execInfo = position.getExitOrder().getExecutionInfo( | # self.info("%s SELL %.0f shares at %.3f, commission=%.3f, PnL=%.3f" %
# (execInfo.getDateTime().date(), execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission(), position.getPnL()))
self.__position = None
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
self.__position.exitMarket()
def onStart(self):
pass
def onFinish(self, bars):
self.df['closeArr'] = self.closeArr
self.df['portfolio'] = self.portfolios
# print 'dayCount=',self.dayCount, 'weekCount=',self.weekCount-1
# print 'errorCount=',self.errorCount, 'rightCount=',self.rightCount
pass
def onOrderUpdated(self, order):
execInfo = order.getExecutionInfo()
fillDate = None
if execInfo!=None:
fillDate = execInfo.getDateTime().date()
if order.getAction()==1: self.buys.append(fillDate)
else: self.sells.append(fillDate)
# print 'id=',order.getId(), 'state=',Order.State.toString(order.getState()), 'type=',order.getType(), \
# 'submitAt=',order.getSubmitDateTime().date(), 'fillAt=',fillDate, \
# 'action=',order.getAction(), 'state=',order.getState(), 'active=',order.isActive(), \
# 'quantity=',order.getQuantity(), 'Positions=',self.getBroker().getPositions(), \
# 'cash=', self.getBroker().getCash()
def onBars(self, bars):
self.closeArr.append(bars[self.__instrument].getPrice())
self.portfolios.append(self.getBroker().getEquity())
self.dayCount += 1
curDate = bars[self.__instrument].getDateTime().date()
if curDate!=self.actionDates[self.weekCount-1]: # not the last trading day of the week
return
else: # the last trading day of the week
if self.weekCount < self.win+1:
self.weekCount += 1
return
else:
X_train = self.X_norm[self.weekCount-self.win-1:self.weekCount-1]
y_train = self.y[self.weekCount-self.win-1:self.weekCount-1]
X_test = self.X_norm[self.weekCount-1]
y_test = self.y[self.weekCount-1]
self.clf.fit(X_train, y_train)
svm = self.clf.predict([X_test])[0] # -1 means down, 1 means up
self.clf_rf.fit(X_train, y_train)
rf = self.clf_rf.predict([X_test])[0]
self.clf_sgd.fit(X_train, y_train)
sgd = self.clf_sgd.predict([X_test])[0]
result = vote_clf(svm, rf, sgd)
if result!=y_test: self.errorCount += 1 # misclassified
else: self.rightCount += 1 # classified correctly
# If a position was not opened, check if we should enter a long position.
if self.__position is None:
if result==1:
shares = int(self.getBroker().getCash() / bars[self.__instrument].getPrice())
hands = shares/100
# Enter a buy market order. The order is good till canceled.
self.__position = self.enterLong(self.__instrument, hands*100, False)
# Check if we have to exit the position.
elif not self.__position.exitActive() and result==-1:
self.__position.exitMarket()
self.weekCount += 1
pass
def parameters_generator():
win = range(6, 23)
return itertools.product(win)
def testWithBestParameters(win=10):
# Backtest with the best parameters
myStrategy = SVMStrategy(feed, win=win)
returnsAnalyzer = returns.Returns()
myStrategy.attachAnalyzer(returnsAnalyzer)
tradesAnalyzer = trades.Trades()
myStrategy.attachAnalyzer(tradesAnalyzer)
myStrategy.run()
df = myStrategy.getDF()
# print df[['Close', 'closeArr', 'fastSMA', 'slowSMA']].sample(5)
buys = myStrategy.getBuys()
sells = myStrategy.getSells()
# print 'TRADE INFO: ', 'count=',tradesAnalyzer.getCount(), 'allProfits=',tradesAnalyzer.getAll(), 'allReturns=',tradesAnalyzer.getAllReturns()
print "Accuracy: %.3f" % myStrategy.getCorrectness()
print "总净值: %.3f" % myStrategy.getResult()
print "总收益率: %.3f" % returnRatio(myStrategy.getResult(), C=initCapital)
print "年化收益率: %.3f" % annualizedReturnRatioSingle(myStrategy.getResult(), C=initCapital, T=250.0*yearNum, D=250.0)
# fig = plt.figure(figsize=(20,10))
# ax1 = fig.add_subplot(211)
# df[['closeArr']].plot(ax=ax1, lw=2.)
# ax1.plot(buys, df.closeArr.ix[buys], '^', markersize=10, color='m')
# ax1.plot(sells, df.closeArr.ix[sells], 'v', markersize=10, color='k')
# ax2 = fig.add_subplot(212)
# portfolio_ratio = df['portfolio']/initCapital
# portfolio_ratio.plot(ax=ax2, lw=2.)
# ax2.plot(buys, portfolio_ratio.ix[buys], '^', markersize=10, color='m')
# ax2.plot(sells, portfolio_ratio.ix[sells], 'v', markersize=10, color='k')
# # ax3 = fig.add_subplot(313)
# # df['portfolio'].plot(ax=ax3, lw=2.)
# # ax3.plot(buys, df['portfolio'].ix[buys], '^', markersize=10, color='m')
# # ax3.plot(sells, df['portfolio'].ix[sells], 'v', markersize=10, color='k')
# fig.tight_layout()
# plt.show()
def test(isOptimize=True, win=9):
if isOptimize:
# Search for the best parameters
results = local.run(SVMStrategy, feed, parameters_generator())
print 'Parameters:', results.getParameters(), 'Result:', results.getResult()
else:
# Backtest with the best parameters
testWithBestParameters(win=win)
test(isOptimize=False, win=8) | )
|
version.rs | /* | *
* Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers.
*
* The version of the OpenAPI document: v0.0.1-alpha.9
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Version {
/// Version is the service's version.
#[serde(rename = "version", skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl Version {
pub fn new() -> Version {
Version {
version: None,
}
}
} | * Ory APIs |
check.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a modified copy of x/tools/go/analysis/analysistest/analysistest.go
package testutil
import (
"bytes"
"fmt"
"go/format"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"testing"
"text/scanner"
"honnef.co/go/tools/internal/diff/myers"
"honnef.co/go/tools/lintcmd/runner"
"golang.org/x/tools/txtar"
)
type expectation struct {
kind string // either "fact" or "diagnostic"
name string // name of object to which fact belongs, or "package" ("fact" only)
rx *regexp.Regexp
}
func (ex expectation) String() string {
return fmt.Sprintf("%s %s:%q", ex.kind, ex.name, ex.rx) // for debugging
}
// sanitize removes the GOPATH portion of the filename,
// typically a gnarly /tmp directory, and returns the rest.
func sanitize(gopath, filename string) string {
prefix := gopath + string(os.PathSeparator) + "src" + string(os.PathSeparator)
return filepath.ToSlash(strings.TrimPrefix(filename, prefix))
}
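// For example (on a Unix host), with gopath "/tmp/gopath123" the filename
// "/tmp/gopath123/src/a/b.go" is sanitized to "a/b.go".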
// parseExpectations parses the content of a "// want ..." comment
// and returns the expectations, a mixture of diagnostics ("rx") and
// facts (name:"rx").
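//
// For example, the text `+1 "unused result" ptr:"is a pointer"` yields a
// line delta of 1, one diagnostic expectation, and one fact expectation
// for the object named ptr.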
func parseExpectations(text string) (lineDelta int, expects []expectation, err error) {
var scanErr string
sc := new(scanner.Scanner).Init(strings.NewReader(text))
sc.Error = func(s *scanner.Scanner, msg string) {
scanErr = msg // e.g. bad string escape
}
sc.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanInts
scanRegexp := func(tok rune) (*regexp.Regexp, error) {
if tok != scanner.String && tok != scanner.RawString {
return nil, fmt.Errorf("got %s, want regular expression",
scanner.TokenString(tok))
}
pattern, _ := strconv.Unquote(sc.TokenText()) // can't fail
return regexp.Compile(pattern)
}
for {
tok := sc.Scan()
switch tok {
case '+':
tok = sc.Scan()
if tok != scanner.Int {
return 0, nil, fmt.Errorf("got +%s, want +Int", scanner.TokenString(tok))
}
lineDelta, _ = strconv.Atoi(sc.TokenText())
case scanner.String, scanner.RawString:
rx, err := scanRegexp(tok)
if err != nil {
return 0, nil, err
}
expects = append(expects, expectation{"diagnostic", "", rx})
case scanner.Ident:
name := sc.TokenText()
tok = sc.Scan()
if tok != ':' {
return 0, nil, fmt.Errorf("got %s after %s, want ':'",
scanner.TokenString(tok), name)
}
tok = sc.Scan()
rx, err := scanRegexp(tok)
if err != nil {
return 0, nil, err
}
expects = append(expects, expectation{"fact", name, rx})
case scanner.EOF:
if scanErr != "" {
return 0, nil, fmt.Errorf("%s", scanErr)
}
return lineDelta, expects, nil
default:
return 0, nil, fmt.Errorf("unexpected %s", scanner.TokenString(tok))
}
}
}
func CheckSuggestedFixes(t *testing.T, diagnostics []runner.Diagnostic) {
// Process each result (package) separately, matching up the suggested
// fixes into a diff, which we will compare to the .golden file. We have
// to do this per-result in case a file appears in two packages, such as in
// packages with tests, where mypkg/a.go will appear in both mypkg and
// mypkg.test. In that case, the analyzer may suggest the same set of
// changes to a.go for each package. If we merge all the results, those
// changes get doubly applied, which will cause conflicts or mismatches.
// Validating the results separately means as long as the two analyses
// don't produce conflicting suggestions for a single file, everything
// should match up.
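//
// For instance (hypothetical golden archive), one txtar section per fix
// message:
//
//	-- Replace with fmt.Sprintf --
//	...file contents after that fix...
//	-- Remove unused import --
//	...file contents after that fix...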
// file -> message -> edits
fileEdits := make(map[string]map[string][]runner.TextEdit)
fileContents := make(map[string][]byte)
// Validate edits, prepare the fileEdits map and read the file contents.
for _, diag := range diagnostics {
for _, sf := range diag.SuggestedFixes {
for _, edit := range sf.TextEdits {
// Validate the edit.
if edit.Position.Offset > edit.End.Offset {
t.Errorf(
"diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)",
diag.Category, edit.Position.Offset, edit.End.Offset)
continue
}
if edit.Position.Filename != edit.End.Filename {
t.Errorf(
"diagnostic for analysis %v contains Suggested Fix with malformed edit spanning files %v and %v",
diag.Category, edit.Position.Filename, edit.End.Filename)
continue
}
if _, ok := fileContents[edit.Position.Filename]; !ok {
contents, err := ioutil.ReadFile(edit.Position.Filename)
if err != nil {
t.Errorf("error reading %s: %v", edit.Position.Filename, err)
}
fileContents[edit.Position.Filename] = contents
}
if _, ok := fileEdits[edit.Position.Filename]; !ok {
fileEdits[edit.Position.Filename] = make(map[string][]runner.TextEdit)
}
fileEdits[edit.Position.Filename][sf.Message] = append(fileEdits[edit.Position.Filename][sf.Message], edit)
}
}
}
for file, fixes := range fileEdits {
// Get the original file contents.
orig, ok := fileContents[file]
if !ok |
// Get the golden file and read the contents.
ar, err := txtar.ParseFile(file + ".golden")
if err != nil {
t.Errorf("error reading %s.golden: %v", file, err)
continue
}
if len(ar.Files) > 0 {
// one virtual file per kind of suggested fix
if len(ar.Comment) != 0 {
// we allow either just the comment, or just virtual
// files, not both. it is not clear how "both" should
// behave.
t.Errorf("%s.golden has leading comment; we don't know what to do with it", file)
continue
}
var sfs []string
for sf := range fixes {
sfs = append(sfs, sf)
}
sort.Slice(sfs, func(i, j int) bool {
return sfs[i] < sfs[j]
})
for _, sf := range sfs {
edits := fixes[sf]
found := false
for _, vf := range ar.Files {
if vf.Name == sf {
found = true
out := applyEdits(orig, edits)
// the file may contain multiple trailing
// newlines if the user places empty lines
// between files in the archive. normalize
// this to a single newline.
want := string(bytes.TrimRight(vf.Data, "\n")) + "\n"
formatted, err := format.Source([]byte(out))
if err != nil {
t.Errorf("%s: error formatting edited source: %v\n%s", file, err, out)
continue
}
if want != string(formatted) {
d := myers.ComputeEdits(want, string(formatted))
diff := ""
for _, op := range d {
diff += op.String()
}
t.Errorf("suggested fixes failed for %s[%s]:\n%s", file, sf, diff)
}
break
}
}
if !found {
t.Errorf("no section for suggested fix %q in %s.golden", sf, file)
}
}
} else {
// all suggested fixes are represented by a single file
var catchallEdits []runner.TextEdit
for _, edits := range fixes {
catchallEdits = append(catchallEdits, edits...)
}
out := applyEdits(orig, catchallEdits)
want := string(ar.Comment)
formatted, err := format.Source([]byte(out))
if err != nil {
t.Errorf("%s: error formatting resulting source: %v\n%s", file, err, out)
continue
}
if want != string(formatted) {
d := myers.ComputeEdits(want, string(formatted))
diff := ""
for _, op := range d {
diff += op.String()
}
t.Errorf("suggested fixes failed for %s:\n%s", file, diff)
}
}
}
}
func Check(t *testing.T, gopath string, diagnostics []runner.Diagnostic, wants []runner.Want, facts []runner.TestFact) {
type key struct {
file string
line int
}
want := make(map[key][]expectation)
// processComment parses expectations out of comments.
processComment := func(filename string, linenum int, text string) {
text = strings.TrimSpace(text)
// Any comment starting with "want" is treated
// as an expectation, even without following whitespace.
if rest := strings.TrimPrefix(text, "want"); rest != text {
lineDelta, expects, err := parseExpectations(rest)
if err != nil {
t.Errorf("%s:%d: in 'want' comment: %s", filename, linenum, err)
return
}
if expects != nil {
want[key{filename, linenum + lineDelta}] = expects
}
}
}
for _, want := range wants {
filename := sanitize(gopath, want.Position.Filename)
processComment(filename, want.Position.Line, want.Comment)
}
checkMessage := func(posn token.Position, kind, name, message string) {
posn.Filename = sanitize(gopath, posn.Filename)
k := key{posn.Filename, posn.Line}
expects := want[k]
var unmatched []string
for i, exp := range expects {
if exp.kind == kind && exp.name == name {
if exp.rx.MatchString(message) {
// matched: remove the expectation.
expects[i] = expects[len(expects)-1]
expects = expects[:len(expects)-1]
want[k] = expects
return
}
unmatched = append(unmatched, fmt.Sprintf("%q", exp.rx))
}
}
if unmatched == nil {
t.Errorf("%v: unexpected %s: %v", posn, kind, message)
} else {
t.Errorf("%v: %s %q does not match pattern %s",
posn, kind, message, strings.Join(unmatched, " or "))
}
}
// Check the diagnostics match expectations.
for _, f := range diagnostics {
// TODO(matloob): Support ranges in analysistest.
posn := f.Position
checkMessage(posn, "diagnostic", "", f.Message)
}
// Check the facts match expectations.
for _, fact := range facts {
name := fact.ObjectName
posn := fact.Position
if name == "" {
name = "package"
posn.Line = 1
}
checkMessage(posn, "fact", name, fact.FactString)
}
// Reject surplus expectations.
//
// Sometimes an Analyzer reports two similar diagnostics on a
// line with only one expectation. The reader may be confused by
// the error message.
// TODO(adonovan): print a better error:
// "got 2 diagnostics here; each one needs its own expectation".
var surplus []string
for key, expects := range want {
for _, exp := range expects {
err := fmt.Sprintf("%s:%d: no %s was reported matching %q", key.file, key.line, exp.kind, exp.rx)
surplus = append(surplus, err)
}
}
sort.Strings(surplus)
for _, err := range surplus {
t.Errorf("%s", err)
}
}
func applyEdits(src []byte, edits []runner.TextEdit) []byte {
// This function isn't efficient, but it doesn't have to be.
edits = append([]runner.TextEdit(nil), edits...)
sort.Slice(edits, func(i, j int) bool {
if edits[i].Position.Offset < edits[j].Position.Offset {
return true
}
if edits[i].Position.Offset == edits[j].Position.Offset {
return edits[i].End.Offset < edits[j].End.Offset
}
return false
})
out := append([]byte(nil), src...)
offset := 0
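// offset tracks how much earlier edits have grown or shrunk the output so
// far; positions in later edits still refer to the original src, so they
// are shifted by offset to index into the edited buffer.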
for _, edit := range edits {
start := edit.Position.Offset + offset
end := edit.End.Offset + offset
if edit.End == (token.Position{}) {
end = -1
}
if len(edit.NewText) == 0 {
// pure deletion
copy(out[start:], out[end:])
out = out[:len(out)-(end-start)]
offset -= end - start
} else if end == -1 || end == start {
// pure insertion
tmp := make([]byte, len(out)+len(edit.NewText))
copy(tmp, out[:start])
copy(tmp[start:], edit.NewText)
copy(tmp[start+len(edit.NewText):], out[start:])
offset += len(edit.NewText)
out = tmp
} else if end-start == len(edit.NewText) {
// exact replacement
copy(out[start:], edit.NewText)
} else if end-start < len(edit.NewText) {
// replace with longer string
growth := len(edit.NewText) - (end - start)
tmp := make([]byte, len(out)+growth)
copy(tmp, out[:start])
copy(tmp[start:], edit.NewText)
copy(tmp[start+len(edit.NewText):], out[end:])
offset += growth
out = tmp
} else if end-start > len(edit.NewText) {
// replace with shorter string
shrinkage := (end - start) - len(edit.NewText)
copy(out[start:], edit.NewText)
copy(out[start+len(edit.NewText):], out[end:])
out = out[:len(out)-shrinkage]
offset -= shrinkage
}
}
// Debug code
if false {
fmt.Println("input:")
fmt.Println(string(src))
fmt.Println()
fmt.Println("edits:")
for _, edit := range edits {
fmt.Printf("%d:%d - %d:%d <- %q\n", edit.Position.Line, edit.Position.Column, edit.End.Line, edit.End.Column, edit.NewText)
}
fmt.Println("output:")
fmt.Println(string(out))
panic("")
}
return out
}
| {
t.Errorf("could not find file contents for %s", file)
continue
} |
args.go | package core |
import "github.com/tobscher/kiss/configuration"
// Args describes the args for the remote call.
type Args struct {
Environment []string
Args []string
Options configuration.OptionCollection
} | |
buffer.go | package log
import (
"io"
"os"
"sync"
"time"
)
// BufferWriter is an io.WriteCloser that writes through a fixed-size buffer.
type BufferWriter struct {
// BufferSize is the size in bytes of the buffer before it gets flushed.
BufferSize int
// FlushDuration is the interval at which the buffer is periodically flushed.
FlushDuration time.Duration
// Writer specifies the writer of output.
Writer io.Writer
once sync.Once
mu sync.Mutex
buf []byte
}
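// A minimal usage sketch (hypothetical sizes and output writer):
//
//	w := &BufferWriter{BufferSize: 8192, FlushDuration: time.Second, Writer: os.Stderr}
//	defer w.Close()
//	w.Write([]byte("buffered log line\n"))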
// Flush flushes all pending log I/O.
func (w *BufferWriter) Flush() (err error) {
w.mu.Lock()
if len(w.buf) != 0 {
_, err = w.Writer.Write(w.buf)
w.buf = w.buf[:0]
}
w.mu.Unlock()
return
}
// Close implements io.Closer, and closes the underlying Writer.
func (w *BufferWriter) Close() (err error) {
w.mu.Lock()
_, err = w.Writer.Write(w.buf)
w.buf = w.buf[:0]
if closer, ok := w.Writer.(io.Closer); ok {
if cerr := closer.Close(); err == nil {
err = cerr // don't let the Close error mask an earlier Write error
}
}
w.mu.Unlock()
return
}
// Write implements io.Writer. If a write would cause the log buffer to be larger
// than BufferSize, the buffer is written to the underlying Writer and cleared.
func (w *BufferWriter) Write(p []byte) (n int, err error) {
w.once.Do(func() {
if w.BufferSize == 0 {
return
}
if page := os.Getpagesize(); w.BufferSize%page != 0 {
w.BufferSize = (w.BufferSize + page) / page * page
}
if w.buf == nil {
w.buf = make([]byte, 0, w.BufferSize)
}
if w.FlushDuration > 0 {
if w.FlushDuration < 100*time.Millisecond {
w.FlushDuration = 100 * time.Millisecond
}
go func(w *BufferWriter) {
for {
time.Sleep(w.FlushDuration)
w.Flush()
}
}(w)
}
})
w.mu.Lock()
if w.BufferSize > 0 {
w.buf = append(w.buf, p...)
n = len(p)
if len(w.buf) > w.BufferSize {
_, err = w.Writer.Write(w.buf)
w.buf = w.buf[:0]
}
} else {
n, err = w.Writer.Write(p)
}
w.mu.Unlock()
return
}
// The Flusher interface is implemented by BufferWriters that allow
// a Logger to flush buffered data to the output.
type Flusher interface {
// Flush sends any buffered data to the output.
Flush() error
}
// Flush writes any buffered data to the underlying io.Writer.
func | (writer io.Writer) (err error) {
if flusher, ok := writer.(Flusher); ok {
err = flusher.Flush()
}
return
}
| Flush |
accounts.py | from rest_framework.exceptions import APIException
from revibe._errors import network
from revibe._helpers import status
# -----------------------------------------------------------------------------
class AccountError(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "The request could not be completed, please try again"
default_code = 'conflict'
class AccountNotFound(network.UnauthorizedError):
default_detail = "Could not identify the current user, please try again"
class | (network.ForbiddenError):
default_detail = "Could not identify the current artist"
class ProfileNotFoundError(network.ExpectationFailedError):
default_detail = "The user's profile information could not be found"
| NotArtistError |
pod_array_status.py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class PodArrayStatus(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'frozen_at': 'int',
'mediator_status': 'str',
'pre_elected': 'bool',
'progress': 'float',
'status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'frozen_at': 'frozen_at',
'mediator_status': 'mediator_status',
'pre_elected': 'pre_elected',
'progress': 'progress',
'status': 'status'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
frozen_at=None, # type: int
mediator_status=None, # type: str
pre_elected=None, # type: bool
progress=None, # type: float
status=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified.
name (str): The resource name, such as volume name, pod name, snapshot name, and so on.
frozen_at (int): The timestamp of when the data on the pod was frozen when the array went offline. Measured in milliseconds since the UNIX epoch. Also known as the recovery point. If the pod is in sync, a value of `null` will be returned.
mediator_status (str): The status of the mediator, which determines whether it is available to mediate a high availability event. Valid values are `flummoxed`, `online`, `unknown`, and `unreachable`. Only mediators in the `online` status can mediate high availability events. If set to `flummoxed`, the array can reach a mediator, but it is talking to the wrong one. Verify that the DNS in the environment is properly configured. This status might also appear if the pod has been offline on one array for an extended period of time and the peer array is unreachable. If set to `online`, the array is successfully communicating with the mediator, and the mediator is available to mediate a high availability event. If set to `unreachable`, the array cannot reach the mediator, either due to network issues or because the mediator is down. When a mediator is unreachable, synchronous replication continues to function provided all arrays are healthy and communicating, but a high availability event without mediator access can result in an outage.
pre_elected (bool): If set to `true`, the array has been pre-elected to remain online in the rare event that the mediator is inaccessible on both arrays within the stretched pod, and then later, the arrays within the stretched pod become disconnected from each other. If set to `false`, either the array has been pre-elected to remain offline while its peer array remains online, or pre-election is not in effect. One and only one array within each pod is pre-elected at a given point in time, so while a pre-elected array is keeping the pod online, the pod on its non-elected peer array remains offline during the communication failure. Users cannot pre-elect arrays.
progress (float): The percentage progress of the pod resyncing process for this array. The percentage is displayed as a decimal value, starting at 0.00 and ending at 1.00.
status (str): The status of the array within the stretched pod. Valid values are `offline`, `online`, `resyncing`, and `unknown`. If set to `offline`, the array is experiencing problems and may not have the latest pod data. The array cannot handle I/O to the pod and cannot take over during a high availability event. If set to `online`, the array is online and has the latest pod data. The array can handle I/O to the pod and take over during a high availability event. If set to `resyncing`, the array is actively getting the latest pod data so that it becomes fully synchronized with its peer array. During the resyncing process, the array cannot handle I/O to the pod. Once the arrays are fully synchronized, the array changes to `online` status. If set to `unknown`, the status of the peer array is unknown because this array is offline and cannot determine the state of the pod on the peer array. Only the peer array can ever be in unknown status; this unknown status is unique to the local array and will differ when viewed from its peer array.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if frozen_at is not None:
self.frozen_at = frozen_at
if mediator_status is not None:
self.mediator_status = mediator_status
if pre_elected is not None:
self.pre_elected = pre_elected
if progress is not None:
self.progress = progress
if status is not None:
self.status = status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodArrayStatus`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
|
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PodArrayStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PodArrayStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
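# Example (hypothetical values): only attributes that were actually set end up
# in the serialized dict (key order may vary):
#     s = PodArrayStatus(name='array-1', status='online', progress=1.0)
#     s.to_dict() # -> {'name': 'array-1', 'progress': 1.0, 'status': 'online'}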
| value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value |
auto-complete-item.ts | export class AutoCompleteItem<T> {
public readonly value: T;
public readonly index: number;
public isSelected: boolean;
public isDisabled: boolean;
public isHighlighted: boolean; | this.index = index;
this.isSelected = isSelected;
this.isDisabled = isDisabled;
this.isHighlighted = false;
}
} |
public constructor(value: T, index: number, isSelected: boolean, isDisabled: boolean) {
this.value = value; |
clock.rs | //! Common types for system clocks
//!
//! This module defines types that are helpful for working with system clocks.
/// Represents a number of ticks of a given clock
///
/// This struct is used to represent an amount of time, a duration, but in a
/// low-level way that hardware peripherals can understand and handle. It is
/// meant to be a common denominator that higher-level time APIs can be built on
/// top of.
pub struct Ticks<'clock, C: 'clock> {
/// The number of ticks
pub value: u32,
/// Reference to the clock
///
/// For many clocks, it's possible to change their frequency. If this were
/// to be done after an instance of `Ticks` had been created, that would
/// invalidate the `Ticks` instance, as the same number of ticks would
/// suddenly represent a different duration of time.
///
/// This reference exists to prevent this. Any change to the configuration
/// of a clock would presumably require a mutable reference, which means as
/// long as this shared reference to the clock exists, the compiler will
/// prevent the clock frequency from being changed.
pub clock: &'clock C,
}
impl<'clock, Clock> Clone for Ticks<'clock, Clock> {
fn | (&self) -> Self {
Ticks {
value: self.value,
clock: self.clock,
}
}
}
impl<'clock, Clock> Copy for Ticks<'clock, Clock> {}
/// Implemented by clocks that can return a frequency
///
/// Implementations of this trait might be very simple, for clocks that run at
/// one specific frequency. Or they might be more complex, for clocks whose
/// frequency can be configured.
///
/// Some clocks might not have an implementation of this trait at all. An
/// example of this might be a type that represents an external clock that is
/// fed into the microcontroller via a pin.
pub trait Frequency {
/// The frequency of the clock in Hz
///
/// This method must never return `0`.
fn hz(&self) -> u32;
}
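// A clock fixed at 12 MHz could implement this trait like so (hypothetical
// type, not part of this crate):
//
// struct TwelveMhz;
//
// impl Frequency for TwelveMhz {
//     fn hz(&self) -> u32 { 12_000_000 }
// }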
/// Marker trait that identifies a clock as currently being enabled
///
/// A clock that is always enabled can just implement this trait
/// unconditionally. Clocks that can be disabled can use a different type or a
/// type parameter to implement this trait conditionally.
///
/// HAL users will typically use this trait to ensure that a clock that is
/// passed as a parameter is enabled.
///
/// # Examples
///
/// This is a function that takes a clock. The function uses this trait to
/// ensure the passed clock is enabled.
///
/// ``` rust
/// use lpc8xx_hal::clock;
///
/// fn use_clock<C>(clock: C) where C: clock::Frequency + clock::Enabled {
/// // do something with the clock
/// }
/// ```
///
/// The following example shows how to use a type parameter to track whether a
/// clock is enabled, and implement the `Enabled` trait conditionally.
///
/// ``` rust
/// use lpc8xx_hal::{
/// clock,
/// init_state,
/// };
///
///
/// struct MyClock<State> {
/// _state: State,
/// }
///
/// impl MyClock<init_state::Disabled> {
/// /// Consume the instance with disabled state, return one with enabled
/// /// state.
/// pub fn enable(self) -> MyClock<init_state::Enabled> {
/// // Enable the clock
/// // ...
///
/// MyClock {
/// _state: init_state::Enabled(()),
/// }
/// }
/// }
///
/// impl clock::Enabled for MyClock<init_state::Enabled> {}
/// ```
pub trait Enabled {}
| clone |
lib.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
//! Barebones Watch Face for Rust + Mynewt on PineTime Smart Watch
#![no_std] // Don't link with standard Rust library, which is not compatible with embedded systems
#![feature(trace_macros)] // Allow macro tracing: `trace_macros!(true)`
#![feature(concat_idents)] // Allow `concat_idents!()` macro used in `coap!()` macro
#![feature(proc_macro_hygiene)] // Allow Procedural Macros like `run!()`
#![feature(exclusive_range_pattern)] // Allow ranges like `0..128` in `match` statements
pub use watchface; // Export the Watch Face Framework
use core::{
fmt::Write,
ptr,
};
use macros::strn;
use watchface::lvgl::mynewt::{
result::*,
Strn,
};
use watchface::lvgl::{
self,
core::obj,
themes::theme,
widgets::label,
};
use watchface::{
BluetoothState,
String,
WatchFace,
WatchFaceState,
WatchFaceTime,
new_string,
to_strn,
};
///////////////////////////////////////////////////////////////////////////////
// Watch Face Definition
/// Barebones Watch Face with no frills
pub struct BarebonesWatchFace {
/// Label for Time: "12:34"
pub time_label: lvgl::Ptr,
/// Label for Date: "MON 22 MAY 2020"
pub date_label: lvgl::Ptr,
/// Label for Bluetooth State (Bluetooth Icon)
pub bluetooth_label: lvgl::Ptr,
/// Label for Power Indicator (Charging & Battery)
pub power_label: lvgl::Ptr,
}
impl WatchFace for BarebonesWatchFace {
///////////////////////////////////////////////////////////////////////////////
// Create Watch Face
/// Create the widgets for the Watch Face
fn new() -> MynewtResult<Self> {
// Get the active screen
let screen = watchface::get_active_screen();
// Get the large title font for showing the time
let title_font = theme::get_font_title() ? ;
// Create the widgets
let watch_face = Self {
// Create a Label for Time: "00:00"
time_label: {
let lbl = label::create(screen, ptr::null()) ? ; // `?` will terminate the function in case of error
label::set_long_mode(lbl, label::LV_LABEL_LONG_BREAK) ? ;
label::set_text( lbl, strn!("00:00")) ? ; // strn creates a null-terminated string
obj::set_width( lbl, 240) ? ;
obj::set_height( lbl, 200) ? ;
label::set_align( lbl, label::LV_LABEL_ALIGN_CENTER) ? ;
obj::align( lbl, screen, obj::LV_ALIGN_CENTER, 0, -30) ? ;
obj::set_style_local_text_font(lbl, label::LV_LABEL_PART_MAIN, obj::LV_STATE_DEFAULT, title_font) ? ; // Use large title font
lbl // Return the label as time_label
},
// Create a Label for Date: "MON 22 MAY 2020"
date_label: {
let lbl = label::create(screen, ptr::null()) ? ;
label::set_long_mode(lbl, label::LV_LABEL_LONG_BREAK) ? ;
obj::set_width( lbl, 200) ? ;
obj::set_height( lbl, 200) ? ;
label::set_text( lbl, strn!("")) ? ; // strn creates a null-terminated string
label::set_align( lbl, label::LV_LABEL_ALIGN_CENTER) ? ;
obj::align( lbl, screen, obj::LV_ALIGN_CENTER, 0, 40) ? ;
lbl // Return the label as date_label
},
// Create a Label for Bluetooth State
bluetooth_label: {
let lbl = label::create(screen, ptr::null()) ? ;
obj::set_width( lbl, 50) ? ;
obj::set_height( lbl, 80) ? ;
label::set_text( lbl, strn!("")) ? ; // strn creates a null-terminated string
label::set_recolor( lbl, true) ? ;
label::set_align( lbl, label::LV_LABEL_ALIGN_LEFT) ? ;
obj::align( lbl, screen, obj::LV_ALIGN_IN_TOP_LEFT, 0, 0) ? ;
lbl // Return the label as bluetooth_label
},
// Create a Label for Power Indicator
power_label: {
let lbl = label::create(screen, ptr::null()) ? ;
obj::set_width( lbl, 80) ? ;
obj::set_height( lbl, 20) ? ;
label::set_text( lbl, strn!("")) ? ; // strn creates a null-terminated string
label::set_recolor(lbl, true) ? ;
label::set_align( lbl, label::LV_LABEL_ALIGN_RIGHT) ? ;
obj::align( lbl, screen, obj::LV_ALIGN_IN_TOP_RIGHT, 0, 0) ? ;
lbl // Return the label as power_label
},
};
// Return the watch face
Ok(watch_face)
}
///////////////////////////////////////////////////////////////////////////////
// Update Watch Face
/// Update the widgets in the Watch Face with the current state
fn update(&mut self, state: &WatchFaceState) -> MynewtResult<()> {
// Populate the Time and Date Labels
self.update_date_time(state) ? ;
// Populate the Bluetooth Label
self.update_bluetooth(state) ? ;
// Populate the Power Label
self.update_power(state) ? ;
Ok(())
}
}
impl BarebonesWatchFace {
///////////////////////////////////////////////////////////////////////////////
// Update Watch Face
/// Populate the Time and Date Labels with the time and date
fn update_date_time(&self, state: &WatchFaceState) -> MynewtResult<()> {
// Format the time as "12:34" and set the label
let mut buf = new_string();
write!(
&mut buf, // Write the formatted text
"{:02}:{:02}\0", // Must terminate Rust strings with null
state.time.hour,
state.time.minute
).expect("time fail");
label::set_text( // Set the label
self.time_label,
&to_strn(&buf)
) ? ;
// Get the short day name and short month name
let day = get_day_name(&state.time);
let month = get_month_name(&state.time);
// Format the date as "MON 22 MAY 2020" and set the label
let mut buf = new_string();
write!(
&mut buf, // Write the formatted text
"{} {} {} {}\n\0", // Must terminate Rust strings with null
day,
state.time.day,
month,
state.time.year
).expect("date fail");
label::set_text( // Set the label
self.date_label,
&to_strn(&buf)
) ? ;
Ok(())
}
/// Populate the Bluetooth Label with the Bluetooth State (Bluetooth Icon)
fn update_bluetooth(&self, state: &WatchFaceState) -> MynewtResult<()> {
if state.bluetooth == BluetoothState::BLUETOOTH_STATE_DISCONNECTED {
// If Bluetooth is disconnected, leave the label empty
label::set_text(
self.bluetooth_label,
strn!("")
) ? ;
} else {
// Compute the color of the Bluetooth icon
let color =
match &state.bluetooth {
BluetoothState::BLUETOOTH_STATE_INACTIVE => "#000000", // Black
BluetoothState::BLUETOOTH_STATE_ADVERTISING => "#0000ff", // Blue
BluetoothState::BLUETOOTH_STATE_DISCONNECTED => "#ff0000", // Red
BluetoothState::BLUETOOTH_STATE_CONNECTED => "#00ff00", // Green
};
// Format the Bluetooth status and set the label
let mut buf = new_string();
write!(
&mut buf, // Write the formatted text
"{} \u{F293}#\0", // LV_SYMBOL_BLUETOOTH. Must terminate Rust strings with null.
color
).expect("bt fail");
label::set_text( // Set the label
self.bluetooth_label,
&to_strn(&buf)
) ? ;
}
Ok(())
}
/// Populate the Power Label with the Power Indicator (Charging & Battery)
fn update_power(&self, state: &WatchFaceState) -> MynewtResult<()> {
// Get the active screen
let screen = watchface::get_active_screen();
// Compute the percentage power
let percentage = convert_battery_voltage(state.millivolts);
// Compute the colour for the charging symbol
let color = // Charging color
if percentage <= 20 { "#f2495c" } // Low Battery
else if state.powered && !(state.charging) { "#73bf69" } // Full Battery
else { "#fade2a" }; // Mid Battery
let symbol = // Charging symbol
if state.powered { "\u{F0E7}" } // LV_SYMBOL_CHARGE
else { " " };
// Format the Power Indicator and set the label
let mut buf = new_string();
write!(
&mut buf, // Write the formatted text
"{} {}%{}#\nRUST ({}mV)\0", // Must terminate Rust strings with null
color,
percentage,
symbol,
state.millivolts
).expect("batt fail");
label::set_text(
self.power_label,
&to_strn(&buf)
) ? ;
obj::align(
self.power_label, screen,
obj::LV_ALIGN_IN_TOP_RIGHT, 0, 0
) ? ;
Ok(())
}
}
///////////////////////////////////////////////////////////////////////////////
// Date Time Functions
/// Get month short name
fn get_month_name(time: &WatchFaceTime) -> String {
String::from(
match time.month {
1 => "JAN",
2 => "FEB",
3 => "MAR",
4 => "APR",
5 => "MAY",
6 => "JUN",
7 => "JUL",
8 => "AUG",
9 => "SEP",
10 => "OCT",
11 => "NOV",
12 => "DEC",
_ => "???",
}
)
}
/// Get day short name
fn get_day_name(time: &WatchFaceTime) -> String {
String::from(
match time.day_of_week {
0 => "SUN",
1 => "MON",
2 => "TUE",
3 => "WED",
4 => "THU",
5 => "FRI",
6 => "SAT",
_ => "???",
}
)
}
///////////////////////////////////////////////////////////////////////////////
// Battery Functions
/// Convert battery voltage to percentage
fn | (_voltage: u32) -> i32 {
50 // TODO
}
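// A real conversion would map the LiPo discharge curve to a percentage; a
// crude linear sketch between 3300 mV and 4200 mV (hypothetical thresholds):
//
// fn battery_percentage(voltage: u32) -> i32 {
//     ((voltage.clamp(3300, 4200) - 3300) / 9) as i32
// }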
| convert_battery_voltage |
script.js | function | () {
const URL = "https://tgftp.nws.noaa.gov/data/observations/metar/stations/";
const icao = document.form.textBox1.value;
const upper_icao = icao.toUpperCase();
//console.log(icao)
const metar_URL = URL + upper_icao + '.TXT';
//metar_URL = "https://tgftp.nws.noaa.gov/data/observations/metar/stations/RJTT.TXT"
window.open(metar_URL, '_blank');
}
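// Example: entering "rjtt" opens
// https://tgftp.nws.noaa.gov/data/observations/metar/stations/RJTT.TXT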
function enter() {
if (window.event.keyCode == 13) {
jump();
}
}
| jump |
vpnsites.go | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// VpnSitesClient is the network Client
type VpnSitesClient struct {
BaseClient
}
// NewVpnSitesClient creates an instance of the VpnSitesClient client.
func NewVpnSitesClient(subscriptionID string) VpnSitesClient |
// NewVpnSitesClientWithBaseURI creates an instance of the VpnSitesClient client.
func NewVpnSitesClientWithBaseURI(baseURI string, subscriptionID string) VpnSitesClient {
return VpnSitesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
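// A minimal usage sketch (hypothetical subscription and resource names):
//
//	client := NewVpnSitesClient("00000000-0000-0000-0000-000000000000")
//	future, err := client.CreateOrUpdate(ctx, "my-rg", "my-vpn-site", site)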
// CreateOrUpdate creates a VpnSite resource if it doesn't exist else updates the existing VpnSite.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being created or updated.
// vpnSiteParameters - parameters supplied to create or update VpnSite.
func (client VpnSitesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters VpnSite) (result VpnSitesCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, vpnSiteName, vpnSiteParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client VpnSitesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters VpnSite) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vpnSiteName": autorest.Encode("path", vpnSiteName),
}
const APIVersion = "2018-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", pathParameters),
autorest.WithJSON(vpnSiteParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) CreateOrUpdateSender(req *http.Request) (future VpnSitesCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) CreateOrUpdateResponder(resp *http.Response) (result VpnSite, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes a VpnSite.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being deleted.
func (client VpnSitesClient) Delete(ctx context.Context, resourceGroupName string, vpnSiteName string) (result VpnSitesDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.Delete")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, vpnSiteName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client VpnSitesClient) DeletePreparer(ctx context.Context, resourceGroupName string, vpnSiteName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vpnSiteName": autorest.Encode("path", vpnSiteName),
}
const APIVersion = "2018-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) DeleteSender(req *http.Request) (future VpnSitesDeleteFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get retrieves the details of a VPN site.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being retrieved.
func (client VpnSitesClient) Get(ctx context.Context, resourceGroupName string, vpnSiteName string) (result VpnSite, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, vpnSiteName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client VpnSitesClient) GetPreparer(ctx context.Context, resourceGroupName string, vpnSiteName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vpnSiteName": autorest.Encode("path", vpnSiteName),
}
const APIVersion = "2018-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) GetResponder(resp *http.Response) (result VpnSite, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists all the VpnSites in a subscription.
func (client VpnSitesClient) List(ctx context.Context) (result ListVpnSitesResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.List")
defer func() {
sc := -1
if result.lvsr.Response.Response != nil {
sc = result.lvsr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.lvsr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "List", resp, "Failure sending request")
return
}
result.lvsr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client VpnSitesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnSites", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) ListResponder(resp *http.Response) (result ListVpnSitesResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client VpnSitesClient) listNextResults(ctx context.Context, lastResults ListVpnSitesResult) (result ListVpnSitesResult, err error) {
req, err := lastResults.listVpnSitesResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client VpnSitesClient) ListComplete(ctx context.Context) (result ListVpnSitesResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx)
return
}
// ListByResourceGroup lists all the vpnSites in a resource group.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
func (client VpnSitesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ListVpnSitesResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.ListByResourceGroup")
defer func() {
sc := -1
if result.lvsr.Response.Response != nil {
sc = result.lvsr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.lvsr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.lvsr, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client VpnSitesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) ListByResourceGroupResponder(resp *http.Response) (result ListVpnSitesResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client VpnSitesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListVpnSitesResult) (result ListVpnSitesResult, err error) {
req, err := lastResults.listVpnSitesResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client VpnSitesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ListVpnSitesResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.ListByResourceGroup")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
// UpdateTags updates VpnSite tags.
// Parameters:
// resourceGroupName - the resource group name of the VpnSite.
// vpnSiteName - the name of the VpnSite being updated.
// vpnSiteParameters - parameters supplied to update VpnSite tags.
func (client VpnSitesClient) UpdateTags(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters TagsObject) (result VpnSitesUpdateTagsFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSitesClient.UpdateTags")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, vpnSiteName, vpnSiteParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "UpdateTags", nil, "Failure preparing request")
return
}
result, err = client.UpdateTagsSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSitesClient", "UpdateTags", result.Response(), "Failure sending request")
return
}
return
}
// UpdateTagsPreparer prepares the UpdateTags request.
func (client VpnSitesClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters TagsObject) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vpnSiteName": autorest.Encode("path", vpnSiteName),
}
const APIVersion = "2018-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}", pathParameters),
autorest.WithJSON(vpnSiteParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateTagsSender sends the UpdateTags request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSitesClient) UpdateTagsSender(req *http.Request) (future VpnSitesUpdateTagsFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// UpdateTagsResponder handles the response to the UpdateTags request. The method always
// closes the http.Response Body.
func (client VpnSitesClient) UpdateTagsResponder(resp *http.Response) (result VpnSite, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
purge.py | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# Recode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
#
""" Modul Userbot untuk menghapus pesan yang tidak dibutuhkan (chat spam atau lainnya)."""
from asyncio import sleep
from telethon.errors import rpcbaseerrors
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import edit_delete, ayiin_cmd
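# Delete every message sent after the replied-to message, in batches of 100.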
@ayiin_cmd(pattern="purge$")
@register(pattern=r"^\.cpurge$", sudo=True)
async def fastpurger(purg):
    chat = await purg.get_input_chat()
    msgs = []
    itermsg = purg.client.iter_messages(chat, min_id=purg.reply_to_msg_id)
    count = 0
    if purg.reply_to_msg_id is None:
        return await edit_delete(purg, "**Mohon Balas Ke Pesan**")
    async for msg in itermsg:
        msgs.append(msg)
        count += 1
        msgs.append(purg.reply_to_msg_id)
        if len(msgs) == 100:
            await purg.client.delete_messages(chat, msgs)
            msgs = []
    if msgs:
        await purg.client.delete_messages(chat, msgs)
    done = await purg.client.send_message(
        purg.chat_id,
        "**Fast Purge Completed!**\n**Berhasil Menghapus** `"
        + str(count)
        + "` **Pesan**",
    )
    await sleep(2)
    await done.delete()
@ayiin_cmd(pattern="purgeme")
@register(pattern=r"^\.cpurgeme", sudo=True)
async def purgeme(delme):
message = delme.text
count = int(message[9:])
i = 1
async for message in delme.client.iter_messages(delme.chat_id, from_user="me"):
if i > count + 1:
break
i += 1
await message.delete()
smsg = await delme.client.send_message(
delme.chat_id,
"**Berhasil Menghapus** " + str(count) + " **Kenangan**",
)
await sleep(2)
i = 1
await smsg.delete()
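# Delete the replied-to message together with the command message itself.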
@ayiin_cmd(pattern="del$")
@register(pattern=r"^\.cdel$", sudo=True)
async def delete_it(delme):
msg_src = await delme.get_reply_message()
if delme.reply_to_msg_id:
try:
await msg_src.delete()
await delme.delete()
except rpcbaseerrors.BadRequestError:
await delme.edit("**Tidak Bisa Menghapus Pesan**")
@ayiin_cmd(pattern="edit")
@register(pattern=r"^\.cedit", sudo=True)
async def editer(edit):
message = edit.text
chat = await edit.get_input_chat()
self_id = await edit.client.get_peer_id("me")
string = str(message[6:])
i = 1
async for message in edit.client.iter_messages(chat, self_id):
if i == 2:
await message.edit(string)
await edit.delete()
break
i += 1
@ayiin_cmd(pattern="sd")
async def selfdestruct(destroy):
message = destroy.text
counter = int(message[4:6])
text = str(destroy.text[6:])
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
await sleep(counter)
await smsg.delete()
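# Maps chat_id -> message id marking the start of a purgefrom/purgeto range.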
purgechat = {}
@ayiin_cmd(pattern=r"(p|purge)(from$|to$)")
async def purgfromto(prgnew):
reply = await prgnew.get_reply_message()
if reply:
if prgnew.pattern_match.group(2) == "from":
await purgfrm(prgnew)
elif prgnew.pattern_match.group(2) == "to":
await purgto(prgnew)
else:
await edit_delete(prgnew, "**Mohon Balas Ke Pesan untuk mulai menghapus**")
async def purgfrm(purgdari):
prgstrtmsg = purgdari.reply_to_msg_id
purgechat[purgdari.chat_id] = prgstrtmsg
yinsubot = await edit_delete(
purgdari,
f"**Pesan ini telah dipilih sebagai awal menghapus, balas pesan lain dengan** `{cmd}purgeto` **untuk menghapusnya**",
)
await sleep(2)
await yinsubot.delete()
async def purgto(purgke):
try:
prgstrtmsg = purgechat[purgke.chat_id]
except KeyError:
yinsubot = await edit_delete(
purgke,
f"**Balas pesan dengan** `{cmd}purgefrom` **terlebih dahulu lalu gunakan** `{cmd}purgeto`",
5,
)
return
try:
chat = await purgke.get_input_chat()
prgendmsg = purgke.reply_to_msg_id
pmsgs = []
message = 0
async for msg in purgke.client.iter_messages(
purgke.chat_id, min_id=(prgstrtmsg - 1), max_id=(prgendmsg + 1)
):
pmsgs.append(msg)
message += 1
pmsgs.append(purgke.reply_to_msg_id)
            if len(pmsgs) == 100:
                await purgke.client.delete_messages(chat, pmsgs)
                pmsgs = []
if pmsgs:
await purgke.client.delete_messages(chat, pmsgs)
await purgke.delete()
        await edit_delete(
purgke,
f"**Fast purge complete!**\n**Berhasil Menghapus** `{message}` **Pesan**",
5,
)
except Exception as er:
await purgke.edit(f"**ERROR:** `{er}`")
CMD_HELP.update(
{
"purge": f"**Plugin : **`Menghapus Kenangan Chat`\
\n\n • **Syntax :** `{cmd}purge`\
\n • **Function : **Menghapus semua pesan mulai dari pesan yang dibalas.\
\n\n • **Syntax :** `{cmd}purgefrom` atau `{cmd}pfrom`\
\n • **Function : **Menandai awal dari mana harus dihapus.\
\n\n • **Syntax :** `{cmd}purgeto` atau `{cmd}pto`\
\n • **Function : **Menandai akhir dari pesan yang akan dihapus.\
\n\n • **Syntax :** `{cmd}purgeme` <angka>\
\n • **Function : **Menghapus jumlah pesan anda, yang mau anda hapus.\
\n\n • **Syntax :** `{cmd}del`\
\n • **Function : **Menghapus pesan, balas ke pesan.\
\n\n • **Syntax :** `{cmd}edit <pesan baru>`\
\n • **Function : **Ganti pesan terakhir Anda dengan <pesan baru>.\
\n\n • **Syntax :** `{cmd}sd` <detik> <pesan>\
\n • **Function : **Membuat pesan yang hancur sendiri. harap pasang detik di bawah 100 untuk menghindari bot Anda akan sleep.\
"
}
)
remove.rs | // STD Dependencies -----------------------------------------------------------
use std::fmt;
// Internal Dependencies ------------------------------------------------------
use ::bot::{Bot, BotConfig};
use ::core::{EventQueue, Message};
use ::action::{ActionHandler, ActionGroup, MessageActions};
// Action Implementation ------------------------------------------------------
pub struct Action {
message: Message,
nickname: String
}
impl Action {
pub fn new(message: Message, nickname: String) -> Box<Action> {
Box::new(Action {
message: message,
nickname: nickname
})
}
}
impl ActionHandler for Action {
fn run(&mut self, bot: &mut Bot, _: &BotConfig, _: &mut EventQueue) -> ActionGroup {
        if let Some(server) = bot.get_server(&self.message.server_id) {
            server.remove_greeting(&self.nickname);
            MessageActions::Send::private(&self.message, format!(
                "Greeting for `{}` has been removed on {}.",
                self.nickname, server.name
            ))
        } else {
vec![]
}
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[Action] [RemoveGreeting] {}", self.nickname)
}
}
dubinsUAV.py | from matplotlib import pyplot as plt
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
#from lineSegmentAoE import *
import numpy as np
import sys
class dubinsUAV():
    def __init__(self, position, velocity, heading, dt=0.1):
self.velocity = velocity
self.turnRateLimited = True
self.v = velocity
self.dt = dt
self.t = 0
self.turnrate = np.deg2rad(20)
#self.turn_radius = []
#Current state
self.x = position[0]
self.y = position[1]
self.vx = []
self.vy = []
self.lastDist = np.inf
#self.cmdHeading = []
#self.flightEnvX = []
#self.flightEnvY = []
self.heading = heading
self.currentWPIndex = 0
self.withinThreshold = False
# History
self.xs = np.array([])
self.ys = np.array([])
self.vxs = np.array([])
self.vys = np.array([])
self.headings = np.array([])
#self.headingcmds = np.array([])
self.ts = np.array([])
self.vx = velocity * np.cos(heading)
self.vy = velocity * np.sin(heading)
self.dt = dt
#self.turn_radius = self.v / self.turnrate
def getPosition(self):
return [self.position[0], self.position[1]]
def setWaypoints(self, newwps, newradius=0.01):
self.waypoints = newwps
self.wpRadius = newradius
def getWaypoints(self):
return self.waypoints
def getActiveWaypoint(self):
return self.waypoints[self.currentWPIndex]
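    # Steer toward the active waypoint; advance to the next waypoint once the
    # UAV is inside the waypoint radius and the distance starts growing again.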
def simulateWPDubins(self):
# currentWPIndex = 0
# withinThreshold = False
# lastDist = sys.maxsize
wpRadius = self.wpRadius
activeWP = self.getActiveWaypoint()
dist = self.distance(activeWP, (self.x, self.y))
print('D: ' + str(dist) + '\t ' + str(dist < wpRadius) + '\t ' + str(dist > self.lastDist) + '\t# ' + str(self.currentWPIndex) + '\tLast: ' + str(self.lastDist))
if (dist < wpRadius and dist > self.lastDist):
if(self.currentWPIndex < len(self.waypoints)-1):
self.currentWPIndex += 1
print("WP Increment")
#update distance...
dist = self.distance(self.getActiveWaypoint(), (self.x, self.y))
else:
print("end of list, do something")
RHeading = np.arctan2(self.y - activeWP[1], self.x - activeWP[0])
RHeading += np.deg2rad(180)
if(RHeading >= np.pi*2):
RHeading -= np.pi*2
self.update_pos(RHeading)
self.lastDist = dist
def distance(self, a, b):
return np.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)
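    # Turn toward the requested heading, limited by the maximum turn rate and
    # always taking the shorter turn direction, then integrate the position.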
def update_pos(self, RequestedHeading):
if self.turnRateLimited:
theta = self.heading
if(np.abs(RequestedHeading - theta) < self.turnrate * self.dt):
turnrate = np.abs(RequestedHeading - theta) / self.dt
else:
turnrate = self.turnrate
if abs(theta - RequestedHeading) < np.pi:
if theta - RequestedHeading < 0:
theta = theta + turnrate * self.dt
else:
theta = theta - turnrate * self.dt
else:
if theta - RequestedHeading > 0:
theta = theta + turnrate * self.dt
else:
theta = theta - turnrate * self.dt
# if(np.abs(RequestedHeading - theta) > self.turnrate * self.dt):
# if theta - RequestedHeading < 0:
# theta = theta + self.turnrate * self.dt
# else:
# theta = theta - self.turnrate * self.dt
# else:
# theta = RequestedHeading
else:
theta = RequestedHeading
if(theta >= np.pi*2):
theta -= np.pi*2
print('Req: '+ str(np.rad2deg(RequestedHeading)) + '\ttheta ' + str(np.rad2deg(theta)))
# Update States
self.t = self.t + self.dt
self.heading = theta
#self.cmdHeading = VF_heading
self.update_pos_simple()
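    # Integrate position for one time step using the current heading and speed.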
def update_pos_simple(self):
# Update States
self.t = self.t + self.dt
theta = self.heading
self.vx = self.v * np.cos(theta)
self.vy = self.v * np.sin(theta)
self.x = self.x + self.vx * self.dt
self.y = self.y + self.vy * self.dt
self.position = [(self.x, self.y)] # added for CAS
# Update History
self.xs = np.append(self.xs, self.x)
self.ys = np.append(self.ys, self.y)
self.vxs = np.append(self.vxs, self.vx)
self.vys = np.append(self.vys, self.vy)
self.headings = np.append(self.headings, self.heading)
self.ts = np.append(self.ts, self.t)
eval.rs | use std::fmt;
use std::marker::PhantomData;
use eyre::Result;
use rand::prelude::SliceRandom;
use rand::Rng;
use smallvec::SmallVec;
use crate::eval::{Data, Evaluator};
use crate::evaluators::lgp::cfg::LgpEvaluatorCfg;
use crate::evaluators::lgp::vm::cfg::LgpVmCfg;
use crate::evaluators::lgp::vm::disasm::lgp_disasm;
use crate::evaluators::lgp::vm::op::Op;
use crate::evaluators::lgp::vm::optimize::LgpOptimizer;
use crate::ops::crossover::crossover_kpx;
use crate::ops::distance::dist_fn;
use crate::ops::mutation::{mutate_insert, mutate_reset, mutate_scramble, mutate_swap};
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub struct LgpState {
ops_unopt: Vec<Op>, // Contains program code for linear genetic programming.
num_reg: usize,
num_const: usize,
output_regs: SmallVec<[u8; 8]>,
}
impl fmt::Display for LgpState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let ops_opt = self.ops_opt();
writeln!(
f,
"Unopt code len: {}, Opt code len: {}, Diff: {}",
self.ops_unopt.len(),
ops_opt.len(),
self.ops_unopt.len() - ops_opt.len()
        )?;
        write!(f, "{}", lgp_disasm(&ops_opt))
    }
}
impl LgpState {
#[must_use]
pub fn new(ops_unopt: Vec<Op>, num_reg: usize, num_const: usize, output_regs: &[u8]) -> Self {
Self { ops_unopt, num_reg, num_const, output_regs: output_regs.into() }
}
#[must_use]
pub fn lgpvmcfg(&self, regs: &[f64], constants: &[f64]) -> LgpVmCfg {
assert!(regs.len() == self.num_reg, "regs length mismatch");
assert!(constants.len() == self.num_const, "constants length mismatch");
LgpVmCfg::new().set_code(&self.ops_opt()).set_regs(regs).set_constants(constants)
}
#[must_use]
pub fn num_reg(&self) -> usize {
self.num_reg
}
#[must_use]
pub fn num_const(&self) -> usize {
self.num_const
}
#[must_use]
pub fn ops_unopt(&self) -> &[Op] {
&self.ops_unopt
}
#[must_use]
pub fn ops_unopt_mut(&mut self) -> &mut Vec<Op> {
&mut self.ops_unopt
}
#[must_use]
pub fn ops_opt(&self) -> Vec<Op> {
// Optimise code operations for the purposes of running the code.
LgpOptimizer::new(self.ops_unopt(), &self.output_regs).optimize()
}
}
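/// Evaluator for linear genetic programs, providing crossover, mutation and
/// distance operators over `LgpState` individuals.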
pub struct LgpEvaluator<D> {
cfg: LgpEvaluatorCfg,
_u: PhantomData<D>,
}
impl<D> LgpEvaluator<D> {
#[must_use]
pub fn new(cfg: LgpEvaluatorCfg) -> Self {
Self { cfg, _u: PhantomData }
}
}
impl<D: Data> Evaluator for LgpEvaluator<D> {
type State = LgpState;
type Data = D;
const NUM_CROSSOVER: usize = 2;
const NUM_MUTATION: usize = 7;
fn crossover(&self, s1: &mut LgpState, s2: &mut LgpState, idx: usize) {
match idx {
0 => {} // Do nothing.
1 => {
// Two point crossover.
crossover_kpx(s1.ops_unopt_mut(), s2.ops_unopt_mut(), 2);
}
_ => panic!("unknown crossover strategy"),
};
}
fn mutate(&self, s: &mut LgpState, rate: f64, idx: usize) {
let mut r = rand::thread_rng();
if r.gen::<f64>() > rate {
return;
}
let code_size = s.ops_unopt().len();
let op = self.cfg.rand_op();
match idx {
0 => mutate_swap(s.ops_unopt_mut()),
1 => mutate_insert(s.ops_unopt_mut()),
2 => mutate_reset(s.ops_unopt_mut(), op),
3 => mutate_scramble(s.ops_unopt_mut()),
4 => {
// Add new random instruction.
if code_size < self.cfg.max_code() {
s.ops_unopt_mut().insert(r.gen_range(0..code_size), op);
}
}
5 => {
// Remove random instruction.
if code_size > 1 {
s.ops_unopt_mut().remove(r.gen_range(0..code_size));
}
}
6 => {
// Micro-mutation
self.cfg.mutate(s.ops_unopt_mut().choose_mut(&mut r).unwrap());
}
_ => panic!("unknown mutation strategy"),
}
}
fn fitness(&self, _: &Self::State, _data: &Self::Data) -> Result<f64> {
unimplemented!()
}
fn distance(&self, s1: &Self::State, s2: &Self::State) -> Result<f64> {
// Use optimised operations for distance calculation, since
// otherwise things can be trivially very different.
Ok(dist_fn(&s1.ops_opt(), &s2.ops_opt(), 1.0, Op::dist))
}
}
google_test.go | package auth
import (
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"errors"
"fmt"
"github.com/jonsabados/sabadoscodes.com/httputil"
"github.com/stretchr/testify/assert"
"io/ioutil"
"math/big"
"net/http"
"net/http/httptest"
"sort"
"testing"
"time"
)
func Test_sanitizeTokenForLog(t *testing.T) {
testCases := []struct {
desc string
input string
expected string
}{
{
"long",
"foobarblahbazweefunfoo",
"foobarblxxxxxxxxxxxxxx",
},
{
"short",
"aba",
"aba",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
assert.Equal(t, tc.expected, sanitizeTokenForLog(tc.input))
})
}
}
func Test_NewGoogleCertFetcher_NetworkError(t *testing.T) {
asserter := assert.New(t)
ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
asserter.Fail("wtf?")
}))
ts.Close()
testInstance := NewGoogleCertFetcher(ts.URL, httputil.DefaultHttpClient)
_, err := testInstance(context.Background())
asserter.Error(err)
}
func Test_NewGoogleCertFetcher_NetworkGood(t *testing.T) {
testCases := []struct {
desc string
responseCode int
responseFixture string
expirationHeader string
expectedSerials []string
expectError bool
}{
{
"internal server error",
http.StatusInternalServerError,
"fixture/error.json",
"Thu, 25 Jun 2020 12:01:01 MST",
nil,
true,
},
{
"garbage response",
http.StatusOK,
"fixture/garbage.html",
"Thu, 25 Jun 2020 12:01:01 MST",
nil,
true,
},
{
"unexpected json",
http.StatusOK,
"fixture/error.json",
"Thu, 25 Jun 2020 12:01:01 MST",
nil,
true,
},
{
"empty json",
http.StatusOK,
"fixture/empty.json",
"Thu, 25 Jun 2020 12:01:01 MST",
nil,
true,
},
{
"happy path",
http.StatusOK,
"fixture/googlecerts.json",
"Thu, 25 Jun 2020 12:01:01 MST",
[]string{
"swjeSaDD1xCYlGMO3fPrpieIhRXTHfWqEL0EA7L3JMKPs2Dae3P/vtqN2qL7fq3Ft48xCz0swmE5Ci8OEBQZi+RB+A4t0MxMO9K3LJk1wmqyZdj0d7LZ3WFq5hyym7dQzes/4z/4UcYMel/z/jjmKM7qBvtm8a68vpAcZooMy/f13hIotTdYPwJ8fACB8EYOYVzz0gyKPFAXbXvNC64dR2IF4lR0/ql9IdgZkxqCeCyf/KQtNQ3D4p8yqvdMcJV0Va3r8Teh72zyj1U/QLnCJVURL/ircP3UDGZzN7bym/r5JQhuOHjGWTqPTsGgV0/ZkQA4pOxOvt1PUO0F1UsQTQ==",
"uK2uXX3c28Xpjyx0rUjmC7cBSJ5j7OUJfL4EQsZbXm1I514GD+GCnn/UhYqirv3hTdH0F22aiGJdgDwofZBr5iKAVf4Z2VHaQ8sE1taMH+cAqZEquJLmDuRTRKoJh6ZW116+8cuAVtDdfBGH8INTy8hedusJh+uUTqO+xg/dEt8EQHQlvO4DlQc5iqV/dAb1TnAdl9SyKV68naxts/B+Cy8P1FrVv7LHcXBDHYTo8jquhZRnz+GuxKrhqS2W8Nyfqj+k9xYZqd/usvvu6XUmb/wDDatw9i8zUDURKulcUeCA7OKyOGjNr6pKIkKnMPDHDoCA6N6aTrZBG1fuj3G8eg==",
},
false,
},
{
"missing expiration",
http.StatusOK,
"fixture/googlecerts.json",
"",
[]string{
"swjeSaDD1xCYlGMO3fPrpieIhRXTHfWqEL0EA7L3JMKPs2Dae3P/vtqN2qL7fq3Ft48xCz0swmE5Ci8OEBQZi+RB+A4t0MxMO9K3LJk1wmqyZdj0d7LZ3WFq5hyym7dQzes/4z/4UcYMel/z/jjmKM7qBvtm8a68vpAcZooMy/f13hIotTdYPwJ8fACB8EYOYVzz0gyKPFAXbXvNC64dR2IF4lR0/ql9IdgZkxqCeCyf/KQtNQ3D4p8yqvdMcJV0Va3r8Teh72zyj1U/QLnCJVURL/ircP3UDGZzN7bym/r5JQhuOHjGWTqPTsGgV0/ZkQA4pOxOvt1PUO0F1UsQTQ==",
"uK2uXX3c28Xpjyx0rUjmC7cBSJ5j7OUJfL4EQsZbXm1I514GD+GCnn/UhYqirv3hTdH0F22aiGJdgDwofZBr5iKAVf4Z2VHaQ8sE1taMH+cAqZEquJLmDuRTRKoJh6ZW116+8cuAVtDdfBGH8INTy8hedusJh+uUTqO+xg/dEt8EQHQlvO4DlQc5iqV/dAb1TnAdl9SyKV68naxts/B+Cy8P1FrVv7LHcXBDHYTo8jquhZRnz+GuxKrhqS2W8Nyfqj+k9xYZqd/usvvu6XUmb/wDDatw9i8zUDURKulcUeCA7OKyOGjNr6pKIkKnMPDHDoCA6N6aTrZBG1fuj3G8eg==",
},
false,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
asserter := assert.New(t)
expectedPath := "/testingfun"
ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
defer request.Body.Close()
asserter.Equal(expectedPath, request.URL.Path)
if tc.expirationHeader != "" {
writer.Header().Add("Expires", tc.expirationHeader)
}
writer.WriteHeader(tc.responseCode)
res, err := ioutil.ReadFile(tc.responseFixture)
if asserter.NoError(err) {
_, err = writer.Write(res)
asserter.NoError(err)
}
}))
defer ts.Close()
testInstance := NewGoogleCertFetcher(fmt.Sprintf("%s%s", ts.URL, expectedPath), httputil.DefaultHttpClient)
res, err := testInstance(context.Background())
if tc.expectError {
asserter.Error(err)
} else {
if !asserter.NoError(err) {
return
}
serials := make([]string, len(res.Certs))
for i, cert := range res.Certs {
serials[i] = base64.StdEncoding.EncodeToString(cert.Signature)
}
sort.Strings(serials)
asserter.Equal(tc.expectedSerials, serials)
if tc.expirationHeader != "" {
expectedTime, err := time.Parse(time.RFC1123, tc.expirationHeader)
if asserter.NoError(err) {
asserter.Equal(expectedTime, res.Expiration)
}
} else {
asserter.WithinDuration(time.Now(), res.Expiration, time.Second*1)
}
}
})
}
}
func TestNewGoogleAuthenticator_HappyPath_FirstKey(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(time.Hour)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
keyTwo, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
createCert(keyTwo, 456, start, expires),
},
Expiration: expires,
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever",
"aud": "%s",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyOne, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role{
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
res, err := testInstance(context.Background(), jwt)
asserter.NoError(err)
asserter.Equal(Principal{
UserID: subject,
Email: email,
Name: name,
Roles: expectedRoles,
}, res)
}
func TestNewGoogleAuthenticator_HappyPath_NotFirstKey(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(time.Hour)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
keyTwo, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
createCert(keyTwo, 456, start, expires),
},
Expiration: expires,
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever",
"aud": "%s",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyTwo, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role{
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
res, err := testInstance(context.Background(), jwt)
asserter.NoError(err)
asserter.Equal(Principal{
UserID: subject,
Email: email,
Name: name,
Roles: expectedRoles,
}, res)
}
func TestNewGoogleAuthenticator_HappyPath_Caching(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(time.Hour)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
keyTwo, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
fetchCount := 0
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
fetchCount++
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
createCert(keyTwo, 456, start, expires),
},
Expiration: time.Now().Add(time.Millisecond * 250),
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever",
"aud": "%s",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyOne, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role{
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
res, err := testInstance(context.Background(), jwt)
asserter.NoError(err)
asserter.Equal(Principal{
UserID: subject,
Email: email,
Name: name,
Roles: expectedRoles,
}, res)
asserter.Equal(1, fetchCount)
res, err = testInstance(context.Background(), jwt)
asserter.NoError(err)
asserter.Equal(Principal{
UserID: subject,
Email: email,
Name: name,
Roles: expectedRoles,
}, res)
asserter.Equal(1, fetchCount)
time.Sleep(time.Millisecond * 500)
res, err = testInstance(context.Background(), jwt)
asserter.NoError(err)
asserter.Equal(Principal{
UserID: subject,
Email: email,
Name: name,
Roles: expectedRoles,
}, res)
asserter.Equal(2, fetchCount)
}
func TestNewGoogleAuthenticator_FailureToFetchCertOnFirstTry(t *testing.T) {
	asserter := assert.New(t)
	reader := rand.Reader
	bitSize := 2048
	start := time.Now()
	expires := time.Now().Add(time.Hour)
	clientId := "testyMcTesterson"
	subject := "12345"
	email := "[email protected]"
	name := "Bob McTester"
	keyOne, err := rsa.GenerateKey(reader, bitSize)
	if err != nil {
		panic(err)
	}
	keyTwo, err := rsa.GenerateKey(reader, bitSize)
	if err != nil {
		panic(err)
	}
	shouldBlowUp := true
	certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
		if shouldBlowUp {
			shouldBlowUp = false
			return GooglePublicCerts{}, errors.New("BWAHAHAAHHA")
		}
		return GooglePublicCerts{
			Certs: []*x509.Certificate{
				createCert(keyOne, 123, start, expires),
				createCert(keyTwo, 456, start, expires),
			},
			Expiration: expires,
		}, nil
	}
	jwtPayload := fmt.Sprintf(`{
	"iss": "foo.bar.com",
	"azp": "whatever",
	"aud": "%s",
	"sub": "%s",
	"email": "%s",
	"email_verified": true,
	"at_hash": "9SrKH5GRtXul1yRpWCyLow",
	"name": "%s",
	"picture": "https://some.google.link/blah",
	"given_name": "Bob",
	"family_name": "McTester",
	"locale": "en",
	"iat": %d,
	"exp": %d,
	"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
	jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
	unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
	hasher := crypto.SHA256.New()
	hasher.Write([]byte(unsigned))
	sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyTwo, crypto.SHA256, hasher.Sum(nil))
	if err != nil {
		panic(err)
	}
	jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
	expectedRoles := []Role{
		Role("whatever"),
	}
	getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
		asserter.Equal(email, emailAddress)
		return expectedRoles
	})
	testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
	_, err = testInstance(context.Background(), jwt)
	asserter.Error(err)
	res, err := testInstance(context.Background(), jwt)
	asserter.NoError(err)
	asserter.Equal(Principal{
		UserID: subject,
		Email:  email,
		Name:   name,
		Roles:  expectedRoles,
	}, res)
}
func TestNewGoogleAuthenticator_InvalidSigner(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(time.Hour)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
keyTwo, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
},
Expiration: expires,
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever",
"aud": "%s",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyTwo, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role {
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
_, err = testInstance(context.Background(), jwt)
asserter.EqualError(err, fmt.Sprintf("invalid signature on token %s", jwt))
}
func TestNewGoogleAuthenticator_NotJson(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(time.Hour)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
},
Expiration: expires,
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever""", -- to many quotes and stuff
"aud": "%s",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyOne, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role {
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
_, err = testInstance(context.Background(), jwt)
asserter.EqualError(err, fmt.Sprintf("garbage token: payload not json (%s)", jwt))
}
func TestNewGoogleAuthenticator_InvalidAudience(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(time.Hour)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
},
Expiration: expires,
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever",
"aud": "%s-whoops",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyOne, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role {
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
_, err = testInstance(context.Background(), jwt)
asserter.EqualError(err, fmt.Sprintf("invalid audience: %s-whoops", clientId))
}
func TestNewGoogleAuthenticator_Expired(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(-time.Second)
clientId := "testyMcTesterson"
subject := "12345"
email := "[email protected]"
name := "Bob McTester"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
},
Expiration: expires,
}, nil
}
jwtPayload := fmt.Sprintf(`{
"iss": "foo.bar.com",
"azp": "whatever",
"aud": "%s",
"sub": "%s",
"email": "%s",
"email_verified": true,
"at_hash": "9SrKH5GRtXul1yRpWCyLow",
"name": "%s",
"picture": "https://some.google.link/blah",
"given_name": "Bob",
"family_name": "McTester",
"locale": "en",
"iat": %d,
"exp": %d,
"jti": "123abc56"
}`, clientId, subject, email, name, start.Unix(), expires.Unix())
jwtHeader := `{"alg":"RS256","kid":"a41a3570b8e3ae1b72caabcaa7b8d2db2065d7c1","typ":"JWT"}`
unsigned := fmt.Sprintf("%s.%s", base64.RawURLEncoding.EncodeToString([]byte(jwtHeader)), base64.RawURLEncoding.EncodeToString([]byte(jwtPayload)))
hasher := crypto.SHA256.New()
hasher.Write([]byte(unsigned))
sigBytes, err := rsa.SignPKCS1v15(rand.Reader, keyOne, crypto.SHA256, hasher.Sum(nil))
if err != nil {
panic(err)
}
jwt := fmt.Sprintf("%s.%s", unsigned, base64.RawURLEncoding.EncodeToString(sigBytes))
expectedRoles := []Role {
Role("whatever"),
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Equal(email, emailAddress)
return expectedRoles
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
_, err = testInstance(context.Background(), jwt)
asserter.EqualError(err, fmt.Sprintf("expired token, expiration: %s", expires.Format(time.RFC3339)))
}
func TestNewGoogleAuthenticator_Garbage(t *testing.T) {
asserter := assert.New(t)
reader := rand.Reader
bitSize := 2048
start := time.Now()
expires := time.Now().Add(-time.Second)
clientId := "testyMcTesterson"
keyOne, err := rsa.GenerateKey(reader, bitSize)
if err != nil {
panic(err)
}
certFetcher := func(ctx context.Context) (GooglePublicCerts, error) {
return GooglePublicCerts{
Certs: []*x509.Certificate{
createCert(keyOne, 123, start, expires),
},
Expiration: expires,
}, nil
}
getRoles := RoleOracle(func(ctx context.Context, emailAddress string) []Role {
asserter.Fail("should not be here")
return make([]Role, 0)
})
testInstance := NewGoogleAuthenticator(clientId, certFetcher, getRoles)
_, err = testInstance(context.Background(), "wtfisthis?")
asserter.EqualError(err, "garbage token: format (wtfisthis?)")
_, err = testInstance(context.Background(), "YWJj.YWJj.###")
asserter.EqualError(err, "garbage token: signature malformed (YWJj.YWJj.###)")
_, err = testInstance(context.Background(), "YWJj.###.YWJj")
asserter.EqualError(err, "garbage token: payload malformed (YWJj.###.YWJj)")
}
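// createCert builds a self-signed X.509 certificate around the given RSA key
// for use as a test fixture.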
func createCert(key *rsa.PrivateKey, serial int, start time.Time, expires time.Time) *x509.Certificate {
template := &x509.Certificate{
SerialNumber: big.NewInt(int64(serial)),
Subject: pkix.Name{
Organization: []string{"Testing FTW"},
},
NotBefore: start,
NotAfter: expires,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
bytes, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
if err != nil {
panic(err)
}
parsedCert, err := x509.ParseCertificate(bytes)
if err != nil {
panic(err)
}
return parsedCert
}
routine.py | import sys, time
from core import utils
from pprint import pprint
from modules import initials
from modules import subdomain
from modules import recon
from modules import assetfinding
from modules import takeover
from modules import screenshot
from modules import portscan
from modules import gitscan
from modules import burpstate
from modules import brutethings
from modules import dirbrute
from modules import vulnscan
from modules import cors
from modules import ipspace
from modules import sslscan
from modules import headers
from modules import conclusion
from modules import healcheck
# run the normal routine when no specific module is requested
def normal(options):
utils.print_good("Running with {0} speed".format(options['SPEED']))
# Create skeleton json
initials.Initials(options)
# Finding subdomain
subdomain.SubdomainScanning(options)
# waiting for previous module
utils.just_waiting(options, 'SubdomainScanning')
# Scanning for subdomain take over
takeover.TakeOverScanning(options)
# Screen shot the target on common service
screenshot.ScreenShot(options)
# Recon
recon.Recon(options)
# Recon
assetfinding.AssetFinding(options)
# Scanning for CorsScan
cors.CorsScan(options)
# Discovery IP space
ipspace.IPSpace(options)
# SSL Scan
sslscan.SSLScan(options)
# Headers Scan
headers.HeadersScan(options)
    # Note: from here on the modules take a really long time,
    # since they scan services and run other heavy checks
utils.print_info('This gonna take a while')
# Scanning all port using result from subdomain scanning
# and also checking vulnerable service based on version
portscan.PortScan(options)
# Directory scan
dirbrute.DirBrute(options)
# Starting vulnerable scan
vulnscan.VulnScan(options)
# brutethings.BruteThings(options)
conclusion.Conclusion(options)
def specific(options, module):
module = module.lower()
    # check whether the required tools are installed correctly, then exit
if 'health' in module:
health = healcheck.Healcheck(options)
if health.checking():
utils.print_good("All things look fine")
else:
utils.print_bad("Installing Osmedeus not correctly done")
utils.just_shutdown_flask(options)
sys.exit(0)
initials.Initials(options)
if 'sub' in module or 'subdomain' in module:
subdomain.SubdomainScanning(options)
takeover.TakeOverScanning(options)
screenshot.ScreenShot(options)
cors.CorsScan(options)
recon.Recon(options)
assetfinding.AssetFinding(options)
if 'ip' in module:
# Discovery IP space
ipspace.IPSpace(options)
if 'screen' in module:
# Screenshot the target on common services
screenshot.ScreenShot(options)
if 'portscan' in module:
# scanning port, service and vuln with masscan and nmap
portscan.PortScan(options)
if 'headers' in module:
headers.HeadersScan(options)
if 'asset' in module:
assetfinding.AssetFinding(options)
if 'vuln' in module:
# scanning vulnerable service based on version
vulnscan.VulnScan(options)
if 'dir' in module:
# run blind directory brute force directly
dirbrute.DirBrute(options)
if 'brute' in module or 'force' in module:
# running brute force things based on scanning result
brutethings.BruteThings(options)
if 'git' in module:
gitscan.GitScan(options)
# if 'burp' in module:
# burpstate.BurpState(options)
conclusion.Conclusion(options)
# just for debug purpose
def debug(options):
utils.print_good("Debug routine")
utils.print_good("Running with {0} speed".format(options['SPEED']))
# Create skeleton json
pprint(options)
initials.Initials(options)
# Finding subdomain
subdomain.SubdomainScanning(options)
# Waiting for the previous module
# utils.just_waiting(options, 'SubdomainScanning')
# recon.Recon(options)
# Screenshot the target on common services
screenshot.ScreenShot(options)
# Scanning for subdomain takeover
# takeover.TakeOverScanning(options)
# Discovery IP space
# ipspace.IPSpace(options)
# Scanning for CORS
# cors.CorsScan(options)
# SSL scan
# sslscan.SSLScan(options)
# Headers scan
# headers.HeadersScan(options)
# Note: from here on, the modules take a really long time scanning services and the like
# utils.print_info('This gonna take a while')
# dirbrute.DirBrute(options)
# Scanning all ports using the subdomain scanning result, also checking vulnerable services based on version
# portscan.PortScan(options)
# Starting vulnerability scan
# vulnscan.VulnScan(options)
# Brute force services from the port scan result
# brutethings.BruteThings(options)
# conclusion.Conclusion(options)
| specific |
CardGrid.tsx | import { Typography } from '@mui/material'
import Grid from '@mui/material/Grid'
import CardItem from '@components/CardItem'
import Loader from '@components/Loader'
import { EntryType } from '@model/entryType' | interface CardGridProps {
results: EntryType[] | undefined
isLoading?: boolean
errorMessage?: string
}
function CardGrid({ results, isLoading, errorMessage }: CardGridProps): JSX.Element {
const retrievedMovies =
isLoading && !errorMessage ? (
<Loader />
) : errorMessage ? (
<Typography color="red">{errorMessage}</Typography>
) : (
<>
{results?.map((result, index) => (
<Grid item lg={3} sm={6} md={4} xs={12} key={`result-${index}`}>
<CardItem Title={result.Title} Poster={result.Poster} Year={result.Year} imdbID={result.imdbID} />
</Grid>
))}
</>
)
return (
<Grid container spacing={3} sx={{ display: 'flex', justifyContent: 'center' }}>
{retrievedMovies}
</Grid>
)
}
export default CardGrid | |
validation.js | import React, { useEffect, useState } from "react";
import { useSelector, useDispatch } from "react-redux";
import { bindActionCreators } from "redux";
import { actionCreators } from "../../store/index";
const FormValidation = (profile, page) => {
const dispatch = useDispatch();
const { updateErrorMessage, updateflag } = bindActionCreators(actionCreators, dispatch);
//const [page, setPage] = useState(0);
//const profile = useSelector((state) => state.profile);
function | () {
var flag = true;
//console.log(profile.firstName);
//console.log(errorMessage);
if((page === 0) && ((profile.firstName === "") || (profile.website === "") || (profile.email === "") || (profile.schoolphone === ""))) {
//console.log(errorMessage + "inside");
flag = false;
var errorMessage = "Please fill in ";
if(profile.firstName === ""){
errorMessage = errorMessage + "school name, ";
}
if(profile.website === ""){
errorMessage = errorMessage + "website, ";
}
if(profile.email === ""){
errorMessage = errorMessage + "email, ";
}
if(profile.schoolphone === ""){
errorMessage = errorMessage + "school phone";
}
//alert(errorMessage);
//$('#div_element').load(errorMessage + ' #div_element');
updateErrorMessage(errorMessage);
//document.getElementById('div_element').innerHTML = errorMessage;
}
if((page === 1) && ((profile.address1 === "") || (profile.city === "") || (profile.state === "") || (profile.postalCode === "") || (profile.country === ""))) {
//console.log(errorMessage + "inside");
flag = false;
var errorMessage = "Please fill in ";
if(profile.address1 === ""){
errorMessage = errorMessage + "address, ";
}
if(profile.city === ""){
errorMessage = errorMessage + "city, ";
}
if(profile.state === ""){
errorMessage = errorMessage + "state, ";
}
if(profile.postalCode === ""){
errorMessage = errorMessage + "postal code, ";
}
if(profile.country === ""){
errorMessage = errorMessage + "country";
}
updateErrorMessage(errorMessage);
//document.getElementById('div_element').innerHTML = errorMessage;
}
if((page === 2) && ((profile.schoolContact === "") || (profile.schoolContactEmail === "") || (profile.schoolContactPhone === ""))) {
//console.log(errorMessage + "inside");
flag = false;
var errorMessage = "Please fill in ";
if(profile.schoolContact === ""){
errorMessage = errorMessage + "school contact person, ";
}
if(profile.schoolContactEmail === ""){
errorMessage = errorMessage + "school contact person email, ";
}
if(profile.schoolContactPhone === ""){
errorMessage = errorMessage + "school contact person phone";
}
updateErrorMessage(errorMessage);
//document.getElementById('div_element').innerHTML = errorMessage;
}
if((page === 3) && (profile.schoolAccreditation === "")) {
//console.log(errorMessage + "inside");
flag = false;
var errorMessage = "Please fill in ";
if(profile.schoolAccreditation === ""){
errorMessage = errorMessage + "school accreditation";
}
updateErrorMessage(errorMessage);
//document.getElementById('div_element').innerHTML = errorMessage;
}
updateflag(flag);
//return flag;
}
};
export default FormValidation;
| validation |
watcher.ts | // Implementation of a cache for arbitrary k8s custom resource in openebs.io
// api with v1alpha1 version.
import * as _ from 'lodash';
import {
CustomObjectsApi,
HttpError,
KubeConfig,
KubernetesObject,
KubernetesListObject,
ListWatch,
V1ListMeta,
Watch,
} from 'client-node-fixed-watcher';
const EventEmitter = require('events');
const log = require('./logger').Logger('watcher');
// If listWatch errors out then we restart it after this many msecs.
const RESTART_DELAY: number = 3000;
// We wait this many msecs for an event confirming operation done previously.
const EVENT_TIMEOUT: number = 5000;
const GROUP: string = 'openebs.io';
const VERSION: string = 'v1alpha1';
// Errors generated by api requests are hopelessly useless. We need to add
// a text from http body to them.
function bodyError(prefix: string, error: any): any {
if (error instanceof HttpError) {
error.message = prefix + ': ' + error.body.message;
} else {
error.message = prefix + ': ' + error.message;
}
return error;
}
// Commonly used metadata attributes.
export class CustomResourceMeta extends V1ListMeta {
name?: string;
namespace?: string;
generation?: number;
finalizers?: string[];
}
// Properties of custom resources (all optional so that we can do easy
// conversion from "object" type)
export class CustomResource implements KubernetesObject {
apiVersion?: string;
kind?: string;
metadata?: CustomResourceMeta;
spec?: object;
status?: any;
}
class TimeoutError extends Error {
constructor() {
super();
}
}
// Utility class for wrapping asynchronous operations that, once done, need to be
// confirmed by something from outside (i.e. a watcher event). If the confirmation
// does not arrive in time, the operation is ended regardless and the caller is told.
class ConfirmOp {
private id: string;
private timer: NodeJS.Timeout | null;
private timeout: number;
private since: number;
private confirmed: boolean;
private done: boolean;
private resolve?: () => void;
private reject?: (err: any) => void;
constructor(id: string, timeout: number) {
this.id = id;
this.timeout = timeout;
this.since = 0;
this.timer = null;
this.confirmed = false;
this.done = false;
}
run(action: () => Promise<void>): Promise<void> {
this.since = (new Date()).getTime();
if (this.timeout <= 0) {
this.confirmed = true;
}
return new Promise((resolve, reject) => {
this.resolve = resolve;
this.reject = reject;
action()
.then(() => {
this.done = true; | this.timer = setTimeout(() => {
const delta = (new Date()).getTime() - this.since;
log.warn(`Timed out waiting for watcher event on "${this.id}" (${delta}ms)`);
this.timer = null;
reject(new TimeoutError());
}, this.timeout);
} else {
this._complete();
}
})
.catch((err) => {
this.done = true;
this._complete(err);
});
});
}
// Beware that confirm can come before the operation done callback!
confirm() {
this.confirmed = true;
if (this.timeout > 0) {
this._complete();
}
}
_complete(err?: any) {
if (!err && (!this.confirmed || !this.done)) return;
const delta = (new Date()).getTime() - this.since;
log.trace(`The operation on "${this.id}" took ${delta}ms`);
if (this.timer) {
clearTimeout(this.timer);
}
if (err) {
this.reject!(err);
} else {
this.resolve!();
}
}
}
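// A minimal usage sketch (hypothetical call names): wrap an API request in a
// ConfirmOp, resolve it from the watcher event handler, and await the result.
//
//   const op = new ConfirmOp('pool-1', EVENT_TIMEOUT);
//   const done = op.run(() => someAsyncApiCall());
//   // later, when the watcher sees the corresponding event:
//   op.confirm();
//   await done;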
// Resource cache keeps track of a k8s custom resource and exposes methods
// for modifying the cache content.
//
// It is a classic operator loop design as seen in i.e. operator-sdk (golang)
// to watch a k8s resource. We utilize k8s client library to take care of low
// level details.
//
// It is a general implementation of watcher which can be used for any resource
// operator. The operator should subscribe to "new", "mod" and "del" events that
// are triggered when a resource is added, modified or deleted.
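// A minimal usage sketch (hypothetical resource class and namespace): `Pool` is
// assumed to be a class whose constructor accepts a CustomResource.
//
//   const cache = new CustomResourceCache<Pool>('mayastor', 'mayastorpool', kubeConfig, Pool);
//   cache.on('new', (pool) => log.info(`new pool ${pool}`));
//   cache.on('del', (pool) => log.info(`deleted pool ${pool}`));
//   await cache.start();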
export class CustomResourceCache<T> extends EventEmitter {
name: string;
plural: string;
namespace: string;
waiting: Record<string, ConfirmOp>;
k8sApi: CustomObjectsApi;
listWatch: ListWatch<CustomResource>;
creator: new (obj: CustomResource) => T;
eventHandlers: Record<string, (obj: CustomResource) => void>;
connected: boolean;
restartDelay: number;
idleTimeout: number;
eventTimeout: number;
timer: any;
// Create the cache for given namespace and resource name.
//
// @param namespace Namespace of custom resource.
// @param name Name of the resource.
// @param kubeConfig Kube config object.
// @param creator Constructor of the object from custom resource object.
// @param opts Cache/watcher options.
constructor(
namespace: string,
name: string,
kubeConfig: KubeConfig,
creator: new (obj: CustomResource) => T,
opts?: {
restartDelay?: number,
eventTimeout?: number,
idleTimeout?: number
}
) {
super();
this.k8sApi = kubeConfig.makeApiClient(CustomObjectsApi);
this.name = name;
this.plural = name + 's';
this.namespace = namespace;
this.creator = creator;
this.waiting = {};
this.connected = false;
this.restartDelay = opts?.restartDelay || RESTART_DELAY;
this.eventTimeout = opts?.eventTimeout || EVENT_TIMEOUT;
this.idleTimeout = opts?.idleTimeout || 0;
this.eventHandlers = {
add: this._onEvent.bind(this, 'new'),
update: this._onEvent.bind(this, 'mod'),
delete: this._onEvent.bind(this, 'del'),
};
const watch = new Watch(kubeConfig);
this.listWatch = new ListWatch<CustomResource>(
`/apis/${GROUP}/${VERSION}/namespaces/${this.namespace}/${this.plural}`,
watch,
async () => {
var resp = await this.k8sApi.listNamespacedCustomObject(
GROUP,
VERSION,
this.namespace,
this.plural);
return {
response: resp.response,
body: resp.body as KubernetesListObject<CustomResource>,
};
},
false
);
}
// Clear idle/restart timer.
_clearTimer() {
if (this.timer) {
clearTimeout(this.timer);
this.timer = undefined;
}
}
// Install a timer that restarts watcher if idle for more than x seconds.
// On Azure AKS we have observed watcher connections that don't get any
// events after some time when idle.
_setIdleTimeout() {
if (this.idleTimeout > 0) {
this._clearTimer();
this.timer = setTimeout(() => {
this.stop();
this.start();
}, this.idleTimeout);
}
}
// Called upon a watcher event. It unblocks create or update operation if any
// is waiting for the event and propagates the event further.
_onEvent(event: string, cr: CustomResource) {
let name = cr.metadata?.name;
if (name === undefined) {
log.error(`Ignoring event ${event} with object without a name`);
return;
}
log.trace(`Received watcher event ${event} for ${this.name} "${name}"`);
this._setIdleTimeout();
let confirmOp = this.waiting[name];
if (confirmOp) {
confirmOp.confirm();
}
this._doWithObject(cr, (obj) => this.emit(event, obj));
}
// Convert custom resource object to desired object swallowing exceptions
// and call callback with the new object.
_doWithObject(obj: CustomResource | undefined, cb: (obj: T) => void): void {
if (obj === undefined) return;
try {
var newObj = new this.creator(obj);
} catch (e) {
log.error(`Ignoring invalid ${this.name} custom resource: ${e}`);
return;
}
cb(newObj);
}
// This method does not return until the cache is successfully populated.
// That means that the promise eventually always fulfills (resolves).
start(): Promise<void> {
this.listWatch.on('error', this._onError.bind(this));
for (let evName in this.eventHandlers) {
this.listWatch.on(evName, this.eventHandlers[evName]);
}
return this.listWatch.start()
.then(() => {
this.connected = true;
log.debug(`${this.name} watcher was started`);
log.trace(`Initial content of the "${this.name}" cache: ` +
this.listWatch.list().map((i: CustomResource) => i.metadata?.name));
this._setIdleTimeout();
})
.catch((err) => {
log.error(`Failed to start ${this.name} watcher: ${err}`)
this.stop();
log.info(`Restart ${this.name} watcher after ${this.restartDelay}ms...`);
return new Promise((resolve, reject) => {
this.timer = setTimeout(() => {
this.start().then(resolve, reject);
}, this.restartDelay);
});
});
}
// Called when the connection breaks.
_onError(err: any) {
log.error(`Watcher error: ${err}`);
this.stop();
log.info(`Restarting ${this.name} watcher after ${this.restartDelay}ms...`);
this.timer = setTimeout(() => this.start(), this.restartDelay);
}
// Deregister all internal event handlers on the watcher.
stop() {
this._clearTimer();
this.connected = false;
log.debug(`Deregistering "${this.name}" cache event handlers`);
this.listWatch.off('error', this._onError);
for (let evName in this.eventHandlers) {
this.listWatch.off(evName, this.eventHandlers[evName]);
}
this.listWatch.stop();
}
isConnected(): boolean {
// should we propagate event to consumers about the reset?
return this.connected;
}
// Get all objects from the cache.
list(): T[] {
let list: T[] = [];
this.listWatch.list().forEach((item) => {
this._doWithObject(item, (obj) => list.push(obj));
});
return list;
}
// Get object with given name (ID).
get(name: string): T | undefined {
var result;
this._doWithObject(this.listWatch.get(name), (obj) => result = obj);
return result;
}
// Execute the action and do not return until we receive an event from watcher.
// Otherwise the object in the cache might be stale when we do the next
// modification to it. Set timeout for the case when we never receive the
// event and restart the watcher to get fresh content in that case.
async _waitForEvent(name: string, action: () => Promise<void>) {
this.waiting[name] = new ConfirmOp(name, this.eventTimeout);
try {
await this.waiting[name].run(action);
} catch (err) {
delete this.waiting[name];
if (err instanceof TimeoutError) {
// restart the cache
this.stop();
await this.start();
} else {
throw err;
}
}
}
// Create the resource and wait for it to be created.
async create(obj: CustomResource) {
let name: string = obj.metadata?.name || '';
if (!name) {
throw Error("Object does not have a name");
}
log.trace(`Creating new "${this.name}" resource: ${JSON.stringify(obj)}`);
await this._waitForEvent(
name,
async () => {
try {
await this.k8sApi.createNamespacedCustomObject(
GROUP,
VERSION,
this.namespace,
this.plural,
obj
);
} catch (err) {
throw bodyError(`Creation of ${this.name} "${name}" failed`, err);
}
}
);
}
// Update the resource. The merge callback takes the original version from
// the cache, modifies it and returns the new version of object. The reason
// for this is that sometimes we get stale errors and we must repeat
// the operation with an updated version of the original object.
async update(name: string, merge: (orig: T) => CustomResource | undefined) {
await this._update(name, () => {
let orig = this.get(name);
if (orig === undefined) {
log.warn(`Tried to update ${this.name} "${name}" that does not exist`);
return;
}
return merge(orig);
});
}
// Same as above but works with custom resource type rather than user
// defined object.
async _updateCustomResource(name: string, merge: (orig: CustomResource) => CustomResource | undefined) {
await this._update(name, () => {
let orig = this.listWatch.get(name);
if (orig === undefined) {
log.warn(`Tried to update ${this.name} "${name}" that does not exist`);
return;
}
return merge(orig);
});
}
// Update the resource and wait for mod event. If update fails due to an error
// we restart the watcher and retry the operation. If event does not come,
// we restart the watcher.
async _update(
name: string,
getAndMerge: () => CustomResource | undefined,
) {
for (let retries = 1; retries >= 0; retries -= 1) {
let obj = getAndMerge();
if (obj === undefined) {
// likely means that the props are the same - nothing to do
return;
}
log.trace(`Updating ${this.name} "${name}": ${JSON.stringify(obj)}`);
try {
await this._waitForEvent(
name,
async () => {
await this.k8sApi.replaceNamespacedCustomObject(
GROUP,
VERSION,
this.namespace,
this.plural,
name,
obj!
);
}
);
break;
} catch (err) {
err = bodyError(`Update of ${this.name} "${name}" failed`, err);
if (retries == 0) {
throw err;
}
log.warn(`${err} (retrying ...)`);
this.stop();
await this.start();
}
}
}
// Update status of the resource. Unlike in case create/update we don't have
// to wait for confirming event because generation number is not incremented
// upon status change.
async updateStatus(name: string, merge: (orig: T) => CustomResource | undefined) {
for (let retries = 1; retries >= 0; retries -= 1) {
let orig = this.get(name);
if (orig === undefined) {
log.warn(`Tried to update status of ${this.name} "${name}" but it is gone`);
return;
}
let obj = merge(orig);
if (obj === undefined) {
// likely means that the props are the same - nothing to do
return;
}
log.trace(`Updating status of ${this.name} "${name}": ${JSON.stringify(obj.status)}`);
try {
await this._waitForEvent(
name,
async () => {
await this.k8sApi.replaceNamespacedCustomObjectStatus(
GROUP,
VERSION,
this.namespace,
this.plural,
name,
obj!
);
}
);
break;
} catch (err) {
err = bodyError(`Status update of ${this.name} "${name}" failed`, err);
if (retries == 0) {
throw err;
}
log.warn(`${err} (retrying ...)`);
this.stop();
await this.start();
}
}
}
// Delete the resource.
async delete(name: string) {
let orig = this.get(name);
if (orig === undefined) {
log.warn(`Tried to delete ${this.name} "${name}" that does not exist`);
return;
}
log.trace(`Deleting ${this.name} "${name}"`);
await this._waitForEvent(
name,
async () => {
try {
await this.k8sApi.deleteNamespacedCustomObject(
GROUP,
VERSION,
this.namespace,
this.plural,
name
);
} catch (err) {
throw bodyError(`Delete of ${this.name} "${name}" failed`, err);
}
}
);
}
// Add finalizer to given resource if not already there.
async addFinalizer(name: string, finalizer: string) {
await this._updateCustomResource(name, (orig) => {
let finalizers = orig.metadata?.finalizers;
let newFinalizers = finalizers || [];
if (newFinalizers.indexOf(finalizer) >= 0) {
// it's already there
return;
}
newFinalizers = [finalizer].concat(newFinalizers);
let obj = _.cloneDeep(orig);
if (obj.metadata === undefined) {
throw new Error(`Resource ${this.name} "${name}" without metadata`)
}
obj.metadata.finalizers = newFinalizers;
return obj;
});
}
// Remove finalizer from the resource in case it's there.
async removeFinalizer(name: string, finalizer: string) {
await this._updateCustomResource(name, (orig) => {
let finalizers = orig.metadata?.finalizers;
let newFinalizers = finalizers || [];
let idx = newFinalizers.indexOf(finalizer);
if (idx < 0) {
// it's not there
return;
}
newFinalizers.splice(idx, 1);
let obj = _.cloneDeep(orig);
if (obj.metadata === undefined) {
throw new Error(`Resource ${this.name} "${name}" without metadata`)
}
obj.metadata.finalizers = newFinalizers;
return obj;
});
}
} | if (!this.confirmed) { |
reducers.ts | import { IHomeState } from '../../../../interfaces/home';
import initialState from './initialState';
import {
IHomeActionTypes,
SET_ATTEMPTED_QUIZZES_TYPE,
SET_CREATED_QUIZZES_TYPE,
SET_ONGOING_QUIZZES_TYPE,
TOGGLE_QUIZ_TYPE
} from './types';
export default function reducer(
state = initialState,
action: IHomeActionTypes
): IHomeState {
switch (action.type) {
case TOGGLE_QUIZ_TYPE:
return {
...state,
quizTypeSelected: action.payload.quizTypeSelected
};
case SET_ATTEMPTED_QUIZZES_TYPE:
return {
...state,
attemptedQuizList: action.payload
};
case SET_CREATED_QUIZZES_TYPE:
return {
...state,
createdQuizList: action.payload
};
case SET_ONGOING_QUIZZES_TYPE:
return {
...state,
ongoingQuizList: action.payload
}; | default:
return state;
}
} | |
environmentTextureTools.ts | import { Nullable } from "../types";
import { Tools } from "./tools";
import { Vector3 } from "../Maths/math.vector";
import { Scalar } from "../Maths/math.scalar";
import { SphericalPolynomial } from "../Maths/sphericalPolynomial";
import { InternalTexture } from "../Materials/Textures/internalTexture";
import { BaseTexture } from "../Materials/Textures/baseTexture";
import { CubeTexture } from "../Materials/Textures/cubeTexture";
import { Constants } from "../Engines/constants";
import { Scene } from "../scene";
import { PostProcess } from "../PostProcesses/postProcess";
import { Logger } from "../Misc/logger";
import "../Engines/Extensions/engine.renderTarget";
import "../Shaders/rgbdEncode.fragment";
import "../Shaders/rgbdDecode.fragment";
/**
* Raw texture data and descriptor sufficient for WebGL texture upload
*/
export interface EnvironmentTextureInfo {
/**
* Version of the environment map
*/
version: number;
/**
* Width of image
*/
width: number;
/**
* Irradiance information stored in the file.
*/
irradiance: any;
/**
* Specular information stored in the file.
*/
specular: any;
}
/**
* Defines One Image in the file. It requires only the position in the file
* as well as the length.
*/
interface BufferImageData {
/**
* Length of the image data.
*/
length: number;
/**
* Position of the data from the null terminator delimiting the end of the JSON.
*/
position: number;
}
/**
* Defines the specular data enclosed in the file.
* This corresponds to the version 1 of the data.
*/
export interface EnvironmentTextureSpecularInfoV1 {
/**
* Defines where the specular payload is located. It is a runtime-only value, not stored in the file.
*/
specularDataPosition?: number;
/**
* This contains all the images data needed to reconstruct the cubemap.
*/
mipmaps: Array<BufferImageData>;
/**
* Defines the scale applied to environment texture. This manages the range of LOD level used for IBL according to the roughness.
*/
lodGenerationScale: number;
}
/**
* Defines the required storage to save the environment irradiance information.
*/
interface EnvironmentTextureIrradianceInfoV1 {
x: Array<number>;
y: Array<number>;
z: Array<number>;
xx: Array<number>;
yy: Array<number>;
zz: Array<number>;
yz: Array<number>;
zx: Array<number>;
xy: Array<number>;
}
/**
* Sets of helpers addressing the serialization and deserialization of environment texture
* stored in a BabylonJS env file.
* Those files are usually stored as .env files.
*/
export class | {
/**
* Magic number identifying the env file.
*/
private static _MagicBytes = [0x86, 0x16, 0x87, 0x96, 0xf6, 0xd6, 0x96, 0x36];
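/**
 * On-disk layout, as written by CreateEnvTextureAsync below: the 8 magic bytes,
 * then the JSON manifest, a 0x00 terminator, and finally the concatenated RGBD
 * PNG payloads addressed by the manifest's position/length entries.
 */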
/**
* Gets the environment info from an env file.
* @param data The array buffer containing the .env bytes.
* @returns the environment file info (the json header) if successfully parsed.
*/
public static GetEnvInfo(data: ArrayBuffer): Nullable<EnvironmentTextureInfo> {
let dataView = new DataView(data);
let pos = 0;
for (let i = 0; i < EnvironmentTextureTools._MagicBytes.length; i++) {
if (dataView.getUint8(pos++) !== EnvironmentTextureTools._MagicBytes[i]) {
Logger.Error('Not a babylon environment map');
return null;
}
}
// Read json manifest - collect characters up to null terminator
let manifestString = '';
let charCode = 0x00;
while ((charCode = dataView.getUint8(pos++))) {
manifestString += String.fromCharCode(charCode);
}
let manifest: EnvironmentTextureInfo = JSON.parse(manifestString);
if (manifest.specular) {
// Extend the header with the position of the payload.
manifest.specular.specularDataPosition = pos;
// Fallback to 0.8 exactly if lodGenerationScale is not defined for backward compatibility.
manifest.specular.lodGenerationScale = manifest.specular.lodGenerationScale || 0.8;
}
return manifest;
}
/**
* Creates an environment texture from a loaded cube texture.
* @param texture defines the cube texture to convert in env file
* @return a promise containing the environment data if successful.
*/
public static CreateEnvTextureAsync(texture: CubeTexture): Promise<ArrayBuffer> {
let internalTexture = texture.getInternalTexture();
if (!internalTexture) {
return Promise.reject("The cube texture is invalid.");
}
if (!texture._prefiltered) {
return Promise.reject("The cube texture is invalid (not prefiltered).");
}
let engine = internalTexture.getEngine();
if (engine && engine.premultipliedAlpha) {
return Promise.reject("Env texture can only be created when the engine is created with the premultipliedAlpha option set to false.");
}
if (texture.textureType === Constants.TEXTURETYPE_UNSIGNED_INT) {
return Promise.reject("The cube texture should allow HDR (Full Float or Half Float).");
}
let canvas = engine.getRenderingCanvas();
if (!canvas) {
return Promise.reject("Env texture can only be created when the engine is associated to a canvas.");
}
let textureType = Constants.TEXTURETYPE_FLOAT;
if (!engine.getCaps().textureFloatRender) {
textureType = Constants.TEXTURETYPE_HALF_FLOAT;
if (!engine.getCaps().textureHalfFloatRender) {
return Promise.reject("Env texture can only be created when the browser supports half float or full float rendering.");
}
}
let cubeWidth = internalTexture.width;
let hostingScene = new Scene(engine);
let specularTextures: { [key: number]: ArrayBuffer } = {};
let promises: Promise<void>[] = [];
// Read and collect all mipmaps data from the cube.
let mipmapsCount = Scalar.Log2(internalTexture.width);
mipmapsCount = Math.round(mipmapsCount);
for (let i = 0; i <= mipmapsCount; i++) {
let faceWidth = Math.pow(2, mipmapsCount - i);
// All faces of the cube.
for (let face = 0; face < 6; face++) {
let data = texture.readPixels(face, i);
// Creates a temp texture with the face data.
let tempTexture = engine.createRawTexture(data, faceWidth, faceWidth, Constants.TEXTUREFORMAT_RGBA, false, false, Constants.TEXTURE_NEAREST_SAMPLINGMODE, null, textureType);
// And rgbdEncode them.
let promise = new Promise<void>((resolve, reject) => {
let rgbdPostProcess = new PostProcess("rgbdEncode", "rgbdEncode", null, null, 1, null, Constants.TEXTURE_NEAREST_SAMPLINGMODE, engine, false, undefined, Constants.TEXTURETYPE_UNSIGNED_INT, undefined, null, false);
rgbdPostProcess.getEffect().executeWhenCompiled(() => {
rgbdPostProcess.onApply = (effect) => {
effect._bindTexture("textureSampler", tempTexture);
};
// As the process needs to happen on the main canvas, keep track of the current size
let currentW = engine.getRenderWidth();
let currentH = engine.getRenderHeight();
// Set the desired size for the texture
engine.setSize(faceWidth, faceWidth);
hostingScene.postProcessManager.directRender([rgbdPostProcess], null);
// Read the data back from WebGL
Tools.ToBlob(canvas!, (blob) => {
let fileReader = new FileReader();
fileReader.onload = (event: any) => {
let arrayBuffer = event.target!.result as ArrayBuffer;
specularTextures[i * 6 + face] = arrayBuffer;
resolve();
};
fileReader.readAsArrayBuffer(blob!);
});
// Reapply the previous canvas size
engine.setSize(currentW, currentH);
});
});
promises.push(promise);
}
}
// Once all the textures have been collected as RGBD data stored in PNGs
return Promise.all(promises).then(() => {
// We can delete the hosting scene keeping track of all the creation objects
hostingScene.dispose();
// Creates the json header for the env texture
let info: EnvironmentTextureInfo = {
version: 1,
width: cubeWidth,
irradiance: this._CreateEnvTextureIrradiance(texture),
specular: {
mipmaps: [],
lodGenerationScale: texture.lodGenerationScale
}
};
// Sets the specular image data information
let position = 0;
for (let i = 0; i <= mipmapsCount; i++) {
for (let face = 0; face < 6; face++) {
let byteLength = specularTextures[i * 6 + face].byteLength;
info.specular.mipmaps.push({
length: byteLength,
position: position
});
position += byteLength;
}
}
// Encode the JSON as an array buffer
let infoString = JSON.stringify(info);
let infoBuffer = new ArrayBuffer(infoString.length + 1);
let infoView = new Uint8Array(infoBuffer); // Limited to ascii subset matching unicode.
for (let i = 0, strLen = infoString.length; i < strLen; i++) {
infoView[i] = infoString.charCodeAt(i);
}
// Ends up with a null terminator for easier parsing
infoView[infoString.length] = 0x00;
// Computes the final required size and creates the storage
let totalSize = EnvironmentTextureTools._MagicBytes.length + position + infoBuffer.byteLength;
let finalBuffer = new ArrayBuffer(totalSize);
let finalBufferView = new Uint8Array(finalBuffer);
let dataView = new DataView(finalBuffer);
// Copy the magic bytes identifying the file in
let pos = 0;
for (let i = 0; i < EnvironmentTextureTools._MagicBytes.length; i++) {
dataView.setUint8(pos++, EnvironmentTextureTools._MagicBytes[i]);
}
// Add the json info
finalBufferView.set(new Uint8Array(infoBuffer), pos);
pos += infoBuffer.byteLength;
// Finally inserts the texture data
for (let i = 0; i <= mipmapsCount; i++) {
for (let face = 0; face < 6; face++) {
let dataBuffer = specularTextures[i * 6 + face];
finalBufferView.set(new Uint8Array(dataBuffer), pos);
pos += dataBuffer.byteLength;
}
}
// Voila
return finalBuffer;
});
}
/**
* Creates a JSON representation of the spherical data.
* @param texture defines the texture containing the polynomials
* @return the JSON representation of the spherical info
*/
private static _CreateEnvTextureIrradiance(texture: CubeTexture): Nullable<EnvironmentTextureIrradianceInfoV1> {
let polynomials = texture.sphericalPolynomial;
if (polynomials == null) {
return null;
}
return {
x: [polynomials.x.x, polynomials.x.y, polynomials.x.z],
y: [polynomials.y.x, polynomials.y.y, polynomials.y.z],
z: [polynomials.z.x, polynomials.z.y, polynomials.z.z],
xx: [polynomials.xx.x, polynomials.xx.y, polynomials.xx.z],
yy: [polynomials.yy.x, polynomials.yy.y, polynomials.yy.z],
zz: [polynomials.zz.x, polynomials.zz.y, polynomials.zz.z],
yz: [polynomials.yz.x, polynomials.yz.y, polynomials.yz.z],
zx: [polynomials.zx.x, polynomials.zx.y, polynomials.zx.z],
xy: [polynomials.xy.x, polynomials.xy.y, polynomials.xy.z]
} as any;
}
/**
* Creates the ArrayBufferViews used for initializing environment texture image data.
* @param arrayBuffer the underlying ArrayBuffer to which the views refer
* @param info parameters that determine what views will be created for accessing the underlying buffer
* @return the views described by info providing access to the underlying buffer
*/
public static CreateImageDataArrayBufferViews(arrayBuffer: any, info: EnvironmentTextureInfo): Array<Array<ArrayBufferView>> {
if (info.version !== 1) {
throw new Error(`Unsupported babylon environment map version "${info.version}"`);
}
const specularInfo = info.specular as EnvironmentTextureSpecularInfoV1;
// Double checks the enclosed info
let mipmapsCount = Scalar.Log2(info.width);
mipmapsCount = Math.round(mipmapsCount) + 1;
if (specularInfo.mipmaps.length !== 6 * mipmapsCount) {
throw new Error(`Unsupported specular mipmaps number "${specularInfo.mipmaps.length}"`);
}
const imageData = new Array<Array<ArrayBufferView>>(mipmapsCount);
for (let i = 0; i < mipmapsCount; i++) {
imageData[i] = new Array<ArrayBufferView>(6);
for (let face = 0; face < 6; face++) {
const imageInfo = specularInfo.mipmaps[i * 6 + face];
imageData[i][face] = new Uint8Array(arrayBuffer, specularInfo.specularDataPosition! + imageInfo.position, imageInfo.length);
}
}
return imageData;
}
/**
* Uploads the texture info contained in the env file to the GPU.
* @param texture defines the internal texture to upload to
* @param arrayBuffer defines the buffer containing the data to load
* @param info defines the texture info retrieved through the GetEnvInfo method
* @returns a promise
*/
public static UploadEnvLevelsAsync(texture: InternalTexture, arrayBuffer: any, info: EnvironmentTextureInfo): Promise<void> {
if (info.version !== 1) {
throw new Error(`Unsupported babylon environment map version "${info.version}"`);
}
const specularInfo = info.specular as EnvironmentTextureSpecularInfoV1;
if (!specularInfo) {
// Nothing else parsed so far
return Promise.resolve();
}
texture._lodGenerationScale = specularInfo.lodGenerationScale;
const imageData = EnvironmentTextureTools.CreateImageDataArrayBufferViews(arrayBuffer, info);
return EnvironmentTextureTools.UploadLevelsAsync(texture, imageData);
}
/**
* Uploads the levels of image data to the GPU.
* @param texture defines the internal texture to upload to
* @param imageData defines the array buffer views of image data [mipmap][face]
* @returns a promise
*/
public static UploadLevelsAsync(texture: InternalTexture, imageData: ArrayBufferView[][]): Promise<void> {
if (!Tools.IsExponentOfTwo(texture.width)) {
throw new Error("Texture size must be a power of two");
}
const mipmapsCount = Math.round(Scalar.Log2(texture.width)) + 1;
// Gets everything ready.
let engine = texture.getEngine();
let expandTexture = false;
let generateNonLODTextures = false;
let rgbdPostProcess: Nullable<PostProcess> = null;
let cubeRtt: Nullable<InternalTexture> = null;
let lodTextures: Nullable<{ [lod: number]: BaseTexture }> = null;
let caps = engine.getCaps();
texture.format = Constants.TEXTUREFORMAT_RGBA;
texture.type = Constants.TEXTURETYPE_UNSIGNED_INT;
texture.generateMipMaps = true;
engine.updateTextureSamplingMode(Constants.TEXTURE_TRILINEAR_SAMPLINGMODE, texture);
// Add extra process if texture lod is not supported
if (!caps.textureLOD) {
expandTexture = false;
generateNonLODTextures = true;
lodTextures = {};
}
// in webgl 1 there are no ways to either render or copy lod level information for float textures.
else if (engine.webGLVersion < 2) {
expandTexture = false;
}
// If half float available we can uncompress the texture
else if (caps.textureHalfFloatRender && caps.textureHalfFloatLinearFiltering) {
expandTexture = true;
texture.type = Constants.TEXTURETYPE_HALF_FLOAT;
}
// If full float available we can uncompress the texture
else if (caps.textureFloatRender && caps.textureFloatLinearFiltering) {
expandTexture = true;
texture.type = Constants.TEXTURETYPE_FLOAT;
}
// Expand the texture if possible
if (expandTexture) {
// Simply run through the decode PP
rgbdPostProcess = new PostProcess("rgbdDecode", "rgbdDecode", null, null, 1, null, Constants.TEXTURE_TRILINEAR_SAMPLINGMODE, engine, false, undefined, texture.type, undefined, null, false);
texture._isRGBD = false;
texture.invertY = false;
cubeRtt = engine.createRenderTargetCubeTexture(texture.width, {
generateDepthBuffer: false,
generateMipMaps: true,
generateStencilBuffer: false,
samplingMode: Constants.TEXTURE_TRILINEAR_SAMPLINGMODE,
type: texture.type,
format: Constants.TEXTUREFORMAT_RGBA
});
}
else {
texture._isRGBD = true;
texture.invertY = true;
// In case of missing support, apply the same patch as for DDS files.
if (generateNonLODTextures) {
let mipSlices = 3;
let scale = texture._lodGenerationScale;
let offset = texture._lodGenerationOffset;
for (let i = 0; i < mipSlices; i++) {
//compute LOD from even spacing in smoothness (matching shader calculation)
let smoothness = i / (mipSlices - 1);
let roughness = 1 - smoothness;
let minLODIndex = offset; // roughness = 0
let maxLODIndex = (mipmapsCount - 1) * scale + offset; // roughness = 1 (mipmaps start from 0)
let lodIndex = minLODIndex + (maxLODIndex - minLODIndex) * roughness;
let mipmapIndex = Math.round(Math.min(Math.max(lodIndex, 0), maxLODIndex));
let glTextureFromLod = new InternalTexture(engine, InternalTexture.DATASOURCE_TEMP);
glTextureFromLod.isCube = true;
glTextureFromLod.invertY = true;
glTextureFromLod.generateMipMaps = false;
engine.updateTextureSamplingMode(Constants.TEXTURE_LINEAR_LINEAR, glTextureFromLod);
// Wrap in a base texture for easy binding.
let lodTexture = new BaseTexture(null);
lodTexture.isCube = true;
lodTexture._texture = glTextureFromLod;
lodTextures![mipmapIndex] = lodTexture;
switch (i) {
case 0:
texture._lodTextureLow = lodTexture;
break;
case 1:
texture._lodTextureMid = lodTexture;
break;
case 2:
texture._lodTextureHigh = lodTexture;
break;
}
}
}
}
let promises: Promise<void>[] = [];
// All mipmaps up to provided number of images
for (let i = 0; i < imageData.length; i++) {
// All faces
for (let face = 0; face < 6; face++) {
// Constructs an image element from image data
let bytes = imageData[i][face];
let blob = new Blob([bytes], { type: 'image/png' });
let url = URL.createObjectURL(blob);
let image = new Image();
image.src = url;
// Enqueue promise to upload to the texture.
let promise = new Promise<void>((resolve, reject) => {
image.onload = () => {
if (expandTexture) {
let tempTexture = engine.createTexture(null, true, true, null, Constants.TEXTURE_NEAREST_SAMPLINGMODE, null,
(message) => {
reject(message);
},
image);
rgbdPostProcess!.getEffect().executeWhenCompiled(() => {
// Uncompress the data to a RTT
rgbdPostProcess!.onApply = (effect) => {
effect._bindTexture("textureSampler", tempTexture);
effect.setFloat2("scale", 1, 1);
};
engine.scenes[0].postProcessManager.directRender([rgbdPostProcess!], cubeRtt, true, face, i);
// Cleanup
engine.restoreDefaultFramebuffer();
tempTexture.dispose();
window.URL.revokeObjectURL(url);
resolve();
});
}
else {
engine._uploadImageToTexture(texture, image, face, i);
// Upload the face to the non lod texture support
if (generateNonLODTextures) {
let lodTexture = lodTextures![i];
if (lodTexture) {
engine._uploadImageToTexture(lodTexture._texture!, image, face, 0);
}
}
resolve();
}
};
image.onerror = (error) => {
reject(error);
};
});
promises.push(promise);
}
}
// Fill remaining mipmaps with black textures.
if (imageData.length < mipmapsCount) {
let data: ArrayBufferView;
const size = Math.pow(2, mipmapsCount - 1 - imageData.length);
const dataLength = size * size * 4;
switch (texture.type) {
case Constants.TEXTURETYPE_UNSIGNED_INT: {
data = new Uint8Array(dataLength);
break;
}
case Constants.TEXTURETYPE_HALF_FLOAT: {
data = new Uint16Array(dataLength);
break;
}
case Constants.TEXTURETYPE_FLOAT: {
data = new Float32Array(dataLength);
break;
}
}
for (let i = imageData.length; i < mipmapsCount; i++) {
for (let face = 0; face < 6; face++) {
engine._uploadArrayBufferViewToTexture(texture, data!, face, i);
}
}
}
// Once all done, finishes the cleanup and return
return Promise.all(promises).then(() => {
// Release temp RTT.
if (cubeRtt) {
engine._releaseFramebufferObjects(cubeRtt);
engine._releaseTexture(texture);
cubeRtt._swapAndDie(texture);
}
// Release temp Post Process.
if (rgbdPostProcess) {
rgbdPostProcess.dispose();
}
// Flag internal texture as ready in case they are in use.
if (generateNonLODTextures) {
if (texture._lodTextureHigh && texture._lodTextureHigh._texture) {
texture._lodTextureHigh._texture.isReady = true;
}
if (texture._lodTextureMid && texture._lodTextureMid._texture) {
texture._lodTextureMid._texture.isReady = true;
}
if (texture._lodTextureLow && texture._lodTextureLow._texture) {
texture._lodTextureLow._texture.isReady = true;
}
}
});
}
/**
* Uploads spherical polynomials information to the texture.
* @param texture defines the texture we are trying to upload the information to
* @param info defines the environment texture info retrieved through the GetEnvInfo method
*/
public static UploadEnvSpherical(texture: InternalTexture, info: EnvironmentTextureInfo): void {
if (info.version !== 1) {
Logger.Warn('Unsupported babylon environment map version "' + info.version + '"');
}
let irradianceInfo = info.irradiance as EnvironmentTextureIrradianceInfoV1;
if (!irradianceInfo) {
return;
}
const sp = new SphericalPolynomial();
Vector3.FromArrayToRef(irradianceInfo.x, 0, sp.x);
Vector3.FromArrayToRef(irradianceInfo.y, 0, sp.y);
Vector3.FromArrayToRef(irradianceInfo.z, 0, sp.z);
Vector3.FromArrayToRef(irradianceInfo.xx, 0, sp.xx);
Vector3.FromArrayToRef(irradianceInfo.yy, 0, sp.yy);
Vector3.FromArrayToRef(irradianceInfo.zz, 0, sp.zz);
Vector3.FromArrayToRef(irradianceInfo.yz, 0, sp.yz);
Vector3.FromArrayToRef(irradianceInfo.zx, 0, sp.zx);
Vector3.FromArrayToRef(irradianceInfo.xy, 0, sp.xy);
texture._sphericalPolynomial = sp;
}
/** @hidden */
public static _UpdateRGBDAsync(internalTexture: InternalTexture, data: ArrayBufferView[][], sphericalPolynomial: Nullable<SphericalPolynomial>, lodScale: number, lodOffset: number): Promise<void> {
internalTexture._dataSource = InternalTexture.DATASOURCE_CUBERAW_RGBD;
internalTexture._bufferViewArrayArray = data;
internalTexture._lodGenerationScale = lodScale;
internalTexture._lodGenerationOffset = lodOffset;
internalTexture._sphericalPolynomial = sphericalPolynomial;
return EnvironmentTextureTools.UploadLevelsAsync(internalTexture, data).then(() => {
internalTexture.isReady = true;
});
}
}
// References the dependencies.
InternalTexture._UpdateRGBDAsync = EnvironmentTextureTools._UpdateRGBDAsync;
| EnvironmentTextureTools |
train.py | from Recommender_System.algorithm.NeuMF.model import NeuMF_model
from Recommender_System.algorithm.train import train, test
import tensorflow as tf
def train_with_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
neumf_model, gmf_model, mlp_model = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
print('Pretraining the GMF component')
train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)
print('Pretraining the MLP component')
train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)
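# Stitch the two pretrained output layers into NeuMF's single output layer:
# concatenate the kernels, sum the biases, and scale both by 0.5 so the GMF and
# MLP branches are weighted equally (the trade-off alpha = 0.5 in the NeuMF paper).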
out_kernel = tf.concat((gmf_model.get_layer('gmf_out').get_weights()[0], mlp_model.get_layer('mlp_out').get_weights()[0]), 0)
out_bias = gmf_model.get_layer('gmf_out').get_weights()[1] + mlp_model.get_layer('mlp_out').get_weights()[1]
neumf_model.get_layer('out').set_weights([out_kernel * 0.5, out_bias * 0.5])
test(neumf_model, train_data, test_data, topk_data, batch=512)
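# Fine-tune with vanilla SGD at a small learning rate: the NeuMF paper argues that
# momentum-based optimizers such as Adam are unsuitable after pretraining because
# their per-parameter moment estimates for the pretrained weights are missing.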
train(neumf_model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.SGD(0.0001), epochs=10, batch=512)
def train_without_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
neumf_model, _, _ = | NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
train(neumf_model, train_data, test_data, topk_data, epochs=10, batch=512)
|
|
selectivity.go | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"math"
"math/bits"
"sort"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
planutil "github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"go.uber.org/zap"
)
// If one condition can't be calculated, we will assume that the selectivity of this condition is 0.8.
const selectionFactor = 0.8
// StatsNode is used for calculating selectivity.
type StatsNode struct {
Tp int
ID int64
// mask is a bit pattern whose ith bit will indicate whether the ith expression is covered by this index/column.
mask int64
// Ranges contains all the Ranges we got.
Ranges []*ranger.Range
// Selectivity indicates the Selectivity of this column/index.
Selectivity float64
// numCols is the number of columns contained in the index or column(which is always 1).
numCols int
// partCover indicates whether the bit in the mask is for a full cover or partial cover. It is only true
// when the condition is a DNF expression on index, and the expression is not totally extracted as access condition.
partCover bool
}
// The type of the StatsNode.
const (
IndexType = iota
PkType
ColType
)
func compareType(l, r int) int {
if l == r {
return 0
}
if l == ColType {
return -1
}
if l == PkType {
return 1
}
if r == ColType {
return 1
}
return -1
}
// MockStatsNode is only used for test.
func MockStatsNode(id int64, m int64, num int) *StatsNode {
return &StatsNode{ID: id, mask: m, numCols: num}
}
const unknownColumnID = math.MinInt64
// getConstantColumnID receives two expressions and, if one of them is a column and the
// other is a constant, returns the ID of the column.
func | (e []expression.Expression) int64 {
if len(e) != 2 {
return unknownColumnID
}
col, ok1 := e[0].(*expression.Column)
_, ok2 := e[1].(*expression.Constant)
if ok1 && ok2 {
return col.ID
}
col, ok1 = e[1].(*expression.Column)
_, ok2 = e[0].(*expression.Constant)
if ok1 && ok2 {
return col.ID
}
return unknownColumnID
}
func pseudoSelectivity(coll *HistColl, exprs []expression.Expression) float64 {
minFactor := selectionFactor
colExists := make(map[string]bool)
for _, expr := range exprs {
fun, ok := expr.(*expression.ScalarFunction)
if !ok {
continue
}
colID := getConstantColumnID(fun.GetArgs())
if colID == unknownColumnID {
continue
}
switch fun.FuncName.L {
case ast.EQ, ast.NullEQ, ast.In:
minFactor = math.Min(minFactor, 1.0/pseudoEqualRate)
col, ok := coll.Columns[colID]
if !ok {
continue
}
colExists[col.Info.Name.L] = true
if mysql.HasUniKeyFlag(col.Info.Flag) {
return 1.0 / float64(coll.Count)
}
case ast.GE, ast.GT, ast.LE, ast.LT:
minFactor = math.Min(minFactor, 1.0/pseudoLessRate)
// FIXME: To resolve the between case.
}
}
if len(colExists) == 0 {
return minFactor
}
// use the unique key info
for _, idx := range coll.Indices {
if !idx.Info.Unique {
continue
}
unique := true
for _, col := range idx.Info.Columns {
if !colExists[col.Name.L] {
unique = false
break
}
}
if unique {
return 1.0 / float64(coll.Count)
}
}
return minFactor
}
// isColEqCorCol checks if the expression is a eq function that one side is correlated column and another is column.
// If so, it will return the column's reference. Otherwise return nil instead.
func isColEqCorCol(filter expression.Expression) *expression.Column {
f, ok := filter.(*expression.ScalarFunction)
if !ok || f.FuncName.L != ast.EQ {
return nil
}
if c, ok := f.GetArgs()[0].(*expression.Column); ok {
if _, ok := f.GetArgs()[1].(*expression.CorrelatedColumn); ok {
return c
}
}
if c, ok := f.GetArgs()[1].(*expression.Column); ok {
if _, ok := f.GetArgs()[0].(*expression.CorrelatedColumn); ok {
return c
}
}
return nil
}
// Selectivity is a function calculate the selectivity of the expressions.
// The definition of selectivity is (row count after filter / row count before filter).
// exprs must be in CNF, in other words, `exprs[0] and exprs[1] and ... and exprs[len - 1]` should hold when you call this.
// Currently the time complexity is O(n^2).
func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Expression, filledPaths []*planutil.AccessPath) (float64, []*StatsNode, error) {
// If table's count is zero or conditions are empty, we should return 100% selectivity.
if coll.Count == 0 || len(exprs) == 0 {
return 1, nil, nil
}
// TODO: If len(exprs) is bigger than 63, we could use bitset structure to replace the int64.
// This will simplify some code and speed up if we use this rather than a boolean slice.
if len(exprs) > 63 || (len(coll.Columns) == 0 && len(coll.Indices) == 0) {
return pseudoSelectivity(coll, exprs), nil, nil
}
ret := 1.0
var nodes []*StatsNode
sc := ctx.GetSessionVars().StmtCtx
remainedExprs := make([]expression.Expression, 0, len(exprs))
// Deal with the correlated column.
for _, expr := range exprs {
c := isColEqCorCol(expr)
if c == nil {
remainedExprs = append(remainedExprs, expr)
continue
}
if colHist := coll.Columns[c.UniqueID]; colHist == nil || colHist.IsInvalid(sc, coll.Pseudo) {
ret *= 1.0 / pseudoEqualRate
continue
}
colHist := coll.Columns[c.UniqueID]
if colHist.Histogram.NDV > 0 {
ret *= 1 / float64(colHist.Histogram.NDV)
} else {
ret *= 1.0 / pseudoEqualRate
}
}
extractedCols := make([]*expression.Column, 0, len(coll.Columns))
extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, remainedExprs, nil)
for id, colInfo := range coll.Columns {
col := expression.ColInfo2Col(extractedCols, colInfo.Info)
if col != nil {
maskCovered, ranges, _, err := getMaskAndRanges(ctx, remainedExprs, ranger.ColumnRangeType, nil, nil, col)
if err != nil {
return 0, nil, errors.Trace(err)
}
nodes = append(nodes, &StatsNode{Tp: ColType, ID: id, mask: maskCovered, Ranges: ranges, numCols: 1})
if colInfo.IsHandle {
nodes[len(nodes)-1].Tp = PkType
var cnt float64
cnt, err = coll.GetRowCountByIntColumnRanges(sc, id, ranges)
if err != nil {
return 0, nil, errors.Trace(err)
}
nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count)
continue
}
cnt, err := coll.GetRowCountByColumnRanges(sc, id, ranges)
if err != nil {
return 0, nil, errors.Trace(err)
}
nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count)
}
}
id2Paths := make(map[int64]*planutil.AccessPath)
for _, path := range filledPaths {
// Index merge path and table path don't have index.
if path.Index == nil {
continue
}
id2Paths[path.Index.ID] = path
}
for id, idxInfo := range coll.Indices {
idxCols := FindPrefixOfIndexByCol(extractedCols, coll.Idx2ColumnIDs[id], id2Paths[idxInfo.ID])
if len(idxCols) > 0 {
lengths := make([]int, 0, len(idxCols))
for i := 0; i < len(idxCols) && i < len(idxInfo.Info.Columns); i++ {
lengths = append(lengths, idxInfo.Info.Columns[i].Length)
}
// If we found more columns than the index holds, the int pk has been appended to the tail.
// When storing index data to the key-value store, we use (idx_col1, ...., idx_coln, handle_col) as the key.
if len(idxCols) > len(idxInfo.Info.Columns) {
lengths = append(lengths, types.UnspecifiedLength)
}
maskCovered, ranges, partCover, err := getMaskAndRanges(ctx, remainedExprs, ranger.IndexRangeType, lengths, id2Paths[idxInfo.ID], idxCols...)
if err != nil {
return 0, nil, errors.Trace(err)
}
cnt, err := coll.GetRowCountByIndexRanges(sc, id, ranges)
if err != nil {
return 0, nil, errors.Trace(err)
}
selectivity := cnt / float64(coll.Count)
nodes = append(nodes, &StatsNode{
Tp: IndexType,
ID: id,
mask: maskCovered,
Ranges: ranges,
numCols: len(idxInfo.Info.Columns),
Selectivity: selectivity,
partCover: partCover,
})
}
}
usedSets := GetUsableSetsByGreedy(nodes)
// Initialize the mask with the full set.
mask := (int64(1) << uint(len(remainedExprs))) - 1
for _, set := range usedSets {
mask &^= set.mask
ret *= set.Selectivity
// If `partCover` is true, it means that the conditions are in DNF form, and only part
// of the DNF expressions are extracted as access conditions, so besides from the selectivity
// of the extracted access conditions, we multiply another selectionFactor for the residual
// conditions.
if set.partCover {
ret *= selectionFactor
}
}
// Now we try to cover those still not covered DNF conditions using independence assumption,
// i.e., sel(condA or condB) = sel(condA) + sel(condB) - sel(condA) * sel(condB)
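// For example, with sel(condA) = 0.2 and sel(condB) = 0.5:
// sel(condA or condB) = 0.2 + 0.5 - 0.2*0.5 = 0.6.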
if mask > 0 {
OUTER:
for i, expr := range remainedExprs {
if mask&(1<<uint64(i)) == 0 {
continue
}
scalarCond, ok := expr.(*expression.ScalarFunction)
// Make sure we only handle DNF condition.
if !ok || scalarCond.FuncName.L != ast.LogicOr {
continue
}
// If there're columns not in stats, we won't handle them. This case might happen after DDL operations.
cols := expression.ExtractColumns(scalarCond)
for i := range cols {
if _, ok := coll.Columns[cols[i].UniqueID]; !ok {
continue OUTER
}
}
dnfItems := expression.FlattenDNFConditions(scalarCond)
dnfItems = ranger.MergeDNFItems4Col(ctx, dnfItems)
// If the conditions only contain a single column, we won't handle them.
if len(dnfItems) <= 1 {
continue
}
selectivity := 0.0
for _, cond := range dnfItems {
// In selectivity calculation, we don't handle CorrelatedColumn, so we directly skip over it.
// Other kinds of `Expression`, i.e., Constant, Column and ScalarFunction all can possibly be built into
// ranges and used to calculation selectivity, so we accept them all.
_, ok := cond.(*expression.CorrelatedColumn)
if ok {
continue
}
var cnfItems []expression.Expression
if scalar, ok := cond.(*expression.ScalarFunction); ok && scalar.FuncName.L == ast.LogicAnd {
cnfItems = expression.FlattenCNFConditions(scalar)
} else {
cnfItems = append(cnfItems, cond)
}
curSelectivity, _, err := coll.Selectivity(ctx, cnfItems, nil)
if err != nil {
logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err))
selectivity = selectionFactor
}
selectivity = selectivity + curSelectivity - selectivity*curSelectivity
}
if selectivity != 0 {
ret *= selectivity
mask &^= 1 << uint64(i)
}
}
}
// If there's still conditions which cannot be calculated, we will multiply a selectionFactor.
if mask > 0 {
ret *= selectionFactor
}
return ret, nodes, nil
}
func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, rangeType ranger.RangeType, lengths []int, cachedPath *planutil.AccessPath, cols ...*expression.Column) (mask int64, ranges []*ranger.Range, partCover bool, err error) {
sc := ctx.GetSessionVars().StmtCtx
isDNF := false
var accessConds, remainedConds []expression.Expression
switch rangeType {
case ranger.ColumnRangeType:
accessConds = ranger.ExtractAccessConditionsForColumn(exprs, cols[0])
ranges, err = ranger.BuildColumnRange(accessConds, sc, cols[0].RetType, types.UnspecifiedLength)
case ranger.IndexRangeType:
if cachedPath != nil {
ranges, accessConds, remainedConds, isDNF = cachedPath.Ranges, cachedPath.AccessConds, cachedPath.TableFilters, cachedPath.IsDNFCond
break
}
var res *ranger.DetachRangeResult
res, err = ranger.DetachCondAndBuildRangeForIndex(ctx, exprs, cols, lengths)
ranges, accessConds, remainedConds, isDNF = res.Ranges, res.AccessConds, res.RemainedConds, res.IsDNFCond
if err != nil {
return 0, nil, false, err
}
default:
panic("should never be here")
}
if err != nil {
return 0, nil, false, err
}
if isDNF && len(accessConds) > 0 {
mask |= 1
return mask, ranges, len(remainedConds) > 0, nil
}
for i := range exprs {
for j := range accessConds {
if exprs[i].Equal(ctx, accessConds[j]) {
mask |= 1 << uint64(i)
break
}
}
}
return mask, ranges, false, nil
}
// GetUsableSetsByGreedy will select the indices and pk used for calculate selectivity by greedy algorithm.
func GetUsableSetsByGreedy(nodes []*StatsNode) (newBlocks []*StatsNode) {
sort.Slice(nodes, func(i int, j int) bool {
if r := compareType(nodes[i].Tp, nodes[j].Tp); r != 0 {
return r < 0
}
return nodes[i].ID < nodes[j].ID
})
marked := make([]bool, len(nodes))
mask := int64(math.MaxInt64)
for {
// Choose the index that covers most.
bestID, bestCount, bestTp, bestNumCols, bestMask, bestSel := -1, 0, ColType, 0, int64(0), float64(0)
for i, set := range nodes {
if marked[i] {
continue
}
curMask := set.mask & mask
if curMask != set.mask {
marked[i] = true
continue
}
bits := bits.OnesCount64(uint64(curMask))
// This set cannot cover anything, just skip it.
if bits == 0 {
marked[i] = true
continue
}
// We greedy select the stats info based on:
// (1): The stats type, always prefer the primary key or index.
// (2): The number of expression that it covers, the more the better.
// (3): The number of columns that it contains, the less the better.
// (4): The selectivity of the covered conditions, the less the better.
// The rationale behind this is that lower selectivity tends to reflect more functional dependencies
// between columns. It's hard to decide the priority of this rule against rules 2 and 3; in order
// to avoid massive plan changes between tidb-server versions, we adopt the conservative strategy
// of imposing this rule after rules 2 and 3.
if (bestTp == ColType && set.Tp != ColType) ||
bestCount < bits ||
(bestCount == bits && bestNumCols > set.numCols) ||
(bestCount == bits && bestNumCols == set.numCols && bestSel > set.Selectivity) {
bestID, bestCount, bestTp, bestNumCols, bestMask, bestSel = i, bits, set.Tp, set.numCols, curMask, set.Selectivity
}
}
if bestCount == 0 {
break
}
// Update the mask, remove the bit that nodes[bestID].mask has.
mask &^= bestMask
newBlocks = append(newBlocks, nodes[bestID])
marked[bestID] = true
}
return
}
// FindPrefixOfIndexByCol will find columns in index by checking the unique id or the virtual expression.
// So it will return as soon as no matching column is found.
func FindPrefixOfIndexByCol(cols []*expression.Column, idxColIDs []int64, cachedPath *planutil.AccessPath) []*expression.Column {
if cachedPath != nil {
idxCols := cachedPath.IdxCols
retCols := make([]*expression.Column, 0, len(idxCols))
idLoop:
for _, idCol := range idxCols {
for _, col := range cols {
if col.EqualByExprAndID(nil, idCol) {
retCols = append(retCols, col)
continue idLoop
}
}
// If no matching column is found, just return.
return retCols
}
return retCols
}
return expression.FindPrefixOfIndex(cols, idxColIDs)
}
| getConstantColumnID |
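A minimal Python sketch (editorial, not from the TiDB codebase) of the two core calculations above: the inclusion-exclusion combination of per-item selectivities used in the loop (selectivity + cur - selectivity*cur, which assumes independence between conditions), and the greedy bitmask cover used by GetUsableSetsByGreedy. All names and values here are illustrative.

# Combine selectivities of CNF items assuming independence, mirroring
# `selectivity = selectivity + curSelectivity - selectivity*curSelectivity`.
def combine_or(selectivities):
    s = 0.0
    for cur in selectivities:
        s = s + cur - s * cur
    return s

# Greedy cover: each candidate stats node has a bitmask of the filter
# expressions it can estimate; repeatedly pick the node that covers the most
# still-uncovered expressions (tie-breaking rules are simplified here).
def greedy_cover(masks):
    remaining = (1 << 64) - 1
    chosen = []
    while True:
        best_i, best_bits = -1, 0
        for i, m in enumerate(masks):
            covered = bin(m & remaining).count("1")
            if covered > best_bits:
                best_i, best_bits = i, covered
        if best_bits == 0:
            return chosen
        remaining &= ~masks[best_i]
        chosen.append(best_i)

print(combine_or([0.1, 0.2]))        # 0.28
print(greedy_cover([0b011, 0b110]))  # [0, 1]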
brainwaredamio.py | '''
Class for reading from Brainware DAM files
DAM files are binary files for holding raw data. They are broken up into
sequence of Segments, each containing a single raw trace and parameters.
The DAM file does NOT contain a sampling rate, nor can it be reliably
calculated from any of the parameters. You can calculate it from
the "sweep length" attribute if it is present, but it isn't always present.
It is more reliable to get it from the corresponding SRC file or F32 file if
you have one.
The DAM file also does not divide up data into Blocks, so only a single
Block is returned.
Brainware was developed by Dr. Jan Schnupp and is available from
Tucker Davis Technologies, Inc.
http://www.tdt.com/downloads.htm
Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the
development of this code
The code is implemented with the permission of Dr. Jan Schnupp
Author: Todd Jennings
'''
# import needed core python modules
import os
import os.path
# numpy and quantities are already required by neo
import numpy as np
import quantities as pq
# needed core neo modules
from neo.core import (AnalogSignal, Block,
ChannelIndex, Segment)
# need to subclass BaseIO
from neo.io.baseio import BaseIO
| Class for reading Brainware raw data files with the extension '.dam'.
The read_block method returns the first Block of the file. It will
automatically close the file after reading.
The read method is the same as read_block.
Note:
The file format does not contain a sampling rate. The sampling rate
is set to 1 Hz, but this is arbitrary. If you have a corresponding .src
or .f32 file, you can get the sampling rate from that. It may also be
possible to infer it from the attributes, such as "sweep length", if
present.
Usage:
>>> from neo.io.brainwaredamio import BrainwareDamIO
>>> damfile = BrainwareDamIO(filename='multi_500ms_mulitrep_ch1.dam')
>>> blk1 = damfile.read()
>>> blk2 = damfile.read_block()
>>> print blk1.segments
>>> print blk1.segments[0].analogsignals
>>> print blk1.units
>>> print blk1.units[0].name
>>> print blk2
>>> print blk2[0].segments
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Block, ChannelIndex,
Segment, AnalogSignal]
readable_objects = [Block]
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff: a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {Block: []}
# do not support write so no GUI stuff
write_params = None
name = 'Brainware DAM File'
extensions = ['dam']
mode = 'file'
def __init__(self, filename=None):
'''
Arguments:
filename: the filename
'''
BaseIO.__init__(self)
self._path = filename
self._filename = os.path.basename(filename)
self._fsrc = None
def read(self, lazy=False, **kargs):
'''
Reads raw data file "fname" generated with BrainWare
'''
assert not lazy, 'Do not support lazy'
return self.read_block(lazy=lazy)
def read_block(self, lazy=False, **kargs):
'''
Reads a block from the raw data file "fname" generated
with BrainWare
'''
assert not lazy, 'Do not support lazy'
# there are no keyword arguments implemented so far. If someone tries to pass
# them they are expecting them to do something or making a mistake,
# neither of which should pass silently
if kargs:
raise NotImplementedError('This method does not have any '
'arguments implemented yet')
self._fsrc = None
block = Block(file_origin=self._filename)
# create the objects to store other objects
chx = ChannelIndex(file_origin=self._filename,
channel_ids=np.array([1]),
index=np.array([0]),
channel_names=np.array(['Chan1'], dtype='S'))
# load objects into their containers
block.channel_indexes.append(chx)
# open the file
with open(self._path, 'rb') as fobject:
# while the file is not done keep reading segments
while True:
seg = self._read_segment(fobject)
# if there are no more Segments, stop
if not seg:
break
# store the segment and signals
seg.analogsignals[0].channel_index = chx
block.segments.append(seg)
# remove the file object
self._fsrc = None
block.create_many_to_one_relationship()
return block
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# IMPORTANT!!!
# These are private methods implementing the internal reading mechanism.
# Due to the way BrainWare DAM files are structured, they CANNOT be used
# on their own. Calling these manually will almost certainly alter your
# position in the file in an unrecoverable manner, whether they throw
# an exception or not.
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def _read_segment(self, fobject):
'''
Read a single segment with a single analogsignal
Returns the segment, or False if there are no more segments
'''
try:
# float64 -- start time of the AnalogSignal
t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]
except IndexError:
# if there are no more Segments, return
return False
# int16 -- index of the stimulus parameters
seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()
# int16 -- number of stimulus parameters
numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]
# read the name strings for the stimulus parameters
paramnames = []
for _ in range(numelements):
# unit8 -- the number of characters in the string
numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]
# char * numchars -- a single name string
name = np.fromfile(fobject, dtype=np.uint8, count=numchars)
# exclude invalid characters
name = str(name[name >= 32].view('c').tostring())
# add the name to the list of names
paramnames.append(name)
# float32 * numelements -- the values for the stimulus parameters
paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)
# combine parameter names and the parameters as a dict
params = dict(zip(paramnames, paramvalues))
# int32 -- the number of elements in the AnalogSignal
numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]
# int16 * numpts -- the AnalogSignal itself
signal = np.fromfile(fobject, dtype=np.int16, count=numpts)
sig = AnalogSignal(signal.astype(np.float) * pq.mV,
t_start=t_start * pq.d,
file_origin=self._filename,
sampling_period=1. * pq.s,
copy=False)
# Note: setting the sampling_period to 1 s is arbitrary
# load the AnalogSignal and parameters into a new Segment
seg = Segment(file_origin=self._filename,
index=seg_index,
**params)
seg.analogsignals = [sig]
return seg | class BrainwareDamIO(BaseIO):
""" |
AccessChoice.tsx | import React from 'react';
import {
LockedIcon,
} from '@patternfly/react-icons'
import { |
type AccessChoiceProps = {
checkedValue: Access,
onChange(access: Access): void,
}
export default function AccessChoice({checkedValue, onChange}: AccessChoiceProps) {
return (<>
<Radio id="access-0" name="PUBLIC"
isChecked={checkedValue === 0}
onChange={() => onChange(0)}
label={ <><LockedIcon style={{ fill: "var(--pf-global--success-color--200)"}} /> Public</> } />
<Radio id="access-1" name="PROTECTED"
isChecked={checkedValue === 1}
onChange={() => onChange(1)}
label={ <><LockedIcon style={{ fill: "var(--pf-global--warning-color--100)"}} /> Protected</>} />
<Radio id="access-2" name="PRIVATE"
isChecked={checkedValue === 2}
onChange={() => onChange(2)}
label={ <><LockedIcon style={{ fill: "var(--pf-global--danger-color--100)"}} /> Private</>} />
</>)
} | Radio,
} from '@patternfly/react-core';
import { Access } from '../auth'; |
kernel.go | // Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/go-kit/kit/log"
"github.com/hyperledger/burrow/bcm"
"github.com/hyperledger/burrow/consensus/tendermint"
"github.com/hyperledger/burrow/consensus/tendermint/abci"
"github.com/hyperledger/burrow/event"
"github.com/hyperledger/burrow/execution"
"github.com/hyperledger/burrow/genesis"
"github.com/hyperledger/burrow/keys"
"github.com/hyperledger/burrow/logging"
"github.com/hyperledger/burrow/logging/structure"
"github.com/hyperledger/burrow/process"
"github.com/hyperledger/burrow/rpc"
"github.com/hyperledger/burrow/rpc/metrics"
"github.com/hyperledger/burrow/rpc/rpcevents"
"github.com/hyperledger/burrow/rpc/rpcinfo"
"github.com/hyperledger/burrow/rpc/rpcquery"
"github.com/hyperledger/burrow/rpc/rpctransact"
"github.com/hyperledger/burrow/txs"
"github.com/streadway/simpleuuid"
tmConfig "github.com/tendermint/tendermint/config"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/node"
tmTypes "github.com/tendermint/tendermint/types"
)
const (
FirstBlockTimeout = 3 * time.Second
CooldownTime = 1000 * time.Millisecond
ServerShutdownTimeout = 1000 * time.Millisecond
LoggingCallerDepth = 5
AccountsRingMutexCount = 100
)
// Kernel is the root structure of Burrow
type Kernel struct {
// Expose these public-facing interfaces to allow programmatic extension of the Kernel by other projects
Emitter event.Emitter
Service *rpc.Service
Launchers []process.Launcher
State *execution.State
Blockchain *bcm.Blockchain
Node *tendermint.Node
// Time-based UUID randomly generated each time Burrow is started
RunID simpleuuid.UUID
Logger *logging.Logger
nodeInfo string
processes map[string]process.Process
shutdownNotify chan struct{}
shutdownOnce sync.Once
}
func NewKernel(ctx context.Context, keyClient keys.KeyClient, privValidator tmTypes.PrivValidator,
genesisDoc *genesis.GenesisDoc, tmConf *tmConfig.Config, rpcConfig *rpc.RPCConfig, keyConfig *keys.KeysConfig,
keyStore *keys.KeyStore, exeOptions []execution.ExecutionOption, logger *logging.Logger) (*Kernel, error) {
var err error
kern := &Kernel{
processes: make(map[string]process.Process),
shutdownNotify: make(chan struct{}),
}
// Create a random ID based on start time
kern.RunID, err = simpleuuid.NewTime(time.Now())
logger = logger.WithScope("NewKernel()").With(structure.TimeKey, log.DefaultTimestampUTC,
structure.RunId, kern.RunID.String())
tmLogger := logger.With(structure.CallerKey, log.Caller(LoggingCallerDepth+1))
kern.Logger = logger.WithInfo(structure.CallerKey, log.Caller(LoggingCallerDepth))
stateDB := dbm.NewDB("burrow_state", dbm.GoLevelDBBackend, tmConf.DBDir())
kern.Blockchain, err = bcm.LoadOrNewBlockchain(stateDB, genesisDoc, kern.Logger)
if err != nil {
return nil, fmt.Errorf("error creating or loading blockchain state: %v", err)
}
// These should be in sync unless we are at the genesis block
if kern.Blockchain.LastBlockHeight() > 0 {
kern.Logger.InfoMsg("Loading application state")
kern.State, err = execution.LoadState(stateDB, kern.Blockchain.AppHashAfterLastBlock())
if err != nil {
return nil, fmt.Errorf("could not load persisted execution state at hash 0x%X: %v",
kern.Blockchain.AppHashAfterLastBlock(), err)
}
} else {
kern.State, err = execution.MakeGenesisState(stateDB, genesisDoc)
if err != nil {
return nil, fmt.Errorf("could not build genesis state: %v", err)
}
}
kern.Logger.InfoMsg("State loading successful")
txCodec := txs.NewAminoCodec()
tmGenesisDoc := tendermint.DeriveGenesisDoc(genesisDoc)
checker := execution.NewBatchChecker(kern.State, kern.Blockchain, keyClient, kern.Logger)
kern.Emitter = event.NewEmitter(kern.Logger)
committer := execution.NewBatchCommitter(kern.State, kern.Blockchain, kern.Emitter, keyClient, kern.Logger,
exeOptions...)
kern.nodeInfo = fmt.Sprintf("Burrow_%s_ValidatorID:%X", genesisDoc.ChainID(), privValidator.GetAddress())
app := abci.NewApp(kern.nodeInfo, kern.Blockchain, checker, committer, txCodec, kern.Panic, logger)
// We could use this to provide/register our own metrics (though this will register them with us). Unfortunately
// Tendermint currently ignores the metrics passed unless its own server is turned on.
metricsProvider := node.DefaultMetricsProvider
kern.Node, err = tendermint.NewNode(tmConf, privValidator, tmGenesisDoc, app, metricsProvider, tmLogger)
if err != nil {
return nil, err
}
transactor := execution.NewTransactor(kern.Blockchain, kern.Emitter, execution.NewAccounts(checker, keyClient, AccountsRingMutexCount),
kern.Node.MempoolReactor().BroadcastTx, txCodec, kern.Logger)
nameRegState := kern.State
accountState := kern.State
nodeView, err := tendermint.NewNodeView(kern.Node, txCodec, kern.RunID)
if err != nil {
return nil, err
}
kern.Service = rpc.NewService(accountState, nameRegState, kern.Blockchain, nodeView, kern.Logger)
kern.Launchers = []process.Launcher{
{
Name: "Profiling Server",
Enabled: rpcConfig.Profiler.Enabled,
Launch: func() (process.Process, error) {
debugServer := &http.Server{
Addr: ":6060",
}
go func() {
err := debugServer.ListenAndServe()
if err != nil {
kern.Logger.InfoMsg("Error from pprof debug server", structure.ErrorKey, err)
}
}()
return debugServer, nil
},
},
{
Name: "Database",
Enabled: true,
Launch: func() (process.Process, error) {
// Just close database
return process.ShutdownFunc(func(ctx context.Context) error {
stateDB.Close()
return nil
}), nil
},
},
{
Name: "Tendermint",
Enabled: true,
Launch: func() (process.Process, error) {
err := kern.Node.Start()
if err != nil {
return nil, fmt.Errorf("error starting Tendermint node: %v", err)
}
return process.ShutdownFunc(func(ctx context.Context) error {
err := kern.Node.Stop()
// Close tendermint database connections using our wrapper
defer kern.Node.Close()
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-kern.Node.Quit():
kern.Logger.InfoMsg("Tendermint Node has quit, closing DB connections...")
return nil
}
}), nil
},
},
{
Name: "RPC/tm",
Enabled: rpcConfig.TM.Enabled,
Launch: func() (process.Process, error) {
server, err := rpcinfo.StartServer(kern.Service, "/websocket", rpcConfig.TM.ListenAddress, kern.Logger)
if err != nil {
return nil, err
}
return server, nil
},
},
{
Name: "RPC/metrics",
Enabled: rpcConfig.Metrics.Enabled,
Launch: func() (process.Process, error) {
server, err := metrics.StartServer(kern.Service, rpcConfig.Metrics.MetricsPath,
rpcConfig.Metrics.ListenAddress, rpcConfig.Metrics.BlockSampleSize, kern.Logger)
if err != nil {
return nil, err
}
return server, nil
},
},
{
Name: "RPC/GRPC",
Enabled: rpcConfig.GRPC.Enabled,
Launch: func() (process.Process, error) {
listen, err := net.Listen("tcp", rpcConfig.GRPC.ListenAddress)
if err != nil {
return nil, err
}
grpcServer := rpc.NewGRPCServer(kern.Logger)
var ks *keys.KeyStore
if keyStore != nil {
ks = keyStore
}
if keyConfig.GRPCServiceEnabled {
if keyStore == nil {
ks = keys.NewKeyStore(keyConfig.KeysDirectory, keyConfig.AllowBadFilePermissions, kern.Logger)
}
keys.RegisterKeysServer(grpcServer, ks)
}
rpcquery.RegisterQueryServer(grpcServer, rpcquery.NewQueryServer(kern.State, nameRegState,
kern.Blockchain, nodeView, kern.Logger))
rpctransact.RegisterTransactServer(grpcServer, rpctransact.NewTransactServer(transactor, txCodec))
rpcevents.RegisterExecutionEventsServer(grpcServer, rpcevents.NewExecutionEventsServer(kern.State,
kern.Emitter, kern.Blockchain, kern.Logger))
// Provides metadata about services registered
//reflection.Register(grpcServer)
go grpcServer.Serve(listen)
return process.ShutdownFunc(func(ctx context.Context) error {
grpcServer.Stop()
// listener is closed for us
return nil
}), nil
},
},
}
return kern, nil
}
// Boot the kernel starting Tendermint and RPC layers
func (kern *Kernel) Boot() error {
for _, launcher := range kern.Launchers {
if launcher.Enabled {
srvr, err := launcher.Launch()
if err != nil {
return fmt.Errorf("error launching %s server: %v", launcher.Name, err)
} | }
}
go kern.supervise()
return nil
}
func (kern *Kernel) Panic(err error) {
fmt.Fprintf(os.Stderr, "%s: Kernel shutting down due to panic: %v", kern.nodeInfo, err)
kern.Shutdown(context.Background())
os.Exit(1)
}
// Wait for a graceful shutdown
func (kern *Kernel) WaitForShutdown() {
// Supports multiple goroutines waiting for shutdown since channel is closed
<-kern.shutdownNotify
}
// Supervise kernel once booted
func (kern *Kernel) supervise() {
// perform disaster restarts of the kernel; rejoining the network as if we were a new node.
shutdownCh := make(chan os.Signal, 1)
reloadCh := make(chan os.Signal, 1)
syncCh := make(chan os.Signal, 1)
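// Note: SIGKILL cannot be caught or ignored by a process, so registering
// syscall.SIGKILL with signal.Notify below has no effect.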
signal.Notify(shutdownCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
signal.Notify(reloadCh, syscall.SIGHUP)
signal.Notify(syncCh, syscall.SIGUSR1)
for {
select {
case <-reloadCh:
kern.Logger.Reload()
case <-syncCh:
kern.Logger.Sync()
case sig := <-shutdownCh:
kern.Logger.InfoMsg(fmt.Sprintf("Caught %v signal so shutting down", sig),
"signal", sig.String())
kern.Shutdown(context.Background())
return
}
}
}
// Stop the kernel allowing for a graceful shutdown of components in order
func (kern *Kernel) Shutdown(ctx context.Context) (err error) {
kern.shutdownOnce.Do(func() {
logger := kern.Logger.WithScope("Shutdown")
logger.InfoMsg("Attempting graceful shutdown...")
logger.InfoMsg("Shutting down servers")
ctx, cancel := context.WithTimeout(ctx, ServerShutdownTimeout)
defer cancel()
// Shutdown servers in reverse order to boot
for i := len(kern.Launchers) - 1; i >= 0; i-- {
name := kern.Launchers[i].Name
srvr, ok := kern.processes[name]
if ok {
logger.InfoMsg("Shutting down server", "server_name", name)
sErr := srvr.Shutdown(ctx)
if sErr != nil {
logger.InfoMsg("Failed to shutdown server",
"server_name", name,
structure.ErrorKey, sErr)
if err == nil {
err = sErr
}
}
}
}
logger.InfoMsg("Shutdown complete")
structure.Sync(kern.Logger.Info)
structure.Sync(kern.Logger.Trace)
// We don't want to wait for them, but yielding for a cooldown lets other goroutines flush
// potentially interesting final output (e.g. log messages)
time.Sleep(CooldownTime)
close(kern.shutdownNotify)
})
return
} |
kern.processes[launcher.Name] = srvr |
test_blackrock_rstart.py | from features.arduino_features import BlackrockSerialDIORowByte, SerialDIORowByte
from riglib import experiment
class par(object):
def init(self):
pass
class | (BlackrockSerialDIORowByte, par):
pass
f = F()
f.init()
| F |
display.rs | use crate::error::SniprunError;
use crate::{DataHolder, ReturnMessageType};
use log::info;
use neovim_lib::{Neovim, NeovimApi, NeovimApiAsync};
use std::fmt;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
#[derive(Clone, Debug, Ord, PartialOrd, PartialEq, Eq)]
pub enum DisplayType {
Classic = 0,
NvimNotify,
VirtualTextOk,
VirtualTextErr,
Terminal,
LongTempFloatingWindow,
TempFloatingWindow,
Api,
}
use DisplayType::*;
impl FromStr for DisplayType {
type Err = SniprunError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"Classic" => Ok(Classic),
"VirtualTextOk" => Ok(VirtualTextOk),
"VirtualTextErr" => Ok(VirtualTextErr),
"Terminal" => Ok(Terminal),
"LongTempFloatingWindow" => Ok(LongTempFloatingWindow),
"TempFloatingWindow" => Ok(TempFloatingWindow),
"Api" => Ok(Api),
"NvimNotify" => Ok(NvimNotify),
_ => Err(SniprunError::InternalError(
"Invalid display type: ".to_string() + s,
)),
}
}
}
impl fmt::Display for DisplayType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let name = match &self {
DisplayType::Classic => "Classic",
DisplayType::VirtualTextOk => "VirtualTextOk",
DisplayType::VirtualTextErr => "VirtualTextErr",
DisplayType::Terminal => "Terminal",
DisplayType::LongTempFloatingWindow => "LongTempFloatingWindow",
DisplayType::TempFloatingWindow => "TempFloatingWindow",
DisplayType::Api => "Api",
DisplayType::NvimNotify => "NvimNotify",
};
write!(f, "{}", name)
}
}
pub fn display(result: Result<String, SniprunError>, nvim: Arc<Mutex<Neovim>>, data: &DataHolder) {
let mut display_type = data.display_type.clone();
display_type.sort();
display_type.dedup(); // now only unique display types
info!("Display type chosen: {:?}", display_type);
for dt in display_type.iter() {
match dt {
Classic => return_message_classic(&result, &nvim, &data.return_message_type, data),
VirtualTextOk => display_virtual_text(&result, &nvim, data, true),
VirtualTextErr => display_virtual_text(&result, &nvim, data, false),
Terminal => display_terminal(&result, &nvim, data),
LongTempFloatingWindow => display_floating_window(&result, &nvim, data, true),
TempFloatingWindow => display_floating_window(&result, &nvim, data, false),
Api => send_api(&result, &nvim, data),
NvimNotify => display_nvim_notify(&result, &nvim, data),
}
}
}
pub fn display_nvim_notify(
message: &Result<String, SniprunError>,
nvim: &Arc<Mutex<Neovim>>,
data: &DataHolder,
) {
let res = match message {
Ok(result) => nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".display_nvim_notify(\"{}\", true)",
no_output_wrap(result, data, &DisplayType::Terminal),
)),
Err(result) => nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".display_nvim_notify(\"{}\", false)",
no_output_wrap(&result.to_string(), data, &DisplayType::Terminal),
)),
};
info!("display notify res = {:?}", res);
}
pub fn send_api(
message: &Result<String, SniprunError>,
nvim: &Arc<Mutex<Neovim>>,
data: &DataHolder,
) {
match message {
Ok(result) => {
let mut nvim_instance = nvim.lock().unwrap();
nvim_instance.command_async(&format!(
"lua require\"sniprun.display\".send_api(\"{}\", true)",
no_output_wrap(result, data, &DisplayType::Terminal),
))
}
Err(result) => {
let mut nvim_instance = nvim.lock().unwrap();
nvim_instance.command_async(&format!(
"lua require\"sniprun.display\".send_api(\"{}\", false)",
no_output_wrap(&result.to_string(), data, &DisplayType::Terminal),
))
}
};
info!("!done displyaing notify");
}
pub fn display_virtual_text(
result: &Result<String, SniprunError>,
nvim: &Arc<Mutex<Neovim>>,
data: &DataHolder,
is_ok: bool,
) {
if is_ok != result.is_ok() {
return; //don't display unasked-for things
}
let namespace_id = nvim
.lock()
.unwrap()
.create_namespace("sniprun")
.unwrap();
info!("namespace_id = {:?}", namespace_id);
let last_line = data.range[1] - 1;
let res = nvim.lock().unwrap().command(&format!(
"call nvim_buf_clear_namespace(0,{},{},{})",
namespace_id,
data.range[0] - 1,
last_line + 1
));
info!("cleared previous virtual_text? {:?}", res);
let hl_ok = "SniprunVirtualTextOk";
let hl_err = "SniprunVirtualTextErr";
let res = match result {
Ok(message_ok) => {
if shorten_ok(&no_output_wrap(
message_ok,
data,
&DisplayType::VirtualTextOk,
))
.is_empty()
{
return;
}
nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".display_extmark({},{},\"{}\",\"{}\")",
namespace_id,
last_line,
shorten_ok(&no_output_wrap(
message_ok,
data,
&DisplayType::VirtualTextOk
)),
hl_ok
))
}
Err(message_err) => {
if shorten_err(&no_output_wrap(
&message_err.to_string(),
data,
&DisplayType::VirtualTextErr,
))
.is_empty()
{
return;
}
nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".display_extmark({},{},\"{}\",\"{}\")",
namespace_id,
last_line,
shorten_err(&no_output_wrap(
&message_err.to_string(),
data,
&DisplayType::VirtualTextErr
)),
hl_err
))
}
};
info!("done displaying virtual text, {:?}", res);
}
pub fn display_terminal(
message: &Result<String, SniprunError>,
nvim: &Arc<Mutex<Neovim>>,
data: &DataHolder,
) {
let res = match message {
Ok(result) => nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".write_to_term(\"{}\", true)",
no_output_wrap(result, data, &DisplayType::Terminal),
)),
Err(result) => nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".write_to_term(\"{}\", false)",
no_output_wrap(&result.to_string(), data, &DisplayType::Terminal),
)),
};
info!("display terminal res = {:?}", res);
}
pub fn display_floating_window(
message: &Result<String, SniprunError>,
nvim: &Arc<Mutex<Neovim>>,
data: &DataHolder,
long_only: bool,
) {
if long_only {
let do_no_display = match message {
Ok(message_ok) => message_ok.lines().count() <= 1,
Err(message_err) => message_err.to_string().lines().count() <= 1,
};
if do_no_display {
return; //do not display short messages
}
}
let col = data
.current_bloc
.lines()
.filter(|&line| !line.is_empty())
.last()
.unwrap_or(&data.current_line)
.len();
let row = data.range[0] + data.current_bloc.trim_end_matches('\n').lines().count() as i64 - 1;
info!(
"trying to open a floating window on row, col = {}, {}",
row, col
);
let res = match message {
Ok(result) => nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".fw_open({},{},\"{}\", true)",
row - 1,
col,
no_output_wrap(result, data, &DisplayType::TempFloatingWindow),
)),
Err(result) => nvim.lock().unwrap().command(&format!(
"lua require\"sniprun.display\".fw_open({},{},\"{}\", false)",
row - 1,
col,
no_output_wrap(&result.to_string(), data, &DisplayType::TempFloatingWindow),
)),
};
info!("disaply floating window res = {:?}", res);
}
pub fn return_message_classic(
message: &Result<String, SniprunError>,
nvim: &Arc<Mutex<Neovim>>,
rmt: &ReturnMessageType,
data: &DataHolder,
) {
match message {
Ok(answer_ok) => {
//make sure there is no lone "
let answer_str = no_output_wrap(answer_ok, data, &DisplayType::Classic);
info!("Final str {}", answer_str);
match rmt {
ReturnMessageType::Multiline => {
let _ = nvim
.lock()
.unwrap()
.command(&format!("echo \"{}\"", answer_str));
}
ReturnMessageType::EchoMsg => {
let _ = nvim
.lock()
.unwrap()
.command(&format!("echomsg \"{}\"", answer_str));
}
}
}
Err(e) => match rmt {
ReturnMessageType::Multiline => {
let _ = nvim.lock().unwrap().err_writeln(&format!("{}", e));
}
ReturnMessageType::EchoMsg => {
let _ = nvim.lock().unwrap().command(&format!(
"echohl ErrorMsg | echomsg \"{}\" | echohl None",
e
));
}
},
}
}
fn | (message: &str) -> String {
if message.is_empty() {
return String::new();
}
let mut marker = String::from("<- ");
if message.lines().count() > 1 {
marker += &".".repeat(std::cmp::max(2, std::cmp::min(6, message.lines().count())));
}
marker.to_string()
+ message
.lines()
.filter(|&s| !s.is_empty())
.last()
.unwrap_or("")
}
fn shorten_err(message: &str) -> String {
if message.is_empty() {
return String::new();
}
let mut marker = String::from("<- ") + message.lines().next().unwrap_or("");
if message.lines().count() > 1 {
marker += &".".repeat(std::cmp::max(3, std::cmp::min(10, message.lines().count())));
}
marker
}
fn cleanup_and_escape(message: &str) -> String {
let answer_str = message.replace("\\", "\\\\");
let answer_str = answer_str.replace("\\\"", "\"");
let answer_str = answer_str.replace("\"", "\\\"");
//remove trailing /starting newlines
let answer_str = answer_str
.trim_start_matches('\n')
.trim_end_matches('\n')
.to_string();
answer_str.replace("\n", "\\\n")
}
fn no_output_wrap(message: &str, data: &DataHolder, current_type: &DisplayType) -> String {
let message_clean = cleanup_and_escape(message);
for dt in data.display_no_output.iter() {
if dt == current_type && message_clean.is_empty() {
info!("Empty message converted to 'no output')");
return String::from("(no output)");
}
}
info!("message '{}' cleaned out", message_clean);
message_clean
}
| shorten_ok |
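An illustrative Python mirror (editorial, not part of sniprun) of what the shorten_ok truncation above produces for virtual text: single-line messages pass through after the "<- " marker, while multi-line messages are reduced to the marker, a run of dots scaled by the line count, and the last significant line.

def shorten_ok(message):
    if not message:
        return ""
    lines = message.split("\n")
    marker = "<- "
    if len(lines) > 1:
        marker += "." * max(2, min(6, len(lines)))
    non_empty = [l for l in lines if l]
    return marker + (non_empty[-1] if non_empty else "")

print(shorten_ok("result"))    # <- result
print(shorten_ok("a\nb\nc"))   # <- ...c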
guildMemberAdd.js | const profileModel = require('../../database/models/profileSchema');
let WelcomeSchema = require(`../../database/models/welcomeSchema`)
const Discord = require(`discord.js`) | const mongoose = require(`mongoose`)
module.exports = (client, member, GuildMember) => {
WelcomeSchema.findOne({ guildID: member.guild.id}, async (err, data, user) => {
//console.log(member.guild.id)
if(!data) return;
const channel = await client.channels.cache.find(x => x.id === `${data.WelcomeChannel}`)
channel.send(`Welcome ${member}, ${data.WelcomeMsg}`)
})
} | |
atores.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
DESTRUIDO = 'Destruido'
ATIVO = 'Ativo'
GRAVIDADE = 10 # m/s^2
class Ator:
"""
Class that represents an actor. It represents a Cartesian point on the screen.
"""
_caracter_ativo = 'A'
_caracter_destruido = ' '
def __init__(self, x=0, y=0):
"""
Class initialization method. It must initialize the x, y, caracter and status parameters
:param x: Initial horizontal position of the actor
:param y: Initial vertical position of the actor
"""
self.y = y
self.x = x
self.status = ATIVO
def caracter(self):
return self._caracter_ativo if self.status == ATIVO else self._caracter_destruido
def calcular_posicao(self, tempo):
"""
Method that calculates the actor's position at a given time.
Imagine that time starts at 0 and advances in steps of 0.01 seconds
:param tempo: the game time
:return: x, y position of the actor
"""
return self.x, self.y
def colidir(self, outro_ator, intervalo=1):
"""
Method that executes the collision logic between two actors.
A collision should only happen if both actors have active status.
For the collision, a square with side equal to the intervalo parameter is considered, around the point where
the actor is located. If the actors are inside that same square, their statuses must be changed to
destroyed, and their characters to destroyed as well.
:param outro_ator: Actor to be considered in the collision
:param intervalo: Interval to be considered
:return:
"""
if self.status==ATIVO and outro_ator.status==ATIVO:
delta_x=abs(self.x - outro_ator.x)
delta_y=abs(self.y - outro_ator.y)
if delta_x <= intervalo and delta_y <= intervalo:
self.status=outro_ator.status=DESTRUIDO
class Obstaculo(Ator):
_caracter_ativo = 'O'
class Porco(Ator):
_caracter_ativo = '@'
_caracter_destruido = '+'
class DuploLancamentoExcecao(Exception):
pass
class Passaro(Ator):
velocidade_escalar = 10
def __init__(self, x=0, y=0):
"""
Bird initialization method.
It must call the actor initialization. Besides that, it must store the initial position and initialize the
launch time and launch angle
:param x:
:param y:
"""
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
self._angulo_de_lancamento = None  # radians
def foi_lancado(self):
"""
Method that returns True if the bird has already been launched and False otherwise
:return: boolean
"""
return not self._tempo_de_lancamento is None
def colidir_com_chao(self):
"""
Method that executes the collision logic with the ground. Whenever y is less than or equal to 0,
the Passaro status must be changed to destroyed, as well as its character
"""
if self.y <=0:
self.status = DESTRUIDO
def calcular_posicao(self, tempo):
"""
Method that calculates the bird's position according to time.
Before launch the bird must return its initial position
After launch the bird must calculate its position according to its initial position, scalar speed,
launch angle, gravity (the GRAVIDADE constant) and the game time.
After the collision, i.e., having its status destroyed, the bird must only return the last calculated position.
:param tempo: game time at which the position is calculated
:return: x, y position
"""
if self._esta_voando():
delta_t = tempo - self._tempo_de_lancamento
self._calcular_posicao_vertical(delta_t)
self._calcular_posicao_horizontal(delta_t)
return super().calcular_posicao(tempo)
def lancar(self, angulo, tempo_de_lancamento):
"""
Logic that launches the bird. It must store the launch angle and time for later calculations.
The angle is passed in degrees and must be converted to radians
:param angulo:
:param tempo_de_lancamento:
:return:
"""
self._angulo_de_lancamento = math.radians(angulo)
self._tempo_de_lancamento = tempo_de_lancamento
def _calcular_posicao_vertical(self, delta_t):
y_atual = self._y_inicial | elf._angulo_de_lancamento
y_atual += self.velocidade_escalar * delta_t * math.sin(angulo_radianos)
y_atual -= (GRAVIDADE * (delta_t**2)) / 2
self.y = y_atual
def _calcular_posicao_horizontal(self, delta_t):
x_atual = self._x_inicial
angulo_radianos=self._angulo_de_lancamento
x_atual += self.velocidade_escalar * delta_t * math.cos(angulo_radianos)
self.x = x_atual
def _esta_voando(self):
return self.foi_lancado() and self.status == ATIVO
class PassaroAmarelo(Passaro):
_caracter_ativo = 'A'
_caracter_destruido = 'a'
velocidade_escalar = 30
class PassaroVermelho(Passaro):
_caracter_ativo = 'V'
_caracter_destruido = 'v'
velocidade_escalar = 20 |
angulo_radianos=s |
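A quick worked example (editorial) of the projectile kinematics implemented by Passaro.calcular_posicao above: x = x0 + v*t*cos(theta) and y = y0 + v*t*sin(theta) - g*t**2/2, with GRAVIDADE = 10 m/s^2. The values are arbitrary.

import math

v, angle_deg, g, t = 20, 45, 10, 1.0  # PassaroVermelho-like speed, arbitrary time
theta = math.radians(angle_deg)
x = v * t * math.cos(theta)                 # ~14.14
y = v * t * math.sin(theta) - g * t**2 / 2  # ~9.14
print(round(x, 2), round(y, 2))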
compare_and_branch.go | // asmcheck
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package codegen
//go:noinline
func dummy() {}
// Signed 64-bit compare-and-branch.
func si64(x, y chan int64) {
// s390x:"CGRJ\t[$](2|4), R[0-9]+, R[0-9]+, "
for <-x < <-y {
dummy()
}
// s390x:"CL?GRJ\t[$]8, R[0-9]+, R[0-9]+, "
for <-x == <-y {
dummy()
}
}
// Signed 64-bit compare-and-branch with 8-bit immediate.
func si64x8() {
// s390x:"CGIJ\t[$]12, R[0-9]+, [$]127, "
for i := int64(0); i < 128; i++ {
dummy()
}
// s390x:"CGIJ\t[$]10, R[0-9]+, [$]-128, "
for i := int64(0); i > -129; i-- {
dummy()
}
// s390x:"CGIJ\t[$]2, R[0-9]+, [$]127, "
for i := int64(0); i >= 128; i++ {
dummy()
}
// s390x:"CGIJ\t[$]4, R[0-9]+, [$]-128, "
for i := int64(0); i <= -129; i-- {
dummy()
}
}
// Unsigned 64-bit compare-and-branch.
func ui64(x, y chan uint64) {
// s390x:"CLGRJ\t[$](2|4), R[0-9]+, R[0-9]+, "
for <-x > <-y {
dummy()
}
// s390x:"CL?GRJ\t[$]6, R[0-9]+, R[0-9]+, "
for <-x != <-y {
dummy()
}
}
// Unsigned 64-bit comparison with 8-bit immediate.
func ui64x8() {
// s390x:"CLGIJ\t[$]4, R[0-9]+, [$]128, "
for i := uint64(0); i < 128; i++ {
dummy()
}
// s390x:"CLGIJ\t[$]12, R[0-9]+, [$]255, "
for i := uint64(0); i < 256; i++ {
dummy()
}
// s390x:"CLGIJ\t[$]2, R[0-9]+, [$]255, "
for i := uint64(0); i >= 256; i-- {
dummy()
}
// s390x:"CLGIJ\t[$]2, R[0-9]+, [$]0, "
for i := uint64(1024); i > 0; i-- {
dummy()
}
}
// Signed 32-bit compare-and-branch.
func si32(x, y chan int32) {
// s390x:"CRJ\t[$](2|4), R[0-9]+, R[0-9]+, "
for <-x < <-y {
dummy()
}
// s390x:"CL?RJ\t[$]8, R[0-9]+, R[0-9]+, "
for <-x == <-y {
dummy()
}
}
// Signed 32-bit compare-and-branch with 8-bit immediate.
func si32x8() {
// s390x:"CIJ\t[$]12, R[0-9]+, [$]127, "
for i := int32(0); i < 128; i++ {
dummy()
}
// s390x:"CIJ\t[$]10, R[0-9]+, [$]-128, "
for i := int32(0); i > -129; i-- {
dummy()
}
// s390x:"CIJ\t[$]2, R[0-9]+, [$]127, "
for i := int32(0); i >= 128; i++ {
dummy()
}
// s390x:"CIJ\t[$]4, R[0-9]+, [$]-128, "
for i := int32(0); i <= -129; i-- {
dummy()
}
}
// Unsigned 32-bit compare-and-branch.
func ui32(x, y chan uint32) {
// s390x:"CLRJ\t[$](2|4), R[0-9]+, R[0-9]+, "
for <-x > <-y {
dummy()
}
// s390x:"CL?RJ\t[$]6, R[0-9]+, R[0-9]+, "
for <-x != <-y {
dummy()
}
}
// Unsigned 32-bit comparison with 8-bit immediate.
func ui32x8() {
// s390x:"CLIJ\t[$]4, R[0-9]+, [$]128, "
for i := uint32(0); i < 128; i++ {
dummy()
}
// s390x:"CLIJ\t[$]12, R[0-9]+, [$]255, "
for i := uint32(0); i < 256; i++ {
dummy()
}
// s390x:"CLIJ\t[$]2, R[0-9]+, [$]255, "
for i := uint32(0); i >= 256; i-- {
dummy()
}
// s390x:"CLIJ\t[$]2, R[0-9]+, [$]0, "
for i := uint32(1024); i > 0; i-- {
dummy()
}
}
// Signed 64-bit comparison with unsigned 8-bit immediate.
func si64xu8(x chan int64) {
// s390x:"CLGIJ\t[$]8, R[0-9]+, [$]128, "
for <-x == 128 {
dummy()
}
// s390x:"CLGIJ\t[$]6, R[0-9]+, [$]255, "
for <-x != 255 {
dummy()
}
}
// Signed 32-bit comparison with unsigned 8-bit immediate.
func si32xu8(x chan int32) {
// s390x:"CLIJ\t[$]8, R[0-9]+, [$]255, "
for <-x == 255 {
dummy()
}
// s390x:"CLIJ\t[$]6, R[0-9]+, [$]128, "
for <-x != 128 { | }
}
// Unsigned 64-bit comparison with signed 8-bit immediate.
func ui64xu8(x chan uint64) {
// s390x:"CGIJ\t[$]8, R[0-9]+, [$]-1, "
for <-x == ^uint64(0) {
dummy()
}
// s390x:"CGIJ\t[$]6, R[0-9]+, [$]-128, "
for <-x != ^uint64(127) {
dummy()
}
}
// Unsigned 32-bit comparison with signed 8-bit immediate.
func ui32xu8(x chan uint32) {
// s390x:"CIJ\t[$]8, R[0-9]+, [$]-128, "
for <-x == ^uint32(127) {
dummy()
}
// s390x:"CIJ\t[$]6, R[0-9]+, [$]-1, "
for <-x != ^uint32(0) {
dummy()
}
} | dummy() |
dataset.py | from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import torch.nn as nn
import numpy as np
import torch
from pathlib import Path
import collections
import numbers
import random
import os
class BirdDataset(Dataset):
def __init__(self, root_dir, mode, transform=None):
self.root_dir = root_dir
self.x = []
self.y = []
self.transform = transform
if mode == "train":
labels = open(os.path.join(self.root_dir, 'new_train_label.txt'))
elif mode == 'eval':
labels = open(os.path.join(self.root_dir, 'new_eval_label.txt'))
for label in labels:
label_list = label.split(',')
self.x.append(label_list[0])
self.y.append(int(label_list[1]))
def __len__(self):
return len(self.x)
def __getitem__(self, index):
image_path = self.x[index]
image = Image.open(image_path).convert('RGB')
image = image.copy()
if self.transform:
image = self.transform(image)
return image, self.y[index]
def Dataloader(dataset, batch_size, shuffle, num_workers):
data_loader = DataLoader(
dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return data_loader
def | (x):
output = x.convert("HSV")
return output
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return hshift, vshift
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)
def make_dataset(mode, data_root, img_size):
colour_transform = transforms.Lambda(lambda x: _random_colour_space(x))
transform = [
transforms.RandomAffine(degrees=30, shear=50, fillcolor=0),
transforms.RandomGrayscale(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomPerspective(
distortion_scale=0.5, p=0.5, fill=0),
transforms.RandomVerticalFlip(p=0.5),
transforms.ColorJitter(
brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
RandomShift(3),
transforms.RandomApply([colour_transform]),
]
data_transform_train = transforms.Compose([
transforms.RandomResizedCrop(img_size),
transforms.RandomApply(transform, p=0.5),
transforms.RandomApply([transforms.RandomRotation(
(-90, 90), expand=False, center=None)], p=0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
0.229, 0.224, 0.225])
])
data_transform_dev = transforms.Compose([
transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
0.229, 0.224, 0.225])
])
data_transform_test = transforms.Compose([
transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
if (mode == "train"):
data_set = BirdDataset(data_root, mode, data_transform_train)
elif (mode == "eval"):
data_set = BirdDataset(data_root, mode, data_transform_dev)
elif (mode == "test"):
data_set = BirdDataset(data_root, mode, data_transform_test)
return data_set
| _random_colour_space |
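A hedged usage sketch of the pieces above; the data root path is hypothetical, and the label files are assumed to contain "<image_path>,<label>" lines as parsed by BirdDataset.

train_set = make_dataset(mode="train", data_root="./data", img_size=224)
train_loader = Dataloader(train_set, batch_size=32, shuffle=True, num_workers=4)
for images, labels in train_loader:
    # e.g. torch.Size([32, 3, 224, 224]) torch.Size([32])
    print(images.shape, labels.shape)
    break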
issue-29161.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod a {
struct A;
impl Default for A {
pub fn default() -> A {
A;
}
}
}
fn | () {
a::A::default();
//~^ ERROR struct `A` is private
}
| main |
decodeUTF8.js | // https://gist.github.com/pascaldekloe/62546103a1576803dade9269ccf76330
| var c = bytes[i++];
if (c > 127) {
if (c > 191 && c < 224) {
if (i >= bytes.length) throw 'UTF-8 decode: incomplete 2-byte sequence';
c = (c & 31) << 6 | bytes[i] & 63;
} else if (c > 223 && c < 240) {
if (i + 1 >= bytes.length) throw 'UTF-8 decode: incomplete 3-byte sequence';
c = (c & 15) << 12 | (bytes[i] & 63) << 6 | bytes[++i] & 63;
} else if (c > 239 && c < 248) {
if (i+2 >= bytes.length) throw 'UTF-8 decode: incomplete 4-byte sequence';
c = (c & 7) << 18 | (bytes[i] & 63) << 12 | (bytes[++i] & 63) << 6 | bytes[++i] & 63;
} else throw 'UTF-8 decode: unknown multibyte start 0x' + c.toString(16) + ' at index ' + (i - 1);
++i;
}
if (c <= 0xffff) s += String.fromCharCode(c);
else if (c <= 0x10ffff) {
c -= 0x10000;
s += String.fromCharCode(c >> 10 | 0xd800)
s += String.fromCharCode(c & 0x3FF | 0xdc00)
} else throw 'UTF-8 decode: code point 0x' + c.toString(16) + ' exceeds UTF-16 reach';
}
return s;
}
function encodeUTF8(s) {
var i = 0;
var bytes = new Uint8Array(s.length * 4);
for (var ci = 0; ci != s.length; ci++) {
var c = s.charCodeAt(ci);
if (c < 128) {
bytes[i++] = c;
continue;
}
if (c < 2048) {
bytes[i++] = c >> 6 | 192;
} else {
if (c > 0xd7ff && c < 0xdc00) {
if (++ci == s.length) throw 'UTF-8 encode: incomplete surrogate pair';
var c2 = s.charCodeAt(ci);
if (c2 < 0xdc00 || c2 > 0xdfff) throw 'UTF-8 encode: second char code 0x' + c2.toString(16) + ' at index ' + ci + ' in surrogate pair out of range';
c = 0x10000 + ((c & 0x03ff) << 10) + (c2 & 0x03ff);
bytes[i++] = c >> 18 | 240;
bytes[i++] = c >> 12 & 63 | 128;
} else { // c <= 0xffff
bytes[i++] = c >> 12 | 224;
}
bytes[i++] = c >> 6 & 63 | 128;
}
bytes[i++] = c & 63 | 128;
}
return bytes.subarray(0, i);
}
module.exports = {
decodeUTF8,
encodeUTF8
} | function decodeUTF8(bytes, offset, length) {
var s = '';
var i = offset;
while (i < offset + length) { |
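An editorial round-trip check of the byte-length rules the functions above implement, using Python's built-in UTF-8 codec as a reference: 1-byte ASCII, 2-byte (leader 0xC2-0xDF), 3-byte (0xE0-0xEF), and 4-byte (0xF0-0xF7) sequences, the last of which is the case that needs a UTF-16 surrogate pair in JavaScript strings.

for ch in ["A", "é", "€", "𝄞"]:
    b = ch.encode("utf-8")
    print(ch, len(b), [hex(x) for x in b], b.decode("utf-8") == ch)
# Prints 1-, 2-, 3- and 4-byte sequences respectively, each round-tripping to True.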
packet_generator.py | import random, sys
from socket import *
hostsA = ['192.168.128.7', '192.168.128.1']
hostsC = ['192.224.0.5', '192.224.0.7', '192.224.10.5', '192.224.15.6']
def gen_packets(packet_num=5):
|
# main method to launch the client
def main():
argv = sys.argv
if len(argv) != 2:
print 'Usage: python packet_generator.py <router port>'
sys.exit(1)
if not argv[1].isdigit():
print 'Port must be a number'
sys.exit(1)
port = int(argv[1])
# create a client socket and connect to server
clientsock = socket(AF_INET, SOCK_DGRAM) # Create a socket object
print 'Open a connection'
clientsock.connect(('127.0.0.1', port))
i = 0
try:
while True:
packet_id = i
source = random.choice(hostsA)
destination = random.choice(hostsC)
ttl = random.randint(1, 4)
packet = '%d, %s, %s, %d, testing' % (packet_id, source, destination, ttl)
print 'sent ' + packet
clientsock.send(packet)
i += 1
except KeyboardInterrupt as err:
print 'Warning: terminated by user'
clientsock.send('$')
sys.exit(1)
if __name__ == "__main__":
main()
| packets = []
for i in range(0, packet_num):
packet_id = i
source = random.choice(hostsA)
destination = random.choice(hostsC)
ttl = random.randint(1, 4)
packets.append('%d, %s, %s, %d, testing' % (packet_id, source, destination, ttl))
print packets |
jobHelpers.go | package testhelpers
import (
"fmt"
"math/rand"
"sort"
"time"
log "github.com/sirupsen/logrus"
"github.com/apache/thrift/lib/go/thrift"
"github.com/twitter/scoot/common/dialer"
"github.com/twitter/scoot/scheduler/api/thrift/gen-go/scoot"
"github.com/twitter/scoot/scheduler/client"
)
// Creates a CloudScootClient that talks to the specified address
func CreateScootClient(addr string) *client.CloudScootClient {
transportFactory := thrift.NewTTransportFactory()
protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
di := dialer.NewSimpleDialer(transportFactory, protocolFactory, time.Minute)
scootClient := client.NewCloudScootClient(
client.CloudScootClientConfig{
Addr: addr,
Dialer: di,
})
return scootClient
}
// Generates a random JobDefinition with the given number of tasks and snapshot ID.
// Use StartJob below to schedule it with a client.
func GenerateJob(numTasks int, snapshotID string) *scoot.JobDefinition {
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
jobDef := GenJobDefinition(rng, numTasks, snapshotID)
return jobDef
}
func StartJob(client *client.CloudScootClient, job *scoot.JobDefinition) string |
// Waits until all jobs specified have completed running or the
// specified timeout has occurred. Periodically the status of
// running jobs is printed to the console
func WaitForJobsToCompleteAndLogStatus(
jobIds []string,
client scoot.CloudScoot,
timeout time.Duration,
) error {
jobs := make(map[string]*scoot.JobStatus)
for _, id := range jobIds {
jobs[id] = nil
}
end := time.Now().Add(timeout)
for {
if time.Now().After(end) {
return fmt.Errorf("Took longer than %v", timeout)
}
done := true
for jobId, oldStatus := range jobs {
if !IsJobCompleted(oldStatus) {
currStatus, err := client.GetStatus(jobId)
// if there is an error just continue
if err != nil {
log.Infof("Error: Updating Job Status ID: %v will retry later, Error: %v", jobId, err)
done = false
} else {
jobs[jobId] = currStatus
done = done && IsJobCompleted(currStatus)
}
}
}
PrintJobs(jobs)
if done {
log.Info("Done")
return nil
}
time.Sleep(time.Second)
}
}
// Show job progress in the format <jobId> (<done>/<total>), e.g. ffb16fef-13fd-486c-6070-8df9c7b80dce (9997/10000)
type jobProgress struct {
id string
numDone int
numTasks int
}
func (p jobProgress) String() string { return fmt.Sprintf("%s (%d/%d)", p.id, p.numDone, p.numTasks) }
// Prints the current status of the specified Jobs to the Log
func PrintJobs(jobs map[string]*scoot.JobStatus) {
byStatus := make(map[scoot.Status][]string)
for k, v := range jobs {
st := scoot.Status_NOT_STARTED
if v != nil {
st = v.Status
}
byStatus[st] = append(byStatus[st], k)
}
for _, v := range byStatus {
sort.Sort(sort.StringSlice(v))
}
inProgress := byStatus[scoot.Status_IN_PROGRESS]
progs := make([]jobProgress, len(inProgress))
for i, jobID := range inProgress {
jobStatus := jobs[jobID]
tasks := jobStatus.TaskStatus
numDone := 0
for _, st := range tasks {
if st == scoot.Status_COMPLETED {
numDone++
}
}
progs[i] = jobProgress{id: jobID, numTasks: len(tasks), numDone: numDone}
}
log.Info()
log.Info("Job Status")
log.Info("Waiting", byStatus[scoot.Status_NOT_STARTED])
log.Info("Running", progs)
log.Info("Done", byStatus[scoot.Status_COMPLETED])
}
// Returns true if a job is completed or failed, false otherwise
func IsJobCompleted(s *scoot.JobStatus) bool {
return s != nil && (s.Status == scoot.Status_COMPLETED || s.Status == scoot.Status_ROLLED_BACK)
}
| {
for {
j, err := client.RunJob(job)
if err == nil {
return j.ID
}
// retry starting job until it succeeds.
// this is useful for testing where we are restarting the scheduler
log.Infof("Error Starting Job: Retrying %v", err)
}
} |
event_ctx.rs | use crate::input::ContextMenu;
use crate::text::FONT_SIZE;
use crate::{
Canvas, Color, GeomBatch, GfxCtx, HorizontalAlignment, Text, UserInput, VerticalAlignment,
};
use abstutil::{elapsed_seconds, Timer, TimerSink};
use geom::Polygon;
use glium::implement_vertex;
use glium_glyph::glyph_brush::rusttype::Font;
use glium_glyph::glyph_brush::rusttype::Scale;
use glium_glyph::glyph_brush::GlyphCruncher;
use glium_glyph::GlyphBrush;
use std::cell::Cell;
use std::collections::VecDeque;
use std::time::Instant;
// Something that's been sent to the GPU already.
pub struct | {
pub(crate) vertex_buffer: glium::VertexBuffer<Vertex>,
pub(crate) index_buffer: glium::IndexBuffer<u32>,
}
#[derive(Copy, Clone)]
pub(crate) struct Vertex {
position: [f32; 2],
// TODO Maybe pass color as a uniform instead
// TODO Or have a fixed palette of colors and just index into it
color: [u8; 4],
}
implement_vertex!(Vertex, position, color);
// TODO Don't expose this directly
pub struct Prerender<'a> {
pub(crate) display: &'a glium::Display,
pub(crate) num_uploads: Cell<usize>,
// TODO Prerender doesn't know what things are temporary and permanent. Could make the API more
// detailed (and use the corresponding persistent glium types).
pub(crate) total_bytes_uploaded: Cell<usize>,
}
impl<'a> Prerender<'a> {
pub fn upload_borrowed(&self, list: Vec<(Color, &Polygon)>) -> Drawable {
self.actually_upload(true, list)
}
pub fn upload(&self, batch: GeomBatch) -> Drawable {
let borrows = batch.list.iter().map(|(c, p)| (*c, p)).collect();
self.actually_upload(true, borrows)
}
pub fn get_total_bytes_uploaded(&self) -> usize {
self.total_bytes_uploaded.get()
}
pub(crate) fn upload_temporary(&self, list: Vec<(Color, &Polygon)>) -> Drawable {
self.actually_upload(false, list)
}
fn actually_upload(&self, permanent: bool, list: Vec<(Color, &Polygon)>) -> Drawable {
self.num_uploads.set(self.num_uploads.get() + 1);
let mut vertices: Vec<Vertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
for (color, poly) in list {
let idx_offset = vertices.len();
let (pts, raw_indices) = poly.raw_for_rendering();
for pt in pts {
vertices.push(Vertex {
position: [pt.x() as f32, pt.y() as f32],
color: [
f32_to_u8(color.0[0]),
f32_to_u8(color.0[1]),
f32_to_u8(color.0[2]),
f32_to_u8(color.0[3]),
],
});
}
for idx in raw_indices {
indices.push((idx_offset + *idx) as u32);
}
}
let vertex_buffer = if permanent {
glium::VertexBuffer::immutable(self.display, &vertices).unwrap()
} else {
glium::VertexBuffer::new(self.display, &vertices).unwrap()
};
let index_buffer = if permanent {
glium::IndexBuffer::immutable(
self.display,
glium::index::PrimitiveType::TrianglesList,
&indices,
)
.unwrap()
} else {
glium::IndexBuffer::new(
self.display,
glium::index::PrimitiveType::TrianglesList,
&indices,
)
.unwrap()
};
if permanent {
self.total_bytes_uploaded.set(
self.total_bytes_uploaded.get()
+ vertex_buffer.get_size()
+ index_buffer.get_size(),
);
}
Drawable {
vertex_buffer,
index_buffer,
}
}
}
pub struct EventCtx<'a> {
pub input: &'a mut UserInput,
// TODO These two probably shouldn't be public
pub canvas: &'a mut Canvas,
pub prerender: &'a Prerender<'a>,
pub(crate) program: &'a glium::Program,
}
impl<'a> EventCtx<'a> {
pub fn loading_screen<O, F: FnOnce(&mut EventCtx, &mut Timer) -> O>(
&mut self,
timer_name: &str,
f: F,
) -> O {
let mut timer = Timer::new_with_sink(
timer_name,
Box::new(LoadingScreen::new(
self.prerender,
self.program,
self.canvas.window_width,
self.canvas.window_height,
timer_name.to_string(),
)),
);
f(self, &mut timer)
}
pub fn redo_mouseover(&self) -> bool {
self.input.window_lost_cursor()
|| (!self.canvas.is_dragging() && self.input.get_moved_mouse().is_some())
}
}
pub struct LoadingScreen<'a> {
canvas: Canvas,
prerender: &'a Prerender<'a>,
program: &'a glium::Program,
lines: VecDeque<String>,
max_capacity: usize,
last_drawn: Option<Instant>,
title: String,
}
impl<'a> LoadingScreen<'a> {
pub fn new(
prerender: &'a Prerender<'a>,
program: &'a glium::Program,
initial_width: f64,
initial_height: f64,
title: String,
) -> LoadingScreen<'a> {
// TODO Ew! Expensive and wacky. Fix by not storing GlyphBrush in Canvas at all.
let dejavu: &[u8] = include_bytes!("assets/DejaVuSans.ttf");
let screenspace_glyphs =
GlyphBrush::new(prerender.display, vec![Font::from_bytes(dejavu).unwrap()]);
let mapspace_glyphs =
GlyphBrush::new(prerender.display, vec![Font::from_bytes(dejavu).unwrap()]);
let canvas = Canvas::new(
initial_width,
initial_height,
screenspace_glyphs,
mapspace_glyphs,
);
// TODO Dupe code
let vmetrics = canvas.screenspace_glyphs.borrow().fonts()[0]
.v_metrics(Scale::uniform(FONT_SIZE as f32));
let line_height = f64::from(vmetrics.ascent - vmetrics.descent + vmetrics.line_gap);
LoadingScreen {
canvas,
prerender,
program,
lines: VecDeque::new(),
max_capacity: (0.8 * initial_height / line_height) as usize,
last_drawn: None,
title,
}
}
// Timer throttles updates reasonably, so don't bother throttling redraws.
fn redraw(&mut self) {
if let Some(t) = self.last_drawn {
if elapsed_seconds(t) < 0.2 {
return;
}
}
self.last_drawn = Some(Instant::now());
let mut txt = Text::prompt(&self.title);
txt.override_width = Some(self.canvas.window_width * 0.8);
txt.override_height = Some(self.canvas.window_height * 0.8);
for l in &self.lines {
txt.add_line(l.to_string());
}
let mut target = self.prerender.display.draw();
let context_menu = ContextMenu::new();
let mut g = GfxCtx::new(
&self.canvas,
self.prerender,
&mut target,
self.program,
&context_menu,
false,
);
g.clear(Color::BLACK);
// TODO Keep the width fixed.
g.draw_blocking_text(
&txt,
(HorizontalAlignment::Center, VerticalAlignment::Center),
);
self.canvas
.screenspace_glyphs
.borrow_mut()
.draw_queued(self.prerender.display, &mut target);
// LoadingScreen doesn't use mapspace_glyphs
target.finish().unwrap();
}
}
impl<'a> TimerSink for LoadingScreen<'a> {
// TODO Do word wrap. Assume the window is fixed during loading, if it makes things easier.
fn println(&mut self, line: String) {
if self.lines.len() == self.max_capacity {
self.lines.pop_front();
}
self.lines.push_back(line);
self.redraw();
}
fn reprintln(&mut self, line: String) {
self.lines.pop_back();
self.lines.push_back(line);
self.redraw();
}
}
fn f32_to_u8(x: f32) -> u8 {
(x * 255.0) as u8
}
| Drawable |
abstract.ts | /** Base class Department definition */
/* Commented
abstract class Department {
// private employees: string[] = [];
protected employees: string[] = [];
constructor (readonly id: string, public name: string) {
}
abstract describe (this: Department): void;
addEmployee (employee: string) {
this.employees.push(employee);
}
printEmployeeInformation () {
console.log(this.employees.length);
console.log(this.employees);
}
}
// ITDepartment Class definition
class ITDepartment extends Department {
constructor(id: string, public admins: string[]){
super(id, 'IT');
}
|
printAdmins (this: ITDepartment) {
console.log(`Admins ${this.admins}`);
}
}
// AccountingDepartment Class definition
class AccountingDepartment extends Department {
private lastReport: string;
// Getter
get mostRecentReport(){
if (this.lastReport){
return this.lastReport;
}
throw new Error('No report found!');
}
//Setter
set mostRecentReport(report: string){
if (!report){
// return;
throw new Error('Please, pass in a valid value!');
}
this.addReport(report);
}
constructor(id: string, private reports: string[]){
super(id, 'Accounting');
this.lastReport = reports[0];
}
// Implementation of the abstract method describe
describe(this: AccountingDepartment){
console.log(`Department Name: ${this.id}: ${this.name}`)
}
// overriding the addEmployee method
addEmployee(name: string){
if (name === 'Carlos'){
return;
}
this.employees.push(name)
}
addReport (text: string){
this.reports.push(text);
this.lastReport = text;
}
printReports (){
console.log(this.reports);
}
}
// IT Department
const it = new ITDepartment('d1', ['Carlos']);
it.addEmployee('Celso');
it.addEmployee('Edgar');
// Accounting Department
const accounting = new AccountingDepartment('d2', []);
accounting.addReport('FTE KPI');
accounting.addEmployee('Leonardo');
accounting.addEmployee('Steven');
// Calling the setter method
accounting.mostRecentReport = 'Sales Report';
// Logs
console.log(it);
console.log(accounting);
console.log(accounting.mostRecentReport);
// Calling implementations of the abstract methods
it.describe();
accounting.describe();
*/ | // Implementation of the abstract method describe
describe(this: ITDepartment){
console.log(`Department Description: ${this.id}: ${this.name}`)
} |
aspect_sentiment.py | import spacy
from textblob import TextBlob
import pandas as pd
# Import functions from other files
from tweet_handlers import pullTweetsFromCSV, tweetPulls
### Declare functions to standardize, identify, and analyze input text
# Will ultimately take in a list of tweets and return:
# - Word counts
# - Split of positive / negative aspects
# - Brand identification?
# visualizeText() is a function to diagram sentences to help with troubleshooting
# Inputs:
# - nlp: an NLP object,
# - txt = a string containing the sentence to be diagramed,
# - writeFilename: a string containing the filename to write the HTML diagram to
# Returns:
# - writeFilename: the path of the file that contains the HTML diagram
def visualizeText(nlp, txt, writeFilename):
doc = nlp(txt)
html = spacy.displacy.render(doc, style='dep')
filePath = './' + writeFilename + '.html'
with open(filePath, 'w') as f:
f.write(html)
return filePath
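# Usage sketch (hypothetical sentence and output name, not from the original):
#   nlp = spacy.load("es_core_news_sm")
#   path = visualizeText(nlp, "El coche rojo corre rápido.", "diagram")
# then open ./diagram.html in a browser to inspect the dependency parse.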
# extractDescriptors() is a function to pull aspects and descriptors from a list of sentences
# Inputs:
# - nlp: an NLP object,
# - sentenceList: a list of strings containing the sentences to be analyzed
# Outputs:
# - list of dictionaries containing 'aspect' and 'description' -- not broken by tweet
def extractDescriptors(nlp, sentenceList):
#We'll ultimately return this aspects list
aspects = []
aspects_lemma = []
attributes = []
attributes_lemma = []
#We will iterate through the sentences
for i, aSentence in enumerate( sentenceList ):
if i % 100 == 0: print("Tweet# ", str(i))
doc = nlp(aSentence)
for token in doc:
###TODO:
# Currently there's no standardization that makes it a 1:1 Noun + Adjective, so that needs to be fixed
# Also need to add in a case that checks for pronoun resolution and sees what we can do about that
# We need to identify each noun, and find its descendants that are (pos_ == 'ADJ' or pos_ == 'VERB') and (dep_ == 'amod' or dep_ == 'acl')
# Modifying rule to examine ALL nouns, not just the subject of the sentence
#if token.dep_ == 'nsubj' and token.pos_ == 'NOUN':
if (token.pos_ == 'ADJ' or token.pos_ == 'VERB') and (token.dep_ == 'amod' or token.dep_ == 'acl'):
#Now append the things
aspects.append (token.head.text)
aspects_lemma.append(token.head.lemma_)
attributes.append( token.text )
attributes_lemma.append( token.lemma_ )
return ( aspects , attributes, aspects_lemma, attributes_lemma )
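# Illustrative example (assumed, not from the original): in "the red car",
# "red" is an ADJ with dep_ == 'amod' whose head is "car", so "car" lands in
# aspects and "red" in attributes (plus their lemmas).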
# Need a function that pulls attributes for each keyword in the tweet DF, since we need them to be kept separate
# extractTweetAttributes:
# Takes a DF of tweets, keywords, etc. and pulls out adjectives for each
# Inputs:
# - nlp: an NLP object,
# - tweet_df: pandas dataframe containing colums:
# - Tweet
# - Keyword
# - Spanish
# - Date
# Returns:
# - attribute_df: dataframe containing the list of...
# ...aspects & attributes for each keyword / spanish pair
def extractTweetAttributes(nlp, tweet_df):
# define the return df
attribute_df = pd.DataFrame( columns = [
'Keyword',
'Spanish',
'aspect',
'attribute',
'aspect_lemma',
'attribute_lemma'
])
# Now create a set for the different keywords and Spanish words
keySet = set( tweet_df['Keyword'] )
for aKey in keySet:
print("Extracting ", aKey)
spanishWord = tweet_df.loc[ tweet_df['Keyword'] == aKey ]['Spanish'].iloc[0]
# And this is where we actually add the various analyses
( aspectList , attributeList, aspectList_lemma, attributeList_lemma ) = extractDescriptors( nlp, tweet_df[ tweet_df['Keyword'] == aKey ]['tweet'] )
# Now that we've got the data, create lookup lists for the Keyword & Spanish words
keyList = [aKey] * len(aspectList)
spanishList = [spanishWord] * len(aspectList)
temp_df = pd.DataFrame({
'Keyword': keyList,
'Spanish': spanishList,
'aspect': aspectList,
'attribute': attributeList,
'aspect_lemma': aspectList_lemma,
'attribute_lemma': attributeList_lemma
})
# Finally, append the data for this keyword to the attribute dataframe
attribute_df = attribute_df.append( temp_df )
return attribute_df
def | ( aspect_df ):
temp_df = pd.DataFrame({
'Keyword': aspect_df['Keyword'],
'Spanish': aspect_df['Spanish'],
'aspect': aspect_df['aspect_lemma'],
'attribute': aspect_df['attribute_lemma']
})
return temp_df.value_counts()
# In the main, this is where the tweet files are loaded...
# ...and routed through the analysis functions
if __name__ == "__main__":
print("In the main")
# Create the NLP object that will be used for all the text processing
#nlp = spacy.load("en_core_web_sm")
# We're actually using a Spanish NLP object instead of an English one
nlp = spacy.load("es_core_news_sm")
# Pull in CSV files that hold all the tweets
tweetFileList = [
'./tweet_data/tweet_db_08.27.2021.csv'
]
# Create the DF of tweets from the CSV File
tweet_df = pullTweetsFromCSV( tweetFileList )#, fileEncoding='ANSI' )
# Instead of pulling tweets from a file, we're going to get new tweets
# First we need to designate a list of english + spanish keywords to search for
keyword_df = pd.read_csv('./keyword_list.csv')
#tweet_df = tweetPulls( keyword_df )
#Save the tweet-df because of errors
#tweet_df.to_csv('./tweet_data/tweet_db_08.27.2021.csv')#, encoding='ANSI')
# Run the tweets through the attribute extractor
aspect_df = extractTweetAttributes ( nlp, tweet_df)
# Run the aspects & attributes through a modified version of the wordcount function
count_df = countAttributes( aspect_df )
# - Not to mention run some sort of pronoun resolution
count_df.to_csv('./tweet_data/aspect_count_08.27.2021.csv') | countAttributes |
controller_test.go | package controller
import (
"bytes"
"fmt"
"os"
"testing"
"time"
"github.com/bogdanovich/siberite/repository"
"github.com/stretchr/testify/assert"
)
var dir = "./test_data"
var name = "test"
var err error
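// MockTCPConn stands in for a real TCP connection in tests: Write calls are
// captured in WriteBuffer and Read calls are served from ReadBuffer.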
type MockTCPConn struct {
WriteBuffer bytes.Buffer
ReadBuffer bytes.Buffer
}
func | () *MockTCPConn {
conn := &MockTCPConn{}
return conn
}
func (conn *MockTCPConn) Read(b []byte) (int, error) {
return conn.ReadBuffer.Read(b)
}
func (conn *MockTCPConn) Write(b []byte) (int, error) {
return conn.WriteBuffer.Write(b)
}
func (conn *MockTCPConn) SetDeadline(t time.Time) error {
return nil
}
func TestMain(m *testing.M) {
_ = os.RemoveAll(dir)
err = os.MkdirAll(dir, 0777)
if err != nil {
fmt.Println(err)
}
result := m.Run()
err = os.RemoveAll(dir)
os.Exit(result)
}
func Test_NewSession_FinishSession(t *testing.T) {
repo, err := repository.Initialize(dir)
defer repo.CloseAllQueues()
assert.Nil(t, err)
mockTCPConn := NewMockTCPConn()
c := NewSession(mockTCPConn, repo)
assert.Equal(t, uint64(1), repo.Stats.CurrentConnections)
assert.Equal(t, uint64(1), repo.Stats.TotalConnections)
c.FinishSession()
assert.Equal(t, uint64(0), repo.Stats.CurrentConnections)
}
func Test_ReadFirstMessage(t *testing.T) {
repo, err := repository.Initialize(dir)
defer repo.CloseAllQueues()
assert.Nil(t, err)
mockTCPConn := NewMockTCPConn()
controller := NewSession(mockTCPConn, repo)
fmt.Fprintf(&mockTCPConn.ReadBuffer, "GET work\r\n")
message, err := controller.ReadFirstMessage()
assert.Nil(t, err)
assert.Equal(t, "GET work\r\n", message)
fmt.Fprintf(&mockTCPConn.ReadBuffer, "SET work 0 0 10\r\n0123456789\r\n")
message, err = controller.ReadFirstMessage()
assert.Nil(t, err)
assert.Equal(t, "SET work 0 0 10\r\n", message)
}
func Test_UnknownCommand(t *testing.T) {
repo, err := repository.Initialize(dir)
defer repo.CloseAllQueues()
assert.Nil(t, err)
mockTCPConn := NewMockTCPConn()
controller := NewSession(mockTCPConn, repo)
err = controller.UnknownCommand()
assert.Equal(t, "ERROR Unknown command", err.Error())
assert.Equal(t, "ERROR Unknown command\r\n", mockTCPConn.WriteBuffer.String())
}
func Test_SendError(t *testing.T) {
repo, err := repository.Initialize(dir)
defer repo.CloseAllQueues()
assert.Nil(t, err)
mockTCPConn := NewMockTCPConn()
controller := NewSession(mockTCPConn, repo)
controller.SendError("Test error message")
assert.Equal(t, "Test error message\r\n", mockTCPConn.WriteBuffer.String())
}
| NewMockTCPConn |
log.go | package audit
import (
"context"
"sync"
dcontext "github.com/docker/distribution/context"
"github.com/sirupsen/logrus"
)
const (
LogEntryType = "openshift.logger"
AuditUserEntry = "openshift.auth.user"
AuditUserIDEntry = "openshift.auth.userid"
AuditStatusEntry = "openshift.request.status"
AuditErrorEntry = "openshift.request.error"
auditLoggerKey = "openshift.audit.logger"
DefaultLoggerType = "registry"
AuditLoggerType = "audit"
OpStatusBegin = "begin"
OpStatusError = "error"
OpStatusOK = "success"
)
// Logger implements a special audit log. We can't use the system logger because
// a change of log level could hide the audit logs.
type Logger struct {
mu sync.Mutex
ctx context.Context
logger *logrus.Logger
}
// NewLogger returns new audit logger which inherits fields from the system logger.
func NewLogger(ctx context.Context) *Logger {
logger := &Logger{
logger: logrus.New(),
ctx: ctx,
}
if entry, ok := dcontext.GetLogger(ctx).(*logrus.Entry); ok {
logger.SetFormatter(entry.Logger.Formatter)
} else if lgr, ok := dcontext.GetLogger(ctx).(*logrus.Logger); ok {
logger.SetFormatter(lgr.Formatter)
}
return logger
}
// SetFormatter sets the audit logger formatter.
func (l *Logger) SetFormatter(formatter logrus.Formatter) {
l.mu.Lock()
defer l.mu.Unlock()
l.logger.Formatter = formatter
}
// Log logs record.
func (l *Logger) Log(args ...interface{}) {
auditFields := logrus.Fields{
LogEntryType: AuditLoggerType,
AuditStatusEntry: OpStatusBegin,
}
l.getEntry().WithFields(auditFields).Info(args...)
}
// Logf formats record according to a format.
func (l *Logger) Logf(format string, args ...interface{}) {
auditFields := logrus.Fields{
LogEntryType: AuditLoggerType,
}
l.getEntry().WithFields(auditFields).Infof(format, args...)
}
// LogResult logs record with additional operation status.
func (l *Logger) LogResult(err error, args ...interface{}) {
auditFields := logrus.Fields{
LogEntryType: AuditLoggerType,
AuditStatusEntry: OpStatusOK,
}
if err != nil |
l.getEntry().WithFields(auditFields).Info(args...)
}
// LogResultf formats record according to a format with additional operation status.
func (l *Logger) LogResultf(err error, format string, args ...interface{}) {
auditFields := logrus.Fields{
LogEntryType: AuditLoggerType,
AuditStatusEntry: OpStatusOK,
}
if err != nil {
auditFields[AuditErrorEntry] = err
auditFields[AuditStatusEntry] = OpStatusError
}
l.getEntry().WithFields(auditFields).Infof(format, args...)
}
func (l *Logger) getEntry() *logrus.Entry {
if entry, ok := dcontext.GetLogger(l.ctx).(*logrus.Entry); ok {
return l.logger.WithFields(entry.Data)
}
return logrus.NewEntry(l.logger)
}
// LoggerExists checks audit logger existence.
func LoggerExists(ctx context.Context) (exists bool) {
_, exists = ctx.Value(auditLoggerKey).(*Logger)
return
}
// GetLogger returns the logger from the current context, if present. It will be created otherwise.
func GetLogger(ctx context.Context) *Logger {
if logger, ok := ctx.Value(auditLoggerKey).(*Logger); ok {
return logger
}
return NewLogger(ctx)
}
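// Usage sketch (assumed wiring, not from this file): attach a logger to the
// request context once, then retrieve it downstream:
//
//	ctx = audit.WithLogger(ctx, audit.NewLogger(ctx))
//	audit.GetLogger(ctx).Log("blob upload initiated")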
// WithLogger creates a new context with provided logger.
func WithLogger(ctx context.Context, logger *Logger) context.Context {
return context.WithValue(ctx, auditLoggerKey, logger)
}
| {
auditFields[AuditErrorEntry] = err
auditFields[AuditStatusEntry] = OpStatusError
} |
SubscribeForm.tsx | import config from 'config';
type Props = {
children: JSX.Element | Array<JSX.Element>,
}; |
export default function SubscribeForm({ children }: Props) {
return (
<form
action={config.subscribeForm}
method='post'
target='popupwindow'
>
{children}
</form>
);
} | |
manage_pulse_designs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.5 on Mon Jul 27 15:21:25 2015
import wx
# begin wxGlade: extracode
# end wxGlade
class MyDialog(wx.Dialog):
def | (self, *args, **kwds):
# begin wxGlade: MyDialog.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
wx.Dialog.__init__(self, *args, **kwds)
self.label_2 = wx.StaticText(self, wx.ID_ANY, "")
self.label_1 = wx.StaticText(self, wx.ID_ANY, "")
self.ButtonView = wx.Button(self, wx.ID_ANY, "&View...")
self.ButtonClone = wx.Button(self, wx.ID_ANY, "C&lone")
self.ButtonDelete = wx.Button(self, wx.ID_DELETE, "")
self.ListPulseDesigns = wx.ListCtrl(self, wx.ID_ANY, style=wx.BORDER_SUNKEN | wx.LC_REPORT)
self.ButtonImport = wx.Button(self, wx.ID_ANY, "&Import...")
self.ButtonExport = wx.Button(self, wx.ID_ANY, "E&xport...")
self.ButtonClose = wx.Button(self, wx.ID_CLOSE, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_view, self.ButtonView)
self.Bind(wx.EVT_BUTTON, self.on_clone, self.ButtonClone)
self.Bind(wx.EVT_BUTTON, self.on_delete, self.ButtonDelete)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.on_pulse_design_activated, self.ListPulseDesigns)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.on_selection_changed, self.ListPulseDesigns)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_selection_changed, self.ListPulseDesigns)
self.Bind(wx.EVT_BUTTON, self.on_import, self.ButtonImport)
self.Bind(wx.EVT_BUTTON, self.on_export, self.ButtonExport)
self.Bind(wx.EVT_BUTTON, self.on_close, self.ButtonClose)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyDialog.__set_properties
self.SetTitle("Manage Pulse Designs")
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyDialog.__do_layout
sizer_4 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.FlexGridSizer(3, 2, 10, 0)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2_copy = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_1.Add((20, 20), 0, 0, 0)
sizer_2_copy.Add(self.label_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
sizer_2_copy.Add((20, 20), 0, 0, 0)
sizer_2.Add(sizer_2_copy, 1, wx.EXPAND, 0)
sizer_1.Add(self.label_1, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 5)
sizer_1.Add((20, 20), 0, 0, 0)
sizer_2.Add(sizer_1, 1, wx.EXPAND, 0)
grid_sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
sizer_5.Add(self.ButtonView, 0, wx.TOP, 5)
sizer_5.Add(self.ButtonClone, 0, wx.TOP, 5)
sizer_5.Add(self.ButtonDelete, 0, wx.TOP, 30)
grid_sizer_1.Add(sizer_5, 0, wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
grid_sizer_1.Add(self.ListPulseDesigns, 1, wx.EXPAND, 0)
grid_sizer_1.Add((20, 20), 0, 0, 0)
sizer_6.Add(self.ButtonImport, 0, 0, 0)
sizer_6.Add(self.ButtonExport, 0, wx.LEFT, 10)
sizer_6.Add((20, 20), 1, wx.EXPAND, 0)
sizer_6.Add(self.ButtonClose, 0, 0, 0)
grid_sizer_1.Add(sizer_6, 0, wx.BOTTOM | wx.EXPAND, 10)
grid_sizer_1.AddGrowableRow(1)
grid_sizer_1.AddGrowableCol(1)
sizer_4.Add(grid_sizer_1, 1, wx.ALL | wx.EXPAND, 10)
self.SetSizer(sizer_4)
sizer_4.Fit(self)
self.Layout()
# end wxGlade
def on_view(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_view' not implemented!")
event.Skip()
def on_clone(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_clone' not implemented!")
event.Skip()
def on_delete(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_delete' not implemented!")
event.Skip()
def on_selection_changed(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_selection_changed' not implemented!")
event.Skip()
def on_pulse_design_activated(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_pulse_design_activated' not implemented!")
event.Skip()
def on_import(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_import' not implemented!")
event.Skip()
def on_export(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_export' not implemented!")
event.Skip()
def on_close(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_close' not implemented!")
event.Skip()
# end of class MyDialog
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
DialogManagePulseDesigns = MyDialog(None, -1, "")
app.SetTopWindow(DialogManagePulseDesigns)
DialogManagePulseDesigns.Show()
app.MainLoop()
| __init__ |
client.py | import aiohttp
import rapidjson as json
import socket
from config import Config
from typing import List, Tuple
class RPCClient(object):
_instance = None
def __init__(self):
raise RuntimeError('Call instance() instead')
@classmethod
def instance(cls) -> 'RPCClient':
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls.node_url = Config.instance().node_url
cls.node_port = Config.instance().node_port
cls.wallet_id = Config.instance().wallet
cls.ipv6 = '::' in cls.node_url
cls.connector = aiohttp.TCPConnector(family=socket.AF_INET6 if cls.ipv6 else socket.AF_INET,resolver=aiohttp.AsyncResolver())
cls.session = aiohttp.ClientSession(connector=cls.connector, json_serialize=json.dumps)
return cls._instance
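# Usage sketch (assumed): always go through instance() so every caller shares
# one session, e.g.
#   rpc = RPCClient.instance()
#   balance = await rpc.account_balance(account)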
@classmethod
async def close(cls):
if hasattr(cls, 'session') and cls.session is not None:
await cls.session.close()
if cls._instance is not None:
cls._instance = None
async def make_request(self, req_json: dict):
async with self.session.post("http://{0}:{1}".format(self.node_url, self.node_port),json=req_json, timeout=300) as resp:
return await resp.json()
async def account_create(self) -> str:
account_create = {
'action': 'account_create',
'wallet': self.wallet_id
}
respjson = await self.make_request(account_create)
if 'account' in respjson:
return respjson['account']
return None
async def account_balance(self, account: str) -> dict:
account_balance = {
'action': 'account_balance',
'account': account
}
respjson = await self.make_request(account_balance)
if 'balance' in respjson:
return respjson
return None
async def send(self, id: str, source: str, destination: str, amount: str) -> str:
"""Make transaction, return hash if successful"""
send_action = {
'action': 'send',
'wallet': Config.instance().wallet,
'source': source,
'destination': destination,
'amount': amount,
'id': id
}
respjson = await self.make_request(send_action)
if 'block' in respjson:
return respjson['block']
return None
async def pending(self, account: str, count: int = 5) -> List[str]:
|
async def receive(self, account: str, hash: str) -> str:
"""Receive a block and return hash of receive block if successful"""
receive_action = {
'action': 'receive',
'wallet': Config.instance().wallet,
'account': account,
'block': hash
}
respjson = await self.make_request(receive_action)
if 'block' in respjson:
return respjson['block']
return None
async def account_info(self, account: str) -> dict:
info_action = {
'action': 'account_info',
'account': account,
'representative': True
}
respjson = await self.make_request(info_action)
if 'error' not in respjson:
return respjson
return None
async def account_representative_set(self, account: str, rep: str) -> str:
rep_action = {
"action": "account_representative_set",
"wallet": Config.instance().wallet,
"account": account,
"representative": rep
}
respjson = await self.make_request(rep_action)
if 'block' in respjson:
return respjson['block']
return None
async def block_count(self) -> Tuple[int, int]:
"Returns block_count from the node as a tuple count, unchecked"
count_action = {
"action": "block_count"
}
respjson = await self.make_request(count_action)
if 'count' in respjson and 'unchecked' in respjson:
return int(respjson['count']), int(respjson['unchecked'])
return None, None | """Return a list of pending blocks"""
pending_action = {
'action': 'pending',
'account': account,
'count': count
}
respjson = await self.make_request(pending_action)
if 'blocks' in respjson:
return respjson['blocks']
return None |
mod.rs | #[cfg(feature = "lattice")]
use crossbeam::Sender;
pub const URL_SCHEME: &str = "wasmbus";
#[cfg(feature = "lattice")]
use crate::{BindingsList, RouteKey};
#[cfg(feature = "lattice")]
use std::collections::HashMap;
#[cfg(feature = "lattice")]
use std::sync::{Arc, RwLock}; | use wascap::jwt::{Actor, Claims};
#[cfg(feature = "lattice")]
use wascc_codec::capabilities::CapabilityDescriptor;
#[cfg(not(feature = "lattice"))]
pub(crate) mod inproc;
#[cfg(feature = "lattice")]
pub(crate) mod lattice;
#[cfg(not(feature = "lattice"))]
pub(crate) use inproc::InprocBus as MessageBus;
#[cfg(feature = "lattice")]
pub(crate) use lattice::DistributedBus as MessageBus;
#[cfg(not(feature = "lattice"))]
pub(crate) fn new() -> MessageBus {
inproc::InprocBus::new()
}
#[cfg(feature = "lattice")]
pub(crate) fn new(
host_id: String,
claims: Arc<RwLock<HashMap<String, Claims<Actor>>>>,
caps: Arc<RwLock<HashMap<RouteKey, CapabilityDescriptor>>>,
bindings: Arc<RwLock<BindingsList>>,
labels: Arc<RwLock<HashMap<String, String>>>,
terminators: Arc<RwLock<HashMap<String, Sender<bool>>>>,
ns: Option<String>,
cplane_s: Sender<lattice::ControlCommand>,
authz: Arc<RwLock<Box<dyn crate::authz::Authorizer>>>,
image_map: Arc<RwLock<HashMap<String, String>>>,
) -> MessageBus {
lattice::DistributedBus::new(
host_id,
claims,
caps,
bindings,
labels,
terminators,
ns,
cplane_s,
authz,
image_map,
)
}
const LATTICE_NAMESPACE_ENV: &str = "LATTICE_NAMESPACE";
pub(crate) fn get_namespace_prefix() -> Option<String> {
::std::env::var(LATTICE_NAMESPACE_ENV).ok()
}
pub(crate) fn actor_subject(ns: Option<&str>, actor: &str) -> String {
format!("{}.actor.{}", nsprefix(ns), actor)
}
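// For example (assuming LATTICE_NAMESPACE=prod), actor_subject yields
// "prod.wasmbus.actor.<actor-id>"; with no namespace set it is
// "wasmbus.actor.<actor-id>".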
pub(crate) fn provider_subject(ns: Option<&str>, capid: &str, binding: &str) -> String {
format!(
"{}.provider.{}.{}",
nsprefix(ns),
normalize_capid(capid),
binding
)
}
pub(crate) fn inventory_wildcard_subject(ns: Option<&str>) -> String {
format!("{}.inventory.*", nsprefix(ns))
}
pub(crate) fn event_subject(ns: Option<&str>) -> String {
format!("{}.events", nsprefix(ns))
}
// By convention most of the waSCC ecosystem uses a "group:item" string
// for the capability IDs, e.g. "wascc:messaging" or "gpio:relay". To
// accommodate message broker subjects that might not work with the ":"
// character, we normalize the segments to dot-separated.
pub(crate) fn normalize_capid(capid: &str) -> String {
capid.to_lowercase().replace(":", ".").replace(" ", "_")
}
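// e.g. normalize_capid("wascc:messaging") == "wascc.messaging" and
// normalize_capid("gpio relay") == "gpio_relay".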
pub(crate) fn provider_subject_bound_actor(
ns: Option<&str>,
capid: &str,
binding: &str,
calling_actor: &str,
) -> String {
format!(
"{}.provider.{}.{}.{}",
nsprefix(ns),
normalize_capid(capid),
binding,
calling_actor
)
}
pub(crate) fn nsprefix(ns: Option<&str>) -> String {
match ns {
Some(s) => format!("{}.wasmbus", s),
None => "wasmbus".to_string(),
}
} | #[cfg(feature = "lattice")] |
base.py |
st = Speedtest()
# debug
if debugmode:
print(f'Download: {st.download()}')
print(f'Upload: {st.upload()}')
st.get_best_server([])
print(f'Ping: {st.results.ping}')
# functions
def get_upload_speed():
print('UPLOAD SPEED: Wait a few seconds...')
return int(st.upload())
def get_download_speed():
print('DOWNLOAD SPEED: Wait a few seconds...')
return int(st.download())
def get_ping():
print('Wait a few seconds...')
st.get_best_server([])
return int(st.results.ping) | from speedtest import Speedtest
# debugmode
debugmode = 0 |
|
PagerView.d.ts | import React, { ReactElement } from 'react';
import type { PagerViewProps } from './types';
/**
* Container that allows to flip left and right between child views. Each
* child view of the `PagerView` will be treated as a separate page
* and will be stretched to fill the `PagerView`.
*
 * It is important that all children are `<View>`s and not composite components.
* You can set style properties like `padding` or `backgroundColor` for each
* child. It is also important that each child have a `key` prop.
*
* Example:
*
* ```
* render: function() {
* return (
* <PagerView
* style={styles.PagerView}
* initialPage={0}>
* <View style={styles.pageStyle} key="1">
* <Text>First page</Text>
* </View>
* <View style={styles.pageStyle} key="2">
* <Text>Second page</Text>
* </View>
* </PagerView>
* );
* }
*
* ...
*
* var styles = {
* ...
* PagerView: {
* flex: 1
* },
* pageStyle: {
* alignItems: 'center',
* padding: 20,
* }
* }
* ``` | private PagerView;
getInnerViewNode: () => ReactElement;
private _onPageScroll;
private _onPageScrollStateChanged;
private _onPageSelected;
/**
* A helper function to scroll to a specific page in the PagerView.
* The transition between pages will be animated.
*/
setPage: (selectedPage: number) => void;
/**
* A helper function to scroll to a specific page in the PagerView.
* The transition between pages will *not* be animated.
*/
setPageWithoutAnimation: (selectedPage: number) => void;
/**
* A helper function to enable/disable scroll imperatively
* The recommended way is using the scrollEnabled prop, however, there might be a case where a
* imperative solution is more useful (e.g. for not blocking an animation)
*/
setScrollEnabled: (scrollEnabled: boolean) => void;
private _onMoveShouldSetResponderCapture;
private get deducedLayoutDirection();
render(): JSX.Element;
} | */
export declare class PagerView extends React.Component<PagerViewProps> {
private isScrolling; |
bar.component.d.ts | import { EventEmitter, ElementRef, SimpleChanges, OnChanges } from '@angular/core';
import { LocationStrategy } from '@angular/common';
export declare class BarComponent implements OnChanges {
private location;
fill: any;
data: any;
width: any;
height: any;
x: any;
y: any;
orientation: any;
roundEdges: boolean;
gradient: boolean;
offset: number;
isActive: boolean;
stops: any[];
select: EventEmitter<{}>;
activate: EventEmitter<{}>;
deactivate: EventEmitter<{}>;
element: any;
path: any;
gradientId: any; | initialized: boolean;
gradientStops: any[];
hasGradient: boolean;
constructor(element: ElementRef, location: LocationStrategy);
ngOnChanges(changes: SimpleChanges): void;
update(): void;
loadAnimation(): void;
animateToCurrentForm(): void;
getGradient(): any[];
getStartingPath(): any;
getPath(): any;
getRadius(): number;
getStartOpacity(): number;
readonly edges: boolean[];
onMouseEnter(): void;
onMouseLeave(): void;
} | gradientFill: any;
startOpacity: any; |
arc.rs | use crate::rt;
use std::pin::Pin;
use std::{mem, ops};
/// Mock implementation of `std::sync::Arc`.
#[derive(Debug)]
pub struct Arc<T: ?Sized> {
obj: std::sync::Arc<rt::Arc>,
value: std::sync::Arc<T>,
}
impl<T> Arc<T> {
/// Constructs a new `Arc<T>`.
#[track_caller]
pub fn new(value: T) -> Arc<T> {
let std = std::sync::Arc::new(value);
Arc::from_std(std)
}
/// Constructs a new `Pin<Arc<T>>`.
pub fn pin(data: T) -> Pin<Arc<T>> {
unsafe { Pin::new_unchecked(Arc::new(data)) }
}
/// Returns the inner value, if the `Arc` has exactly one strong reference.
pub fn try_unwrap(_this: Arc<T>) -> Result<T, Arc<T>> {
unimplemented!();
}
}
impl<T: ?Sized> Arc<T> {
/// Converts `std::sync::Arc` to `loom::sync::Arc`.
///
/// This is needed to create a `loom::sync::Arc<T>` where `T: !Sized`.
///
/// ## Panics
///
/// If the provided `Arc` has copies (i.e., if it is not unique).
///
/// ## Examples
///
/// While `std::sync::Arc` with `T: !Sized` can be created by coercing an
/// `std::sync::Arc` with a sized value:
///
/// ```rust
/// let sized: std::sync::Arc<[u8; 3]> = std::sync::Arc::new([1, 2, 3]);
/// let _unsized: std::sync::Arc<[u8]> = sized; // coercion
/// ```
///
/// `loom::sync::Arc` can't be created in the same way:
///
/// ```compile_fail,E0308
/// use loom::sync::Arc;
///
/// let sized: Arc<[u8; 3]> = Arc::new([1, 2, 3]);
/// let _unsized: Arc<[u8]> = sized; // error: mismatched types
/// ```
///
/// This is because `std::sync::Arc` uses an unstable trait called `CoerceUnsized`
/// that loom can't use. To create `loom::sync::Arc` with an unsized inner value
/// first create a `std::sync::Arc` of an appropriate type and then use this method:
///
/// ```rust
/// use loom::sync::Arc;
///
/// # loom::model::model(|| {
/// let std: std::sync::Arc<[u8]> = std::sync::Arc::new([1, 2, 3]);
/// let loom: Arc<[u8]> = Arc::from_std(std);
///
/// let std: std::sync::Arc<dyn Send + Sync> = std::sync::Arc::new([1, 2, 3]);
/// let loom: Arc<dyn Send + Sync> = Arc::from_std(std);
/// # });
/// ```
#[track_caller]
pub fn from_std(mut std: std::sync::Arc<T>) -> Self {
assert!(
std::sync::Arc::get_mut(&mut std).is_some(),
"Arc provided to `from_std` is not unique"
);
let obj = std::sync::Arc::new(rt::Arc::new(location!()));
let objc = std::sync::Arc::clone(&obj);
rt::execution(|e| {
e.arc_objs
.insert(std::sync::Arc::as_ptr(&std) as *const (), objc);
});
Arc { obj, value: std }
}
/// Gets the number of strong (`Arc`) pointers to this value.
#[track_caller]
pub fn strong_count(this: &Self) -> usize {
this.obj.strong_count()
}
/// Increments the strong reference count on the `Arc<T>` associated with the
/// provided pointer by one.
///
/// # Safety
///
/// The pointer must have been obtained through `Arc::into_raw`, and the
/// associated `Arc` instance must be valid (i.e. the strong count must be at
/// least 1) for the duration of this method.
#[track_caller]
pub unsafe fn increment_strong_count(ptr: *const T) {
// Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
let arc = mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr));
// Now increase refcount, but don't drop new refcount either
let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
}
/// Decrements the strong reference count on the `Arc<T>` associated with the
/// provided pointer by one.
///
/// # Safety
///
/// The pointer must have been obtained through `Arc::into_raw`, and the
/// associated `Arc` instance must be valid (i.e. the strong count must be at
/// least 1) when invoking this method. This method can be used to release the final
/// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
/// released.
#[track_caller]
pub unsafe fn decrement_strong_count(ptr: *const T) {
mem::drop(Arc::from_raw(ptr));
}
/// Returns a mutable reference to the inner value, if there are
/// no other `Arc` pointers to the same value.
#[track_caller]
pub fn get_mut(this: &mut Self) -> Option<&mut T> |
/// Returns `true` if the two `Arc`s point to the same value (not
/// just values that compare as equal).
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
std::sync::Arc::ptr_eq(&this.value, &other.value)
}
/// Consumes the `Arc`, returning the wrapped pointer.
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
mem::forget(this);
ptr
}
/// Provides a raw pointer to the data.
pub fn as_ptr(this: &Self) -> *const T {
std::sync::Arc::as_ptr(&this.value)
}
/// Constructs an `Arc` from a raw pointer.
///
/// # Safety
///
/// The raw pointer must have been previously returned by a call to
/// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
/// alignment as `T`. This is trivially true if `U` is `T`.
/// Note that if `U` is not `T` but has the same size and alignment, this is
/// basically like transmuting references of different types. See
/// [`mem::transmute`][transmute] for more information on what
/// restrictions apply in this case.
///
/// The user of `from_raw` has to make sure a specific value of `T` is only
/// dropped once.
///
/// This function is unsafe because improper use may lead to memory unsafety,
/// even if the returned `Arc<T>` is never accessed.
///
/// [into_raw]: Arc::into_raw
/// [transmute]: core::mem::transmute
#[track_caller]
pub unsafe fn from_raw(ptr: *const T) -> Self {
let inner = std::sync::Arc::from_raw(ptr);
let obj = rt::execution(|e| std::sync::Arc::clone(&e.arc_objs[&ptr.cast()]));
Arc { value: inner, obj }
}
}
impl<T: ?Sized> ops::Deref for Arc<T> {
type Target = T;
fn deref(&self) -> &T {
&self.value
}
}
impl<T: ?Sized> Clone for Arc<T> {
#[track_caller]
fn clone(&self) -> Arc<T> {
self.obj.ref_inc(location!());
Arc {
value: self.value.clone(),
obj: self.obj.clone(),
}
}
}
impl<T: ?Sized> Drop for Arc<T> {
#[track_caller]
fn drop(&mut self) {
if self.obj.ref_dec(location!()) {
assert_eq!(
1,
std::sync::Arc::strong_count(&self.value),
"something odd is going on"
);
rt::execution(|e| {
e.arc_objs
.remove(&std::sync::Arc::as_ptr(&self.value).cast())
.expect("Arc object was removed before dropping last Arc");
});
}
}
}
impl<T: Default> Default for Arc<T> {
#[track_caller]
fn default() -> Arc<T> {
Arc::new(Default::default())
}
}
impl<T> From<T> for Arc<T> {
#[track_caller]
fn from(t: T) -> Self {
Arc::new(t)
}
}
| {
if this.obj.get_mut(location!()) {
assert_eq!(1, std::sync::Arc::strong_count(&this.value));
Some(std::sync::Arc::get_mut(&mut this.value).unwrap())
} else {
None
}
} |
plugin.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedulingtypes
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"sigs.k8s.io/kubefed/pkg/apis/core/typeconfig"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
"sigs.k8s.io/kubefed/pkg/controller/util"
)
const (
replicasPath = "/spec/replicas"
)
type Plugin struct {
targetInformer util.FederatedInformer
federatedStore cache.Store
federatedController cache.Controller
federatedTypeClient util.ResourceClient
typeConfig typeconfig.Interface
fedNsClient util.ResourceClient
limitedScope bool
stopChannel chan struct{}
}
func NewPlugin(controllerConfig *util.ControllerConfig, eventHandlers SchedulerEventHandlers, typeConfig typeconfig.Interface, nsAPIResource *metav1.APIResource) (*Plugin, error) {
targetAPIResource := typeConfig.GetTargetType()
userAgent := fmt.Sprintf("%s-replica-scheduler", strings.ToLower(targetAPIResource.Kind))
kubeConfig := restclient.CopyConfig(controllerConfig.KubeConfig)
restclient.AddUserAgent(kubeConfig, userAgent)
client := genericclient.NewForConfigOrDie(kubeConfig)
targetInformer, err := util.NewFederatedInformer(
controllerConfig,
client,
&targetAPIResource,
eventHandlers.ClusterEventHandler,
eventHandlers.ClusterLifecycleHandlers,
)
if err != nil {
return nil, err
}
p := &Plugin{
targetInformer: targetInformer,
typeConfig: typeConfig,
limitedScope: controllerConfig.LimitedScope(),
stopChannel: make(chan struct{}),
}
targetNamespace := controllerConfig.TargetNamespace
kubeFedEventHandler := eventHandlers.KubeFedEventHandler
federatedTypeAPIResource := typeConfig.GetFederatedType()
p.federatedTypeClient, err = util.NewResourceClient(kubeConfig, &federatedTypeAPIResource)
if err != nil {
return nil, err
}
p.federatedStore, p.federatedController = util.NewResourceInformer(p.federatedTypeClient, targetNamespace, &federatedTypeAPIResource, kubeFedEventHandler)
p.fedNsClient, err = util.NewResourceClient(kubeConfig, nsAPIResource)
if err != nil {
return nil, err
}
return p, nil
}
func (p *Plugin) Start() {
p.targetInformer.Start()
go p.federatedController.Run(p.stopChannel)
}
func (p *Plugin) Stop() {
p.targetInformer.Stop()
close(p.stopChannel)
}
func (p *Plugin) HasSynced() bool {
if !p.targetInformer.ClustersSynced() {
klog.V(2).Infof("Cluster list not synced")
return false
}
if !p.federatedController.HasSynced() {
return false
}
clusters, err := p.targetInformer.GetReadyClusters()
if err != nil {
runtime.HandleError(errors.Wrap(err, "Failed to get ready clusters"))
return false
}
if !p.targetInformer.GetTargetStore().ClustersSynced(clusters) {
return false
}
return true
}
func (p *Plugin) FederatedTypeExists(key string) bool {
_, exist, err := p.federatedStore.GetByKey(key)
if err != nil {
klog.Errorf("Failed to query store while reconciling RSP controller for key %q: %v", key, err)
wrappedErr := errors.Wrapf(err, "Failed to query store while reconciling RSP controller for key %q", key)
runtime.HandleError(wrappedErr)
return false
}
return exist
}
func (p *Plugin) GetResourceClusters(qualifiedName util.QualifiedName, clusters []*fedv1b1.KubeFedCluster) (selectedClusters sets.String, err error) {
fedObject, err := p.federatedTypeClient.Resources(qualifiedName.Namespace).Get(context.Background(), qualifiedName.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// get FederatedNamespace with namespace name of the object
fedNsObject, err := p.fedNsClient.Resources(qualifiedName.Namespace).Get(context.Background(), qualifiedName.Namespace, metav1.GetOptions{})
if err != nil {
return nil, err
}
if p.typeConfig.GetNamespaced() {
return util.ComputeNamespacedPlacement(fedObject, fedNsObject, clusters, p.limitedScope, true)
}
return util.ComputePlacement(fedObject, clusters, true)
}
func (p *Plugin) Reconcile(qualifiedName util.QualifiedName, result map[string]int64) error {
fedObject, err := p.federatedTypeClient.Resources(qualifiedName.Namespace).Get(context.Background(), qualifiedName.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
// Federated resource has been deleted - no further action required
return nil
}
if err != nil {
return err
}
isDirty := false
newClusterNames := []string{}
for name := range result {
newClusterNames = append(newClusterNames, name)
}
clusterNames, err := util.GetClusterNames(fedObject)
if err != nil {
return err
}
if PlacementUpdateNeeded(clusterNames, newClusterNames) {
if err := util.SetClusterNames(fedObject, newClusterNames); err != nil {
return err
}
isDirty = true
}
overridesMap, err := util.GetOverrides(fedObject)
if err != nil {
return errors.Wrapf(err, "Error reading cluster overrides for %s %q", p.typeConfig.GetFederatedType().Kind, qualifiedName)
}
if OverrideUpdateNeeded(overridesMap, result) {
err := setOverrides(fedObject, overridesMap, result)
if err != nil {
return err
}
isDirty = true
}
if isDirty {
_, err := p.federatedTypeClient.Resources(qualifiedName.Namespace).Update(context.Background(), fedObject, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
// These assume that there are no duplicate cluster names
func | (names, newNames []string) bool {
sort.Strings(names)
sort.Strings(newNames)
return !reflect.DeepEqual(names, newNames)
}
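// e.g. ["b", "a"] vs ["a", "b"] needs no update, while ["a"] vs ["a", "b"]
// does. Note that both input slices are sorted in place as a side effect.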
func setOverrides(obj *unstructured.Unstructured, overridesMap util.OverridesMap, replicasMap map[string]int64) error {
if overridesMap == nil {
overridesMap = make(util.OverridesMap)
}
updateOverridesMap(overridesMap, replicasMap)
return util.SetOverrides(obj, overridesMap)
}
func updateOverridesMap(overridesMap util.OverridesMap, replicasMap map[string]int64) {
// Remove replicas override for clusters that are not scheduled
for clusterName, clusterOverrides := range overridesMap {
if _, ok := replicasMap[clusterName]; !ok {
for i, overrideItem := range clusterOverrides {
if overrideItem.Path == replicasPath {
clusterOverrides = append(clusterOverrides[:i], clusterOverrides[i+1:]...)
overridesMap[clusterName] = clusterOverrides
break
}
}
}
}
// Add/update replicas override for clusters that are scheduled
for clusterName, replicas := range replicasMap {
replicasOverrideFound := false
for idx, overrideItem := range overridesMap[clusterName] {
if overrideItem.Path == replicasPath {
overridesMap[clusterName][idx].Value = replicas
replicasOverrideFound = true
break
}
}
if !replicasOverrideFound {
clusterOverrides, exist := overridesMap[clusterName]
if !exist {
clusterOverrides = util.ClusterOverrides{}
}
clusterOverrides = append(clusterOverrides, util.ClusterOverride{Path: replicasPath, Value: replicas})
overridesMap[clusterName] = clusterOverrides
}
}
}
func OverrideUpdateNeeded(overridesMap util.OverridesMap, result map[string]int64) bool {
resultLen := len(result)
checkLen := 0
for clusterName, clusterOverridesMap := range overridesMap {
for _, overrideItem := range clusterOverridesMap {
path := overrideItem.Path
rawValue := overrideItem.Value
if path != replicasPath {
continue
}
// The type of the value will be float64 due to how json
// marshalling works for interfaces.
floatValue, ok := rawValue.(float64)
if !ok {
return true
}
value := int64(floatValue)
replicas, ok := result[clusterName]
if !ok || value != replicas {
return true
}
checkLen += 1
}
}
return checkLen != resultLen
}
| PlacementUpdateNeeded |
rbm.py | # Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket
import numpy as _np
__all__ = ["PyRbm"]
class PyRbm(netket.machine.CxxMachine):
"""
__Do not use me in production code!__
A proof of concept implementation of a complex-valued RBM in pure Python.
This is an example of how to subclass `CxxMachine` so that the machine will
be usable with NetKet's C++ core.
This class can be used as a drop-in replacement for `RbmSpin`.
"""
def __init__(
self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True
):
r"""Constructs a new RBM. | Args:
hilbert: Hilbert space.
alpha: `alpha * hilbert.size` is the number of hidden spins.
use_visible_bias: specifies whether to use a bias for visible
spins.
use_hidden_bias: specifies whether to use a bias for hidden spins.
"""
# NOTE: The following call to __init__ is important!
super(PyRbm, self).__init__(hilbert)
n = hilbert.size
if alpha is None or alpha < 0:  # also rejects the default alpha=None, which would otherwise fail below
raise ValueError("`alpha` should be non-negative")
m = int(round(alpha * n))
self._w = _np.empty([m, n], dtype=_np.complex128)
self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None
self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None
def _number_parameters(self):
r"""Returns the number of parameters in the machine. We just sum the
sizes of all the tensors we hold.
"""
return (
self._w.size
+ (self._a.size if self._a is not None else 0)
+ (self._b.size if self._b is not None else 0)
)
def _number_visible(self):
r"""Returns the number of visible units.
"""
return self._w.shape[1]
def _get_parameters(self):
r"""Returns the parameters as a 1D tensor.
This function tries to order parameters in the exact same way as
``RbmSpin`` does so that we can do stuff like
>>> import netket
>>> import numpy
>>> hilbert = netket.hilbert.Spin(
graph=netket.graph.Hypercube(length=100, n_dim=1),
s=1/2.
)
>>> cxx_rbm = netket.machine.RbmSpin(hilbert, alpha=3)
>>> py_rbm = netket.machine.PyRbm(hilbert, alpha=3)
>>> cxx_rbm.init_random_parameters()
>>> # Order of parameters is the same, so we can assign one to the
>>> # other
>>> py_rbm.parameters = cxx_rbm.parameters
>>> x = numpy.random.choice(hilbert.local_states, size=hilbert.size)
>>> assert numpy.isclose(py_rbm.log_val(x), cxx_rbm.log_val(x))
"""
params = tuple()
if self._a is not None:
params += (self._a,)
if self._b is not None:
params += (self._b,)
params += (self._w.reshape(-1, order="C"),)
return _np.concatenate(params)
def _set_parameters(self, p):
r"""Sets parameters from a 1D tensor.
``self._set_parameters(self._get_parameters())`` is an identity.
"""
i = 0
if self._a is not None:
self._a[:] = p[i : i + self._a.size]
i += self._a.size
if self._b is not None:
self._b[:] = p[i : i + self._b.size]
i += self._b.size
self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order="C")
def log_val(self, x):
r"""Computes the logarithm of the wave function given a spin
configuration ``x``.
"""
r = _np.dot(self._w, x)
if self._b is not None:
r += self._b
r = _np.sum(PyRbm._log_cosh(r))
if self._a is not None:
r += _np.dot(self._a, x)
# Officially, we should return
# self._w.shape[0] * 0.6931471805599453 + r
# but the C++ implementation ignores the "constant factor"
return r
def der_log(self, x):
r"""Computes the gradient of the logarithm of the wave function
given a spin configuration ``x``.
"""
grad = _np.empty(self.n_par, dtype=_np.complex128)
i = 0
if self._a is not None:
grad[i : i + self._a.size] = x
i += self._a.size
tanh_stuff = _np.dot(self._w, x)
if self._b is not None:
tanh_stuff += self._b
tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)
if self._b is not None:
grad[i : i + self._b.size] = tanh_stuff
i += self._b.size
out = grad[i : i + self._w.size]
out.shape = (tanh_stuff.size, x.size)
_np.outer(tanh_stuff, x, out=out)
return grad
def _is_holomorphic(self):
r"""Complex valued RBM a holomorphic function.
"""
return True
def save(self, filename):
r"""Saves machine weights to ``filename`` using ``pickle``.
"""
import pickle
with open(filename, "wb") as output_file:
pickle.dump((self._w, self._a, self._b), output_file)
def load(self, filename):
r"""Loads machine weights from ``filename`` using ``pickle``.
"""
import pickle
with open(filename, "rb") as input_file:
self._w, self._a, self._b = pickle.load(input_file)
@staticmethod
def _log_cosh(x):
# TODO: Handle big numbers properly
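# A numerically stable alternative (an assumption, not in the original) is
# abs(x) + log1p(exp(-2 * abs(x))) - log(2), which avoids overflow in cosh
# for large |x|.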
return _np.log(_np.cosh(x)) | |
restart.go | package api
/*
Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software | distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs"
log "github.com/sirupsen/logrus"
)
// Restart POSTs a Restart request to the PostgreSQL Operator "restart" endpoint in order to restart
// a PG cluster or one or more instances within it.
func Restart(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials,
request *msgs.RestartRequest) (msgs.RestartResponse, error) {
var response msgs.RestartResponse
ctx := context.TODO()
jsonValue, _ := json.Marshal(request)
url := fmt.Sprintf("%s/%s", SessionCredentials.APIServerURL, "restart")
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonValue))
if err != nil {
return response, err
}
log.Debugf("restart called [%s]", url)
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(SessionCredentials.Username, SessionCredentials.Password)
resp, err := httpclient.Do(req)
if err != nil {
return response, err
}
defer resp.Body.Close()
log.Debugf("restart response: %v", resp)
if err := StatusCheck(resp); err != nil {
return response, err
}
if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
log.Println(err)
return response, err
}
return response, err
}
// QueryRestart sends a GET request to the PostgreSQL Operator "/restart/{clusterName}" endpoint
// in order to obtain information about the various instances available to restart within the
// cluster specified.
func QueryRestart(httpclient *http.Client, clusterName string, SessionCredentials *msgs.BasicAuthCredentials,
namespace string) (msgs.QueryRestartResponse, error) {
var response msgs.QueryRestartResponse
ctx := context.TODO()
url := fmt.Sprintf("%s/%s/%s", SessionCredentials.APIServerURL, "restart", clusterName)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return response, err
}
q := req.URL.Query()
q.Add("version", msgs.PGO_VERSION)
q.Add("namespace", namespace)
req.URL.RawQuery = q.Encode()
log.Debugf("query restart called [%s]", req.URL)
req.SetBasicAuth(SessionCredentials.Username, SessionCredentials.Password)
resp, err := httpclient.Do(req)
if err != nil {
return response, err
}
defer resp.Body.Close()
log.Debugf("query restart response: %v", resp)
if err := StatusCheck(resp); err != nil {
return response, err
}
if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
log.Println(err)
return response, err
}
return response, err
} | |
is-buffer.js | module.exports = isBuf;
/**
* Returns true if obj is a buffer or an arraybuffer.
*
* @api private
*/
|
function isBuf(obj) {
return (global.Buffer && global.Buffer.isBuffer(obj)) ||
(global.ArrayBuffer && obj instanceof ArrayBuffer);
} | |
glue_tasks.py | # coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
import glob
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.io.vasp import Vasprun, zpath
"""
This module defines tasks that acts as a glue between other vasp Firetasks to allow communication
between different Firetasks and Fireworks. This module also contains tasks that affect the control
flow of the workflow, e.g. tasks to check stability or the gap is within a certain range.
"""
import gzip
import os
import re
from pymatgen import MPRester
from pymatgen.io.vasp.sets import get_vasprun_outcar
from pymatgen.core.structure import Structure
from fireworks import explicit_serialize, FiretaskBase, FWAction
from atomate.utils.utils import env_chk, get_logger
from atomate.common.firetasks.glue_tasks import get_calc_loc, PassResult, \
CopyFiles, CopyFilesFromCalcLoc
logger = get_logger(__name__)
__author__ = 'Anubhav Jain, Kiran Mathew'
__email__ = '[email protected], [email protected]'
@explicit_serialize
class CopyVaspOutputs(CopyFiles):
"""
Copy files from a previous VASP run directory to the current directory.
By default, copies 'INCAR', 'POSCAR' (default: via 'CONTCAR'), 'KPOINTS',
'POTCAR', 'OUTCAR', and 'vasprun.xml'. Additional files, e.g. 'CHGCAR',
can also be specified. Automatically handles files that have a ".gz"
extension (copies and unzips).
Note that you must specify either "calc_loc" or "calc_dir" to indicate
the directory containing the previous VASP run.
Required params:
(none) - but you must specify either "calc_loc" OR "calc_dir"
Optional params:
calc_loc (str OR bool): if True will set most recent calc_loc. If str
search for the most recent calc_loc with the matching name
calc_dir (str): path to dir that contains VASP output files.
filesystem (str): remote filesystem. e.g. username@host
additional_files ([str]): additional files to copy,
e.g. ["CHGCAR", "WAVECAR"]. Use $ALL if you just want to copy
everything
contcar_to_poscar(bool): If True (default), will move CONTCAR to
POSCAR (original POSCAR is not copied).
"""
optional_params = ["calc_loc", "calc_dir", "filesystem", "additional_files",
"contcar_to_poscar"]
def run_task(self, fw_spec):
calc_loc = get_calc_loc(self["calc_loc"],
fw_spec["calc_locs"]) if self.get(
"calc_loc") else {}
# determine what files need to be copied
files_to_copy = None
if not "$ALL" in self.get("additional_files", []):
files_to_copy = ['INCAR', 'POSCAR', 'KPOINTS', 'POTCAR', 'OUTCAR',
'vasprun.xml']
if self.get("additional_files"):
files_to_copy.extend(self["additional_files"])
# decide between poscar and contcar
contcar_to_poscar = self.get("contcar_to_poscar", True)
if contcar_to_poscar and "CONTCAR" not in files_to_copy:
files_to_copy.append("CONTCAR")
files_to_copy = [f for f in files_to_copy if
f != 'POSCAR'] # remove POSCAR
# setup the copy
self.setup_copy(self.get("calc_dir", None),
filesystem=self.get("filesystem", None),
files_to_copy=files_to_copy, from_path_dict=calc_loc)
# do the copying
self.copy_files()
def copy_files(self):
all_files = self.fileclient.listdir(self.from_dir)
# start file copy
for f in self.files_to_copy:
prev_path_full = os.path.join(self.from_dir, f)
dest_fname = 'POSCAR' if f == 'CONTCAR' and self.get(
"contcar_to_poscar", True) else f
dest_path = os.path.join(self.to_dir, dest_fname)
relax_ext = ""
relax_paths = sorted(
self.fileclient.glob(prev_path_full + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CopyVaspOutputs doesn't properly handle >9 relaxations!")
m = re.search('\.relax\d*', relax_paths[-1])
relax_ext = m.group(0)
# detect .gz extension if needed - note that monty zpath() did not seem useful here
gz_ext = ""
if not (f + relax_ext) in all_files:
for possible_ext in [".gz", ".GZ"]:
if (f + relax_ext + possible_ext) in all_files:
gz_ext = possible_ext
if not (f + relax_ext + gz_ext) in all_files:
raise ValueError("Cannot find file: {}".format(f))
# copy the file (minus the relaxation extension)
self.fileclient.copy(prev_path_full + relax_ext + gz_ext,
dest_path + gz_ext)
# unzip the .gz if needed
if gz_ext in ['.gz', ".GZ"]:
# unzip dest file
f = gzip.open(dest_path + gz_ext, 'rt')
file_content = f.read()
with open(dest_path, 'w') as f_out:
f_out.writelines(file_content)
f.close()
os.remove(dest_path + gz_ext)
@explicit_serialize
class CheckStability(FiretaskBase):
"""
Checks the stability of the entry against the Materials Project database.
If the stability is less than the cutoff (default is 0.1 eV/atom), then
the task will return a FWAction that will defuse all remaining tasks.
Required params:
(none) - but your MAPI key must be set as an environ var in this case
Optional params:
ehull_cutoff: (float) energy in eV/atom to use as ehull cutoff. Default
is 0.05 eV/atom.
MAPI_KEY: (str) set MAPI key directly. Supports env_chk.
calc_dir: (str) string to path containing vasprun.xml (default currdir)
"""
required_params = []
optional_params = ["ehull_cutoff", "MAPI_KEY", "calc_dir"]
def run_task(self, fw_spec):
mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."),
parse_dos=False,
parse_eigen=False)
my_entry = vasprun.get_computed_entry(inc_structure=False)
stored_data = mpr.get_stability([my_entry])[0]
if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
logger.info("CheckStability: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
else:
return FWAction(stored_data=stored_data)
@explicit_serialize
class CheckBandgap(FiretaskBase):
"""
Checks the band gap of an entry. If band gap is >min_gap or <max_gap, then
the task will return a FWAction that will defuse all remaining tasks.
Required params:
(none) - but you should set either min_gap or max_gap
Optional params:
min_gap: (float) minimum gap energy in eV to proceed
max_gap: (float) maximum gap energy in eV to proceed
vasprun_path: (str) path to vasprun.xml file
"""
required_params = []
optional_params = ["min_gap", "max_gap", "vasprun_path"]
def run_task(self, fw_spec):
vr_path = zpath(self.get("vasprun_path", "vasprun.xml"))
min_gap = self.get("min_gap", None)
max_gap = self.get("max_gap", None)
if not os.path.exists(vr_path):
relax_paths = sorted(glob.glob(vr_path + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CheckBandgap doesn't properly handle >9 relaxations!")
vr_path = relax_paths[-1]
logger.info("Checking the gap of file: {}".format(vr_path))
vr = Vasprun(vr_path)
gap = vr.get_band_structure().get_band_gap()["energy"]
stored_data = {"band_gap": gap}
logger.info(
"The gap is: {}. Min gap: {}. Max gap: {}".format(gap, min_gap,
max_gap))
if (min_gap and gap < min_gap) or (max_gap and gap > max_gap):
logger.info("CheckBandgap: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
return FWAction(stored_data=stored_data)
@explicit_serialize
class GetInterpolatedPOSCAR(FiretaskBase):
"""
Grabs CONTCARs from two previous calculations to create an interpolated
structure.
The code gets the CONTCAR locations using get_calc_loc of two calculations
indicated by the start and end params, creates a folder named "interpolate"
in the current FireWork directory, and copies the two CONTCARs to this folder.
The two CONTCARs are then used to create nimages interpolated structures using
pymatgen.core.structure.Structure.interpolate. Finally, the structure indicated
by this_image is written as a POSCAR file.
Required params:
start (str): name of fw for start of interpolation.
end (str): name of fw for end of interpolation.
this_image (int): which interpolation this is.
nimages (int) : number of interpolations.
Optional params:
autosort_tol (float): parameter used by Structure.interpolate.
a distance tolerance in angstrom in which to automatically
sort end_structure to match to the closest
points in this particular structure. Default is 0.0.
"""
required_params = ["start", "end", "this_image", "nimages"]
optional_params = ["autosort_tol"]
def | (self, fw_spec):
structure = self.interpolate_poscar(fw_spec)
structure.to(fmt="POSCAR", filename=os.path.join(os.getcwd(), "POSCAR"))
def interpolate_poscar(self, fw_spec):
# make folder for poscar interpolation start and end structure files.
interpolate_folder = 'interpolate'
if not os.path.exists(os.path.join(os.getcwd(), interpolate_folder)):
os.makedirs(os.path.join(os.getcwd(), interpolate_folder))
        # use CopyFilesFromCalcLoc to grab files from previous calculation locations.
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["start"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_0").run_task(fw_spec=fw_spec)
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["end"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_1").run_task(fw_spec=fw_spec)
# assuming first calc_dir is polar structure for ferroelectric search
s1 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_0"))
s2 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_1"))
structs = s1.interpolate(s2, self["nimages"], interpolate_lattices=True,
autosort_tol=self.get("autosort_tol", 0.0))
# save only the interpolation needed for this run
i = self.get("this_image")
return structs[i]
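# Note: Structure.interpolate returns nimages + 1 structures with both
# endpoints included, so valid values of this_image run from 0 (the "start"
# CONTCAR) through nimages (the "end" CONTCAR).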
def pass_vasp_result(pass_dict=None, calc_dir='.', filename="vasprun.xml.gz",
parse_eigen=False,
parse_dos=False, **kwargs):
"""
    Function that gets a PassResult Firetask corresponding to output from a Vasprun. Covers
most use cases in which user needs to pass results from a vasp run to child FWs
(e. g. analysis FWs)
pass_vasp_result(pass_dict={'stress': ">>ionic_steps.-1.stress"})
Args:
pass_dict (dict): dictionary designating keys and values to pass
to child fireworks. If value is a string beginning with '>>',
the firework will search the parsed VASP output dictionary
for the designated property by following the sequence of keys
separated with periods, e. g. ">>ionic_steps.-1.stress" is used
to designate the stress from the last ionic_step. If the value
is not a string or does not begin with ">>" or "a>>" (for an
object attribute, rather than nested key of .as_dict() conversion),
it is passed as is. Defaults to pass the computed entry of
the Vasprun.
calc_dir (str): path to dir that contains VASP output files, defaults
to '.', e. g. current directory
filename (str): filename for vasp xml file to parse, defaults to
"vasprun.xml.gz"
parse_eigen (bool): flag on whether or not to parse eigenvalues,
defaults to false
        parse_dos (bool): flag on whether or not to parse dos,
defaults to false
**kwargs (keyword args): other keyword arguments passed to PassResult
e.g. mod_spec_key or mod_spec_cmd
"""
pass_dict = pass_dict or {"computed_entry": "a>>get_computed_entry"}
parse_kwargs = {"filename": filename, "parse_eigen": parse_eigen,
"parse_dos": parse_dos}
return PassResult(pass_dict=pass_dict, calc_dir=calc_dir,
parse_kwargs=parse_kwargs,
parse_class="pymatgen.io.vasp.outputs.Vasprun", **kwargs)
| run_task |
active_validator.rs | use super::{
state::State,
traits::Context,
validators::ValidatorIndex,
vertex::{Vertex, WireVote},
vote::{Observation, Panorama},
};
/// An action taken by a validator.
#[derive(Clone, Eq, PartialEq, Debug)]
pub(crate) enum Effect<C: Context> {
    /// A new vertex that should be gossiped to peers and added to the protocol state.
NewVertex(Vertex<C>),
/// `handle_timer` needs to be called at the specified instant.
ScheduleTimer(u64),
    /// `propose` needs to be called with a value for a new block to be created at the specified instant.
// TODO: Add more information required by the deploy buffer.
RequestNewBlock(BlockContext),
}
/// Information about the context in which a new block is created.
#[derive(Clone, Eq, PartialEq, Debug)]
pub(crate) struct BlockContext {
instant: u64,
}
impl BlockContext {
/// The block's timestamp.
pub(crate) fn instant(&self) -> u64 {
self.instant
}
}
/// A validator that actively participates in consensus by creating new vertices.
///
/// It implements the Highway schedule. The protocol proceeds in rounds, and in each round one
/// validator is the _leader_.
/// * At the beginning of the round, the leader sends a _proposal_ vote, containing consensus values
/// (i.e. a block).
/// * Upon receiving the proposal, all the other validators send a _confirmation_ vote, citing only
/// the proposal, their own previous message, and resulting transitive justifications.
/// * At a fixed point in time later in the round, everyone unconditionally sends a _witness_ vote,
/// citing every vote they have received so far.
///
/// If the rounds are long enough (i.e. message delivery is fast enough) and there are enough
/// honest validators, there will be a lot of confirmations for the proposal, and enough witness
/// votes citing all those confirmations, to create a summit and finalize the proposal.
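///
/// As a concrete example (matching the test at the bottom of this file): with
/// `round_exp = 4` a round lasts `1 << 4 = 16` ticks, round IDs are multiples
/// of 16, and the witness vote is due `16 * 2 / 3 = 10` ticks into the round,
/// so round 416 has its proposal tick at 416 and its witness tick at 426.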
pub(crate) struct ActiveValidator<C: Context> {
/// Our own validator index.
vidx: ValidatorIndex,
/// The validator's secret signing key.
// TODO: Sign votes.
_secret: C::ValidatorSecret,
/// The round exponent: Our subjective rounds are `1 << round_exp` milliseconds long.
round_exp: u8,
/// The latest timer we scheduled.
next_timer: u64,
}
impl<C: Context> ActiveValidator<C> {
/// Creates a new `ActiveValidator` and the timer effect for the first call.
pub(crate) fn new(
vidx: ValidatorIndex,
secret: C::ValidatorSecret,
round_exp: u8,
instant: u64,
state: &State<C>,
) -> (Self, Vec<Effect<C>>) {
let mut av = ActiveValidator {
vidx,
_secret: secret,
round_exp,
next_timer: 0,
};
let effects = av.schedule_timer(instant, state);
(av, effects)
}
/// Returns actions a validator needs to take at the specified `instant`, with the given
/// protocol `state`.
pub(crate) fn handle_timer(&mut self, instant: u64, state: &State<C>) -> Vec<Effect<C>> {
let round_offset = instant % self.round_len();
let round_id = instant - round_offset;
let mut effects = self.schedule_timer(instant, state);
if round_offset == 0 && state.leader(round_id) == self.vidx {
let bctx = BlockContext { instant };
effects.push(Effect::RequestNewBlock(bctx));
} else if round_offset == self.witness_offset() {
let panorama = state.panorama().clone();
let witness_vote = self.new_vote(panorama, instant, None, state);
effects.push(Effect::NewVertex(Vertex::Vote(witness_vote)))
}
effects
}
/// Returns actions a validator needs to take upon receiving a new vote.
pub(crate) fn on_new_vote(
&self,
vhash: &C::Hash,
instant: u64,
state: &State<C>,
) -> Vec<Effect<C>> {
if self.should_send_confirmation(vhash, instant, state) {
let panorama = self.confirmation_panorama(vhash, state);
let confirmation_vote = self.new_vote(panorama, instant, None, state);
vec![Effect::NewVertex(Vertex::Vote(confirmation_vote))]
} else {
vec![]
}
}
/// Proposes a new block with the given consensus value.
pub(crate) fn propose(
&self,
values: Vec<C::ConsensusValue>,
block_context: BlockContext,
state: &State<C>,
) -> Vec<Effect<C>> {
let panorama = state.panorama().clone();
let instant = block_context.instant();
let proposal_vote = self.new_vote(panorama, instant, Some(values), state);
vec![Effect::NewVertex(Vertex::Vote(proposal_vote))]
}
/// Returns whether the incoming message is a proposal that we need to send a confirmation for.
fn should_send_confirmation(&self, vhash: &C::Hash, instant: u64, state: &State<C>) -> bool {
let vote = state.vote(vhash);
instant / self.round_len() == vote.instant / self.round_len() // Current round.
&& state.leader(vote.instant) == vote.sender // The sender is the round's leader.
&& vote.sender != self.vidx // We didn't send it ourselves.
&& !state.has_evidence(vote.sender) // The sender is not faulty.
&& state
.panorama()
.get(self.vidx)
.correct()
.map_or(true, |own_vh| {
!state.sees_correct(&state.vote(own_vh).panorama, vhash)
}) // We haven't confirmed it already.
}
/// Returns the panorama of the confirmation for the leader vote `vhash`.
fn confirmation_panorama(&self, vhash: &C::Hash, state: &State<C>) -> Panorama<C> {
let vote = state.vote(vhash);
let mut panorama;
if let Some(prev_hash) = state.panorama().get(self.vidx).correct().cloned() {
let own_vote = state.vote(&prev_hash);
panorama = state.merge_panoramas(&vote.panorama, &own_vote.panorama);
panorama.update(self.vidx, Observation::Correct(prev_hash));
} else {
panorama = vote.panorama.clone();
}
panorama.update(vote.sender, Observation::Correct(vhash.clone()));
for faulty_v in state.faulty_validators() {
panorama.update(faulty_v, Observation::Faulty);
}
panorama
}
/// Returns a new vote with the given data, and the correct sequence number.
fn new_vote(
&self,
panorama: Panorama<C>,
instant: u64,
values: Option<Vec<C::ConsensusValue>>,
state: &State<C>,
) -> WireVote<C> {
let add1 = |vh: &C::Hash| state.vote(vh).seq_number + 1;
let seq_number = panorama.get(self.vidx).correct().map_or(0, add1);
WireVote {
panorama,
sender: self.vidx,
values,
seq_number,
instant,
}
}
/// Returns a `ScheduleTimer` effect for the next time we need to be called.
fn schedule_timer(&mut self, instant: u64, state: &State<C>) -> Vec<Effect<C>> {
if self.next_timer > instant {
return Vec::new(); // We already scheduled the next call; nothing to do.
}
let round_offset = instant % self.round_len();
let round_id = instant - round_offset;
self.next_timer = if round_offset < self.witness_offset() {
round_id + self.witness_offset()
} else if state.leader(round_id + self.round_len()) == self.vidx {
round_id + self.round_len()
} else {
round_id + self.round_len() + self.witness_offset()
};
vec![Effect::ScheduleTimer(self.next_timer)]
}
/// Returns the number of ticks after the beginning of a round when the witness votes are sent.
fn witness_offset(&self) -> u64 {
self.round_len() * 2 / 3
}
/// The length of a round, in ticks.
fn round_len(&self) -> u64 {
1u64 << self.round_exp
}
}
#[cfg(test)]
mod tests {
use std::fmt::Debug;
use super::{
super::{
finality_detector::{FinalityDetector, FinalityResult},
state::{tests::*, AddVoteError, Weight},
vertex::Vertex,
},
*,
};
type Eff = Effect<TestContext>;
| if let Eff::NewVertex(Vertex::Vote(wvote)) = self {
wvote
} else {
panic!("Unexpected effect: {:?}", self);
}
}
}
fn unwrap_single<T: Debug>(vec: Vec<T>) -> T {
let mut iter = vec.into_iter();
match (iter.next(), iter.next()) {
(None, _) => panic!("Unexpected empty vec"),
(Some(t), None) => t,
(Some(t0), Some(t1)) => panic!("Expected only one element: {:?}, {:?}", t0, t1),
}
}
#[test]
#[allow(clippy::unreadable_literal)] // 0xC0FFEE is more readable than 0x00C0_FFEE.
fn active_validator() -> Result<(), AddVoteError<TestContext>> {
let mut state = State::<TestContext>::new(&[Weight(3), Weight(4)], 0);
let mut fd = FinalityDetector::new(Weight(2));
// We start at time 410, with round length 16, so the first leader tick is 416, and the
// first witness tick 426.
assert_eq!(ALICE, state.leader(416)); // Alice will be the first leader.
assert_eq!(BOB, state.leader(432)); // Bob will be the second leader.
let (mut alice_av, effects) = ActiveValidator::new(ALICE, TestSecret(0), 4, 410, &state);
assert_eq!([Eff::ScheduleTimer(416)], *effects);
let (mut bob_av, effects) = ActiveValidator::new(BOB, TestSecret(1), 4, 410, &state);
assert_eq!([Eff::ScheduleTimer(426)], *effects);
assert!(alice_av.handle_timer(415, &state).is_empty()); // Too early: No new effects.
// Alice wants to propose a block, and also make her witness vote at 426.
let bctx = match &*alice_av.handle_timer(416, &state) {
[Eff::ScheduleTimer(426), Eff::RequestNewBlock(bctx)] => bctx.clone(),
effects => panic!("unexpected effects {:?}", effects),
};
assert_eq!(416, bctx.instant());
// She has a pending deploy from Colin who wants to pay for a hot beverage.
let effects = alice_av.propose(vec![0xC0FFEE], bctx, &state);
let proposal_wvote = unwrap_single(effects).unwrap_vote();
let prop_hash = proposal_wvote.hash();
state.add_vote(proposal_wvote)?;
assert!(alice_av.on_new_vote(&prop_hash, 417, &state).is_empty());
// Bob creates a confirmation vote for Alice's proposal.
let effects = bob_av.on_new_vote(&prop_hash, 419, &state);
state.add_vote(unwrap_single(effects).unwrap_vote())?;
// Bob creates his witness message 2/3 through the round.
let mut effects = bob_av.handle_timer(426, &state).into_iter();
assert_eq!(Some(Eff::ScheduleTimer(432)), effects.next()); // Bob is the next leader.
state.add_vote(effects.next().unwrap().unwrap_vote())?;
assert_eq!(None, effects.next());
assert_eq!(FinalityResult::None, fd.run(&state)); // Alice has not witnessed Bob's vote yet.
// Alice also sends her own witness message, completing the summit for her proposal.
let mut effects = alice_av.handle_timer(426, &state).into_iter();
assert_eq!(Some(Eff::ScheduleTimer(442)), effects.next()); // Timer for witness vote.
state.add_vote(effects.next().unwrap().unwrap_vote())?;
assert_eq!(None, effects.next());
        // Payment finalized! "One Pumpkin Spice Mochaccino for Colin!"
assert_eq!(FinalityResult::Finalized(vec![0xC0FFEE]), fd.run(&state));
Ok(())
}
} | impl Eff {
fn unwrap_vote(self) -> WireVote<TestContext> { |
yes.rs | // * This file is part of the uutils coreutils package.
// *
// * (c) Jordi Boggiano <[email protected]>
// *
// * For the full copyright and license information, please view the LICENSE
// * file that was distributed with this source code.
/* last synced with: yes (GNU coreutils) 8.13 */
#[macro_use]
extern crate clap;
#[macro_use]
extern crate uucore;
use clap::Arg;
use std::borrow::Cow;
use std::io::{self, Write};
use uucore::zero_copy::ZeroCopyWriter;
// it's possible that using a smaller or larger buffer might provide better performance on some
// systems, but honestly this is good enough
const BUF_SIZE: usize = 16 * 1024;
pub fn uumain(args: impl uucore::Args) -> i32 {
let app = app_from_crate!().arg(Arg::with_name("STRING").index(1).multiple(true));
let matches = match app.get_matches_from_safe(args) {
Ok(m) => m,
Err(ref e)
if e.kind == clap::ErrorKind::HelpDisplayed
|| e.kind == clap::ErrorKind::VersionDisplayed =>
{
println!("{}", e);
return 0;
}
Err(f) => {
show_error!("{}", f);
return 1;
}
};
let string = if let Some(values) = matches.values_of("STRING") {
let mut result = values.fold(String::new(), |res, s| res + s + " ");
result.pop();
result.push('\n');
Cow::from(result)
} else {
Cow::from("y\n")
};
let mut buffer = [0; BUF_SIZE];
let bytes = prepare_buffer(&string, &mut buffer);
exec(bytes);
0
}
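// Worked example of the buffering below: with the default output "y\n"
// (2 bytes), the non-latency prepare_buffer tiles the 16 KiB buffer with
// 8192 back-to-back copies, so each write_all in exec emits thousands of
// lines per syscall instead of one.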
#[cfg(not(feature = "latency"))]
fn prepare_buffer<'a>(input: &'a str, buffer: &'a mut [u8; BUF_SIZE]) -> &'a [u8] |
#[cfg(feature = "latency")]
fn prepare_buffer<'a>(input: &'a str, _buffer: &'a mut [u8; BUF_SIZE]) -> &'a [u8] {
input.as_bytes()
}
pub fn exec(bytes: &[u8]) {
    let mut stdout_raw = io::stdout();
    let mut writer = ZeroCopyWriter::with_default(&mut stdout_raw, |stdout| stdout.lock());
loop {
// TODO: needs to check if pipe fails
writer.write_all(bytes).unwrap();
}
}
| {
if input.len() < BUF_SIZE / 2 {
let mut size = 0;
while size < BUF_SIZE - input.len() {
let (_, right) = buffer.split_at_mut(size);
right[..input.len()].copy_from_slice(input.as_bytes());
size += input.len();
}
&buffer[..size]
} else {
input.as_bytes()
}
} |
nodeserver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package smb
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"golang.org/x/net/context"
)
const (
usernameField = "username"
passwordField = "password"
sourceField = "source"
domainField = "domain"
defaultNetworkName = "AZURE"
)
// NodePublishVolume mount the volume from staging to target path
func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
klog.V(2).Infof("NodePublishVolume called with request %v", *req)
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
}
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
target := req.GetTargetPath()
if len(target) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path not provided")
}
source := req.GetStagingTargetPath()
if len(source) == 0 {
return nil, status.Error(codes.InvalidArgument, "Staging target not provided")
}
mountOptions := []string{"bind"}
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
mnt, err := d.ensureMountPoint(target)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not mount target %q: %v", target, err)
}
if mnt {
klog.V(2).Infof("NodePublishVolume: %s is already mounted", target)
return &csi.NodePublishVolumeResponse{}, nil
}
if err = preparePublishPath(target, d.mounter); err != nil {
return nil, fmt.Errorf("prepare publish failed for %s with error: %v", target, err)
}
context := req.GetVolumeContext()
var createSubDir string
for k, v := range context {
switch strings.ToLower(k) {
case createSubDirField:
createSubDir = v
}
}
if strings.EqualFold(createSubDir, "true") {
source = filepath.Join(source, req.GetVolumeId())
klog.V(2).Infof("NodePublishVolume: createSubDir(%s) MkdirAll(%s)", createSubDir, source)
if err := os.Mkdir(source, 0750); err != nil {
if os.IsExist(err) {
klog.Warningf("Mkdir(%s) failed with error: %v", source, err)
} else {
return nil, status.Errorf(codes.Internal, "Mkdir(%s) failed with error: %v", source, err)
}
}
}
klog.V(2).Infof("NodePublishVolume: mounting %s at %s with mountOptions: %v", source, target, mountOptions)
if err := d.mounter.Mount(source, target, "", mountOptions); err != nil {
if removeErr := os.Remove(target); removeErr != nil {
return nil, status.Errorf(codes.Internal, "Could not remove mount target %q: %v", target, removeErr)
}
return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
}
klog.V(2).Infof("NodePublishVolume: mount %s at %s successfully", source, target)
return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume unmount the volume from the target path
func (d *Driver) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
klog.V(2).Infof("NodeUnPublishVolume: called with args %+v", *req)
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
targetPath := req.GetTargetPath()
volumeID := req.GetVolumeId()
klog.V(2).Infof("NodeUnpublishVolume: unmounting volume %s on %s", volumeID, targetPath)
err := CleanupMountPoint(d.mounter, targetPath, false)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to unmount target %q: %v", targetPath, err)
}
klog.V(2).Infof("NodeUnpublishVolume: unmount volume %s on %s successfully", volumeID, targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume mount the volume to a staging path
func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// regex to mask username and password in log messages
	reqSecretsRegex := regexp.MustCompile(`map\[password:.*? `)
s := fmt.Sprintf("NodeStageVolume called with request %v", *req)
klog.V(5).Info(reqSecretsRegex.ReplaceAllString(s, "map[password:**** "))
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
volumeCapability := req.GetVolumeCapability()
if volumeCapability == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability not provided")
}
targetPath := req.GetStagingTargetPath()
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Staging target not provided")
}
volumeID := req.GetVolumeId()
context := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
secrets := req.GetSecrets()
source, ok := context[sourceField]
if !ok {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%s field is missing, current context: %v", sourceField, context))
}
var username, password, domain string
for k, v := range secrets {
switch strings.ToLower(k) {
case usernameField:
username = strings.TrimSpace(v)
case passwordField:
password = strings.TrimSpace(v)
case domainField:
domain = strings.TrimSpace(v)
}
}
var mountOptions, sensitiveMountOptions, loggingMountOptions []string
if runtime.GOOS == "windows" {
if !strings.Contains(source, "\\") {
username = fmt.Sprintf("%s\\%s", defaultNetworkName, username)
}
mountOptions = []string{username, password}
loggingMountOptions = []string{username}
} else {
if err := os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("MkdirAll %s failed with error: %v", targetPath, err))
}
sensitiveMountOptions = []string{fmt.Sprintf("%s=%s,%s=%s", usernameField, username, passwordField, password)}
mountOptions = mountFlags
loggingMountOptions = mountOptions
}
if domain != "" {
mountOptions = append(mountOptions, fmt.Sprintf("%s=%s", domainField, domain))
loggingMountOptions = mountOptions
}
klog.V(2).Infof("targetPath(%v) volumeID(%v) context(%v) mountflags(%v) mountOptions(%v)",
targetPath, volumeID, context, mountFlags, loggingMountOptions)
isDirMounted, err := d.ensureMountPoint(targetPath)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not mount target %q: %v", targetPath, err)
}
if !isDirMounted {
if err = prepareStagePath(targetPath, d.mounter); err != nil {
return nil, fmt.Errorf("prepare stage path failed for %s with error: %v", targetPath, err)
}
mountComplete := false
err = wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) {
err := Mount(d.mounter, source, targetPath, "cifs", mountOptions, sensitiveMountOptions)
mountComplete = true
return true, err
})
if !mountComplete {
return nil, status.Error(codes.Internal, fmt.Sprintf("volume(%s) mount %q on %q failed with timeout(10m)", volumeID, source, targetPath))
}
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("volume(%s) mount %q on %q failed with %v", volumeID, source, targetPath, err))
}
klog.V(2).Infof("volume(%s) mount %q on %q succeeded", volumeID, source, targetPath)
}
return &csi.NodeStageVolumeResponse{}, nil
}
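// Note on the staging flow above: on Linux the credentials travel only in
// sensitiveMountOptions (e.g. "username=foo,password=bar", illustrative
// values), while loggingMountOptions carries just the non-secret flags, so
// the V(2) log line never prints the password.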
// NodeUnstageVolume unmount the volume from the staging path
func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
klog.V(2).Infof("NodeUnstageVolume: called with args %+v", *req)
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
stagingTargetPath := req.GetStagingTargetPath()
if len(stagingTargetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Staging target not provided")
}
klog.V(2).Infof("NodeUnstageVolume: CleanupMountPoint %s", stagingTargetPath)
if err := CleanupSMBMountPoint(d.mounter, stagingTargetPath, false); err != nil {
return nil, status.Errorf(codes.Internal, "failed to unmount staging target %q: %v", stagingTargetPath, err)
}
klog.V(2).Infof("NodeUnstageVolume: unmount %s successfully", stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeGetCapabilities return the capabilities of the Node plugin
func (d *Driver) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
klog.V(2).Infof("NodeGetCapabilities called with request %v", *req)
return &csi.NodeGetCapabilitiesResponse{
Capabilities: d.NSCap,
}, nil
}
// NodeGetInfo return info of the node on which this plugin is running
func (d *Driver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
klog.V(2).Infof("NodeGetInfo called with request %v", *req)
return &csi.NodeGetInfoResponse{
NodeId: d.NodeID,
}, nil
}
// NodeGetVolumeStats get volume stats
func (d *Driver) NodeGetVolumeStats(ctx context.Context, in *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// NodeExpandVolume node expand volume
// N/A for smb
func (d *Driver) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ensureMountPoint: create mount point if not exists
// return <true, nil> if it's already a mounted point otherwise return <false, nil>
func (d *Driver) ensureMountPoint(target string) (bool, error) {
notMnt, err := d.mounter.IsLikelyNotMountPoint(target)
if err != nil && !os.IsNotExist(err) {
if IsCorruptedDir(target) {
notMnt = false
klog.Warningf("detected corrupted mount for targetPath [%s]", target)
} else {
return !notMnt, err
}
}
if !notMnt {
// testing original mount point, make sure the mount link is valid
_, err := ioutil.ReadDir(target)
if err == nil {
klog.V(2).Infof("already mounted to target %s", target)
return !notMnt, nil
}
// mount link is invalid, now unmount and remount later
klog.Warningf("ReadDir %s failed with %v, unmount this directory", target, err)
if err := d.mounter.Unmount(target); err != nil {
klog.Errorf("Unmount directory %s failed with %v", target, err)
return !notMnt, err
}
notMnt = true
return !notMnt, err
}
if err := makeDir(target); err != nil {
klog.Errorf("MakeDir failed on target: %s (%v)", target, err)
return !notMnt, err
}
return false, nil
}
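// For example: a fresh target directory yields (false, nil) after makeDir
// creates it; a healthy existing mount yields (true, nil); a stale mount whose
// ReadDir fails is unmounted first and yields (false, nil) so the caller can
// remount.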
func | (pathname string) error {
err := os.MkdirAll(pathname, os.FileMode(0755))
if err != nil {
if !os.IsExist(err) {
return err
}
}
return nil
}
| makeDir |
c4_09_python_fv.py | """
Name : c4_09_python_fv.py
Book : Hands-on Data Science with Anaconda
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/25/2018
email : [email protected]
[email protected]
"""
import numpy as np
import matplotlib.pyplot as mlt
n=np.linspace(0,10,10)
pv=100
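# sanity check: at n=10 the future value is 100*(1+0.1)**10, roughly 259.37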
R=0.1 | mlt.plot(n,fv)
mlt.show() | fv=pv*(1+R)**n |
network.py | class ServiceCentre(object):
"""
An information store for each service centre in the queueing network.
Contains all information that is independent of customer class:
- number of servers
- queueing capacity
    - server schedules + preemption status
- class change matrix
"""
def __init__(self,
number_of_servers,
queueing_capacity,
class_change_matrix=None,
schedule=None,
preempt=False,
ps_threshold=1):
"""
Initialises the ServiceCentre object.
"""
self.number_of_servers = number_of_servers
self.queueing_capacity = queueing_capacity
self.class_change_matrix = class_change_matrix
self.schedule = schedule
self.preempt = preempt
self.ps_threshold = ps_threshold
class CustomerClass(object):
"""
An information store for each customer class in the queueing network.
Contains all information that is dependent on customer class:
- arrival distributions
- service distributions
- routing matrices/functions
- priority class
- baulking functions
- batching distributions
"""
def __init__(self,
arrival_distributions,
service_distributions,
routing,
priority_class,
baulking_functions,
batching_distributions):
"""
        Initialises the CustomerClass object.
"""
self.arrival_distributions = arrival_distributions
self.service_distributions = service_distributions
self.batching_distributions = batching_distributions
self.routing = routing
self.priority_class = priority_class
self.baulking_functions = baulking_functions
class Network(object):
"""
    An information store for the queueing network.
Contains a list of ServiceCentre objects for each
service centre, and a list of CustomerClass objects
for each customer class.
"""
def __init__(self, service_centres, customer_classes):
| """
        Initialises the Network object.
"""
self.service_centres = service_centres
self.customer_classes = customer_classes
self.number_of_nodes = len(service_centres)
self.number_of_classes = len(customer_classes)
self.number_of_priority_classes = len(set([clss.priority_class for clss in customer_classes]))
self.priority_class_mapping = {i: clss.priority_class for i, clss in enumerate(customer_classes)} |
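# A minimal construction sketch (A, S and B stand in for arrival, service and
# batching distribution objects; they are hypothetical, not defined here):
#
#     sc = ServiceCentre(number_of_servers=2, queueing_capacity=float('inf'))
#     cc = CustomerClass(arrival_distributions=[A], service_distributions=[S],
#                        routing=[[0.0]], priority_class=0,
#                        baulking_functions=[None], batching_distributions=[B])
#     net = Network([sc], [cc])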
|
item.go | package sharecalculate
import ("strings"
"strconv"
"fmt"
)
type item struct{
name string
split []details
total float64
totweightage int
}
type items struct{
itemarr []item
}
// itemise each person's expenses and contributions
func (itemsdata items) updateshare() persons {
var persondata persons
for _,i := range itemsdata.itemarr{
for j:=0;j<len(i.split);j++{
//persondata = i.split[j].personmap(i.total,i.totweightage,persondata)
name,amtgiven,amttaken := i.split[j].personmap(i.total,i.totweightage)
persondata.updateperson(name,amtgiven,amttaken)
}
}
return persondata
}
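// Expected CSV layout per row: itemname,personname,amountgiven,weightage.
// e.g. "dinner,alice,30.0,1" opens item "dinner" with alice contributing 30 at
// weightage 1 (illustrative data); rows with an empty first column add more
// people to the current item.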
// itemise the file data and build the item array
func getitems(filerow []string) items | {
data := item{}
datas := items{}
firstitem := true
var err error
for _,dm := range filerow{
a := strings.Split(dm,",")
if a[0] != "" && !firstitem{
datas.itemarr = append(datas.itemarr,data)
data = item{}
data.name = a[0]
}
if firstitem{
data.name = a[0]
firstitem = false
}
dt := details{}
dt.personname = a[1]
		dt.amtgiven, err = strconv.ParseFloat(a[2], 64)
		if err != nil {
			fmt.Println("invalid amount:", a[2])
		}
		dt.weightage, err = strconv.Atoi(a[3])
		if err != nil {
			fmt.Println("invalid weightage:", a[3])
		}
data.total = data.total + dt.amtgiven
data.totweightage = data.totweightage + dt.weightage
data.split = append(data.split,dt)
}
datas.itemarr = append(datas.itemarr,data)
return datas
} |
|
preprocess.rs | use unescape::unescape;
pub fn clean_string(mut input: String) -> String {
input = String::from(input.trim());
    input = unescape(&input).unwrap_or_default();
    if input.is_empty() {
return input;
}
// remove line-comments
let mut last = '\0';
let mut len = input.len();
    for (i, c) in input.char_indices() {
if last == '/' && c == '/' {
len = i-1;
break;
}
last = c; |
input
}
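// e.g. clean_string("let x = 1; // note".into()) strips the trailing line
// comment and returns "let x = 1; " (the space before the removed "//"
// survives).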
pub fn add_semicolon(mut input: String) -> String {
if !input.ends_with(';') && !input.ends_with("*/") && !input.ends_with('}') {
input.push_str(";")
}
input
} | }
input.truncate(len); |
online_service.go | // ==========================================================================
// Business-logic-layer code auto-generated by Yunjie GO. Generated only once;
// modify as needed, regeneration will not overwrite it.
// Generated: 2020-02-17 14:03:51
// Path: app/service/module/online/online_service.go
// Generated by: yunjie
// ==========================================================================
package online
import (
onlineModel "yj-app/app/model/monitor/online"
"yj-app/app/utils/convert"
"yj-app/app/utils/page"
)
// SelectRecordById queries a record by its primary key
func SelectRecordById(id int64) (*onlineModel.Entity, error) {
return onlineModel.FindOne("sessionId", id)
}
// DeleteRecordById deletes a record by its primary key
func DeleteRecordById(id int64) bool {
result, err := onlineModel.Delete("sessionId", id)
if err == nil {
affected, _ := result.RowsAffected()
if affected > 0 {
return true
}
}
return false
}
// DeleteRecordByIds deletes multiple records by a comma-separated id list
func DeleteRecordByIds(ids string) int64 {
idarr := convert.ToInt64Array(ids, ",")
result, err := onlineModel.Delete("sessionId in (?)", idarr)
if err != nil {
return 0
}
nums, _ := result.RowsAffected()
return nums
}
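// e.g. DeleteRecordByIds("1,2,3") removes the sessions with ids 1, 2 and 3 and
// returns the number of rows affected (0 on error).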
// DeleteRecordNotInIds deletes all records whose ids are not in the given list
func DeleteRecordNotInIds(ids []string) int64 {
result, err := onlineModel.Delete("sessionId not in (?)", ids)
if err != nil {
return 0
}
nums, _ := result.RowsAffected()
return nums
}
// AddSave inserts a record and returns the new id
func AddSave(entity onlineModel.Entity) (int64, error) {
result, err := entity.Insert()
if err != nil {
return 0, err
}
id, err := result.LastInsertId()
if err != nil || id <= 0 {
return 0, err
}
return id, nil
}
// SelectListAll queries records matching the given conditions
func SelectListAll(params *onlineModel.SelectPageReq) ([]onlineModel.Entity, error) {
	return onlineModel.Selec | // SelectListByPage queries records by condition, with pagination
func SelectListByPage(params *onlineModel.SelectPageReq) ([]onlineModel.Entity, *page.Paging, error) {
return onlineModel.SelectListByPage(params)
}
| tListAll(params)
}
|
server.rs | use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
pub struct Server {
pub url: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub variables: Option<BTreeMap<String, ServerVariable>>,
#[serde(flatten)]
    pub extensions: BTreeMap<String, serde_json::Value>,
}
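// Serde sketch (illustrative URL): a minimal Server { url:
// "https://example.com/v1".into(), ..Default::default() } serializes to just
// {"url":"https://example.com/v1"}; the None fields are skipped and the empty
// flattened extensions map contributes nothing.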
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
pub struct ServerVariable {
#[serde(rename = "enum")]
#[serde(default)]
#[serde(skip_serializing_if = "Vec::is_empty")] | pub extensions:BTreeMap<String, serde_json::Value>,
} | pub enumeration: Vec<String>,
pub default: String,
pub description: Option<String>,
#[serde(flatten)] |
restclient_test.go | package gounity_test
// func TestPingPong(t *testing.T) {
// ctx, err := testutil.NewTestContext()
// assert.Nil(t, err, "failed to setup rest client to mock server")
// defer ctx.TearDown()
// resp, err := ctx.restClient.pingPong(
// ctx.context, http.MethodGet,
// fmt.Sprintf("api/instances/lun/sv_1?compact=true&fields=%s", typeFieldsLun),
// nil, nil)
// assert.Nil(t, err)
// assert.Equal(t, 200, resp.StatusCode)
// }
// func TestDoWithHeaders(t *testing.T) {
// ctx, err := testutil.NewTestContext()
// assert.Nil(t, err, "failed to setup rest client to mock server")
// defer ctx.TearDown()
| // instResp := &instanceResp{}
// err = ctx.restClient.DoWithHeaders(
// ctx.context, http.MethodGet,
// fmt.Sprintf("api/instances/lun/sv_1?compact=true&fields=%s", typeFieldsLun),
// nil, nil, instResp)
// assert.Nil(t, err)
// lun := &Lun{}
// err = json.Unmarshal(instResp.Content, lun)
// assert.Nil(t, err)
// assert.Equal(t, "sv_1", lun.Id)
// assert.Equal(t, "pool_1", lun.Pool.Id)
// }
// func TestGet(t *testing.T) {
// ctx, err := testutil.NewTestContext()
// assert.Nil(t, err, "failed to setup rest client to mock server")
// defer ctx.TearDown()
// instResp := &instanceResp{}
// err = ctx.restClient.Get(
// ctx.context,
// fmt.Sprintf("api/instances/lun/sv_1?compact=true&fields=%s", typeFieldsLun),
// nil, instResp)
// assert.Nil(t, err)
// lun := &Lun{}
// err = json.Unmarshal(instResp.Content, lun)
// assert.Nil(t, err)
// assert.Equal(t, "sv_1", lun.Id)
// assert.Equal(t, "pool_1", lun.Pool.Id)
// } |