Dataset schema (four string columns; lengths shown as min–max):

  file_name : 3–137 characters
  prefix    : 0–918k characters
  suffix    : 0–962k characters
  middle    : 0–812k characters

Each row below is one fill-in-the-middle (FIM) example: a source file, named by file_name, split into a prefix, a suffix, and the middle span between them.
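To make the row layout concrete, here is a minimal sketch — assuming each example is exposed as a plain dict with the four columns above — of how a row reassembles into its original file:

# Minimal sketch, assuming each row is a dict with the four columns above.
def reassemble(row: dict) -> str:
    # The original file is always prefix + middle + suffix; `middle` is the
    # span a fill-in-the-middle model is asked to predict from the
    # surrounding context.
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical row mirroring the first example below.
row = {
    "file_name": "common.controller.ts",
    "prefix": "export class ",
    "middle": "CommonController",
    "suffix": " {}",
}
assert reassemble(row) == "export class CommonController {}"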
common.controller.ts
import { Controller } from "@nestjs/common";

@Controller('/common')
export class CommonController {
  /**
   * Generate a random string.
   * @param length length of the random string
   */
  randomString(length: number): string {
    // Simple alphanumeric implementation; the character set is an
    // illustrative choice.
    const chars =
      'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
    let result = '';
    for (let i = 0; i < length; i++) {
      result += chars.charAt(Math.floor(Math.random() * chars.length));
    }
    return result;
  }
}
data-point.model.ts
import { CollectionObject } from './collection-object.model';

export interface DataPoint extends CollectionObject {
  description: string;
  units: string;
  lowerLimit?: string;
  upperLimit?: string;
  metaTags?: string[];
  processTag?: string;
  locationId: string;
  dataGroupId: string;
}
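As a quick illustration of the shape this interface describes — the values below are hypothetical, and fields inherited from CollectionObject are omitted since that interface is not shown here:

// Hypothetical example value. Partial<DataPoint> is used because the fields
// declared on CollectionObject live in another file.
const point: Partial<DataPoint> = {
  description: 'Inlet temperature',
  units: 'degC',
  lowerLimit: '0',
  upperLimit: '150',
  metaTags: ['sensor', 'temperature'],
  locationId: 'loc-01',
  dataGroupId: 'group-42',
};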
test_ranger_admin.py
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *


class TestRangerAdmin(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "RANGER/0.4.0/package"
    STACK_VERSION = "2.2"

    def test_configure_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
                           classname="RangerAdmin",
                           command="configure",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_default()
        self.assertNoMoreResources()

    def test_start_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
                           classname="RangerAdmin",
                           command="start",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_default()
        self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-start',
                                  not_if='ps -ef | grep proc_rangeradmin | grep -v grep',
                                  user='ranger')
        self.assertNoMoreResources()

    def test_stop_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
                           classname="RangerAdmin",
                           command="stop",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-stop',
                                  user='ranger')
        self.assertNoMoreResources()

    def test_configure_secured(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
                           classname="RangerAdmin",
                           command="configure",
                           config_file="secured.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_secured()
        self.assertNoMoreResources()

    def test_start_secured(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
                           classname="RangerAdmin",
                           command="start",
                           config_file="secured.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_secured()
        self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-start',
                                  not_if='ps -ef | grep proc_rangeradmin | grep -v grep',
                                  user='ranger')
        self.assertNoMoreResources()

    def test_stop_secured(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
                           classname="RangerAdmin",
                           command="stop",
                           config_file="secured.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-stop',
                                  user='ranger')
        self.assertNoMoreResources()

    def assert_configure_default(self):
        self.assertResourceCalled('Execute',
                                  'mysql -u root --password=aa -h localhost -s -e "select version();"',
                                  logoutput=True,
                                  environment={})
        self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
                                  content=DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-jdbc-driver.jar'))
        self.assertResourceCalled('Execute',
                                  ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar', '/usr/share/java/mysql-connector-java.jar'),
                                  not_if='test -f /usr/share/java/mysql-connector-java.jar',
                                  sudo=True,
                                  path=['/bin', '/usr/bin/'])
        self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
                                  properties=self.getConfig()['configurations']['admin-properties'])
        self.assertResourceCalled('Execute',
                                  'cd /usr/hdp/current/ranger-admin && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/ranger-admin/setup.sh',
                                  logoutput=True,
                                  environment={'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'})
        self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/xa_system.properties',
                                  properties=self.getConfig()['configurations']['ranger-site'])
        self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/ranger_webserver.properties',
                                  mode=0744,
                                  properties=self.getConfig()['configurations']['ranger-site'])

    def assert_configure_secured(self):
        self.assertResourceCalled('Execute',
                                  'mysql -u root --password=rootpassword -h localhost -s -e "select version();"',
                                  logoutput=True,
                                  environment={})
        self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
                                  content=DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-jdbc-driver.jar'))
        self.assertResourceCalled('Execute',
                                  ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar', '/usr/share/java/mysql-connector-java.jar'),
                                  not_if='test -f /usr/share/java/mysql-connector-java.jar',
                                  sudo=True,
                                  path=['/bin', '/usr/bin/'])
        self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
                                  properties=self.getConfig()['configurations']['admin-properties'])
        self.assertResourceCalled('Execute',
                                  'cd /usr/hdp/current/ranger-admin && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/ranger-admin/setup.sh',
                                  logoutput=True,
                                  environment={'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'})
        self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/xa_system.properties',
                                  properties=self.getConfig()['configurations']['ranger-site'])
        self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/ranger_webserver.properties',
                                  mode=0744,
                                  properties=self.getConfig()['configurations']['ranger-site'])

    def test_pre_rolling_upgrade_23(self):
        config_file = self.get_src_folder() + "/test/python/stacks/2.2/configs/ranger-admin-upgrade.json"
        with open(config_file, "r") as f:
            json_content = json.load(f)
        json_content['commandParams']['version'] = '2.3.0.0-1234'

        mocks_dict = {}
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_usersync.py",
                           classname="RangerAdmin",
                           command="pre_rolling_restart",
                           config_dict=json_content,
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES,
                           call_mocks=[(0, None), (0, None)],
                           mocks_dict=mocks_dict)

        self.assertResourceCalled("Execute", "hdp-select set ranger-admin 2.3.0.0-1234")

        self.assertEquals(2, mocks_dict['call'].call_count)
        self.assertEquals(
            "conf-select create-conf-dir --package ranger-admin --stack-version 2.3.0.0-1234 --conf-version 0",
            mocks_dict['call'].call_args_list[0][0][0])
        self.assertEquals(
            "conf-select set-conf-dir --package ranger-admin --stack-version 2.3.0.0-1234 --conf-version 0",
            mocks_dict['call'].call_args_list[1][0][0])
NodeList.js
"use strict"; const conversions = require("webidl-conversions"); const utils = require("./utils.js"); const implSymbol = utils.implSymbol; const ctorRegistrySymbol = utils.ctorRegistrySymbol; const interfaceName = "NodeList"; exports.is = value => { return utils.isObject(value) && utils.hasOwn(value, implSymbol) && value[implSymbol] instanceof Impl.implementation; }; exports.isImpl = value => { return utils.isObject(value) && value instanceof Impl.implementation; }; exports.convert = (value, { context = "The provided value" } = {}) => { if (exports.is(value)) { return utils.implForWrapper(value); } throw new TypeError(`${context} is not of type 'NodeList'.`); }; function makeWrapper(globalObject) { if (globalObject[ctorRegistrySymbol] === undefined) { throw new Error("Internal error: invalid global object"); } const ctor = globalObject[ctorRegistrySymbol]["NodeList"]; if (ctor === undefined) { throw new Error("Internal error: constructor NodeList is not installed on the passed global object"); } return Object.create(ctor.prototype); } exports.create = (globalObject, constructorArgs, privateData) => { const wrapper = makeWrapper(globalObject); return exports.setup(wrapper, globalObject, constructorArgs, privateData); }; exports.createImpl = (globalObject, constructorArgs, privateData) => { const wrapper = exports.create(globalObject, constructorArgs, privateData); return utils.implForWrapper(wrapper); }; exports._internalSetup = (wrapper, globalObject) => {}; exports.setup = (wrapper, globalObject, constructorArgs = [], privateData = {}) => { privateData.wrapper = wrapper; exports._internalSetup(wrapper, globalObject); Object.defineProperty(wrapper, implSymbol, { value: new Impl.implementation(globalObject, constructorArgs, privateData), configurable: true }); wrapper = new Proxy(wrapper, proxyHandler); wrapper[implSymbol][utils.wrapperSymbol] = wrapper; if (Impl.init) { Impl.init(wrapper[implSymbol]); } return wrapper; }; exports.new = globalObject => { let wrapper = makeWrapper(globalObject); exports._internalSetup(wrapper, globalObject); Object.defineProperty(wrapper, implSymbol, { value: Object.create(Impl.implementation.prototype), configurable: true }); wrapper = new Proxy(wrapper, proxyHandler); wrapper[implSymbol][utils.wrapperSymbol] = wrapper; if (Impl.init) { Impl.init(wrapper[implSymbol]); } return wrapper[implSymbol]; }; const exposed = new Set(["Window"]); exports.install = (globalObject, globalNames) => { if (!globalNames.some(globalName => exposed.has(globalName))) { return; } class NodeList { constructor() { throw new TypeError("Illegal constructor"); } item(index) { const esValue = this !== null && this !== undefined ? this : globalObject; if (!exports.is(esValue)) { throw new TypeError("'item' called on an object that is not a valid instance of NodeList."); } if (arguments.length < 1) { throw new TypeError( "Failed to execute 'item' on 'NodeList': 1 argument required, but only " + arguments.length + " present." ); } const args = []; { let curArg = arguments[0]; curArg = conversions["unsigned long"](curArg, { context: "Failed to execute 'item' on 'NodeList': parameter 1" }); args.push(curArg); } return utils.tryWrapperForImpl(esValue[implSymbol].item(...args)); } get length() { const esValue = this !== null && this !== undefined ? 
this : globalObject; if (!exports.is(esValue)) { throw new TypeError("'get length' called on an object that is not a valid instance of NodeList."); } return esValue[implSymbol]["length"]; } } Object.defineProperties(NodeList.prototype, { item: { enumerable: true }, length: { enumerable: true }, [Symbol.toStringTag]: { value: "NodeList", configurable: true }, [Symbol.iterator]: { value: Array.prototype[Symbol.iterator], configurable: true, writable: true }, keys: { value: Array.prototype.keys, configurable: true, enumerable: true, writable: true }, values: { value: Array.prototype[Symbol.iterator], configurable: true, enumerable: true, writable: true }, entries: { value: Array.prototype.entries, configurable: true, enumerable: true, writable: true }, forEach: { value: Array.prototype.forEach, configurable: true, enumerable: true, writable: true } }); if (globalObject[ctorRegistrySymbol] === undefined) { globalObject[ctorRegistrySymbol] = Object.create(null); } globalObject[ctorRegistrySymbol][interfaceName] = NodeList; Object.defineProperty(globalObject, interfaceName, { configurable: true, writable: true, value: NodeList }); }; const proxyHandler = { get(target, P, receiver) { if (typeof P === "symbol") { return Reflect.get(target, P, receiver); } const desc = this.getOwnPropertyDescriptor(target, P); if (desc === undefined) { const parent = Object.getPrototypeOf(target); if (parent === null) { return undefined; } return Reflect.get(target, P, receiver); } if (!desc.get && !desc.set) { return desc.value; } const getter = desc.get; if (getter === undefined) { return undefined; } return Reflect.apply(getter, receiver, []); }, has(target, P) { if (typeof P === "symbol") { return Reflect.has(target, P); } const desc = this.getOwnPropertyDescriptor(target, P); if (desc !== undefined) { return true; } const parent = Object.getPrototypeOf(target); if (parent !== null) { return Reflect.has(parent, P); } return false; }, ownKeys(target) { const keys = new Set(); for (const key of target[implSymbol][utils.supportedPropertyIndices]) { keys.add(`${key}`); } for (const key of Reflect.ownKeys(target)) { keys.add(key); } return [...keys]; }, getOwnPropertyDescriptor(target, P) { if (typeof P === "symbol") { return Reflect.getOwnPropertyDescriptor(target, P); } let ignoreNamedProps = false; if (utils.isArrayIndexPropName(P)) { const index = P >>> 0; const indexedValue = target[implSymbol].item(index); if (indexedValue !== null) { return { writable: false, enumerable: true, configurable: true, value: utils.tryWrapperForImpl(indexedValue) }; } ignoreNamedProps = true; } return Reflect.getOwnPropertyDescriptor(target, P); }, set(target, P, V, receiver) { if (typeof P === "symbol") { return Reflect.set(target, P, V, receiver); } // The `receiver` argument refers to the Proxy exotic object or an object // that inherits from it, whereas `target` refers to the Proxy target: if (target[implSymbol][utils.wrapperSymbol] === receiver) { } let ownDesc; if (utils.isArrayIndexPropName(P)) { const index = P >>> 0; const indexedValue = target[implSymbol].item(index); if (indexedValue !== null) { ownDesc = { writable: false, enumerable: true, configurable: true, value: utils.tryWrapperForImpl(indexedValue) }; } } if (ownDesc === undefined) { ownDesc = Reflect.getOwnPropertyDescriptor(target, P); } if (ownDesc === undefined) { const parent = Reflect.getPrototypeOf(target); if (parent !== null) { return Reflect.set(parent, P, V, receiver); } ownDesc = { writable: true, enumerable: true, configurable: true, value: undefined 
}; } if (!ownDesc.writable) { return false; } if (!utils.isObject(receiver)) { return false; } const existingDesc = Reflect.getOwnPropertyDescriptor(receiver, P); let valueDesc; if (existingDesc !== undefined) { if (existingDesc.get || existingDesc.set) { return false; } if (!existingDesc.writable) { return false; } valueDesc = { value: V }; } else { valueDesc = { writable: true, enumerable: true, configurable: true, value: V }; } return Reflect.defineProperty(receiver, P, valueDesc); }, defineProperty(target, P, desc) { if (typeof P === "symbol") { return Reflect.defineProperty(target, P, desc); }
if (utils.isArrayIndexPropName(P)) { return false; }
return Reflect.defineProperty(target, P, desc); }, deleteProperty(target, P) { if (typeof P === "symbol") { return Reflect.deleteProperty(target, P); } if (utils.isArrayIndexPropName(P)) { const index = P >>> 0; return !(target[implSymbol].item(index) !== null); } return Reflect.deleteProperty(target, P); }, preventExtensions() { return false; } }; const Impl = require("../nodes/NodeList-impl.js");
receive_sms.py
from flask import Flask, request, redirect
from twilio.twiml.messaging_response import MessagingResponse
from get_secrets import *


def main():
    resp = MessagingResponse()
    resp.message("You have reached the DogBot. Thanks for contacting us :)")
    return str(resp)


if __name__ == "__main__":
    main()
log2report_csv.py
import sys
import csv

# Parse a training log and write one CSV row per test epoch.
# A (hypothetical) matching log line, in glog format, looks like:
#   I0315 10:21:12.356 4035 solver.cpp:330] Test net output #0: accuracy = 0.91
input_file = sys.argv[1]
output_file = sys.argv[2]
header = 'I0315'
if len(sys.argv) > 3:
    header = sys.argv[3]

keys = []
epoch_data = []
epoch_row = []
with open(input_file, 'r') as f:
    for ln in f:
        line = ln.rstrip('\n')
        if line.startswith(header) and 'Test net output' in line:
            fields = line.split(':')
            spaces = fields[-2].split(' ')
            num = int(spaces[-1][1:])  # output index after the '#'
            attr = fields[-1]
            if num == 0 and epoch_row != []:
                epoch_data.append(epoch_row)
                epoch_row = []
            if 'loss' in attr:
                key = attr.split('(')[0].split('=')[0]
                loss = float(attr.split('(')[0].split('=')[1])
                epoch_row.append(loss)
            else:
                key = attr.split('=')[0]
                epoch_row.append(float(attr.split('=')[1]))
            # Collect column names only while parsing the first epoch.
            if not epoch_data:
                keys.append(key)
if epoch_row:  # don't drop the final epoch
    epoch_data.append(epoch_row)

print(keys)
with open(output_file, 'w', newline='') as f:
    csv_writer = csv.writer(f, delimiter=',')
    csv_writer.writerow(keys)
    csv_writer.writerows(epoch_data)
print('finished')
plot_notebook.py
# -*- coding: utf-8 -*-
"""
========================
Notebook styled examples
========================

The gallery is capable of transforming python files into reStructuredText
files with a notebook structure. For this to be used you need to respect some
syntax rules.

It makes a lot of sense to contrast this output rst file with the
:download:`original python script <plot_notebook.py>` to get a better feeling
of the necessary file structure.

Anything before the python script docstring is ignored by sphinx-gallery and
will not appear in the rst file, nor will it be executed. This python
docstring requires a reStructuredText title to name the file and correctly
build the reference links.

Once you close the docstring you would be writing python code. This code gets
executed by sphinx-gallery, shows the plots, and attaches the generating
code.

Nevertheless you can break your code into blocks and give the rendered file a
notebook style. In this case you have to include a code comment breaker: a
line of at least 20 hashes, after which every comment starts with a new hash.

As in this example, we start by first writing this module-style docstring,
then for the first code block we write the example file author and script
license, continued by the import-modules instructions.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comments and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter and that
# continues to start with a **comment hash and space** (respecting code style)
# is text that has to be rendered in html format. Keep in mind to always keep
# your comments together by comment hashes. That means to break a paragraph
# you still need to comment that line break.
#
# In this example the next block of code produces some plottable data. Code
# is executed, the figure is saved, and then the code is presented next,
# followed by the inlined figure.

x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)

plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')

###########################################################################
# Again it is possible to continue the discussion with a new python string.
# This time to introduce the next code block, which generates 2 separate
# figures.

plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')

##########################################################################
# There are some subtle differences between rendered html comment strings
# and code comment strings which I'll demonstrate below. (Some of this only
# makes sense if you look at the
# :download:`raw python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.


def dummy():
    """Dummy function to make sure docstrings don't get rendered as text"""
    pass

# Code comments not preceded by the hash splitter are left in code blocks.

string = """
Triple-quoted string which tries to break parser but doesn't.
"""

############################################################################
# Finally, I'll call ``show`` at the end just so someone running the python
# code directly will see the plots; this is not necessary for creating the
# docs.

plt.show()
chase_lev.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A lock-free concurrent work-stealing deque //! //! This module contains a hybrid implementation of the Chase-Lev work stealing deque //! described in ["Dynamic Circular Work-Stealing Deque"][chase_lev] and the improved version //! described in ["Correct and Efficient Work-Stealing for Weak Memory Models"][weak_chase_lev]. //! The implementation is heavily based on the pseudocode found in the papers. //! //! # Example //! //! ``` //! use crossbeam::sync::chase_lev; //! let (mut worker, stealer) = chase_lev::deque(); //! //! // Only the worker may push/try_pop //! worker.push(1); //! worker.try_pop(); //! //! // Stealers take data from the other end of the deque //! worker.push(1); //! stealer.steal(); //! //! // Stealers can be cloned to have many stealers stealing in parallel //! worker.push(1); //! let stealer2 = stealer.clone(); //! stealer2.steal(); //! ``` //! //! [chase_lev]: http://neteril.org/~jeremie/Dynamic_Circular_Work_Queue.pdf //! [weak_chase_lev]: http://www.di.ens.fr/~zappa/readings/ppopp13.pdf use std::cell::UnsafeCell; use std::fmt; use std::mem; use std::ptr; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use std::sync::atomic::{AtomicIsize, fence}; use std::sync::Arc; use mem::epoch::{self, Atomic, Shared, Owned}; // Once the queue is less than 1/K full, then it will be downsized. Note that // the deque requires that this number be less than 2. const K: isize = 4; // Minimum number of bits that a buffer size should be. No buffer will resize to // under this value, and all deques will initially contain a buffer of this // size. // // The size in question is 1 << MIN_BITS const MIN_BITS: u32 = 7; #[derive(Debug)] struct Deque<T> { bottom: AtomicIsize, top: AtomicIsize, array: Atomic<Buffer<T>>, } // FIXME: can these constraints be relaxed? unsafe impl<T: Send> Send for Deque<T> {} unsafe impl<T: Send> Sync for Deque<T> {} /// Worker half of the work-stealing deque. This worker has exclusive access to /// one side of the deque, and uses `push` and `try_pop` method to manipulate it. /// /// There may only be one worker per deque, and operations on the worker /// require mutable access to the worker itself. #[derive(Debug)] pub struct Worker<T> { deque: Arc<Deque<T>>, } /// The stealing half of the work-stealing deque. Stealers have access to the /// opposite end of the deque from the worker, and they only have access to the /// `steal` method. /// /// Stealers can be cloned to have more than one handle active at a time. #[derive(Debug)] pub struct Stealer<T> { deque: Arc<Deque<T>>, } /// When stealing some data, this is an enumeration of the possible outcomes. #[derive(PartialEq, Eq, Debug)] pub enum Steal<T> { /// The deque was empty at the time of stealing Empty, /// The stealer lost the race for stealing data, and a retry may return more /// data. Abort, /// The stealer has successfully stolen some data. Data(T), } // An internal buffer used by the chase-lev deque. 
This structure is actually // implemented as a circular buffer, and is used as the intermediate storage of // the data in the deque. // // This Vec<T> always has a length of 0, the backing buffer is just used by the // code below. struct Buffer<T> { storage: UnsafeCell<Vec<T>>, log_size: u32, } impl<T> fmt::Debug for Buffer<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Buffer {{ ... }}") } } impl<T> Worker<T> { /// Pushes data onto the front of this work queue. pub fn push(&mut self, t: T) { unsafe { self.deque.push(t) } } /// Pops data off the front of the work queue, returning `None` on an empty /// queue. pub fn try_pop(&mut self) -> Option<T> { unsafe { self.deque.try_pop() } } } impl<T> Stealer<T> { /// Steals work off the end of the queue (opposite of the worker's end) pub fn steal(&self) -> Steal<T> { self.deque.steal() } } impl<T> Clone for Stealer<T> { fn clone(&self) -> Stealer<T> { Stealer { deque: self.deque.clone() } } } /// Creates a new empty deque pub fn deque<T>() -> (Worker<T>, Stealer<T>) { let a = Arc::new(Deque::new()); let b = a.clone(); (Worker { deque: a }, Stealer { deque: b }) } // Almost all of this code can be found directly in the paper so I'm not // personally going to heavily comment what's going on here. impl<T> Deque<T> { fn new() -> Deque<T> { let array = Atomic::null(); array.store(Some(Owned::new(Buffer::new(MIN_BITS))), SeqCst); Deque { bottom: AtomicIsize::new(0), top: AtomicIsize::new(0), array: array, } } unsafe fn push(&self, data: T) { let guard = epoch::pin(); let mut b = self.bottom.load(Relaxed); let t = self.top.load(Acquire); let mut a = self.array.load(Relaxed, &guard).unwrap(); let size = b - t; if size >= (a.size() as isize) - 1 { // You won't find this code in the chase-lev deque paper. This is // alluded to in a small footnote, however. We always free a buffer // when growing in order to prevent leaks. a = self.swap_buffer(a, a.resize(b, t, 1), &guard); // reload the bottom counter, since swap_buffer modifies it. b = self.bottom.load(Relaxed); } a.put(b, data); fence(Release); self.bottom.store(b + 1, Relaxed); } unsafe fn try_pop(&self) -> Option<T> { let guard = epoch::pin(); let b = self.bottom.load(Relaxed) - 1; let a = self.array.load(Relaxed, &guard).unwrap(); self.bottom.store(b, Relaxed); fence(SeqCst); // the store to bottom must occur before loading top. let t = self.top.load(Relaxed); let size = b - t; if size >= 0 { // non-empty case let mut data = Some(a.get(b)); if size == 0 { // last element in queue, check for races. if self.top.compare_and_swap(t, t + 1, SeqCst) != t { // lost the race. mem::forget(data.take()); } // set the queue to a canonically empty state. self.bottom.store(b + 1, Relaxed); } else { self.maybe_shrink(b, t, &guard); } data } else { // empty queue. revert the decrement of "b" and try to shrink. // // the original chase_lev paper uses "t" here, but the new one uses "b + 1". // don't worry, they're the same thing: pop and steal operations will never leave // the top counter greater than the bottom counter. After we decrement "b" at // the beginning of this function, the lowest possible value it could hold here is "t - 1". // That's also the only value that could cause this branch to be taken. self.bottom.store(b + 1, Relaxed); None } } fn steal(&self) -> Steal<T> { let guard = epoch::pin(); let t = self.top.load(Acquire); fence(SeqCst); // top must be loaded before bottom. 
let b = self.bottom.load(Acquire); let size = b - t; if size <= 0 { return Steal::Empty } unsafe { // while the paper uses a "consume" ordering here, the closest thing we have // available is Acquire, which is strictly stronger. let a = self.array.load(Acquire, &guard).unwrap(); let data = a.get(t); // we may be racing against other steals and a pop. if self.top.compare_and_swap(t, t + 1, SeqCst) == t { Steal::Data(data) } else { mem::forget(data); // someone else stole this value Steal::Abort } } } // potentially shrink the array. This can be called only from the worker. unsafe fn maybe_shrink(&self, b: isize, t: isize, guard: &epoch::Guard) { let a = self.array.load(SeqCst, guard).unwrap(); let size = b - t; if size < (a.size() as isize) / K && size > (1 << MIN_BITS) { self.swap_buffer(a, a.resize(b, t, -1), guard); } } // Helper routine not mentioned in the paper which is used in growing and // shrinking buffers to swap in a new buffer into place. // // As a bit of a recap, stealers can continue using buffers after this // method has called 'unlinked' on it. The continued usage is simply a read // followed by a forget, but we must make sure that the memory can continue // to be read after we flag this buffer for reclamation. All stealers, // however, have their own epoch pinned during this time so the buffer will // just naturally be free'd once all concurrent stealers have exited. // // This method may only be called safely from the workers due to the way it modifies // the array pointer. unsafe fn swap_buffer<'a>(&self, old: Shared<'a, Buffer<T>>, buf: Buffer<T>, guard: &'a epoch::Guard) -> Shared<'a, Buffer<T>> { let newbuf = Owned::new(buf); let newbuf = self.array.store_and_ref(newbuf, Release, &guard); guard.unlinked(old); newbuf } } impl<T> Drop for Deque<T> { fn drop(&mut self) { let guard = epoch::pin(); // Arc enforces that we have truly exclusive access here. let t = self.top.load(Relaxed); let b = self.bottom.load(Relaxed); let a = self.array.swap(None, Relaxed, &guard).unwrap(); // Free whatever is leftover in the dequeue, then free the backing // memory itself unsafe { for i in t..b { drop(a.get(i)); } guard.unlinked(a); } } } impl<T> Buffer<T> { fn new(log_size: u32) -> Buffer<T> { Buffer { storage: UnsafeCell::new(Vec::with_capacity(1 << log_size)), log_size: log_size, } } fn size(&self) -> usize { unsafe { (*self.storage.get()).capacity() } } fn mask(&self) -> isize { unsafe { ((*self.storage.get()).capacity() - 1) as isize } } unsafe fn elem(&self, i: isize) -> *mut T { (*self.storage.get()).as_mut_ptr().offset(i & self.mask()) } // This does not protect against loading duplicate values of the same cell, // nor does this clear out the contents contained within. Hence, this is a // very unsafe method which the caller needs to treat specially in case a // race is lost. unsafe fn get(&self, i: isize) -> T { ptr::read(self.elem(i)) } // Unsafe because this unsafely overwrites possibly uninitialized or // initialized data. unsafe fn put(&self, i: isize, t: T) { ptr::write(self.elem(i), t); } // Again, unsafe because this has incredibly dubious ownership violations. // It is assumed that this buffer is immediately dropped. 
unsafe fn resize(&self, b: isize, t: isize, delta: i32) -> Buffer<T> { let buf = Buffer::new(((self.log_size as i32) + delta) as u32); for i in t..b { buf.put(i, self.get(i)); } return buf; } } #[cfg(test)] mod tests { extern crate rand; use super::{deque, Worker, Stealer, Steal}; use std::thread; use std::sync::Arc; use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT}; use std::sync::atomic::Ordering::SeqCst; use self::rand::Rng; #[test] fn smoke() { let (mut w, s) = deque(); assert_eq!(w.try_pop(), None); assert_eq!(s.steal(), Steal::Empty); w.push(1); assert_eq!(w.try_pop(), Some(1)); w.push(1); assert_eq!(s.steal(), Steal::Data(1)); w.push(1); assert_eq!(s.clone().steal(), Steal::Data(1)); } #[test] fn stealpush() { static AMT: isize = 100000; let (mut w, s) = deque(); let t = thread::spawn(move || { let mut left = AMT; while left > 0 { match s.steal() { Steal::Data(i) => { assert_eq!(i, 1); left -= 1; } Steal::Abort | Steal::Empty => {} } } }); for _ in 0..AMT { w.push(1); } t.join().unwrap(); } #[test] fn stealpush_large() { static AMT: isize = 100000; let (mut w, s) = deque(); let t = thread::spawn(move || { let mut left = AMT; while left > 0 { match s.steal() { Steal::Data((1, 10)) => { left -= 1; } Steal::Data(..) => panic!(), Steal::Abort | Steal::Empty => {} } } }); for _ in 0..AMT { w.push((1, 10)); } t.join().unwrap(); } fn stampede(mut w: Worker<Box<isize>>, s: Stealer<Box<isize>>, nthreads: isize, amt: usize) { for _ in 0..amt { w.push(Box::new(20)); } let remaining = Arc::new(AtomicUsize::new(amt)); let threads = (0..nthreads).map(|_| { let remaining = remaining.clone(); let s = s.clone(); thread::spawn(move || { while remaining.load(SeqCst) > 0 { match s.steal() { Steal::Data(val) => { if *val == 20 { remaining.fetch_sub(1, SeqCst); } else { panic!() } } Steal::Abort | Steal::Empty => {} } } }) }).collect::<Vec<_>>(); while remaining.load(SeqCst) > 0 { if let Some(val) = w.try_pop() { if *val == 20 { remaining.fetch_sub(1, SeqCst); } else { panic!() } } } for thread in threads.into_iter() { thread.join().unwrap(); } } #[test] fn run_stampede() { let (w, s) = deque(); stampede(w, s, 8, 10000); } #[test] fn many_stampede() { static AMT: usize = 4; let threads = (0..AMT).map(|_| { let (w, s) = deque(); thread::spawn(|| { stampede(w, s, 4, 10000); }) }).collect::<Vec<_>>(); for thread in threads.into_iter() { thread.join().unwrap(); } } #[test] fn stress() { static AMT: isize = 100000; static NTHREADS: isize = 8; static DONE: AtomicBool = ATOMIC_BOOL_INIT; static HITS: AtomicUsize = ATOMIC_USIZE_INIT; let (mut w, s) = deque(); let threads = (0..NTHREADS).map(|_| { let s = s.clone(); thread::spawn(move || { loop { match s.steal() { Steal::Data(2) => { HITS.fetch_add(1, SeqCst); } Steal::Data(..) => panic!(), _ if DONE.load(SeqCst) => break, _ => {} } } }) }).collect::<Vec<_>>(); let mut rng = rand::thread_rng(); let mut expected = 0; while expected < AMT { if rng.gen_range(0, 3) == 2 { match w.try_pop() { None => {} Some(2) => { HITS.fetch_add(1, SeqCst); }, Some(_) => panic!(), } } else { expected += 1; w.push(2); } } while HITS.load(SeqCst) < AMT as usize { match w.try_pop() { None => {} Some(2) => { HITS.fetch_add(1, SeqCst); }, Some(_) => panic!(), } } DONE.store(true, SeqCst); for thread in threads.into_iter() { thread.join().unwrap(); } assert_eq!(HITS.load(SeqCst), expected as usize); } #[test] fn no_starvation()
{ static AMT: isize = 10000; static NTHREADS: isize = 4; static DONE: AtomicBool = ATOMIC_BOOL_INIT; let (mut w, s) = deque(); let (threads, hits): (Vec<_>, Vec<_>) = (0..NTHREADS).map(|_| { let s = s.clone(); let ctr = Arc::new(AtomicUsize::new(0)); let ctr2 = ctr.clone(); (thread::spawn(move || { loop { match s.steal() { Steal::Data((1, 2)) => { ctr.fetch_add(1, SeqCst); } Steal::Data(..) => panic!(), _ if DONE.load(SeqCst) => break, _ => {} } } }), ctr2) }).unzip(); let mut rng = rand::thread_rng(); let mut myhit = false; 'outer: loop { for _ in 0..rng.gen_range(0, AMT) { if !myhit && rng.gen_range(0, 3) == 2 { match w.try_pop() { None => {} Some((1, 2)) => myhit = true, Some(_) => panic!(), } } else { w.push((1, 2)); } } for slot in hits.iter() { let amt = slot.load(SeqCst); if amt == 0 { continue 'outer; } } if myhit { break } } DONE.store(true, SeqCst); for thread in threads.into_iter() { thread.join().unwrap(); } }
}
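Since the `Steal` enum above is the main surface stealers interact with, here is a minimal consumer sketch using the module's own `Stealer` and `Steal` types; the `drain` function itself is illustrative, not part of the module:

// Minimal consumer sketch for the three Steal outcomes documented above.
fn drain<T>(stealer: &Stealer<T>) -> Vec<T> {
    let mut out = Vec::new();
    loop {
        match stealer.steal() {
            Steal::Data(v) => out.push(v), // successfully stole a value
            Steal::Abort => continue,      // lost a race; a retry may succeed
            Steal::Empty => break,         // deque was empty when we looked
        }
    }
    out
}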
test_instance_hardness_threshold.py
"""Test the module .""" # Authors: Guillaume Lemaitre <[email protected]> # Christos Aridas # License: MIT import pytest import numpy as np from sklearn.ensemble import GradientBoostingClassifier from imblearn.under_sampling import InstanceHardnessThreshold RND_SEED = 0 X = np.array( [ [-0.3879569, 0.6894251], [-0.09322739, 1.28177189], [-0.77740357, 0.74097941], [0.91542919, -0.65453327], [-0.03852113, 0.40910479], [-0.43877303, 1.07366684], [-0.85795321, 0.82980738], [-0.18430329, 0.52328473], [-0.30126957, -0.66268378], [-0.65571327, 0.42412021], [-0.28305528, 0.30284991], [0.20246714, -0.34727125], [1.06446472, -1.09279772], [0.30543283, -0.02589502], [-0.00717161, 0.00318087], ] ) Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0]) ESTIMATOR = GradientBoostingClassifier(random_state=RND_SEED) def test_iht_init(): sampling_strategy = "auto" iht = InstanceHardnessThreshold( ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED ) assert iht.sampling_strategy == sampling_strategy assert iht.random_state == RND_SEED def test_iht_fit_resample(): iht = InstanceHardnessThreshold(ESTIMATOR, random_state=RND_SEED) X_resampled, y_resampled = iht.fit_resample(X, Y) assert X_resampled.shape == (12, 2) assert y_resampled.shape == (12,) def test_iht_fit_resample_half(): sampling_strategy = {0: 6, 1: 8} iht = InstanceHardnessThreshold( ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED ) X_resampled, y_resampled = iht.fit_resample(X, Y) assert X_resampled.shape == (14, 2) assert y_resampled.shape == (14,) def test_iht_fit_resample_class_obj():
def test_iht_fit_resample_wrong_class_obj(): from sklearn.cluster import KMeans est = KMeans() iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED) with pytest.raises(ValueError, match="Invalid parameter `estimator`"): iht.fit_resample(X, Y)
est = GradientBoostingClassifier(random_state=RND_SEED) iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED) X_resampled, y_resampled = iht.fit_resample(X, Y) assert X_resampled.shape == (12, 2) assert y_resampled.shape == (12,)
mod.rs
mod stream;

use std::future::Future;

use self::stream::AsyncStream;
use crate::{cmap::conn::StreamOptions, error::Result};

/// An abstract handle to the async runtime.
#[derive(Clone, Copy, Debug)]
pub(crate) enum AsyncRuntime {
    /// Represents the `tokio` runtime.
    #[cfg(feature = "tokio-runtime")]
    Tokio,

    /// Represents the `async-std` runtime.
    #[cfg(feature = "async-std-runtime")]
    AsyncStd,
}

impl AsyncRuntime {
    /// Spawn a task in the background to run a future.
    pub(crate) fn execute<F>(self, fut: F)
    where
        F: Future<Output = ()> + Send + 'static,
    {
        match self {
            #[cfg(feature = "tokio-runtime")]
            Self::Tokio => {
                tokio::task::spawn(fut);
            }

            #[cfg(feature = "async-std-runtime")]
            Self::AsyncStd => {
                async_std::task::spawn(fut);
            }
        }
    }

    /// Run a future in the foreground, blocking on it completing.
    pub(crate) fn block_on<F, T>(self, fut: F) -> T
    where
        F: Future<Output = T> + Send + 'static,
        T: Send + 'static,
    {
        #[cfg(all(feature = "tokio-runtime", not(feature = "async-std-runtime")))]
        {
            use tokio::runtime::Handle;
            Handle::current().enter(|| futures::executor::block_on(fut))
        }

        #[cfg(feature = "async-std-runtime")]
        {
            async_std::task::block_on(fut)
        }
    }

    /// Create and connect a new `AsyncStream`.
    pub(crate) async fn connect_stream(self, options: StreamOptions) -> Result<AsyncStream> {
        AsyncStream::connect(options).await
    }
}
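As a crate-internal usage sketch — assuming the `tokio-runtime` feature is enabled so the `Tokio` variant exists, and that a Tokio runtime context is current (which the `block_on` path above requires):

// Illustrative only: exercises the two entry points defined above.
fn runtime_usage_sketch() {
    let runtime = AsyncRuntime::Tokio;

    // Fire-and-forget background work.
    runtime.execute(async {
        // ... any future with Output = () ...
    });

    // Block the current thread until a future resolves.
    let value = runtime.block_on(async { 42 });
    assert_eq!(value, 42);
}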
cmr2.rs
#[doc = "Reader of register CMR2"] pub type R = crate::R<u32, super::CMR2>; #[doc = "Writer for register CMR2"] pub type W = crate::W<u32, super::CMR2>; #[doc = "Register CMR2 `reset()`'s with value 0"] impl crate::ResetValue for super::CMR2 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Channel Pre-scaler\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CPRE_A { #[doc = "0: Peripheral clock"] MCK = 0, #[doc = "1: Peripheral clock/2"] MCK_DIV_2 = 1, #[doc = "2: Peripheral clock/4"] MCK_DIV_4 = 2, #[doc = "3: Peripheral clock/8"] MCK_DIV_8 = 3, #[doc = "4: Peripheral clock/16"] MCK_DIV_16 = 4, #[doc = "5: Peripheral clock/32"] MCK_DIV_32 = 5, #[doc = "6: Peripheral clock/64"] MCK_DIV_64 = 6, #[doc = "7: Peripheral clock/128"] MCK_DIV_128 = 7, #[doc = "8: Peripheral clock/256"] MCK_DIV_256 = 8, #[doc = "9: Peripheral clock/512"] MCK_DIV_512 = 9, #[doc = "10: Peripheral clock/1024"] MCK_DIV_1024 = 10, #[doc = "11: Clock A"] CLKA = 11, #[doc = "12: Clock B"] CLKB = 12, } impl From<CPRE_A> for u8 { #[inline(always)] fn from(variant: CPRE_A) -> Self { variant as _ } } #[doc = "Reader of field `CPRE`"] pub type CPRE_R = crate::R<u8, CPRE_A>; impl CPRE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, CPRE_A> { use crate::Variant::*; match self.bits { 0 => Val(CPRE_A::MCK), 1 => Val(CPRE_A::MCK_DIV_2), 2 => Val(CPRE_A::MCK_DIV_4), 3 => Val(CPRE_A::MCK_DIV_8), 4 => Val(CPRE_A::MCK_DIV_16), 5 => Val(CPRE_A::MCK_DIV_32), 6 => Val(CPRE_A::MCK_DIV_64), 7 => Val(CPRE_A::MCK_DIV_128), 8 => Val(CPRE_A::MCK_DIV_256), 9 => Val(CPRE_A::MCK_DIV_512), 10 => Val(CPRE_A::MCK_DIV_1024), 11 => Val(CPRE_A::CLKA), 12 => Val(CPRE_A::CLKB), i => Res(i), } } #[doc = "Checks if the value of the field is `MCK`"] #[inline(always)] pub fn is_mck(&self) -> bool { *self == CPRE_A::MCK } #[doc = "Checks if the value of the field is `MCK_DIV_2`"] #[inline(always)] pub fn is_mck_div_2(&self) -> bool { *self == CPRE_A::MCK_DIV_2 } #[doc = "Checks if the value of the field is `MCK_DIV_4`"] #[inline(always)] pub fn is_mck_div_4(&self) -> bool { *self == CPRE_A::MCK_DIV_4 } #[doc = "Checks if the value of the field is `MCK_DIV_8`"] #[inline(always)] pub fn is_mck_div_8(&self) -> bool { *self == CPRE_A::MCK_DIV_8 } #[doc = "Checks if the value of the field is `MCK_DIV_16`"] #[inline(always)] pub fn is_mck_div_16(&self) -> bool { *self == CPRE_A::MCK_DIV_16 } #[doc = "Checks if the value of the field is `MCK_DIV_32`"] #[inline(always)] pub fn is_mck_div_32(&self) -> bool { *self == CPRE_A::MCK_DIV_32 } #[doc = "Checks if the value of the field is `MCK_DIV_64`"] #[inline(always)] pub fn is_mck_div_64(&self) -> bool { *self == CPRE_A::MCK_DIV_64 } #[doc = "Checks if the value of the field is `MCK_DIV_128`"] #[inline(always)] pub fn is_mck_div_128(&self) -> bool { *self == CPRE_A::MCK_DIV_128 } #[doc = "Checks if the value of the field is `MCK_DIV_256`"] #[inline(always)] pub fn is_mck_div_256(&self) -> bool { *self == CPRE_A::MCK_DIV_256 } #[doc = "Checks if the value of the field is `MCK_DIV_512`"] #[inline(always)] pub fn is_mck_div_512(&self) -> bool { *self == CPRE_A::MCK_DIV_512 } #[doc = "Checks if the value of the field is `MCK_DIV_1024`"] #[inline(always)] pub fn is_mck_div_1024(&self) -> bool
#[doc = "Checks if the value of the field is `CLKA`"] #[inline(always)] pub fn is_clka(&self) -> bool { *self == CPRE_A::CLKA } #[doc = "Checks if the value of the field is `CLKB`"] #[inline(always)] pub fn is_clkb(&self) -> bool { *self == CPRE_A::CLKB } } #[doc = "Write proxy for field `CPRE`"] pub struct CPRE_W<'a> { w: &'a mut W, } impl<'a> CPRE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CPRE_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Peripheral clock"] #[inline(always)] pub fn mck(self) -> &'a mut W { self.variant(CPRE_A::MCK) } #[doc = "Peripheral clock/2"] #[inline(always)] pub fn mck_div_2(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_2) } #[doc = "Peripheral clock/4"] #[inline(always)] pub fn mck_div_4(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_4) } #[doc = "Peripheral clock/8"] #[inline(always)] pub fn mck_div_8(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_8) } #[doc = "Peripheral clock/16"] #[inline(always)] pub fn mck_div_16(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_16) } #[doc = "Peripheral clock/32"] #[inline(always)] pub fn mck_div_32(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_32) } #[doc = "Peripheral clock/64"] #[inline(always)] pub fn mck_div_64(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_64) } #[doc = "Peripheral clock/128"] #[inline(always)] pub fn mck_div_128(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_128) } #[doc = "Peripheral clock/256"] #[inline(always)] pub fn mck_div_256(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_256) } #[doc = "Peripheral clock/512"] #[inline(always)] pub fn mck_div_512(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_512) } #[doc = "Peripheral clock/1024"] #[inline(always)] pub fn mck_div_1024(self) -> &'a mut W { self.variant(CPRE_A::MCK_DIV_1024) } #[doc = "Clock A"] #[inline(always)] pub fn clka(self) -> &'a mut W { self.variant(CPRE_A::CLKA) } #[doc = "Clock B"] #[inline(always)] pub fn clkb(self) -> &'a mut W { self.variant(CPRE_A::CLKB) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f); self.w } } #[doc = "Reader of field `CALG`"] pub type CALG_R = crate::R<bool, bool>; #[doc = "Write proxy for field `CALG`"] pub struct CALG_W<'a> { w: &'a mut W, } impl<'a> CALG_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8); self.w } } #[doc = "Reader of field `CPOL`"] pub type CPOL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `CPOL`"] pub struct CPOL_W<'a> { w: &'a mut W, } impl<'a> CPOL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9); self.w } } #[doc = "Reader of field `CES`"] pub type CES_R = crate::R<bool, bool>; #[doc = "Write proxy for field `CES`"] pub struct CES_W<'a> { w: &'a mut W, } impl<'a> 
CES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10); self.w } } #[doc = "Reader of field `UPDS`"] pub type UPDS_R = crate::R<bool, bool>; #[doc = "Write proxy for field `UPDS`"] pub struct UPDS_W<'a> { w: &'a mut W, } impl<'a> UPDS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11); self.w } } #[doc = "Reader of field `DTE`"] pub type DTE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `DTE`"] pub struct DTE_W<'a> { w: &'a mut W, } impl<'a> DTE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16); self.w } } #[doc = "Reader of field `DTHI`"] pub type DTHI_R = crate::R<bool, bool>; #[doc = "Write proxy for field `DTHI`"] pub struct DTHI_W<'a> { w: &'a mut W, } impl<'a> DTHI_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17); self.w } } #[doc = "Reader of field `DTLI`"] pub type DTLI_R = crate::R<bool, bool>; #[doc = "Write proxy for field `DTLI`"] pub struct DTLI_W<'a> { w: &'a mut W, } impl<'a> DTLI_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18); self.w } } impl R { #[doc = "Bits 0:3 - Channel Pre-scaler"] #[inline(always)] pub fn cpre(&self) -> CPRE_R { CPRE_R::new((self.bits & 0x0f) as u8) } #[doc = "Bit 8 - Channel Alignment"] #[inline(always)] pub fn calg(&self) -> CALG_R { CALG_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Channel Polarity"] #[inline(always)] pub fn cpol(&self) -> CPOL_R { CPOL_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Counter Event Selection"] #[inline(always)] pub fn ces(&self) -> CES_R { CES_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Update Selection"] #[inline(always)] pub fn upds(&self) -> UPDS_R { UPDS_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 16 - Dead-Time Generator Enable"] #[inline(always)] pub fn dte(&self) -> DTE_R { DTE_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 
- Dead-Time PWMHx Output Inverted"] #[inline(always)] pub fn dthi(&self) -> DTHI_R { DTHI_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - Dead-Time PWMLx Output Inverted"] #[inline(always)] pub fn dtli(&self) -> DTLI_R { DTLI_R::new(((self.bits >> 18) & 0x01) != 0) } } impl W { #[doc = "Bits 0:3 - Channel Pre-scaler"] #[inline(always)] pub fn cpre(&mut self) -> CPRE_W { CPRE_W { w: self } } #[doc = "Bit 8 - Channel Alignment"] #[inline(always)] pub fn calg(&mut self) -> CALG_W { CALG_W { w: self } } #[doc = "Bit 9 - Channel Polarity"] #[inline(always)] pub fn cpol(&mut self) -> CPOL_W { CPOL_W { w: self } } #[doc = "Bit 10 - Counter Event Selection"] #[inline(always)] pub fn ces(&mut self) -> CES_W { CES_W { w: self } } #[doc = "Bit 11 - Update Selection"] #[inline(always)] pub fn upds(&mut self) -> UPDS_W { UPDS_W { w: self } } #[doc = "Bit 16 - Dead-Time Generator Enable"] #[inline(always)] pub fn dte(&mut self) -> DTE_W { DTE_W { w: self } } #[doc = "Bit 17 - Dead-Time PWMHx Output Inverted"] #[inline(always)] pub fn dthi(&mut self) -> DTHI_W { DTHI_W { w: self } } #[doc = "Bit 18 - Dead-Time PWMLx Output Inverted"] #[inline(always)] pub fn dtli(&mut self) -> DTLI_W { DTLI_W { w: self } } }
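As a usage sketch of this generated register API — the `pwm` handle and its type name below are hypothetical stand-ins for the owning peripheral struct, which is generated elsewhere in this crate:

// Hypothetical usage; `Pwm` stands in for the peripheral type that owns
// CMR2, and `cmr2` for its register field.
fn configure_channel(pwm: &Pwm) {
    pwm.cmr2.write(|w| {
        w.cpre().mck_div_4()  // channel clock = peripheral clock / 4
         .calg().set_bit()    // center-aligned waveform
         .cpol().set_bit()    // inverted channel polarity
    });
}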
redis.ts
import * as IORedis from "ioredis"; import * as dotProp from "dot-prop"; import { api, config, id, log, Initializer, redis, utils } from "../index"; import * as RedisModule from "./../modules/redis"; export interface RedisApi { clients: { [key: string]: IORedis.Redis; }; subscriptionHandlers: { [key: string]: Function; }; rpcCallbacks: { [key: string]: any; }; status: { subscribed: boolean; }; } /** * Redis helpers and connections. */ export class
RedisInitializer
extends Initializer { constructor() { super(); this.name = "redis"; this.loadPriority = 200; this.startPriority = 101; this.stopPriority = 99999; } async initialize() { api.redis = { clients: {}, subscriptionHandlers: {}, rpcCallbacks: {}, status: { subscribed: false, }, }; api.redis.subscriptionHandlers.do = async ( message: RedisModule.redis.PubSubMessage ) => { if ( !message.connectionId || (api.connections && api.connections.connections[message.connectionId]) ) { const cmdParts = message.method.split("."); const cmd = cmdParts.shift(); if (cmd !== "api") { throw new Error( "cannot operate on a method outside of the api object" ); } const callableApi = Object.assign(api, { log }); const method: Function = dotProp.get(callableApi, cmdParts.join(".")); let args = message.args; if (args === null) { args = []; } if (!Array.isArray(args)) { args = [args]; } if (method) { const response = await method.apply(null, args); await redis.respondCluster(message.messageId, response); } else { log("RPC method `" + cmdParts.join(".") + "` not found", "crit"); } } }; api.redis.subscriptionHandlers.doResponse = function ( message: RedisModule.redis.PubSubMessage ) { if (api.redis.rpcCallbacks[message.messageId]) { const { resolve, timer } = api.redis.rpcCallbacks[message.messageId]; clearTimeout(timer); resolve(message.response); delete api.redis.rpcCallbacks[message.messageId]; } }; const connectionNames = ["client", "subscriber", "tasks"] as const; for (const r of connectionNames) { if (config.redis[r].buildNew === true) { const args = config.redis[r].args; api.redis.clients[r] = new config.redis[r].konstructor( args[0], args[1], args[2] ); api.redis.clients[r].on("error", (error) => { log(`Redis connection \`${r}\` error`, "alert", error); }); api.redis.clients[r].on("connect", () => { log(`Redis connection \`${r}\` connected`, "debug"); }); api.redis.clients[r].on("ready", () => { log(`Redis connection \`${r}\` ready`, "debug"); }); api.redis.clients[r].on("close", () => { log(`Redis connection \`${r}\` closed`, "debug"); }); api.redis.clients[r].on("end", () => { log(`Redis connection \`${r}\` ended`, "debug"); }); api.redis.clients[r].on("reconnecting", () => { log(`Redis connection \`${r}\` reconnecting`, "info"); }); } else { api.redis.clients[r] = config.redis[r].konstructor( config.redis[r].args ); api.redis.clients[r].on("error", (error) => { log(`Redis connection \`${r}\` error`, "alert", error); }); log(`Redis connection \`${r}\` connected`, "debug"); } if (r !== "subscriber") await api.redis.clients[r].get("_test"); } if (!api.redis.status.subscribed) { await api.redis.clients.subscriber.subscribe(config.general.channel); api.redis.status.subscribed = true; const messageHandler = async ( messageChannel: string, stringifiedMessage: string ) => { let message: RedisModule.redis.PubSubMessage; try { message = JSON.parse(stringifiedMessage); } catch (e) { message = {}; } if ( messageChannel === config.general.channel && message.serverToken === config.general.serverToken ) { if (api.redis.subscriptionHandlers[message.messageType]) { await api.redis.subscriptionHandlers[message.messageType](message); } } }; api.redis.clients.subscriber.on("message", messageHandler); } } async start() { await redis.doCluster("api.log", [ `actionhero member ${id} has joined the cluster`, ]); } async stop() { await api.redis.clients.subscriber.unsubscribe(); api.redis.status.subscribed = false; await redis.doCluster("api.log", [ `actionhero member ${id} has left the cluster`, ]); await 
utils.sleep(config.redis.stopTimeout); // allow some time for the goodbye message to propagate const keys = Object.keys(api.redis.clients); for (const i in keys) { const client = api.redis.clients[keys[i]]; //@ts-ignore if (typeof client.end === "function") { //@ts-ignore await client.end(); } else if (typeof client.quit === "function") { await client.quit(); } else if (typeof client.disconnect === "function") { await client.disconnect(); } } await utils.sleep(config.redis.stopTimeout); // allow some time for the connection to close } }
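As a usage sketch of the RPC path this initializer wires up, mirroring the call already made in `start()` — the `method` string is resolved on each receiving node via dot-prop, so any callable path under the `api` object (plus `log`) can be invoked cluster-wide:

// Illustrative helper, not part of the initializer.
async function broadcastSketch() {
  await redis.doCluster("api.log", [`hello from ${id}`]);
}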
encounter.spec.ts
// Copyright 2018 Verily Life Sciences Inc. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. import {DateTime, Interval} from 'luxon'; import {Encounter} from './encounter'; const REQUEST_ID = '1234'; describe('Encounter', () => { it('should throw error if json is not of type Encounter', () => { const constructor = () => { const x = new Encounter({}, REQUEST_ID); }; expect(constructor).toThrowError(); }); it('should get label from json', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: {start: '2018-10-01T06:00:00.000Z', end: '2018-10-10T06:00:00.000Z'}, resourceType: 'Encounter', }; const encounter = new Encounter(testEncounter, REQUEST_ID); expect(encounter.encounterId).toEqual('4027918'); }); it('should get start and end date from json if they are there', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: {start: '2018-10-01T06:00:00.000Z', end: '2018-10-10T06:00:00.000Z'}, 'resourceType': 'Encounter', }; const encounter = new Encounter(testEncounter, REQUEST_ID); expect(encounter.period) .toEqual(Interval.fromDateTimes( DateTime.fromISO('2018-10-01T06:00:00.000Z').toLocal(), DateTime.fromISO('2018-10-10T06:00:00.000Z').toLocal())); }); it('should set end date to now if it isn\'t there', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: { start: '2018-10-01T06:00:00.000Z', }, resourceType: 'Encounter', }; const encounter = new Encounter(testEncounter, REQUEST_ID); expect(encounter.period.start) .toEqual(DateTime.fromISO('2018-10-01T06:00:00.000Z').toLocal()); expect(encounter.period.end.toMillis()) // accept differences up to 100ms .toBeCloseTo(DateTime.utc().toMillis(), -3); }); it('should set end date to now if it is in the future', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: { start: '2018-10-01T06:00:00.000Z', end: '2200-10-01T06:00:00.000Z', }, resourceType: 'Encounter', }; const encounter = new Encounter(testEncounter, REQUEST_ID); expect(encounter.period.start) .toEqual(DateTime.fromISO('2018-10-01T06:00:00.000Z').toLocal()); expect(encounter.period.end.toMillis()) // accept differences up to 100ms .toBeCloseTo(DateTime.utc().toMillis(), -3); }); it('should require a start date', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: { end: '2018-10-01T06:00:00.000Z', }, resourceType: 'Encounter', }; const constructor = () => { const encounter = new Encounter(testEncounter, REQUEST_ID);
};
expect(constructor).toThrowError(new RegExp(`Request IDs: ${REQUEST_ID}`)); }); it('should throw an error if start time is after end time', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: { start: '2018-10-01T06:00:00.000Z', end: '2018-09-01T06:00:00.000Z', }, resourceType: 'Encounter', }; const constructor = () => { const encounter = new Encounter(testEncounter, REQUEST_ID); }; expect(constructor).toThrowError(new RegExp(`Request IDs: ${REQUEST_ID}`)); }); it('should throw an error if start time is in the future', () => { const testEncounter = { class: 'inpatient', id: '4027918', patient: {display: 'Smart, Joe', reference: 'Patient/4342010'}, period: { start: '2200-09-01T06:00:00.000Z', end: '2018-10-01T06:00:00.000Z', }, resourceType: 'Encounter', }; const constructor = () => { const encounter = new Encounter(testEncounter, REQUEST_ID); }; expect(constructor).toThrowError(new RegExp(`Request IDs: ${REQUEST_ID}`)); }); });
};
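The specs above pin down one clamping rule: a missing or future end timestamp collapses to "now". A small luxon sketch of that rule, assuming Encounter's constructor does something equivalent (the helper name is invented):

import { DateTime, Interval } from "luxon";

// Hypothetical helper mirroring the behavior the specs assert; Encounter itself
// presumably applies the same clamp internally.
function clampedPeriod(startISO: string, endISO?: string): Interval {
  const start = DateTime.fromISO(startISO).toLocal();
  const now = DateTime.utc().toLocal();
  let end = endISO ? DateTime.fromISO(endISO).toLocal() : now;
  if (end > now) end = now; // a future end date means the encounter is still open
  return Interval.fromDateTimes(start, end);
}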
EditListingPricingPanel.js
import React from 'react'; import PropTypes from 'prop-types'; import classNames from 'classnames'; import { FormattedMessage } from '../../util/reactIntl'; import { LISTING_STATE_DRAFT } from '../../util/types'; import { ListingLink } from '../../components'; import { EditListingPricingForm } from '../../forms'; import { ensureOwnListing, getListingCategory } from '../../util/data'; import { types as sdkTypes } from '../../util/sdkLoader'; import config from '../../config'; import css from './EditListingPricingPanel.css'; const { Money } = sdkTypes; const EditListingPricingPanel = props => {
disabled, ready, onSubmit, onChange, submitButtonText, panelUpdated, updateInProgress, errors, } = props; const classes = classNames(rootClassName || css.root, className); const currentListing = ensureOwnListing(listing); const { price } = currentListing.attributes; const listingCategory = getListingCategory(currentListing); const isPublished = currentListing.id && currentListing.attributes.state !== LISTING_STATE_DRAFT; const panelTitle = isPublished ? ( <FormattedMessage id="EditListingPricingPanel.title" values={{ listingTitle: <ListingLink listing={listing} /> }} /> ) : ( <FormattedMessage id="EditListingPricingPanel.createListingTitle" /> ); const priceCurrencyValid = price instanceof Money ? price.currency === config.currency : true; const form = priceCurrencyValid ? ( <EditListingPricingForm className={css.form} initialValues={{ price }} onSubmit={onSubmit} onChange={onChange} saveActionMsg={submitButtonText} disabled={disabled} ready={ready} updated={panelUpdated} updateInProgress={updateInProgress} fetchErrors={errors} listingCategory={listingCategory} /> ) : ( <div className={css.priceCurrencyInvalid}> <FormattedMessage id="EditListingPricingPanel.listingPriceCurrencyInvalid" /> </div> ); return ( <div className={classes}> <h1 className={css.title}>{panelTitle}</h1> {form} </div> ); }; const { func, object, string, bool } = PropTypes; EditListingPricingPanel.defaultProps = { className: null, rootClassName: null, listing: null, }; EditListingPricingPanel.propTypes = { className: string, rootClassName: string, // We cannot use propTypes.listing since the listing might be a draft. listing: object, disabled: bool.isRequired, ready: bool.isRequired, onSubmit: func.isRequired, onChange: func.isRequired, submitButtonText: string.isRequired, panelUpdated: bool.isRequired, updateInProgress: bool.isRequired, errors: object.isRequired, }; export default EditListingPricingPanel;
const { className, rootClassName, listing,
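The only conditional logic in this panel is the currency guard: a listing price is acceptable when it is absent or when its currency matches the marketplace currency. The same check as a standalone predicate (the Money shape here is a minimal stand-in for the SDK type):

type Money = { amount: number; currency: string };

// A missing price is valid (draft listings); a present price must use the marketplace currency.
function isPriceCurrencyValid(price: Money | null, marketplaceCurrency: string): boolean {
  return price ? price.currency === marketplaceCurrency : true;
}

isPriceCurrencyValid(null, "USD"); // true: no price set yet
isPriceCurrencyValid({ amount: 1000, currency: "EUR" }, "USD"); // false: renders the invalid-currency message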
display.rs
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(non_upper_case_globals)] use block::Block; use dispatch::ffi::dispatch_queue_t; use io_surface::IOSurfaceRef; use libc; use std::ops::Deref; use std::ptr; pub use base::{boolean_t, CGError}; pub use geometry::{CGPoint, CGRect, CGSize}; use core_foundation::base::{CFRetain, TCFType}; use core_foundation::string::{CFString, CFStringRef}; use foreign_types::ForeignType; use image::CGImage; pub type CGDirectDisplayID = u32; pub type CGWindowID = u32; pub const kCGNullWindowID: CGWindowID = 0 as CGWindowID; pub const kCGNullDirectDisplayID: CGDirectDisplayID = 0 as CGDirectDisplayID; pub type CGWindowListOption = u32; pub const kCGWindowListOptionAll: CGWindowListOption = 0; pub const kCGWindowListOptionOnScreenOnly: CGWindowListOption = 1 << 0; pub const kCGWindowListOptionOnScreenAboveWindow: CGWindowListOption = 1 << 1; pub const kCGWindowListOptionOnScreenBelowWindow: CGWindowListOption = 1 << 2; pub const kCGWindowListOptionIncludingWindow: CGWindowListOption = 1 << 3; pub const kCGWindowListExcludeDesktopElements: CGWindowListOption = 1 << 4; pub type CGWindowImageOption = u32; pub const kCGWindowImageDefault: CGWindowImageOption = 0; pub const kCGWindowImageBoundsIgnoreFraming: CGWindowImageOption = 1 << 0; pub const kCGWindowImageShouldBeOpaque: CGWindowImageOption = 1 << 1; pub const kCGWindowImageOnlyShadows: CGWindowImageOption = 1 << 2; pub const kCGWindowImageBestResolution: CGWindowImageOption = 1 << 3; pub const kCGWindowImageNominalResolution: CGWindowImageOption = 1 << 4; pub type CGDisplayStreamFrameStatus = u32; pub const kCGDisplayStreamFrameStatusFrameComplete: CGDisplayStreamFrameStatus = 0; pub const kCGDisplayStreamFrameStatusFrameIdle: CGDisplayStreamFrameStatus = 1; pub const kCGDisplayStreamFrameStatusFrameBlank: CGDisplayStreamFrameStatus = 2; pub const kCGDisplayStreamFrameStatusStopped: CGDisplayStreamFrameStatus = 3; pub const kDisplayModeValidFlag: u32 = 0x00000001; pub const kDisplayModeSafeFlag: u32 = 0x00000002; pub const kDisplayModeDefaultFlag: u32 = 0x00000004; pub const kDisplayModeAlwaysShowFlag: u32 = 0x00000008; pub const kDisplayModeNeverShowFlag: u32 = 0x00000080; pub const kDisplayModeNotResizeFlag: u32 = 0x00000010; pub const kDisplayModeRequiresPanFlag: u32 = 0x00000020; pub const kDisplayModeInterlacedFlag: u32 = 0x00000040; pub const kDisplayModeSimulscanFlag: u32 = 0x00000100; pub const kDisplayModeBuiltInFlag: u32 = 0x00000400; pub const kDisplayModeNotPresetFlag: u32 = 0x00000200; pub const kDisplayModeStretchedFlag: u32 = 0x00000800; pub const kDisplayModeNotGraphicsQualityFlag: u32 = 0x00001000; pub const kDisplayModeValidateAgainstDisplay: u32 = 0x00002000; pub const kDisplayModeTelevisionFlag: u32 = 0x00100000; pub const kDisplayModeValidForMirroringFlag: u32 = 0x00200000; pub const kDisplayModeAcceleratorBackedFlag: u32 = 0x00400000; pub const kDisplayModeValidForHiResFlag: u32 = 0x00800000; pub const kDisplayModeValidForAirPlayFlag: u32 = 0x01000000; pub const kDisplayModeNativeFlag: u32 = 0x02000000; pub const kDisplayModeSafetyFlags: u32 = 0x00000007; pub const IO1BitIndexedPixels: &str = "P"; pub const 
IO2BitIndexedPixels: &str = "PP"; pub const IO4BitIndexedPixels: &str = "PPPP"; pub const IO8BitIndexedPixels: &str = "PPPPPPPP"; pub const IO16BitDirectPixels: &str = "-RRRRRGGGGGBBBBB"; pub const IO32BitDirectPixels: &str = "--------RRRRRRRRGGGGGGGGBBBBBBBB"; pub const kIO30BitDirectPixels: &str = "--RRRRRRRRRRGGGGGGGGGGBBBBBBBBBB"; pub const kIO64BitDirectPixels: &str = "-16R16G16B16"; pub const kIO16BitFloatPixels: &str = "-16FR16FG16FB16"; pub const kIO32BitFloatPixels: &str = "-32FR32FG32FB32"; pub const IOYUV422Pixels: &str = "Y4U2V2"; pub const IO8BitOverlayPixels: &str = "O8"; pub use core_foundation::array::{CFArray, CFArrayRef}; pub use core_foundation::array::{CFArrayGetCount, CFArrayGetValueAtIndex}; pub use core_foundation::base::{CFIndex, CFRelease, CFTypeRef}; pub use core_foundation::dictionary::{ CFDictionary, CFDictionaryGetValueIfPresent, CFDictionaryRef, }; pub type CGDisplayStreamFrameAvailableHandler = Block< ( CGDisplayStreamFrameStatus, u64, IOSurfaceRef, ::sys::CGDisplayStreamUpdateRef, ), (), >; pub type CGDisplayConfigRef = *mut libc::c_void; #[repr(u32)]
ConfigurePermanently = 2, } #[derive(Copy, Clone, Debug)] pub struct CGDisplay { pub id: CGDirectDisplayID, } foreign_type! { #[doc(hidden)] type CType = ::sys::CGDisplayMode; fn drop = CGDisplayModeRelease; fn clone = |p| CFRetain(p as *const _) as *mut _; pub struct CGDisplayMode; pub struct CGDisplayModeRef; } impl CGDisplay { #[inline] pub fn new(id: CGDirectDisplayID) -> CGDisplay { CGDisplay { id } } /// Returns the main display. #[inline] pub fn main() -> CGDisplay { CGDisplay::new(unsafe { CGMainDisplayID() }) } /// A value that will never correspond to actual hardware. pub fn null_display() -> CGDisplay { CGDisplay::new(kCGNullDirectDisplayID) } /// Returns the bounds of a display in the global display coordinate space. #[inline] pub fn bounds(&self) -> CGRect { unsafe { CGDisplayBounds(self.id) } } /// Returns information about a display's current configuration. #[inline] pub fn display_mode(&self) -> Option<CGDisplayMode> { unsafe { let mode_ref = CGDisplayCopyDisplayMode(self.id); if !mode_ref.is_null() { Some(CGDisplayMode::from_ptr(mode_ref)) } else { None } } } /// Begins a new set of display configuration changes. pub fn begin_configuration(&self) -> Result<CGDisplayConfigRef, CGError> { unsafe { let mut config_ref: CGDisplayConfigRef = ptr::null_mut(); let result = CGBeginDisplayConfiguration(&mut config_ref); if result == 0 { Ok(config_ref) } else { Err(result) } } } /// Cancels a set of display configuration changes. pub fn cancel_configuration(&self, config_ref: &CGDisplayConfigRef) -> Result<(), CGError> { let result = unsafe { CGCancelDisplayConfiguration(*config_ref) }; if result == 0 { Ok(()) } else { Err(result) } } /// Completes a set of display configuration changes. pub fn complete_configuration( &self, config_ref: &CGDisplayConfigRef, option: CGConfigureOption, ) -> Result<(), CGError> { let result = unsafe { CGCompleteDisplayConfiguration(*config_ref, option) }; if result == 0 { Ok(()) } else { Err(result) } } /// Configures the display mode of a display. pub fn configure_display_with_display_mode( &self, config_ref: &CGDisplayConfigRef, display_mode: &CGDisplayMode, ) -> Result<(), CGError> { let result = unsafe { CGConfigureDisplayWithDisplayMode( *config_ref, self.id, display_mode.as_ptr(), ptr::null(), ) }; if result == 0 { Ok(()) } else { Err(result) } } /// Configures the origin of a display in the global display coordinate space. pub fn configure_display_origin( &self, config_ref: &CGDisplayConfigRef, x: i32, y: i32, ) -> Result<(), CGError> { let result = unsafe { CGConfigureDisplayOrigin(*config_ref, self.id, x, y) }; if result == 0 { Ok(()) } else { Err(result) } } /// Changes the configuration of a mirroring set. pub fn configure_display_mirror_of_display( &self, config_ref: &CGDisplayConfigRef, master: &CGDisplay, ) -> Result<(), CGError> { let result = unsafe { CGConfigureDisplayMirrorOfDisplay(*config_ref, self.id, master.id) }; if result == 0 { Ok(()) } else { Err(result) } } /// Returns an image containing the contents of the specified display. #[inline] pub fn image(&self) -> Option<CGImage> { unsafe { let image_ref = CGDisplayCreateImage(self.id); if !image_ref.is_null() { Some(CGImage::from_ptr(image_ref)) } else { None } } } /// Returns a composite image based on a dynamically generated list of /// windows. 
#[inline] pub fn screenshot( bounds: CGRect, list_option: CGWindowListOption, window_id: CGWindowID, image_option: CGWindowImageOption, ) -> Option<CGImage> { unsafe { let image_ref = CGWindowListCreateImage(bounds, list_option, window_id, image_option); if !image_ref.is_null() { Some(CGImage::from_ptr(image_ref)) } else { None } } } /// Returns a composite image of the specified windows. #[inline] pub fn screenshot_from_windows( bounds: CGRect, windows: CFArray, image_option: CGWindowImageOption, ) -> Option<CGImage> { unsafe { let image_ref = CGWindowListCreateImageFromArray( bounds, windows.as_concrete_TypeRef(), image_option, ); if !image_ref.is_null() { Some(CGImage::from_ptr(image_ref)) } else { None } } } /// Generates and returns information about the selected windows in the /// current user session. pub fn window_list_info( option: CGWindowListOption, relative_to_window: Option<CGWindowID>, ) -> Option<CFArray> { let relative_to_window = relative_to_window.unwrap_or(kCGNullWindowID); let array_ref = unsafe { CGWindowListCopyWindowInfo(option, relative_to_window) }; if !array_ref.is_null() { Some(unsafe { TCFType::wrap_under_create_rule(array_ref) }) } else { None } } /// Returns a Boolean value indicating whether a display is active. #[inline] pub fn is_active(&self) -> bool { unsafe { CGDisplayIsActive(self.id) != 0 } } /// Returns a boolean indicating whether a display is always in a /// mirroring set. #[inline] pub fn is_always_in_mirror_set(&self) -> bool { unsafe { CGDisplayIsAlwaysInMirrorSet(self.id) != 0 } } /// Returns a boolean indicating whether a display is sleeping (and is /// therefore not drawable.) #[inline] pub fn is_asleep(&self) -> bool { unsafe { CGDisplayIsAsleep(self.id) != 0 } } /// Returns a boolean indicating whether a display is built-in, such as /// the internal display in portable systems. #[inline] pub fn is_builtin(&self) -> bool { unsafe { CGDisplayIsBuiltin(self.id) != 0 } } /// Returns a boolean indicating whether a display is in a hardware /// mirroring set. #[inline] pub fn is_in_hw_mirror_set(&self) -> bool { unsafe { CGDisplayIsInHWMirrorSet(self.id) != 0 } } /// Returns a boolean indicating whether a display is in a mirroring set. #[inline] pub fn is_in_mirror_set(&self) -> bool { unsafe { CGDisplayIsInMirrorSet(self.id) != 0 } } /// Returns a boolean indicating whether a display is the main display. #[inline] pub fn is_main(&self) -> bool { unsafe { CGDisplayIsMain(self.id) != 0 } } /// Returns a boolean indicating whether a display is connected or online. #[inline] pub fn is_online(&self) -> bool { unsafe { CGDisplayIsOnline(self.id) != 0 } } /// Returns a boolean indicating whether Quartz is using OpenGL-based /// window acceleration (Quartz Extreme) to render in a display. #[inline] pub fn uses_open_gl_acceleration(&self) -> bool { unsafe { CGDisplayUsesOpenGLAcceleration(self.id) != 0 } } /// Returns a boolean indicating whether a display is running in a stereo /// graphics mode. #[inline] pub fn is_stereo(&self) -> bool { unsafe { CGDisplayIsStereo(self.id) != 0 } } /// For a secondary display in a mirroring set, returns the primary /// display. #[inline] pub fn mirrors_display(&self) -> CGDirectDisplayID { unsafe { CGDisplayMirrorsDisplay(self.id) } } /// Returns the primary display in a hardware mirroring set. #[inline] pub fn primary_display(&self) -> CGDirectDisplayID { unsafe { CGDisplayPrimaryDisplay(self.id) } } /// Returns the rotation angle of a display in degrees. 
#[inline] pub fn rotation(&self) -> f64 { unsafe { CGDisplayRotation(self.id) } } /// Returns the width and height of a display in millimeters. #[inline] pub fn screen_size(&self) -> CGSize { unsafe { CGDisplayScreenSize(self.id) } } /// Returns the serial number of a display monitor. #[inline] pub fn serial_number(&self) -> u32 { unsafe { CGDisplaySerialNumber(self.id) } } /// Returns the logical unit number of a display. #[inline] pub fn unit_number(&self) -> u32 { unsafe { CGDisplayUnitNumber(self.id) } } /// Returns the vendor number of the specified display's monitor. #[inline] pub fn vendor_number(&self) -> u32 { unsafe { CGDisplayVendorNumber(self.id) } } /// Returns the model number of a display monitor. #[inline] pub fn model_number(&self) -> u32 { unsafe { CGDisplayModelNumber(self.id) } } /// Returns the display height in pixel units. #[inline] pub fn pixels_high(&self) -> u64 { unsafe { CGDisplayPixelsHigh(self.id) as u64 } } /// Returns the display width in pixel units. #[inline] pub fn pixels_wide(&self) -> u64 { unsafe { CGDisplayPixelsWide(self.id) as u64 } } /// Provides a list of displays that are active (or drawable). #[inline] pub fn active_displays() -> Result<Vec<CGDirectDisplayID>, CGError> { let count = CGDisplay::active_display_count()?; let mut buf: Vec<CGDirectDisplayID> = vec![0; count as usize]; let result = unsafe { CGGetActiveDisplayList(count as u32, buf.as_mut_ptr(), ptr::null_mut()) }; if result == 0 { Ok(buf) } else { Err(result) } } /// Provides count of displays that are active (or drawable). #[inline] pub fn active_display_count() -> Result<u32, CGError> { let mut count: u32 = 0; let result = unsafe { CGGetActiveDisplayList(0, ptr::null_mut(), &mut count) }; if result == 0 { Ok(count as u32) } else { Err(result) } } /// Hides the mouse cursor, and increments the hide cursor count. #[inline] pub fn hide_cursor(&self) -> Result<(), CGError> { let result = unsafe { CGDisplayHideCursor(self.id) }; if result == 0 { Ok(()) } else { Err(result) } } /// Decrements the hide cursor count, and shows the mouse cursor if the /// count is 0. #[inline] pub fn show_cursor(&self) -> Result<(), CGError> { let result = unsafe { CGDisplayShowCursor(self.id) }; if result == 0 { Ok(()) } else { Err(result) } } /// Moves the mouse cursor to a specified point relative to the display /// origin (the upper-left corner of the display). #[inline] pub fn move_cursor_to_point(&self, point: CGPoint) -> Result<(), CGError> { let result = unsafe { CGDisplayMoveCursorToPoint(self.id, point) }; if result == 0 { Ok(()) } else { Err(result) } } /// Moves the mouse cursor without generating events. #[inline] pub fn warp_mouse_cursor_position(point: CGPoint) -> Result<(), CGError> { let result = unsafe { CGWarpMouseCursorPosition(point) }; if result == 0 { Ok(()) } else { Err(result) } } /// Connects or disconnects the mouse and cursor while an application is /// in the foreground. #[inline] pub fn associate_mouse_and_mouse_cursor_position(connected: bool) -> Result<(), CGError> { let result = unsafe { CGAssociateMouseAndMouseCursorPosition(connected as boolean_t) }; if result == 0 { Ok(()) } else { Err(result) } } } impl CGDisplayMode { /// Returns all display modes for the specified display id. 
pub fn all_display_modes( display_id: CGDirectDisplayID, options: CFDictionaryRef, ) -> Option<Vec<CGDisplayMode>> { let array_opt: Option<CFArray> = unsafe { let array_ref = CGDisplayCopyAllDisplayModes(display_id, options); if !array_ref.is_null() { Some(CFArray::wrap_under_create_rule(array_ref)) } else { None } }; match array_opt { Some(modes) => { let vec: Vec<CGDisplayMode> = modes .into_iter() .map(|value0| { let x = *value0.deref() as *mut ::sys::CGDisplayMode; unsafe { CGDisplayMode::from_ptr(x) } }) .collect(); Some(vec) } None => None, } } /// Returns the height of the specified display mode. #[inline] pub fn height(&self) -> u64 { unsafe { CGDisplayModeGetHeight(self.as_ptr()) as u64 } } /// Returns the width of the specified display mode. #[inline] pub fn width(&self) -> u64 { unsafe { CGDisplayModeGetWidth(self.as_ptr()) as u64 } } /// Returns the pixel height of the specified display mode. #[inline] pub fn pixel_height(&self) -> u64 { unsafe { CGDisplayModeGetPixelHeight(self.as_ptr()) as u64 } } /// Returns the pixel width of the specified display mode. #[inline] pub fn pixel_width(&self) -> u64 { unsafe { CGDisplayModeGetPixelWidth(self.as_ptr()) as u64 } } #[inline] pub fn refresh_rate(&self) -> f64 { unsafe { CGDisplayModeGetRefreshRate(self.as_ptr()) } } /// Returns the I/O Kit flags of the specified display mode. #[inline] pub fn io_flags(&self) -> u32 { unsafe { CGDisplayModeGetIOFlags(self.as_ptr()) as u32 } } /// Returns the pixel encoding of the specified display mode. #[inline] pub fn pixel_encoding(&self) -> CFString { unsafe { CFString::wrap_under_create_rule(CGDisplayModeCopyPixelEncoding(self.as_ptr())) } } /// Returns the number of bits per pixel of the specified display mode. pub fn bit_depth(&self) -> usize { let pixel_encoding = self.pixel_encoding().to_string(); // my numerical representation for kIO16BitFloatPixels and kIO32bitFloatPixels // are made up and possibly non-sensical if pixel_encoding.eq_ignore_ascii_case(kIO32BitFloatPixels) { 96 } else if pixel_encoding.eq_ignore_ascii_case(kIO64BitDirectPixels) { 64 } else if pixel_encoding.eq_ignore_ascii_case(kIO16BitFloatPixels) { 48 } else if pixel_encoding.eq_ignore_ascii_case(IO32BitDirectPixels) { 32 } else if pixel_encoding.eq_ignore_ascii_case(kIO30BitDirectPixels) { 30 } else if pixel_encoding.eq_ignore_ascii_case(IO16BitDirectPixels) { 16 } else if pixel_encoding.eq_ignore_ascii_case(IO8BitIndexedPixels) { 8 } else { 0 } } } #[link(name = "CoreGraphics", kind = "framework")] extern "C" { pub static CGRectNull: CGRect; pub static CGRectInfinite: CGRect; pub static kCGDisplayShowDuplicateLowResolutionModes: CFStringRef; pub static kCGDisplayStreamSourceRect: CFStringRef; pub static kCGDisplayStreamDestinationRect: CFStringRef; pub static kCGDisplayStreamPreserveAspectRatio: CFStringRef; pub static kCGDisplayStreamColorSpace: CFStringRef; pub static kCGDisplayStreamMinimumFrameTime: CFStringRef; pub static kCGDisplayStreamShowCursor: CFStringRef; pub static kCGDisplayStreamQueueDepth: CFStringRef; pub static kCGDisplayStreamYCbCrMatrix: CFStringRef; pub static kCGDisplayStreamYCbCrMatrix_ITU_R_709_2: CFStringRef; pub static kCGDisplayStreamYCbCrMatrix_ITU_R_601_4: CFStringRef; pub static kCGDisplayStreamYCbCrMatrix_SMPTE_240M_1995: CFStringRef; pub fn CGDisplayModeRelease(mode: ::sys::CGDisplayModeRef); pub fn CGMainDisplayID() -> CGDirectDisplayID; pub fn CGDisplayIsActive(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsAlwaysInMirrorSet(display: CGDirectDisplayID) -> boolean_t; pub fn 
CGDisplayIsAsleep(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsBuiltin(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsInHWMirrorSet(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsInMirrorSet(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsMain(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsOnline(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayIsStereo(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayMirrorsDisplay(display: CGDirectDisplayID) -> CGDirectDisplayID; pub fn CGDisplayPrimaryDisplay(display: CGDirectDisplayID) -> CGDirectDisplayID; pub fn CGDisplayRotation(display: CGDirectDisplayID) -> libc::c_double; pub fn CGDisplayScreenSize(display: CGDirectDisplayID) -> CGSize; pub fn CGDisplaySerialNumber(display: CGDirectDisplayID) -> u32; pub fn CGDisplayUnitNumber(display: CGDirectDisplayID) -> u32; pub fn CGDisplayUsesOpenGLAcceleration(display: CGDirectDisplayID) -> boolean_t; pub fn CGDisplayVendorNumber(display: CGDirectDisplayID) -> u32; pub fn CGGetActiveDisplayList( max_displays: u32, active_displays: *mut CGDirectDisplayID, display_count: *mut u32, ) -> CGError; pub fn CGGetDisplaysWithRect( rect: CGRect, max_displays: u32, displays: *mut CGDirectDisplayID, matching_display_count: *mut u32, ) -> CGError; pub fn CGDisplayModelNumber(display: CGDirectDisplayID) -> u32; pub fn CGDisplayPixelsHigh(display: CGDirectDisplayID) -> libc::size_t; pub fn CGDisplayPixelsWide(display: CGDirectDisplayID) -> libc::size_t; pub fn CGDisplayBounds(display: CGDirectDisplayID) -> CGRect; pub fn CGDisplayCreateImage(display: CGDirectDisplayID) -> ::sys::CGImageRef; pub fn CGBeginDisplayConfiguration(config: *mut CGDisplayConfigRef) -> CGError; pub fn CGCancelDisplayConfiguration(config: CGDisplayConfigRef) -> CGError; pub fn CGCompleteDisplayConfiguration( config: CGDisplayConfigRef, option: CGConfigureOption, ) -> CGError; pub fn CGConfigureDisplayWithDisplayMode( config: CGDisplayConfigRef, display: CGDirectDisplayID, mode: ::sys::CGDisplayModeRef, options: CFDictionaryRef, ) -> CGError; pub fn CGConfigureDisplayMirrorOfDisplay( config: CGDisplayConfigRef, display: CGDirectDisplayID, master: CGDirectDisplayID, ) -> CGError; pub fn CGConfigureDisplayOrigin( config: CGDisplayConfigRef, display: CGDirectDisplayID, x: i32, y: i32, ) -> CGError; pub fn CGDisplayCopyDisplayMode(display: CGDirectDisplayID) -> ::sys::CGDisplayModeRef; pub fn CGDisplayModeGetHeight(mode: ::sys::CGDisplayModeRef) -> libc::size_t; pub fn CGDisplayModeGetWidth(mode: ::sys::CGDisplayModeRef) -> libc::size_t; pub fn CGDisplayModeGetPixelHeight(mode: ::sys::CGDisplayModeRef) -> libc::size_t; pub fn CGDisplayModeGetPixelWidth(mode: ::sys::CGDisplayModeRef) -> libc::size_t; pub fn CGDisplayModeGetRefreshRate(mode: ::sys::CGDisplayModeRef) -> libc::c_double; pub fn CGDisplayModeGetIOFlags(mode: ::sys::CGDisplayModeRef) -> u32; pub fn CGDisplayModeCopyPixelEncoding(mode: ::sys::CGDisplayModeRef) -> CFStringRef; pub fn CGDisplayCopyAllDisplayModes( display: CGDirectDisplayID, options: CFDictionaryRef, ) -> CFArrayRef; // mouse stuff pub fn CGDisplayHideCursor(display: CGDirectDisplayID) -> CGError; pub fn CGDisplayShowCursor(display: CGDirectDisplayID) -> CGError; pub fn CGDisplayMoveCursorToPoint(display: CGDirectDisplayID, point: CGPoint) -> CGError; pub fn CGWarpMouseCursorPosition(point: CGPoint) -> CGError; pub fn CGAssociateMouseAndMouseCursorPosition(connected: boolean_t) -> CGError; // Window Services Reference pub fn 
CGWindowListCopyWindowInfo( option: CGWindowListOption, relativeToWindow: CGWindowID, ) -> CFArrayRef; pub fn CGWindowListCreateImage( screenBounds: CGRect, listOptions: CGWindowListOption, windowId: CGWindowID, imageOptions: CGWindowImageOption, ) -> ::sys::CGImageRef; pub fn CGWindowListCreateImageFromArray( screenBounds: CGRect, windowArray: CFArrayRef, imageOptions: CGWindowImageOption, ) -> ::sys::CGImageRef; // display streaming #[allow(improper_ctypes)] pub fn CGDisplayStreamCreate( display: CGDirectDisplayID, outputWidth: usize, outputHeight: usize, pixelFormat: i32, properties: CFDictionaryRef, handler: &CGDisplayStreamFrameAvailableHandler, ) -> ::sys::CGDisplayStreamRef; #[allow(improper_ctypes)] pub fn CGDisplayStreamCreateWithDispatchQueue( display: CGDirectDisplayID, outputWidth: usize, outputHeight: usize, pixelFormat: i32, properties: CFDictionaryRef, queue: dispatch_queue_t, handler: &CGDisplayStreamFrameAvailableHandler, ) -> ::sys::CGDisplayStreamRef; pub fn CGDisplayStreamStart(displayStream: ::sys::CGDisplayStreamRef) -> CGError; pub fn CGDisplayStreamStop(displayStream: ::sys::CGDisplayStreamRef) -> CGError; }
#[derive(Clone, Copy)] pub enum CGConfigureOption { ConfigureForAppOnly = 0, ConfigureForSession = 1,
main.rs
use nu_plugin::serve_plugin; use nu_plugin_chart::Chart; fn main() {
serve_plugin(&mut Chart::new()); }
transform.go
/* Copyright 2021 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package transform import ( "fmt" "strings" sErrors "github.com/GoogleContainerTools/skaffold/pkg/skaffold/errors" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/render/kptfile" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/proto/v1" ) var ( allowListedTransformer = []string{"set-labels"} transformerAllowlist = map[string]kptfile.Function{ "set-namespace": { Image: "gcr.io/kpt-fn/set-namespace", ConfigMap: map[string]string{}, }, "set-labels": { Image: "gcr.io/kpt-fn/set-labels:v0.1", ConfigMap: map[string]string{}, }, "set-annotations": { Image: "gcr.io/kpt-fn/set-annotations:v0.1", ConfigMap: map[string]string{}, }, "create-setters": { Image: "gcr.io/kpt-fn/create-setters:unstable", ConfigMap: map[string]string{}, }, "apply-setters": { Image: "gcr.io/kpt-fn/apply-setters:unstable", ConfigMap: map[string]string{}, }, } AllowListedTransformer = func() []string { transformers := make([]string, 0, len(transformerAllowlist)) for funcName := range transformerAllowlist { transformers = append(transformers, funcName) } return transformers }() ) // NewTransformer instantiates a Transformer object. func
(config []latest.Transformer) (Transformer, error) { newFuncs, err := validateTransformers(config) if err != nil { return Transformer{}, err } return Transformer{kptFn: newFuncs, needRefresh: true, config: config}, nil } type Transformer struct { needRefresh bool kptFn []kptfile.Function config []latest.Transformer } // GetDeclarativeTransformers returns the skaffold transformers defined in skaffold.yaml func (v Transformer) GetDeclarativeTransformers() ([]kptfile.Function, error) { // TODO: guarantee the v.kptFn is updated once users change the skaffold.yaml file. if v.needRefresh { newFuncs, err := validateTransformers(v.config) if err != nil { return nil, err } v.kptFn = newFuncs v.needRefresh = false } return v.kptFn, nil } func validateTransformers(config []latest.Transformer) ([]kptfile.Function, error) { var newFuncs []kptfile.Function for _, c := range config { newFunc, ok := transformerAllowlist[c.Name] if !ok { // TODO: Add links to explain "skaffold-managed mode" and "kpt-managed mode". return nil, sErrors.NewErrorWithStatusCode( &proto.ActionableErr{ Message: fmt.Sprintf("unsupported transformer %q", c.Name), ErrCode: proto.StatusCode_CONFIG_UNKNOWN_TRANSFORMER, Suggestions: []*proto.Suggestion{ { SuggestionCode: proto.SuggestionCode_CONFIG_ALLOWLIST_transformers, Action: fmt.Sprintf( "please only use the following transformers in skaffold-managed mode: %v. "+ "to use custom transformers, please use kpt-managed mode.", AllowListedTransformer), }, }, }) } if c.ConfigMap != nil { for _, stringifiedData := range c.ConfigMap { items := strings.Split(stringifiedData, ":") if len(items) != 2 { return nil, sErrors.NewErrorWithStatusCode( &proto.ActionableErr{ Message: fmt.Sprintf("unknown arguments for transformer %v", c.Name), ErrCode: proto.StatusCode_CONFIG_UNKNOWN_TRANSFORMER, Suggestions: []*proto.Suggestion{ { SuggestionCode: proto.SuggestionCode_CONFIG_ALLOWLIST_transformers, Action: fmt.Sprintf("please check the .transformers field and " + "make sure `configMapData` is a list of data in the form of `${KEY}:${VALUE}`"), }, }, }) } newFunc.ConfigMap[items[0]] = items[1] } } newFuncs = append(newFuncs, newFunc) } return newFuncs, nil }
NewTransformer
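validateTransformers reduces to two checks: the transformer name must appear in the allowlist, and each configMap entry must split into exactly one KEY:VALUE pair. A compact TypeScript restatement, for illustration only; the Go above is the real implementation:

const allowlist = new Set(["set-namespace", "set-labels", "set-annotations", "create-setters", "apply-setters"]);

function validateTransformer(name: string, configMap: string[] = []): Record<string, string> {
  if (!allowlist.has(name)) {
    throw new Error(`unsupported transformer "${name}"`);
  }
  const parsed: Record<string, string> = {};
  for (const entry of configMap) {
    const items = entry.split(":");
    if (items.length !== 2) {
      // Mirrors the Go error path: entries must look like KEY:VALUE.
      throw new Error(`unknown arguments for transformer ${name}: "${entry}"`);
    }
    parsed[items[0]] = items[1];
  }
  return parsed;
}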
logging.interceptor.ts
import {HttpEvent, HttpHandler, HttpInterceptor, HttpRequest} from '@angular/common/http'; import {Observable} from 'rxjs/Observable'; import 'rxjs/add/operator/do'; export class
implements HttpInterceptor { intercept(req: HttpRequest<any>, next: HttpHandler): Observable<HttpEvent<any>> { return next.handle(req).do( event => { console.log('Logging interceptor: ', event); } ); } }
LoggingInterceptor
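An interceptor only runs once it is registered against Angular's HTTP_INTERCEPTORS multi-provider token; a typical module wiring looks like this (the module name is illustrative):

import { NgModule } from "@angular/core";
import { HTTP_INTERCEPTORS, HttpClientModule } from "@angular/common/http";
import { LoggingInterceptor } from "./logging.interceptor";

@NgModule({
  imports: [HttpClientModule],
  providers: [
    // multi: true appends to the interceptor chain instead of replacing it.
    { provide: HTTP_INTERCEPTORS, useClass: LoggingInterceptor, multi: true },
  ],
})
export class AppModule {}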
App_tpl.py
from libs.Screen import * class App_tpl(object): @staticmethod def hello(): print(" _ _ _ _ _ ") print(" | |__(_) | | |_ ___ _ _ _ __ ") print(r" | '_ \ | | | _/ -_) '_| ' \ ") print(r"__|_.__/_|_|_|\__\___|_| |_|_|_|_H_e_l_l_o_!") print(" ~ Write " + Text_style.BOLD + "help" + Text_style.END_STYLE + " if you are lost :$") @staticmethod def list_dbs(dbs): print("Existing DBs") for db in dbs: Screen.render_line([ [db, "{:<20}", Text_style.BLUE] ]) @staticmethod def
(): print('helping people!')
help
index.tsx
import { ClockIcon } from "@heroicons/react/outline"; import Link from "next/link"; import { useRouter } from "next/router"; import { useEffect } from "react"; import { useForm } from "react-hook-form"; import { useLocale } from "@lib/hooks/useLocale"; import { useToggleQuery } from "@lib/hooks/useToggleQuery"; import showToast from "@lib/notification"; import { trpc } from "@lib/trpc"; import { Dialog, DialogContent } from "@components/Dialog"; import Loader from "@components/Loader"; import Shell from "@components/Shell"; import { Alert } from "@components/ui/Alert"; import Button from "@components/ui/Button"; function
(mins: number) { const h = Math.floor(mins / 60); const m = mins % 60; const hours = h < 10 ? "0" + h : h; const minutes = m < 10 ? "0" + m : m; return `${hours}:${minutes}`; } export default function Availability() { const { t } = useLocale(); const queryMe = trpc.useQuery(["viewer.me"]); const formModal = useToggleQuery("edit"); const formMethods = useForm<{ startHours: string; startMins: string; endHours: string; endMins: string; bufferHours: string; bufferMins: string; }>({}); const router = useRouter(); useEffect(() => { /** * This hook populates the form with new values as soon as the user is loaded or changes */ const user = queryMe.data; if (formMethods.formState.isDirty || !user) { return; } formMethods.reset({ startHours: convertMinsToHrsMins(user.startTime).split(":")[0], startMins: convertMinsToHrsMins(user.startTime).split(":")[1], endHours: convertMinsToHrsMins(user.endTime).split(":")[0], endMins: convertMinsToHrsMins(user.endTime).split(":")[1], bufferHours: convertMinsToHrsMins(user.bufferTime).split(":")[0], bufferMins: convertMinsToHrsMins(user.bufferTime).split(":")[1], }); }, [formMethods, queryMe.data]); if (queryMe.status === "loading") { return <Loader />; } if (queryMe.status !== "success") { return <Alert severity="error" title={t("something_went_wrong")} />; } const user = queryMe.data; return ( <div> <Shell heading={t("availability")} subtitle={t("configure_availability")}> <div className="flex"> <div className="w-1/2 mr-2 bg-white border border-gray-200 rounded-sm"> <div className="px-4 py-5 sm:p-6"> <h3 className="text-lg leading-6 font-medium text-gray-900">{t("change_start_end")}</h3> <div className="mt-2 max-w-xl text-sm text-gray-500"> <p> {t("current_start_date")} {convertMinsToHrsMins(user.startTime)} {t("and_end_at")}{" "} {convertMinsToHrsMins(user.endTime)}. </p> </div> <div className="mt-5"> <Button href={formModal.hrefOn}>{t("change_available_times")}</Button> </div> </div> </div> <div className="w-1/2 ml-2 border border-gray-200 rounded-sm"> <div className="px-4 py-5 sm:p-6"> <h3 className="text-lg leading-6 font-medium text-gray-900"> {t("something_doesnt_look_right")} </h3> <div className="mt-2 max-w-xl text-sm text-gray-500"> <p>{t("troubleshoot_availability")}</p> </div> <div className="mt-5"> <Link href="/availability/troubleshoot"> <a className="btn btn-white">{t("launch_troubleshooter")}</a> </Link> </div> </div> </div> </div> <Dialog open={formModal.isOn} onOpenChange={(isOpen) => { router.push(isOpen ? 
formModal.hrefOn : formModal.hrefOff); }}> <DialogContent> <div className="sm:flex sm:items-start mb-4"> <div className="mx-auto flex-shrink-0 flex items-center justify-center h-12 w-12 rounded-full bg-neutral-100 sm:mx-0 sm:h-10 sm:w-10"> <ClockIcon className="h-6 w-6 text-neutral-600" /> </div> <div className="mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left"> <h3 className="text-lg leading-6 font-medium text-gray-900" id="modal-title"> {t("change_your_available_times")} </h3> <div> <p className="text-sm text-gray-500">{t("change_start_end_buffer")}</p> </div> </div> </div> <form onSubmit={formMethods.handleSubmit(async (values) => { const startMins = parseInt(values.startHours) * 60 + parseInt(values.startMins); const endMins = parseInt(values.endHours) * 60 + parseInt(values.endMins); const bufferMins = parseInt(values.bufferHours) * 60 + parseInt(values.bufferMins); // TODO: Add validation // eslint-disable-next-line @typescript-eslint/no-unused-vars const response = await fetch("/api/availability/day", { method: "PATCH", body: JSON.stringify({ start: startMins, end: endMins, buffer: bufferMins }), headers: { "Content-Type": "application/json", }, }); if (!response.ok) { showToast(t("something_went_wrong"), "error"); return; } await queryMe.refetch(); router.push(formModal.hrefOff); showToast(t("start_end_changed_successfully"), "success"); })}> <div className="flex mb-4"> <label className="w-1/4 pt-2 block text-sm font-medium text-gray-700"> {t("start_time")} </label> <div> <label htmlFor="startHours" className="sr-only"> {t("hours")} </label> <input {...formMethods.register("startHours")} id="startHours" type="number" className="shadow-sm focus:ring-neutral-500 focus:border-neutral-500 block w-full sm:text-sm border-gray-300 rounded-sm" placeholder="9" defaultValue={convertMinsToHrsMins(user.startTime).split(":")[0]} /> </div> <span className="mx-2 pt-1">:</span> <div> <label htmlFor="startMins" className="sr-only"> {t("minutes")} </label> <input {...formMethods.register("startMins")} id="startMins" type="number" className="shadow-sm focus:ring-neutral-500 focus:border-neutral-500 block w-full sm:text-sm border-gray-300 rounded-sm" placeholder="30" /> </div> </div> <div className="flex mb-4"> <label className="w-1/4 pt-2 block text-sm font-medium text-gray-700">{t("end_time")}</label> <div> <label htmlFor="endHours" className="sr-only"> {t("hours")} </label> <input {...formMethods.register("endHours")} type="number" id="endHours" className="shadow-sm focus:ring-neutral-500 focus:border-neutral-500 block w-full sm:text-sm border-gray-300 rounded-sm" placeholder="17" /> </div> <span className="mx-2 pt-1">:</span> <div> <label htmlFor="endMins" className="sr-only"> {t("minutes")} </label> <input {...formMethods.register("endMins")} type="number" id="endMins" className="shadow-sm focus:ring-neutral-500 focus:border-neutral-500 block w-full sm:text-sm border-gray-300 rounded-sm" placeholder="30" /> </div> </div> <div className="flex mb-4"> <label className="w-1/4 pt-2 block text-sm font-medium text-gray-700">{t("buffer")}</label> <div> <label htmlFor="bufferHours" className="sr-only"> {t("hours")} </label> <input {...formMethods.register("bufferHours")} type="number" id="bufferHours" className="shadow-sm focus:ring-neutral-500 focus:border-neutral-500 block w-full sm:text-sm border-gray-300 rounded-sm" placeholder="0" /> </div> <span className="mx-2 pt-1">:</span> <div> <label htmlFor="bufferMins" className="sr-only"> {t("minutes")} </label> <input {...formMethods.register("bufferMins")} 
type="number" id="bufferMins" className="shadow-sm focus:ring-neutral-500 focus:border-neutral-500 block w-full sm:text-sm border-gray-300 rounded-sm" placeholder="10" /> </div> </div> <div className="mt-5 sm:mt-4 sm:flex space-x-2"> <Button href={formModal.hrefOff} color="secondary" tabIndex={-1}> {t("cancel")} </Button> <Button type="submit" loading={formMethods.formState.isSubmitting}> {t("update")} </Button> </div> </form> </DialogContent> </Dialog> </Shell> </div> ); }
convertMinsToHrsMins
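The form round-trips times through two conversions: minutes to "HH:MM" on load (the helper named above) and "HH:MM" parts back to minutes on submit. The inverse, pulled out as a small helper (name invented for illustration):

// Mirrors the submit handler's `parseInt(hours) * 60 + parseInt(mins)` math.
function convertHrsMinsToMins(hours: string, mins: string): number {
  return parseInt(hours, 10) * 60 + parseInt(mins, 10);
}

convertHrsMinsToMins("09", "30"); // 570, i.e. 9:30 stored as minutes past midnight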
identifiers.ts
/* * Copyright (c) 2021 Ville de Montreal. All rights reserved. * Licensed under the MIT license. * See LICENSE file in the project root for full license information. */ export const SERVICE_IDENTIFIER = { /** * You can bind your own logger. */ logger: Symbol('logger'), /** * You can bind your own strategy by implementing the IFailureStrategy interface. */ failure_strategy: Symbol('failureStrategy'), /** * You can bind your own strategy by implementing the ISuccessStrategy interface. */ success_strategy: Symbol('successStrategy'), /** * You can bind your own process handler by implementing the IProcessHandler interface. * A default is provided. */ process_handler: Symbol('process_handler'), /** * Config to pass to the process handler. */ process_handler_config: Symbol('process_handler_config'), /** * You can get a new worker by using IoC, * e.g. IoC.get<Worker>(SERVICE_IDENTIFIER.worker); */ worker: Symbol('Worker'), /** * Pass your custom tracer or [NodeTracer](https://github.com/open-telemetry/opentelemetry-js/blob/master/packages/opentelemetry-node/src/NodeTracer.ts#L25) in order to trace operations in your worker. * `NodeTracer` must be bound in the IoC; the default is a [NoopTracer](https://github.com/open-telemetry/opentelemetry-js/blob/master/packages/opentelemetry-api/src/trace/NoopTracer.ts#L25). */ tracer: Symbol('tracer'), /** * Pass your custom propagator in order to get the traceId from the Camunda platform. * "TracerPropagator" must be bound in the IoC; the default is a NoopTracerPropagator.
};
*/ tracer_propagator: Symbol('tracer_propagator'),
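These Symbols are meant to serve as IoC binding keys. Assuming an InversifyJS-style container, which this file does not name, binding and resolving a logger would look like:

import { Container } from "inversify";
import { SERVICE_IDENTIFIER } from "./identifiers";

const container = new Container();
// Bind a console-backed logger under the shared identifier...
container.bind(SERVICE_IDENTIFIER.logger).toConstantValue(console);
// ...and resolve it anywhere the identifier is visible.
const logger = container.get<Console>(SERVICE_IDENTIFIER.logger);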
sequence_label_field.py
from typing import Dict, List, Union, Set import logging from overrides import overrides import torch from torch.autograd import Variable from allennlp.common.checks import ConfigurationError from allennlp.common.util import pad_sequence_to_length from allennlp.data.fields.field import Field from allennlp.data.fields.sequence_field import SequenceField from allennlp.data.vocabulary import Vocabulary logger = logging.getLogger(__name__) # pylint: disable=invalid-name class SequenceLabelField(Field[torch.Tensor]): """ A ``SequenceLabelField`` assigns a categorical label to each element in a :class:`~allennlp.data.fields.sequence_field.SequenceField`. Because it's a labeling of some other field, we take that field as input here, and we use it to determine our padding and other things. This field will get converted into a list of integer class ids, representing the correct class for each element in the sequence. Parameters ---------- labels : ``Union[List[str], List[int]]`` A sequence of categorical labels, encoded as strings or integers. These could be POS tags like [NN, JJ, ...], BIO tags like [B-PERS, I-PERS, O, O, ...], or any other categorical tag sequence. If the labels are encoded as integers, they will not be indexed using a vocab. sequence_field : ``SequenceField`` A field containing the sequence that this ``SequenceLabelField`` is labeling. Most often, this is a ``TextField``, for tagging individual tokens in a sentence. label_namespace : ``str``, optional (default='labels') The namespace to use for converting tag strings into integers. We convert tag strings to integers for you, and this parameter tells the ``Vocabulary`` object which mapping from strings to integers to use (so that "O" as a tag doesn't get the same id as "O" as a word). """ # It is possible that users want to use this field with a namespace which uses OOV/PAD tokens. # This warning will be repeated for every instantiation of this class (i.e for every data # instance), spewing a lot of warnings so this class variable is used to only log a single # warning per namespace. _already_warned_namespaces: Set[str] = set() def __init__(self, labels: Union[List[str], List[int]], sequence_field: SequenceField, label_namespace: str = 'labels') -> None: self.labels = labels self.sequence_field = sequence_field self._label_namespace = label_namespace self._indexed_labels = None self._maybe_warn_for_namespace(label_namespace) if len(labels) != sequence_field.sequence_length(): raise ConfigurationError("Label length and sequence length " "don't match: %d and %d" % (len(labels), sequence_field.sequence_length())) if all([isinstance(x, int) for x in labels]): self._indexed_labels = labels elif not all([isinstance(x, str) for x in labels]): raise ConfigurationError("SequenceLabelFields must be passed either all " "strings or all ints. Found labels {} with " "types: {}.".format(labels, [type(x) for x in labels])) def
(self, label_namespace: str) -> None: if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")): if label_namespace not in self._already_warned_namespaces: logger.warning("Your label namespace was '%s'. We recommend you use a namespace " "ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by " "default to your vocabulary. See documentation for " "`non_padded_namespaces` parameter in Vocabulary.", self._label_namespace) self._already_warned_namespaces.add(label_namespace) @overrides def count_vocab_items(self, counter: Dict[str, Dict[str, int]]): if self._indexed_labels is None: for label in self.labels: counter[self._label_namespace][label] += 1 # type: ignore @overrides def index(self, vocab: Vocabulary): if self._indexed_labels is None: self._indexed_labels = [vocab.get_token_index(label, self._label_namespace) # type: ignore for label in self.labels] @overrides def get_padding_lengths(self) -> Dict[str, int]: return {'num_tokens': self.sequence_field.sequence_length()} @overrides def as_tensor(self, padding_lengths: Dict[str, int], cuda_device: int = -1, for_training: bool = True) -> torch.Tensor: desired_num_tokens = padding_lengths['num_tokens'] padded_tags = pad_sequence_to_length(self._indexed_labels, desired_num_tokens) tensor = Variable(torch.LongTensor(padded_tags), volatile=not for_training) return tensor if cuda_device == -1 else tensor.cuda(cuda_device) @overrides def empty_field(self): # pylint: disable=no-self-use # pylint: disable=protected-access sequence_label_field = SequenceLabelField([], self.sequence_field.empty_field()) sequence_label_field._indexed_labels = [] return sequence_label_field
_maybe_warn_for_namespace
retry_test.go
package gosip import ( "bytes" "context" "fmt" "io/ioutil" "net/http" "strings" "testing" "time" ) func
(t *testing.T) { siteURL := "http://localhost:8989" closer, err := startFakeServer(":8989", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // faking digest response if r.RequestURI == "/_api/ContextInfo" { _, _ = fmt.Fprintf(w, `{"d":{"GetContextWebInformation":{"FormDigestValue":"FAKE","FormDigestTimeoutSeconds":120,"LibraryVersion":"FAKE"}}}`) return } // retry after if r.RequestURI == "/_api/retryafter" && r.Header.Get("X-Gosip-Retry") == "1" { w.Header().Add("Retry-After", "1") w.WriteHeader(http.StatusTooManyRequests) _, _ = w.Write([]byte(`{ "error": "Body is not backed off" }`)) return } // ntlm retry if r.RequestURI == "/_api/ntlm" && r.Header.Get("X-Gosip-Retry") == "" { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte(`{ "error": "NTLM force retry" }`)) return } // context cancel if r.RequestURI == "/_api/contextcancel" && r.Header.Get("X-Gosip-Retry") == "" { w.Header().Add("Retry-After", "5") w.WriteHeader(http.StatusTooManyRequests) _, _ = w.Write([]byte(`{ "error": "context cancel" }`)) return } if r.Body != nil { defer func() { _ = r.Body.Close() }() data, _ := ioutil.ReadAll(r.Body) if r.RequestURI == "/_api/post/keepbody" && r.Header.Get("X-Gosip-Retry") == "1" { if string(data) != "none-empty" { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write([]byte(`{ "error": "Body is not backed off" }`)) return } } } // backoff after 2 retries if r.Header.Get("X-Gosip-Retry") == "2" { _, _ = fmt.Fprintf(w, `{ "result": "Cool after some retries" }`) return } // intentional 503 w.WriteHeader(http.StatusServiceUnavailable) _, _ = w.Write([]byte(`{ "error": "503 Retry Please" }`)) })) if err != nil { t.Fatal(err) } defer func() { _ = closer.Close() }() t.Run("GetRequest", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, RetryPolicies: map[int]int{503: 3}, } req, err := http.NewRequest("GET", client.AuthCnfg.GetSiteURL()+"/_api/get", nil) if err != nil { t.Fatal(err) } resp, err := client.Execute(req) if err != nil { t.Error(err) } defer func() { _ = resp.Body.Close() }() if resp.StatusCode != 200 { t.Error("can't retry a request") } }) t.Run("PostRequest", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, RetryPolicies: map[int]int{503: 3}, } req, err := http.NewRequest("POST", client.AuthCnfg.GetSiteURL()+"/_api/post/keepbody", bytes.NewBuffer([]byte("none-empty"))) if err != nil { t.Fatal(err) } resp, err := client.Execute(req) if err != nil { t.Error(err) } defer func() { _ = resp.Body.Close() }() if resp.StatusCode != 200 { t.Error("can't retry a request") } }) t.Run("PostRequestEmptyBody", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, RetryPolicies: map[int]int{503: 3}, } req, err := http.NewRequest("POST", client.AuthCnfg.GetSiteURL()+"/_api/post", nil) if err != nil { t.Fatal(err) } resp, err := client.Execute(req) if err != nil { t.Error(err) } defer func() { _ = resp.Body.Close() }() if resp.StatusCode != 200 { t.Error("can't retry a request") } }) t.Run("PostRequestShould503", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, RetryPolicies: map[int]int{503: 1}, } req, err := http.NewRequest("POST", client.AuthCnfg.GetSiteURL()+"/_api/post", bytes.NewBuffer([]byte("none-empty"))) if err != nil { t.Fatal(err) } resp, _ := client.Execute(req) defer func() { _ = resp.Body.Close() }() if resp.StatusCode != 503 { t.Error("should receive 503") } }) t.Run("DisableRetry", func(t 
*testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, } req, err := http.NewRequest("GET", client.AuthCnfg.GetSiteURL()+"/_api/get", nil) if err != nil { t.Fatal(err) } req.Header.Add("X-Gosip-NoRetry", "true") resp, _ := client.Execute(req) defer func() { _ = resp.Body.Close() }() if resp.StatusCode != 503 { t.Error("should receive 503") } }) t.Run("RetryAfter", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, } req, err := http.NewRequest("GET", client.AuthCnfg.GetSiteURL()+"/_api/retryafter", nil) if err != nil { t.Fatal(err) } beforeReq := time.Now() if _, err := client.Execute(req); err != nil { t.Error(err) } dur := time.Now().Sub(beforeReq) if dur < 1*time.Second { t.Error("retry after is ignored") } }) t.Run("NtlmRetry", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{ SiteURL: siteURL, Strategy: "ntlm", }, } req, err := http.NewRequest("GET", client.AuthCnfg.GetSiteURL()+"/_api/ntlm", nil) if err != nil { t.Fatal(err) } if _, err := client.Execute(req); err != nil { t.Error(err) } }) t.Run("ContextCancel", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, } req, err := http.NewRequest("GET", client.AuthCnfg.GetSiteURL()+"/_api/contextcancel", nil) if err != nil { t.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) req = req.WithContext(ctx) beforeReq := time.Now() go func() { select { case <-time.After(900 * time.Millisecond): cancel() } }() _, _ = client.Execute(req) // should be canceled with a context after 900 milliseconds dur := time.Now().Sub(beforeReq) if dur > 1*time.Second { t.Error("context canceling failed") } }) t.Run("ContextCancel", func(t *testing.T) { client := &SPClient{ AuthCnfg: &AnonymousCnfg{SiteURL: siteURL}, } req, err := http.NewRequest("GET", client.AuthCnfg.GetSiteURL()+"/_api/contextcancel_2", nil) if err != nil { t.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) req = req.WithContext(ctx) cancel() _, err = client.Execute(req) // should be prevented due to already closed context if err != nil && strings.Index(err.Error(), "context canceled") == -1 { t.Error("context canceling failed") } }) }
TestRetry
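The fixture above exercises three behaviors: honoring Retry-After, capping retries per status code, and replaying request bodies. The Retry-After rule, sketched in TypeScript as a language-neutral illustration (gosip's actual retry logic is the Go client under test):

// Illustrative only; gosip's real implementation lives in its Go client.
async function fetchWithRetryAfter(url: string, maxRetries = 3): Promise<Response> {
  for (let attempt = 0; ; attempt++) {
    const res = await fetch(url);
    if ((res.status !== 429 && res.status !== 503) || attempt >= maxRetries) {
      return res;
    }
    // Prefer the server's Retry-After hint; fall back to exponential backoff.
    const hint = Number(res.headers.get("Retry-After"));
    const delayMs = Number.isFinite(hint) && hint > 0 ? hint * 1000 : 2 ** attempt * 100;
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
}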
column_positions_request_builder.go
package columnpositions import (
// ColumnPositionsRequestBuilder builds and executes requests for operations under \drive\list\contentTypes\{contentType-id}\columnPositions type ColumnPositionsRequestBuilder struct { // Path parameters for the request pathParameters map[string]string; // The request adapter to use to execute the requests. requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter; // Url template to use to build the URL for the current request builder urlTemplate string; } // ColumnPositionsRequestBuilderGetOptions options for Get type ColumnPositionsRequestBuilderGetOptions struct { // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Request query parameters Q *ColumnPositionsRequestBuilderGetQueryParameters; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // ColumnPositionsRequestBuilderGetQueryParameters column order information in a content type. type ColumnPositionsRequestBuilderGetQueryParameters struct { // Include count of items Count *bool; // Expand related entities Expand []string; // Filter items by property values Filter *string; // Order items by property values Orderby []string; // Search items by search phrases Search *string; // Select properties to be returned Select []string; // Skip the first n items Skip *int32; // Show only the first n items Top *int32; } // NewColumnPositionsRequestBuilderInternal instantiates a new ColumnPositionsRequestBuilder and sets the default values. func NewColumnPositionsRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*ColumnPositionsRequestBuilder) { m := &ColumnPositionsRequestBuilder{ } m.urlTemplate = "{+baseurl}/drive/list/contentTypes/{contentType_id}/columnPositions{?top,skip,search,filter,count,orderby,select,expand}"; urlTplParams := make(map[string]string) for idx, item := range pathParameters { urlTplParams[idx] = item } m.pathParameters = pathParameters; m.requestAdapter = requestAdapter; return m } // NewColumnPositionsRequestBuilder instantiates a new ColumnPositionsRequestBuilder and sets the default values. func NewColumnPositionsRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*ColumnPositionsRequestBuilder) { urlParams := make(map[string]string) urlParams["request-raw-url"] = rawUrl return NewColumnPositionsRequestBuilderInternal(urlParams, requestAdapter) } // CreateGetRequestInformation column order information in a content type. func (m *ColumnPositionsRequestBuilder) CreateGetRequestInformation(options *ColumnPositionsRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET if options != nil && options.Q != nil { requestInfo.AddQueryParameters(*(options.Q)) } if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) 
if err != nil { return nil, err } } return requestInfo, nil } // Get column order information in a content type. func (m *ColumnPositionsRequestBuilder) Get(options *ColumnPositionsRequestBuilderGetOptions)(*ColumnPositionsResponse, error) { requestInfo, err := m.CreateGetRequestInformation(options); if err != nil { return nil, err } res, err := m.requestAdapter.SendAsync(*requestInfo, func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewColumnPositionsResponse() }, nil, nil) if err != nil { return nil, err } return res.(*ColumnPositionsResponse), nil } func (m *ColumnPositionsRequestBuilder) Ref()(*i01e6d69ef062545687691d54320e15b9cb6abf65bdd59aa36b65c585733ccc94.RefRequestBuilder) { return i01e6d69ef062545687691d54320e15b9cb6abf65bdd59aa36b65c585733ccc94.NewRefRequestBuilderInternal(m.pathParameters, m.requestAdapter); }
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go" i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization" i01e6d69ef062545687691d54320e15b9cb6abf65bdd59aa36b65c585733ccc94 "github.com/microsoftgraph/msgraph-sdk-go/drive/list/contenttypes/item/columnpositions/ref" )
threatactor_pattern.py
import addict from act.scio.aliasregex import normalize from act.scio.vocabulary import Vocabulary from act.scio.plugin import BasePlugin, Result from typing import Text, List import configparser import os.path def
(name: Text) -> Text: return normalize( name, capitalize=True, uppercase_abbr=["APT", "BRONZE", "IRON", "GOLD"], ) class Plugin(BasePlugin): name = "threatactor" info = "Extracting references to known threat actors from a body of text" version = "0.2" dependencies: List[Text] = [] async def analyze(self, nlpdata: addict.Dict) -> Result: ini = configparser.ConfigParser() ini.read([os.path.join(self.configdir, "threatactor_pattern.ini")]) ini['threat_actor']['alias'] = os.path.join(self.configdir, ini['threat_actor']['alias']) vocab = Vocabulary(ini['threat_actor']) res = addict.Dict() res.ThreatActors = vocab.regex_search( nlpdata.content, normalize_result=normalize_ta, debug=self.debug) return Result(name=self.name, version=self.version, result=res)
normalize_ta
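# Illustrative note (assumed behavior, depends on act.scio.aliasregex.normalize):
# with capitalize=True and "APT" in uppercase_abbr, an extracted alias such as
# "fancy bear" would normalize to "Fancy Bear", while abbreviations keep their
# casing, e.g. "apt 28" -> "APT 28".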
noaa_logs_repository_test.go
package logs_test import ( "errors" "time" "code.cloudfoundry.org/cli/cf/configuration/coreconfig" noaaerrors "github.com/cloudfoundry/noaa/errors" "github.com/cloudfoundry/sonde-go/events" "github.com/gogo/protobuf/proto" "code.cloudfoundry.org/cli/cf/api/authentication/authenticationfakes" testapi "code.cloudfoundry.org/cli/cf/api/logs/logsfakes" testconfig "code.cloudfoundry.org/cli/util/testhelpers/configuration" "sync" "code.cloudfoundry.org/cli/cf/api/logs" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("logs with noaa repository", func() { var ( fakeNoaaConsumer *testapi.FakeNoaaConsumer config coreconfig.ReadWriter fakeTokenRefresher *authenticationfakes.FakeRepository retryTimeout time.Duration repo *logs.NoaaLogsRepository ) BeforeEach(func() { fakeNoaaConsumer = &testapi.FakeNoaaConsumer{} config = testconfig.NewRepositoryWithDefaults() config.SetLoggregatorEndpoint("loggregator.test.com") config.SetDopplerEndpoint("doppler.test.com") config.SetAccessToken("the-access-token") fakeTokenRefresher = &authenticationfakes.FakeRepository{} retryTimeout = time.Second + 500*time.Millisecond repo = logs.NewNoaaLogsRepository(config, fakeNoaaConsumer, fakeTokenRefresher, retryTimeout) }) Describe("Authentication Token Refresh", func() { It("sets the noaa token refresher", func() { Expect(fakeNoaaConsumer.RefreshTokenFromCallCount()).To(Equal(1)) Expect(fakeNoaaConsumer.RefreshTokenFromArgsForCall(0)).To(Equal(fakeTokenRefresher)) }) }) Describe("RecentLogsFor", func() { Context("when an error does not occur", func() { var msg1, msg2, msg3 *events.LogMessage BeforeEach(func() { msg1 = makeNoaaLogMessage("message 1", 1000) msg2 = makeNoaaLogMessage("message 2", 2000) msg3 = makeNoaaLogMessage("message 3", 3000) fakeNoaaConsumer.RecentLogsReturns([]*events.LogMessage{ msg3, msg2, msg1, }, nil) }) It("gets the logs for the requested app", func() { repo.RecentLogsFor("app-guid-1") arg, _ := fakeNoaaConsumer.RecentLogsArgsForCall(0) Expect(arg).To(Equal("app-guid-1")) }) It("returns the sorted log messages", func() { messages, err := repo.RecentLogsFor("app-guid") Expect(err).NotTo(HaveOccurred()) Expect(messages).To(Equal([]logs.Loggable{ logs.NewNoaaLogMessage(msg1), logs.NewNoaaLogMessage(msg2), logs.NewNoaaLogMessage(msg3), })) }) }) }) Describe("TailLogsFor", func() { var errChan chan error var logChan chan logs.Loggable AfterEach(func() { Eventually(errChan).Should(BeClosed()) Eventually(logChan).Should(BeClosed()) }) Context("when an error occurs", func() { var ( e chan error c chan *events.LogMessage closeWg *sync.WaitGroup ) BeforeEach(func() { closeWg = new(sync.WaitGroup) errChan = make(chan error) logChan = make(chan logs.Loggable) e = make(chan error, 1) c = make(chan *events.LogMessage) closeWg.Add(1) fakeNoaaConsumer.CloseStub = func() error { defer closeWg.Done() close(e) close(c) return nil } }) AfterEach(func() { closeWg.Wait() }) It("returns an error when it occurs", func(done Done) { defer repo.Close() err := errors.New("oops") fakeNoaaConsumer.TailingLogsStub = func(appGuid string, authToken string) (<-chan *events.LogMessage, <-chan error) { e <- err return c, e } var wg sync.WaitGroup wg.Add(1) defer wg.Wait() go func() { defer wg.Done() repo.TailLogsFor("app-guid", func() {}, logChan, errChan) }() Eventually(errChan).Should(Receive(&err)) close(done) }) It("does not return a RetryError before RetryTimeout", func(done Done) { defer repo.Close() err := noaaerrors.NewRetryError(errors.New("oops")) fakeNoaaConsumer.TailingLogsStub = func(appGuid 
string, authToken string) (<-chan *events.LogMessage, <-chan error) { e <- err return c, e } var wg sync.WaitGroup wg.Add(1) defer wg.Wait() go func() { defer wg.Done() repo.TailLogsFor("app-guid", func() {}, logChan, errChan) }() Consistently(errChan).ShouldNot(Receive()) close(done) }) It("returns a RetryError if no data is received before RetryTimeout", func() { defer repo.Close() err := noaaerrors.NewRetryError(errors.New("oops")) fakeNoaaConsumer.TailingLogsStub = func(appGuid string, authToken string) (<-chan *events.LogMessage, <-chan error) { e <- err return c, e } var wg sync.WaitGroup wg.Add(1) defer wg.Wait() go func() { defer wg.Done() repo.TailLogsFor("app-guid", func() {}, logChan, errChan) }() Consistently(errChan, time.Second).ShouldNot(Receive()) expectedErr := errors.New("Timed out waiting for connection to Loggregator (doppler.test.com).") Eventually(errChan, time.Second).Should(Receive(Equal(expectedErr))) }) It("Resets the retry timeout after a successful reconnection", func() { defer repo.Close() err := noaaerrors.NewRetryError(errors.New("oops")) fakeNoaaConsumer.TailingLogsStub = func(appGuid string, authToken string) (<-chan *events.LogMessage, <-chan error) { e <- err return c, e } var wg sync.WaitGroup wg.Add(1) defer wg.Wait() go func() { defer wg.Done() repo.TailLogsFor("app-guid", func() {}, logChan, errChan) }() Consistently(errChan, time.Second).ShouldNot(Receive()) fakeNoaaConsumer.SetOnConnectCallbackArgsForCall(0)() c <- makeNoaaLogMessage("foo", 100) Eventually(logChan).Should(Receive()) Consistently(errChan, time.Second).ShouldNot(Receive()) e <- err expectedErr := errors.New("Timed out waiting for connection to Loggregator (doppler.test.com).") Eventually(errChan, 2*time.Second).Should(Receive(Equal(expectedErr))) }) }) Context("when no error occurs", func() { var e chan error var c chan *events.LogMessage BeforeEach(func() { errChan = make(chan error) logChan = make(chan logs.Loggable) e = make(chan error) c = make(chan *events.LogMessage) fakeNoaaConsumer.CloseStub = func() error { close(e) close(c) return nil } }) It("asks for the logs for the given app", func(done Done) { defer repo.Close() fakeNoaaConsumer.TailingLogsReturns(c, e) repo.TailLogsFor("app-guid", func() {}, logChan, errChan) Eventually(fakeNoaaConsumer.TailingLogsCallCount).Should(Equal(1)) appGuid, token := fakeNoaaConsumer.TailingLogsArgsForCall(0) Expect(appGuid).To(Equal("app-guid")) Expect(token).To(Equal("the-access-token")) close(done) }, 2) It("sets the on connect callback", func() { defer repo.Close() fakeNoaaConsumer.TailingLogsReturns(c, e) callbackCalled := make(chan struct{}) var cb = func() { close(callbackCalled) return } repo.TailLogsFor("app-guid", cb, logChan, errChan) Expect(fakeNoaaConsumer.SetOnConnectCallbackCallCount()).To(Equal(1)) arg := fakeNoaaConsumer.SetOnConnectCallbackArgsForCall(0) arg() Expect(callbackCalled).To(BeClosed()) }) }) Context("and the buffer time is sufficient for sorting", func() { var msg1, msg2, msg3 *events.LogMessage var ec chan error var lc chan *events.LogMessage var syncMu sync.Mutex BeforeEach(func() { msg1 = makeNoaaLogMessage("hello1", 100) msg2 = makeNoaaLogMessage("hello2", 200) msg3 = makeNoaaLogMessage("hello3", 300) errChan = make(chan error) logChan = make(chan logs.Loggable) ec = make(chan error) syncMu.Lock() lc = make(chan *events.LogMessage) syncMu.Unlock() fakeNoaaConsumer.TailingLogsStub = func(string, string) (<-chan *events.LogMessage, <-chan error) { go func() { syncMu.Lock() lc <- msg3 lc <- msg2 lc <- msg1 
syncMu.Unlock() }() return lc, ec } }) JustBeforeEach(func() { repo = logs.NewNoaaLogsRepository(config, fakeNoaaConsumer, fakeTokenRefresher, retryTimeout) fakeNoaaConsumer.CloseStub = func() error { syncMu.Lock() close(lc) syncMu.Unlock() close(ec) return nil } }) Context("when the channels are closed before reading", func() { It("sorts the messages before yielding them", func(done Done) { receivedMessages := []logs.Loggable{} repo.TailLogsFor("app-guid", func() {}, logChan, errChan) Consistently(errChan).ShouldNot(Receive()) m := <-logChan receivedMessages = append(receivedMessages, m) m = <-logChan receivedMessages = append(receivedMessages, m) m = <-logChan receivedMessages = append(receivedMessages, m) repo.Close() Expect(receivedMessages).To(Equal([]logs.Loggable{ logs.NewNoaaLogMessage(msg1), logs.NewNoaaLogMessage(msg2), logs.NewNoaaLogMessage(msg3), })) close(done) }) }) Context("when the channels are read while being written to", func() { It("sorts the messages before yielding them", func(done Done) { receivedMessages := []logs.Loggable{} repo.TailLogsFor("app-guid", func() {}, logChan, errChan) Consistently(errChan).ShouldNot(Receive()) m := <-logChan receivedMessages = append(receivedMessages, m) m = <-logChan receivedMessages = append(receivedMessages, m) m = <-logChan receivedMessages = append(receivedMessages, m) repo.Close() Expect(receivedMessages).To(Equal([]logs.Loggable{ logs.NewNoaaLogMessage(msg1), logs.NewNoaaLogMessage(msg2), logs.NewNoaaLogMessage(msg3), })) close(done) }) It("flushes remaining log messages when Close is called", func() { repo.BufferTime = 10 * time.Second repo.TailLogsFor("app-guid", func() {}, logChan, errChan) Consistently(errChan).ShouldNot(Receive()) Consistently(logChan).ShouldNot(Receive()) repo.Close() Eventually(logChan).Should(Receive(Equal(logs.NewNoaaLogMessage(msg1)))) Eventually(logChan).Should(Receive(Equal(logs.NewNoaaLogMessage(msg2)))) Eventually(logChan).Should(Receive(Equal(logs.NewNoaaLogMessage(msg3)))) }) }) }) }) }) func makeNoaaLogMessage(message string, timestamp int64) *events.LogMessage
{ messageType := events.LogMessage_OUT sourceName := "DEA" return &events.LogMessage{ Message: []byte(message), AppId: proto.String("app-guid"), MessageType: &messageType, SourceType: &sourceName, Timestamp: proto.Int64(timestamp), } }
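// Note (summary of the contract encoded by the tests above, not new behavior):
// noaa RetryErrors are swallowed and tailing is retried silently; only when
// retryTimeout (1.5s in this suite) elapses without any log message does the
// repository emit "Timed out waiting for connection to Loggregator
// (doppler.test.com)", and a successful reconnect resets that timer.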
day05.rs
type TParsed = Vec<Seat>;

pub fn day(input: String) -> (usize, usize) {
    let parsed_input = parse(&input);
    (part_1(&parsed_input), part_2(&parsed_input))
}

fn part_1(input: &TParsed) -> usize {
    input
        .iter()
        .max_by(|x, y| x.id().cmp(&y.id()))
        .unwrap()
        .id()
}

fn part_2(input: &TParsed) -> usize {
    let mut seated = vec![vec![false; 8]; 128];
    for seat in input {
        assert!(!seated[seat.r][seat.c]);
        seated[seat.r][seat.c] = true;
    }
    let seated = seated;

    // we start 'counting' empty seats once
    // we've encountered occupied ones
    let mut can_count = false;
    let mut result: Option<Seat> = None;

    'outer: for (i, row) in seated.iter().enumerate() {
        for (j, col) in row.iter().enumerate() {
            if *col {
                can_count = true;
            } else if can_count {
                result = Some(Seat { r: i, c: j });
                break 'outer;
            }
        }
    }

    result.expect("Unable to find part 2 seat").id()
}

#[derive(Debug)]
struct Seat {
    r: usize,
    c: usize,
}

impl Seat {
    fn id(&self) -> usize {
        self.r * 8 + self.c
    }

    fn find_position(input: &str) -> (usize, usize) {
        let mut range_row = (0, 127);
        let mut range_col = (0, 7);

        for c in input.chars() {
            let rr = range_row.1 - range_row.0 + 1;
            let rc = range_col.1 - range_col.0 + 1;
            match c {
                'F' => range_row.1 -= rr / 2,
                'B' => range_row.0 += rr / 2,
                'L' => range_col.1 -= rc / 2,
                'R' => range_col.0 += rc / 2,
                _ => panic!("Unexpected input {}", c),
            }
        }

        assert_eq!(range_row.0, range_row.1);
        assert_eq!(range_col.0, range_col.1);
        (range_row.0, range_col.0)
    }
}

impl From<&str> for Seat {
    fn from(val: &str) -> Self {
        let t = Seat::find_position(val);
        Seat { r: t.0, c: t.1 }
    }
}

fn parse(input: &str) -> TParsed {
    input.lines().map(Seat::from).collect()
}
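// Aside (not part of the original solution): because 'B'/'R' pick the upper
// half and 'F'/'L' the lower, a boarding pass is just a 10-bit binary number,
// and row * 8 + col equals that number directly. A minimal sketch of the
// equivalence, using only the types defined above:
#[test]
fn test_binary_id_equivalence() {
    // fold the pass into an integer, treating 'B' and 'R' as 1-bits
    fn id_binary(pass: &str) -> usize {
        pass.chars()
            .fold(0, |acc, c| acc * 2 + matches!(c, 'B' | 'R') as usize)
    }
    assert_eq!(id_binary("FBFBBFFRLR"), Seat::from("FBFBBFFRLR").id());
}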
#[test]
fn test_example_1() {
    // no example input covers a whole part; validate the seat-ID helper
    // against the documented boarding passes instead
    assert_eq!(Seat::from(test::EXAMPLE_INPUT_1).id(), 357);
    assert_eq!(Seat::from(test::EXAMPLE_INPUT_2).id(), 567);
    assert_eq!(Seat::from(test::EXAMPLE_INPUT_3).id(), 119);
    assert_eq!(Seat::from(test::EXAMPLE_INPUT_4).id(), 820);
}

#[cfg(test)]
mod test {
    pub const EXAMPLE_INPUT_1: &str = "FBFBBFFRLR";
    pub const EXAMPLE_INPUT_2: &str = "BFFFBBFRRR";
    pub const EXAMPLE_INPUT_3: &str = "FFFBBBFRRR";
    pub const EXAMPLE_INPUT_4: &str = "BBFFBBFRLL";
}
errors.go
package playwright // Error represents a Playwright error type Error struct { Message string Stack string } func (e *Error) Error() string { return e.Message } // TimeoutError represents a Playwright TimeoutError type TimeoutError Error func (e *TimeoutError) Error() string { return e.Message } func parseError(err errorPayload) error { if err.Name == "TimeoutError" { return &TimeoutError{ Message: err.Message, Stack: err.Stack, } } return &Error{
Message: err.Message, Stack: err.Stack, } }
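// Usage sketch (the caller and doSomething below are hypothetical, not part of
// this package): since parseError returns a *TimeoutError for payloads named
// "TimeoutError", callers can branch on the concrete type with the standard
// library's errors.As:
//
//	var timeoutErr *TimeoutError
//	if err := doSomething(); errors.As(err, &timeoutErr) {
//		// handle the timeout case, e.g. retry with a longer deadline
//	}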
properties_test.py
#!/usr/bin/env vpython # Copyright 2015 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. import test_env from recipe_engine import recipe_api, config RECIPE_PROPERTY = recipe_api.BoundProperty.RECIPE_PROPERTY MODULE_PROPERTY = recipe_api.BoundProperty.MODULE_PROPERTY def make_prop(**kwargs): name = kwargs.pop('name', 'dumb_name') return recipe_api.Property(**kwargs).bind( name, RECIPE_PROPERTY, 'fake_repo::fake_recipe') class TestProperties(test_env.RecipeEngineUnitTest): def testDefault(self): """Tests the default option of properties.""" for val in (1, {}, 'test', None): prop = make_prop(default=val) self.assertEqual(val, prop.interpret(recipe_api.PROPERTY_SENTINEL, {})) def testRequired(self): """Tests that a required property errors when not provided.""" prop = make_prop() with self.assertRaises(ValueError): prop.interpret(recipe_api.PROPERTY_SENTINEL, {}) def testTypeSingle(self):
def testTypeFancy(self): """Tests a config style type property.""" prop = make_prop(kind=config.List(int)) for value in (1, 'hi', [3, 'test']): with self.assertRaises(TypeError): prop.interpret(value, {}) self.assertEqual([2, 3], prop.interpret([2, 3], {})) def testFromEnviron(self): """Tests that properties can pick up values from environment.""" prop = make_prop(default='def', from_environ='ENV_VAR') # Nothing is given => falls back to hardcoded default. self.assertEqual('def', prop.interpret(recipe_api.PROPERTY_SENTINEL, {})) # Only env var is given => uses it. self.assertEqual( 'var', prop.interpret(recipe_api.PROPERTY_SENTINEL, {'ENV_VAR': 'var'})) # Explicit values override the environment. self.assertEqual('value', prop.interpret('value', {'ENV_VAR': 'var'})) def testValidTypes(self): check = recipe_api.BoundProperty.legal_name for test, result, is_param_name in ( ('', False, False), ('.', False, False), ('foo', True, False), ('weird.param.name', True, False), ('weird.param.name', False, True), ('rietveld_url', True, False),): self.assertEqual( check(test, is_param_name=is_param_name), result, "name {} should be {}. is_param_name={}".format( test, result, is_param_name)) def testParamName(self): """ Tests setting a param name correctly carries through to a bound property. """ prop = recipe_api.Property(param_name='b') bound = prop.bind('a', RECIPE_PROPERTY, 'fake_repo::fake_recipe') self.assertEqual('b', bound.param_name) def testParamNameDotted(self): """ Tests setting a param name correctly carries through to a bound property. """ prop = recipe_api.Property(param_name='good_name') bound = prop.bind('bad.name-time', RECIPE_PROPERTY, 'fake_repo::fake_recipe') self.assertEqual('good_name', bound.param_name) def testModuleName(self): """ Tests declaring $repo_name/module properties. 
""" prop = recipe_api.Property(param_name='foo') prop.bind('$fake_repo/fake_module', MODULE_PROPERTY, 'fake_repo::fake_module') with self.assertRaises(ValueError): prop.bind('$fake_repo/wrong_module', MODULE_PROPERTY, 'fake_repo::fake_module') with self.assertRaises(ValueError): prop.bind('$fake_repo/fake_module', RECIPE_PROPERTY, 'fake_repo::fake_module:example') class TestProtoProperties(test_env.RecipeEngineUnitTest): def setUp(self): super(TestProtoProperties, self).setUp() self.deps = self.FakeRecipeDeps() main = self.deps.main_repo with main.write_file('recipe_proto/proto_props/props.proto') as proto: proto.write(''' syntax = "proto3"; message Props { string best_prop = 1; int32 worst_prop = 2; } message ModProps { string mod_prop = 1; } message EnvProps { string STR_ENVVAR = 1; int32 NUM_ENVVAR = 2; } ''') def testRecipeProperties(self): main = self.deps.main_repo with main.write_recipe('recipe') as recipe: recipe.imports = ['from PB.proto_props import props'] recipe.DEPS += ['recipe_engine/properties'] recipe.PROPERTIES = 'props.Props' recipe.ENV_PROPERTIES = 'props.EnvProps' recipe.RunSteps_args += ['props', 'env_props'] recipe.RunSteps.write(''' api.step('dump', ['echo', '[ normal prop:', props.best_prop, ']']) api.step('dump', ['echo', '[ env prop:', env_props.STR_ENVVAR, ']']) ''') output, retcode = main.recipes_py( 'run', 'recipe', 'best_prop="best property"', env={ 'STR_ENVVAR': 'coolio', }) self.assertEqual(retcode, 0, output) self.assertIn('[ normal prop: best property ]', output) self.assertIn('[ env prop: coolio ]', output) def testModuleProperties(self): main = self.deps.main_repo with main.write_module('modname') as mod: mod.imports = ['from PB.proto_props import props'] mod.PROPERTIES = 'props.ModProps' mod.GLOBAL_PROPERTIES = 'props.Props' mod.ENV_PROPERTIES = 'props.EnvProps' mod.api.write(''' def __init__(self, props, global_props, env_props, **kwargs): super(ModnameApi, self).__init__(**kwargs) self.value = global_props.best_prop self.mod_value = props.mod_prop self.env_value_str = env_props.STR_ENVVAR self.env_value_num = env_props.NUM_ENVVAR ''') with main.write_recipe('recipe') as recipe: recipe.DEPS += ['modname'] recipe.RunSteps.write(''' api.step('dump global', ['echo', '[ global:', api.modname.value, ']']) api.step('dump mod', ['echo', '[ mod:', api.modname.mod_value, ']']) api.step('dump env str', ['echo', '[ env str:', api.modname.env_value_str, ']']) api.step('dump env num', ['echo', '[ env num:', api.modname.env_value_num, ']']) ''') output, retcode = main.recipes_py( 'run', 'recipe', 'best_prop="best property"', '$main/modname={"mod_prop": "mod property"}', env={ 'STR_ENVVAR': 'env property', 'NUM_ENVVAR': '9000', }) self.assertEqual(retcode, 0, output) self.assertIn('[ global: best property ]', output) self.assertIn('[ mod: mod property ]', output) self.assertIn('[ env str: env property ]', output) self.assertIn('[ env num: 9000 ]', output) def testBadPropertyType(self): main = self.deps.main_repo with main.write_recipe('recipe') as recipe: recipe.imports = ['from PB.proto_props import props'] recipe.PROPERTIES = 'props.Props' recipe.RunSteps_args += ['properties'] output, retcode = main.recipes_py('run', 'recipe', 'worst_prop="invalid value"') self.assertNotEqual(retcode, 0, output) self.assertRegexpMatches(output, r'ParseError.*worst_prop.*invalid value') if __name__ == '__main__': test_env.main()
"""Tests a simple typed property.""" prop = make_prop(kind=bool) with self.assertRaises(TypeError): prop.interpret(1, {}) self.assertEqual(True, prop.interpret(True, {}))
cytoscape.go
// Package cytoscape provides conversion from our graph to the CytoscapeJS
// configuration json model.
//
// The following links are useful for understanding CytoscapeJS and its configuration:
//
// Main page:   http://js.cytoscape.org/
// JSON config: http://js.cytoscape.org/#notation/elements-json
// Demos:       http://js.cytoscape.org/#demos
//
// Algorithm: Process the graph structure adding nodes and edges, decorating each
//            with information provided. An optional second pass generates compound
//            nodes for requested boxing.
//
// The package provides the Cytoscape implementation of graph/ConfigVendor.
package cytoscape

import (
	"crypto/md5"
	"fmt"
	"sort"
	"strings"

	"github.com/kiali/kiali/graph"
)

// ResponseFlags is a map of maps. Each response code is broken down by responseFlags:percentageOfTraffic, e.g.:
// "200" : {
//    "-"     : "80.0",
//    "DC"    : "10.0",
//    "FI,FD" : "10.0"
// }, ...
type ResponseFlags map[string]string

// ResponseHosts is a map of maps. Each response code is broken down by responseHost:percentageOfTraffic, e.g.:
// "200" : {
//    "www.google.com" : "80.0",
//    "www.yahoo.com"  : "20.0"
// }, ...
type ResponseHosts map[string]string

// ResponseDetail holds information broken down by response code.
type ResponseDetail struct {
	Flags ResponseFlags `json:"flags,omitempty"`
	Hosts ResponseHosts `json:"hosts,omitempty"`
}

// Responses maps responseCodes to detailed information for that code
type Responses map[string]*ResponseDetail

// ProtocolTraffic supplies all of the traffic information for a single protocol
type ProtocolTraffic struct {
	Protocol  string            `json:"protocol,omitempty"`  // protocol
	Rates     map[string]string `json:"rates,omitempty"`     // map[rate]value
	Responses Responses         `json:"responses,omitempty"` // see comment above
}

// GWInfo contains the resolved gateway configuration if the node represents an Istio gateway
type GWInfo struct {
	// IngressInfo contains the resolved gateway configuration if the node represents an Istio ingress gateway
	IngressInfo GWInfoIngress `json:"ingressInfo,omitempty"`
}

// GWInfoIngress contains the resolved gateway configuration if the node represents an Istio ingress gateway
type GWInfoIngress struct {
	// Hostnames is the list of hosts being served by the associated Istio gateways.
	Hostnames []string `json:"hostnames,omitempty"`
}

// VSInfo contains the resolved VS configuration if the node has a VS attached.
type VSInfo struct {
	// Hostnames is the list of hostnames configured in the associated VSs
	Hostnames []string `json:"hostnames,omitempty"`
}

// HealthConfig maps annotation information for health
type HealthConfig map[string]string

type NodeData struct {
	// Cytoscape Fields
	ID string `json:"id"` // unique internal node ID (n0, n1...)
Parent string `json:"parent,omitempty"` // Compound Node parent ID // App Fields (not required by Cytoscape) NodeType string `json:"nodeType"` Cluster string `json:"cluster"` Namespace string `json:"namespace"` Workload string `json:"workload,omitempty"` App string `json:"app,omitempty"` Version string `json:"version,omitempty"` Service string `json:"service,omitempty"` // requested service for NodeTypeService Aggregate string `json:"aggregate,omitempty"` // set like "<aggregate>=<aggregateVal>" DestServices []graph.ServiceName `json:"destServices,omitempty"` // requested services for [dest] node Traffic []ProtocolTraffic `json:"traffic,omitempty"` // traffic rates for all detected protocols HasCB bool `json:"hasCB,omitempty"` // true (has circuit breaker) | false HasFaultInjection bool `json:"hasFaultInjection,omitempty"` // true (vs has fault injection) | false HasHealthConfig HealthConfig `json:"hasHealthConfig,omitempty"` // set to the health config override HasMissingSC bool `json:"hasMissingSC,omitempty"` // true (has missing sidecar) | false HasRequestRouting bool `json:"hasRequestRouting,omitempty"` // true (vs has request routing) | false HasRequestTimeout bool `json:"hasRequestTimeout,omitempty"` // true (vs has request timeout) | false HasTCPTrafficShifting bool `json:"hasTCPTrafficShifting,omitempty"` // true (vs has tcp traffic shifting) | false HasTrafficShifting bool `json:"hasTrafficShifting,omitempty"` // true (vs has traffic shifting) | false HasVS *VSInfo `json:"hasVS,omitempty"` // it can be empty if there is a VS without hostnames IsBox string `json:"isBox,omitempty"` // set for NodeTypeBox, current values: [ 'app', 'cluster', 'namespace' ] IsDead bool `json:"isDead,omitempty"` // true (has no pods) | false IsGateway *GWInfo `json:"isGateway,omitempty"` // Istio ingress/egress gateway information IsIdle bool `json:"isIdle,omitempty"` // true | false IsInaccessible bool `json:"isInaccessible,omitempty"` // true if the node exists in an inaccessible namespace IsOutside bool `json:"isOutside,omitempty"` // true | false IsRoot bool `json:"isRoot,omitempty"` // true | false IsServiceEntry *graph.SEInfo `json:"isServiceEntry,omitempty"` // set static service entry information } type EdgeData struct { // Cytoscape Fields ID string `json:"id"` // unique internal edge ID (e0, e1...) 
Source string `json:"source"` // parent node ID Target string `json:"target"` // child node ID // App Fields (not required by Cytoscape) DestPrincipal string `json:"destPrincipal,omitempty"` // principal used for the edge destination IsMTLS string `json:"isMTLS,omitempty"` // set to the percentage of traffic using a mutual TLS connection ResponseTime string `json:"responseTime,omitempty"` // in millis SourcePrincipal string `json:"sourcePrincipal,omitempty"` // principal used for the edge source Throughput string `json:"throughput,omitempty"` // in bytes/sec (request or response, depends on client request) Traffic ProtocolTraffic `json:"traffic,omitempty"` // traffic rates for the edge protocol } type NodeWrapper struct { Data *NodeData `json:"data"` } type EdgeWrapper struct { Data *EdgeData `json:"data"` } type Elements struct { Nodes []*NodeWrapper `json:"nodes"` Edges []*EdgeWrapper `json:"edges"` } type Config struct { Timestamp int64 `json:"timestamp"` Duration int64 `json:"duration"` GraphType string `json:"graphType"` Elements Elements `json:"elements"` } func nodeHash(id string) string { return fmt.Sprintf("%x", md5.Sum([]byte(id))) } func edgeHash(from, to, protocol string) string { return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s.%s.%s", from, to, protocol)))) } // NewConfig is required by the graph/ConfigVendor interface func NewConfig(trafficMap graph.TrafficMap, o graph.ConfigOptions) (result Config) { nodes := []*NodeWrapper{} edges := []*EdgeWrapper{} buildConfig(trafficMap, &nodes, &edges, o) // Add compound nodes as needed, inner boxes first if strings.Contains(o.BoxBy, graph.BoxByApp) || o.GraphType == graph.GraphTypeApp || o.GraphType == graph.GraphTypeVersionedApp { boxByApp(&nodes) } if strings.Contains(o.BoxBy, graph.BoxByNamespace) { boxByNamespace(&nodes) } if strings.Contains(o.BoxBy, graph.BoxByCluster) { boxByCluster(&nodes) } // sort nodes and edges for better json presentation (and predictable testing) // kiali-1258 parent nodes must come before the child references sort.Slice(nodes, func(i, j int) bool { switch { case nodes[i].Data.IsBox != nodes[j].Data.IsBox: rank := func(boxBy string) int { switch boxBy { case graph.BoxByCluster: return 0 case graph.BoxByNamespace: return 1 case graph.BoxByApp: return 2 default: return 3 } } return rank(nodes[i].Data.IsBox) < rank(nodes[j].Data.IsBox) case nodes[i].Data.Cluster != nodes[j].Data.Cluster: return nodes[i].Data.Cluster < nodes[j].Data.Cluster case nodes[i].Data.Namespace != nodes[j].Data.Namespace: return nodes[i].Data.Namespace < nodes[j].Data.Namespace case nodes[i].Data.App != nodes[j].Data.App: return nodes[i].Data.App < nodes[j].Data.App case nodes[i].Data.Version != nodes[j].Data.Version: return nodes[i].Data.Version < nodes[j].Data.Version case nodes[i].Data.Service != nodes[j].Data.Service: return nodes[i].Data.Service < nodes[j].Data.Service default: return nodes[i].Data.Workload < nodes[j].Data.Workload } }) sort.Slice(edges, func(i, j int) bool { switch { case edges[i].Data.Source != edges[j].Data.Source: return edges[i].Data.Source < edges[j].Data.Source case edges[i].Data.Target != edges[j].Data.Target: return edges[i].Data.Target < edges[j].Data.Target default: // source and target are the same, it must differ on protocol return edges[i].Data.Traffic.Protocol < edges[j].Data.Traffic.Protocol } }) elements := Elements{nodes, edges} result = Config{ Duration: int64(o.Duration.Seconds()), Timestamp: o.QueryTime, GraphType: o.GraphType, Elements: elements, } return result } func 
buildConfig(trafficMap graph.TrafficMap, nodes *[]*NodeWrapper, edges *[]*EdgeWrapper, o graph.ConfigOptions) {
	for id, n := range trafficMap {
		nodeID := nodeHash(id)

		nd := &NodeData{
			ID:        nodeID,
			NodeType:  n.NodeType,
			Cluster:   n.Cluster,
			Namespace: n.Namespace,
			Workload:  n.Workload,
			App:       n.App,
			Version:   n.Version,
			Service:   n.Service,
		}

		addNodeTelemetry(n, nd)

		// set annotations, if available
		if val, ok := n.Metadata[graph.HasHealthConfig]; ok {
			nd.HasHealthConfig = val.(map[string]string)
		}

		// node may be dead (has a deployment but no pods running)
		if val, ok := n.Metadata[graph.IsDead]; ok {
			nd.IsDead = val.(bool)
		}

		// node may be idle
		if val, ok := n.Metadata[graph.IsIdle]; ok {
			nd.IsIdle = val.(bool)
		}

		// node may be a root
		if val, ok := n.Metadata[graph.IsRoot]; ok {
			nd.IsRoot = val.(bool)
		}

		// node is not accessible to the current user
		if val, ok := n.Metadata[graph.IsInaccessible]; ok {
			nd.IsInaccessible = val.(bool)
		}

		// node may represent an Istio Ingress Gateway
		if gateways, ok := n.Metadata[graph.IsIngressGateway]; ok {
			var configuredHostnames []string
			for _, hosts := range gateways.(graph.GatewaysMetadata) {
				configuredHostnames = append(configuredHostnames, hosts...)
			}

			nd.IsGateway = &GWInfo{
				IngressInfo: GWInfoIngress{Hostnames: configuredHostnames},
			}
		}

		// node may have a circuit breaker
		if val, ok := n.Metadata[graph.HasCB]; ok {
			nd.HasCB = val.(bool)
		}

		// node may have a virtual service
		if virtualServices, ok := n.Metadata[graph.HasVS]; ok {
			var configuredHostnames []string
			for _, hosts := range virtualServices.(graph.VirtualServicesMetadata) {
				configuredHostnames = append(configuredHostnames, hosts...)
			}

			nd.HasVS = &VSInfo{Hostnames: configuredHostnames}
		}

		// set sidecar checks, if available
		if val, ok := n.Metadata[graph.HasMissingSC]; ok {
			nd.HasMissingSC = val.(bool)
		}

		// check if node is on another namespace
		if val, ok := n.Metadata[graph.IsOutside]; ok {
			nd.IsOutside = val.(bool)
		}

		if val, ok := n.Metadata[graph.HasRequestRouting]; ok {
			nd.HasRequestRouting = val.(bool)
		}

		if val, ok := n.Metadata[graph.HasFaultInjection]; ok {
			nd.HasFaultInjection = val.(bool)
		}

		if val, ok := n.Metadata[graph.HasTrafficShifting]; ok {
			nd.HasTrafficShifting = val.(bool)
		}

		if val, ok := n.Metadata[graph.HasTCPTrafficShifting]; ok {
			nd.HasTCPTrafficShifting = val.(bool)
		}

		if val, ok := n.Metadata[graph.HasRequestTimeout]; ok {
			nd.HasRequestTimeout = val.(bool)
		}

		// node may have destination service info
		if val, ok := n.Metadata[graph.DestServices]; ok {
			nd.DestServices = []graph.ServiceName{}
			for _, val := range val.(graph.DestServicesMetadata) {
				nd.DestServices = append(nd.DestServices, val)
			}
		}

		// node may have service entry static info
		if val, ok := n.Metadata[graph.IsServiceEntry]; ok {
			nd.IsServiceEntry = val.(*graph.SEInfo)
		}

		// node may be an aggregate
		if n.NodeType == graph.NodeTypeAggregate {
			nd.Aggregate = fmt.Sprintf("%s=%s", n.Metadata[graph.Aggregate].(string), n.Metadata[graph.AggregateValue].(string))
		}

		nw := NodeWrapper{
			Data: nd,
		}

		*nodes = append(*nodes, &nw)

		for _, e := range n.Edges {
			sourceIDHash := nodeHash(n.ID)
			destIDHash := nodeHash(e.Dest.ID)
			protocol := ""
			if e.Metadata[graph.ProtocolKey] != nil {
				protocol = e.Metadata[graph.ProtocolKey].(string)
			}
			edgeID := edgeHash(sourceIDHash, destIDHash, protocol)
			ed := EdgeData{
				ID:     edgeID,
				Source: sourceIDHash,
				Target: destIDHash,
				Traffic: ProtocolTraffic{
					Protocol: protocol,
				},
			}
			if e.Metadata[graph.DestPrincipal] != nil {
				ed.DestPrincipal = e.Metadata[graph.DestPrincipal].(string)
			}
			if e.Metadata[graph.SourcePrincipal] != nil {
ed.SourcePrincipal = e.Metadata[graph.SourcePrincipal].(string) } addEdgeTelemetry(e, &ed) ew := EdgeWrapper{ Data: &ed, } *edges = append(*edges, &ew) } } } func addNodeTelemetry(n *graph.Node, nd *NodeData) { for _, p := range graph.Protocols { protocolTraffic := ProtocolTraffic{Protocol: p.Name} for _, r := range p.NodeRates { if rateVal := getRate(n.Metadata, r.Name); rateVal > 0.0 { if protocolTraffic.Rates == nil { protocolTraffic.Rates = make(map[string]string) } protocolTraffic.Rates[string(r.Name)] = rateToString(r.Precision, rateVal) } } if protocolTraffic.Rates != nil { if nd.Traffic == nil { nd.Traffic = []ProtocolTraffic{} } nd.Traffic = append(nd.Traffic, protocolTraffic) } } } func addEdgeTelemetry(e *graph.Edge, ed *EdgeData) { if val, ok := e.Metadata[graph.IsMTLS]; ok { ed.IsMTLS = fmt.Sprintf("%.0f", val.(float64)) } if val, ok := e.Metadata[graph.ResponseTime]; ok { responseTime := val.(float64) ed.ResponseTime = fmt.Sprintf("%.0f", responseTime) } if val, ok := e.Metadata[graph.Throughput]; ok { throughput := val.(float64) ed.Throughput = fmt.Sprintf("%.0f", throughput) } // an edge represents traffic for at most one protocol for _, p := range graph.Protocols { protocolTraffic := ProtocolTraffic{Protocol: p.Name} total := 0.0 err := 0.0 var percentErr, percentReq graph.Rate for _, r := range p.EdgeRates { rateVal := getRate(e.Metadata, r.Name) switch { case r.IsTotal: // there is one field holding the total traffic total = rateVal case r.IsErr: // error rates can be reported for several error status codes, so sum up all // of the error traffic to be used in the percentErr calculation below. err += rateVal case r.IsPercentErr: // hold onto the percentErr field so we know how to report it below percentErr = r case r.IsPercentReq: // hold onto the percentReq field so we know how to report it below percentReq = r } if rateVal > 0.0 { if protocolTraffic.Rates == nil { protocolTraffic.Rates = make(map[string]string) } protocolTraffic.Rates[string(r.Name)] = rateToString(r.Precision, rateVal) } } if protocolTraffic.Rates != nil { if total > 0 { if percentErr.Name != "" { rateVal := err / total * 100 if rateVal > 0.0 { protocolTraffic.Rates[string(percentErr.Name)] = fmt.Sprintf("%.*f", percentErr.Precision, rateVal) } } if percentReq.Name != "" { rateVal := 0.0 for _, r := range p.NodeRates { if !r.IsOut { continue } rateVal = total / getRate(e.Source.Metadata, r.Name) * 100.0 break } if rateVal > 0.0 { protocolTraffic.Rates[string(percentReq.Name)] = fmt.Sprintf("%.*f", percentReq.Precision, rateVal) } } mdResponses := e.Metadata[p.EdgeResponses].(graph.Responses) for code, detail := range mdResponses { responseFlags := make(ResponseFlags) responseHosts := make(ResponseHosts) for flags, value := range detail.Flags { responseFlags[flags] = fmt.Sprintf("%.*f", 1, value/total*100.0) } for host, value := range detail.Hosts { responseHosts[host] = fmt.Sprintf("%.*f", 1, value/total*100.0) } responseDetail := &ResponseDetail{Flags: responseFlags, Hosts: responseHosts} if protocolTraffic.Responses == nil
else {
					protocolTraffic.Responses[code] = responseDetail
				}
			}
			ed.Traffic = protocolTraffic
		}
		break
	}
}

func getRate(md graph.Metadata, k graph.MetadataKey) float64 {
	if rate, ok := md[k]; ok {
		return rate.(float64)
	}
	return 0.0
}

// boxByApp adds compound nodes to box nodes for the same app
func boxByApp(nodes *[]*NodeWrapper) {
	box := make(map[string][]*NodeData)
	for _, nw := range *nodes {
		if nw.Data.App != "unknown" && nw.Data.App != "" {
			k := fmt.Sprintf("box_%s_%s_%s", nw.Data.Cluster, nw.Data.Namespace, nw.Data.App)
			box[k] = append(box[k], nw.Data)
		}
	}
	generateBoxCompoundNodes(box, nodes, graph.BoxByApp)
}

// boxByNamespace adds compound nodes to box nodes in the same namespace
func boxByNamespace(nodes *[]*NodeWrapper) {
	box := make(map[string][]*NodeData)
	for _, nw := range *nodes {
		if nw.Data.Parent == "" {
			k := fmt.Sprintf("box_%s_%s", nw.Data.Cluster, nw.Data.Namespace)
			box[k] = append(box[k], nw.Data)
		}
	}
	generateBoxCompoundNodes(box, nodes, graph.BoxByNamespace)
}

// boxByCluster adds compound nodes to box nodes in the same cluster
func boxByCluster(nodes *[]*NodeWrapper) {
	box := make(map[string][]*NodeData)
	for _, nw := range *nodes {
		if nw.Data.Parent == "" {
			k := fmt.Sprintf("box_%s", nw.Data.Cluster)
			box[k] = append(box[k], nw.Data)
		}
	}
	generateBoxCompoundNodes(box, nodes, graph.BoxByCluster)
}

func generateBoxCompoundNodes(box map[string][]*NodeData, nodes *[]*NodeWrapper, boxBy string) {
	for k, members := range box {
		if boxBy != graph.BoxByApp || len(members) > 1 {
			// create the compound (parent) node for the member nodes
			nodeID := nodeHash(k)
			namespace := ""
			app := ""
			switch boxBy {
			case graph.BoxByNamespace:
				namespace = members[0].Namespace
			case graph.BoxByApp:
				namespace = members[0].Namespace
				app = members[0].App
			}
			nd := NodeData{
				ID:        nodeID,
				NodeType:  graph.NodeTypeBox,
				Cluster:   members[0].Cluster,
				Namespace: namespace,
				App:       app,
				Version:   "",
				IsBox:     boxBy,
			}

			nw := NodeWrapper{
				Data: &nd,
			}

			// assign each member node to the compound parent
			nd.HasMissingSC = false // TODO: this is probably unnecessarily noisy
			nd.IsInaccessible = false
			nd.IsOutside = false

			for _, n := range members {
				n.Parent = nodeID

				// For logical boxing (app), copy some member attributes to the box node
				if boxBy == graph.BoxByApp {
					nd.HasMissingSC = nd.HasMissingSC || n.HasMissingSC
					nd.IsInaccessible = nd.IsInaccessible || n.IsInaccessible
					nd.IsOutside = nd.IsOutside || n.IsOutside
				}
			}

			// add the compound node to the list of nodes
			*nodes = append(*nodes, &nw)
		}
	}
}

func rateToString(minPrecision int, rateVal float64) string {
	precision := minPrecision
	if requiredPrecision := calcPrecision(rateVal, 5); requiredPrecision > minPrecision {
		precision = requiredPrecision
	}

	return fmt.Sprintf("%.*f", precision, rateVal)
}

// calcPrecision returns the precision necessary to see at least one significant digit (up to max)
func calcPrecision(val float64, max int) int {
	if val <= 0 {
		return 0
	}

	precision := 0
	for precision < max {
		if val >= 1 {
			break
		}
		val *= 10
		precision++
	}
	return precision
}
{ protocolTraffic.Responses = Responses{code: responseDetail} }
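// Worked example (illustrative values, not new behavior): rateToString pads
// precision so at least one significant digit survives. calcPrecision(0.0042, 5)
// walks 0.0042 -> 0.042 -> 0.42 -> 4.2 and returns 3, so rateToString(2, 0.0042)
// renders "0.004", while rateToString(2, 3.14159) keeps the minimum precision
// and renders "3.14".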
train_wide_resnet.py
import argparse
import os
import shutil
import time

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.nn.functional as F

from models import L0WideResNet, TDWideResNet
from dataloaders import cifar10, cifar100
from utils import save_checkpoint, AverageMeter, accuracy
from torch.optim import lr_scheduler

parser = argparse.ArgumentParser(description="PyTorch WideResNet Training")
parser.add_argument("--epochs", default=200, type=int, help="number of total epochs to run")
parser.add_argument(
    "--start-epoch", default=0, type=int, help="manual epoch number (useful on restarts)"
)
parser.add_argument(
    "-b", "--batch-size", default=128, type=int, help="mini-batch size (default: 128)"
)
parser.add_argument(
    "--lr", "--learning-rate", default=0.1, type=float, help="initial learning rate"
)
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument(
    "--weight-decay", "--wd", default=0.0005, type=float, help="weight decay (default: 5e-4)"
)
parser.add_argument(
    "--print-freq", "-p", default=100, type=int, help="print frequency (default: 100)"
)
parser.add_argument(
    "--depth", default=28, type=int, help="total depth of the network (default: 28)"
)
parser.add_argument(
    "--width", default=10, type=int, help="total width of the network (default: 10)"
)
parser.add_argument(
    "--droprate_init", default=0.3, type=float, help="dropout probability (default: 0.3)"
)
parser.add_argument(
    "--no-augment",
    dest="augment",
    action="store_false",
    help="whether to use standard augmentation (default: True)",
)
parser.add_argument(
    "--no-bottleneck", dest="bottleneck", action="store_false", help="To not use bottleneck block"
)
parser.add_argument(
    "--resume", default="", type=str, help="path to latest checkpoint (default: none)"
)
parser.add_argument("--name", default="L0WideResNet", type=str, help="name of experiment")
parser.add_argument(
    "--model", default="L0WideResNet", type=str, help="model to train (L0WideResNet or TDWideResNet)"
)
parser.add_argument(
    "--no-tensorboard",
    dest="tensorboard",
    action="store_false",
    help="whether to use tensorboard (default: True)",
)
parser.add_argument("--multi_gpu", action="store_true")
parser.add_argument("--lamba", type=float, default=0.001, help="Coefficient for the L0 term.")
parser.add_argument("--beta_ema", type=float, default=0.99)
parser.add_argument("--lr_decay_ratio", type=float, default=0.2)
parser.add_argument("--dataset", choices=["c10", "c100"], default="c10")
parser.add_argument("--local_rep", action="store_true")
parser.add_argument("--epoch_drop", nargs="*", type=int, default=(60, 120, 160))
parser.add_argument("--temp", type=float, default=2.0 / 3.0)
# NOTE: argparse's type=bool is a footgun (bool("False") is True), so expose
# --prune as a plain flag instead
parser.add_argument("--prune", action="store_true")
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--dropout_botk", type=float, default=0.5)
parser.add_argument("--dropout_type", type=str, default="weight")
parser.set_defaults(bottleneck=True)
parser.set_defaults(augment=True)
parser.set_defaults(tensorboard=True)

best_prec1 = 100
writer = None
time_acc = [(0, 0, 0)]
total_steps = 0
exp_flops, exp_l0 = [], []


def main():
    global args, best_prec1, writer, time_acc, total_steps, exp_flops, exp_l0
    args = parser.parse_args()
    log_dir_net = args.name
    args.name += "_{}_{}".format(args.depth, args.width)
    if args.dataset == "c100":
        args.name += "_c100"
    print("model:", args.name)
    if args.tensorboard:
        # used for logging to TensorBoard
        from tensorboardX import SummaryWriter

        directory = "logs/{}/{}".format(log_dir_net, args.name)
        if
os.path.exists(directory): shutil.rmtree(directory) os.makedirs(directory) else: os.makedirs(directory) writer = SummaryWriter(directory) # Data loading code dataload = cifar10 if args.dataset == "c10" else cifar100 train_loader, val_loader, num_classes = dataload( augment=args.augment, batch_size=args.batch_size ) # create model if args.model == "L0WideResNet": model = L0WideResNet( args.depth, num_classes, widen_factor=args.width, droprate_init=args.droprate_init, N=50000, beta_ema=args.beta_ema, weight_decay=args.weight_decay, local_rep=args.local_rep, lamba=args.lamba, temperature=args.temp, ) if args.model == "TDWideResNet": model = TDWideResNet( args.depth, num_classes, widen_factor=args.width, droprate_init=args.droprate_init, N=50000, beta_ema=args.beta_ema, weight_decay=args.weight_decay, dropout=args.dropout, dropout_botk=args.dropout_botk, dropout_type=args.dropout_type, ) print( "Number of model parameters: {}".format( sum([p.data.nelement() for p in model.parameters()]) ) ) # for training on multiple GPUs. # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use if args.multi_gpu: model = torch.nn.DataParallel(model).cuda() else: if torch.cuda.is_available(): model = model.cuda() optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, nesterov=True) # optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) args.start_epoch = checkpoint["epoch"] best_prec1 = checkpoint["best_prec1"] model.load_state_dict(checkpoint["state_dict"]) optimizer.load_state_dict(checkpoint["optimizer"]) total_steps = checkpoint["total_steps"] time_acc = checkpoint["time_acc"] exp_flops = checkpoint["exp_flops"] exp_l0 = checkpoint["exp_l0"] if args.model == "L0WideResNet" and checkpoint["beta_ema"] > 0: if not args.multi_gpu: model.beta_ema = checkpoint["beta_ema"] model.avg_param = checkpoint["avg_params"] model.steps_ema = checkpoint["steps_ema"] else: model.module.beta_ema = checkpoint["beta_ema"] model.module.avg_param = checkpoint["avg_params"] model.module.steps_ema = checkpoint["steps_ema"] print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint["epoch"])) else: print("=> no checkpoint found at '{}'".format(args.resume)) total_steps, exp_flops, exp_l0 = 0, [], [] cudnn.benchmark = True loglike = nn.CrossEntropyLoss() if torch.cuda.is_available(): loglike = loglike.cuda() # define loss function (criterion) and optimizer def loss_function(output, target_var, model): # print("loss:", loss)
    lr_schedule = lr_scheduler.MultiStepLR(
        optimizer, milestones=args.epoch_drop, gamma=args.lr_decay_ratio
    )

    if args.prune:
        for i in range(10):
            botk = i * 0.1
            model.prune(botk)
            prec1 = validate(val_loader, model, loss_function, 1)
            model.load_state_dict(checkpoint["state_dict"])
            print(botk, 100 - prec1)
        return

    for epoch in range(args.start_epoch, args.epochs):
        time_glob = time.time()

        # train for one epoch
        prec1_tr = train(train_loader, model, loss_function, optimizer, lr_schedule, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, loss_function, epoch)
        time_ep = time.time() - time_glob
        time_acc.append((time_ep + time_acc[-1][0], prec1_tr, prec1))

        # remember best prec@1 and save checkpoint
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        state = {
            "epoch": epoch + 1,
            "state_dict": model.state_dict(),
            "best_prec1": best_prec1,
            "curr_prec1": prec1,
            "optimizer": optimizer.state_dict(),
            "total_steps": total_steps,
            "time_acc": time_acc,
            "exp_flops": exp_flops,
            "exp_l0": exp_l0,
        }
        if args.model == "L0WideResNet":
            if not args.multi_gpu:
                state["beta_ema"] = model.beta_ema
                if model.beta_ema > 0:
                    state["avg_params"] = model.avg_param
                    state["steps_ema"] = model.steps_ema
            else:
                state["beta_ema"] = model.module.beta_ema
                if model.module.beta_ema > 0:
                    state["avg_params"] = model.module.avg_param
                    state["steps_ema"] = model.module.steps_ema
        if args.model == "TDWideResNet":
            state["dropout"] = args.dropout
            state["dropout_botk"] = args.dropout_botk
        save_checkpoint(state, is_best, args.name)
    print("Best error: ", best_prec1)
    if args.tensorboard:
        writer.close()


def train(train_loader, model, criterion, optimizer, lr_schedule, epoch):
    """Train for one epoch on the training set"""
    global total_steps, exp_flops, exp_l0, args, writer
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    lr_schedule.step(epoch=epoch)
    if writer is not None:
        writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"], epoch)

    end = time.time()
    for i, (input_, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        total_steps += 1
        if torch.cuda.is_available():
            # `async=True` is a SyntaxError on Python >= 3.7 (async became a
            # keyword); non_blocking is the supported spelling
            target = target.cuda(non_blocking=True)
            input_ = input_.cuda()
        input_var = torch.autograd.Variable(input_)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var, model)

        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.item(), input_.size(0))
        top1.update(100 - prec1.item(), input_.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if args.model == "L0WideResNet":
            # clamp the parameters
            layers = model.layers if not args.multi_gpu else model.module.layers
            for k, layer in enumerate(layers):
                layer.constrain_parameters()

            e_fl, e_l0 = (
                model.get_exp_flops_l0()
                if not args.multi_gpu
                else model.module.get_exp_flops_l0()
            )
            exp_flops.append(e_fl)
            exp_l0.append(e_l0)
            if writer is not None:
                writer.add_scalar("stats_comp/exp_flops", e_fl, total_steps)
                writer.add_scalar("stats_comp/exp_l0", e_l0, total_steps)

            if not args.multi_gpu:
                if model.beta_ema > 0.0:
                    model.update_ema()
            else:
                if model.module.beta_ema > 0.0:
                    model.module.update_ema()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # input()
        if i % args.print_freq == 0:
            print(
                " Epoch: [{0}][{1}/{2}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t" "Err@1 {top1.val:.3f} ({top1.avg:.3f})".format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, ) ) # log to TensorBoard if writer is not None: writer.add_scalar("train/loss", losses.avg, epoch) writer.add_scalar("train/err", top1.avg, epoch) return top1.avg def validate(val_loader, model, criterion, epoch): """Perform validation on the validation set""" global args, writer batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() # switch to evaluate mode model.eval() if not args.multi_gpu: old_params = model.get_params() if model.beta_ema > 0 and args.model == "L0WideResNet": model.load_ema_params() else: old_params = model.module.get_params() if args.model == "L0WideResNet" and model.module.beta_ema > 0: model.module.load_ema_params() end = time.time() for i, (input_, target) in enumerate(val_loader): if torch.cuda.is_available(): target = target.cuda(async=True) input_ = input_.cuda() input_var = torch.autograd.Variable(input_, volatile=True) target_var = torch.autograd.Variable(target, volatile=True) # compute output output = model(input_var) loss = criterion(output, target_var, model) # measure accuracy and record loss prec1 = accuracy(output.data, target, topk=(1,))[0] losses.update(loss.data.item(), input_.size(0)) top1.update(100 - prec1.item(), input_.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() # if i % args.print_freq == 0: # print( # "Test: [{0}/{1}]\t" # "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t" # "Loss {loss.val:.4f} ({loss.avg:.4f})\t" # "Err@1 {top1.val:.3f} ({top1.avg:.3f})".format( # i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1 # ) # ) # print(" * Err@1 {top1.avg:.3f}".format(top1=top1)) if not args.multi_gpu: if model.beta_ema > 0: model.load_params(old_params) else: if model.module.beta_ema > 0: model.module.load_params(old_params) # log to TensorBoard if writer is not None: writer.add_scalar("val/loss", losses.avg, epoch) writer.add_scalar("val/err", top1.avg, epoch) layers = model.layers if not args.multi_gpu else model.module.layers for k, layer in enumerate(layers): if hasattr(layer, "qz_loga"): mode_z = layer.sample_z(1, sample=0).view(-1) writer.add_histogram("mode_z/layer{}".format(k), mode_z.cpu().data.numpy(), epoch) return top1.avg if __name__ == "__main__": main()
loss = loglike(output, target_var) # reg = model.regularization() if not args.multi_gpu else model.module.regularization() total_loss = loss if torch.cuda.is_available(): total_loss = total_loss.cuda() return total_loss
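# Note (sketch, labelled assumption): the commented-out regularization() call
# above is where an L0 penalty for L0WideResNet would be folded in; with it
# enabled the objective would read roughly `total_loss = loss + reg`.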
impl.go
// Copyright 2015 Prometheus Team // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package notify import ( "bytes" "crypto/sha256" "crypto/tls" "encoding/json" "errors" "fmt" "io/ioutil" "mime" "mime/multipart" "net" "net/http" "net/mail" "net/smtp" "net/textproto" "net/url" "strings" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" commoncfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" "github.com/prometheus/alertmanager/config" "github.com/prometheus/alertmanager/template" "github.com/prometheus/alertmanager/types" ) // A Notifier notifies about alerts under constraints of the given context. // It returns an error if unsuccessful and a flag whether the error is // recoverable. This information is useful for a retry logic. type Notifier interface { Notify(context.Context, ...*types.Alert) (bool, error) } // An Integration wraps a notifier and its config to be uniquely identified by // name and index from its origin in the configuration. type Integration struct { notifier Notifier conf notifierConfig name string idx int } // Notify implements the Notifier interface. func (i *Integration) Notify(ctx context.Context, alerts ...*types.Alert) (bool, error) { return i.notifier.Notify(ctx, alerts...) } // BuildReceiverIntegrations builds a list of integration notifiers off of a // receivers config. func BuildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, logger log.Logger) []Integration { var ( integrations []Integration add = func(name string, i int, n Notifier, nc notifierConfig) { integrations = append(integrations, Integration{ notifier: n, conf: nc, name: name, idx: i, }) } ) for i, c := range nc.WebhookConfigs { n := NewWebhook(c, tmpl, logger) add("webhook", i, n, c) } for i, c := range nc.EmailConfigs { n := NewEmail(c, tmpl, logger) add("email", i, n, c) } for i, c := range nc.PagerdutyConfigs { n := NewPagerDuty(c, tmpl, logger) add("pagerduty", i, n, c) } for i, c := range nc.OpsGenieConfigs { n := NewOpsGenie(c, tmpl, logger) add("opsgenie", i, n, c) } for i, c := range nc.WechatConfigs { n := NewWechat(c, tmpl, logger) add("wechat", i, n, c) } for i, c := range nc.SlackConfigs { n := NewSlack(c, tmpl, logger) add("slack", i, n, c) } for i, c := range nc.HipchatConfigs { n := NewHipchat(c, tmpl, logger) add("hipchat", i, n, c) } for i, c := range nc.VictorOpsConfigs { n := NewVictorOps(c, tmpl, logger) add("victorops", i, n, c) } for i, c := range nc.PushoverConfigs { n := NewPushover(c, tmpl, logger) add("pushover", i, n, c) } return integrations } const contentTypeJSON = "application/json" var userAgentHeader = fmt.Sprintf("Alertmanager/%s", version.Version) // Webhook implements a Notifier for generic webhooks. type Webhook struct { conf *config.WebhookConfig tmpl *template.Template logger log.Logger } // NewWebhook returns a new Webhook. 
func NewWebhook(conf *config.WebhookConfig, t *template.Template, l log.Logger) *Webhook { return &Webhook{conf: conf, tmpl: t, logger: l} } // WebhookMessage defines the JSON object send to webhook endpoints. type WebhookMessage struct { *template.Data // The protocol version. Version string `json:"version"` GroupKey string `json:"groupKey"` } // Notify implements the Notifier interface. func (w *Webhook) Notify(ctx context.Context, alerts ...*types.Alert) (bool, error) { data := w.tmpl.Data(receiverName(ctx, w.logger), groupLabels(ctx, w.logger), alerts...) groupKey, ok := GroupKey(ctx) if !ok { level.Error(w.logger).Log("msg", "group key missing") } msg := &WebhookMessage{ Version: "4", Data: data, GroupKey: groupKey, } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(msg); err != nil { return false, err } req, err := http.NewRequest("POST", w.conf.URL.String(), &buf) if err != nil { return true, err } req.Header.Set("Content-Type", contentTypeJSON) req.Header.Set("User-Agent", userAgentHeader) c, err := commoncfg.NewClientFromConfig(*w.conf.HTTPConfig, "webhook") if err != nil { return false, err } resp, err := ctxhttp.Do(ctx, c, req) if err != nil { return true, err } resp.Body.Close() return w.retry(resp.StatusCode) } func (w *Webhook) retry(statusCode int) (bool, error) { // Webhooks are assumed to respond with 2xx response codes on a successful // request and 5xx response codes are assumed to be recoverable. if statusCode/100 != 2 { return (statusCode/100 == 5), fmt.Errorf("unexpected status code %v from %s", statusCode, w.conf.URL) } return false, nil } // Email implements a Notifier for email notifications. type Email struct { conf *config.EmailConfig tmpl *template.Template logger log.Logger } // NewEmail returns a new Email notifier. func NewEmail(c *config.EmailConfig, t *template.Template, l log.Logger) *Email { if _, ok := c.Headers["Subject"]; !ok { c.Headers["Subject"] = config.DefaultEmailSubject } if _, ok := c.Headers["To"]; !ok { c.Headers["To"] = c.To } if _, ok := c.Headers["From"]; !ok { c.Headers["From"] = c.From } return &Email{conf: c, tmpl: t, logger: l} } // auth resolves a string of authentication mechanisms. func (n *Email) auth(mechs string) (smtp.Auth, error) { username := n.conf.AuthUsername for _, mech := range strings.Split(mechs, " ") { switch mech { case "CRAM-MD5": secret := string(n.conf.AuthSecret) if secret == "" { continue } return smtp.CRAMMD5Auth(username, secret), nil case "PLAIN": password := string(n.conf.AuthPassword) if password == "" { continue } identity := n.conf.AuthIdentity // We need to know the hostname for both auth and TLS. host, _, err := net.SplitHostPort(n.conf.Smarthost) if err != nil { return nil, fmt.Errorf("invalid address: %s", err) } return smtp.PlainAuth(identity, username, password, host), nil case "LOGIN": password := string(n.conf.AuthPassword) if password == "" { continue } return LoginAuth(username, password), nil } } return nil, nil } // Notify implements the Notifier interface. func (n *Email) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { // We need to know the hostname for both auth and TLS. 
var c *smtp.Client host, port, err := net.SplitHostPort(n.conf.Smarthost) if err != nil { return false, fmt.Errorf("invalid address: %s", err) } if port == "465" { conn, err := tls.Dial("tcp", n.conf.Smarthost, &tls.Config{ServerName: host}) if err != nil { return true, err } c, err = smtp.NewClient(conn, n.conf.Smarthost) if err != nil { return true, err } } else { // Connect to the SMTP smarthost. c, err = smtp.Dial(n.conf.Smarthost) if err != nil { return true, err } } defer c.Quit() if n.conf.Hello != "" { err := c.Hello(n.conf.Hello) if err != nil { return true, err } } // Global Config guarantees RequireTLS is not nil if *n.conf.RequireTLS { if ok, _ := c.Extension("STARTTLS"); !ok { return true, fmt.Errorf("require_tls: true (default), but %q does not advertise the STARTTLS extension", n.conf.Smarthost) } tlsConf := &tls.Config{ServerName: host} if err := c.StartTLS(tlsConf); err != nil { return true, fmt.Errorf("starttls failed: %s", err) } } if ok, mech := c.Extension("AUTH"); ok { auth, err := n.auth(mech) if err != nil { return true, err } if auth != nil { if err := c.Auth(auth); err != nil { return true, fmt.Errorf("%T failed: %s", auth, err) } } } var ( tmplErr error data = n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...) tmpl = tmplText(n.tmpl, data, &tmplErr) from = tmpl(n.conf.From) to = tmpl(n.conf.To) ) if tmplErr != nil { return false, fmt.Errorf("failed to template 'from' or 'to': %v", tmplErr) } addrs, err := mail.ParseAddressList(from) if err != nil { return false, fmt.Errorf("parsing from addresses: %s", err) } if len(addrs) != 1 { return false, fmt.Errorf("must be exactly one from address") } if err := c.Mail(addrs[0].Address); err != nil { return true, fmt.Errorf("sending mail from: %s", err) } addrs, err = mail.ParseAddressList(to) if err != nil { return false, fmt.Errorf("parsing to addresses: %s", err) } for _, addr := range addrs { if err := c.Rcpt(addr.Address); err != nil { return true, fmt.Errorf("sending rcpt to: %s", err) } } // Send the email body. wc, err := c.Data() if err != nil { return true, err } defer wc.Close() for header, t := range n.conf.Headers { value, err := n.tmpl.ExecuteTextString(t, data) if err != nil { return false, fmt.Errorf("executing %q header template: %s", header, err) } fmt.Fprintf(wc, "%s: %s\r\n", header, mime.QEncoding.Encode("utf-8", value)) } buffer := &bytes.Buffer{} multipartWriter := multipart.NewWriter(buffer) fmt.Fprintf(wc, "Date: %s\r\n", time.Now().Format(time.RFC1123Z)) fmt.Fprintf(wc, "Content-Type: multipart/alternative; boundary=%s\r\n", multipartWriter.Boundary()) fmt.Fprintf(wc, "MIME-Version: 1.0\r\n") // TODO: Add some useful headers here, such as URL of the alertmanager // and active/resolved. 
fmt.Fprintf(wc, "\r\n") if len(n.conf.Text) > 0 { // Text template w, err := multipartWriter.CreatePart(textproto.MIMEHeader{"Content-Type": {"text/plain; charset=UTF-8"}}) if err != nil { return false, fmt.Errorf("creating part for text template: %s", err) } body, err := n.tmpl.ExecuteTextString(n.conf.Text, data) if err != nil { return false, fmt.Errorf("executing email text template: %s", err) } _, err = w.Write([]byte(body)) if err != nil { return true, err } } if len(n.conf.HTML) > 0 { // Html template // Preferred alternative placed last per section 5.1.4 of RFC 2046 // https://www.ietf.org/rfc/rfc2046.txt w, err := multipartWriter.CreatePart(textproto.MIMEHeader{"Content-Type": {"text/html; charset=UTF-8"}}) if err != nil { return false, fmt.Errorf("creating part for html template: %s", err) } body, err := n.tmpl.ExecuteHTMLString(n.conf.HTML, data) if err != nil { return false, fmt.Errorf("executing email html template: %s", err) } _, err = w.Write([]byte(body)) if err != nil { return true, err } } err = multipartWriter.Close() if err != nil { return false, fmt.Errorf("failed to close multipartWriter: %v", err) } _, err = wc.Write(buffer.Bytes()) if err != nil { return false, fmt.Errorf("failed to write body buffer: %v", err) } return false, nil } // PagerDuty implements a Notifier for PagerDuty notifications. type PagerDuty struct { conf *config.PagerdutyConfig tmpl *template.Template logger log.Logger } // NewPagerDuty returns a new PagerDuty notifier. func NewPagerDuty(c *config.PagerdutyConfig, t *template.Template, l log.Logger) *PagerDuty { return &PagerDuty{conf: c, tmpl: t, logger: l} } const ( pagerDutyEventTrigger = "trigger" pagerDutyEventResolve = "resolve" ) type pagerDutyMessage struct { RoutingKey string `json:"routing_key,omitempty"` ServiceKey string `json:"service_key,omitempty"` DedupKey string `json:"dedup_key,omitempty"` IncidentKey string `json:"incident_key,omitempty"` EventType string `json:"event_type,omitempty"` Description string `json:"description,omitempty"` EventAction string `json:"event_action"` Payload *pagerDutyPayload `json:"payload"` Client string `json:"client,omitempty"` ClientURL string `json:"client_url,omitempty"` Details map[string]string `json:"details,omitempty"` } type pagerDutyPayload struct { Summary string `json:"summary"` Source string `json:"source"` Severity string `json:"severity"` Timestamp string `json:"timestamp,omitempty"` Class string `json:"class,omitempty"` Component string `json:"component,omitempty"` Group string `json:"group,omitempty"` CustomDetails map[string]string `json:"custom_details,omitempty"` } func (n *PagerDuty) notifyV1( ctx context.Context, c *http.Client, eventType, key string, data *template.Data, details map[string]string, as ...*types.Alert, ) (bool, error) { var tmplErr error tmpl := tmplText(n.tmpl, data, &tmplErr) msg := &pagerDutyMessage{ ServiceKey: tmpl(string(n.conf.ServiceKey)), EventType: eventType, IncidentKey: hashKey(key), Description: tmpl(n.conf.Description), Details: details, } apiURL, err := url.Parse("https://events.pagerduty.com/generic/2010-04-15/create_event.json") if err != nil { return false, err } n.conf.URL = &config.URL{apiURL} if eventType == pagerDutyEventTrigger { msg.Client = tmpl(n.conf.Client) msg.ClientURL = tmpl(n.conf.ClientURL) } if tmplErr != nil { return false, fmt.Errorf("failed to template PagerDuty v1 message: %v", tmplErr) } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(msg); err != nil { return false, err } resp, err := ctxhttp.Post(ctx, c, 
n.conf.URL.String(), contentTypeJSON, &buf) if err != nil { return true, err } defer resp.Body.Close() return n.retryV1(resp) } func (n *PagerDuty) notifyV2( ctx context.Context, c *http.Client, eventType, key string, data *template.Data, details map[string]string, as ...*types.Alert, ) (bool, error) { var tmplErr error tmpl := tmplText(n.tmpl, data, &tmplErr) if n.conf.Severity == "" { n.conf.Severity = "error" } msg := &pagerDutyMessage{ Client: tmpl(n.conf.Client), ClientURL: tmpl(n.conf.ClientURL), RoutingKey: tmpl(string(n.conf.RoutingKey)), EventAction: eventType, DedupKey: hashKey(key), Payload: &pagerDutyPayload{ Summary: tmpl(n.conf.Description), Source: tmpl(n.conf.Client), Severity: tmpl(n.conf.Severity), CustomDetails: details, Class: tmpl(n.conf.Class), Component: tmpl(n.conf.Component), Group: tmpl(n.conf.Group), }, } if tmplErr != nil { return false, fmt.Errorf("failed to template PagerDuty v2 message: %v", tmplErr) } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(msg); err != nil { return false, err } resp, err := ctxhttp.Post(ctx, c, n.conf.URL.String(), contentTypeJSON, &buf) if err != nil { return true, err } defer resp.Body.Close() return n.retryV2(resp.StatusCode) } // Notify implements the Notifier interface. // // https://v2.developer.pagerduty.com/docs/events-api-v2 func (n *PagerDuty) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { key, ok := GroupKey(ctx) if !ok { return false, fmt.Errorf("group key missing") } var err error var ( alerts = types.Alerts(as...) data = n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...) eventType = pagerDutyEventTrigger ) if alerts.Status() == model.AlertResolved { eventType = pagerDutyEventResolve } level.Debug(n.logger).Log("msg", "Notifying PagerDuty", "incident", key, "eventType", eventType) details := make(map[string]string, len(n.conf.Details)) for k, v := range n.conf.Details { detail, err := n.tmpl.ExecuteTextString(v, data) if err != nil { return false, fmt.Errorf("failed to template %q: %v", v, err) } details[k] = detail } if err != nil { return false, err } c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "pagerduty") if err != nil { return false, err } if n.conf.ServiceKey != "" { return n.notifyV1(ctx, c, eventType, key, data, details, as...) } return n.notifyV2(ctx, c, eventType, key, data, details, as...) } func (n *PagerDuty) retryV1(resp *http.Response) (bool, error) { // Retrying can solve the issue on 403 (rate limiting) and 5xx response codes. // 2xx response codes indicate a successful request. // https://v2.developer.pagerduty.com/docs/trigger-events statusCode := resp.StatusCode if statusCode == 400 && resp.Body != nil { bs, err := ioutil.ReadAll(resp.Body) if err != nil { return false, fmt.Errorf("unexpected status code %v : problem reading response: %v", statusCode, err) } return false, fmt.Errorf("bad request (status code %v): %v", statusCode, string(bs)) } if statusCode/100 != 2 { return (statusCode == 403 || statusCode/100 == 5), fmt.Errorf("unexpected status code %v", statusCode) } return false, nil } func (n *PagerDuty) retryV2(statusCode int) (bool, error) { // Retrying can solve the issue on 429 (rate limiting) and 5xx response codes. // 2xx response codes indicate a successful request. 
// https://v2.developer.pagerduty.com/docs/events-api-v2#api-response-codes--retry-logic if statusCode/100 != 2 { return (statusCode == 429 || statusCode/100 == 5), fmt.Errorf("unexpected status code %v", statusCode) } return false, nil } // Slack implements a Notifier for Slack notifications. type Slack struct { conf *config.SlackConfig tmpl *template.Template logger log.Logger } // NewSlack returns a new Slack notification handler. func NewSlack(c *config.SlackConfig, t *template.Template, l log.Logger) *Slack { return &Slack{ conf: c, tmpl: t, logger: l, } } // slackReq is the request for sending a slack notification. type slackReq struct { Channel string `json:"channel,omitempty"` Username string `json:"username,omitempty"` IconEmoji string `json:"icon_emoji,omitempty"` IconURL string `json:"icon_url,omitempty"` LinkNames bool `json:"link_names,omitempty"` Attachments []slackAttachment `json:"attachments"` } // slackAttachment is used to display a richly-formatted message block. type slackAttachment struct { Title string `json:"title,omitempty"` TitleLink string `json:"title_link,omitempty"` Pretext string `json:"pretext,omitempty"` Text string `json:"text"` Fallback string `json:"fallback"` Fields []config.SlackField `json:"fields,omitempty"` Actions []config.SlackAction `json:"actions,omitempty"` Footer string `json:"footer"` Color string `json:"color,omitempty"` MrkdwnIn []string `json:"mrkdwn_in,omitempty"` } // Notify implements the Notifier interface. func (n *Slack) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { var err error var ( data = n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...) tmplText = tmplText(n.tmpl, data, &err) ) attachment := &slackAttachment{ Title: tmplText(n.conf.Title), TitleLink: tmplText(n.conf.TitleLink), Pretext: tmplText(n.conf.Pretext), Text: tmplText(n.conf.Text), Fallback: tmplText(n.conf.Fallback), Footer: tmplText(n.conf.Footer), Color: tmplText(n.conf.Color), MrkdwnIn: []string{"fallback", "pretext", "text"}, } var numFields = len(n.conf.Fields) if numFields > 0 { var fields = make([]config.SlackField, numFields) for index, field := range n.conf.Fields { // Check if short was defined for the field otherwise fallback to the global setting var short bool if field.Short != nil { short = *field.Short } else { short = n.conf.ShortFields } // Rebuild the field by executing any templates and setting the new value for short fields[index] = config.SlackField{ Title: tmplText(field.Title), Value: tmplText(field.Value), Short: &short, } } attachment.Fields = fields } var numActions = len(n.conf.Actions) if numActions > 0 { var actions = make([]config.SlackAction, numActions) for index, action := range n.conf.Actions { actions[index] = config.SlackAction{ Type: tmplText(action.Type), Text: tmplText(action.Text), URL: tmplText(action.URL), Style: tmplText(action.Style), } } attachment.Actions = actions } req := &slackReq{ Channel: tmplText(n.conf.Channel), Username: tmplText(n.conf.Username), IconEmoji: tmplText(n.conf.IconEmoji), IconURL: tmplText(n.conf.IconURL), LinkNames: n.conf.LinkNames, Attachments: []slackAttachment{*attachment}, } if err != nil { return false, err } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(req); err != nil { return false, err } c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "slack") if err != nil { return false, err } resp, err := ctxhttp.Post(ctx, c, n.conf.APIURL.String(), contentTypeJSON, &buf) if err != nil
	resp.Body.Close()

	return n.retry(resp.StatusCode)
}

func (n *Slack) retry(statusCode int) (bool, error) {
	// Only 5xx response codes are recoverable and 2xx codes are successful.
	// https://api.slack.com/incoming-webhooks#handling_errors
	// https://api.slack.com/changelog/2016-05-17-changes-to-errors-for-incoming-webhooks
	if statusCode/100 != 2 {
		return (statusCode/100 == 5), fmt.Errorf("unexpected status code %v", statusCode)
	}

	return false, nil
}

// Hipchat implements a Notifier for Hipchat notifications.
type Hipchat struct {
	conf   *config.HipchatConfig
	tmpl   *template.Template
	logger log.Logger
}

// NewHipchat returns a new Hipchat notification handler.
func NewHipchat(c *config.HipchatConfig, t *template.Template, l log.Logger) *Hipchat {
	return &Hipchat{
		conf:   c,
		tmpl:   t,
		logger: l,
	}
}

type hipchatReq struct {
	From          string `json:"from"`
	Notify        bool   `json:"notify"`
	Message       string `json:"message"`
	MessageFormat string `json:"message_format"`
	Color         string `json:"color"`
}

// Notify implements the Notifier interface.
func (n *Hipchat) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {
	var err error
	var msg string
	var (
		data     = n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...)
		tmplText = tmplText(n.tmpl, data, &err)
		tmplHTML = tmplHTML(n.tmpl, data, &err)
		roomid   = tmplText(n.conf.RoomID)
		apiURL   = n.conf.APIURL.Copy()
	)
	apiURL.Path += fmt.Sprintf("v2/room/%s/notification", roomid)
	q := apiURL.Query()
	q.Set("auth_token", fmt.Sprintf("%s", n.conf.AuthToken))
	apiURL.RawQuery = q.Encode()

	if n.conf.MessageFormat == "html" {
		msg = tmplHTML(n.conf.Message)
	} else {
		msg = tmplText(n.conf.Message)
	}

	req := &hipchatReq{
		From:          tmplText(n.conf.From),
		Notify:        n.conf.Notify,
		Message:       msg,
		MessageFormat: n.conf.MessageFormat,
		Color:         tmplText(n.conf.Color),
	}
	if err != nil {
		return false, err
	}

	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(req); err != nil {
		return false, err
	}

	c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "hipchat")
	if err != nil {
		return false, err
	}

	resp, err := ctxhttp.Post(ctx, c, apiURL.String(), contentTypeJSON, &buf)
	if err != nil {
		return true, err
	}
	defer resp.Body.Close()

	return n.retry(resp.StatusCode)
}

func (n *Hipchat) retry(statusCode int) (bool, error) {
	// Response codes 429 (rate limiting) and 5xx can potentially recover. 2xx
	// response codes indicate successful requests.
	// https://developer.atlassian.com/hipchat/guide/hipchat-rest-api/api-response-codes
	if statusCode/100 != 2 {
		return (statusCode == 429 || statusCode/100 == 5), fmt.Errorf("unexpected status code %v", statusCode)
	}

	return false, nil
}

// Wechat implements a Notifier for wechat notifications
type Wechat struct {
	conf   *config.WechatConfig
	tmpl   *template.Template
	logger log.Logger

	accessToken   string
	accessTokenAt time.Time
}

// Wechat AccessToken with corpid and corpsecret.
type WechatToken struct { AccessToken string `json:"access_token"` } type weChatMessage struct { Text weChatMessageContent `yaml:"text,omitempty" json:"text,omitempty"` ToUser string `yaml:"touser,omitempty" json:"touser,omitempty"` ToParty string `yaml:"toparty,omitempty" json:"toparty,omitempty"` Totag string `yaml:"totag,omitempty" json:"totag,omitempty"` AgentID string `yaml:"agentid,omitempty" json:"agentid,omitempty"` Safe string `yaml:"safe,omitempty" json:"safe,omitempty"` Type string `yaml:"msgtype,omitempty" json:"msgtype,omitempty"` } type weChatMessageContent struct { Content string `json:"content"` } type weChatResponse struct { Code int `json:"code"` Error string `json:"error"` } // NewWechat returns a new Wechat notifier. func NewWechat(c *config.WechatConfig, t *template.Template, l log.Logger) *Wechat { return &Wechat{conf: c, tmpl: t, logger: l} } // Notify implements the Notifier interface. func (n *Wechat) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { key, ok := GroupKey(ctx) if !ok { return false, fmt.Errorf("group key missing") } level.Debug(n.logger).Log("msg", "Notifying Wechat", "incident", key) data := n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...) var err error tmpl := tmplText(n.tmpl, data, &err) if err != nil { return false, err } c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "wechat") if err != nil { return false, err } // Refresh AccessToken over 2 hours if n.accessToken == "" || time.Now().Sub(n.accessTokenAt) > 2*time.Hour { parameters := url.Values{} parameters.Add("corpsecret", tmpl(string(n.conf.APISecret))) parameters.Add("corpid", tmpl(string(n.conf.CorpID))) if err != nil { return false, fmt.Errorf("templating error: %s", err) } u := n.conf.APIURL.Copy() u.Path += "gettoken" u.RawQuery = parameters.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return true, err } req.Header.Set("Content-Type", contentTypeJSON) resp, err := c.Do(req.WithContext(ctx)) if err != nil { return true, err } defer resp.Body.Close() var wechatToken WechatToken if err := json.NewDecoder(resp.Body).Decode(&wechatToken); err != nil { return false, err } if wechatToken.AccessToken == "" { return false, fmt.Errorf("invalid APISecret for CorpID: %s", n.conf.CorpID) } // Cache accessToken n.accessToken = wechatToken.AccessToken n.accessTokenAt = time.Now() } msg := &weChatMessage{ Text: weChatMessageContent{ Content: tmpl(n.conf.Message), }, ToUser: tmpl(n.conf.ToUser), ToParty: tmpl(n.conf.ToParty), Totag: tmpl(n.conf.ToTag), AgentID: tmpl(n.conf.AgentID), Type: "text", Safe: "0", } if err != nil { return false, fmt.Errorf("templating error: %s", err) } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(msg); err != nil { return false, err } postMessageURL := n.conf.APIURL.Copy() postMessageURL.Path += "message/send" q := postMessageURL.Query() q.Set("access_token", n.accessToken) postMessageURL.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodPost, postMessageURL.String(), &buf) if err != nil { return true, err } resp, err := c.Do(req.WithContext(ctx)) if err != nil { return true, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return true, err } level.Debug(n.logger).Log("msg", "response: "+string(body), "incident", key) if resp.StatusCode != 200 { return true, fmt.Errorf("unexpected status code %v", resp.StatusCode) } else { var weResp weChatResponse if err := json.Unmarshal(body, &weResp); err != nil { return true, err } // 
https://work.weixin.qq.com/api/doc#10649 if weResp.Code == 0 { return false, nil } // AccessToken is expired if weResp.Code == 42001 { n.accessToken = "" return true, errors.New(weResp.Error) } return false, errors.New(weResp.Error) } } // OpsGenie implements a Notifier for OpsGenie notifications. type OpsGenie struct { conf *config.OpsGenieConfig tmpl *template.Template logger log.Logger } // NewOpsGenie returns a new OpsGenie notifier. func NewOpsGenie(c *config.OpsGenieConfig, t *template.Template, l log.Logger) *OpsGenie { return &OpsGenie{conf: c, tmpl: t, logger: l} } type opsGenieCreateMessage struct { Alias string `json:"alias"` Message string `json:"message"` Description string `json:"description,omitempty"` Details map[string]string `json:"details"` Source string `json:"source"` Teams []map[string]string `json:"teams,omitempty"` Tags []string `json:"tags,omitempty"` Note string `json:"note,omitempty"` Priority string `json:"priority,omitempty"` } type opsGenieCloseMessage struct { Source string `json:"source"` } // Notify implements the Notifier interface. func (n *OpsGenie) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { req, retry, err := n.createRequest(ctx, as...) if err != nil { return retry, err } c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "opsgenie") if err != nil { return false, err } resp, err := ctxhttp.Do(ctx, c, req) if err != nil { return true, err } defer resp.Body.Close() return n.retry(resp.StatusCode) } // Like Split but filter out empty strings. func safeSplit(s string, sep string) []string { a := strings.Split(strings.TrimSpace(s), sep) b := a[:0] for _, x := range a { if x != "" { b = append(b, x) } } return b } // Create requests for a list of alerts. func (n *OpsGenie) createRequest(ctx context.Context, as ...*types.Alert) (*http.Request, bool, error) { key, ok := GroupKey(ctx) if !ok { return nil, false, fmt.Errorf("group key missing") } data := n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...) level.Debug(n.logger).Log("msg", "Notifying OpsGenie", "incident", key) var err error tmpl := tmplText(n.tmpl, data, &err) details := make(map[string]string, len(n.conf.Details)) for k, v := range n.conf.Details { details[k] = tmpl(v) } var ( msg interface{} apiURL = n.conf.APIURL.Copy() alias = hashKey(key) alerts = types.Alerts(as...) ) switch alerts.Status() { case model.AlertResolved: apiURL.Path += fmt.Sprintf("v2/alerts/%s/close", alias) q := apiURL.Query() q.Set("identifierType", "alias") apiURL.RawQuery = q.Encode() msg = &opsGenieCloseMessage{Source: tmpl(n.conf.Source)} default: message := tmpl(n.conf.Message) if len(message) > 130 { message = message[:127] + "..." 
			level.Debug(n.logger).Log("msg", "Truncated message due to OpsGenie message limit", "truncated_message", message, "incident", key)
		}

		apiURL.Path += "v2/alerts"
		var teams []map[string]string
		for _, t := range safeSplit(string(tmpl(n.conf.Teams)), ",") {
			teams = append(teams, map[string]string{"name": t})
		}
		tags := safeSplit(string(tmpl(n.conf.Tags)), ",")

		msg = &opsGenieCreateMessage{
			Alias:       alias,
			Message:     message,
			Description: tmpl(n.conf.Description),
			Details:     details,
			Source:      tmpl(n.conf.Source),
			Teams:       teams,
			Tags:        tags,
			Note:        tmpl(n.conf.Note),
			Priority:    tmpl(n.conf.Priority),
		}
	}

	if err != nil {
		return nil, false, fmt.Errorf("templating error: %s", err)
	}

	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return nil, false, err
	}

	req, err := http.NewRequest("POST", apiURL.String(), &buf)
	if err != nil {
		return nil, true, err
	}
	req.Header.Set("Content-Type", contentTypeJSON)
	req.Header.Set("Authorization", fmt.Sprintf("GenieKey %s", n.conf.APIKey))
	return req, true, nil
}

func (n *OpsGenie) retry(statusCode int) (bool, error) {
	// https://docs.opsgenie.com/docs/response#section-response-codes
	// Response codes 429 (rate limiting) and 5xx are potentially recoverable
	if statusCode/100 == 5 || statusCode == 429 {
		return true, fmt.Errorf("unexpected status code %v", statusCode)
	} else if statusCode/100 != 2 {
		return false, fmt.Errorf("unexpected status code %v", statusCode)
	}

	return false, nil
}

// VictorOps implements a Notifier for VictorOps notifications.
type VictorOps struct {
	conf   *config.VictorOpsConfig
	tmpl   *template.Template
	logger log.Logger
}

// NewVictorOps returns a new VictorOps notifier.
func NewVictorOps(c *config.VictorOpsConfig, t *template.Template, l log.Logger) *VictorOps {
	return &VictorOps{
		conf:   c,
		tmpl:   t,
		logger: l,
	}
}

const (
	victorOpsEventTrigger = "CRITICAL"
	victorOpsEventResolve = "RECOVERY"
)

type victorOpsMessage struct {
	MessageType       string `json:"message_type"`
	EntityID          string `json:"entity_id"`
	EntityDisplayName string `json:"entity_display_name"`
	StateMessage      string `json:"state_message"`
	MonitoringTool    string `json:"monitoring_tool"`
}

// Notify implements the Notifier interface.
func (n *VictorOps) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {
	victorOpsAllowedEvents := map[string]bool{
		"INFO":     true,
		"WARNING":  true,
		"CRITICAL": true,
	}

	key, ok := GroupKey(ctx)
	if !ok {
		return false, fmt.Errorf("group key missing")
	}

	var err error
	var (
		alerts       = types.Alerts(as...)
		data         = n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...)
		tmpl         = tmplText(n.tmpl, data, &err)
		apiURL       = n.conf.APIURL.Copy()
		messageType  = tmpl(n.conf.MessageType)
		stateMessage = tmpl(n.conf.StateMessage)
	)
	apiURL.Path += fmt.Sprintf("%s/%s", n.conf.APIKey, tmpl(n.conf.RoutingKey))

	if alerts.Status() == model.AlertFiring && !victorOpsAllowedEvents[messageType] {
		messageType = victorOpsEventTrigger
	}

	if alerts.Status() == model.AlertResolved {
		messageType = victorOpsEventResolve
	}

	if len(stateMessage) > 20480 {
		stateMessage = stateMessage[0:20475] + "\n..."
level.Debug(n.logger).Log("msg", "Truncated stateMessage due to VictorOps stateMessage limit", "truncated_state_message", stateMessage, "incident", key) } msg := &victorOpsMessage{ MessageType: messageType, EntityID: hashKey(key), EntityDisplayName: tmpl(n.conf.EntityDisplayName), StateMessage: stateMessage, MonitoringTool: tmpl(n.conf.MonitoringTool), } if err != nil { return false, fmt.Errorf("templating error: %s", err) } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(msg); err != nil { return false, err } c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "victorops") if err != nil { return false, err } resp, err := ctxhttp.Post(ctx, c, apiURL.String(), contentTypeJSON, &buf) if err != nil { return true, err } defer resp.Body.Close() return n.retry(resp.StatusCode) } func (n *VictorOps) retry(statusCode int) (bool, error) { // Missing documentation therefore assuming only 5xx response codes are // recoverable. if statusCode/100 == 5 { return true, fmt.Errorf("unexpected status code %v", statusCode) } else if statusCode/100 != 2 { return false, fmt.Errorf("unexpected status code %v", statusCode) } return false, nil } // Pushover implements a Notifier for Pushover notifications. type Pushover struct { conf *config.PushoverConfig tmpl *template.Template logger log.Logger } // NewPushover returns a new Pushover notifier. func NewPushover(c *config.PushoverConfig, t *template.Template, l log.Logger) *Pushover { return &Pushover{conf: c, tmpl: t, logger: l} } // Notify implements the Notifier interface. func (n *Pushover) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { key, ok := GroupKey(ctx) if !ok { return false, fmt.Errorf("group key missing") } data := n.tmpl.Data(receiverName(ctx, n.logger), groupLabels(ctx, n.logger), as...) level.Debug(n.logger).Log("msg", "Notifying Pushover", "incident", key) var err error tmpl := tmplText(n.tmpl, data, &err) parameters := url.Values{} parameters.Add("token", tmpl(string(n.conf.Token))) parameters.Add("user", tmpl(string(n.conf.UserKey))) title := tmpl(n.conf.Title) if len(title) > 250 { title = title[:247] + "..." level.Debug(n.logger).Log("msg", "Truncated title due to Pushover title limit", "truncated_title", title, "incident", key) } parameters.Add("title", title) message := tmpl(n.conf.Message) if len(message) > 1024 { message = message[:1021] + "..." level.Debug(n.logger).Log("msg", "Truncated message due to Pushover message limit", "truncated_message", message, "incident", key) } message = strings.TrimSpace(message) if message == "" { // Pushover rejects empty messages. message = "(no details)" } parameters.Add("message", message) supplementaryURL := tmpl(n.conf.URL) if len(supplementaryURL) > 512 { supplementaryURL = supplementaryURL[:509] + "..." 
level.Debug(n.logger).Log("msg", "Truncated URL due to Pushover url limit", "truncated_url", supplementaryURL, "incident", key) } parameters.Add("url", supplementaryURL) parameters.Add("priority", tmpl(n.conf.Priority)) parameters.Add("retry", fmt.Sprintf("%d", int64(time.Duration(n.conf.Retry).Seconds()))) parameters.Add("expire", fmt.Sprintf("%d", int64(time.Duration(n.conf.Expire).Seconds()))) if err != nil { return false, err } apiURL := "https://api.pushover.net/1/messages.json" u, err := url.Parse(apiURL) if err != nil { return false, err } u.RawQuery = parameters.Encode() level.Debug(n.logger).Log("msg", "Sending Pushover message", "incident", key, "url", u.String()) c, err := commoncfg.NewClientFromConfig(*n.conf.HTTPConfig, "pushover") if err != nil { return false, err } resp, err := ctxhttp.Post(ctx, c, u.String(), "text/plain", nil) if err != nil { return true, err } defer resp.Body.Close() return n.retry(resp.StatusCode) } func (n *Pushover) retry(statusCode int) (bool, error) { // Only documented behaviour is that 2xx response codes are successful and // 4xx are unsuccessful, therefore assuming only 5xx are recoverable. // https://pushover.net/api#response if statusCode/100 == 5 { return true, fmt.Errorf("unexpected status code %v", statusCode) } else if statusCode/100 != 2 { return false, fmt.Errorf("unexpected status code %v", statusCode) } return false, nil } // tmplText is using monadic error handling in order to make string templating // less verbose. Use with care as the final error checking is easily missed. func tmplText(tmpl *template.Template, data *template.Data, err *error) func(string) string { return func(name string) (s string) { if *err != nil { return } s, *err = tmpl.ExecuteTextString(name, data) return s } } // tmplHTML is using monadic error handling in order to make string templating // less verbose. Use with care as the final error checking is easily missed. func tmplHTML(tmpl *template.Template, data *template.Data, err *error) func(string) string { return func(name string) (s string) { if *err != nil { return } s, *err = tmpl.ExecuteHTMLString(name, data) return s } } type loginAuth struct { username, password string } func LoginAuth(username, password string) smtp.Auth { return &loginAuth{username, password} } func (a *loginAuth) Start(server *smtp.ServerInfo) (string, []byte, error) { return "LOGIN", []byte{}, nil } // Used for AUTH LOGIN. (Maybe password should be encrypted) func (a *loginAuth) Next(fromServer []byte, more bool) ([]byte, error) { if more { switch strings.ToLower(string(fromServer)) { case "username:": return []byte(a.username), nil case "password:": return []byte(a.password), nil default: return nil, errors.New("unexpected server challenge") } } return nil, nil } // hashKey returns the sha256 for a group key as integrations may have // maximum length requirements on deduplication keys. func hashKey(s string) string { h := sha256.New() h.Write([]byte(s)) return fmt.Sprintf("%x", h.Sum(nil)) }
{ return true, err }
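// A minimal sketch (assumed, not part of the original source) of the retry
// contract shared by the notifiers above: Notify returns (retry bool, err error),
// where transport errors and 5xx responses are reported as retryable and other
// non-2xx responses as permanent. The Noop type and its url field are illustrative.
//
//	func (n *Noop) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {
//		resp, err := ctxhttp.Post(ctx, http.DefaultClient, n.url, contentTypeJSON, nil)
//		if err != nil {
//			return true, err // transport errors may be retried
//		}
//		defer resp.Body.Close()
//		if resp.StatusCode/100 != 2 {
//			// 5xx is assumed transient, anything else non-2xx is permanent.
//			return resp.StatusCode/100 == 5, fmt.Errorf("unexpected status code %v", resp.StatusCode)
//		}
//		return false, nil
//	}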
gu.js
/* Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.plugins.setLang( 'liststyle', 'gu', { armenian: 'અરમેનિયન આંકડા પદ્ધતિ', bulletedTitle: 'બુલેટેડ લીસ્ટના ગુણ',
circle: 'વર્તુળ', decimal: 'આંકડા (1, 2, 3, etc.)', decimalLeadingZero: 'સુન્ય આગળ આંકડા (01, 02, 03, etc.)', disc: 'ડિસ્ક', georgian: 'ગેઓર્ગિયન આંકડા પદ્ધતિ (an, ban, gan, etc.)', lowerAlpha: 'આલ્ફા નાના (a, b, c, d, e, etc.)', lowerGreek: 'ગ્રીક નાના (alpha, beta, gamma, etc.)', lowerRoman: 'રોમન નાના (i, ii, iii, iv, v, etc.)', none: 'કસુ ', notset: '<સેટ નથી>', numberedTitle: 'આંકડાના લીસ્ટના ગુણ', square: 'ચોરસ', start: 'શરુ કરવું', type: 'પ્રકાર', upperAlpha: 'આલ્ફા મોટા (A, B, C, D, E, etc.)', upperRoman: 'રોમન મોટા (I, II, III, IV, V, etc.)', validateStartNumber: 'લીસ્ટના સરુઆતનો આંકડો પુરો હોવો જોઈએ.' });
DinamicoNovedades.js
import { LitElement, html, css } from 'lit-element'; // import '@vaadin/vaadin-ordered-layout'; import { List } from '@material/mwc-list/mwc-list.js'; import { ListItem } from '@material/mwc-list/mwc-list-item.js'; // import { Icon } from '@material/mwc-icon'; import '@material/mwc-icon'; import { IconButton } from '@material/mwc-icon-button/mwc-icon-button.js'; import { Dialog } from '@material/mwc-dialog/mwc-dialog.js'; import '@material/mwc-textfield'; import { Button } from '@material/mwc-button/mwc-button.js'; export class
extends LitElement { static get styles(){ return css` table { font-family: arial, sans-serif; border-collapse: collapse; width: 100%; } td, th { border: 1px solid #dddddd; text-align: left; padding: 8px; } tr:nth-child(even) { background-color: #dddddd; } `; } render() { return html` <h3>Novedades</h3> <section> <table> <tr> <th style="text-align:center"> Empleado </th> <th style="text-align:center"> Ingresos Constitutivos de Salario </th> <th style="text-align:center"> Ingresos NO Constitutivos de Salario </th> <th style="text-align:center"> Deducciones </th> </tr> ${this._empleadosCollection && this._empleadosCollection.map((item) => html` <tr> <td> <p>${item.Nombre}</p> </td> <td> <mwc-list id="listaConstitutivos" @action="${this._listItemSelected}"> ${this._novedadesCollection && this._novedadesCollection.filter( function (el){ return (el.Empleado == item.Documento)&&(el.tipo == "constitutivo"); }).map((i) => html` <mwc-list-item twoline graphic="icon"> <span>$${Number(i.Valor_total).toLocaleString('es')}</span> <span slot="secondary">${i.Concepto}</span> <mwc-icon slot="graphic">add</mwc-icon> </mwc-list-item> `)} </mwc-list> </td> <td> <mwc-list id="listaNoConstitutivos" @action="${this._listItemSelected}"> ${this._novedadesCollection && this._novedadesCollection.filter( function (el){ return (el.Empleado == item.Documento)&&(el.tipo == "no_constitutivo"); }).map((i) => html` <mwc-list-item twoline graphic="icon"> <span>$${Number(i.Valor_total).toLocaleString('es')}</span> <span slot="secondary">${i.Concepto}</span> <mwc-icon slot="graphic">add</mwc-icon> </mwc-list-item> `)} </mwc-list> </td> <td> <mwc-list id="listaDeducciones" @action="${this._listItemSelected}"> ${this._novedadesCollection && this._novedadesCollection.filter( function (el){ return (el.Empleado == item.Documento)&&(el.tipo == "deduccion"); }).map((i) => html` <mwc-list-item twoline graphic="icon"> <span>$${Number(i.Valor_total).toLocaleString('es')}</span> <span slot="secondary">${i.Concepto}</span> <mwc-icon slot="graphic">remove</mwc-icon> </mwc-list-item> `)} </mwc-list> </td> </tr> `)} </table> </section> <mwc-dialog heading="${this._currentTipo}" id="dialogConceptosNovedades"> <div> <mwc-textfield label="Concepto" id="conceptoTextField"></mwc-textfield> </div> <mwc-button dialogAction="ok" slot="primaryAction"> ok </mwc-button> <mwc-button dialogAction="cancel" slot="secondaryAction"> cancel </mwc-button> </mwc-dialog> `; } static get properties() { return { _empleadosCollection:Array, _verLista:Boolean, _currentEmpleado:Number, _novedadesCollection:Array, _currentTipo: String }; } constructor() { super(); this._empleadosCollection = []; this._currentEmpleado=0; this._verLista=true; this._novedadesCollection=[]; this._currentTipo=""; const that = this; var empleadosRef = firebase.firestore().collection('empleado'); var query = empleadosRef.where("Activo", "==", true); query.get().then( function(querySnapshot){ querySnapshot.forEach(function(doc){ that._empleadosCollection.push(doc.data()); }); that.requestUpdate(); }); var novedadesRef = firebase.firestore().collection('novedades'); var query = novedadesRef.where("Periodo", "==", 9); query.get().then( function(querySnapshot){ querySnapshot.forEach(function(doc){ that._novedadesCollection.push(doc.data()); }); that.requestUpdate(); }); } _listItemSelected(e) { // this.shadowRoot.querySelector('#item'+(e.detail.index+1)).click(); console.log('hola'); console.log(e.target.id=="listaNoConstitutivos"); console.log(e.detail); console.log(e.detail.index); if 
(e.target.id=="listaNoConstitutivos") this._currentTipo="Ingresos no constitutivos de salario"; if (e.target.id=="listaConstitutivos") this._currentTipo="Ingresos constitutivos de salario"; if (e.target.id=="listaDeducciones") this._currentTipo="Deducciones"; this.shadowRoot.querySelector('#conceptoTextField').value= "666"; this.shadowRoot.querySelector('#dialogConceptosNovedades').open=true; } }
DinamicoNovedades
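// A minimal usage sketch (assumed, not part of the original file): the class must
// be registered as a custom element before it can appear in markup. The tag name
// below is illustrative; the real registration may live elsewhere in the app.
//
//   customElements.define('dinamico-novedades', DinamicoNovedades);
//   // <dinamico-novedades></dinamico-novedades>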
FilterSections.js
import React from 'react' import { useQuery } from 'react-query' import FilterContext from './context/filter-context' import CheckboxList from './skeleton/checkboxlist' function setCharAt(str, index, chr) { if (index > str.length - 1) return str return str.substring(0, index) + chr + str.substring(index + 1) } export default function
() { const { filter, setFilter } = React.useContext(FilterContext) // console.log('FILTER: ', filter) const fetchcategorys = () => fetch('https://peaceful-ridge-63546.herokuapp.com/categories').then((res) => res.json()) const { isLoading, isError, error, data, isFetching, isPreviousData } = useQuery( 'categories', () => fetchcategorys(), { keepPreviousData: true } ) const handleChange = (evt) => { const name = evt.target.name const value = evt.target.checked let newFilter = setCharAt(filter.filter, 17, 0) if (value && !filter.filter.includes(`&categories.slug=${name}`)) { // add to filter newFilter = newFilter + `&categories.slug=${name}` } else { // remove it from filter newFilter = newFilter.replace(`&categories.slug=${name}`, '') } console.log('-----------------------') console.log('NEW FILTER Categories: ', newFilter) console.log('-----------------------') setFilter({ filter: newFilter, start: 0, currentPage: 1 }) } return isLoading ? ( <div className='grid grid-cols-1 gap-y-4'> <CheckboxList /> <CheckboxList /> <CheckboxList /> <CheckboxList /> <CheckboxList /> <CheckboxList /> </div> ) : isError ? ( <div>Error: {error.message}</div> ) : ( <form className='hidden lg:block pb-6'> <h3 className='sr-only'>Filter</h3> <div className=''> <h3 className='-my-3 flow-root'> <button type='button' className='py-3 text-left w-full text-sm text-gray-400 hover:text-gray-500' aria-controls='filter-category-0' aria-expanded='false' > <span className='font-medium text-gray-900'>Categories</span> </button> </h3> <div className='pt-3' id='filter-category-0'> <div className='space-y-4'> {data.map((category) => ( <div key={category.id} className='flex items-center'> <input name={category.slug} value={category.slug} type='checkbox' onChange={handleChange} checked={filter.filter.includes(`&categories.slug=${category.slug}`)} className='h-4 w-4 border-gray-300 rounded text-indigo-600 focus:ring-indigo-500' /> <label htmlFor='filter-color-0' className='ml-3 text-sm text-gray-600'> {category.name} </label> </div> ))} </div> </div> </div> </form> ) }
Filtercategorys
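// A quick sketch (not part of the original file) of the setCharAt helper's behaviour:
//
//   setCharAt('abcdef', 2, 'X'); // => 'abXdef'
//   setCharAt('abc', 9, 'X');    // => 'abc' (index past the end: string returned unchanged)
//
// Note that handleChange passes the number 0 as chr; string concatenation coerces
// it to the character '0' at position 17 of the filter string.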
fields.py
# -*- coding: utf-8 -*-
from django.db.models.fields import CharField

from ..forms.fields import TimeZoneInputField


class TimeZoneField(CharField):
    """
    A relatively dynamic TimeZone field for Django models.
    """

    def
(self, *args, **kwargs):
        # Note, as of this writing, the max length of the pytz timezone choices
        # is 30 characters.
        kwargs.setdefault('max_length', 63)
        super().__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # Use the companion TimeZoneInputField by default, note, the super()
        # call is quite intentionally bypassing our parent class.
        return super(CharField, self).formfield(**{
            'form_class': TimeZoneInputField,
            **kwargs,
        })
__init__
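# A minimal usage sketch (assumed, not part of the original module): since
# TimeZoneField subclasses CharField, it is declared like any other model field,
# and ModelForms then pick up TimeZoneInputField through formfield() above.
#
#     class Profile(models.Model):
#         timezone = TimeZoneField(default='UTC')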
operators.rs
use enum_kind::Kind; use string_enum::StringEnum; #[cfg(feature = "fold")] use swc_common::Fold; #[derive(Kind, StringEnum, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "fold", derive(Fold))] #[kind(function(precedence = "u8"))] pub enum BinaryOp { /// `==` #[kind(precedence = "6")] EqEq, /// `!=` #[kind(precedence = "6")] NotEq, /// `===` #[kind(precedence = "6")] EqEqEq, /// `!==` #[kind(precedence = "6")] NotEqEq, /// `<` #[kind(precedence = "7")] Lt, /// `<=` #[kind(precedence = "7")] LtEq, /// `>` #[kind(precedence = "7")] Gt, /// `>=` #[kind(precedence = "7")] GtEq, /// `<<` #[kind(precedence = "8")] LShift, /// `>>` #[kind(precedence = "8")] RShift, /// `>>>` #[kind(precedence = "8")] ZeroFillRShift, /// `+` #[kind(precedence = "9")] Add, /// `-` #[kind(precedence = "9")] Sub, /// `*` #[kind(precedence = "10")] Mul, /// `/` #[kind(precedence = "10")] Div, /// `%` #[kind(precedence = "10")]
BitOr, /// `^` #[kind(precedence = "4")] BitXor, /// `&` #[kind(precedence = "5")] BitAnd, /// `||` #[kind(precedence = "1")] LogicalOr, /// `&&` #[kind(precedence = "2")] LogicalAnd, /// `in` #[kind(precedence = "7")] In, /// `instanceof` #[kind(precedence = "7")] InstanceOf, /// `**` #[kind(precedence = "11")] Exp, } #[derive(StringEnum, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "fold", derive(Fold))] pub enum AssignOp { /// `=` Assign, /// `+=` AddAssign, /// `-=` SubAssign, /// `*=` MulAssign, /// `/=` DivAssign, /// `%=` ModAssign, /// `<<=` LShiftAssign, /// `>>=` RShiftAssign, /// `>>>=` ZeroFillRShiftAssign, /// `|=` BitOrAssign, /// `^=` BitXorAssign, /// `&=` BitAndAssign, /// `**=` ExpAssign, } #[derive(StringEnum, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "fold", derive(Fold))] pub enum UpdateOp { /// `++` PlusPlus, /// `--` MinusMinus, } #[derive(StringEnum, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "fold", derive(Fold))] pub enum UnaryOp { /// `-` Minus, /// `+` Plus, /// `!` Bang, /// `~` Tilde, /// `typeof` TypeOf, /// `void` Void, /// `delete` Delete, }
Mod, /// `|` #[kind(precedence = "3")]
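// The `#[kind(function(precedence = "u8"))]` attribute on BinaryOp should generate
// a `precedence()` accessor (per the enum_kind crate), letting parser code compare
// operators numerically. A hedged sketch of the intended use:
//
//     assert!(BinaryOp::Mul.precedence() > BinaryOp::Add.precedence()); // `*` binds tighter than `+`
//     assert_eq!(BinaryOp::LogicalAnd.precedence(), 2);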
recursion.ts
/**
 * @entrypoint InfiniteSelfRecursion
 * @trace
 */
}

/**
 * @entrypoint IndirectInfiniteSelfRecursion
 * @trace
 */
function indirectInfiniteSelfRecursion() {
  makeIndirectRecursiveCall();
}

/**
 * @trace
 */
function makeIndirectRecursiveCall() {
  indirectInfiniteSelfRecursion();
}
function infiniteSelfRecursion() {
  infiniteSelfRecursion();
GameRoom.tsx
import { useParams } from "react-router-dom"; import React from "react"; import { useStorageBackedState } from "../hooks/useStorageBackedState"; import { useNetworkBackedGameState } from "../hooks/useNetworkBackedGameState"; import { InputName } from "./InputName"; import { RandomFourCharacterString } from "../../state/RandomFourCharacterString"; import { GameModelContext } from "../../state/GameModelContext"; import { ActiveGame } from "./ActiveGame"; import { BuildGameModel } from "../../state/BuildGameModel"; import { RoomIdHeader } from "../common/RoomIdHeader"; import { FakeRooms } from "./FakeRooms"; export function
() { const { roomId } = useParams(); if (roomId === undefined) { throw new Error("RoomId missing"); } const [playerName, setPlayerName] = useStorageBackedState("", "name"); const [playerId] = useStorageBackedState( RandomFourCharacterString(), "playerId" ); const [gameState, setGameState] = useNetworkBackedGameState( roomId, playerId, playerName ); if (roomId === "MULTIPLAYER_TEST") { return <FakeRooms />; } const gameModel = BuildGameModel(gameState, setGameState, playerId); if (playerName.length === 0) { return ( <InputName setName={(name) => { setPlayerName(name); gameState.players[playerId].name = name; setGameState(gameState); }} /> ); } if (!gameState?.players?.[playerId]) { return null; } return ( <GameModelContext.Provider value={gameModel}> <RoomIdHeader /> <ActiveGame /> </GameModelContext.Provider> ); }
GameRoom
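// useParams() implies this component renders under a parameterized react-router
// route; a hedged sketch of the assumed wiring (the path is illustrative):
//
//   <Route path="/:roomId">
//     <GameRoom />
//   </Route>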
swc.rs
use std::{collections::HashMap, env, sync::Arc}; use anyhow::{bail, Context, Error}; use helpers::Helpers; use swc::{ config::{GlobalInliningPassEnvs, InputSourceMap, IsModule, JscConfig, TransformConfig}, try_with_handler, HandlerOpts, }; use swc_atoms::JsWord; use swc_bundler::{Load, ModuleData}; use swc_common::{ collections::AHashMap, comments::SingleThreadedComments, errors::{Handler, HANDLER}, sync::Lrc, FileName, DUMMY_SP, }; use swc_ecma_ast::{EsVersion, Expr, Lit, Module, Program, Str}; use swc_ecma_parser::{parse_file_as_module, Syntax}; use swc_ecma_transforms::{ helpers, optimization::{ inline_globals, simplify::{dead_branch_remover, expr_simplifier}, }, pass::noop, }; use swc_ecma_visit::FoldWith; use crate::loaders::json::load_json_as_module; /// JavaScript loader pub struct SwcLoader { compiler: Arc<swc::Compiler>, options: swc::config::Options, } impl SwcLoader { pub fn new(compiler: Arc<swc::Compiler>, options: swc::config::Options) -> Self { SwcLoader { compiler, options } } fn env_map(&self) -> Lrc<AHashMap<JsWord, Expr>> { let mut m = HashMap::default(); let envs = self .options .config .jsc .transform .as_ref() .and_then(|t| t.optimizer.as_ref()) .and_then(|o| o.globals.as_ref()) .map(|g| g.envs.clone()) .unwrap_or_default(); let envs_map: AHashMap<_, _> = match envs { GlobalInliningPassEnvs::Map(m) => m, GlobalInliningPassEnvs::List(envs) => envs .into_iter() .map(|name| { let value = env::var(&name).ok(); (name.into(), value.unwrap_or_default().into()) }) .collect(), }; for (k, v) in envs_map { m.insert( k, Expr::Lit(Lit::Str(Str { span: DUMMY_SP, value: v, has_escape: false, kind: Default::default(), })), ); } Lrc::new(m) } fn load_with_handler(&self, handler: &Handler, name: &FileName) -> Result<ModuleData, Error> { tracing::debug!("JsLoader.load({})", name); let helpers = Helpers::new(false); if let FileName::Custom(id) = name { // Handle built-in modules if id.starts_with("node:") { let fm = self .compiler .cm .new_source_file(name.clone(), "".to_string()); return Ok(ModuleData { fm, module: Module { span: DUMMY_SP, body: Default::default(), shebang: Default::default(), }, helpers: Default::default(), }); // Handle disabled modules, eg when `browser` has a field // set to `false` } else { // TODO: When we know the calling context is ESM // TODO: switch to `export default {}`. let fm = self .compiler .cm .new_source_file(name.clone(), "module.exports = {}".to_string()); let module = parse_file_as_module( &fm, Syntax::Es(Default::default()), Default::default(), None, &mut vec![], ) .unwrap(); return Ok(ModuleData { fm, module, helpers: Default::default(), }); } } let fm = self .compiler .cm .load_file(match name { FileName::Real(v) => v, _ => bail!("swc-loader only accepts path. 
Got `{}`", name), }) .with_context(|| format!("failed to load file `{}`", name))?; if let FileName::Real(path) = name { if let Some(ext) = path.extension() { if ext == "json" { let module = load_json_as_module(&fm) .with_context(|| format!("failed to load json file at {}", fm.name))?; return Ok(ModuleData { fm, module, helpers: Default::default(), }); } } } tracing::trace!("JsLoader.load: loaded"); let program = if fm.name.to_string().contains("node_modules") { let comments = self.compiler.comments().clone(); let program = self.compiler.parse_js( fm.clone(), handler, EsVersion::Es2020, Default::default(), IsModule::Bool(true), Some(&comments), )?; helpers::HELPERS.set(&helpers, || { HANDLER.set(handler, || { let program = program.fold_with(&mut inline_globals( self.env_map(), Default::default(), Default::default(), )); let program = program.fold_with(&mut expr_simplifier(Default::default())); program.fold_with(&mut dead_branch_remover()) }) }) } else { let comments = SingleThreadedComments::default(); let config = self.compiler.parse_js_as_input( fm.clone(), None, handler, &swc::config::Options { config: { let c = &self.options.config; swc::config::Config { jsc: JscConfig { transform: { c.jsc.transform.as_ref().map(|c| TransformConfig { react: c.react.clone(), const_modules: c.const_modules.clone(), optimizer: None, legacy_decorator: c.legacy_decorator, decorator_metadata: c.decorator_metadata, hidden: Default::default(), ..Default::default() }) }, external_helpers: true, ..c.jsc.clone() }, module: None, minify: false, input_source_map: InputSourceMap::Bool(false), ..c.clone() } }, skip_helper_injection: true, disable_hygiene: false, disable_fixer: true, global_mark: self.options.global_mark, cwd: self.options.cwd.clone(), caller: None, filename: String::new(), config_file: None, root: None, swcrc: true, env_name: { env::var("NODE_ENV").unwrap_or_else(|_| "development".into()) }, is_module: IsModule::Bool(true), ..Default::default() }, &fm.name, Some(&comments), |_| noop(), )?; tracing::trace!("JsLoader.load: loaded config"); // We run transform at this phase to strip out unused dependencies. // // Note that we don't apply compat transform at loading phase. let program = if let Some(config) = config { let program = config.program; let mut pass = config.pass; helpers::HELPERS.set(&helpers, || { HANDLER.set(handler, || { let program = program.fold_with(&mut inline_globals( self.env_map(), Default::default(), Default::default(), )); let program = program.fold_with(&mut expr_simplifier(Default::default())); let program = program.fold_with(&mut dead_branch_remover()); program.fold_with(&mut pass) }) }) } else { let comments = self.compiler.comments().clone(); self.compiler .parse_js( fm.clone(), handler, EsVersion::Es2020, config.as_ref().map(|v| v.syntax).unwrap_or_default(), IsModule::Bool(true), Some(&comments), ) .context("tried to parse as ecmascript as it's excluded by .swcrc")? }; tracing::trace!("JsLoader.load: applied transforms"); program }; match program { Program::Module(module) => Ok(ModuleData { fm, module, helpers, }), _ => unreachable!(), } } } impl Load for SwcLoader { fn load(&self, name: &FileName) -> Result<ModuleData, Error>
}
{ try_with_handler( self.compiler.cm.clone(), HandlerOpts { ..Default::default() }, |handler| self.load_with_handler(handler, name), ) }
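// env_map() above accepts either shape of jsc.transform.optimizer.globals.envs;
// a sketch (values illustrative) of the two equivalent configurations it handles:
//
//     // List form: values are read from the process environment at load time.
//     { "envs": ["NODE_ENV"] }
//     // Map form: values are inlined verbatim.
//     { "envs": { "NODE_ENV": "production" } }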
day07part1.rs
use regex::Regex; use std::collections::{HashMap, HashSet}; fn main() { let input = include_str!("../../input/day07.txt"); let rules = input .lines() .map(parse_rule) .filter(|v| !v.is_none()) .map(|v| v.unwrap()) .collect::<Rules>(); let mut result = HashSet::<Bag>::new(); let mut to_trace: Vec<Bag> = vec![Bag::new("shiny", "gold")]; while to_trace.len() > 0 { let mut next_to_trace = HashSet::<Bag>::new(); to_trace.iter().for_each(|trace_bag| { rules.iter().for_each(|(k, v)| { if v.iter().any(|v| v.bag == *trace_bag) { next_to_trace.insert(k.clone()); } }) }); next_to_trace.iter().for_each(|v| { result.insert(v.clone()); }); to_trace = next_to_trace.into_iter().collect(); } println!("{}", result.len()); } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct Bag { adjective: String, color: String, } impl Bag { fn new(adjective: &str, color: &str) -> Self { Bag { adjective: adjective.to_string(), color: color.to_string(), } } } #[derive(Debug)] struct CollectedBag { count: u32, bag: Bag, } impl CollectedBag { fn
(count: u32, bag: Bag) -> Self { CollectedBag { count, bag } } } type Rules = HashMap<Bag, Vec<CollectedBag>>; fn parse_rule(line: &str) -> Option<(Bag, Vec<CollectedBag>)> { let re_bag = Regex::new(r"^(\S+)\s+(\S+)\s+bags\s+contain(.*)$").unwrap(); let re_no_collected_bags = Regex::new(r"\s*no\s+other\s+bags").unwrap(); let re_collected_bag = Regex::new(r"\s*,?\s*(\d+)\s+(\S+)\s+(\S+)\s+bag[s]?(.*)$").unwrap(); match re_bag.is_match(line) { true => { let elems = re_bag.captures(line).unwrap(); let adjective = elems.get(1).map(|m| m.as_str()).unwrap(); let color = elems.get(2).map(|m| m.as_str()).unwrap(); let bag = Bag::new(adjective, color); let mut collected_bags = Vec::<CollectedBag>::new(); let mut line = elems.get(3).map(|m| m.as_str()).unwrap(); if re_no_collected_bags.is_match(line) { return Some((bag, collected_bags)); } while re_collected_bag.is_match(line) { let elems = re_collected_bag.captures(line).unwrap(); let count = elems .get(1) .map(|m| m.as_str()) .unwrap() .parse::<u32>() .unwrap(); let adjective = elems.get(2).map(|m| m.as_str()).unwrap(); let color = elems.get(3).map(|m| m.as_str()).unwrap(); let bag = Bag::new(adjective, color); collected_bags.push(CollectedBag::new(count, bag)); line = elems.get(4).map(|m| m.as_str()).unwrap(); } return Some((bag, collected_bags)); } false => None, } }
new
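// A sketch (not part of the original file) of what parse_rule extracts from one
// puzzle line, matching the regexes above:
//
//     parse_rule("light red bags contain 1 bright white bag, 2 muted yellow bags.")
//     // => Some((Bag::new("light", "red"),
//     //          vec![CollectedBag::new(1, Bag::new("bright", "white")),
//     //               CollectedBag::new(2, Bag::new("muted", "yellow"))]))
//
//     parse_rule("faded blue bags contain no other bags.")
//     // => Some((Bag::new("faded", "blue"), vec![]))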
common.go
package node import ( "fmt" "strings" "github.com/run-ai/runai-cli/pkg/client" "github.com/run-ai/runai-cli/pkg/nodes" "github.com/run-ai/runai-cli/pkg/types" "github.com/run-ai/runai-cli/pkg/ui" ) var ( defaultHiddenFields = ui.EnsureStringPaths(types.NodeView{}, []string{ "GPUs.InUse", }) unhealthyGpusPath = ui.EnsureStringPaths(types.NodeView{}, []string{ "GPUs.Unhealthy", }) ) func GetNodeInfos(shouldQueryMetrics bool) (*[]nodes.NodeInfo, error) { kubeClient, err := client.GetClient() if err != nil { return nil, err } nodeInfos, warning, err := nodes.GetAllNodeInfos(kubeClient, shouldQueryMetrics) if err != nil
else if len(warning) > 0 { fmt.Println(warning) } return &nodeInfos, nil } func handleSpecificNodes(nodeInfos *[]nodes.NodeInfo, displayFunction func(*[]nodes.NodeInfo), selectedNodeNames ...string) { nodeNames := []string{} matchsNodeInfos := []nodes.NodeInfo{} if len(*nodeInfos) == 0 { fmt.Println("No available node found in cluster") return } // show all if no node selected if len(selectedNodeNames) == 0 { displayFunction(nodeInfos) return } for _, nodeInfo := range *nodeInfos { nodeNames = append(nodeNames, nodeInfo.Node.Name) if ui.Contains(selectedNodeNames, nodeInfo.Node.Name) { matchsNodeInfos = append(matchsNodeInfos, nodeInfo) } } if len(matchsNodeInfos) != len(selectedNodeNames) { notFoundNodeNames := []string{} for _, nodeName := range selectedNodeNames { if !ui.Contains(nodeNames, nodeName) { notFoundNodeNames = append(notFoundNodeNames, nodeName) } } fmt.Printf( `No match found for node(s) '%s' Available node names: %s `, notFoundNodeNames, "\t"+strings.Join(nodeNames, "\n\t")) } displayFunction(&matchsNodeInfos) }
{ return nil, err }
animation.gsap.js
/*! * ScrollMagic v2.0.5 (2015-04-29) * The javascript library for magical scroll interactions. * (c) 2015 Jan Paepke (@janpaepke) * Project Website: http://scrollmagic.io * * @version 2.0.5 * @license Dual licensed under MIT license and GPL. * @author Jan Paepke - [email protected] * * @file ScrollMagic GSAP Animation Plugin. * * requires: GSAP ~1.14 * Powered by the Greensock Animation Platform (GSAP): http://www.greensock.com/js * Greensock License info at http://www.greensock.com/licensing/ */ /** * This plugin is meant to be used in conjunction with the Greensock Animation Plattform. * It offers an easy API to trigger Tweens or synchronize them to the scrollbar movement. * * Both the `lite` and the `max` versions of the GSAP library are supported. * The most basic requirement is `TweenLite`. * * To have access to this extension, please include `plugins/animation.gsap.js`. * @requires {@link http://greensock.com/gsap|GSAP ~1.14.x} * @mixin animation.GSAP */ (function (root, factory) { if (typeof exports === 'object') { // CommonJS // Loads whole gsap package onto global scope. require('gsap'); factory(require('scrollmagic'), TweenMax, TimelineMax); } else { // Browser globals factory(root.ScrollMagic || (root.jQuery && root.jQuery.ScrollMagic), root.TweenMax || root.TweenLite, root.TimelineMax || root.TimelineLite); } }(this, function (ScrollMagic, Tween, Timeline) { "use strict"; var NAMESPACE = "animation.gsap"; var console = window.console || {}, err = Function.prototype.bind.call(console.error || console.log || function () {}, console); if (!ScrollMagic) { err("(" + NAMESPACE + ") -> ERROR: The ScrollMagic main module could not be found. Please make sure it's loaded before this plugin or use an asynchronous loader like requirejs."); } if (!Tween) { err("(" + NAMESPACE + ") -> ERROR: TweenLite or TweenMax could not be found. Please make sure GSAP is loaded before ScrollMagic or use an asynchronous loader like requirejs."); } /* * ---------------------------------------------------------------- * Extensions for Scene * ---------------------------------------------------------------- */ /** * Every instance of ScrollMagic.Scene now accepts an additional option. * See {@link ScrollMagic.Scene} for a complete list of the standard options. * @memberof! animation.GSAP# * @method new ScrollMagic.Scene(options) * @example * var scene = new ScrollMagic.Scene({tweenChanges: true}); * * @param {object} [options] - Options for the Scene. The options can be updated at any time. * @param {boolean} [options.tweenChanges=false] - Tweens Animation to the progress target instead of setting it. Does not affect animations where duration is `0`. */ /** * **Get** or **Set** the tweenChanges option value. * This only affects scenes with a duration. If `tweenChanges` is `true`, the progress update when scrolling will not be immediate, but instead the animation will smoothly animate to the target state. * For a better understanding, try enabling and disabling this option in the [Scene Manipulation Example](../examples/basic/scene_manipulation.html). * @memberof! animation.GSAP# * @method Scene.tweenChanges * * @example * // get the current tweenChanges option * var tweenChanges = scene.tweenChanges(); * * // set new tweenChanges option * scene.tweenChanges(true); * * @fires {@link Scene.change}, when used as setter * @param {boolean} [newTweenChanges] - The new tweenChanges setting of the scene. * @returns {boolean} `get` - Current tweenChanges option value. 
	 * @returns {Scene} `set` - Parent object for chaining.
	 */
	// add option (TODO: DOC (private for dev))
	ScrollMagic.Scene.addOption("tweenChanges", // name
	false, // default
	function (val) { // validation callback
		return !!val;
	});
	// extend scene
	ScrollMagic.Scene.extend(function () {
		var Scene = this,
			_tween;

		var log = function () {
			if (Scene._log) { // not available, when main source minified
				Array.prototype.splice.call(arguments, 1, 0, "(" + NAMESPACE + ")", "->");
				Scene._log.apply(this, arguments);
			}
		};

		// set listeners
		Scene.on("progress.plugin_gsap", function () {
			updateTweenProgress();
		});
		Scene.on("destroy.plugin_gsap", function (e) {
			Scene.removeTween(e.reset);
		});

		/**
		 * Update the tween progress to current position.
		 * @private
		 */
		var updateTweenProgress = function () {
			if (_tween) {
				var progress = Scene.progress(),
					state = Scene.state();
				if (_tween.repeat && _tween.repeat() === -1) {
					// infinite loop, so not in relation to progress
					if (state === 'DURING' && _tween.paused()) {
						_tween.play();
					} else if (state !== 'DURING' && !_tween.paused()) {
						_tween.pause();
					}
				} else if (progress != _tween.progress()) { // do we even need to update the progress?
					// no infinite loop - so should we just play or go to a specific point in time?
					if (Scene.duration() === 0) {
						// play the animation
						if (progress > 0) { // play from 0 to 1
							_tween.play();
						} else { // play from 1 to 0
							_tween.reverse();
						}
					} else {
						// go to a specific point in time
						if (Scene.tweenChanges() && _tween.tweenTo) {
							// go smooth
							_tween.tweenTo(progress * _tween.duration());
						} else {
							// just hard set it
							_tween.progress(progress).pause();
						}
					}
				}
			}
		};

		/**
		 * Add a tween to the scene.
		 * If you want to add multiple tweens, add them into a GSAP Timeline object and supply it instead (see example below).
		 *
		 * If the scene has a duration, the tween's duration will be projected to the scroll distance of the scene, meaning its progress will be synced to scrollbar movement.
		 * For a scene with a duration of `0`, the tween will be triggered when scrolling forward past the scene's trigger position and reversed, when scrolling back.
		 * To gain better understanding, check out the [Simple Tweening example](../examples/basic/simple_tweening.html).
		 *
		 * Instead of supplying a tween this method can also be used as a shorthand for `TweenMax.to()` (see example below).
		 * @memberof! animation.GSAP#
		 *
		 * @example
		 * // add a single tween directly
		 * scene.setTween(TweenMax.to("obj", 1, {x: 100}));
		 *
		 * // add a single tween via variable
		 * var tween = TweenMax.to("obj", 1, {x: 100});
		 * scene.setTween(tween);
		 *
		 * // add multiple tweens, wrapped in a timeline.
		 * var timeline = new TimelineMax();
		 * var tween1 = TweenMax.from("obj1", 1, {x: 100});
		 * var tween2 = TweenMax.to("obj2", 1, {y: 100});
		 * timeline
		 *		.add(tween1)
		 *		.add(tween2);
		 * scene.setTween(timeline);
		 *
		 * // short hand to add a TweenMax.to() tween
		 * scene.setTween("obj3", 0.5, {y: 100});
		 *
		 * // short hand to add a TweenMax.to() tween for 1 second
		 * // this is useful, when the scene has a duration and the tween duration isn't important anyway
		 * scene.setTween("obj3", {y: 100});
		 *
		 * @param {(object|string)} TweenObject - A TweenMax, TweenLite, TimelineMax or TimelineLite object that should be animated in the scene. Can also be a Dom Element or Selector, when using direct tween definition (see examples).
		 * @param {(number|object)} duration - A duration for the tween, or tween parameters. If an object containing parameters is supplied, a default duration of 1 will be used.
		 * @param {object} params - The parameters for the tween
		 * @returns {Scene} Parent object for chaining.
		 */
		Scene.setTween = function (TweenObject, duration, params) {
			var newTween;
			if (arguments.length > 1) {
				if (arguments.length < 3) {
					params = duration;
					duration = 1;
				}
				TweenObject = Tween.to(TweenObject, duration, params);
			}
			try {
				// wrap Tween into a Timeline Object if available to include delay and repeats in the duration and standardize methods.
				if (Timeline) {
					newTween = new Timeline({
						smoothChildTiming: true
					}).add(TweenObject);
				} else {
					newTween = TweenObject;
				}
				newTween.pause();
			} catch (e) {
				log(1, "ERROR calling method 'setTween()': Supplied argument is not a valid TweenObject");
				return Scene;
			}
			if (_tween) { // kill old tween?
				Scene.removeTween();
			}
			_tween = newTween;

			// some properties need to be transferred to the wrapper, otherwise they would get lost.
			if (TweenObject.repeat && TweenObject.repeat() === -1) { // TweenMax or TimelineMax Object?
				_tween.repeat(-1);
				_tween.yoyo(TweenObject.yoyo());
			}
			// Some tween validations and debugging helpers

			if (Scene.tweenChanges() && !_tween.tweenTo) {
				log(2, "WARNING: tweenChanges will only work if the TimelineMax object is available for ScrollMagic.");
			}

			// check if there are position tweens defined for the trigger and warn about it :)
			if (_tween && Scene.controller() && Scene.triggerElement() && Scene.loglevel() >= 2) { // controller is needed to know scroll direction.
				var triggerTweens = Tween.getTweensOf(Scene.triggerElement()),
					vertical = Scene.controller().info("vertical");
				triggerTweens.forEach(function (value, index) {
					var tweenvars = value.vars.css || value.vars,
						condition = vertical ? (tweenvars.top !== undefined || tweenvars.bottom !== undefined) : (tweenvars.left !== undefined || tweenvars.right !== undefined);
					if (condition) {
						log(2, "WARNING: Tweening the position of the trigger element affects the scene timing and should be avoided!");
						return false;
					}
				});
			}

			// warn about tween overwrites, when an element is tweened multiple times
			if (parseFloat(TweenLite.version) >= 1.14) { // onOverwrite only present since GSAP v1.14.0
				var list = _tween.getChildren ? _tween.getChildren(true, true, false) : [_tween],
					// get all nested tween objects
					newCallback = function () {
						log(2, "WARNING: tween was overwritten by another. 
To learn how to avoid this issue see here: https://github.com/janpaepke/ScrollMagic/wiki/WARNING:-tween-was-overwritten-by-another"); }; for (var i = 0, thisTween, oldCallback; i < list.length; i++) { /*jshint loopfunc: true */ thisTween = list[i]; if (oldCallback !== newCallback) { // if tweens is added more than once oldCallback = thisTween.vars.onOverwrite; thisTween.vars.onOverwrite = function () { if (oldCallback) { oldCallback.apply(this, arguments); } newCallback.apply(this, arguments); }; } } } log(3, "added tween"); updateTweenProgress(); return Scene; }; /** * Remove the tween from the scene. * This will terminate the control of the Scene over the tween. * * Using the reset option you can decide if the tween should remain in the current state or be rewound to set the target elements back to the state they were in before the tween was added to the scene. * @memberof! animation.GSAP# * * @example * // remove the tween from the scene without resetting it * scene.removeTween(); * * // remove the tween from the scene and reset it to initial position * scene.removeTween(true); * * @param {boolean} [reset=false] - If `true` the tween will be reset to its initial values. * @returns {Scene} Parent object for chaining. */ Scene.removeTween = function (reset) { if (_tween) { if (reset) { _tween.progress(0).pause(); } _tween.kill(); _tween = undefined; log(3, "removed tween (reset: " + (reset ? "true" : "false") + ")"); } return Scene; }; }); }));
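A minimal usage sketch for the plugin above (not part of the library source), assuming ScrollMagic and GSAP are loaded as browser globals and the page contains the hypothetical elements `#trigger` and `#obj`:

var controller = new ScrollMagic.Controller();
new ScrollMagic.Scene({
		triggerElement: "#trigger",
		duration: 300,     // sync tween progress to 300px of scrolling
		tweenChanges: true // plugin option: ease towards the target progress (needs TimelineMax)
	})
	.setTween("#obj", {y: 100}) // shorthand form documented above; duration defaults to 1
	.addTo(controller);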
hktab.d.ts
export declare class HKTABProps { segNo: number; tanClass: string; mode: number; }
 * HKTAB (determine available TAN media)
 * Section C.2.1.2
 */
export declare class HKTAB extends HKTAB_base {
    type: string;
    protected defaults(): void;
    protected serialize(): string[];
    protected deserialize(): void;
}
export {};
declare const HKTAB_base: import("..").Constructable<HKTABProps & import("./segment").Segment<HKTABProps>>; /**
ingest.go
package amclient import ( "context" "fmt" ) const ingestBasePath = "api/ingest" //go:generate mockgen -destination=./fake/mock_ingest.go -package=fake github.com/artefactual-labs/enduro/internal/amclient IngestService // Ingest is an interface for interfacing with the Ingest endpoints of the // Dashboard API. type IngestService interface { Status(context.Context, string) (*IngestStatusResponse, *Response, error) Hide(context.Context, string) (*IngestHideResponse, *Response, error) } // IngestServiceOp handles communication with the Ingest related methods of // the Archivematica API. type IngestServiceOp struct { client *Client } var _ IngestService = &IngestServiceOp{} type IngestStatusResponse struct { ID string `json:"uuid"` Status string `json:"status"` Name string `json:"name"` SIPID string `json:"sip_uuid"` Microservice string `json:"microservice"` Directory string `json:"directory"` Path string `json:"path"` Message string `json:"message"` Type string `json:"type"` } func (s *IngestServiceOp) Status(ctx context.Context, ID string) (*IngestStatusResponse, *Response, error) { path := fmt.Sprintf("%s/status/%s", ingestBasePath, ID) req, err := s.client.NewRequest(ctx, "GET", path, nil) if err != nil { return nil, nil, err } payload := &IngestStatusResponse{} resp, err := s.client.Do(ctx, req, payload) return payload, resp, err } type IngestHideResponse struct { Removed bool `json:"removed"` } func (s *IngestServiceOp) Hide(ctx context.Context, ID string) (*IngestHideResponse, *Response, error) { path := fmt.Sprintf("%s/%s/delete/", ingestBasePath, ID) req, err := s.client.NewRequest(ctx, "DELETE", path, nil) if err != nil
payload := &IngestHideResponse{} resp, err := s.client.Do(ctx, req, payload) return payload, resp, err }
{ return nil, nil, err }
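A brief usage sketch for IngestService above, within the same package; it assumes the Client exposes the service as an `Ingest` field (that wiring is not shown in this excerpt), following the usual service-struct convention.

func pollIngestStatus(ctx context.Context, c *Client, id string) (string, error) {
	// Status performs a GET against api/ingest/status/<id>, as defined above.
	status, _, err := c.Ingest.Status(ctx, id)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s (%s)", status.Status, status.Microservice), nil
}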
helpers.go
package httpexpect

import (
	"encoding/json"
	"fmt"
	"reflect"
	"regexp"

	"github.com/xeipuuv/gojsonschema"
	"github.com/yalp/jsonpath"
	"github.com/yudai/gojsondiff"
	"github.com/yudai/gojsondiff/formatter"
)

func toString(str interface{}) (s string, ok bool) {
	ok = true
	defer func() {
		if err := recover(); err != nil {
			ok = false
		}
	}()
	s = reflect.ValueOf(str).Convert(reflect.TypeOf("")).String()
	return
}

func getPath(chain *chain, value interface{}, path string) *Value {
	if chain.failed() {
		return &Value{*chain, nil}
	}
	result, err := jsonpath.Read(value, path)
	if err != nil {
		chain.fail(err.Error())
		return &Value{*chain, nil}
	}
	return &Value{*chain, result}
}

func checkSchema(chain *chain, value, schema interface{}) {
	if chain.failed() {
		return
	}
	valueLoader := gojsonschema.NewGoLoader(value)
	var schemaLoader gojsonschema.JSONLoader
	if str, ok := toString(schema); ok {
		if ok, _ := regexp.MatchString(`^\w+://`, str); ok {
			schemaLoader = gojsonschema.NewReferenceLoader(str)
		} else {
			schemaLoader = gojsonschema.NewStringLoader(str)
		}
	} else {
		schemaLoader = gojsonschema.NewGoLoader(schema)
	}
	result, err := gojsonschema.Validate(schemaLoader, valueLoader)
	if err != nil {
		chain.fail("\n%s\n\nschema:\n%s\n\nvalue:\n%s",
			err.Error(), dumpSchema(schema), dumpValue(value))
		return
	}
	if !result.Valid() {
		errors := ""
		for _, err := range result.Errors() {
			errors += fmt.Sprintf(" %s\n", err)
		}
		chain.fail(
			"\njson schema validation failed, schema:\n%s\n\nvalue:%s\n\nerrors:\n%s",
			dumpSchema(schema),
			dumpValue(value),
			errors)
		return
	}
}

func dumpSchema(schema interface{}) string {
	if s, ok := toString(schema); ok {
		schema = s
	}
	return regexp.MustCompile(`(?m:^)`).
		ReplaceAllString(fmt.Sprintf("%v", schema), " ")
}

func canonNumber(chain *chain, number interface{}) (f float64, ok bool) {
	ok = true
	defer func() {
		if err := recover(); err != nil {
			chain.fail("%v", err)
			ok = false
		}
	}()
	f = reflect.ValueOf(number).Convert(reflect.TypeOf(float64(0))).Float()
	return
}

func canonArray(chain *chain, in interface{}) ([]interface{}, bool) {
	var out []interface{}
	data, ok := canonValue(chain, in)
	if ok {
		out, ok = data.([]interface{})
		if !ok {
			// report the actual canonical value; `out` would always be nil here
			chain.fail("expected array, got %v", data)
		}
	}
	return out, ok
}

func
(chain *chain, in interface{}) (map[string]interface{}, bool) {
	var out map[string]interface{}
	data, ok := canonValue(chain, in)
	if ok {
		out, ok = data.(map[string]interface{})
		if !ok {
			// report the actual canonical value; `out` would always be nil here
			chain.fail("expected map, got %v", data)
		}
	}
	return out, ok
}

func canonValue(chain *chain, in interface{}) (interface{}, bool) {
	b, err := json.Marshal(in)
	if err != nil {
		chain.fail(err.Error())
		return nil, false
	}

	var out interface{}
	if err := json.Unmarshal(b, &out); err != nil {
		chain.fail(err.Error())
		return nil, false
	}

	return out, true
}

func dumpValue(value interface{}) string {
	b, err := json.MarshalIndent(value, " ", " ")
	if err != nil {
		return " " + fmt.Sprintf("%#v", value)
	}
	return " " + string(b)
}

func diffValues(expected, actual interface{}) string {
	differ := gojsondiff.New()

	var diff gojsondiff.Diff

	if ve, ok := expected.(map[string]interface{}); ok {
		if va, ok := actual.(map[string]interface{}); ok {
			diff = differ.CompareObjects(ve, va)
		} else {
			return " (unavailable)"
		}
	} else if ve, ok := expected.([]interface{}); ok {
		if va, ok := actual.([]interface{}); ok {
			diff = differ.CompareArrays(ve, va)
		} else {
			return " (unavailable)"
		}
	} else {
		return " (unavailable)"
	}

	config := formatter.AsciiFormatterConfig{
		ShowArrayIndex: true,
	}
	f := formatter.NewAsciiFormatter(expected, config)

	str, err := f.Format(diff)
	if err != nil {
		return " (unavailable)"
	}

	return "--- expected\n+++ actual\n" + str
}
canonMap
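The reason the type assertions in canonArray and canonMap are safe to attempt is that canonValue round-trips any Go value through encoding/json, which reduces it to JSON-native types only (map[string]interface{}, []interface{}, float64, string, bool, nil). A self-contained sketch of that reduction:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type point struct {
		X int `json:"x"`
		Y int `json:"y"`
	}
	b, _ := json.Marshal(point{X: 1, Y: 2})
	var out interface{}
	_ = json.Unmarshal(b, &out)
	m := out.(map[string]interface{})                // structs become maps
	fmt.Println(m["x"].(float64) + m["y"].(float64)) // numbers become float64: prints 3
}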
stack.go
package proxy import ( "context" "github.com/p4gefau1t/trojan-go/log" "github.com/p4gefau1t/trojan-go/tunnel" ) type Node struct { Name string Next map[string]*Node IsEndpoint bool context.Context tunnel.Server tunnel.Client } func (n *Node) BuildNext(name string) *Node { if next, found := n.Next[name]; found { return next } t, err := tunnel.GetTunnel(name) if err != nil { log.Fatal(err) } s, err := t.NewServer(n.Context, n.Server) if err != nil { log.Fatal(err) } newNode := &Node{ Name: name, Next: make(map[string]*Node), Context: n.Context, Server: s, } n.Next[name] = newNode return newNode } func (n *Node) LinkNextNode(next *Node) *Node { if next, found := n.Next[next.Name]; found { return next } n.Next[next.Name] = next t, err := tunnel.GetTunnel(next.Name) if err != nil { log.Fatal(err) } s, err := t.NewServer(next.Context, n.Server) // context of the child nodes have been initialized if err != nil { log.Fatal(err) } next.Server = s return next } func FindAllEndpoints(root *Node) []tunnel.Server { list := make([]tunnel.Server, 0) if root.IsEndpoint || len(root.Next) == 0 { list = append(list, root.Server) } for _, next := range root.Next { list = append(list, FindAllEndpoints(next)...) } return list } // CreateClientStack create client tunnel stacks from lists func CreateClientStack(ctx context.Context, clientStack []string) (tunnel.Client, error) { var client tunnel.Client for _, name := range clientStack { t, err := tunnel.GetTunnel(name) if err != nil { return nil, err } client, err = t.NewClient(ctx, client) if err != nil { return nil, err } } return client, nil } // CreateServerStack create server tunnel stack from list func CreateServerStack(ctx context.Context, serverStack []string) (tunnel.Server, error) { var server tunnel.Server for _, name := range serverStack { t, err := tunnel.GetTunnel(name) if err != nil
server, err = t.NewServer(ctx, server) if err != nil { return nil, err } } return server, nil }
{ return nil, err }
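A usage sketch for CreateClientStack above; the layer names are illustrative and must match tunnels registered with tunnel.GetTunnel in this project. Note the build order in the loop: each NewClient receives the previously built client as its underlay, so earlier names sit lower in the stack.

func buildExampleClient(ctx context.Context) (tunnel.Client, error) {
	// hypothetical layering: "transport" wrapped by "tls", wrapped by "trojan"
	return CreateClientStack(ctx, []string{"transport", "tls", "trojan"})
}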
line.py
# -*- coding: utf-8 -*- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ Line visual implementing Agg- and GL-based drawing modes. """ from __future__ import division import numpy as np from ... import gloo, glsl from ...color import Color, ColorArray, get_colormap from ...ext.six import string_types from ..shaders import Function from ..visual import Visual, CompoundVisual from ...util.profiler import Profiler from .dash_atlas import DashAtlas vec2to4 = Function(""" vec4 vec2to4(vec2 inp) { return vec4(inp, 0, 1); } """) vec3to4 = Function(""" vec4 vec3to4(vec3 inp) { return vec4(inp, 1); } """) """ TODO: * Agg support is very minimal; needs attention. * Optimization--avoid creating new buffers, avoid triggering program recompile. """ joins = {'miter': 0, 'round': 1, 'bevel': 2} caps = {'': 0, 'none': 0, '.': 0, 'round': 1, ')': 1, '(': 1, 'o': 1, 'triangle in': 2, '<': 2, 'triangle out': 3, '>': 3, 'square': 4, '=': 4, 'butt': 4, '|': 5} class LineVisual(CompoundVisual): """Line visual Parameters ---------- pos : array Array of shape (..., 2) or (..., 3) specifying vertex coordinates. color : Color, tuple, or array The color to use when drawing the line. If an array is given, it must be of shape (..., 4) and provide one rgba color per vertex. Can also be a colormap name, or appropriate `Function`. width: The width of the line in px. Line widths > 1px are only guaranteed to work when using 'agg' method. connect : str or array Determines which vertices are connected by lines. * "strip" causes the line to be drawn with each vertex connected to the next. * "segments" causes each pair of vertices to draw an independent line segment * numpy arrays specify the exact set of segment pairs to connect. method : str Mode to use for drawing. * "agg" uses anti-grain geometry to draw nicely antialiased lines with proper joins and endcaps. * "gl" uses OpenGL's built-in line rendering. This is much faster, but produces much lower-quality results and is not guaranteed to obey the requested line width or join/endcap styles. antialias : bool Enables or disables antialiasing. For method='gl', this specifies whether to use GL's line smoothing, which may be unavailable or inconsistent on some platforms. """ def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1, connect='strip', method='gl', antialias=False): self._line_visual = None self._changed = {'pos': False, 'color': False, 'width': False, 'connect': False} self._pos = None self._color = None self._width = None self._connect = None self._bounds = None self._antialias = None self._method = 'none' CompoundVisual.__init__(self, []) # don't call subclass set_data; these often have different # signatures. 
LineVisual.set_data(self, pos=pos, color=color, width=width, connect=connect) self.antialias = antialias self.method = method @property def antialias(self): return self._antialias @antialias.setter def antialias(self, aa): self._antialias = bool(aa) self.update() @property def method(self): """The current drawing method""" return self._method @method.setter def method(self, method): if method not in ('agg', 'gl'): raise ValueError('method argument must be "agg" or "gl".') if method == self._method: return self._method = method if self._line_visual is not None: self.remove_subvisual(self._line_visual) if method == 'gl': self._line_visual = _GLLineVisual(self) elif method == 'agg': self._line_visual = _AggLineVisual(self) self.add_subvisual(self._line_visual) for k in self._changed: self._changed[k] = True def set_data(self, pos=None, color=None, width=None, connect=None): """Set the data used to draw this visual. Parameters ---------- pos : array Array of shape (..., 2) or (..., 3) specifying vertex coordinates. color : Color, tuple, or array The color to use when drawing the line. If an array is given, it must be of shape (..., 4) and provide one rgba color per vertex. width: The width of the line in px. Line widths < 1 px will be rounded up to 1 px when using the 'gl' method. connect : str or array Determines which vertices are connected by lines. * "strip" causes the line to be drawn with each vertex connected to the next. * "segments" causes each pair of vertices to draw an independent line segment * int numpy arrays specify the exact set of segment pairs to connect. * bool numpy arrays specify which _adjacent_ pairs to connect. """ if pos is not None: self._bounds = None self._pos = pos self._changed['pos'] = True if color is not None: self._color = color self._changed['color'] = True if width is not None: self._width = width self._changed['width'] = True if connect is not None: self._connect = connect self._changed['connect'] = True self.update() @property def color(self): return self._color @property def width(self): return self._width @property def connect(self): return self._connect @property def pos(self): return self._pos def _interpret_connect(self): if isinstance(self._connect, np.ndarray): # Convert a boolean connection array to a vertex index array if self._connect.ndim == 1 and self._connect.dtype == bool: index = np.empty((len(self._connect), 2), dtype=np.uint32) index[:] = np.arange(len(self._connect))[:, np.newaxis] index[:, 1] += 1 return index[self._connect] elif self._connect.ndim == 2 and self._connect.shape[1] == 2: return self._connect.astype(np.uint32) else: raise TypeError("Got invalid connect array of shape %r and " "dtype %r" % (self._connect.shape, self._connect.dtype)) else: return self._connect def _interpret_color(self, color_in=None): color_in = self._color if color_in is None else color_in colormap = None if isinstance(color_in, string_types): try: colormap = get_colormap(color_in) color = Function(colormap.glsl_map) except KeyError: color = Color(color_in).rgba elif isinstance(color_in, Function): color = Function(color_in) else: color = ColorArray(color_in).rgba if len(color) == 1: color = color[0] return color, colormap def _compute_bounds(self, axis, view): """Get the bounds Parameters ---------- mode : str Describes the type of boundary requested. Can be "visual", "data", or "mouse". axis : 0, 1, 2 The axis along which to measure the bounding values, in x-y-z order. """ # Can and should we calculate bounds? 
if (self._bounds is None) and self._pos is not None: pos = self._pos self._bounds = [(pos[:, d].min(), pos[:, d].max()) for d in range(pos.shape[1])] # Return what we can if self._bounds is None: return else: if axis < len(self._bounds): return self._bounds[axis] else: return (0, 0) def _prepare_draw(self, view): if self._width == 0: return False CompoundVisual._prepare_draw(self, view) class _GLLineVisual(Visual): VERTEX_SHADER = """ varying vec4 v_color; void main(void) { gl_Position = $transform($to_vec4($position)); v_color = $color;
FRAGMENT_SHADER = """ varying vec4 v_color; void main() { gl_FragColor = v_color; } """ def __init__(self, parent): self._parent = parent self._pos_vbo = gloo.VertexBuffer() self._color_vbo = gloo.VertexBuffer() self._connect_ibo = gloo.IndexBuffer() self._connect = None Visual.__init__(self, vcode=self.VERTEX_SHADER, fcode=self.FRAGMENT_SHADER) self.set_gl_state('translucent') def _prepare_transforms(self, view): xform = view.transforms.get_transform() view.view_program.vert['transform'] = xform def _prepare_draw(self, view): prof = Profiler() if self._parent._changed['pos']: if self._parent._pos is None: return False # todo: does this result in unnecessary copies? pos = np.ascontiguousarray(self._parent._pos.astype(np.float32)) self._pos_vbo.set_data(pos) self._program.vert['position'] = self._pos_vbo if pos.shape[-1] == 2: self._program.vert['to_vec4'] = vec2to4 elif pos.shape[-1] == 3: self._program.vert['to_vec4'] = vec3to4 else: raise TypeError("Got bad position array shape: %r" % (pos.shape,)) if self._parent._changed['color']: color, cmap = self._parent._interpret_color() # If color is not visible, just quit now if isinstance(color, Color) and color.is_blank: return False if isinstance(color, Function): # TODO: Change to the parametric coordinate once that is done self._program.vert['color'] = color( '(gl_Position.x + 1.0) / 2.0') else: if color.ndim == 1: self._program.vert['color'] = color else: self._color_vbo.set_data(color) self._program.vert['color'] = self._color_vbo self.shared_program['texture2D_LUT'] = cmap.texture_lut() \ if (hasattr(cmap, 'texture_lut')) else None # Do we want to use OpenGL, and can we? GL = None from ...app._default_app import default_app if default_app is not None and \ default_app.backend_name != 'ipynb_webgl': try: import OpenGL.GL as GL except Exception: # can be other than ImportError sometimes pass # Turn on line smooth and/or line width if GL: if self._parent._antialias: GL.glEnable(GL.GL_LINE_SMOOTH) else: GL.glDisable(GL.GL_LINE_SMOOTH) px_scale = self.transforms.pixel_scale width = px_scale * self._parent._width GL.glLineWidth(max(width, 1.)) if self._parent._changed['connect']: self._connect = self._parent._interpret_connect() if isinstance(self._connect, np.ndarray): self._connect_ibo.set_data(self._connect) if self._connect is None: return False prof('prepare') # Draw if isinstance(self._connect, string_types) and \ self._connect == 'strip': self._draw_mode = 'line_strip' self._index_buffer = None elif isinstance(self._connect, string_types) and \ self._connect == 'segments': self._draw_mode = 'lines' self._index_buffer = None elif isinstance(self._connect, np.ndarray): self._draw_mode = 'lines' self._index_buffer = self._connect_ibo else: raise ValueError("Invalid line connect mode: %r" % self._connect) prof('draw') class _AggLineVisual(Visual): _agg_vtype = np.dtype([('a_position', np.float32, (2,)), ('a_tangents', np.float32, (4,)), ('a_segment', np.float32, (2,)), ('a_angles', np.float32, (2,)), ('a_texcoord', np.float32, (2,)), ('alength', np.float32), ('color', np.float32, (4,))]) VERTEX_SHADER = glsl.get('lines/agg.vert') FRAGMENT_SHADER = glsl.get('lines/agg.frag') def __init__(self, parent): self._parent = parent self._vbo = gloo.VertexBuffer() self._pos = None self._color = None self._da = DashAtlas() dash_index, dash_period = self._da['solid'] self._U = dict(dash_index=dash_index, dash_period=dash_period, linejoin=joins['round'], linecaps=(caps['round'], caps['round']), dash_caps=(caps['round'], caps['round']), antialias=1.0) 
self._dash_atlas = gloo.Texture2D(self._da._data) Visual.__init__(self, vcode=self.VERTEX_SHADER, fcode=self.FRAGMENT_SHADER) self._index_buffer = gloo.IndexBuffer() self.set_gl_state('translucent', depth_test=False) self._draw_mode = 'triangles' def _prepare_transforms(self, view): data_doc = view.get_transform('visual', 'document') doc_px = view.get_transform('document', 'framebuffer') px_ndc = view.get_transform('framebuffer', 'render') vert = view.view_program.vert vert['transform'] = data_doc vert['doc_px_transform'] = doc_px vert['px_ndc_transform'] = px_ndc def _prepare_draw(self, view): bake = False if self._parent._changed['pos']: if self._parent._pos is None: return False # todo: does this result in unnecessary copies? self._pos = np.ascontiguousarray( self._parent._pos.astype(np.float32)) bake = True if self._parent._changed['color']: color, cmap = self._parent._interpret_color() self._color = color bake = True if self._parent._changed['connect']: if self._parent._connect not in [None, 'strip']: raise NotImplementedError("Only 'strip' connection mode " "allowed for agg-method lines.") if bake: V, idxs = self._agg_bake(self._pos, self._color) self._vbo.set_data(V) self._index_buffer.set_data(idxs) # self._program.prepare() self.shared_program.bind(self._vbo) uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0, linewidth=self._parent._width) for n, v in uniforms.items(): self.shared_program[n] = v for n, v in self._U.items(): self.shared_program[n] = v self.shared_program['u_dash_atlas'] = self._dash_atlas @classmethod def _agg_bake(cls, vertices, color, closed=False): """ Bake a list of 2D vertices for rendering them as thick line. Each line segment must have its own vertices because of antialias (this means no vertex sharing between two adjacent line segments). 
""" n = len(vertices) P = np.array(vertices).reshape(n, 2).astype(float) idx = np.arange(n) # used to eventually tile the color array dx, dy = P[0] - P[-1] d = np.sqrt(dx*dx+dy*dy) # If closed, make sure first vertex = last vertex (+/- epsilon=1e-10) if closed and d > 1e-10: P = np.append(P, P[0]).reshape(n+1, 2) idx = np.append(idx, idx[-1]) n += 1 V = np.zeros(len(P), dtype=cls._agg_vtype) V['a_position'] = P # Tangents & norms T = P[1:] - P[:-1] N = np.sqrt(T[:, 0]**2 + T[:, 1]**2) # T /= N.reshape(len(T),1) V['a_tangents'][+1:, :2] = T V['a_tangents'][0, :2] = T[-1] if closed else T[0] V['a_tangents'][:-1, 2:] = T V['a_tangents'][-1, 2:] = T[0] if closed else T[-1] # Angles T1 = V['a_tangents'][:, :2] T2 = V['a_tangents'][:, 2:] A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0], T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1]) V['a_angles'][:-1, 0] = A[:-1] V['a_angles'][:-1, 1] = A[+1:] # Segment L = np.cumsum(N) V['a_segment'][+1:, 0] = L V['a_segment'][:-1, 1] = L # V['a_lengths'][:,2] = L[-1] # Step 1: A -- B -- C => A -- B, B' -- C V = np.repeat(V, 2, axis=0)[1:-1] V['a_segment'][1:] = V['a_segment'][:-1] V['a_angles'][1:] = V['a_angles'][:-1] V['a_texcoord'][0::2] = -1 V['a_texcoord'][1::2] = +1 idx = np.repeat(idx, 2)[1:-1] # Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1 V = np.repeat(V, 2, axis=0) V['a_texcoord'][0::2, 1] = -1 V['a_texcoord'][1::2, 1] = +1 idx = np.repeat(idx, 2) idxs = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32), (n-1)*(2*3)) idxs += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6) # Length V['alength'] = L[-1] * np.ones(len(V)) # Color if color.ndim == 1: color = np.tile(color, (len(V), 1)) elif color.ndim == 2 and len(color) == n: color = color[idx] else: raise ValueError('Color length %s does not match number of ' 'vertices %s' % (len(color), n)) V['color'] = color return V, idxs
} """
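A standalone sketch of the boolean-connect conversion performed by ``_interpret_connect`` above: a bool array of length N marks which adjacent vertex pairs are joined and is expanded into an (M, 2) array of segment endpoint indices.

import numpy as np

connect = np.array([True, False, True])  # join (0, 1) and (2, 3); skip (1, 2)
index = np.empty((len(connect), 2), dtype=np.uint32)
index[:] = np.arange(len(connect))[:, np.newaxis]
index[:, 1] += 1
print(index[connect])  # [[0 1] [2 3]]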
sftp_test.go
package input import ( "context" "fmt" "github.com/Jeffail/benthos/v3/lib/types" "github.com/pkg/sftp" "log" "os" "path" "testing" "time" sftpSetup "github.com/Jeffail/benthos/v3/internal/service/sftp" benthosLog "github.com/Jeffail/benthos/v3/lib/log" "github.com/Jeffail/benthos/v3/lib/metrics" "github.com/stretchr/testify/assert" "golang.org/x/crypto/ssh" ) type scenario struct { Name string Conf *SFTPConfig } var sftpClient *sftp.Client var sftpUsername = "foo" var sftpPassword = "pass" var sftpDirectory = "/upload" var sftpPort string //func TestMain(m *testing.M) { // pool, err := dockertest.NewPool("") // if err != nil { // log.Fatalf("Could not connect to docker: %s", err) // } // // resource, err := pool.RunWithOptions(&dockertest.RunOptions{ // Repository: "atmoz/sftp", // Tag: "alpine", // Cmd: []string{ // "foo:pass:1001:100:upload", // }, // }) // // if err != nil { // log.Fatalf("Could not start resource: %s", err) // } // // if err := pool.Retry(func() error { // sftpPort = resource.GetPort("22/tcp") // if err != nil { // return err // } // // isConnected := ConnectToSFTPServer("localhost", sftpPort) // if !isConnected { // return errors.New("failed to connect to SSH server") // } // // return nil // }); err != nil { // log.Fatalf("Could not connect to docker: %s", err) // } // // code := m.Run() // // if err := pool.Purge(resource); err != nil { // log.Fatalf("Could not purge resource: %s", err) // } // // os.Exit(code) //} func TestProcessFile(t *testing.T) { t.Skip() filePath := path.Join(sftpDirectory, "test.txt") testCase := scenario{ Name: "good conf", Conf: &SFTPConfig{ Address: "http://localhost:" + sftpPort, Paths: []string{filePath}, Credentials: sftpSetup.Credentials{ Username: "foo", Password: "pass", }, MaxConnectionAttempts: 10, RetrySleepDuration: "5s", Codec: "lines", DeleteOnFinish: false, MaxBuffer: 1000000, }, } GenerateTestFile(filePath, "This is a test file") time.Sleep(time.Second * 1) proc, err := NewSFTP(*testCase.Conf, benthosLog.Noop(), metrics.Noop()) assert.NoError(t, err, "config should not error") assert.NotNil(t, proc, "should return non-nil data") time.Sleep(time.Second * 3) err = proc.ConnectWithContext(context.Background()) assert.NoError(t, err, "ConnectWithContext should not error") msg, _, err := proc.ReadWithContext(context.Background()) assert.NoError(t, err, "ReadWithContext should not error") ValidateMessage(t, msg, "This is a test file", filePath) } func TestNoServer(t *testing.T) { t.Skip() filepath := path.Join(sftpDirectory, "test.txt") testCase := scenario{ Name: "no server", Conf: &SFTPConfig{ Address: "http:///localhost:" + sftpPort, Paths: []string{filepath}, Credentials: sftpSetup.Credentials{ Username: sftpUsername, Password: sftpPassword, }, MaxConnectionAttempts: 3, RetrySleepDuration: "5s", Codec: "lines", MaxBuffer: 1000000, }, } proc, err := NewSFTP(*testCase.Conf, benthosLog.Noop(), metrics.Noop()) assert.NotNil(t, proc, "should return non-nil data") assert.NoError(t, err, "config should not error") err = proc.ConnectWithContext(context.Background()) assert.Error(t, err, "connection should error") } func TestInvalidCredentials(t *testing.T) { t.Skip() testCase := scenario{ Name: "invalid credentials", Conf: &SFTPConfig{ Address: "http://localhost:" + sftpPort, Paths: []string{path.Join(sftpDirectory, "test.txt")}, Credentials: sftpSetup.Credentials{ Username: "invaliduser", Password: "invalidpass", }, MaxConnectionAttempts: 3, RetrySleepDuration: "5s", MaxBuffer: 1000000, Codec: "lines", }, } proc, err := 
NewSFTP(*testCase.Conf, benthosLog.Noop(), metrics.Noop()) assert.NotNil(t, proc, "should return non-nil data") assert.NoError(t, err, "config should not error") err = proc.ConnectWithContext(context.Background()) assert.Error(t, err, "connection should error") } func TestFileNotFound(t *testing.T) { t.Skip() testCase := scenario{ Name: "file not found", Conf: &SFTPConfig{ Address: "http://localhost:" + sftpPort, Paths: []string{path.Join(sftpDirectory, "missingfile.txt")}, Credentials: sftpSetup.Credentials{ Username: sftpUsername, Password: sftpPassword, }, MaxConnectionAttempts: 10, RetrySleepDuration: "5s", MaxBuffer: 1000000, Codec: "lines", }, } proc, err := NewSFTP(*testCase.Conf, benthosLog.Noop(), metrics.Noop()) assert.NoError(t, err, "config should not error") assert.NotNil(t, proc, "should return non-nil data") err = proc.ConnectWithContext(context.Background()) assert.Error(t, err, "ConnectWithContext should error") } func TestProcessDirectory(t *testing.T) { t.Skip() dirPath := path.Join(sftpDirectory, t.Name()) GenerateTestDirectory(dirPath) defer DeleteTestDirectory(dirPath) file1Path := path.Join(dirPath, "dir_process_test1.txt") file2Path := path.Join(dirPath, "dir_process_test2.txt") defer DeleteTestFile(file1Path) defer DeleteTestFile(file2Path) testCase := scenario{ Name: "process directory", Conf: &SFTPConfig{ Address: "http://localhost:" + sftpPort, Paths: []string{path.Join(dirPath, "*.txt")}, Credentials: sftpSetup.Credentials{ Username: sftpUsername, Password: sftpPassword, }, MaxConnectionAttempts: 10, RetrySleepDuration: "5s", MaxBuffer: 1000000, Codec: "lines", }, } GenerateTestFile(file1Path, "This is a test\nAnother test line") GenerateTestFile(file2Path, "This is the other test file\nSecond line of second file") proc, err := NewSFTP(*testCase.Conf, benthosLog.Noop(), metrics.Noop()) assert.NoError(t, err, "config should not error") assert.NotNil(t, proc, "should return non-nil data") err = proc.ConnectWithContext(context.Background()) assert.NoError(t, err, "ConnectWithContext should not error") msg, _, err := proc.ReadWithContext(context.Background()) assert.NoError(t, err, "ReadWithContext should not error") ValidateMessage(t, msg, "This is a test", file1Path) msg, _, err = proc.ReadWithContext(context.Background()) assert.NoError(t, err, "ReadWithContext should not error") ValidateMessage(t, msg, "Another test line", file1Path) msg, _, err = proc.ReadWithContext(context.Background()) assert.Errorf(t, err, "should reach end of file") // move to the next file err = proc.ConnectWithContext(context.Background()) assert.NoError(t, err, "ConnectWithContext should not error") msg, _, err = proc.ReadWithContext(context.Background()) assert.NoError(t, err, "ReadWithContext should not error") ValidateMessage(t, msg, "This is the other test file", file2Path) msg, _, err = proc.ReadWithContext(context.Background()) assert.NoError(t, err, "ReadWithContext should not error") ValidateMessage(t, msg, "Second line of second file", file2Path) } func ValidateMessage(t *testing.T, msg types.Message, expectedMessage string, expectedFilePath string) { assert.NotNil(t, msg, "message should be non-nil") part := msg.Get(0) messageString := string(part.Get()) assert.Equal(t, expectedMessage, messageString) assert.Equal(t, expectedFilePath, part.Metadata().Get("path")) } func ConnectToSFTPServer(server string, port string) bool { // create sftp client and establish connection s := &sftpSetup.Server{ Host: server, Port: port, } certCheck := &ssh.CertChecker{ IsHostAuthority: 
sftpSetup.HostAuthCallback(), IsRevoked: sftpSetup.CertCallback(s), HostKeyFallback: sftpSetup.HostCallback(s), } addr := fmt.Sprintf("%s:%s", server, port) config := &ssh.ClientConfig{ User: sftpUsername, Auth: []ssh.AuthMethod{ ssh.Password(sftpPassword), }, HostKeyCallback: certCheck.CheckHostKey, } var conn *ssh.Client var err error conn, err = ssh.Dial("tcp", addr, config) if err != nil
client, err := sftp.NewClient(conn) if err != nil { return false } sftpClient = client return true } func GenerateTestFile(filepath string, data string) { file, err := sftpClient.Create(filepath) if err != nil { log.Fatalf("Error creating file %s on SSH server", filepath) return } _, err = file.Write([]byte(data)) if err != nil { log.Fatalf("Error writing to file %s on SSH server", filepath) } } func UpdateTestFile(filepath string, data string) { file, err := sftpClient.OpenFile(filepath, os.O_RDWR) if err != nil { log.Printf("Error updating file %s on SSH server", filepath) } _, err = file.Write([]byte(data)) if err != nil { log.Printf("Error writing to file %s on SSH server", filepath) } } func DeleteTestFile(filepath string) { err := sftpClient.Remove(filepath) if err != nil { log.Printf("Error deleting file %s on SSH server", filepath) } } func GenerateTestDirectory(dirPath string) { err := sftpClient.Mkdir(dirPath) if err != nil { log.Printf("Error creating directory %s on SSH server", dirPath) } } func DeleteTestDirectory(dirPath string) { err := sftpClient.RemoveDirectory(dirPath) if err != nil { log.Printf("Error removing directory %s on SSH server", dirPath) } }
{ return false }
test_get.py
import json from rest_framework.test import APITestCase from django.urls import reverse from rest_framework import status from django.contrib.auth import get_user_model from authors.apps.articles.models import Articles from authors.apps.profiles.models import Profile class TestGetEndpoint(APITestCase): def setUp(self): """ Prepares table for tests """ self.token = self.get_user_token() self.slug = "life_love_death" self.title = "Life Love and Death" self.description = "What is life?" self.body = "This is the real life body." self.tagList = "life,love,death" self.author = 'TestAuthor' self.article = Articles( slug=self.slug, title=self.title, description=self.description, body=self.body, tagList=self.tagList, author=Profile.objects.get(username=self.author)) self.article.save() def test_get_all_articles(self): """ This tests getting all articles successfully """ self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) url = reverse('articles') response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_successfully_not_getting_articles_if_token_not_used(self): """ Unauthorized error returned if no token is passed in """ url = reverse('articles') response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def
(self): """ Tests the pk of the article is true """ self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) url = reverse('articles') response = self.client.get(url) self.assertIn(b"1", response.content) def test_articles_are_paginated(self): """ This tests if the returned articles are paginated """ self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) url = reverse('articles') response = self.client.get(url).render() # this checks the number of articles in the database self.assertIn(b"1", response.content) # next is null since there is only one article posted self.assertIn(b"null", response.content) # previous is null since only one article has been posted # the page_size holds ten articles per page self.assertIn(b"null", response.content) # previous def test_get_specific_article(self): """ This gets a specific article """ self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) url = reverse('articleSpecific', kwargs={'slug': 'life_love_death'}) response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_getting_and_checking_articles_content(self): """ This checks if the right content of an article is returned """ self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) url = reverse('articles') response = self.client.get(url).render() # checks if the body passed during posting is the one returned self.assertIn(b"This is the real life body.", response.content) # checks if id returned is 1 self.assertIn(b"1", response.content) def test_wrong_request(self): """ Checks request for a non existing article """ self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token) url = reverse( 'articleSpecific', kwargs={ 'slug': 'life_love_death_live'}) response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) response.render() self.assertIn(b"Article does not exist", response.content) def get_user_token(self): user = { "user": { "username": "TestAuthor", "email": "[email protected]", "password": "test123user#Password" } } response = self.client.post( reverse('register'), data=user, format='json') user = get_user_model() user = user.objects.get(username="TestAuthor") user.is_active = True user.save() response.render() data = response.content token = json.loads(data.decode('utf-8'))['user']['token'] return token
test_get_article_id
rsdm.rs
use wasm_bindgen::prelude::*; use std::f64; use crate::rsdm::preprocess::MetHour; use crate::rsdm::disperse::{calc_uz, plume_rise, get_sigma_y, get_sigma_z, wind_components, C}; const AMBIENT_TEMP: f64 = 293.15; // Fixed ambient temperature [K] (20 C) const BANDS: i32 = 10; pub const TOLERANCE: f64 = 0.00000001; /// Source defines the stack (emission source) parameters #[wasm_bindgen] #[derive(Copy, Clone)] pub struct Source { pub x: f64, // Stack location (m) pub y: f64, pub height: f64, // Stack height (m) pub diameter: f64, // Stack diameter (m) pub velocity: f64, // Plume velocity at stack tip (m/s) pub temp: f64, // Plume temperature (C) pub emission: f64, // Stack emission rate (g/s) } /// RSDM maintains the current state #[wasm_bindgen] pub struct RSDM { pub source: Source, // min, max grid extents and spacing (m) pub x_min: i32, pub x_max: i32, pub y_min: i32, pub y_max: i32, pub z_min: i32, pub z_max: i32, pub x_spacing: usize, pub y_spacing: usize, pub z_spacing: usize, pub x_points: usize, pub y_points: usize, pub z_points: usize, // meteorological settings pub hours: u32, pub wspd: f64, pub wdir: f64, // degrees pub roughness: u8, pub pgcat: u8, // grid plan concentrations (absolute and image representation) r_grid: Vec<f64>, r_disp: Vec<u8>, // grid profile concentrations (absolute and image representation) h_grid: Vec<f64>, h_disp: Vec<u8>, } /// Public methods, exported to JavaScript #[wasm_bindgen] impl RSDM { // Create instance of RSDM with default parameters pub fn new() -> RSDM { // Default emission source let source = Source{ x: 0.0, y: 0.0, height: 50.0, diameter: 0.5, velocity: 10.0, temp: 60.0, emission: 1.0 }; let mut core = RSDM { source: source, x_min: -2500, x_max: 2500, y_min: -2500, y_max: 2500, z_min: 0, z_max: 1000, x_spacing: 10, y_spacing: 10, z_spacing: 5, x_points: 500,
z_points: 200, hours: 20, wspd: 5.0, wdir: 235.0, roughness: 0, pgcat: b'C', r_grid: Vec::new(), r_disp: Vec::new(), h_grid: Vec::new(), h_disp: Vec::new(), }; core.setup_grids(); core } /// Change grid resolution pub fn set_resolution(&mut self, value: &str) { let v_spacing: usize; let h_spacing: usize; match value { "Low" => {h_spacing = 50; v_spacing = 25}, "Medium" => {h_spacing = 20; v_spacing = 10}, "High" => {h_spacing = 10; v_spacing = 5}, _ => panic!(), } self.x_spacing = h_spacing; self.y_spacing = h_spacing; self.z_spacing = v_spacing; self.setup_grids(); } /// Setup grids pub fn setup_grids(&mut self) { self.x_points = ((self.x_max - self.x_min) / self.x_spacing as i32) as usize; self.y_points = ((self.y_max - self.y_min) / self.y_spacing as i32) as usize; self.z_points = ((self.z_max - self.z_min) / self.z_spacing as i32) as usize; let r_grid_len = self.x_points * self.y_points; self.r_grid = vec![0.0; r_grid_len]; self.r_disp = vec![0; r_grid_len]; let h_grid_len = self.x_points * self.z_points; self.h_grid = vec![0.0; h_grid_len]; self.h_disp = vec![0; h_grid_len]; } pub fn set_elevation(&mut self, value: f64) { self.source.height = value; } pub fn set_diameter(&mut self, value: f64) { self.source.diameter = value; } pub fn set_velocity(&mut self, value: f64) { self.source.velocity = value; } pub fn set_temp(&mut self, value: f64) { self.source.temp = value; } pub fn set_wdir(&mut self, value: f64) { self.wdir = value; } pub fn set_wspd(&mut self, value: f64) { self.wspd = value; } pub fn set_roughness(&mut self, value: &str) { match value { "urban" => self.roughness = 0, "rural" => self.roughness = 1, _ => panic!(), } } pub fn set_pgcat(&mut self, value: &str) { match value { "A" => self.pgcat = b'A', "B" => self.pgcat = b'B', "C" => self.pgcat = b'C', "D" => self.pgcat = b'D', "E" => self.pgcat = b'E', "F" => self.pgcat = b'F', _ => panic!(), } } pub fn width(&self) -> u32 { self.x_points as u32 } pub fn height(&self) -> u32 { self.y_points as u32 } pub fn altitude(&self) -> u32 { self.z_points as u32 } pub fn r_grid(&self) -> *const u8 { self.r_disp.as_ptr() } pub fn h_grid(&self) -> *const u8 { self.h_disp.as_ptr() } fn cr_to_linear(&self, col: usize, row: usize) -> usize { let row_offset = self.y_points - 1; self.x_points * (row_offset - row) + col } fn ch_to_linear(&self, col: usize, row: usize) -> usize { let row_offset = self.z_points - 1; self.x_points * (row_offset - row) + col } pub fn r_grid_max(&self) -> f64 { self.r_grid.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b)) } pub fn h_grid_max(&self) -> f64 { self.h_grid.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b)) } pub fn clear_grids(&mut self) { for i in 0..self.r_grid.len() { self.r_grid[i] = 0.0; } for i in 0..self.h_grid.len() { self.h_grid[i] = 0.0; } } #[allow(non_snake_case)] pub fn iter_disp(&mut self, hours: u32) { for _ in 0..hours { // Generate an hour of met let metline: MetHour; if hours > 1 { metline = self.gen_met(true); } else { metline = self.gen_met(false); } // Calculate effective wind speed at stack tip (user specified wind speed is for 10 m) let Uz = calc_uz(metline.u, self.source.height, 10.0, metline.pgcat, self.roughness); // Calculate plume rise using Briggs equations let Ts = self.source.temp + 273.15; let (dH,Xf) = plume_rise(Uz, self.source.velocity, self.source.diameter, Ts, AMBIENT_TEMP, metline.pgcat); let H = self.source.height + dH; let Q = self.source.emission; let sin_phi = metline.phi.sin(); let cos_phi = metline.phi.cos(); // Calculate concentrations for plan view grid 
(fixed grid height of 0 m) for (y,Yr) in (self.y_min..self.y_max).step_by(self.y_spacing).enumerate() { for (x,Xr) in (self.x_min..self.x_max).step_by(self.x_spacing).enumerate() { if Uz > 0.5 { let (xx, yy) = wind_components(Xr as f64, Yr as f64, 0.0, 0.0, sin_phi, cos_phi); let xx = xx - (Xf / 1000.0); // Plume rise correction let sig_y = get_sigma_y(metline.pgcat, xx); let sig_z = get_sigma_z(metline.pgcat, xx); let i = self.cr_to_linear(x, y); let conc = C(xx, yy, 0.0, Uz, Q, H, sig_y, sig_z) / hours as f64; self.r_grid[i] += conc; } } } // Calculate concentrations for 2d slice showing height profile along plume for (z,Zr) in (self.z_min..self.z_max).step_by(self.z_spacing).enumerate() { for (x,Xr) in (self.x_min..self.x_max).step_by(self.x_spacing).enumerate() { if Uz > 0.5 { let xx = (Xr as f64 - Xf) / 1000.0; // Plume rise correction let sig_y = get_sigma_y(metline.pgcat, xx); let sig_z = get_sigma_z(metline.pgcat, xx); let i = self.ch_to_linear(x, z); let conc = C(xx, 0.0, Zr as f64, Uz, Q, H, sig_y, sig_z) / hours as f64; self.h_grid[i] += conc; } } } } } pub fn update_png(&mut self) { // Calculate min based on max - use log to band let grid_max = self.r_grid_max(); let min_norm = grid_max.log10().trunc() as i32 - BANDS; // Normalise 2d grid into bands by taking log for y in 0..self.y_points { for x in 0..self.x_points { let i = self.cr_to_linear(x, y); if self.r_grid[i] > grid_max / 1e10 { let conc_norm = self.r_grid[i].log10().trunc() as i32 - min_norm; self.r_disp[i] = conc_norm as u8; } else { self.r_disp[i] = 0; } } } // Normalise height slice into bands by taking log for z in 0..self.z_points { for x in 0..self.x_points { let i = self.ch_to_linear(x, z); if self.h_grid[i] > grid_max / 1e10 { let conc_norm = self.h_grid[i].log10().trunc() as i32 - min_norm; self.h_disp[i] = conc_norm as u8; } else { self.h_disp[i] = 0; } } } } } pub fn approx_equal(x :f64, y :f64) -> bool { let diff = (x - y).abs(); diff < TOLERANCE } #[test] fn model_run_test() { // Create new *RSDM and populate with fixed values (overwrite defaults) let mut dm = RSDM::new(); dm.wspd = 2.0; dm.wdir = 130.0; dm.source.height = 10.0; dm.source.temp = 100.0; dm.pgcat = b'A'; dm.hours = 1; dm.x_min = -2500; dm.x_max = 2500; dm.y_min = -2500; dm.y_max = 2500; dm.z_min = 0; dm.z_max = 1000; dm.x_spacing = 20; dm.y_spacing = 20; dm.z_spacing = 10; dm.setup_grids(); dm.iter_disp(1); let grid_ref = 23 * dm.x_points + 14; let value = dm.r_grid[grid_ref]; assert!(approx_equal(value, 6.366502967443e-08), value); let grid_ref = 18 * dm.x_points + 181; let value = dm.h_grid[grid_ref]; assert!(approx_equal(value, 4.086979994894e-07), value); }
y_points: 500,
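A self-contained restatement of the log-banding used by `update_png` above: a concentration is mapped to a display band by comparing its order of magnitude with the grid maximum over `BANDS` decades, and anything more than ten decades below the maximum collapses to band 0.

fn to_band(conc: f64, grid_max: f64) -> u8 {
    let min_norm = grid_max.log10().trunc() as i32 - BANDS;
    if conc > grid_max / 1e10 {
        (conc.log10().trunc() as i32 - min_norm) as u8
    } else {
        0
    }
}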
create_instances_responses.go
// Code generated by go-swagger; DO NOT EDIT. package mute // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "io" "github.com/go-openapi/runtime" "github.com/go-openapi/strfmt" "github.com/cldmnky/teamcity-rest-client-go/models" ) // CreateInstancesReader is a Reader for the CreateInstances structure. type CreateInstancesReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the received o. func (o *CreateInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewCreateInstancesOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } } // NewCreateInstancesOK creates a CreateInstancesOK with default headers values func NewCreateInstancesOK() *CreateInstancesOK
/*CreateInstancesOK handles this case with default header values. successful operation */ type CreateInstancesOK struct { Payload *models.Mutes } func (o *CreateInstancesOK) Error() string { return fmt.Sprintf("[POST /app/rest/mutes/multiple][%d] createInstancesOK %+v", 200, o.Payload) } func (o *CreateInstancesOK) GetPayload() *models.Mutes { return o.Payload } func (o *CreateInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { o.Payload = new(models.Mutes) // response payload if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } return nil }
{ return &CreateInstancesOK{} }
api-keys.effects.ts
import { Injectable } from '@angular/core'; import { Actions, ofType, createEffect } from '@ngrx/effects'; // Constants import { map, withLatestFrom, catchError, mergeMap} from 'rxjs/operators'; // Actions import * as fromActions from './api-keys.actions'; import * as fromUserSessionActions from '@stores/user-session/user-session.actions'; import * as fromApplicationDataActions from '@stores/application-data/application-data.actions'; import { Logger } from '@services/logger/logger.service'; import { of } from 'rxjs'; import { ApiKeysService } from '@services/api-keys/api-keys.service'; import { ApiKeysDatabaseService } from '@db/api-keys-database.service'; import { API_KEY_CONSTANTS } from '@core/constants/api-key.constants'; import { Store } from '@ngrx/store'; import { ApiKeyModel } from '@core/models/api-key.model'; import * as fromApiKeysSelectors from '@stores/api-keys/api-keys.selectors'; import { ERROR_CONTEXTS } from '@core/constants/application-data.constants'; const log = new Logger('api-keys-data.effects'); @Injectable() export class ApiKeyEffects { constructor( private actions$: Actions<fromActions.ApiKeyActions | fromUserSessionActions.UserSessionActionActions >, private apiKeysService: ApiKeysService, private apiKeysDatabaseService: ApiKeysDatabaseService, private store: Store<ApiKeyModel>, ) { } public initApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.INIT_API_KEY), mergeMap( () => this.apiKeysDatabaseService.get(API_KEY_CONSTANTS.API_KEY_STATE).pipe( map((data: any) => { if (data) { return new fromActions.ApiKeyInitSuccess(data.query, data.items); } else { return new fromActions.ApiKeyResetSuccess(); } }) ) ) ) ); public queryApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.QUERY_API_KEY), mergeMap( (action) => this.apiKeysService.search(action.query).pipe( mergeMap( (queryResult) => this.apiKeysDatabaseService.set(API_KEY_CONSTANTS.API_KEY_STATE, {query: action.query, items: queryResult }).pipe( map( () => new fromActions.ApiKeyQuerySuccess(action.query, queryResult)) ) ), catchError((error) => of(new fromApplicationDataActions.PopulateError(error, ERROR_CONTEXTS.API_KEY))) ) ) ) ); public updateApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.UPDATE_API_KEY), withLatestFrom( this.store.select(fromApiKeysSelectors.getCurrentQuery) ), map(([action, query]) => { return {action, query}; }), mergeMap((payload) => this.apiKeysService.update(payload.action.id, payload.action.item).pipe( map( () => new fromActions.ApiKeyQuery(payload.query ? payload.query : '') ), catchError((error) => of(new fromApplicationDataActions.PopulateError(error, ERROR_CONTEXTS.API_KEY))) ) ) ) ); public deleteApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.DELETE_API_KEY), withLatestFrom( this.store.select(fromApiKeysSelectors.getCurrentQuery) ), map(([action, query]) => { return {action, query}; }), mergeMap((payload) => this.apiKeysService.delete(payload.action.id).pipe( map(() => new fromActions.ApiKeyQuery(payload.query ? 
payload.query : '')), catchError((error) => of(new fromApplicationDataActions.PopulateError(error, ERROR_CONTEXTS.API_KEY))) ) ) ) ); public addApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.ADD_API_KEY), withLatestFrom( this.store.select(fromApiKeysSelectors.getCurrentQuery) ), map(([action, query]) => { return {action, query}; }), mergeMap((payload) => this.apiKeysService.add(payload.action.item).pipe( map(user => new fromActions.ApiKeyQuery(payload.query ? payload.query : '')), catchError((error) => of(new fromApplicationDataActions.PopulateError(error, ERROR_CONTEXTS.API_KEY)))
) ) ); public payApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.PAY_API_KEY), map((payload) => this.apiKeysService.pay(payload.name, payload.amount)) ) , { dispatch: false }); public resetApiKey = createEffect(() => this.actions$.pipe( ofType(fromActions.ApiKeyActionTypes.RESET_API_KEY, fromUserSessionActions.UserSessionActionTypes.LOGOUT_USER_SESSION_SUCCESS), mergeMap(() => this.apiKeysDatabaseService.remove(API_KEY_CONSTANTS.API_KEY_STATE).pipe( map(() => new fromActions.ApiKeyResetSuccess()), catchError((error) => of(new fromApplicationDataActions.PopulateError(error, ERROR_CONTEXTS.API_KEY))) ) ) ) ); }
)
denomination.ts
import { Component } from '@angular/core'; import { NavParams, ViewController } from 'ionic-angular'; import _ from 'lodash'; import { ApiProvider, ChainNetwork } from '../../providers/api/api'; @Component({ selector: 'denomination', templateUrl: 'denomination.html' }) export class
{ public units: any = []; public availableNetworks; public currencySymbol; public showUnits = false; constructor( public viewCtrl: ViewController, public api: ApiProvider, public navParams: NavParams ) {} public ionViewDidEnter() { this.currencySymbol = this.navParams.data.currencySymbol; this.availableNetworks = this.api.networkSettings.availableNetworks; this.showUnits = _.some( this.availableNetworks, this.api.networkSettings.selectedNetwork ); this.units = [ this.api.networkSettings.availableNetworks[0].chain, this.api.networkSettings.availableNetworks[1].chain ]; } public changeUnit(unit: string): void { this.currencySymbol = unit; this.viewCtrl.data = { ...this.viewCtrl.data, config: { chain: this.currencySymbol, network: this.navParams.data.config.network }, currencySymbol: this.currencySymbol }; this.viewCtrl.dismiss({ ...this.viewCtrl.data }); } public changeExplorer(chainNetwork: ChainNetwork): void { this.viewCtrl.dismiss({ chainNetwork, currencySymbol: this.currencySymbol }); } }
DenominationComponent
version.go
package accumulate //-go:generate go run ./internal/cmd/yaml2json -input=openrpc.yml -output=openrpc.json //go:generate go run ./internal/cmd/goimports const unknownVersion = "version unknown" var Version = unknownVersion var Commit string
func IsVersionKnown() bool { return Version != unknownVersion }
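Version and Commit above are plain package-level strings, so they are presumably meant to be stamped at build time via the linker's -X flag; a sketch of such an invocation, with the module path assumed from the package name (adjust to the real import path):

// go build -ldflags "\
//   -X gitlab.com/accumulatenetwork/accumulate.Version=v1.2.3 \
//   -X gitlab.com/accumulatenetwork/accumulate.Commit=0123abc"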
security_group_rule.py
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.common import exception from heat.common.i18n import _ from heat.engine import constraints from heat.engine import properties from heat.engine.resources.openstack.neutron import neutron from heat.engine import support from heat.engine import translation class SecurityGroupRule(neutron.NeutronResource): """A resource for managing Neutron security group rules. Rules to use in security group resource. """ required_service_extension = 'security-group' entity = 'security_group_rule' support_status = support.SupportStatus(version='7.0.0') PROPERTIES = ( SECURITY_GROUP, DESCRIPTION, DIRECTION, ETHERTYPE, PORT_RANGE_MIN, PORT_RANGE_MAX, PROTOCOL, REMOTE_GROUP, REMOTE_IP_PREFIX ) = ( 'security_group', 'description', 'direction', 'ethertype', 'port_range_min', 'port_range_max', 'protocol', 'remote_group', 'remote_ip_prefix' ) _allowed_protocols = list(range(256)) + [ 'ah', 'dccp', 'egp', 'esp', 'gre', 'icmp', 'icmpv6', 'igmp', 'ipv6-encap', 'ipv6-frag', 'ipv6-icmp', 'ipv6-nonxt', 'ipv6-opts', 'ipv6-route', 'ospf', 'pgm', 'rsvp', 'sctp', 'tcp', 'udp', 'udplite', 'vrrp' ] properties_schema = { SECURITY_GROUP: properties.Schema( properties.Schema.STRING, _('Security group name or ID to add rule.'), required=True, constraints=[ constraints.CustomConstraint('neutron.security_group') ] ), DESCRIPTION: properties.Schema( properties.Schema.STRING, _('Description of the security group rule.') ), DIRECTION: properties.Schema( properties.Schema.STRING, _('The direction in which the security group rule is applied. ' 'For a compute instance, an ingress security group rule ' 'matches traffic that is incoming (ingress) for that ' 'instance. An egress rule is applied to traffic leaving ' 'the instance.'), default='ingress', constraints=[ constraints.AllowedValues(['ingress', 'egress']), ] ), ETHERTYPE: properties.Schema( properties.Schema.STRING, _('Ethertype of the traffic.'), default='IPv4', constraints=[ constraints.AllowedValues(['IPv4', 'IPv6']), ] ), PORT_RANGE_MIN: properties.Schema( properties.Schema.INTEGER, _('The minimum port number in the range that is matched by the ' 'security group rule. If the protocol is TCP or UDP, this ' 'value must be less than or equal to the value of the ' 'port_range_max attribute. If the protocol is ICMP, this ' 'value must be an ICMP type.'), constraints=[ constraints.Range(0, 65535) ] ), PORT_RANGE_MAX: properties.Schema( properties.Schema.INTEGER, _('The maximum port number in the range that is matched by the ' 'security group rule. The port_range_min attribute constrains ' 'the port_range_max attribute. If the protocol is ICMP, this ' 'value must be an ICMP code.'), constraints=[ constraints.Range(0, 65535) ] ), PROTOCOL: properties.Schema( properties.Schema.STRING, _('The protocol that is matched by the security group rule. 
' 'Allowed values are ah, dccp, egp, esp, gre, icmp, icmpv6, ' 'igmp, ipv6-encap, ipv6-frag, ipv6-icmp, ipv6-nonxt, ipv6-opts, ' 'ipv6-route, ospf, pgm, rsvp, sctp, tcp, udp, udplite, vrrp ' 'and integer representations [0-255].'), default='tcp', constraints=[constraints.AllowedValues(_allowed_protocols)] ), REMOTE_GROUP: properties.Schema( properties.Schema.STRING, _('The remote group name or ID to be associated with this ' 'security group rule.'), constraints=[ constraints.CustomConstraint('neutron.security_group') ] ), REMOTE_IP_PREFIX: properties.Schema( properties.Schema.STRING, _('The remote IP prefix (CIDR) to be associated with this ' 'security group rule.'), constraints=[ constraints.CustomConstraint('net_cidr') ] ) } def translation_rules(self, props): client_plugin = self.client_plugin() return [ translation.TranslationRule( props, translation.TranslationRule.RESOLVE, [self.SECURITY_GROUP], client_plugin=client_plugin, finder='find_resourceid_by_name_or_id', entity=client_plugin.RES_TYPE_SECURITY_GROUP ), translation.TranslationRule( props, translation.TranslationRule.RESOLVE, [self.REMOTE_GROUP], client_plugin=client_plugin, finder='find_resourceid_by_name_or_id', entity=client_plugin.RES_TYPE_SECURITY_GROUP ), ] def validate(self): super(SecurityGroupRule, self).validate() if (self.properties[self.REMOTE_GROUP] is not None and self.properties[self.REMOTE_IP_PREFIX] is not None): raise exception.ResourcePropertyConflict( self.REMOTE_GROUP, self.REMOTE_IP_PREFIX) port_max = self.properties[self.PORT_RANGE_MAX] port_min = self.properties[self.PORT_RANGE_MIN] protocol = self.properties[self.PROTOCOL] if (port_max is not None and port_min is not None and protocol not in ('icmp', 'icmpv6', 'ipv6-icmp') and port_max < port_min): msg = _('The minimum port number must be less than or equal to ' 'the maximum port number.') raise exception.StackValidationFailed(message=msg) def handle_create(self):
def handle_delete(self): if self.resource_id is None: return with self.client_plugin().ignore_not_found: self.client().delete_security_group_rule(self.resource_id) def resource_mapping(): return { 'OS::Neutron::SecurityGroupRule': SecurityGroupRule }
props = self.prepare_properties( self.properties, self.physical_resource_name()) props['security_group_id'] = props.pop(self.SECURITY_GROUP) if self.REMOTE_GROUP in props: props['remote_group_id'] = props.pop(self.REMOTE_GROUP) for key in (self.PORT_RANGE_MIN, self.PORT_RANGE_MAX): if props.get(key) is not None: props[key] = str(props[key]) rule = self.client().create_security_group_rule( {'security_group_rule': props})['security_group_rule'] self.resource_id_set(rule['id'])
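The validate() method above enforces two rules: remote_group and remote_ip_prefix are mutually exclusive, and for non-ICMP protocols the minimum port may not exceed the maximum. A hedged sketch of the same checks in Go (all names illustrative, not part of Heat):

package main

import "fmt"

// validateRule mirrors SecurityGroupRule.validate: nil pointers stand in for
// unset (None) properties.
func validateRule(remoteGroup, remoteIPPrefix, protocol string, portMin, portMax *int) error {
	if remoteGroup != "" && remoteIPPrefix != "" {
		return fmt.Errorf("cannot define both remote_group and remote_ip_prefix")
	}
	icmp := map[string]bool{"icmp": true, "icmpv6": true, "ipv6-icmp": true}
	if portMin != nil && portMax != nil && !icmp[protocol] && *portMax < *portMin {
		return fmt.Errorf("the minimum port number must be less than or equal to the maximum port number")
	}
	return nil
}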
object.rs
//! Data structure of a scene node. use std::any::Any; use std::cell::RefCell; use std::rc::Rc; use std::path::Path; use gl::types::*; use na::{Pnt3, Pnt2, Vec3, Iso3}; use resource::{Texture, TextureManager, Material, Mesh}; use camera::Camera;
/// Set of data identifying a scene node. pub struct ObjectData { material: Rc<RefCell<Box<Material + 'static>>>, texture: Rc<Texture>, color: Pnt3<f32>, wlines: f32, wpoints: f32, draw_surface: bool, cull: bool, user_data: Box<Any + 'static> } impl ObjectData { /// The texture of this object. #[inline] pub fn texture<'a>(&'a self) -> &'a Rc<Texture> { &self.texture } /// The color of this object. #[inline] pub fn color<'a>(&'a self) -> &'a Pnt3<f32> { &self.color } /// The width of the lines draw for this object. #[inline] pub fn lines_width(&self) -> f32 { self.wlines } /// The size of the points draw for this object. #[inline] pub fn points_size(&self) -> f32 { self.wpoints } /// Whether this object has its surface rendered or not. #[inline] pub fn surface_rendering_active(&self) -> bool { self.draw_surface } /// Whether this object uses backface culling or not. #[inline] pub fn backface_culling_enabled(&self) -> bool { self.cull } /// An user-defined data. /// /// Use dynamic typing capabilities of the `Any` type to recover the actual data. #[inline] pub fn user_data<'a>(&'a self) -> &'a Any { &*self.user_data } } /// A 3d objects on the scene. /// /// This is the only interface to manipulate the object position, color, vertices and texture. pub struct Object { // FIXME: should Mesh and Object be merged? // (thus removing the need of ObjectData at all.) data: ObjectData, mesh: Rc<RefCell<Mesh>> } impl Object { #[doc(hidden)] pub fn new(mesh: Rc<RefCell<Mesh>>, r: f32, g: f32, b: f32, texture: Rc<Texture>, material: Rc<RefCell<Box<Material + 'static>>>) -> Object { let user_data = (); let data = ObjectData { color: Pnt3::new(r, g, b), texture: texture, wlines: 0.0, wpoints: 0.0, draw_surface: true, cull: true, material: material, user_data: Box::new(user_data) }; Object { data: data, mesh: mesh } } #[doc(hidden)] pub fn render(&self, transform: &Iso3<f32>, scale: &Vec3<f32>, pass: usize, camera: &mut Camera, light: &Light) { self.data.material.borrow_mut().render( pass, transform, scale, camera, light, &self.data, &mut *self.mesh.borrow_mut()); } /// Gets the data of this object. #[inline] pub fn data<'a>(&'a self) -> &'a ObjectData { &self.data } /// Gets the data of this object. #[inline] pub fn data_mut<'a>(&'a mut self) -> &'a mut ObjectData { &mut self.data } /// Enables or disables backface culling for this object. #[inline] pub fn enable_backface_culling(&mut self, active: bool) { self.data.cull = active; } /// Attaches user-defined data to this object. #[inline] pub fn set_user_data(&mut self, user_data: Box<Any + 'static>) { self.data.user_data = user_data; } /// Gets the material of this object. #[inline] pub fn material(&self) -> Rc<RefCell<Box<Material + 'static>>> { self.data.material.clone() } /// Sets the material of this object. #[inline] pub fn set_material(&mut self, material: Rc<RefCell<Box<Material + 'static>>>) { self.data.material = material; } /// Sets the width of the lines drawn for this object. #[inline] pub fn set_lines_width(&mut self, width: f32) { self.data.wlines = width } /// Returns the width of the lines drawn for this object. #[inline] pub fn lines_width(&self) -> f32 { self.data.wlines } /// Sets the size of the points drawn for this object. #[inline] pub fn set_points_size(&mut self, size: f32) { self.data.wpoints = size } /// Returns the size of the points drawn for this object. #[inline] pub fn points_size(&self) -> f32 { self.data.wpoints } /// Activate or deactivate the rendering of this object surface. 
#[inline] pub fn set_surface_rendering_activation(&mut self, active: bool) { self.data.draw_surface = active } /// Activate or deactivate the rendering of this object surface. #[inline] pub fn surface_rendering_activation(&self) -> bool { self.data.draw_surface } /// This object's mesh. #[inline] pub fn mesh<'a>(&'a self) -> &'a Rc<RefCell<Mesh>> { &self.mesh } /// Mutably access the object's vertices. #[inline(always)] pub fn modify_vertices<F: FnMut(&mut Vec<Pnt3<GLfloat>>) -> ()>(&mut self, f: &mut F) { let bmesh = self.mesh.borrow_mut(); let _ = bmesh.coords().write().unwrap().data_mut().as_mut().map(|coords| f(coords)); } /// Access the object's vertices. #[inline(always)] pub fn read_vertices<F: FnMut(&[Pnt3<GLfloat>]) -> ()>(&self, f: &mut F) { let bmesh = self.mesh.borrow(); let _ = bmesh.coords().read().unwrap().data().as_ref().map(|coords| f(&coords[..])); } /// Recomputes the normals of this object's mesh. #[inline] pub fn recompute_normals(&mut self) { self.mesh.borrow_mut().recompute_normals(); } /// Mutably access the object's normals. #[inline(always)] pub fn modify_normals<F: FnMut(&mut Vec<Vec3<GLfloat>>) -> ()>(&mut self, f: &mut F) { let bmesh = self.mesh.borrow_mut(); let _ = bmesh.normals().write().unwrap().data_mut().as_mut().map(|normals| f(normals)); } /// Access the object's normals. #[inline(always)] pub fn read_normals<F: FnMut(&[Vec3<GLfloat>]) -> ()>(&self, f: &mut F) { let bmesh = self.mesh.borrow(); let _ = bmesh.normals().read().unwrap().data().as_ref().map(|normals| f(&normals[..])); } /// Mutably access the object's faces. #[inline(always)] pub fn modify_faces<F: FnMut(&mut Vec<Pnt3<GLuint>>) -> ()>(&mut self, f: &mut F) { let bmesh = self.mesh.borrow_mut(); let _ = bmesh.faces().write().unwrap().data_mut().as_mut().map(|faces| f(faces)); } /// Access the object's faces. #[inline(always)] pub fn read_faces<F: FnMut(&[Pnt3<GLuint>]) -> ()>(&self, f: &mut F) { let bmesh = self.mesh.borrow(); let _ = bmesh.faces().read().unwrap().data().as_ref().map(|faces| f(&faces[..])); } /// Mutably access the object's texture coordinates. #[inline(always)] pub fn modify_uvs<F: FnMut(&mut Vec<Pnt2<GLfloat>>) -> ()>(&mut self, f: &mut F) { let bmesh = self.mesh.borrow_mut(); let _ = bmesh.uvs().write().unwrap().data_mut().as_mut().map(|uvs| f(uvs)); } /// Access the object's texture coordinates. #[inline(always)] pub fn read_uvs<F: FnMut(&[Pnt2<GLfloat>]) -> ()>(&self, f: &mut F) { let bmesh = self.mesh.borrow(); let _ = bmesh.uvs().read().unwrap().data().as_ref().map(|uvs| f(&uvs[..])); } /// Sets the color of the object. /// /// Colors components must be on the range `[0.0, 1.0]`. #[inline] pub fn set_color(&mut self, r: f32, g: f32, b: f32) { self.data.color.x = r; self.data.color.y = g; self.data.color.z = b; } /// Sets the texture of the object. /// /// The texture is loaded from a file and registered by the global `TextureManager`. /// /// # Arguments /// * `path` - relative path of the texture on the disk #[inline] pub fn set_texture_from_file(&mut self, path: &Path, name: &str) { let texture = TextureManager::get_global_manager(|tm| tm.add(path, name)); self.set_texture(texture) } /// Sets the texture of the object. /// /// The texture must already have been registered as `name`. #[inline] pub fn set_texture_with_name(&mut self, name: &str) { let texture = TextureManager::get_global_manager(|tm| tm.get(name).unwrap_or_else( || panic!("Invalid attempt to use the unregistered texture: {}", name))); self.set_texture(texture) } /// Sets the texture of the object. 
#[inline] pub fn set_texture(&mut self, texture: Rc<Texture>) { self.data.texture = texture } }
use light::Light;

#[path = "../error.rs"]
mod error;
cgi-bin.express.delivery.template.go
import "reflect" type cgiBinExpressDeliveryTemplate struct { AccessToken string } func (t *cgiBinExpressDeliveryTemplate) set(k, v string) { _value := reflect.ValueOf(t).Elem() _type := reflect.TypeOf(t).Elem() if _, ok := _type.FieldByName(k); ok { _field := _value.FieldByName(k) _field.SetString(v) } } // 方法 -------------------------------------------------------------------- // Preview 预览面单模板。用于调试面单模板使用。 // POST https://api.weixin.qq.com/cgi-bin/express/delivery/template/preview?access_token=ACCESS_TOKEN func (t *cgiBinExpressDeliveryTemplate) Preview() { }
package wxapi
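Because set looks fields up by their exported names via reflection, unknown keys are silently ignored. A short in-package usage sketch (illustrative; the type is unexported, so this only compiles inside package wxapi):

package wxapi

// exampleSet shows the reflective setter in action (illustrative only).
func exampleSet() *cgiBinExpressDeliveryTemplate {
	t := &cgiBinExpressDeliveryTemplate{}
	t.set("AccessToken", "ACCESS_TOKEN_VALUE") // field exists: value is set
	t.set("NoSuchField", "ignored")            // no such field: call is a no-op
	return t
}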
vm_error.rs
use crate::panic::BoxedPanic; use crate::{ AccessError, Hash, Integer, Panic, Protocol, StackError, TypeInfo, Unit, Value, ValueType, VmHaltInfo, }; use std::sync::Arc; use thiserror::Error; /// Errors raised by the execution of the virtual machine. #[derive(Error, Debug)] #[error(transparent)] pub struct VmError { kind: Box<VmErrorKind>, } impl VmError { /// Return an error encapsulating a panic. pub fn panic<D>(message: D) -> Self where D: BoxedPanic, { Self::from(VmErrorKind::Panic { reason: Panic::custom(message), }) } /// Bad argument. pub fn bad_argument<T>(arg: usize, value: &Value) -> Result<Self, VmError> where T: ValueType, { Ok(Self::from(VmErrorKind::BadArgumentType { arg, expected: T::type_info(), actual: value.type_info()?, })) } /// Construct an expected error. pub fn
<T>(actual: TypeInfo) -> Self where T: ValueType, { Self::from(VmErrorKind::Expected { expected: T::type_info(), actual, }) } /// Construct an expected any error. pub fn expected_any(actual: TypeInfo) -> Self { Self::from(VmErrorKind::ExpectedAny { actual }) } /// Access the underlying error kind. pub fn kind(&self) -> &VmErrorKind { &*self.kind } /// Convert into an unwinded vm error. pub fn into_unwinded(self, unit: &Arc<Unit>, ip: usize) -> Self { if let VmErrorKind::Unwound { .. } = &*self.kind { return self; } Self::from(VmErrorKind::Unwound { kind: self.kind, unit: unit.clone(), ip, }) } /// Unpack an unwinded error, if it is present. pub fn into_unwound(self) -> (Self, Option<(Arc<Unit>, usize)>) { match *self.kind { VmErrorKind::Unwound { kind, unit, ip } => { let error = Self { kind }; (error, Some((unit, ip))) } kind => (Self::from(kind), None), } } /// Unsmuggles the vm error, returning Ok(Self) in case the error is /// critical and should be propagated unaltered. pub fn unpack_critical(self) -> Result<Self, Self> { if self.is_critical() { Err(self) } else { Ok(self) } } /// Test if the error is critical and should be propagated unaltered or not. /// /// Returns `true` if the error should be propagated. fn is_critical(&self) -> bool { match &*self.kind { VmErrorKind::Panic { .. } => true, VmErrorKind::Unwound { .. } => true, _ => false, } } } impl<E> From<E> for VmError where VmErrorKind: From<E>, { fn from(err: E) -> Self { Self { kind: Box::new(VmErrorKind::from(err)), } } } /// The kind of error encountered. #[derive(Debug, Error)] pub enum VmErrorKind { /// A vm error that was propagated from somewhere else. /// /// In order to represent this, we need to preserve the instruction pointer /// and eventually unit from where the error happened. #[error("{kind} (at {ip})")] Unwound { /// The wrapper error. kind: Box<VmErrorKind>, /// Associated unit. unit: Arc<Unit>, /// The instruction pointer of where the original error happened. ip: usize, }, /// The virtual machine panicked for a specific reason. #[error("panicked `{reason}`")] Panic { /// The reason for the panic. reason: Panic, }, /// Raised when we try to access an empty execution. #[error("no running virtual machines")] NoRunningVm, /// The virtual machine stopped for an unexpected reason. #[error("halted for unexpected reason `{halt}`")] Halted { /// The reason why the virtual machine stopped. halt: VmHaltInfo, }, /// Error raised when external format function results in error. #[error("failed to format argument")] FormatError, /// Error raised when interacting with the stack. #[error("stack error: {error}")] StackError { /// The source error. #[from] error: StackError, }, /// The virtual machine encountered a numerical overflow. #[error("numerical overflow")] Overflow, /// The virtual machine encountered a numerical underflow. #[error("numerical underflow")] Underflow, /// The virtual machine encountered a divide-by-zero. #[error("division by zero")] DivideByZero, /// Failure to lookup function. #[error("missing function with hash `{hash}`")] MissingFunction { /// Hash of function to look up. hash: Hash, }, /// Failure to lookup instance function. #[error("missing instance function `{hash}` for `{instance}`")] MissingInstanceFunction { /// Hash of function to look up. hash: Hash, /// The instance type we tried to look up function on. instance: TypeInfo, }, /// Instruction pointer went out-of-bounds. 
#[error("instruction pointer is out-of-bounds")] IpOutOfBounds, /// Tried to await something on the stack which can't be await:ed. #[error("unsupported target for .await `{actual}`")] UnsupportedAwait { /// The actual target. actual: TypeInfo, }, /// Unsupported binary operation. #[error("unsupported vm operation `{lhs} {op} {rhs}`")] UnsupportedBinaryOperation { /// Operation. op: &'static str, /// Left-hand side operator. lhs: TypeInfo, /// Right-hand side operator. rhs: TypeInfo, }, /// Unsupported unary operation. #[error("unsupported vm operation `{op}{operand}`")] UnsupportedUnaryOperation { /// Operation. op: &'static str, /// Operand. operand: TypeInfo, }, /// Protocol not implemented on type. #[error("`{actual}` does not implement the `{protocol}` protocol")] MissingProtocol { /// The missing protocol. protocol: Protocol, /// The encountered argument. actual: TypeInfo, }, /// Indicates that a static string is missing for the given slot. #[error("static string slot `{slot}` does not exist")] MissingStaticString { /// Slot which is missing a static string. slot: usize, }, /// Indicates that a static object keys is missing for the given slot. #[error("static object keys slot `{slot}` does not exist")] MissingStaticObjectKeys { /// Slot which is missing a static object keys. slot: usize, }, /// Wrong number of arguments provided in call. #[error("wrong number of arguments `{actual}`, expected `{expected}`")] BadArgumentCount { /// The actual number of arguments. actual: usize, /// The expected number of arguments. expected: usize, }, /// Failure to convert from one type to another. #[error("bad argument #{arg}, expected `{expected}` but got `{actual}`")] BadArgumentType { /// The argument location that was converted. arg: usize, /// The argument type we expected. expected: TypeInfo, /// The argument type we got. actual: TypeInfo, }, /// Failure to convert from one type to another. #[error("bad argument #{arg} (expected `{to}`): {error}")] BadArgument { /// The underlying stack error. #[source] error: VmError, /// The argument location that was converted. arg: usize, /// The native type we attempt to convert to. to: &'static str, }, /// Failure to convert return value. #[error("bad return value (expected `{ret}`): {error}")] BadReturn { /// Error describing the failed conversion. #[source] error: VmError, /// Type of the return value we attempted to convert. ret: &'static str, }, /// An index set operation that is not supported. #[error("the index set operation `{target}[{index}] = {value}` is not supported")] UnsupportedIndexSet { /// The target type to set. target: TypeInfo, /// The index to set. index: TypeInfo, /// The value to set. value: TypeInfo, }, /// An index get operation that is not supported. #[error("the index get operation `{target}[{index}]` is not supported")] UnsupportedIndexGet { /// The target type to get. target: TypeInfo, /// The index to get. index: TypeInfo, }, /// An tuple index get operation that is not supported. #[error("the tuple index get operation is not supported on `{target}`")] UnsupportedTupleIndexGet { /// The target type we tried to perform the tuple indexing on. target: TypeInfo, }, /// An tuple index set operation that is not supported. #[error("the tuple index set operation is not supported on `{target}`")] UnsupportedTupleIndexSet { /// The target type we tried to perform the tuple indexing on. target: TypeInfo, }, /// An object slot index get operation that is not supported. 
#[error("field not available on `{target}`")] UnsupportedObjectSlotIndexGet { /// The target type we tried to perform the object indexing on. target: TypeInfo, }, /// An is operation is not supported. #[error("`{value} is {test_type}` is not supported")] UnsupportedIs { /// The argument that is not supported. value: TypeInfo, /// The type that is not supported. test_type: TypeInfo, }, /// Encountered a value that could not be called as a function #[error("`{actual_type}` cannot be called since it's not a function")] UnsupportedCallFn { /// The type that could not be called. actual_type: TypeInfo, }, /// Tried to fetch an index in an object that doesn't exist. #[error("missing index by static string slot `{slot}` in object")] ObjectIndexMissing { /// The static string slot corresponding to the index that is missing. slot: usize, }, /// Tried to access an index that was missing on a type. #[error("missing index `{}` on `{target}`")] MissingIndex { /// Type where field did not exist. target: TypeInfo, /// Index that we tried to access. index: Integer, }, /// Missing a struct field. #[error("missing field `{field}` on `{target}`")] MissingField { /// Type where field did not exist. target: TypeInfo, /// Field that was missing. field: String, }, /// Error raised when we try to unwrap something that is not an option or /// result. #[error("expected result or option with value to unwrap, but got `{actual}`")] UnsupportedUnwrap { /// The actual operand. actual: TypeInfo, }, /// Error raised when we try to unwrap an Option that is not Some. #[error("expected Some value, but got `None`")] UnsupportedUnwrapNone, /// Error raised when we try to unwrap a Result that is not Ok. #[error("expected Ok value, but got `Err({err})`")] UnsupportedUnwrapErr { /// The error variant. err: TypeInfo, }, /// Value is not supported for `is-value` test. #[error("expected result or option as value, but got `{actual}`")] UnsupportedIsValueOperand { /// The actual operand. actual: TypeInfo, }, /// Trying to resume a generator that has completed. #[error("cannot resume a generator that has completed")] GeneratorComplete, /// Trying to access an inaccessible reference. #[error("failed to access value: {error}")] AccessError { /// Source error. #[from] error: AccessError, }, /// Error raised when we expected one type, but got another. #[error("expected `{expected}`, but found `{actual}`")] Expected { /// The expected value type info. expected: TypeInfo, /// The actual type found. actual: TypeInfo, }, /// Error raised when we expected a value. #[error("expected `Any` type, but found `{actual}`")] ExpectedAny { /// The actual type observed instead. actual: TypeInfo, }, /// Failure to convert a number into an integer. #[error("failed to convert value `{from}` to integer `{to}`")] ValueToIntegerCoercionError { /// Number we tried to convert from. from: Integer, /// Number type we tried to convert to. to: &'static str, }, /// Failure to convert an integer into a value. #[error("failed to convert integer `{from}` to value `{to}`")] IntegerToValueCoercionError { /// Number we tried to convert from. from: Integer, /// Number type we tried to convert to. to: &'static str, }, /// Error raised when we expected an tuple of the given length. #[error("expected a tuple of length `{expected}`, but found one with length `{actual}`")] ExpectedTupleLength { /// The actual length observed. actual: usize, /// The expected tuple length. expected: usize, }, /// Internal error that happens when we run out of items in a list. 
#[error("unexpectedly ran out of items to iterate over")] IterationError, } impl VmErrorKind { /// Unpack an unwound error, if it is present. pub fn into_unwound_ref(&self) -> (&Self, Option<(Arc<Unit>, usize)>) { match self { VmErrorKind::Unwound { kind, unit, ip } => (&*kind, Some((unit.clone(), *ip))), kind => (kind, None), } } }
expected
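One design point worth noting in the VmError type above: the kind lives behind a Box, so the error value itself stays one pointer wide no matter how large VmErrorKind grows, which keeps Result<T, VmError> cheap to move. The same indirection translates to other languages; a rough Go analogue (not part of the source):

package main

import "fmt"

// errKind stands in for a large discriminated kind like VmErrorKind.
type errKind struct {
	msg string
	// ...many more fields in practice
}

// vmError stays pointer-sized regardless of errKind's size, mirroring
// the Box<VmErrorKind> indirection.
type vmError struct {
	kind *errKind
}

func (e *vmError) Error() string { return e.kind.msg }

func main() {
	var err error = &vmError{kind: &errKind{msg: "panicked `oh no`"}}
	fmt.Println(err)
}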
service_test.go
package libs import ( "context" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/ecs" ecsTypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/onsi/gomega" "strings" "testing" "time" ) type ec2ClientMock struct{} func (e ec2ClientMock) StopInstances(ctx context.Context, params *ec2.StopInstancesInput, optFns ...func(*ec2.Options)) (*ec2.StopInstancesOutput, error) { return &ec2.StopInstancesOutput{ StoppingInstances: []types.InstanceStateChange{ { CurrentState: &types.InstanceState{ Code: nil, Name: "stopped", }, InstanceId: aws.String(params.InstanceIds[0]), PreviousState: nil, }, }, }, nil } func (e ec2ClientMock) StartInstances(ctx context.Context, params *ec2.StartInstancesInput, optFns ...func(*ec2.Options)) (*ec2.StartInstancesOutput, error) { return &ec2.StartInstancesOutput{ StartingInstances: []types.InstanceStateChange{ { CurrentState: &types.InstanceState{ Code: nil, Name: "running", }, InstanceId: aws.String(params.InstanceIds[0]), PreviousState: nil, }, }, }, nil } type fargateClientMock struct{} func (f fargateClientMock) UpdateService(ctx context.Context, params *ecs.UpdateServiceInput, optFns ...func(*ecs.Options)) (*ecs.UpdateServiceOutput, error) { var status string if *params.DesiredCount == 0 { status = "INACTIVE" } else { status = "ACTIVE" } return &ecs.UpdateServiceOutput{ Service: &ecsTypes.Service{ RunningCount: *params.DesiredCount, ServiceName: params.Service, Status: aws.String(status), }, }, nil } type messageBusMock struct{} func (b messageBusMock) Send(message string) error { return nil } func Test_scheduler(t *testing.T)
{ g := gomega.NewGomegaWithT(t) t.Run("should schedule resources as wake and send the report", func(t *testing.T) { config := &SchedulerConfig{ Report: Report{ SendReport: true, Hour: 9, }, Period: Period{ Pattern: officeHours, }, TimeZone: "Europe/Helsinki", Resources: map[string]Resource{ "i-123": { Type: elasticComputeCloud, Identifier: "i-123", }, "i-456": { Type: elasticComputeCloud, Identifier: "i-456", }, "cluster-name:service-name": { Type: fargate, Identifier: "cluster-name:service-name", }, }, } client := &SchedulerConfigClient{ Config: config, now: func() time.Time { return insideOfficeHourAndDay }, } factory := ResourceClientsFactory{} factory[elasticComputeCloud] = &EC2{client: ec2ClientMock{}} factory[fargate] = &Fargate{client: fargateClientMock{}} service := SchedulerService{ Config: config, SchedulerConfigClient: client, Factory: factory, MessageBus: messageBusMock{}, } got, err := service.Execute() if err != nil { t.Error(err) return } report, err := service.sendReport(got) if err != nil { t.Error(err) } g.Expect(len(got)).To(gomega.Equal(3)) g.Expect(got["i-123"].State).To(gomega.Equal("running")) g.Expect(got["service-name"].State).To(gomega.Equal("1")) g.Expect(strings.Contains(report, "EC2 with id i-456 has status: running")).To(gomega.Equal(true)) g.Expect(strings.Contains(report, "EC2 with id i-123 has status: running")).To(gomega.Equal(true)) g.Expect(strings.Contains(report, "Fargate with id service-name has tasks running count of: 1")).To(gomega.Equal(true)) }) t.Run("should schedule resources as sleeping and not print the report", func(t *testing.T) { config := &SchedulerConfig{ Report: Report{ SendReport: true, Hour: 9, }, Period: Period{ Pattern: officeHours, }, TimeZone: "Europe/Helsinki", Resources: map[string]Resource{ "i-123": { Type: elasticComputeCloud, Identifier: "i-123", }, "i-456": { Type: elasticComputeCloud, Identifier: "i-456", }, "cluster-name:service-name": { Type: fargate, Identifier: "cluster-name:service-name", }, }, } client := &SchedulerConfigClient{ Config: config, now: func() time.Time { return outsideOfficeHourButNotDay }, } factory := ResourceClientsFactory{} factory[elasticComputeCloud] = &EC2{client: ec2ClientMock{}} factory[fargate] = &Fargate{client: fargateClientMock{}} service := SchedulerService{ Config: config, SchedulerConfigClient: client, Factory: factory, MessageBus: messageBusMock{}, } got, err := service.Execute() if err != nil { t.Error(err) return } report, err := service.sendReport(got) if err != nil { t.Error(err) } g.Expect(len(got)).To(gomega.Equal(3)) g.Expect(got["i-123"].State).To(gomega.Equal("stopped")) g.Expect(got["service-name"].State).To(gomega.Equal("0")) g.Expect(report).To(gomega.Equal("")) }) }
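The mocks above can stand in for the real AWS clients only because the EC2 and Fargate wrappers consume them through interfaces. Those interfaces are not shown in this excerpt; inferred from the mock method sets, they would look roughly like this (shapes assumed, the real definitions may differ):

package libs

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

// Assumed interface satisfied by ec2ClientMock and the real *ec2.Client.
type ec2ClientIface interface {
	StopInstances(ctx context.Context, params *ec2.StopInstancesInput, optFns ...func(*ec2.Options)) (*ec2.StopInstancesOutput, error)
	StartInstances(ctx context.Context, params *ec2.StartInstancesInput, optFns ...func(*ec2.Options)) (*ec2.StartInstancesOutput, error)
}

// Assumed interface satisfied by fargateClientMock and the real *ecs.Client.
type fargateClientIface interface {
	UpdateService(ctx context.Context, params *ecs.UpdateServiceInput, optFns ...func(*ecs.Options)) (*ecs.UpdateServiceOutput, error)
}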
LanguageRule.ts
import { CriticalDataError, TransformerPipe } from "@hovoh/ts-data-pipeline";
import { Tweet } from "../twitter/entities/tweet.entity";
import { LangCode } from "../labeling/language-detection-results.entity";

const supportedLanguages: LangCode[] = ["en"];

const isLangSupported = (tweetLang: string) => {
  return Boolean(
    tweetLang && supportedLanguages.find(lang => lang === tweetLang)
  );
};

export class LanguageRule extends TransformerPipe<Tweet, Tweet> {
  async transform(tweet: Tweet): Promise<Tweet> {
    this.assertSupportedLang(tweet);
    return tweet;
  }

  assertSupportedLang(tweet: Tweet) {
    if (!isLangSupported(tweet.lang)) {
      throw new CriticalDataError("Language not supported", {
        lang: tweet.lang,
        text: tweet.text
      });
    }
  }
}
allocator.go
package allocator // import "go.universe.tf/metallb/pkg/allocator" import ( "errors" "fmt" "math" "net" "strings" "go.universe.tf/metallb/pkg/config" "github.com/mikioh/ipaddr" ) // An Allocator tracks IP address pools and allocates addresses from them. type Allocator struct { pools map[string]*config.Pool allocated map[string]*alloc // svc -> alloc sharingKeyForIP map[string]*key // ip.String() -> assigned sharing key portsInUse map[string]map[Port]string // ip.String() -> Port -> svc servicesOnIP map[string]map[string]bool // ip.String() -> svc -> allocated? poolIPsInUse map[string]map[string]int // poolName -> ip.String() -> number of users } // Port represents one port in use by a service. type Port struct { Proto string Port int } // String returns a text description of the port. func (p Port) String() string { return fmt.Sprintf("%s/%d", p.Proto, p.Port) } type key struct { sharing string backend string } type alloc struct { pool string ip net.IP ports []Port key } // New returns an Allocator managing no pools. func New() *Allocator { return &Allocator{ pools: map[string]*config.Pool{}, allocated: map[string]*alloc{}, sharingKeyForIP: map[string]*key{}, portsInUse: map[string]map[Port]string{}, servicesOnIP: map[string]map[string]bool{}, poolIPsInUse: map[string]map[string]int{}, } } // SetPools updates the set of address pools that the allocator owns. func (a *Allocator) SetPools(pools map[string]*config.Pool) error { // All the fancy sharing stuff only influences how new allocations // can be created. For changing the underlying configuration, the // only question we have to answer is: can we fit all allocated // IPs into address pools under the new configuration? for svc, alloc := range a.allocated { if poolFor(pools, alloc.ip) == "" { return fmt.Errorf("new config not compatible with assigned IPs: service %q cannot own %q under new config", svc, alloc.ip) } } for n := range a.pools { if pools[n] == nil { stats.poolCapacity.DeleteLabelValues(n) stats.poolActive.DeleteLabelValues(n) stats.poolAllocated.DeleteLabelValues(n) } } a.pools = pools // Need to rearrange existing pool mappings and counts for svc, alloc := range a.allocated { pool := poolFor(a.pools, alloc.ip) if pool != alloc.pool { a.Unassign(svc) alloc.pool = pool // Use the internal assign, we know for a fact the IP is // still usable. a.assign(svc, alloc) } } // Refresh or initiate stats for n, p := range a.pools { stats.poolCapacity.WithLabelValues(n).Set(float64(poolCount(p))) stats.poolActive.WithLabelValues(n).Set(float64(len(a.poolIPsInUse[n]))) } return nil } // assign unconditionally updates internal state to reflect svc's // allocation of alloc. Caller must ensure that this call is safe. 
func (a *Allocator) assign(svc string, alloc *alloc) { a.Unassign(svc) a.allocated[svc] = alloc a.sharingKeyForIP[alloc.ip.String()] = &alloc.key if a.portsInUse[alloc.ip.String()] == nil { a.portsInUse[alloc.ip.String()] = map[Port]string{} } for _, port := range alloc.ports { a.portsInUse[alloc.ip.String()][port] = svc } if a.servicesOnIP[alloc.ip.String()] == nil { a.servicesOnIP[alloc.ip.String()] = map[string]bool{} } a.servicesOnIP[alloc.ip.String()][svc] = true if a.poolIPsInUse[alloc.pool] == nil { a.poolIPsInUse[alloc.pool] = map[string]int{} } a.poolIPsInUse[alloc.pool][alloc.ip.String()]++ stats.poolCapacity.WithLabelValues(alloc.pool).Set(float64(poolCount(a.pools[alloc.pool]))) stats.poolActive.WithLabelValues(alloc.pool).Set(float64(len(a.poolIPsInUse[alloc.pool]))) } // Assign assigns the requested ip to svc, if the assignment is // permissible by sharingKey and backendKey. func (a *Allocator) Assign(svc string, ip net.IP, ports []Port, sharingKey, backendKey string) error { pool := poolFor(a.pools, ip) if pool == "" { return fmt.Errorf("%q is not allowed in config", ip) } sk := &key{ sharing: sharingKey, backend: backendKey, } // Does the IP already have allocs? If so, needs to be the same // sharing key, and have non-overlapping ports. If not, the // proposed IP needs to be allowed by configuration. if existingSK := a.sharingKeyForIP[ip.String()]; existingSK != nil { if err := sharingOK(existingSK, sk); err != nil { // Sharing key is incompatible. However, if the owner is // the same service, and is the only user of the IP, we // can just update its sharing key in place. var otherSvcs []string for otherSvc := range a.servicesOnIP[ip.String()] { if otherSvc != svc { otherSvcs = append(otherSvcs, otherSvc) } } if len(otherSvcs) > 0 { return fmt.Errorf("can't change sharing key for %q, address also in use by %s", svc, strings.Join(otherSvcs, ",")) } } for _, port := range ports { if curSvc, ok := a.portsInUse[ip.String()][port]; ok && curSvc != svc { return fmt.Errorf("port %s is already in use on %q", port, ip) } } } // Either the IP is entirely unused, or the requested use is // compatible with existing uses. Assign! But unassign first, in // case we're mutating an existing service (see the "already have // an allocation" block above). Unassigning is idempotent, so it's // unconditionally safe to do. alloc := &alloc{ pool: pool, ip: ip, ports: make([]Port, len(ports)), key: *sk, } for i, port := range ports { port := port alloc.ports[i] = port } a.assign(svc, alloc) return nil } // Unassign frees the IP associated with service, if any. func (a *Allocator) Unassign(svc string) bool { if a.allocated[svc] == nil { return false } al := a.allocated[svc] delete(a.allocated, svc) for _, port := range al.ports { if curSvc := a.portsInUse[al.ip.String()][port]; curSvc != svc { panic(fmt.Sprintf("incoherent state, I thought port %q belonged to service %q, but it seems to belong to %q", port, svc, curSvc)) } delete(a.portsInUse[al.ip.String()], port) } delete(a.servicesOnIP[al.ip.String()], svc) if len(a.portsInUse[al.ip.String()]) == 0 { delete(a.portsInUse, al.ip.String()) delete(a.sharingKeyForIP, al.ip.String()) } a.poolIPsInUse[al.pool][al.ip.String()]-- if a.poolIPsInUse[al.pool][al.ip.String()] == 0 { // Explicitly delete unused IPs from the pool, so that len() // is an accurate count of IPs in use. 
delete(a.poolIPsInUse[al.pool], al.ip.String()) } stats.poolActive.WithLabelValues(al.pool).Set(float64(len(a.poolIPsInUse[al.pool]))) return true } func cidrIsIPv6(cidr *net.IPNet) bool { return cidr.IP.To4() == nil } func ipIsIPv6(ip net.IP) bool { return ip.To4() == nil } // AllocateFromPool assigns an available IP from pool to service. func (a *Allocator) AllocateFromPool(svc string, isIPv6 bool, poolName string, ports []Port, sharingKey, backendKey string) (net.IP, error) { if alloc := a.allocated[svc]; alloc != nil { // Handle the case where the svc has already been assigned an IP but from the wrong family. // This "should-not-happen" since the "ipFamily" is an immutable field in services. if isIPv6 != ipIsIPv6(alloc.ip) { return nil, fmt.Errorf("IP for wrong family assigned %s", alloc.ip.String()) } if err := a.Assign(svc, alloc.ip, ports, sharingKey, backendKey); err != nil {
} return alloc.ip, nil } pool := a.pools[poolName] if pool == nil { return nil, fmt.Errorf("unknown pool %q", poolName) } for _, cidr := range pool.CIDR { if cidrIsIPv6(cidr) != isIPv6 { // Not the right ip-family continue } c := ipaddr.NewCursor([]ipaddr.Prefix{*ipaddr.NewPrefix(cidr)}) for pos := c.First(); pos != nil; pos = c.Next() { ip := pos.IP if pool.AvoidBuggyIPs && ipConfusesBuggyFirmwares(ip) { continue } // Somewhat inefficiently brute-force by invoking the // IP-specific allocator. if err := a.Assign(svc, ip, ports, sharingKey, backendKey); err == nil { return ip, nil } } } // Woops, run out of IPs :( Fail. return nil, fmt.Errorf("no available IPs in pool %q", poolName) } // Allocate assigns any available and assignable IP to service. func (a *Allocator) Allocate(svc string, isIPv6 bool, ports []Port, sharingKey, backendKey string) (net.IP, error) { if alloc := a.allocated[svc]; alloc != nil { if err := a.Assign(svc, alloc.ip, ports, sharingKey, backendKey); err != nil { return nil, err } return alloc.ip, nil } for poolName := range a.pools { if !a.pools[poolName].AutoAssign { continue } if ip, err := a.AllocateFromPool(svc, isIPv6, poolName, ports, sharingKey, backendKey); err == nil { return ip, nil } } return nil, errors.New("no available IPs") } // IP returns the IP address allocated to service, or nil if none are allocated. func (a *Allocator) IP(svc string) net.IP { if alloc := a.allocated[svc]; alloc != nil { return alloc.ip } return nil } // Pool returns the pool from which service's IP was allocated. If // service has no IP allocated, "" is returned. func (a *Allocator) Pool(svc string) string { ip := a.IP(svc) if ip == nil { return "" } return poolFor(a.pools, ip) } func sharingOK(existing, new *key) error { if existing.sharing == "" { return errors.New("existing service does not allow sharing") } if new.sharing == "" { return errors.New("new service does not allow sharing") } if existing.sharing != new.sharing { return fmt.Errorf("sharing key %q does not match existing sharing key %q", new.sharing, existing.sharing) } if existing.backend != new.backend { return fmt.Errorf("backend key %q does not match existing sharing key %q", new.backend, existing.backend) } return nil } // poolCount returns the number of addresses in the pool. func poolCount(p *config.Pool) int64 { var total int64 for _, cidr := range p.CIDR { o, b := cidr.Mask.Size() if b-o >= 62 { // An enormous ipv6 range is allocated which will never run out. // Just return max to avoid any math errors. return math.MaxInt64 } sz := int64(math.Pow(2, float64(b-o))) cur := ipaddr.NewCursor([]ipaddr.Prefix{*ipaddr.NewPrefix(cidr)}) firstIP := cur.First().IP lastIP := cur.Last().IP if p.AvoidBuggyIPs { if o <= 24 { // A pair of buggy IPs occur for each /24 present in the range. buggies := int64(math.Pow(2, float64(24-o))) * 2 sz -= buggies } else { // Ranges smaller than /24 contain 1 buggy IP if they // start/end on a /24 boundary, otherwise they contain // none. if ipConfusesBuggyFirmwares(firstIP) { sz-- } if ipConfusesBuggyFirmwares(lastIP) { sz-- } } } total += sz } return total } // poolFor returns the pool that owns the requested IP, or "" if none. 
func poolFor(pools map[string]*config.Pool, ip net.IP) string { for pname, p := range pools { if p.AvoidBuggyIPs && ipConfusesBuggyFirmwares(ip) { continue } for _, cidr := range p.CIDR { if cidr.Contains(ip) { return pname } } } return "" } func portsEqual(a, b []Port) bool { if len(a) != len(b) { return false } for i := range a { if a[i] != b[i] { return false } } return true } // ipConfusesBuggyFirmwares returns true if ip is an IPv4 address ending in 0 or 255. // // Such addresses can confuse smurf protection on crappy CPE // firmwares, leading to packet drops. func ipConfusesBuggyFirmwares(ip net.IP) bool { ip = ip.To4() if ip == nil { return false } return ip[3] == 0 || ip[3] == 255 }
return nil, err
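Taken together, the typical lifecycle of the allocator is New, SetPools, then Assign or Allocate per service and Unassign on teardown. A minimal sketch using only the API shown above (pool construction elided, since config.Pool parsing lives outside this file):

package main

import (
	"log"
	"net"

	"go.universe.tf/metallb/pkg/allocator"
	"go.universe.tf/metallb/pkg/config"
)

func example(pools map[string]*config.Pool) {
	a := allocator.New()
	// SetPools fails if already-assigned IPs don't fit the new config.
	if err := a.SetPools(pools); err != nil {
		log.Fatal(err)
	}
	ports := []allocator.Port{{Proto: "tcp", Port: 80}}
	// Succeeds only if 10.0.0.5 belongs to a pool and any existing users of
	// the IP have a compatible sharing key and non-overlapping ports.
	if err := a.Assign("default/web", net.ParseIP("10.0.0.5"), ports, "shared-key", ""); err != nil {
		log.Fatal(err)
	}
	defer a.Unassign("default/web")
}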
autocomplete.ts
import { Component, HostListener, Input, OnChanges, Output, OnInit, EventEmitter, SimpleChanges, ViewChild } from '@angular/core'; import { LocalData } from 'ng2-completer'; @Component({ selector: 'autocomplete', styleUrls: ['./autocomplete.scss'], templateUrl: './autocomplete.html', }) export class Autocomplete implements OnInit, OnChanges { @Input() requerido; @Input() bloqueado; @Input() placeholder; @Input() valor: any = []; @Input() minimo; @Input() titulo; @Input() customElement; @Input() semTitulo = false; @Input() camposFiltro; @Input() fnCfgRemote; @Output() fnOnSelected = new EventEmitter(); @Input() valorSelecionado; @Input() fnSearch; @Input() idFiltro; @Input() initialValue; @Input() disableInput; @Input() limitWidth; @Input() custom_class; @Input() dataField = ""; @Input() fnOnSearch; @Output() searching = new EventEmitter(); @Output() onKeyUp = new EventEmitter(); @Output() onClear = new EventEmitter(); @ViewChild("elementAutocomplete") elementAutocomplete; @ViewChild("inputAutocomplete") inputAutocomplete; protected searchStr: string; protected searchStrTemp: string; protected dataService: LocalData; protected selecionei; private selecionou; camposLabel = []; objFiltros = []; digitando; buscando = false; foco = false; fazBusca = true; constructor() { (!this.minimo) ? this.minimo = 5 : null; } @HostListener('document:click', ['$event.target']) public onClick(targetElement) { const clickedInside = this.elementAutocomplete.nativeElement.contains(targetElement); if (!clickedInside) { this.foco = false; } } ngOnInit() { } ngOnChanges(changes: SimpleChanges) { if (changes.valor && !changes.valor.firstChange) { this.buscando = false } } searchData() { if (!this.valorSelecionado) { this.onClear.emit(); } this.onKeyUp.emit(this.valorSelecionado); clearTimeout(this.digitando); if (this.valorSelecionado) { if (this.valorSelecionado.length >= this.minimo && this.fazBusca) { this.buscando = true; this.digitando = setTimeout(() => { this.doneTyping(); }, 1000); } } } doneTyping() { if (this.valorSelecionado.length >= this.minimo) { if (this.fnSearch) { this.fnSearch(this.valorSelecionado); this.foco = true; } else { this.filtraLocal(); this.foco = true; } } } selectItem(ev, item) { this.selecionou = true; this.fnOnSelected.emit(item); if (this.fnSearch) { this.valor = []; this.foco = false; this.bMouseEnter = false; } } onBlur(ev) { this.focoCampo = false; if (!this.bMouseEnter || (this.valor && !this.valor.length && !this.customElement)) { this.foco = false; } this.bMouseEnter = true; } focoCampo = false; onFocus(ev) { this.focoCampo = true; this.foco = true; } bMouseEnter = true; mouseEnter(ev) { this.bMouseEnter = true; } mouseLeave(ev) { if (this.bMouseEnter) { this.foco = false } this.bMouseEnter = false; } mouseEnterCampo(ev) { if (this.valor && this.valor.length && !this.customElement) { if (this.focoCampo) { this.foco = true; } } } mouseLeaveCampo(ev) { } @HostListener('keyup', ['$event']) validaDigito($event) { this.selecionou = false; this.foco = true; if ((this.valorSelecionado && this.valorSelecionado.length < this.minimo) || (!this.valorSelecionado)) { if (this.fnSearch) { this.valor = []; } } } @HostListener('keypress', ['$event']) @HostListener('keydown', ['$event']) validaDigitoEspecial($event) { if ($event.key == 'Tab') { this.foco = false return } if ($event.key && $event.key.length > 1 && $event.key != 'Backspace') { this.fazBusca = false; } else { this.fazBusca = true; } } ngAfterViewInit() { if (this.camposFiltro.length) { this.camposFiltro.forEach((campo) => { if 
(campo.name) { this.camposLabel.push((campo.label) ? campo.label : campo.name); this.objFiltros.push(campo.name); } else { this.camposLabel.push(campo); this.objFiltros.push(campo); } }); } else { console.error("Field does not have a filter array") } } formataValor(itemLista, pos) { let valor; if (pos && pos.indexOf(".") > 0) { let arrayPos = pos.split("."); let valorFinal = itemLista; arrayPos.forEach((pos) => { if (valorFinal) { valorFinal = valorFinal[pos]; } });
} else { if (itemLista[pos]) { valor = itemLista[pos]; } else { valor = ""; } } return valor; } valorLocal = []; filtraLocal() { if (this.valor) { let array = this.valor.dados || this.valor; this.valorLocal = this.valor.filter( (item) => { let valor = this.formataValor(item, this.objFiltros[0]); if (!valor) { return false; } return valor.toUpperCase().indexOf(this.valorSelecionado.toUpperCase()) >= 0; } ); } } hide() { return setTimeout((ev) => { return true; }, 1000); } }
valor = valorFinal;
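formataValor resolves dotted filter paths such as "endereco.cidade.nome" by walking the object one key at a time and bailing out when a level is missing. The same walk in Go over nested maps, purely as illustration:

package main

import "strings"

// resolvePath mirrors formataValor's dotted-path walk (illustrative only).
func resolvePath(item map[string]interface{}, path string) interface{} {
	var cur interface{} = item
	for _, key := range strings.Split(path, ".") {
		m, ok := cur.(map[string]interface{})
		if !ok {
			return nil // a level was missing before the leaf
		}
		cur = m[key]
	}
	return cur
}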
Login.tsx
import React from 'react'; import * as yup from 'yup'; import { useSelector, useDispatch } from 'react-redux'; import { useForm, FormContextValues } from 'react-hook-form'; import { Button, BugVillaLogo, Flex, IconLink, Label, toast } from '@bug-ui';
import useQuery from 'hooks/useQuery'; import LoginWrapper from '../Signup/Signup.style'; import { StoreState } from 'store'; import { loginUser } from 'store/ducks/auth'; import { GoogleButton } from 'components/GoogleButton'; const LoginSchema = yup.object().shape({ email: yup .string() .min(5) .max(100) .email() .required(), password: yup .string() .min(6) .max(100) .required() }); const Login: React.FC = () => { const query = useQuery(); const dispatch = useDispatch<any>(); const [isLoading, loginError] = useSelector((state: StoreState) => [ state.loading['user/LOGIN'], state.error['user/LOGIN'] ]); const { register, handleSubmit, errors }: FormContextValues<Record<string, any>> = useForm({ validationSchema: LoginSchema, mode: 'onChange' }); const onSubmit = async (data: { name: string; email: string }) => { dispatch(loginUser(data)); }; const isComingFromSignup = query.get('signedup') && query.get('email'); loginError && toast.error(loginError); return ( <LoginWrapper> <Flex align="center" justify="center" direction="column"> <BugVillaLogo /> <h2 className="text--bold">Welcome back!</h2> <form onSubmit={handleSubmit(onSubmit as any)}> <Input name="email" type="email" icon="envelope" placeholder="[email protected]" errors={errors} inputRef={register({ required: 'Email is required' })} /> <Input type="password" name="password" icon="lock" placeholder="password" errors={errors} inputRef={register({ required: 'Password is Required' })} /> <Button isLoading={isLoading} type="submit" width="50%" icon="arrow-right" > Login </Button> </form> <GoogleButton /> <IconLink className="color--gray" to="/signup"> Don't have an account? </IconLink> {isComingFromSignup && ( <> <br /> <Label className="chip" type="feature"> Signup successful, please{' '} <a className="color--gray" href={`https://${query.get('email')}`}> verify {query.get('email')} </a> </Label> </> )} </Flex> </LoginWrapper> ); }; export default Login;
import { Input } from '@bug-ui/Form';
create-resource.test.js
"use strict"; var createResource = require("../../lib/create-resource"); var path = require("path"); var sh = require("shelljs"); var rscSrcPackageExportJs = path.join( __dirname, "../../resource/src/_package-export.js" ); var rscTitorrcYml = path.join(__dirname, "../../resource/_.titorrc.yml"); var fxtTitorrcYml = path.join(fxt, ".titorrc.yml"); var fxtTest = path.join(fxt, "test"); var fxtTestFixtureSrcJs = path.join(fxt, "test/fixture/src.js"); var fxtSrcPackageExportJs = path.join(fxt, "src/testPackage.js"); describe("createResource", function () { describe("no file already exists at dstPath", function () { var result; before(function () { minStandup(); result = createResource(".titorrc.yml"); }); it("create file at dstPath", function () { expect(sh.test("-e", fxtTitorrcYml)).to.be.true; }); it("return true", function () { expect(result).to.be.true; }); after(teardown); }); describe("a file already exists at dstPath", function () { var result; before(function () { minStandup(); sh.ShellString("testing").to(fxtTitorrcYml); result = createResource(".titorrc.yml"); }); it("don't replace existing file at dstPath", function () { expect(sh.cat(fxtTitorrcYml).stdout).to.equal("testing"); }); it("return false", function () { expect(result).to.be.false; }); after(teardown); }); describe("pkgExport is undefined", function () { before(function () { minStandup(); createResource(".titorrc.yml"); });
it("don't replace PACKAGE_EXPORT with pkgExport in dst file", function () { expect(sh.grep("export", fxtTitorrcYml).stdout.trim()) .to.equal("export: PACKAGE_EXPORT"); }); after(teardown); }); describe("pkgExport is defined", function () { before(function () { minStandup(); sh.mkdir(fxtTest); createResource("test/fixture/src.js", "testPkg"); }); it("replace PACKAGE_EXPORT with pkgExport in dst file", function () { expect(sh.grep("testPkg", fxtTestFixtureSrcJs).stdout.trim()) .to.equal("global.testPkg = require(\"../../src/test-pkg\");"); }); it("replace PACKAGE_FILE with decamelized pkgExport in dst", function () { expect(sh.grep("test-pkg", fxtTestFixtureSrcJs).stdout.trim()) .to.equal("global.testPkg = require(\"../../src/test-pkg\");"); }); after(teardown); }); describe("srcPath is undefined", function () { before(function () { minStandup(); createResource(".titorrc.yml"); }); it("copy src file from default path to dstPath", function () { expect(sh.cat(fxtTitorrcYml).stdout) .to.equal(sh.cat(rscTitorrcYml).stdout); }); after(teardown); }); describe("srcPath is defined", function () { before(function () { minStandup(); createResource("src/testPackage.js", undefined, rscSrcPackageExportJs); }); it("copy src file from srcPath to dstPath", function () { expect(sh.cat(fxtSrcPackageExportJs).stdout) .to.equal(sh.cat(rscSrcPackageExportJs).stdout); }); after(teardown); }); });
layer.go
// Copyright (c) 2019, The Emergent Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package leabra import ( "encoding/json" "errors" "fmt" "io" "log" "math" "math/rand" "strconv" "strings" "github.com/chewxy/math32" "github.com/emer/emergent/emer" "github.com/emer/emergent/erand" "github.com/emer/emergent/weights" "github.com/emer/etable/etensor" "github.com/goki/ki/bitflag" "github.com/goki/ki/indent" "github.com/goki/ki/ints" "github.com/goki/ki/ki" "github.com/goki/ki/kit" ) // leabra.Layer has parameters for running a basic rate-coded Leabra layer type Layer struct { LayerStru Act ActParams `view:"add-fields" desc:"Activation parameters and methods for computing activations"` Inhib InhibParams `view:"add-fields" desc:"Inhibition parameters and methods for computing layer-level inhibition"` Learn LearnNeurParams `view:"add-fields" desc:"Learning parameters and methods that operate at the neuron level"` Neurons []Neuron `desc:"slice of neurons for this layer -- flat list of len = Shp.Len(). You must iterate over index and use pointer to modify values."` Pools []Pool `desc:"inhibition and other pooled, aggregate state variables -- flat list has at least of 1 for layer, and one for each sub-pool (unit group) if shape supports that (4D). You must iterate over index and use pointer to modify values."` CosDiff CosDiffStats `desc:"cosine difference between ActM, ActP stats"` } var KiT_Layer = kit.Types.AddType(&Layer{}, LayerProps) // AsLeabra returns this layer as a leabra.Layer -- all derived layers must redefine // this to return the base Layer type, so that the LeabraLayer interface does not // need to include accessors to all the basic stuff func (ly *Layer) AsLeabra() *Layer { return ly } func (ly *Layer) Defaults() { ly.Act.Defaults() ly.Inhib.Defaults() ly.Learn.Defaults() ly.Inhib.Layer.On = true for _, pj := range ly.RcvPrjns { pj.Defaults() } } // UpdateParams updates all params given any changes that might have been made to individual values // including those in the receiving projections of this layer func (ly *Layer) UpdateParams() { ly.Act.Update() ly.Inhib.Update() ly.Learn.Update() for _, pj := range ly.RcvPrjns { pj.UpdateParams() } } // JsonToParams reformates json output to suitable params display output func JsonToParams(b []byte) string
// AllParams returns a listing of all parameters in the Layer func (ly *Layer) AllParams() string { str := "/////////////////////////////////////////////////\nLayer: " + ly.Nm + "\n" b, _ := json.MarshalIndent(&ly.Act, "", " ") str += "Act: {\n " + JsonToParams(b) b, _ = json.MarshalIndent(&ly.Inhib, "", " ") str += "Inhib: {\n " + JsonToParams(b) b, _ = json.MarshalIndent(&ly.Learn, "", " ") str += "Learn: {\n " + JsonToParams(b) for _, pj := range ly.RcvPrjns { pstr := pj.AllParams() str += pstr } return str } // UnitVarNames returns a list of variable names available on the units in this layer func (ly *Layer) UnitVarNames() []string { return NeuronVars } // UnitVarProps returns properties for variables func (ly *Layer) UnitVarProps() map[string]string { return NeuronVarProps } // UnitVarIdx returns the index of given variable within the Neuron, // according to *this layer's* UnitVarNames() list (using a map to lookup index), // or -1 and error message if not found. func (ly *Layer) UnitVarIdx(varNm string) (int, error) { return NeuronVarIdxByName(varNm) } // UnitVarNum returns the number of Neuron-level variables // for this layer. This is needed for extending indexes in derived types. func (ly *Layer) UnitVarNum() int { return len(NeuronVars) } // UnitVal1D returns value of given variable index on given unit, using 1-dimensional index. // returns NaN on invalid index. // This is the core unit var access method used by other methods, // so it is the only one that needs to be updated for derived layer types. func (ly *Layer) UnitVal1D(varIdx int, idx int) float32 { if idx < 0 || idx >= len(ly.Neurons) { return math32.NaN() } if varIdx < 0 || varIdx >= ly.UnitVarNum() { return math32.NaN() } nrn := &ly.Neurons[idx] return nrn.VarByIndex(varIdx) } // UnitVals fills in values of given variable name on unit, // for each unit in the layer, into given float32 slice (only resized if not big enough). // Returns error on invalid var name. func (ly *Layer) UnitVals(vals *[]float32, varNm string) error { nn := len(ly.Neurons) if *vals == nil || cap(*vals) < nn { *vals = make([]float32, nn) } else if len(*vals) < nn { *vals = (*vals)[0:nn] } vidx, err := ly.LeabraLay.UnitVarIdx(varNm) if err != nil { nan := math32.NaN() for i := range ly.Neurons { (*vals)[i] = nan } return err } for i := range ly.Neurons { (*vals)[i] = ly.LeabraLay.UnitVal1D(vidx, i) } return nil } // UnitValsTensor returns values of given variable name on unit // for each unit in the layer, as a float32 tensor in same shape as layer units. 
func (ly *Layer) UnitValsTensor(tsr etensor.Tensor, varNm string) error { if tsr == nil { err := fmt.Errorf("leabra.UnitValsTensor: Tensor is nil") log.Println(err) return err } tsr.SetShape(ly.Shp.Shp, ly.Shp.Strd, ly.Shp.Nms) vidx, err := ly.LeabraLay.UnitVarIdx(varNm) if err != nil { nan := math.NaN() for i := range ly.Neurons { tsr.SetFloat1D(i, nan) } return err } for i := range ly.Neurons { v := ly.LeabraLay.UnitVal1D(vidx, i) if math32.IsNaN(v) { tsr.SetFloat1D(i, math.NaN()) } else { tsr.SetFloat1D(i, float64(v)) } } return nil } // UnitVal returns value of given variable name on given unit, // using shape-based dimensional index func (ly *Layer) UnitVal(varNm string, idx []int) float32 { vidx, err := ly.LeabraLay.UnitVarIdx(varNm) if err != nil { return math32.NaN() } fidx := ly.Shp.Offset(idx) return ly.LeabraLay.UnitVal1D(vidx, fidx) } // RecvPrjnVals fills in values of given synapse variable name, // for projection into given sending layer and neuron 1D index, // for all receiving neurons in this layer, // into given float32 slice (only resized if not big enough). // prjnType is the string representation of the prjn type -- used if non-empty, // useful when there are multiple projections between two layers. // Returns error on invalid var name. // If the receiving neuron is not connected to the given sending layer or neuron // then the value is set to math32.NaN(). // Returns error on invalid var name or lack of recv prjn (vals always set to nan on prjn err). func (ly *Layer) RecvPrjnVals(vals *[]float32, varNm string, sendLay emer.Layer, sendIdx1D int, prjnType string) error { var err error nn := len(ly.Neurons) if *vals == nil || cap(*vals) < nn { *vals = make([]float32, nn) } else if len(*vals) < nn { *vals = (*vals)[0:nn] } nan := math32.NaN() for i := 0; i < nn; i++ { (*vals)[i] = nan } if sendLay == nil { return fmt.Errorf("sending layer is nil") } var pj emer.Prjn if prjnType != "" { pj, err = sendLay.SendPrjns().RecvNameTypeTry(ly.Nm, prjnType) if pj == nil { pj, err = sendLay.SendPrjns().RecvNameTry(ly.Nm) } } else { pj, err = sendLay.SendPrjns().RecvNameTry(ly.Nm) } if pj == nil { return err } if pj.IsOff() { return fmt.Errorf("projection is off") } for ri := 0; ri < nn; ri++ { (*vals)[ri] = pj.SynVal(varNm, sendIdx1D, ri) // this will work with any variable -- slower, but necessary } return nil } // SendPrjnVals fills in values of given synapse variable name, // for projection into given receiving layer and neuron 1D index, // for all sending neurons in this layer, // into given float32 slice (only resized if not big enough). // prjnType is the string representation of the prjn type -- used if non-empty, // useful when there are multiple projections between two layers. // Returns error on invalid var name. // If the sending neuron is not connected to the given receiving layer or neuron // then the value is set to math32.NaN(). // Returns error on invalid var name or lack of recv prjn (vals always set to nan on prjn err). 
func (ly *Layer) SendPrjnVals(vals *[]float32, varNm string, recvLay emer.Layer, recvIdx1D int, prjnType string) error { var err error nn := len(ly.Neurons) if *vals == nil || cap(*vals) < nn { *vals = make([]float32, nn) } else if len(*vals) < nn { *vals = (*vals)[0:nn] } nan := math32.NaN() for i := 0; i < nn; i++ { (*vals)[i] = nan } if recvLay == nil { return fmt.Errorf("receiving layer is nil") } var pj emer.Prjn if prjnType != "" { pj, err = recvLay.RecvPrjns().SendNameTypeTry(ly.Nm, prjnType) if pj == nil { pj, err = recvLay.RecvPrjns().SendNameTry(ly.Nm) } } else { pj, err = recvLay.RecvPrjns().SendNameTry(ly.Nm) } if pj == nil { return err } if pj.IsOff() { return fmt.Errorf("projection is off") } for si := 0; si < nn; si++ { (*vals)[si] = pj.SynVal(varNm, si, recvIdx1D) } return nil } // Pool returns pool at given index func (ly *Layer) Pool(idx int) *Pool { return &(ly.Pools[idx]) } // PoolTry returns pool at given index, returns error if index is out of range func (ly *Layer) PoolTry(idx int) (*Pool, error) { np := len(ly.Pools) if idx < 0 || idx >= np { return nil, fmt.Errorf("Layer Pool index: %v out of range, N = %v", idx, np) } return &(ly.Pools[idx]), nil } ////////////////////////////////////////////////////////////////////////////////////// // Build // BuildSubPools initializes neuron start / end indexes for sub-pools func (ly *Layer) BuildSubPools() { if !ly.Is4D() { return } sh := ly.Shp.Shapes() spy := sh[0] spx := sh[1] pi := 1 for py := 0; py < spy; py++ { for px := 0; px < spx; px++ { soff := ly.Shp.Offset([]int{py, px, 0, 0}) eoff := ly.Shp.Offset([]int{py, px, sh[2] - 1, sh[3] - 1}) + 1 pl := &ly.Pools[pi] pl.StIdx = soff pl.EdIdx = eoff for ni := pl.StIdx; ni < pl.EdIdx; ni++ { nrn := &ly.Neurons[ni] nrn.SubPool = int32(pi) } pi++ } } } // BuildPools builds the inhibitory pools structures -- nu = number of units in layer func (ly *Layer) BuildPools(nu int) error { np := 1 + ly.NPools() ly.Pools = make([]Pool, np) lpl := &ly.Pools[0] lpl.StIdx = 0 lpl.EdIdx = nu if np > 1 { ly.BuildSubPools() } return nil } // BuildPrjns builds the projections, recv-side func (ly *Layer) BuildPrjns() error { emsg := "" for _, pj := range ly.RcvPrjns { if pj.IsOff() { continue } err := pj.Build() if err != nil { emsg += err.Error() + "\n" } } if emsg != "" { return errors.New(emsg) } return nil } // Build constructs the layer state, including calling Build on the projections func (ly *Layer) Build() error { nu := ly.Shp.Len() if nu == 0 { return fmt.Errorf("Build Layer %v: no units specified in Shape", ly.Nm) } ly.Neurons = make([]Neuron, nu) err := ly.BuildPools(nu) if err != nil { return err } err = ly.BuildPrjns() return err } // WriteWtsJSON writes the weights from this layer from the receiver-side perspective // in a JSON text format. We build in the indentation logic to make it much faster and // more efficient. 
func (ly *Layer) WriteWtsJSON(w io.Writer, depth int) { w.Write(indent.TabBytes(depth)) w.Write([]byte("{\n")) depth++ w.Write(indent.TabBytes(depth)) w.Write([]byte(fmt.Sprintf("\"Layer\": %q,\n", ly.Nm))) w.Write(indent.TabBytes(depth)) w.Write([]byte(fmt.Sprintf("\"MetaData\": {\n"))) depth++ w.Write(indent.TabBytes(depth)) w.Write([]byte(fmt.Sprintf("\"ActMAvg\": \"%g\",\n", ly.Pools[0].ActAvg.ActMAvg))) w.Write(indent.TabBytes(depth)) w.Write([]byte(fmt.Sprintf("\"ActPAvg\": \"%g\"\n", ly.Pools[0].ActAvg.ActPAvg))) depth-- w.Write(indent.TabBytes(depth)) w.Write([]byte("},\n")) w.Write(indent.TabBytes(depth)) onps := make(emer.Prjns, 0, len(ly.RcvPrjns)) for _, pj := range ly.RcvPrjns { if !pj.IsOff() { onps = append(onps, pj) } } np := len(onps) if np == 0 { w.Write([]byte(fmt.Sprintf("\"Prjns\": null\n"))) } else { w.Write([]byte(fmt.Sprintf("\"Prjns\": [\n"))) depth++ for pi, pj := range onps { pj.WriteWtsJSON(w, depth) // this leaves prjn unterminated if pi == np-1 { w.Write([]byte("\n")) } else { w.Write([]byte(",\n")) } } depth-- w.Write(indent.TabBytes(depth)) w.Write([]byte("]\n")) } depth-- w.Write(indent.TabBytes(depth)) w.Write([]byte("}")) // note: leave unterminated as outer loop needs to add , or just \n depending } // ReadWtsJSON reads the weights from this layer from the receiver-side perspective // in a JSON text format. This is for a set of weights that were saved *for one layer only* // and is not used for the network-level ReadWtsJSON, which reads into a separate // structure -- see SetWts method. func (ly *Layer) ReadWtsJSON(r io.Reader) error { lw, err := weights.LayReadJSON(r) if err != nil { return err // note: already logged } return ly.SetWts(lw) } // SetWts sets the weights for this layer from weights.Layer decoded values func (ly *Layer) SetWts(lw *weights.Layer) error { if ly.IsOff() { return nil } if lw.MetaData != nil { if am, ok := lw.MetaData["ActMAvg"]; ok { pv, _ := strconv.ParseFloat(am, 32) ly.Pools[0].ActAvg.ActMAvg = float32(pv) } if ap, ok := lw.MetaData["ActPAvg"]; ok { pv, _ := strconv.ParseFloat(ap, 32) pl := &ly.Pools[0] pl.ActAvg.ActPAvg = float32(pv) ly.Inhib.ActAvg.EffFmAvg(&pl.ActAvg.ActPAvgEff, pl.ActAvg.ActPAvg) } } var err error rpjs := ly.RecvPrjns() if len(lw.Prjns) == len(*rpjs) { // this is essential if multiple prjns from same layer for pi := range lw.Prjns { pw := &lw.Prjns[pi] pj := (*rpjs)[pi] er := pj.SetWts(pw) if er != nil { err = er } } } else { for pi := range lw.Prjns { pw := &lw.Prjns[pi] pj := rpjs.SendName(pw.From) if pj != nil { er := pj.SetWts(pw) if er != nil { err = er } } } } return err } // VarRange returns the min / max values for given variable // todo: support r. s. 
projection values func (ly *Layer) VarRange(varNm string) (min, max float32, err error) { sz := len(ly.Neurons) if sz == 0 { return } vidx := 0 vidx, err = NeuronVarIdxByName(varNm) if err != nil { return } v0 := ly.Neurons[0].VarByIndex(vidx) min = v0 max = v0 for i := 1; i < sz; i++ { vl := ly.Neurons[i].VarByIndex(vidx) if vl < min { min = vl } if vl > max { max = vl } } return } // note: all basic computation can be performed on layer-level and prjn level ////////////////////////////////////////////////////////////////////////////////////// // Init methods // InitWts initializes the weight values in the network, i.e., resetting learning // Also calls InitActs func (ly *Layer) InitWts() { ly.LeabraLay.UpdateParams() for _, p := range ly.SndPrjns { if p.IsOff() { continue } p.(LeabraPrjn).InitWts() } for pi := range ly.Pools { pl := &ly.Pools[pi] pl.ActAvg.ActMAvg = ly.Inhib.ActAvg.Init pl.ActAvg.ActPAvg = ly.Inhib.ActAvg.Init pl.ActAvg.ActPAvgEff = ly.Inhib.ActAvg.EffInit() } ly.LeabraLay.InitActAvg() ly.LeabraLay.InitActs() ly.CosDiff.Init() } // InitActAvg initializes the running-average activation values that drive learning. func (ly *Layer) InitActAvg() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] ly.Learn.InitActAvg(nrn) } } // InitActs fully initializes activation state -- only called automatically during InitWts func (ly *Layer) InitActs() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] ly.Act.InitActs(nrn) } for pi := range ly.Pools { pl := &ly.Pools[pi] pl.Inhib.Init() pl.ActM.Init() pl.ActP.Init() } } // InitWtsSym initializes the weight symmetry -- higher layers copy weights from lower layers func (ly *Layer) InitWtSym() { for _, p := range ly.SndPrjns { if p.IsOff() { continue } plp := p.(LeabraPrjn) if !(plp.AsLeabra().WtInit.Sym) { continue } // key ordering constraint on which way weights are copied if p.RecvLay().Index() < p.SendLay().Index() { continue } rpj, has := ly.RecipToSendPrjn(p) if !has { continue } rlp := rpj.(LeabraPrjn) if !(rlp.AsLeabra().WtInit.Sym) { continue } plp.InitWtSym(rlp) } } // InitExt initializes external input state -- called prior to apply ext func (ly *Layer) InitExt() { msk := bitflag.Mask32(int(NeurHasExt), int(NeurHasTarg), int(NeurHasCmpr)) for ni := range ly.Neurons { nrn := &ly.Neurons[ni] nrn.Ext = 0 nrn.Targ = 0 nrn.ClearMask(msk) } } // ApplyExtFlags gets the clear mask and set mask for updating neuron flags // based on layer type, and whether input should be applied to Targ (else Ext) func (ly *Layer) ApplyExtFlags() (clrmsk, setmsk int32, toTarg bool) { clrmsk = bitflag.Mask32(int(NeurHasExt), int(NeurHasTarg), int(NeurHasCmpr)) toTarg = false if ly.Typ == emer.Target { setmsk = bitflag.Mask32(int(NeurHasTarg)) toTarg = true } else if ly.Typ == emer.Compare { setmsk = bitflag.Mask32(int(NeurHasCmpr)) toTarg = true } else { setmsk = bitflag.Mask32(int(NeurHasExt)) } return } // ApplyExt applies external input in the form of an etensor.Float32. If // dimensionality of tensor matches that of layer, and is 2D or 4D, then each dimension // is iterated separately, so any mismatch preserves dimensional structure. // Otherwise, the flat 1D view of the tensor is used. 
// If the layer is a Target or Compare layer type, then it goes in Targ // otherwise it goes in Ext func (ly *Layer) ApplyExt(ext etensor.Tensor) { switch { case ext.NumDims() == 2 && ly.Shp.NumDims() == 4: // special case ly.ApplyExt2Dto4D(ext) case ext.NumDims() != ly.Shp.NumDims() || !(ext.NumDims() == 2 || ext.NumDims() == 4): ly.ApplyExt1DTsr(ext) case ext.NumDims() == 2: ly.ApplyExt2D(ext) case ext.NumDims() == 4: ly.ApplyExt4D(ext) } } // ApplyExt2D applies 2D tensor external input func (ly *Layer) ApplyExt2D(ext etensor.Tensor) { clrmsk, setmsk, toTarg := ly.ApplyExtFlags() ymx := ints.MinInt(ext.Dim(0), ly.Shp.Dim(0)) xmx := ints.MinInt(ext.Dim(1), ly.Shp.Dim(1)) for y := 0; y < ymx; y++ { for x := 0; x < xmx; x++ { idx := []int{y, x} vl := float32(ext.FloatVal(idx)) i := ly.Shp.Offset(idx) nrn := &ly.Neurons[i] if nrn.IsOff() { continue } if toTarg { nrn.Targ = vl } else { nrn.Ext = vl } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } } // ApplyExt2Dto4D applies 2D tensor external input to a 4D layer func (ly *Layer) ApplyExt2Dto4D(ext etensor.Tensor) { clrmsk, setmsk, toTarg := ly.ApplyExtFlags() lNy, lNx, _, _ := etensor.Prjn2DShape(&ly.Shp, false) ymx := ints.MinInt(ext.Dim(0), lNy) xmx := ints.MinInt(ext.Dim(1), lNx) for y := 0; y < ymx; y++ { for x := 0; x < xmx; x++ { idx := []int{y, x} vl := float32(ext.FloatVal(idx)) ui := etensor.Prjn2DIdx(&ly.Shp, false, y, x) nrn := &ly.Neurons[ui] if nrn.IsOff() { continue } if toTarg { nrn.Targ = vl } else { nrn.Ext = vl } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } } // ApplyExt4D applies 4D tensor external input func (ly *Layer) ApplyExt4D(ext etensor.Tensor) { clrmsk, setmsk, toTarg := ly.ApplyExtFlags() ypmx := ints.MinInt(ext.Dim(0), ly.Shp.Dim(0)) xpmx := ints.MinInt(ext.Dim(1), ly.Shp.Dim(1)) ynmx := ints.MinInt(ext.Dim(2), ly.Shp.Dim(2)) xnmx := ints.MinInt(ext.Dim(3), ly.Shp.Dim(3)) for yp := 0; yp < ypmx; yp++ { for xp := 0; xp < xpmx; xp++ { for yn := 0; yn < ynmx; yn++ { for xn := 0; xn < xnmx; xn++ { idx := []int{yp, xp, yn, xn} vl := float32(ext.FloatVal(idx)) i := ly.Shp.Offset(idx) nrn := &ly.Neurons[i] if nrn.IsOff() { continue } if toTarg { nrn.Targ = vl } else { nrn.Ext = vl } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } } } } // ApplyExt1DTsr applies external input using 1D flat interface into tensor. // If the layer is a Target or Compare layer type, then it goes in Targ // otherwise it goes in Ext func (ly *Layer) ApplyExt1DTsr(ext etensor.Tensor) { clrmsk, setmsk, toTarg := ly.ApplyExtFlags() mx := ints.MinInt(ext.Len(), len(ly.Neurons)) for i := 0; i < mx; i++ { nrn := &ly.Neurons[i] if nrn.IsOff() { continue } vl := float32(ext.FloatVal1D(i)) if toTarg { nrn.Targ = vl } else { nrn.Ext = vl } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } // ApplyExt1D applies external input in the form of a flat 1-dimensional slice of floats // If the layer is a Target or Compare layer type, then it goes in Targ // otherwise it goes in Ext func (ly *Layer) ApplyExt1D(ext []float64) { clrmsk, setmsk, toTarg := ly.ApplyExtFlags() mx := ints.MinInt(len(ext), len(ly.Neurons)) for i := 0; i < mx; i++ { nrn := &ly.Neurons[i] if nrn.IsOff() { continue } vl := float32(ext[i]) if toTarg { nrn.Targ = vl } else { nrn.Ext = vl } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } // ApplyExt1D32 applies external input in the form of a flat 1-dimensional slice of float32s. 
// If the layer is a Target or Compare layer type, then it goes in Targ // otherwise it goes in Ext func (ly *Layer) ApplyExt1D32(ext []float32) { clrmsk, setmsk, toTarg := ly.ApplyExtFlags() mx := ints.MinInt(len(ext), len(ly.Neurons)) for i := 0; i < mx; i++ { nrn := &ly.Neurons[i] if nrn.IsOff() { continue } vl := ext[i] if toTarg { nrn.Targ = vl } else { nrn.Ext = vl } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } // UpdateExtFlags updates the neuron flags for external input based on current // layer Type field -- call this if the Type has changed since the last // ApplyExt* method call. func (ly *Layer) UpdateExtFlags() { clrmsk, setmsk, _ := ly.ApplyExtFlags() for i := range ly.Neurons { nrn := &ly.Neurons[i] if nrn.IsOff() { continue } nrn.ClearMask(clrmsk) nrn.SetMask(setmsk) } } // AlphaCycInit handles all initialization at start of new input pattern, including computing // input scaling from running average activation etc. // should already have presented the external input to the network at this point. func (ly *Layer) AlphaCycInit() { ly.LeabraLay.AvgLFmAvgM() for pi := range ly.Pools { pl := &ly.Pools[pi] ly.Inhib.ActAvg.AvgFmAct(&pl.ActAvg.ActMAvg, pl.ActM.Avg) ly.Inhib.ActAvg.AvgFmAct(&pl.ActAvg.ActPAvg, pl.ActP.Avg) ly.Inhib.ActAvg.EffFmAvg(&pl.ActAvg.ActPAvgEff, pl.ActAvg.ActPAvg) } for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } nrn.ActQ0 = nrn.ActP } ly.LeabraLay.GScaleFmAvgAct() if ly.Act.Noise.Type != NoNoise && ly.Act.Noise.Fixed && ly.Act.Noise.Dist != erand.Mean { ly.LeabraLay.GenNoise() } ly.LeabraLay.DecayState(ly.Act.Init.Decay) ly.LeabraLay.InitGInc() if ly.Act.Clamp.Hard && ly.Typ == emer.Input { ly.LeabraLay.HardClamp() } } // AvgLFmAvgM updates AvgL long-term running average activation that drives BCM Hebbian learning func (ly *Layer) AvgLFmAvgM() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ly.Learn.AvgLFmAvgM(nrn) if ly.Learn.AvgL.ErrMod { nrn.AvgLLrn *= ly.CosDiff.ModAvgLLrn } } } // GScaleFmAvgAct computes the scaling factor for synaptic input conductances G, // based on sending layer average activation. // This attempts to automatically adjust for overall differences in raw activity // coming into the units to achieve a general target of around .5 to 1 // for the integrated Ge value. func (ly *Layer) GScaleFmAvgAct() { totGeRel := float32(0) totGiRel := float32(0) for _, p := range ly.RcvPrjns { if p.IsOff() { continue } pj := p.(LeabraPrjn).AsLeabra() slay := p.SendLay().(LeabraLayer).AsLeabra() slpl := &slay.Pools[0] savg := slpl.ActAvg.ActPAvgEff snu := len(slay.Neurons) ncon := pj.RConNAvgMax.Avg pj.GScale = pj.WtScale.FullScale(savg, float32(snu), ncon) // reverting this change: if you want to eliminate a prjn, set the Off flag // if you want to negate it but keep the relative factor in the denominator // then set the scale to 0. // if pj.GScale == 0 { // continue // } if pj.Typ == emer.Inhib { totGiRel += pj.WtScale.Rel } else { totGeRel += pj.WtScale.Rel } } for _, p := range ly.RcvPrjns { if p.IsOff() { continue } pj := p.(LeabraPrjn).AsLeabra() if pj.Typ == emer.Inhib { if totGiRel > 0 { pj.GScale /= totGiRel } } else { if totGeRel > 0 { pj.GScale /= totGeRel } } } } // GenNoise generates random noise for all neurons func (ly *Layer) GenNoise() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] nrn.Noise = float32(ly.Act.Noise.Gen(-1)) } } // DecayState decays activation state by given proportion (default is on ly.Act.Init.Decay). 
// This does *not* call InitGInc -- must call that separately at start of AlphaCyc func (ly *Layer) DecayState(decay float32) { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ly.Act.DecayState(nrn, decay) } for pi := range ly.Pools { // decaying average act is essential for inhib pl := &ly.Pools[pi] pl.Inhib.Decay(decay) } } // DecayStatePool decays activation state by given proportion in given sub-pool index (0 based) func (ly *Layer) DecayStatePool(pool int, decay float32) { pi := int32(pool + 1) // 1 based pl := &ly.Pools[pi] for ni := pl.StIdx; ni < pl.EdIdx; ni++ { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ly.Act.DecayState(nrn, decay) } pl.Inhib.Decay(decay) } // HardClamp hard-clamps the activations in the layer -- called during AlphaCycInit for hard-clamped Input layers func (ly *Layer) HardClamp() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ly.Act.HardClamp(nrn) } } ////////////////////////////////////////////////////////////////////////////////////// // Cycle // InitGinc initializes the Ge excitatory and Gi inhibitory conductance accumulation states // including ActSent and G*Raw values. // called at start of trial always, and can be called optionally // when delta-based Ge computation needs to be updated (e.g., weights // might have changed strength) func (ly *Layer) InitGInc() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ly.Act.InitGInc(nrn) } for _, p := range ly.RcvPrjns { if p.IsOff() { continue } p.(LeabraPrjn).InitGInc() } } // SendGDelta sends change in activation since last sent, to increment recv // synaptic conductances G, if above thresholds func (ly *Layer) SendGDelta(ltime *Time) { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } if nrn.Act > ly.Act.OptThresh.Send { delta := nrn.Act - nrn.ActSent if math32.Abs(delta) > ly.Act.OptThresh.Delta { for _, sp := range ly.SndPrjns { if sp.IsOff() { continue } sp.(LeabraPrjn).SendGDelta(ni, delta) } nrn.ActSent = nrn.Act } } else if nrn.ActSent > ly.Act.OptThresh.Send { delta := -nrn.ActSent // un-send the last above-threshold activation to get back to 0 for _, sp := range ly.SndPrjns { if sp.IsOff() { continue } sp.(LeabraPrjn).SendGDelta(ni, delta) } nrn.ActSent = 0 } } } // GFmInc integrates new synaptic conductances from increments sent during last SendGDelta. func (ly *Layer) GFmInc(ltime *Time) { ly.RecvGInc(ltime) ly.GFmIncNeur(ltime) } // RecvGInc calls RecvGInc on receiving projections to collect Neuron-level G*Inc values. // This is called by GFmInc overall method, but separated out for cases that need to // do something different. func (ly *Layer) RecvGInc(ltime *Time) { for _, p := range ly.RcvPrjns { if p.IsOff() { continue } p.(LeabraPrjn).RecvGInc() } } // GFmIncNeur is the neuron-level code for GFmInc that integrates overall Ge, Gi values // from their G*Raw accumulators. 
func (ly *Layer) GFmIncNeur(ltime *Time) { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } // note: each step broken out here so other variants can add extra terms to Raw ly.Act.GeFmRaw(nrn, nrn.GeRaw) ly.Act.GiFmRaw(nrn, nrn.GiRaw) } } // AvgMaxGe computes the average and max Ge stats, used in inhibition func (ly *Layer) AvgMaxGe(ltime *Time) { for pi := range ly.Pools { pl := &ly.Pools[pi] pl.Inhib.Ge.Init() for ni := pl.StIdx; ni < pl.EdIdx; ni++ { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } pl.Inhib.Ge.UpdateVal(nrn.Ge, ni) } pl.Inhib.Ge.CalcAvg() } } // InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools func (ly *Layer) InhibFmGeAct(ltime *Time) { lpl := &ly.Pools[0] ly.Inhib.Layer.Inhib(&lpl.Inhib) ly.PoolInhibFmGeAct(ltime) ly.InhibFmPool(ltime) } // PoolInhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools func (ly *Layer) PoolInhibFmGeAct(ltime *Time) { np := len(ly.Pools) if np == 1 { return } lpl := &ly.Pools[0] lyInhib := ly.Inhib.Layer.On for pi := 1; pi < np; pi++ { pl := &ly.Pools[pi] ly.Inhib.Pool.Inhib(&pl.Inhib) if lyInhib { pl.Inhib.LayGi = lpl.Inhib.Gi pl.Inhib.Gi = math32.Max(pl.Inhib.Gi, lpl.Inhib.Gi) // pool is max of layer } else { lpl.Inhib.Gi = math32.Max(pl.Inhib.Gi, lpl.Inhib.Gi) // update layer from pool } } if !lyInhib { lpl.Inhib.GiOrig = lpl.Inhib.Gi // effective GiOrig } } // InhibFmPool computes inhibition Gi from Pool-level aggregated inhibition, including self and syn func (ly *Layer) InhibFmPool(ltime *Time) { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } pl := &ly.Pools[nrn.SubPool] ly.Inhib.Self.Inhib(&nrn.GiSelf, nrn.Act) nrn.Gi = pl.Inhib.Gi + nrn.GiSelf + nrn.GiSyn } } // ActFmG computes rate-code activation from Ge, Gi, Gl conductances // and updates learning running-average activations from that Act func (ly *Layer) ActFmG(ltime *Time) { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ly.Act.VmFmG(nrn) ly.Act.ActFmG(nrn) ly.Learn.AvgsFmAct(nrn) } } // AvgMaxAct computes the average and max Act stats, used in inhibition func (ly *Layer) AvgMaxAct(ltime *Time) { for pi := range ly.Pools { pl := &ly.Pools[pi] pl.Inhib.Act.Init() for ni := pl.StIdx; ni < pl.EdIdx; ni++ { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } pl.Inhib.Act.UpdateVal(nrn.Act, ni) } pl.Inhib.Act.CalcAvg() } } // CyclePost is called after the standard Cycle update, as a separate // network layer loop. // This is reserved for any kind of special ad-hoc types that // need to do something special after Act is finally computed. // For example, sending a neuromodulatory signal such as dopamine. 
func (ly *Layer) CyclePost(ltime *Time) { } ////////////////////////////////////////////////////////////////////////////////////// // Quarter // QuarterFinal does updating after end of a quarter func (ly *Layer) QuarterFinal(ltime *Time) { for pi := range ly.Pools { pl := &ly.Pools[pi] switch ltime.Quarter { case 2: pl.ActM = pl.Inhib.Act case 3: pl.ActP = pl.Inhib.Act } } for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } switch ltime.Quarter { case 0: nrn.ActQ1 = nrn.Act case 1: nrn.ActQ2 = nrn.Act case 2: nrn.ActM = nrn.Act if nrn.HasFlag(NeurHasTarg) { // will be clamped in plus phase nrn.Ext = nrn.Targ nrn.SetFlag(NeurHasExt) } case 3: nrn.ActP = nrn.Act nrn.ActDif = nrn.ActP - nrn.ActM nrn.ActAvg += ly.Act.Dt.AvgDt * (nrn.Act - nrn.ActAvg) } } if ltime.Quarter == 3 { ly.LeabraLay.CosDiffFmActs() } } // CosDiffFmActs computes the cosine difference in activation state between minus and plus phases. // this is also used for modulating the amount of BCM hebbian learning func (ly *Layer) CosDiffFmActs() { lpl := &ly.Pools[0] avgM := lpl.ActM.Avg avgP := lpl.ActP.Avg cosv := float32(0) ssm := float32(0) ssp := float32(0) for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } ap := nrn.ActP - avgP // zero mean am := nrn.ActM - avgM cosv += ap * am ssm += am * am ssp += ap * ap } dist := math32.Sqrt(ssm * ssp) if dist != 0 { cosv /= dist } ly.CosDiff.Cos = cosv ly.Learn.CosDiff.AvgVarFmCos(&ly.CosDiff.Avg, &ly.CosDiff.Var, ly.CosDiff.Cos) if ly.LeabraLay.IsTarget() { ly.CosDiff.AvgLrn = 0 // no BCM for non-hidden layers ly.CosDiff.ModAvgLLrn = 0 } else { ly.CosDiff.AvgLrn = 1 - ly.CosDiff.Avg ly.CosDiff.ModAvgLLrn = ly.Learn.AvgL.ErrModFmLayErr(ly.CosDiff.AvgLrn) } } // IsTarget returns true if this layer is a Target layer. // By default, returns true for layers of Type == emer.Target // Other Target layers include the TRCLayer in deep predictive learning. // This is used for turning off BCM hebbian learning, // in CosDiffFmActs to set the CosDiff.ModAvgLLrn value // for error-modulated level of hebbian learning. // It is also used in WtBal to not apply it to target layers. // In both cases, Target layers are purely error-driven. func (ly *Layer) IsTarget() bool { return ly.Typ == emer.Target } ////////////////////////////////////////////////////////////////////////////////////// // Learning // DWt computes the weight change (learning) -- calls DWt method on sending projections func (ly *Layer) DWt() { for _, p := range ly.SndPrjns { if p.IsOff() { continue } p.(LeabraPrjn).DWt() } } // WtFmDWt updates the weights from delta-weight changes -- on the sending projections func (ly *Layer) WtFmDWt() { for _, p := range ly.SndPrjns { if p.IsOff() { continue } p.(LeabraPrjn).WtFmDWt() } } // WtBalFmWt computes the Weight Balance factors based on average recv weights func (ly *Layer) WtBalFmWt() { for _, p := range ly.RcvPrjns { if p.IsOff() { continue } p.(LeabraPrjn).WtBalFmWt() } } // WtFmLWt sets the effective weight value from the linear weight value. // This also removes any current weight failure zeros. func (ly *Layer) WtFmLWt() { for _, p := range ly.SndPrjns { if p.IsOff() { continue } p.(LeabraPrjn).WtFmLWt() } } // WtFail zeros out effective synaptic weights to simulate synaptic communication failures. func (ly *Layer) WtFail() { for _, p := range ly.SndPrjns { if p.IsOff() { continue } p.(LeabraPrjn).WtFail() } } // LrateMult sets the new Lrate parameter for Prjns to LrateInit * mult. 
// Useful for implementing learning rate schedules. func (ly *Layer) LrateMult(mult float32) { for _, p := range ly.RcvPrjns { // if p.IsOff() { // keep all sync'd // continue // } p.(LeabraPrjn).LrateMult(mult) } } ////////////////////////////////////////////////////////////////////////////////////// // Threading / Reports // CostEst returns the estimated computational cost associated with this layer, // separated by neuron-level and synapse-level, in arbitrary units where // cost per synapse is 1. Neuron-level computation is more expensive but // there are typically many fewer neurons, so in larger networks, synaptic // costs tend to dominate. Neuron cost is estimated from TimerReport output // for large networks. func (ly *Layer) CostEst() (neur, syn, tot int) { perNeur := 300 // cost per neuron, relative to synapse which is 1 neur = len(ly.Neurons) * perNeur syn = 0 for _, pji := range ly.SndPrjns { pj := pji.(LeabraPrjn).AsLeabra() ns := len(pj.Syns) syn += ns } tot = neur + syn return } ////////////////////////////////////////////////////////////////////////////////////// // Stats // note: use float64 for stats as that is best for logging // MSE returns the sum-squared-error and mean-squared-error // over the layer, in terms of ActP - ActM (valid even on non-target layers FWIW). // Uses the given tolerance per-unit to count an error at all // (e.g., .5 = activity just has to be on the right side of .5). func (ly *Layer) MSE(tol float32) (sse, mse float64) { nn := len(ly.Neurons) if nn == 0 { return 0, 0 } sse = 0.0 for ni := range ly.Neurons { nrn := &ly.Neurons[ni] if nrn.IsOff() { continue } var d float32 if ly.Typ == emer.Compare { d = nrn.Targ - nrn.ActM } else { d = nrn.ActP - nrn.ActM } if math32.Abs(d) < tol { continue } sse += float64(d * d) } return sse, sse / float64(nn) } // SSE returns the sum-squared-error over the layer, in terms of ActP - ActM // (valid even on non-target layers FWIW). // Uses the given tolerance per-unit to count an error at all // (e.g., .5 = activity just has to be on the right side of .5). // Use this in Python which only allows single return values. func (ly *Layer) SSE(tol float32) float64 { sse, _ := ly.MSE(tol) return sse } ////////////////////////////////////////////////////////////////////////////////////// // Lesion // UnLesionNeurons unlesions (clears the Off flag) for all neurons in the layer func (ly *Layer) UnLesionNeurons() { for ni := range ly.Neurons { nrn := &ly.Neurons[ni] nrn.ClearFlag(NeurOff) } } // LesionNeurons lesions (sets the Off flag) for given proportion (0-1) of neurons in layer // returns number of neurons lesioned. 
// Emits an error if prop > 1, as an indication that a percent value
// might have been passed rather than a proportion.
func (ly *Layer) LesionNeurons(prop float32) int {
	ly.UnLesionNeurons()
	if prop > 1 {
		log.Printf("LesionNeurons got a proportion > 1 -- must be 0-1 as *proportion* (not percent) of neurons to lesion: %v\n", prop)
		return 0
	}
	nn := len(ly.Neurons)
	if nn == 0 {
		return 0
	}
	p := rand.Perm(nn)
	nl := int(prop * float32(nn))
	for i := 0; i < nl; i++ {
		nrn := &ly.Neurons[p[i]]
		nrn.SetFlag(NeurOff)
	}
	return nl
}

//////////////////////////////////////////////////////////////////////////////////////
//  Layer props for gui

var LayerProps = ki.Props{
	"ToolBar": ki.PropSlice{
		{"Defaults", ki.Props{
			"icon": "reset",
			"desc": "return all parameters to their initial default values",
		}},
		{"InitWts", ki.Props{
			"icon": "update",
			"desc": "initialize the layer's weight values according to prjn parameters, for all *sending* projections out of this layer",
		}},
		{"InitActs", ki.Props{
			"icon": "update",
			"desc": "initialize the layer's activation values",
		}},
		{"sep-act", ki.BlankProp{}},
		{"LesionNeurons", ki.Props{
			"icon": "close",
			"desc": "Lesion (set the Off flag) for given proportion of neurons in the layer (number must be 0 -- 1, NOT percent!)",
			"Args": ki.PropSlice{
				{"Proportion", ki.Props{
					"desc": "proportion (0 -- 1) of neurons to lesion",
				}},
			},
		}},
		{"UnLesionNeurons", ki.Props{
			"icon": "reset",
			"desc": "Un-Lesion (reset the Off flag) for all neurons in the layer",
		}},
	},
}
{
	br := strings.Replace(string(b), `"`, ``, -1)
	br = strings.Replace(br, ",\n", "", -1)
	br = strings.Replace(br, "{\n", "{", -1)
	br = strings.Replace(br, "} ", "}\n ", -1)
	br = strings.Replace(br, "\n }", " }", -1)
	br = strings.Replace(br, "\n }\n", " }", -1)
	return br[1:] + "\n"
}
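// layerReport is a minimal usage sketch (the name is hypothetical, not part
// of the original API): assuming a Layer that has been Built and run through
// at least one alpha cycle, it combines the VarRange and SSE methods defined
// above into a one-line summary string.
func layerReport(ly *Layer) string {
	mn, mx, err := ly.VarRange("Act") // range of current unit activations
	if err != nil {
		return err.Error()
	}
	sse := ly.SSE(0.5) // 0.5 tolerance: a unit only has to be on the right side of .5
	return fmt.Sprintf("%v: Act range [%g, %g], SSE: %g", ly.Nm, mn, mx, sse)
}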
device.js
/** * @namespace cam * @description Device section for Cam class * @author Andrew D.Laptev <[email protected]> * @licence MIT */ module.exports = function(Cam) { const linerase = require('./utils').linerase; /** * @typedef {object} Cam~NTPManual * @property {string} options.type Network host type: IPv4, IPv6 or DNS. - enum { 'IPv4', 'IPv6', 'DNS' } * @property {string} [options.IPv4Address] IPv4 address * @property {string} [options.IPv6Address] IPv6 address * @property {string} [options.DNSname] DNS name * @property {string} [options.extension] */ /** * @typedef {object} Cam~NTPFromDHCP * @property {string} options.type Network host type: IPv4, IPv6 or DNS. - enum { 'IPv4', 'IPv6', 'DNS' } * @property {string} [options.IPv4Address] IPv4 address * @property {string} [options.IPv6Address] IPv6 address * @property {string} [options.DNSname] DNS name * @property {string} [options.extension] */ /** * @typedef {object} Cam~NTPInformation * @property {boolean} fromDHCP Indicates if NTP information is to be retrieved by using DHCP * @property {Array.<Cam~NTPFromDHCP>} [NTPFromDHCP] List of NTP addresses retrieved by using DHCP * @property {Array.<Cam~NTPManual>} [NTPManual] List of manually entered NTP addresses */ /** * @callback Cam~NTPCallback * @property {?Error} error * @property {Cam~NTPInformation} NTP information object of current device's NTP manual * @property {string} xml Raw SOAP response */ /** * Receive NTP information from cam * @param {Cam~NTPCallback} callback */ Cam.prototype.getNTP = function(callback) { this._request({ service: 'device' , body: this._envelopeHeader() + '<GetNTP xmlns="http://www.onvif.org/ver10/device/wsdl"/>' + '</s:Body>' + '</s:Envelope>' }, function(err, data, xml) { if (!err) { this.NTP = linerase(data[0]['getNTPResponse'][0]['NTPInformation'][0]); } callback.call(this, err, err ? null : this.NTP, xml); }); }; /** * Set the NTP settings on a device * @param {object} options * @param {boolean} options.fromDHCP Indicate if NTP address information is to be retrieved using DHCP * @param {Array.<Cam~NTPManual>} [options.NTPManual] List of NTP addresses * @param {Cam~RequestCallback} [callback] */ Cam.prototype.setNTP = function(options, callback) { if (!Array.isArray(options.NTPManual)) { options.NTPManual = []; } // For backward compatibility if (options.type || options.ipv4Address || options.ipv6Address || options.dnsName || options.extension) { // Note the case changes to follow the xml parser rules options.NTPManual.push({ type: options.type, IPv4Address: options.ipv4Address, IPv6Address: options.ipv6Address, DNSname: options.dnsName, extension: options.extension, }) } let body = this._envelopeHeader() + '<SetNTP xmlns="http://www.onvif.org/ver10/device/wsdl">' + '<FromDHCP>' + options.fromDHCP + '</FromDHCP>'; if (options.NTPManual && Array.isArray(options.NTPManual)) { options.NTPManual.forEach((NTPManual)=>{ body += ( NTPManual.type ? '<NTPManual>' + '<Type xmlns="http://www.onvif.org/ver10/schema">' + NTPManual.type + '</Type>' + ( NTPManual.IPv4Address ? '<IPv4Address xmlns="http://www.onvif.org/ver10/schema">' + NTPManual.IPv4Address + '</IPv4Address>' : '' ) + ( NTPManual.IPv6Address ? '<IPv6Address xmlns="http://www.onvif.org/ver10/schema">' + NTPManual.IPv6Address + '</IPv6Address>' : '' ) + ( NTPManual.DNSname ? '<DNSname>' + NTPManual.DNSname + '</DNSname>' : '' ) + ( NTPManual.extension ? 
'<Extension>' + NTPManual.extension + '</Extension>' : '' ) + '</NTPManual>' : ''); }); } body += '</SetNTP>' + this._envelopeFooter(); this._request({ service: 'device' , body: body }, callback.bind(this));
/** * @typedef {object} Cam~NetworkInterface * @property {object} $ * @property {string} $.token Unique identifier referencing the physical entity. * @property {boolean} enabled Indicates whether or not an interface is enabled. * @property {object} [info] network interface information * @property {string} info.name Network interface name, for example eth0 * @property {string} info.hwAddress Network interface MAC address * @property {number} info.MTU Maximum transmission unit. * @property {object} [link] Link configuration. * @property {object} link.adminSettings Configured link settings. * @property {boolean} link.adminSettings.autoNegotiation Auto negotiation on/off. * @property {number} link.adminSettings.speed * @property {string} link.adminSettings.duplex Duplex type, Half or Full. - enum { 'Full', 'Half' } * @property {object} link.operSettings Current active link settings * @property {boolean} link.operSettings.autoNegotiation Auto negotiation on/off. * @property {number} link.operSettings.speed * @property {string} link.operSettings.duplex Duplex type, Half or Full. - enum { 'Full', 'Half' } * @property {number} link.interfaceType Integer indicating interface type, for example: 6 is ethernet. * @property {object} [IPv4] IPv4 network interface configuration. * @property {boolean} IPv4.enabled Indicates whether or not IPv4 is enabled. * @property {object} IPv4.config IPv4 configuration. * @property {object} [IPv4.config.manual] List of manually added IPv4 addresses. * @property {string} IPv4.config.manual.address IPv4 address. * @property {number} IPv4.config.manual.prefixLength Prefix/submask length. * @property {object} [IPv4.config.linkLocal] List of manually added IPv4 addresses. * @property {string} IPv4.config.linkLocal.address IPv4 address. * @property {number} IPv4.config.linkLocal.prefixLength Prefix/submask length. * @property {object} [IPv4.config.fromDHCP] IPv4 address configured by using DHCP. * @property {string} IPv4.config.fromDHCP.address IPv4 address. * @property {number} IPv4.config.fromDHCP.prefixLength Prefix/submask length. * @property {boolean} IPv4.config.DHCP Indicates whether or not DHCP is used. * @property {object} [IPv6] IPv6 network interface configuration. * @property {boolean} IPv6.enabled Indicates whether or not IPv6 is enabled. * @property {object} IPv6.config IPv6 configuration. * @property {boolean} [IPv6.config.acceptRouterAdvert] Indicates whether router advertisement is used. * @property {string} IPv6.config.DHCP DHCP configuration. - enum { 'Auto', 'Stateful', 'Stateless', 'Off' } * @property {object} [IPv6.config.manual] List of manually added IPv6 addresses. * @property {string} IPv6.config.manual.address IPv6 address. * @property {number} IPv6.config.manual.prefixLength Prefix/submask length. * @property {object} [IPv6.config.linkLocal] List of link local IPv6 addresses. * @property {string} IPv6.config.linkLocal.address IPv6 address. * @property {number} IPv6.config.linkLocal.prefixLength Prefix/submask length. * @property {object} [IPv6.config.fromDHCP] List of IPv6 addresses configured by using DHCP. * @property {string} IPv6.config.fromDHCP.address IPv6 address. * @property {number} IPv6.config.fromDHCP.prefixLength Prefix/submask length. * @property {object} [IPv6.config.fromRA] List of IPv6 addresses configured by using router advertisement. * @property {string} IPv6.config.fromRA.address IPv6 address. * @property {number} IPv6.config.fromRA.prefixLength Prefix/submask length. 
 * @property {object} [IPv6.config.extension] Extension
 * @property {object} [extension] Extension
 * @property {string} extension.interfaceType
 * @property {object} [extension.dot3] Extension point prepared for future 802.3 configuration.
 * @property {object} [extension.dot11]
 * @property {string} extension.dot11.SSID
 * @property {string} extension.dot11.mode - enum { 'Ad-hoc', 'Infrastructure', 'Extended' }
 * @property {string} extension.dot11.alias
 * @property {string} extension.dot11.priority
 * @property {object} extension.dot11.security
 * @property {string} extension.dot11.security.mode - enum { 'None', 'WEP', 'PSK', 'Dot1X', 'Extended' }
 * @property {string} extension.dot11.security.algorithm - enum { 'CCMP', 'TKIP', 'Any', 'Extended' }
 * @property {object} extension.dot11.security.PSK
 * @property {string} extension.dot11.security.PSK.key According to IEEE802.11-2007 H.4.1 the RSNA PSK consists of 256 bits, or 64 octets when represented in hex.
 * Either Key or Passphrase shall be given; if both are supplied, Key shall be used by the device and Passphrase ignored.
 * @property {string} extension.dot11.security.PSK.passphrase According to IEEE802.11-2007 H.4.1 a pass-phrase is a sequence of between 8 and 63 ASCII-encoded characters and each character in the pass-phrase must have an encoding in the range of 32 to 126 (decimal), inclusive.
 * If only Passphrase is supplied, the Key shall be derived using the algorithm described in IEEE802.11-2007 section H.4.
 * @property {object} [extension.dot11.security.PSK.extension]
 * @property {string} [extension.dot11.security.dot1X]
 * @property {object} [extension.dot11.security.extension]
 * @property {object} [extension.extension]
 */

/**
 * @typedef {object} Cam~NetworkInterfaceSetConfiguration
 * @property {boolean} [enabled] Indicates whether or not an interface is enabled.
 * @property {object} [link] Link configuration
 * @property {boolean} link.autoNegotiation Auto negotiation on/off.
 * @property {number} link.speed Speed.
 * @property {string} link.duplex Duplex type, Half or Full. - enum { 'Full', 'Half' }
 * @property {number} [MTU] Maximum transmission unit.
 * @property {object} [IPv4] IPv4 network interface configuration.
 * @property {boolean} [IPv4.enabled] Indicates whether or not IPv4 is enabled.
 * @property {object} [IPv4.manual] List of manually added IPv4 addresses.
 * @property {string} IPv4.manual.address IPv4 address.
 * @property {number} IPv4.manual.prefixLength Prefix/submask length.
 * @property {boolean} [IPv4.DHCP] Indicates whether or not DHCP is used.
 * @property {object} [IPv6] IPv6 network interface configuration.
 * @property {boolean} [IPv6.enabled] Indicates whether or not IPv6 is enabled.
 * @property {boolean} [IPv6.acceptRouterAdvert] Indicates whether router advertisement is used.
 * @property {object} [IPv6.manual] List of manually added IPv6 addresses.
 * @property {string} IPv6.manual.address IPv6 address.
 * @property {number} IPv6.manual.prefixLength Prefix/submask length.
 * @property {string} [IPv6.DHCP] DHCP configuration. - enum { 'Auto', 'Stateful', 'Stateless', 'Off' }
 * @property {object} [extension]
 * @property {object} [extension.Dot3]
 * @property {object} [extension.Dot11]
 * @property {string} extension.Dot11.SSID
 * @property {string} extension.Dot11.mode - enum { 'Ad-hoc', 'Infrastructure', 'Extended' }
 * @property {string} extension.Dot11.alias
 * @property {string} extension.Dot11.priority
 * @property {object} extension.Dot11.security
 * @property {string} extension.Dot11.security.mode - enum { 'None', 'WEP', 'PSK', 'Dot1X', 'Extended' }
 * @property {string} [extension.Dot11.security.algorithm] - enum { 'CCMP', 'TKIP', 'Any', 'Extended' }
 * @property {object} [extension.Dot11.security.PSK]
 * @property {string} [extension.Dot11.security.PSK.key] According to IEEE802.11-2007 H.4.1 the RSNA PSK consists of 256 bits, or 64 octets when represented in hex. Either Key or Passphrase shall be given; if both are supplied, Key shall be used by the device and Passphrase ignored.
 * @property {string} [extension.Dot11.security.PSK.passphrase] According to IEEE802.11-2007 H.4.1 a pass-phrase is a sequence of between 8 and 63 ASCII-encoded characters and each character in the pass-phrase must have an encoding in the range of 32 to 126 (decimal), inclusive. If only Passphrase is supplied, the Key shall be derived using the algorithm described in IEEE802.11-2007 section H.4.
 * @property {object} [extension.Dot11.security.PSK.extension]
 * @property {string} [extension.Dot11.security.dot1X]
 * @property {object} [extension.Dot11.security.extension]
 * @property {object} [extension.extension]
 */

/**
 * @callback Cam~GetNetworkInterfacesCallback
 * @property {?Error} error
 * @property {Array.<Cam~NetworkInterface>} networkInterfaces Network interfaces information
 * @property {string} xml Raw SOAP response
 */

/**
 * Receive network interfaces information
 * @param {Cam~GetNetworkInterfacesCallback} [callback]
 */
Cam.prototype.getNetworkInterfaces = function(callback) {
	this._request({
		service: 'device'
		, body: this._envelopeHeader() +
		'<GetNetworkInterfaces xmlns="http://www.onvif.org/ver10/device/wsdl"/>' +
		this._envelopeFooter()
	}, function(err, data, xml) {
		if (!err) {
			this.networkInterfaces = linerase(data).getNetworkInterfacesResponse.networkInterfaces;
			// networkInterfaces is an array of network interfaces, but linerase removes the array
			// if there is only one element inside, so we convert it back to an array
			if (!Array.isArray(this.networkInterfaces)) {this.networkInterfaces = [this.networkInterfaces];}
		}
		if (callback) {
			callback.call(this, err, this.networkInterfaces, xml);
		}
	}.bind(this));
};

/**
 * @callback Cam~SetNetworkInterfacesCallback
 * @property {?Error} error
 * @property {boolean} data.rebootNeeded
 * @property {string} xml Raw SOAP response
 */

/**
 * Set network interfaces information
 * @param {object} options
 * @param {string} options.interfaceToken Network interface token
 * @param {Cam~NetworkInterfaceSetConfiguration} options.networkInterface Network interface
 * @param {Cam~SetNetworkInterfacesCallback} [callback]
 */
Cam.prototype.setNetworkInterfaces = function(options, callback) {
	let ni = options.networkInterface;
	let modifyIpv4 = false;
	let modifyIpv6 = false;
	if (ni.IPv4) {
		try {
			modifyIpv4 = ni.IPv4.manual.address !== this.hostname;
		} catch (e) {
			modifyIpv4 = false;
		}
	} else if (ni.IPv6) {
		try {
			modifyIpv6 = ni.IPv6.manual.address !== this.hostname;
		} catch (e) {
			modifyIpv6 = false;
		}
	}
	let body = this._envelopeHeader() +
		'<SetNetworkInterfaces xmlns="http://www.onvif.org/ver10/device/wsdl">' +
		'<InterfaceToken>'
+ options.interfaceToken + '</InterfaceToken>' + '<NetworkInterface>' + '<Enabled xmlns="http://www.onvif.org/ver10/schema">' + ni.enabled + '</Enabled>' + (ni.link ? '<Link xmlns="http://www.onvif.org/ver10/schema">' + '<AutoNegotiation>' + ni.link.autoNegotiation + '</AutoNegotiation>' + '<Speed>' + ni.link.speed + '</Speed>' + '<Duplex>' + ni.link.duplex + '</Duplex>' + '</Link>' : '' ) + (!isNaN(ni.MTU) ? '<MTU xmlns="http://www.onvif.org/ver10/schema">' + ni.MTU + '</MTU>' : '') + (ni.IPv4 ? '<IPv4 xmlns="http://www.onvif.org/ver10/schema">' + '<Enabled>' + ni.IPv4.enabled + '</Enabled>' + '<Manual>' + '<Address>' + ni.IPv4.manual.address + '</Address>' + '<PrefixLength>' + ni.IPv4.manual.prefixLength + '</PrefixLength>' + '</Manual>' + '<DHCP>' + ni.IPv4.DHCP + '</DHCP>' + '</IPv4>' : '' ) + (ni.IPv6 ? '<IPv6 xmlns="http://www.onvif.org/ver10/schema">' + '<Enabled>' + ni.IPv6.enabled + '</Enabled>' + '<AcceptRouterAdvert >' + ni.IPv6.acceptRouterAdvert + '</AcceptRouterAdvert>' + '<Manual>' + '<Address>' + ni.IPv6.manual.address + '</Address>' + '<PrefixLength>' + ni.IPv6.manual.prefixLength + '</PrefixLength>' + '</Manual>' + '<DHCP>' + ni.IPv6.DHCP + '</DHCP>' + '</IPv6>' : '' ) + '</NetworkInterface>' + '</SetNetworkInterfaces>' + this._envelopeFooter(); this._request({ service: 'device' , body: body, }, function(err, data, xml) { if (callback) { if (!err) { if (modifyIpv4) {this.hostname = ni.IPv4.manual.address;} if (modifyIpv6) {this.hostname = ni.IPv6.manual.address;} data = linerase(data[0].setNetworkInterfacesResponse); } callback.call(this, err, data, xml); } }.bind(this)); }; /** * @typedef {object} Cam~NetworkGateway * @property {string} IPv4Address * @property {string} IPv6Address */ /** * @callback Cam~GetNetworkDefaultGatewayCallback * @property {?Error} error * @property {Array.<Cam~NetworkGateway>} networkGateway Network Gateway information * @property {string} xml Raw SOAP response */ /** * Get network default gateway information * @param {Cam~GetNetworkDefaultGatewayCallback} [callback] */ Cam.prototype.getNetworkDefaultGateway = function(callback) { this._request({ service: 'device' , body: this._envelopeHeader() + '<GetNetworkDefaultGateway xmlns="http://www.onvif.org/ver10/device/wsdl"/>' + this._envelopeFooter() }, function(err, data, xml) { if (callback) { if (!err) { this.networkDefaultGateway = linerase(data[0].getNetworkDefaultGatewayResponse[0].networkGateway); } callback.call(this, err, this.networkDefaultGateway, xml); } }.bind(this)); }; /** * Set network default gateway information * @param {Cam~NetworkGateway} options * @param {Cam~GetNetworkDefaultGatewayCallback} [callback] */ Cam.prototype.setNetworkDefaultGateway = function(options,callback) { this._request({ service: 'device' , body: this._envelopeHeader() + '<SetNetworkDefaultGateway xmlns="http://www.onvif.org/ver10/device/wsdl">' + (options.IPv4Address ? '<IPv4Address>' + options.IPv4Address + '</IPv4Address>' : '') + (options.IPv6Address ? '<IPv6Address>' + options.IPv6Address + '</IPv6Address>' : '') + '</SetNetworkDefaultGateway>' + this._envelopeFooter() }, function(err, data, xml) { if (callback) { if (err) { return callback.call(this,err,data,xml); } this.getNetworkDefaultGateway(callback.bind(this)); } }.bind(this)); }; /** * @typedef {object} Cam~IPAddress * @property {string} type Indicates if the address is an IPv4 or IPv6 address. - enum { 'IPv4', 'IPv6' } * @property {string} IPv4Address IPv4 address. * @property {string} IPv6Address IPv6 address. 
 */

/**
 * @typedef {object} Cam~DNSInformation
 * @property {string} fromDHCP Indicates whether or not DNS information is retrieved from DHCP.
 * @property {string} searchDomain Search domain.
 * @property {Array.<Cam~IPAddress>} DNSFromDHCP List of DNS addresses received from DHCP.
 * @property {Array.<Cam~IPAddress>} DNSManual List of manually entered DNS addresses.
 */

/**
 * @callback Cam~GetDNSCallback
 * @property {?Error} error
 * @property {Array.<Cam~DNSInformation>} DNSInformation DNS information
 * @property {string} xml Raw SOAP response
 */

/**
 * Get DNS information
 * @param {Cam~GetDNSCallback} [callback]
 */
Cam.prototype.getDNS = function(callback) {
	this._request({
		service: 'device'
		, body: this._envelopeHeader() +
		'<GetDNS xmlns="http://www.onvif.org/ver10/device/wsdl"/>' +
		this._envelopeFooter()
	}, function(err, data, xml) {
		if (callback) {
			if (!err) {
				this.DNS = linerase(data[0].getDNSResponse[0].DNSInformation);
				if (this.DNS.DNSManual && !Array.isArray(this.DNS.DNSManual)) {this.DNS.DNSManual = [this.DNS.DNSManual];}
				if (this.DNS.DNSFromDHCP && !Array.isArray(this.DNS.DNSFromDHCP)) {this.DNS.DNSFromDHCP = [this.DNS.DNSFromDHCP];}
			}
			callback.call(this, err, this.DNS, xml);
		}
	}.bind(this));
};

/**
 * Set DNS information
 * @param {Cam~DNSInformation} options
 * @param {Cam~GetDNSCallback} [callback]
 */
Cam.prototype.setDNS = function(options, callback) {
	// Guard against a missing list, mirroring the NTPManual guard in setNTP above
	if (!Array.isArray(options.DNSManual)) { options.DNSManual = []; }
	let body = this._envelopeHeader() +
		'<SetDNS xmlns="http://www.onvif.org/ver10/device/wsdl">' +
		'<FromDHCP>' + (!!options.fromDHCP) + '</FromDHCP>' +
		(options.searchDomain ? '<SearchDomain>' + options.searchDomain + '</SearchDomain>' : '');
	options.DNSManual.forEach(function(dns) {
		body += '<DNSManual>' +
			'<Type xmlns="http://www.onvif.org/ver10/schema">' + ((dns.type === "IPv6") ? "IPv6" : 'IPv4') + '</Type>' +
			(dns.IPv4Address ? '<IPv4Address xmlns="http://www.onvif.org/ver10/schema">' + dns.IPv4Address + '</IPv4Address>' : '') +
			(dns.IPv6Address ? '<IPv6Address xmlns="http://www.onvif.org/ver10/schema">' + dns.IPv6Address + '</IPv6Address>' : '') +
			'</DNSManual>';
	});
	body += '</SetDNS>' + this._envelopeFooter();
	this._request({
		service: 'device'
		, body: body,
	}, function(err, data, xml) {
		if (callback) {
			if (err) {
				return callback.call(this, err, null, xml);
			}
			this.getDNS(callback);
		}
	}.bind(this));
};

/**
 * @typedef {object} Cam~NetworkProtocol
 * @property {string} name Network protocol type string. - enum { 'HTTP', 'HTTPS', 'RTSP' }
 * @property {boolean} enabled Indicates if the protocol is enabled or not.
 * @property {number} port The port that is used by the protocol.
 * @property {object} extension
 */

/**
 * @callback Cam~GetNetworkProtocolsCallback
 * @property {?Error} error
 * @property {Array.<Cam~NetworkProtocol>} networkProtocols Network protocols information
 * @property {string} xml Raw SOAP response
 */

/**
 * Receive network protocols information
 * @param {Cam~GetNetworkProtocolsCallback} [callback]
 */
Cam.prototype.getNetworkProtocols = function(callback) {
	this._request({
		service: 'device'
		, body: this._envelopeHeader() +
		'<GetNetworkProtocols xmlns="http://www.onvif.org/ver10/device/wsdl"/>' +
		this._envelopeFooter()
	}, function(err, data, xml) {
		if (!err) {
			this.networkProtocols = linerase(data).getNetworkProtocolsResponse;
		}
		if (callback) {
			callback.call(this, err, this.networkProtocols, xml);
		}
	}.bind(this));
};

/**
 * @typedef {object} Cam~User
 * @property {string} username
 * @property {string} password
 * @property {string} userLevel 'Administrator', 'Operator', 'User', 'Anonymous' or 'Extended'
 */

/**
 * @callback Cam~GetUsersCallback
 * @property {?Error} error
 * @property {Array.<Cam~User>} users List of users. The password is not included in the
 * response even if it is present in Cam~User.
 * @property {string} xml Raw SOAP response
 */

/**
 * Get the list of Usernames and their User levels.
 * @param {Cam~GetUsersCallback} [callback]
 */
Cam.prototype.getUsers = function(callback) {
	this._request({
		service: 'device'
		, body: this._envelopeHeader() +
		'<GetUsers xmlns="http://www.onvif.org/ver10/device/wsdl" />' +
		this._envelopeFooter()
	}, function(err, data, xml) {
		if (callback) {
			if (!err) {
				var users = data[0].getUsersResponse[0].user;
				users = users.map(function(user) {
					return linerase(user);
				});
				this.users = users;
				data = users;
			}
			callback.call(this, err, data, xml);
		}
	}.bind(this));
};

/**
 * Create one or more users
 * @param {Array.<Cam~User>} users
 * @param {Cam~GetUsersCallback} [callback]
 */
Cam.prototype.createUsers = function(users, callback) {
	const usersOk = users.every(function(user) {
		if (!user.username || !user.password || !user.userLevel) {
			return false;
		}
		return true;
	});
	if (!usersOk) {
		callback.call(this, new Error('Missing username, password or user level'), null, null);
		return;
	}
	var usersXml = '';
	users.forEach(function(user) {
		usersXml += '<User>' +
			'<Username xmlns="http://www.onvif.org/ver10/schema">' + user.username + '</Username>' +
			'<Password xmlns="http://www.onvif.org/ver10/schema">' + user.password + '</Password>' +
			'<UserLevel xmlns="http://www.onvif.org/ver10/schema">' + user.userLevel + '</UserLevel>' +
			'</User>';
	});
	var body = this._envelopeHeader() +
		'<CreateUsers xmlns="http://www.onvif.org/ver10/device/wsdl">' +
		usersXml +
		'</CreateUsers>' +
		this._envelopeFooter();
	this._request({
		service: 'device'
		, body: body,
	}, function(err, data, xml) {
		if (callback) {
			if (err) {
				callback.call(this, err, data, xml);
			} else {
				this.getUsers(callback);
			}
		}
	}.bind(this));
};

/**
 * Set the Password and User level of one or more users by their Username
 * @param {Array.<Cam~User>} users
 * @param {Cam~GetUsersCallback} [callback]
 */
Cam.prototype.setUsers = function(users, callback) {
	const usersOk = users.every(function(user) {
		if (!user.username || !user.password || !user.userLevel) {
			return false;
		}
		return true;
	});
	if (!usersOk) {
		callback.call(this, new Error('Missing username, password or user level'), null, null);
		return;
	}
	var usersXml = '';
	users.forEach(function(user) {
		usersXml += '<User>' +
			'<Username xmlns="http://www.onvif.org/ver10/schema">' + user.username + '</Username>' +
			'<Password xmlns="http://www.onvif.org/ver10/schema">' +
user.password + '</Password>' + '<UserLevel xmlns="http://www.onvif.org/ver10/schema">' + user.userLevel + '</UserLevel>' + '</User>'; }); var body = this._envelopeHeader() + '<SetUser xmlns="http://www.onvif.org/ver10/device/wsdl">' + // Although SetUser is not plural, we can set multiple users at a time usersXml + '</SetUser>' + this._envelopeFooter(); this._request({ service: 'device' , body: body, }, function(err, data, xml) { if (callback) { if (err) { callback.call(this, err, data, xml); } else { var delayGet = false; users.some(function(usr) { if (usr.username === this.username) { delayGet = true; this.password = usr.password; return true; } }); if (delayGet) { // On some cameras, changing the user password we currently use is not effective right away setTimeout((function() {this.getUsers(callback);}).bind(this),2000); } else { this.getUsers(callback); } } } }.bind(this)); }; /** * Delete one or more users by their Username * @param {Array.<Cam~User> | Array.<string>} users * @param {Cam~GetUsersCallback} [callback] */ Cam.prototype.deleteUsers = function(users,callback) { var usernames = []; users.forEach(function(user) { if (typeof user == 'string' && user.length > 0) { usernames.push(user); } else if (typeof user == 'object' && user.username) { usernames.push(user.username); } }); if (!usernames.length) { return callback.call(this,new Error('No username'),null,null); } var usersXml = ''; usernames.forEach(function(username) { usersXml += '<Username>' + username + '</Username>'; }); var body = this._envelopeHeader() + '<DeleteUsers xmlns="http://www.onvif.org/ver10/device/wsdl">' + usersXml + '</DeleteUsers>' + this._envelopeFooter(); this._request({ service: 'device' , body: body, }, function(err, data, xml) { if (callback) { if (err) { callback.call(this, err, data, xml); } else { this.getUsers(callback); } } }.bind(this)); }; };
};
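// A minimal usage sketch (hedged: the host and credentials are placeholders,
// and this assumes the Cam class exported by the main onvif module): connect,
// configure a single manual IPv4 NTP server, then read the setting back.
//
//   const Cam = require('onvif').Cam;
//   const cam = new Cam({hostname: '192.168.0.90', username: 'admin', password: 'admin'}, function(err) {
//       if (err) { return console.error(err); }
//       this.setNTP({
//           fromDHCP: false,
//           NTPManual: [{type: 'IPv4', IPv4Address: '192.168.0.1'}]
//       }, function(err) {
//           if (!err) { this.getNTP(console.log); }
//       });
//   });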
test_acf.py
import statistical_modeling as sm from typing import Final import unittest class TestACF(unittest.TestCase): def test(self): s: Final = sm.Sample([1, 2, 3]) f: Final = 3
self.assertEqual(sm.ACF(s, f), sm.SampleACF(s, f))
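# A small convenience, assuming the module may also be run directly
# (e.g. `python test_acf.py`) rather than only via a test runner:
if __name__ == '__main__':
    unittest.main()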
linked_list.rs
use super::{align_up, Locked}; use alloc::alloc::{GlobalAlloc, Layout}; use core::{mem, ptr}; struct ListNode { size: usize, next: Option<&'static mut ListNode>, } impl ListNode { const fn new(size: usize) -> Self { ListNode { size, next: None } } fn start_addr(&self) -> usize { self as *const Self as usize } fn end_addr(&self) -> usize { self.start_addr() + self.size } } pub struct LinkedListAllocator { head: ListNode, } impl LinkedListAllocator { /// Creates an empty LinkedListAllocator. pub const fn new() -> Self { Self { head: ListNode::new(0), } } /// Initialize the allocator with the given heap bounds. /// /// This function is unsafe because the caller must guarantee that the given /// heap bounds are valid and that the heap is unused. This method must be /// called only once. pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) { self.add_free_region(heap_start, heap_size); } /// Adds the given memory region to the front of the list. unsafe fn add_free_region(&mut self, addr: usize, size: usize) { // ensure that the freed region is capable of holding ListNode assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr); assert!(size >= mem::size_of::<ListNode>()); // create a new list node and append it at the start of the list let mut node = ListNode::new(size); node.next = self.head.next.take(); let node_ptr = addr as *mut ListNode; node_ptr.write(node); self.head.next = Some(&mut *node_ptr) } /// Looks for a free region with the given size and alignment and removes /// it from the list. /// /// Returns a tuple of the list node and the start address of the allocation. fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> { // reference to current list node, updated for each iteration let mut current = &mut self.head; // look for a large enough memory region in linked list while let Some(ref mut region) = current.next { if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align)
else { // region not suitable -> continue with next region current = current.next.as_mut().unwrap(); } } // no suitable region found None } /// Try to use the given region for an allocation with given size and alignment. /// /// Returns the allocation start address on success. fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> { let alloc_start = align_up(region.start_addr(), align); let alloc_end = alloc_start.checked_add(size).ok_or(())?; if alloc_end > region.end_addr() { // region too small return Err(()); } let excess_size = region.end_addr() - alloc_end; if excess_size > 0 && excess_size < mem::size_of::<ListNode>() { // rest of region too small to hold a ListNode (required because the // allocation splits the region in a used and a free part) return Err(()); } // region suitable for allocation Ok(alloc_start) } /// Adjust the given layout so that the resulting allocated memory /// region is also capable of storing a `ListNode`. /// /// Returns the adjusted size and alignment as a (size, align) tuple. fn size_align(layout: Layout) -> (usize, usize) { let layout = layout .align_to(mem::align_of::<ListNode>()) .expect("adjusting alignment failed") .pad_to_align(); let size = layout.size().max(mem::size_of::<ListNode>()); (size, layout.align()) } } unsafe impl GlobalAlloc for Locked<LinkedListAllocator> { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { // perform layout adjustments let (size, align) = LinkedListAllocator::size_align(layout); let mut allocator = self.lock(); if let Some((region, alloc_start)) = allocator.find_region(size, align) { let alloc_end = alloc_start.checked_add(size).expect("overflow"); let excess_size = region.end_addr() - alloc_end; if excess_size > 0 { allocator.add_free_region(alloc_end, excess_size); } alloc_start as *mut u8 } else { ptr::null_mut() } } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { // perform layout adjustments let (size, _) = LinkedListAllocator::size_align(layout); self.lock().add_free_region(ptr as usize, size) } }
{ // region suitable for allocation -> remove node from list let next = region.next.take(); let ret = Some((current.next.take().unwrap(), alloc_start)); current.next = next; return ret; }
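// A minimal usage sketch (hedged: the heap bounds and call site are
// assumptions; `Locked::new` is the spinlock wrapper from the parent
// allocator module, which this file already imports). It registers the
// allocator as the global allocator and hands it the heap region once at boot:
//
//   #[global_allocator]
//   static ALLOCATOR: Locked<LinkedListAllocator> =
//       Locked::new(LinkedListAllocator::new());
//
//   pub fn init_heap(heap_start: usize, heap_size: usize) {
//       // Safety: the caller guarantees the range is valid, unused, and
//       // initialized only once -- see `LinkedListAllocator::init`.
//       unsafe { ALLOCATOR.lock().init(heap_start, heap_size); }
//   }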
03_join_responses_with_ip.py
""" This script joins: * the EventLogging (EL) data based on webrequest beacons (in my experience, most complete / simplest) * Google Forms survey responses * EditAttemptStep data based on hive tables There are two outputs for each language: * CSV w/ survey responses + EL details (e.g., datetime, pageID) + webrequest details (e.g., client-IP, user-agent) * CSV w/ all approximate userhashes for matching against webrequest logs """ import argparse import csv import os from geopy.distance import distance import pandas as pd # hacky way to make sure utils is visible import sys sys.path.append(os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../../..')) from src.utils import config def
(): parser = argparse.ArgumentParser() parser.add_argument("--el_logs_fn", default=config.quicksurvey_el_tsv, help="TSV with EventLogging data") parser.add_argument("--survey_req_fn", default=config.quicksurvey_requests_tsv, help="TSV with survey webrequests.") parser.add_argument("--editattempt_fn", default=config.edit_el_tsv, help="TSV filename for edit attempt data") parser.add_argument("--ids_dir", default=config.ids_folder, help="Folder to store survey respondent UserIDs") parser.add_argument("--languages", default=config.languages, nargs="*", help="List of languages to process") parser.add_argument("--responses_dir", default=config.responses_folder, help="Folder to hold survey responses + associated webrequest") parser.add_argument("--dist_threshold", default=config.ip_dist_threshold, help="Max distance in km between Geonames point and IP point for match.") parser.add_argument("--geonames_tsv", default=config.geonames_tsv, help="Geonames TSV file w/ place and population information.") args = parser.parse_args() requests = pd.read_csv(args.survey_req_fn, sep="\t") print("{0} total requests.".format(len(requests))) requests.drop_duplicates(inplace=True) requests.sort_values(by=['response_type'], ascending=False, inplace=True) requests.set_index('pageview_token', inplace=True) print("{0} requests from {1} unique users after removing duplicates.".format(len(requests), len(requests['userhash'].unique()))) map_ip_to_population(requests, args.geonames_tsv, args.dist_threshold) # edit_attempts = pd.read_csv(args.editattempt_fn, sep="\t") # print("{0} edit actions across {1} users.".format(len(edit_attempts), len(edit_attempts['userhash'].unique()))) # edit_attempts = edit_attempts.groupby('userhash').apply(group_edit_actions) if not os.path.isdir(args.ids_dir): print("Creating directory: {0}".format(os.path.abspath(args.ids_dir))) os.mkdir(args.ids_dir) if not os.path.isdir(args.responses_dir): print("Creating directory: {0}".format(os.path.abspath(args.responses_dir))) os.mkdir(args.responses_dir) all_ids = [] for lang in args.languages: recoded_fn = os.path.join(config.data_folder, "recoded", "responses_{0}_recoded.csv".format(lang)) surv_responses = pd.read_csv(recoded_fn, sep = '\t') surv_responses.set_index('survey_id', inplace=True) print("**********") print("Google Responses in {0}: {1}".format(lang, len(surv_responses))) # merge in quicksurveys eventlogging -- priority to yes to take survey, no to take survey, initiation srv_el_req = pd.merge(surv_responses, requests, how="left", left_index=True, right_index=True) srv_el_req = srv_el_req[~srv_el_req.index.duplicated(keep='first')] print("Breakdown of ability to match up Google responses with EL: (w/o initiation)") print(srv_el_req['response_type'].value_counts(dropna=False)) print("Breakdown of ability to match up Google responses with EL (w/ initiation):") print(srv_el_req['country'].value_counts(dropna=False)) # merge in edit attempt data # srv_el_req = srv_el_req.join(edit_attempts, how="left", on="userhash") # print("Responses w/ associated edit data (is anon):") # print(srv_el_req['is_anon'].value_counts(dropna=False)) # Write responses+EL+webrequest data to TSV output_merged_data = os.path.join(args.responses_dir, "responses_with_el_{0}.csv".format(lang)) srv_el_req.to_csv(output_merged_data, sep='\t') # Write userIDs associated with completed surveys to file output_respondent_ids = os.path.join(args.ids_dir, "ids_{0}.csv".format(lang)) ids = srv_el_req["userhash"] ids = ids.dropna() ids.to_csv(output_respondent_ids, 
index=False, header=False)
        print("Complete IDs:", len(ids))
        all_ids.extend(list(ids.values))

    if all_ids:
        with open(config.all_ids_csv, 'w') as fout:
            csvwriter = csv.writer(fout)
            for userhash in all_ids:
                csvwriter.writerow([userhash])


def group_edit_actions(user_data):
    is_anon = any(user_data['anon'])
    edit_count = user_data['user_edit'].value_counts().index[0]
    editor_interface = user_data['editor_interface'].value_counts().index[0]
    return pd.Series({'is_anon': is_anon, 'edit_count': edit_count, 'editor_interface': editor_interface})


def map_ip_to_population(df, geonames_tsv, dist_threshold):
    print("Loading geonames lookup")
    geonames = get_geonames_map(geonames_tsv)
    print("Calculating populations")
    df['population'] = df.apply(lambda x: lookup_row(x, geonames, dist_threshold=dist_threshold), axis=1)
    print("Success rate:", (df['population'] >= 1).sum() / df['population'].count())
    print("Breakdown of matches:", df['population'].apply(lambda x: 1 if x > 0 else x).value_counts(dropna=False))
    try:
        ipdump_fn = geonames_tsv.replace('.txt', '_ipmatch.tsv')
        df[['city', 'country_code', 'lat', 'lon', 'population']].to_csv(ipdump_fn, header=True, index=False, sep='\t')
        print("Dumped IP->population data to:", ipdump_fn)
    except Exception:
        print("Failed to dump IP->population data.")


def calc_dist(pt1, pt2):
    return distance(pt1, pt2).kilometers


def get_geonames_map(allcountries):
    geonames_header = ['geonameid', 'name', 'asciiname', 'alternatenames', 'latitude', 'longitude',
                       'feature class', 'feature code', 'country code', 'cc2', 'admin1 code', 'admin2 code',
                       'admin3 code', 'admin4 code', 'population', 'elevation', 'dem', 'timezone', 'modification date']
    country_idx = geonames_header.index('country code')
    pop_idx = geonames_header.index('population')
    lat_idx = geonames_header.index('latitude')
    lon_idx = geonames_header.index('longitude')
    name_idx = geonames_header.index('name')
    altname_idx = geonames_header.index('alternatenames')
    feature_idx = geonames_header.index('feature class')
    lookup = {}
    num_countries = 0
    num_places = 0
    num_pops = 0
    zero_pops = 0
    duplicates = 0
    with open(allcountries, 'r') as fin:
        tsvreader = csv.reader(fin, delimiter='\t')
        for line in tsvreader:
            feature = line[feature_idx]
            try:
                population = int(line[pop_idx])
            except ValueError:
                population = -1
            if (feature == 'A' and population >= 0) or feature == 'P':
                pt = (float(line[lat_idx]), float(line[lon_idx]))
                names = [line[name_idx]]
                if line[altname_idx]:
                    names.extend(line[altname_idx].split(','))
                country = line[country_idx]
                if country not in lookup:
                    num_countries += 1
                    lookup[country] = {}
                for n in names:
                    if n in lookup[country]:
                        if pt in lookup[country][n]:
                            existing_pop = lookup[country][n][pt]
                            if population <= 0:
                                # new record carries no better information (0 or unknown)
                                continue
                            elif existing_pop == population:
                                continue
                            elif existing_pop <= 0:
                                # prefer a known, non-zero population over 0 / unknown
                                lookup[country][n][pt] = population
                                num_pops += 1
                            else:
                                duplicates += 1
                        else:
                            lookup[country][n][pt] = population
                            num_places += 1
                            if num_places % 500000 == 0:
                                print(num_places, "added.")
                            if population >= 0:
                                num_pops += 1
                                if population == 0:
                                    zero_pops += 1
                    else:
                        lookup[country][n] = {pt: population}
                        num_places += 1
                        if num_places % 500000 == 0:
                            print(num_places, "added.")
                        if population >= 0:
                            num_pops += 1
                            if population == 0:
                                zero_pops += 1
    print("{0} countries. {1} places. {2} places w/ population. {3} w/ pop 0. 
{4} duplicates".format(
        num_countries, num_places, num_pops, zero_pops, duplicates))

    # add location-based lookup index for places w/ unknown cities but that still have points
    locs_to_add = {}
    for cc in lookup:
        for n in lookup[cc]:
            for loc in lookup[cc][n]:
                simple_loc = (int(loc[0]), int(loc[1]))
                if simple_loc not in locs_to_add:
                    locs_to_add[simple_loc] = set()
                locs_to_add[simple_loc].add((cc, n))
    for l in locs_to_add:
        lookup[l] = locs_to_add[l]

    return lookup


def lookup_row(x, geonames, dist_threshold):
    country = x['country_code']
    city = x['city']
    pt = (float(x['lat']), float(x['lon']))
    # no city info, use lat-lon as backup
    if city.lower() == "unknown":
        return lookup_pt(pt, geonames, dist_threshold)
    # use city to geocode and then lat-lon to filter
    else:
        try:
            candidates = geonames[country][city]
            within_thres = []
            # find all potential place matches
            for cpt, cpop in candidates.items():
                if calc_dist(pt, cpt) < dist_threshold:
                    within_thres.append(cpop)
            # return potential match with highest population (arbitrary choice but empirically seems to matter little)
            if within_thres:
                # Success: found a matching place w/i distance threshold
                # Possibilities:
                #   >0 == have a real population
                #    0 if geonames listed that
                #   -1 population if geonames didn't provide a number
                return max(within_thres)
            else:
                # found a matching name but was not close enough
                backup = lookup_pt(pt, geonames, dist_threshold)
                if backup > 0:
                    return backup
                else:
                    return -2
        except KeyError:
            # did not find a matching name
            return lookup_pt(pt, geonames, dist_threshold)


def lookup_pt(pt, geonames, dist_threshold):
    simple_pt = (int(pt[0]), int(pt[1]))
    closest_with_pop = float('inf')
    pop = -3
    for cc, name in geonames.get(simple_pt, []):
        # .items() is required here; iterating the dict directly would unpack
        # the (lat, lon) keys instead of (point, population) pairs
        for cpt, cpop in geonames[cc][name].items():
            if cpop > 0:
                cand_dist = calc_dist(pt, cpt)
                if cand_dist < dist_threshold and cand_dist < closest_with_pop:
                    closest_with_pop = cand_dist
                    pop = cpop
    return pop


if __name__ == "__main__":
    main()
main
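The lookup built by get_geonames_map nests country, then place name, then point, then population, plus coarse (int(lat), int(lon)) buckets for the point-only fallback in lookup_pt. A minimal sketch of that resolution path, with hypothetical data and a crude planar distance standing in for geopy's geodesic distance:

# Sketch of the geonames lookup structure and the matching logic.
# Data, coordinates, and the distance function are illustrative assumptions;
# the real script uses geopy.distance.distance and the full geonames dump.
import math

def approx_dist_km(pt1, pt2):
    # crude equirectangular approximation, good enough for a demo
    dlat = (pt1[0] - pt2[0]) * 111.0
    dlon = (pt1[1] - pt2[1]) * 111.0 * math.cos(math.radians(pt1[0]))
    return math.hypot(dlat, dlon)

lookup = {
    # country -> place name -> (lat, lon) -> population
    "DE": {"Berlin": {(52.52, 13.41): 3_644_826}},
    # coarse 1-degree bucket -> {(country, name)} for point-only fallback
    (52, 13): {("DE", "Berlin")},
}

row = {"country_code": "DE", "city": "Berlin", "lat": 52.51, "lon": 13.40}
pt = (row["lat"], row["lon"])
candidates = lookup[row["country_code"]][row["city"]]
within = [pop for cpt, pop in candidates.items() if approx_dist_km(pt, cpt) < 30]
print(max(within) if within else -2)  # -> 3644826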
ciliumegressnatpolicy.go
// SPDX-License-Identifier: Apache-2.0 // Copyright 2017-2021 Authors of Cilium // Code generated by lister-gen. DO NOT EDIT. package v2alpha1 import ( v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) // CiliumEgressNATPolicyLister helps list CiliumEgressNATPolicies. // All objects returned here must be treated as read-only. type CiliumEgressNATPolicyLister interface { // List lists all CiliumEgressNATPolicies in the indexer. // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v2alpha1.CiliumEgressNATPolicy, err error) // Get retrieves the CiliumEgressNATPolicy from the index for a given name. // Objects returned here must be treated as read-only. Get(name string) (*v2alpha1.CiliumEgressNATPolicy, error) CiliumEgressNATPolicyListerExpansion } // ciliumEgressNATPolicyLister implements the CiliumEgressNATPolicyLister interface. type ciliumEgressNATPolicyLister struct { indexer cache.Indexer } // NewCiliumEgressNATPolicyLister returns a new CiliumEgressNATPolicyLister. func NewCiliumEgressNATPolicyLister(indexer cache.Indexer) CiliumEgressNATPolicyLister { return &ciliumEgressNATPolicyLister{indexer: indexer} } // List lists all CiliumEgressNATPolicies in the indexer. func (s *ciliumEgressNATPolicyLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumEgressNATPolicy, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { ret = append(ret, m.(*v2alpha1.CiliumEgressNATPolicy)) }) return ret, err }
// Get retrieves the CiliumEgressNATPolicy from the index for a given name. func (s *ciliumEgressNATPolicyLister) Get(name string) (*v2alpha1.CiliumEgressNATPolicy, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(v2alpha1.Resource("ciliumegressnatpolicy"), name) } return obj.(*v2alpha1.CiliumEgressNATPolicy), nil }
svc_init.go
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "strconv" "github.com/aws/copilot-cli/internal/pkg/aws/sessions" "github.com/aws/copilot-cli/internal/pkg/config" "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation" "github.com/aws/copilot-cli/internal/pkg/exec" "github.com/aws/copilot-cli/internal/pkg/initialize" "github.com/aws/copilot-cli/internal/pkg/manifest" "github.com/aws/copilot-cli/internal/pkg/term/color" "github.com/aws/copilot-cli/internal/pkg/term/log" termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress" "github.com/aws/copilot-cli/internal/pkg/term/prompt" "github.com/aws/copilot-cli/internal/pkg/term/selector" "github.com/aws/copilot-cli/internal/pkg/workspace" "github.com/spf13/afero" "github.com/spf13/cobra" ) const ( wkldInitImagePrompt = `What's the location of the image to use?` wkldInitImagePromptHelp = `The name of an existing Docker image. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest` wkldInitAppRunnerImagePromptHelp = `The name of an existing Docker image. App Runner supports images hosted in ECR or ECR Public registries.` ) const ( defaultSvcPortString = "80" service = "service" ) var ( fmtSvcInitSvcTypePrompt = "Which %s best represents your service's architecture?" fmtSvcInitSvcTypeHelpPrompt = `A %s is an internet-facing HTTP server managed by AWS App Runner that scales based on incoming requests. To learn more see: https://git.io/Jt2UC A %s is an internet-facing HTTP server managed by Amazon ECS on AWS Fargate behind a load balancer. To learn more see: https://git.io/JfIpv A %s is a private, non internet-facing service accessible from other services in your VPC. To learn more see: https://git.io/JfIpT` fmtWkldInitNamePrompt = "What do you want to %s this %s?" fmtWkldInitNameHelpPrompt = `The name will uniquely identify this %s within your app %s. Deployed resources (such as your ECR repository, logs) will contain this %[1]s's name and be tagged with it.` fmtWkldInitDockerfilePrompt = "Which " + color.Emphasize("Dockerfile") + " would you like to use for %s?" wkldInitDockerfileHelpPrompt = "Dockerfile to use for building your container image." fmtWkldInitDockerfilePathPrompt = "What is the path to the " + color.Emphasize("Dockerfile") + " for %s?" wkldInitDockerfilePathHelpPrompt = "Path to Dockerfile to use for building your container image." svcInitSvcPortPrompt = "Which %s do you want customer traffic sent to?" svcInitSvcPortHelpPrompt = `The port will be used by the load balancer to route incoming traffic to this service. You should set this to the port which your Dockerfile uses to communicate with the internet.` ) var serviceTypeHints = map[string]string{ manifest.RequestDrivenWebServiceType: "App Runner", manifest.LoadBalancedWebServiceType: "Internet to ECS on Fargate", manifest.BackendServiceType: "ECS on Fargate", } type initWkldVars struct { appName string wkldType string name string dockerfilePath string image string } type initSvcVars struct { initWkldVars port uint16 } type initSvcOpts struct { initSvcVars // Interfaces to interact with dependencies. fs afero.Fs init svcInitializer prompt prompter dockerEngineValidator dockerEngineValidator sel dockerfileSelector // Outputs stored on successful actions. 
manifestPath string // Cache variables df dockerfileParser // Init a Dockerfile parser using fs and input path dockerfile func(string) dockerfileParser } func newInitSvcOpts(vars initSvcVars) (*initSvcOpts, error) { store, err := config.NewStore() if err != nil { return nil, fmt.Errorf("couldn't connect to config store: %w", err) } ws, err := workspace.New() if err != nil { return nil, fmt.Errorf("workspace cannot be created: %w", err) } p := sessions.NewProvider() sess, err := p.Default() if err != nil { return nil, err } prompter := prompt.New() sel := selector.NewWorkspaceSelect(prompter, store, ws) initSvc := &initialize.WorkloadInitializer{ Store: store, Ws: ws, Prog: termprogress.NewSpinner(log.DiagnosticWriter), Deployer: cloudformation.New(sess), } fs := &afero.Afero{Fs: afero.NewOsFs()} opts := &initSvcOpts{ initSvcVars: vars, fs: fs, init: initSvc, prompt: prompter, sel: sel, dockerEngineValidator: exec.NewDockerCommand(), } opts.dockerfile = func(path string) dockerfileParser { if opts.df != nil { return opts.df } opts.df = exec.NewDockerfile(opts.fs, opts.dockerfilePath) return opts.df } return opts, nil } // Validate returns an error if the flag values passed by the user are invalid. func (o *initSvcOpts) Validate() error { if o.appName == "" { return errNoAppInWorkspace } if o.wkldType != "" { if err := validateSvcType(o.wkldType); err != nil { return err } } if o.name != "" { if err := validateSvcName(o.name, o.wkldType); err != nil { return err } } if o.dockerfilePath != "" && o.image != "" { return fmt.Errorf("--%s and --%s cannot be specified together", dockerFileFlag, imageFlag) } if o.dockerfilePath != "" { if _, err := o.fs.Stat(o.dockerfilePath); err != nil { return err } } if o.port != 0 { if err := validateSvcPort(o.port); err != nil { return err } } if o.image != "" && o.wkldType == manifest.RequestDrivenWebServiceType { if err := validateAppRunnerImage(o.image); err != nil { return err } } return nil } // Ask prompts for fields that are required but not passed in. func (o *initSvcOpts) Ask() error { if err := o.askSvcType(); err != nil { return err } if err := o.askSvcName(); err != nil { return err } dfSelected, err := o.askDockerfile() if err != nil { return err } if !dfSelected { if err := o.askImage(); err != nil { return err } } if err := o.askSvcPort(); err != nil { return err } return nil } // Execute writes the service's manifest file and stores the service in SSM. func (o *initSvcOpts) Execute() error { // Check for a valid healthcheck and add it to the opts. var hc *manifest.ContainerHealthCheck var err error if o.dockerfilePath != "" { hc, err = parseHealthCheck(o.dockerfile(o.dockerfilePath)) if err != nil { return fmt.Errorf("parse dockerfile %s: %w", o.dockerfilePath, err) } } manifestPath, err := o.init.Service(&initialize.ServiceProps{ WorkloadProps: initialize.WorkloadProps{ App: o.appName, Name: o.name, Type: o.wkldType, DockerfilePath: o.dockerfilePath, Image: o.image, }, Port: o.port, HealthCheck: hc, }) if err != nil { return err } o.manifestPath = manifestPath return nil } // RecommendedActions returns follow-up actions the user can take after successfully executing the command. 
func (o *initSvcOpts) RecommendedActions() []string { return []string{ fmt.Sprintf("Update your manifest %s to change the defaults.", color.HighlightResource(o.manifestPath)), fmt.Sprintf("Run %s to deploy your service to a %s environment.", color.HighlightCode(fmt.Sprintf("copilot svc deploy --name %s --env %s", o.name, defaultEnvironmentName)), defaultEnvironmentName), } } func (o *initSvcOpts) askSvcType() error { if o.wkldType != "" { return nil } help := fmt.Sprintf(fmtSvcInitSvcTypeHelpPrompt, manifest.RequestDrivenWebServiceType, manifest.LoadBalancedWebServiceType, manifest.BackendServiceType, ) msg := fmt.Sprintf(fmtSvcInitSvcTypePrompt, color.Emphasize("service type")) t, err := o.prompt.SelectOption(msg, help, svcTypePromptOpts(), prompt.WithFinalMessage("Service type:")) if err != nil { return fmt.Errorf("select service type: %w", err) } o.wkldType = t return nil } func (o *initSvcOpts) askSvcName() error { if o.name != "" { return nil } name, err := o.prompt.Get( fmt.Sprintf(fmtWkldInitNamePrompt, color.Emphasize("name"), color.HighlightUserInput(o.wkldType)), fmt.Sprintf(fmtWkldInitNameHelpPrompt, service, o.appName), func(val interface{}) error { return validateSvcName(val, o.wkldType) }, prompt.WithFinalMessage("Service name:")) if err != nil { return fmt.Errorf("get service name: %w", err) } o.name = name return nil } func (o *initSvcOpts) askImage() error { if o.image != "" { return nil } var validator prompt.ValidatorFunc promptHelp := wkldInitImagePromptHelp if o.wkldType == manifest.RequestDrivenWebServiceType { promptHelp = wkldInitAppRunnerImagePromptHelp validator = validateAppRunnerImage } image, err := o.prompt.Get( wkldInitImagePrompt, promptHelp, validator, prompt.WithFinalMessage("Image:"), ) if err != nil { return fmt.Errorf("get image location: %w", err) } o.image = image return nil } // isDfSelected indicates if any Dockerfile is in use. func (o *initSvcOpts) askDockerfile() (isDfSelected bool, err error) { if o.dockerfilePath != "" || o.image != "" { return true, nil } if err = o.dockerEngineValidator.CheckDockerEngineRunning(); err != nil { var errDaemon *exec.ErrDockerDaemonNotResponsive switch { case errors.Is(err, exec.ErrDockerCommandNotFound): log.Info("Docker command is not found; Copilot won't build from a Dockerfile.\n") return false, nil case errors.As(err, &errDaemon): log.Info("Docker daemon is not responsive; Copilot won't build from a Dockerfile.\n") return false, nil default: return false, fmt.Errorf("check if docker engine is running: %w", err) } } df, err := o.sel.Dockerfile( fmt.Sprintf(fmtWkldInitDockerfilePrompt, color.HighlightUserInput(o.name)), fmt.Sprintf(fmtWkldInitDockerfilePathPrompt, color.HighlightUserInput(o.name)), wkldInitDockerfileHelpPrompt, wkldInitDockerfilePathHelpPrompt, func(v interface{}) error { return validatePath(afero.NewOsFs(), v) }, ) if err != nil { return false, fmt.Errorf("select Dockerfile: %w", err) } if df == selector.DockerfilePromptUseImage { return false, nil } o.dockerfilePath = df return true, nil } func (o *initSvcOpts) askSvcPort() (err error) { // If the port flag was set, use that and don't ask. if o.port != 0 { return nil } var ports []uint16 if o.dockerfilePath != "" && o.image == "" { // Check for exposed ports. ports, err = o.dockerfile(o.dockerfilePath).GetExposedPorts() // Ignore any errors in dockerfile parsing--we'll use the default port instead. 
if err != nil { log.Debugln(err.Error()) } } defaultPort := defaultSvcPortString if o.dockerfilePath != "" { switch len(ports) { case 0: // There were no ports detected, keep the default port prompt. case 1: o.port = ports[0]
defaultPort = strconv.Itoa(int(ports[0]))
		}
	}
	// Skip asking if it is a backend service.
	if o.wkldType == manifest.BackendServiceType {
		return nil
	}
	port, err := o.prompt.Get(
		fmt.Sprintf(svcInitSvcPortPrompt, color.Emphasize("port")),
		svcInitSvcPortHelpPrompt,
		validateSvcPort,
		prompt.WithDefaultInput(defaultPort),
		prompt.WithFinalMessage("Port:"),
	)
	if err != nil {
		return fmt.Errorf("get port: %w", err)
	}
	portUint, err := strconv.ParseUint(port, 10, 16)
	if err != nil {
		return fmt.Errorf("parse port string: %w", err)
	}
	o.port = uint16(portUint)
	return nil
}

func parseHealthCheck(df dockerfileParser) (*manifest.ContainerHealthCheck, error) {
	hc, err := df.GetHealthCheck()
	if err != nil {
		return nil, fmt.Errorf("get healthcheck: %w", err)
	}
	if hc == nil {
		return nil, nil
	}
	return &manifest.ContainerHealthCheck{
		Interval:    &hc.Interval,
		Timeout:     &hc.Timeout,
		StartPeriod: &hc.StartPeriod,
		Retries:     &hc.Retries,
		Command:     hc.Cmd,
	}, nil
}

func svcTypePromptOpts() []prompt.Option {
	var options []prompt.Option
	for _, svcType := range manifest.ServiceTypes {
		options = append(options, prompt.Option{
			Value: svcType,
			Hint:  serviceTypeHints[svcType],
		})
	}
	return options
}

// buildSvcInitCmd builds the command for creating a new service.
func buildSvcInitCmd() *cobra.Command {
	vars := initSvcVars{}
	cmd := &cobra.Command{
		Use:   "init",
		Short: "Creates a new service in an application.",
		Long: `Creates a new service in an application.
This command is also run as part of "copilot init".`,
		Example: `
  Create a "frontend" load balanced web service.
  /code $ copilot svc init --name frontend --svc-type "Load Balanced Web Service" --dockerfile ./frontend/Dockerfile

  Create a "subscribers" backend service.
  /code $ copilot svc init --name subscribers --svc-type "Backend Service"`,
		RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
			opts, err := newInitSvcOpts(vars)
			if err != nil {
				return err
			}
			if err := opts.Validate(); err != nil { // validate flags
				return err
			}
			log.Warningln("It's best to run this command in the root of your workspace.")
			if err := opts.Ask(); err != nil {
				return err
			}
			if err := opts.Execute(); err != nil {
				return err
			}
			log.Infoln("Recommended follow-up actions:")
			for _, followup := range opts.RecommendedActions() {
				log.Infof("- %s\n", followup)
			}
			return nil
		}),
	}
	cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
	cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", svcFlagDescription)
	cmd.Flags().StringVarP(&vars.wkldType, svcTypeFlag, typeFlagShort, "", svcTypeFlagDescription)
	cmd.Flags().StringVarP(&vars.dockerfilePath, dockerFileFlag, dockerFileFlagShort, "", dockerFileFlagDescription)
	cmd.Flags().StringVarP(&vars.image, imageFlag, imageFlagShort, "", imageFlagDescription)
	cmd.Flags().Uint16Var(&vars.port, svcPortFlag, 0, svcPortFlagDescription)
	return cmd
}
return nil default:
feed.go
package youtube import ( "context" "errors" "fmt" "strconv" "sync" "time" "github.com/botlabs-gg/yagpdb/v2/analytics" "github.com/botlabs-gg/yagpdb/v2/common" "github.com/botlabs-gg/yagpdb/v2/common/mqueue" "github.com/botlabs-gg/yagpdb/v2/feeds" "github.com/botlabs-gg/yagpdb/v2/lib/discordgo" "github.com/mediocregopher/radix/v3" "github.com/prometheus/client_golang/prometheus" "golang.org/x/oauth2/google" "google.golang.org/api/youtube/v3" ) const ( MaxChannelsPerPoll = 30 PollInterval = time.Second * 10 WebSubCheckInterval = time.Second * 10 // PollInterval = time.Second * 5 // <- used for debug purposes ) func (p *Plugin) StartFeed() { p.Stop = make(chan *sync.WaitGroup) p.runWebsubChecker() } func (p *Plugin) StopFeed(wg *sync.WaitGroup) { if p.Stop != nil { p.Stop <- wg } else { wg.Done() } } func (p *Plugin) SetupClient() error { httpClient, err := google.DefaultClient(context.Background(), youtube.YoutubeScope) if err != nil { return common.ErrWithCaller(err) } yt, err := youtube.New(httpClient) if err != nil { return common.ErrWithCaller(err) } p.YTService = yt return nil } // keeps the subscriptions up to date by updating the ones soon to be expiring func (p *Plugin) runWebsubChecker() { p.syncWebSubs() websubTicker := time.NewTicker(WebSubCheckInterval) for { select { case wg := <-p.Stop: wg.Done() return case <-websubTicker.C: p.checkExpiringWebsubs() } } } func (p *Plugin) checkExpiringWebsubs() { err := common.BlockingLockRedisKey(RedisChannelsLockKey, 0, 10) if err != nil { logger.WithError(err).Error("Failed locking channels lock") return } defer common.UnlockRedisKey(RedisChannelsLockKey) maxScore := time.Now().Unix() var expiring []string err = common.RedisPool.Do(radix.FlatCmd(&expiring, "ZRANGEBYSCORE", RedisKeyWebSubChannels, "-inf", maxScore)) if err != nil { logger.WithError(err).Error("Failed checking websubs") return } for _, v := range expiring { err := p.WebSubSubscribe(v) if err != nil { logger.WithError(err).WithField("yt_channel", v).Error("Failed subscribing to channel") } time.Sleep(time.Second) } } func (p *Plugin) syncWebSubs() { var activeChannels []string err := common.SQLX.Select(&activeChannels, "SELECT DISTINCT(youtube_channel_id) FROM youtube_channel_subscriptions;") if err != nil { logger.WithError(err).Error("Failed syncing websubs, failed retrieving subbed channels") return } common.RedisPool.Do(radix.WithConn(RedisKeyWebSubChannels, func(client radix.Conn) error { locked := false for _, channel := range activeChannels { if !locked { err := common.BlockingLockRedisKey(RedisChannelsLockKey, 0, 5000) if err != nil { logger.WithError(err).Error("Failed locking channels lock") return err } locked = true } mn := radix.MaybeNil{} client.Do(radix.Cmd(&mn, "ZSCORE", RedisKeyWebSubChannels, channel)) if mn.Nil { // Not added err := p.WebSubSubscribe(channel) if err != nil { logger.WithError(err).WithField("yt_channel", channel).Error("Failed subscribing to channel") } common.UnlockRedisKey(RedisChannelsLockKey) locked = false time.Sleep(time.Second) } } if locked { common.UnlockRedisKey(RedisChannelsLockKey) } return nil })) } func (p *Plugin) removeAllSubsForChannel(channel string) { err := common.GORM.Where("youtube_channel_id = ?", channel).Delete(ChannelSubscription{}).Error if err != nil { logger.WithError(err).WithField("yt_channel", channel).Error("failed removing channel") } go p.MaybeRemoveChannelWatch(channel) } func (p *Plugin) sendNewVidMessage(guild, discordChannel string, channelTitle string, videoID string, mentionEveryone bool) { content := 
fmt.Sprintf("**%s** uploaded a new youtube video!\n%s", channelTitle, "https://www.youtube.com/watch?v="+videoID) if mentionEveryone { content += " @everyone" } parsedChannel, _ := strconv.ParseInt(discordChannel, 10, 64) parsedGuild, _ := strconv.ParseInt(guild, 10, 64) parseMentions := []discordgo.AllowedMentionType{} if mentionEveryone { parseMentions = []discordgo.AllowedMentionType{discordgo.AllowedMentionTypeEveryone} } go analytics.RecordActiveUnit(parsedGuild, p, "posted_youtube_message") feeds.MetricPostedMessages.With(prometheus.Labels{"source": "youtube"}).Inc() mqueue.QueueMessage(&mqueue.QueuedElement{ GuildID: parsedGuild, ChannelID: parsedChannel, Source: "youtube", SourceItemID: "", MessageStr: content, Priority: 2, AllowedMentions: discordgo.AllowedMentions{ Parse: parseMentions, }, }) } var ( ErrIDNotFound = errors.New("ID not found") ) func SubsForChannel(channel string) (result []*ChannelSubscription, err error) { err = common.GORM.Where("youtube_channel_id = ?", channel).Find(&result).Error return } var ( ErrNoChannel = errors.New("No channel with that id found") ) func (p *Plugin) AddFeed(guildID, discordChannelID int64, youtubeChannelID, youtubeUsername string, mentionEveryone bool) (*ChannelSubscription, error) { sub := &ChannelSubscription{ GuildID: discordgo.StrID(guildID), ChannelID: discordgo.StrID(discordChannelID), MentionEveryone: mentionEveryone, Enabled: true, } call := p.YTService.Channels.List([]string{"snippet"}) if youtubeChannelID != "" { call = call.Id(youtubeChannelID) } else { call = call.ForUsername(youtubeUsername) } cResp, err := call.Do() if err != nil { return nil, common.ErrWithCaller(err) } if len(cResp.Items) < 1 { return nil, ErrNoChannel } sub.YoutubeChannelName = cResp.Items[0].Snippet.Title sub.YoutubeChannelID = cResp.Items[0].Id err = common.BlockingLockRedisKey(RedisChannelsLockKey, 0, 10) if err != nil { return nil, err } defer common.UnlockRedisKey(RedisChannelsLockKey) err = common.GORM.Create(sub).Error if err != nil { return nil, err } err = p.MaybeAddChannelWatch(false, sub.YoutubeChannelID) return sub, err } // maybeRemoveChannelWatch checks the channel for subs, if it has none then it removes it from the watchlist in redis. 
func (p *Plugin) MaybeRemoveChannelWatch(channel string) {
	err := common.BlockingLockRedisKey(RedisChannelsLockKey, 0, 10)
	if err != nil {
		return
	}
	defer common.UnlockRedisKey(RedisChannelsLockKey)

	var count int
	err = common.GORM.Model(&ChannelSubscription{}).Where("youtube_channel_id = ?", channel).Count(&count).Error
	if err != nil || count > 0 {
		if err != nil {
			logger.WithError(err).WithField("yt_channel", channel).Error("Failed getting sub count")
		}
		return
	}

	err = common.MultipleCmds(
		radix.Cmd(nil, "DEL", KeyLastVidTime(channel)),
		radix.Cmd(nil, "DEL", KeyLastVidID(channel)),
		radix.Cmd(nil, "ZREM", RedisKeyWebSubChannels, channel),
	)
	if err != nil {
		return
	}

	err = p.WebSubUnsubscribe(channel)
	if err != nil {
		logger.WithError(err).Error("Failed unsubscribing from channel ", channel)
	}

	logger.WithField("yt_channel", channel).Info("Removed orphaned youtube channel from subbed channel sorted set")
}

// MaybeAddChannelWatch adds a channel watch to redis if there wasn't one before.
func (p *Plugin) MaybeAddChannelWatch(lock bool, channel string) error {
	if lock {
		err := common.BlockingLockRedisKey(RedisChannelsLockKey, 0, 10)
		if err != nil {
			return common.ErrWithCaller(err)
		}
		defer common.UnlockRedisKey(RedisChannelsLockKey)
	}

	now := time.Now().Unix()

	mn := radix.MaybeNil{}
	err := common.RedisPool.Do(radix.Cmd(&mn, "ZSCORE", RedisKeyWebSubChannels, channel))
	if err != nil {
		return err
	}

	if !mn.Nil {
		// Websub subscription already active, don't do anything more
		return nil
	}

	err = common.RedisPool.Do(radix.FlatCmd(nil, "SET", KeyLastVidTime(channel), now))
	if err != nil {
		return err
	}

	// Also add websub subscription
	err = p.WebSubSubscribe(channel)
	if err != nil {
		logger.WithError(err).Error("Failed subscribing to channel ", channel)
	}

	logger.WithField("yt_channel", channel).Info("Added new youtube channel watch")
	return nil
}

func (p *Plugin) CheckVideo(videoID string, channelID string) error {
	subs, err := p.getRemoveSubs(channelID)
	if err != nil || len(subs) < 1
lastVid, lastVidTime, err := p.getLastVidTimes(channelID) if err != nil { return err } if lastVid == videoID { // the video was already posted and was probably just edited return nil } resp, err := p.YTService.Videos.List([]string{"snippet"}).Id(videoID).Do() if err != nil || len(resp.Items) < 1 { return err } item := resp.Items[0] if item.Snippet.LiveBroadcastContent != "none" { // ignore livestreams for now, might enable them at some point return nil } parsedPublishedAt, err := time.Parse(time.RFC3339, item.Snippet.PublishedAt) if err != nil { return errors.New("Failed parsing youtube timestamp: " + err.Error() + ": " + item.Snippet.PublishedAt) } if time.Since(parsedPublishedAt) > time.Hour { // just a safeguard against empty lastVidTime's return nil } if lastVidTime.After(parsedPublishedAt) { // wasn't a new vid return nil } // This is a new video, post it return p.postVideo(subs, parsedPublishedAt, item, channelID) } func (p *Plugin) postVideo(subs []*ChannelSubscription, publishedAt time.Time, video *youtube.Video, channelID string) error { err := common.MultipleCmds( radix.FlatCmd(nil, "SET", KeyLastVidTime(channelID), publishedAt.Unix()), radix.FlatCmd(nil, "SET", KeyLastVidID(channelID), video.Id), ) if err != nil { return err } for _, sub := range subs { if sub.Enabled { p.sendNewVidMessage(sub.GuildID, sub.ChannelID, video.Snippet.ChannelTitle, video.Id, sub.MentionEveryone) } } return nil } func (p *Plugin) getRemoveSubs(channelID string) ([]*ChannelSubscription, error) { var subs []*ChannelSubscription err := common.GORM.Where("youtube_channel_id = ?", channelID).Find(&subs).Error if err != nil { return subs, err } if len(subs) < 1 { time.AfterFunc(time.Second*10, func() { p.MaybeRemoveChannelWatch(channelID) }) return subs, nil } return subs, nil } func (p *Plugin) getLastVidTimes(channelID string) (lastVid string, lastVidTime time.Time, err error) { // Find the last video time for this channel var unixSeconds int64 err = common.RedisPool.Do(radix.Cmd(&unixSeconds, "GET", KeyLastVidTime(channelID))) var lastProcessedVidTime time.Time if err != nil || unixSeconds == 0 { lastProcessedVidTime = time.Time{} } else { lastProcessedVidTime = time.Unix(unixSeconds, 0) } var lastVidID string err = common.RedisPool.Do(radix.Cmd(&lastVidID, "GET", KeyLastVidID(channelID))) return lastVidID, lastProcessedVidTime, err }
{ return err }
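The websub bookkeeping above keeps one Redis sorted set where each member is a YouTube channel ID and the score is the lease-expiry timestamp; checkExpiringWebsubs simply asks for all members with score at or below "now". A minimal sketch of the same pattern in Python with redis-py; the key name, member values, and lease length are illustrative assumptions, not YAGPDB's:

# Sketch of the sorted-set lease pattern used above, using redis-py.
import time
import redis

r = redis.Redis()
KEY = "websub_channels"  # member = channel id, score = lease expiry (unix seconds)

def subscribe(channel: str, lease_seconds: int = 3600) -> None:
    # after a successful (re)subscription, record when the lease runs out
    r.zadd(KEY, {channel: time.time() + lease_seconds})

def renew_expiring() -> None:
    # everything whose lease expiry is <= now needs to be resubscribed
    for channel in r.zrangebyscore(KEY, "-inf", time.time()):
        subscribe(channel.decode())

subscribe("UC123")
renew_expiring()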
problem_10.rs
use std::thread; const MAX_VALUE: i64 = 2_000_000; const THREADS: i64 = 50; fn is_prime(n: i64) -> bool { if n == 2 { return true; } if n < 2 || n % 2 == 0 { return false; } for i in (3..=((n as f64).sqrt() as i64)).step_by(2) { if n % i == 0 { return false; } } true } fn
(mut from: i64, to: i64) -> i64 {
    let mut total: i64 = 0;

    while from <= to {
        if is_prime(from) {
            total += from;
        }
        from += 1;
    }

    total
}

fn main() {
    let mut children = vec![];
    let size = MAX_VALUE / THREADS;
    let mut actual = 1;

    // Split the range into THREADS equal chunks and sum each chunk in its own thread.
    for _ in 0..THREADS {
        children.push(thread::spawn(move || {
            println!("Calculating from {} to {}", actual, (actual + size) - 1);
            calculate(actual, (actual + size) - 1)
        }));
        actual += size;
    }

    // Join all workers and add up their partial sums.
    let total: i64 = children
        .into_iter()
        .map(|child| child.join().unwrap())
        .sum();

    println!("Total: {}", total);
}
calculate
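If the filename's hint is right and this is Project Euler problem 10, the threaded sum should come out to 142913828922 (2_000_000 itself is not prime, so including the upper bound changes nothing). A quick single-threaded cross-check with a sieve of Eratosthenes, a different algorithm than the trial division used above:

# Cross-check of the threaded result with a simple sieve of Eratosthenes.
LIMIT = 2_000_000
sieve = bytearray([1]) * LIMIT
sieve[0] = sieve[1] = 0
for i in range(2, int(LIMIT ** 0.5) + 1):
    if sieve[i]:
        # mark all multiples of i starting at i*i as composite
        sieve[i * i::i] = bytearray(len(sieve[i * i::i]))
print(sum(i for i, p in enumerate(sieve) if p))  # 142913828922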
index.ts
export { default as KeyPerformance } from './key_performance' export {
default as Notifications, Notification, Root as StaticNotifications, Transaction as NotificationTransaction } from './notifications' export { default as TodaysActivity } from './todays_activity' export { default as Report } from './report' export { PSP, PspType } from './psp' export { RefundOverview, PaymentMethodOverview } from './charts_overview' export { TransactionAction, TransactionStatus } from './transaction'
outgoingcallerids.go
package twilio import ( "context" "net/url" ) const callerIDPathPart = "OutgoingCallerIds" type OutgoingCallerIDService struct { client *Client } type OutgoingCallerID struct { Sid string `json:"sid"` FriendlyName string `json:"friendly_name"` PhoneNumber PhoneNumber `json:"phone_number"` AccountSid string `json:"account_sid"` DateCreated TwilioTime `json:"date_created"` DateUpdated TwilioTime `json:"date_updated"` URI string `json:"uri"` } type CallerIDRequest struct { AccountSid string `json:"account_sid"` PhoneNumber PhoneNumber `json:"phone_number"` FriendlyName string `json:"friendly_name"` // Usually six digits, but a string to avoid stripping leading 0's ValidationCode string `json:"validation_code"`
Page OutgoingCallerIDs []*OutgoingCallerID `json:"outgoing_caller_ids"` } // Create a new OutgoingCallerID. Note the ValidationCode is only returned in // response to a Create, you can't retrieve it later. // // https://www.twilio.com/docs/api/rest/outgoing-caller-ids#list-post func (c *OutgoingCallerIDService) Create(ctx context.Context, data url.Values) (*CallerIDRequest, error) { id := new(CallerIDRequest) err := c.client.CreateResource(ctx, callerIDPathPart, data, id) return id, err } func (o *OutgoingCallerIDService) Get(ctx context.Context, sid string) (*OutgoingCallerID, error) { id := new(OutgoingCallerID) err := o.client.GetResource(ctx, callerIDPathPart, sid, id) return id, err } func (o *OutgoingCallerIDService) GetPage(ctx context.Context, data url.Values) (*OutgoingCallerIDPage, error) { op := new(OutgoingCallerIDPage) err := o.client.ListResource(ctx, callerIDPathPart, data, op) return op, err } // Update the caller ID with the given data. Valid parameters may be found here: // https://www.twilio.com/docs/api/rest/outgoing-caller-ids#list func (o *OutgoingCallerIDService) Update(ctx context.Context, sid string, data url.Values) (*OutgoingCallerID, error) { id := new(OutgoingCallerID) err := o.client.UpdateResource(ctx, callerIDPathPart, sid, data, id) return id, err } // Delete the Caller ID with the given sid. If the ID has already been deleted, // or does not exist, Delete returns nil. If another error or a timeout occurs, // the error is returned. func (o *OutgoingCallerIDService) Delete(ctx context.Context, sid string) error { return o.client.DeleteResource(ctx, callerIDPathPart, sid) } // OutgoingCallerIDPageIterator lets you retrieve consecutive pages of resources. type OutgoingCallerIDPageIterator struct { p *PageIterator } // GetPageIterator returns a OutgoingCallerIDPageIterator with the given page // filters. Call iterator.Next() to get the first page of resources (and again // to retrieve subsequent pages). func (o *OutgoingCallerIDService) GetPageIterator(data url.Values) *OutgoingCallerIDPageIterator { iter := NewPageIterator(o.client, data, callerIDPathPart) return &OutgoingCallerIDPageIterator{ p: iter, } } // Next returns the next page of resources. If there are no more resources, // NoMoreResults is returned. func (o *OutgoingCallerIDPageIterator) Next(ctx context.Context) (*OutgoingCallerIDPage, error) { op := new(OutgoingCallerIDPage) err := o.p.Next(ctx, op) if err != nil { return nil, err } o.p.SetNextPageURI(op.NextPageURI) return op, nil }
CallSid string `json:"call_sid"` } type OutgoingCallerIDPage struct {
mod.rs
use super::{load::TransformedModule, Bundler};
use crate::{id::ModuleId, load::Load, resolve::Resolve, util::IntoParallelIterator, Bundle};
use anyhow::{Context, Error};
#[cfg(feature = "rayon")]
use rayon::iter::ParallelIterator;
use std::collections::{HashMap, HashSet};
use swc_ecma_transforms::{hygiene, optimization::simplify::dce};
use swc_ecma_visit::FoldWith;

mod circular;
mod cjs;
mod export;
mod merge;
mod plan;

#[derive(Debug)]
struct InternalEntry {
    basename: String,
    main: TransformedModule,
    included: Vec<ModuleId>,
    dynamic: bool,
}

#[derive(Debug, Default)]
struct State {
    synchronously_included: HashSet<ModuleId>,
    dynamic_entries: HashSet<ModuleId>,
    common_libs: HashSet<ModuleId>,
}

impl<L, R> Bundler<'_, L, R>
where
    L: Load,
    R: Resolve,
{
    /// `entries` - Entry modules (provided by the user), keyed by their basenames.
    ///
    /// # How it works
    ///
    /// First, we load all dependencies and determine all entries.
    pub(super) fn chunk(
        &self,
        entries: HashMap<String, TransformedModule>,
    ) -> Result<Vec<Bundle>, Error> {
        let plan = self.determine_entries(entries).context("failed to plan")?;

        Ok((&*plan.entries)
            .into_par_iter()
            .map(|&entry| {
                self.run(|| {
                    let kind = plan
                        .bundle_kinds
                        .get(&entry)
                        .unwrap_or_else(|| {
                            unreachable!("Plan does not contain bundle kind for {:?}", entry)
                        })
                        .clone();

                    let module = self
                        .merge_modules(&plan, entry, true, false)
                        .context("failed to merge module")
                        .unwrap(); // TODO

                    let module = module
                        .fold_with(&mut dce::dce(Default::default()))
                        .fold_with(&mut hygiene());

                    Bundle {
                        kind,
                        id: entry,
                        module,
                    }
                })
            })
            .collect())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{bundler::tests::suite, BundleKind};
    use swc_common::FileName;

    #[test]
    fn es6_determine_entries() {
        suite()
            .file(
                "main.js",
                "
                import './a';
                import './b';
                ",
            )
            .file("a.js", "import './common';")
            .file("b.js", "import './common';")
            .file("common.js", r#"console.log('foo')"#)
            .run(|t| {
                let module = t
                    .bundler
                    .load_transformed(&FileName::Real("main.js".into()))?
                    .unwrap();
                let mut entries = HashMap::default();
                entries.insert("main.js".to_string(), module);

                let determined = t.bundler.determine_entries(entries)?;
                assert_eq!(determined.normal.len(), 2);
                assert_eq!(determined.circular.len(), 0);

                Ok(())
            });
    }

    #[test]
    fn
() { suite() .file( "main.js", " require('./a'); require('./b'); ", ) .file("a.js", "require('./common')") .file("b.js", "require('./common')") .file("common.js", r#"console.log('foo')"#) .run(|t| { let module = t .bundler .load_transformed(&FileName::Real("main.js".into()))? .unwrap(); let mut entries = HashMap::default(); entries.insert("main.js".to_string(), module.clone()); let determined = t.bundler.determine_entries(entries)?; assert_eq!(determined.normal.len(), 2); assert_eq!(determined.circular.len(), 0); assert_eq!( *determined.bundle_kinds.get(&module.id).unwrap(), BundleKind::Named { name: "main.js".to_string() } ); Ok(()) }); } #[test] fn cjs_chunk() { suite() .file( "main.js", " require('./a'); require('./b'); ", ) .file("a.js", "require('./common')") .file("b.js", "require('./common')") .file("common.js", r#"console.log('foo')"#) .run(|t| { let module = t .bundler .load_transformed(&FileName::Real("main.js".into()))? .unwrap(); let mut entries = HashMap::default(); entries.insert("main.js".to_string(), module); let chunked = t.bundler.chunk(entries)?; assert_eq!(chunked.len(), 1); Ok(()) }); } }
cjs_determine_entries
test.js
const test = require('ava') const splituniq = require('./') test('splits, trims & dedups', t => { t.deepEqual(splituniq('beep, boop, beep'), ['beep', 'boop']) t.deepEqual(splituniq('beep | boop | beep', '|'), ['beep', 'boop']) })
cx_metalsl.rs
use metal::*; use crate::cx::*; #[derive(Clone)] pub struct CxPlatformShader { pub library: metal::Library, pub pipeline_state: metal::RenderPipelineState, pub geom_vbuf: MetalBuffer, pub geom_ibuf: MetalBuffer, } impl PartialEq for CxPlatformShader { fn eq(&self, _other: &Self) -> bool {false} } pub enum PackType { Packed, Unpacked } impl Cx { pub fn mtl_compile_all_shaders(&mut self, metal_cx: &MetalCx) { for sh in &mut self.shaders { let mtlsh = Self::mtl_compile_shader(sh, metal_cx); if let Err(err) = mtlsh { panic!("Got metal shader compile error: {}", err.msg); } }; } pub fn mtl_type_to_packed_metal(ty: &str) -> String { match ty.as_ref() { "float" => "float".to_string(), "vec2" => "packed_float2".to_string(), "vec3" => "packed_float3".to_string(), "vec4" => "packed_float4".to_string(), "mat2" => "packed_float2x2".to_string(), "mat3" => "packed_float3x3".to_string(), "mat4" => "float4x4".to_string(), ty => ty.to_string() } } pub fn mtl_type_to_metal(ty: &str) -> String { match ty.as_ref() { "float" => "float".to_string(), "vec2" => "float2".to_string(), "vec3" => "float3".to_string(), "vec4" => "float4".to_string(), "mat2" => "float2x2".to_string(), "mat3" => "float3x3".to_string(), "mat4" => "float4x4".to_string(), "texture2d" => "texture2d<float>".to_string(), ty => ty.to_string() } } pub fn mtl_assemble_struct(name: &str, vars: &Vec<ShVar>, pack_type: PackType, field: &str) -> String { let mut out = String::new(); out.push_str("struct "); out.push_str(name); out.push_str("{\n"); out.push_str(field); for var in vars { out.push_str(" "); match pack_type { PackType::Packed => { out.push_str(&Self::mtl_type_to_packed_metal(&var.ty)); out.push_str(" "); out.push_str(&var.name); out.push_str(";\n"); }, PackType::Unpacked => { out.push_str(&Self::mtl_type_to_metal(&var.ty)); out.push_str(" "); out.push_str(&var.name); out.push_str(";\n"); } } }; out.push_str("};\n\n"); out } pub fn mtl_assemble_texture_slots(textures: &Vec<ShVar>) -> String { let mut out = String::new(); out.push_str("struct "); out.push_str("_Tex{\n"); for (i, tex) in textures.iter().enumerate() { out.push_str("texture2d<float> "); out.push_str(&tex.name); out.push_str(&format!(" [[texture({})]];\n", i)); }; out.push_str("};\n\n"); out } pub fn mtl_assemble_shader(sg: &ShaderGen) -> Result<(String, CxShaderMapping), SlErr> { let mut mtl_out = "#include <metal_stdlib>\nusing namespace metal;\n".to_string(); // ok now define samplers from our sh. 
let texture_slots = sg.flat_vars(|v| if let ShVarStore::Texture = *v{true} else {false});
        let geometries = sg.flat_vars(|v| if let ShVarStore::Geometry = *v{true} else {false});
        let instances = sg.flat_vars(|v| if let ShVarStore::Instance(_) = *v{true} else {false});
        let mut varyings = sg.flat_vars(|v| if let ShVarStore::Varying = *v{true} else {false});
        let locals = sg.flat_vars(|v| if let ShVarStore::Local = *v{true} else {false});
        let uniforms_cx = sg.flat_vars(|v| if let ShVarStore::UniformCx = *v{true} else {false});
        let uniforms_vw = sg.flat_vars(|v| if let ShVarStore::UniformVw = *v{true} else {false});
        let uniforms_dr = sg.flat_vars(|v| if let ShVarStore::Uniform(_) = *v{true} else {false});
        
        // lets count the slots
        let geometry_slots = sg.compute_slot_total(&geometries);
        let instance_slots = sg.compute_slot_total(&instances);
        //let varying_slots = sh.compute_slot_total(&varyings);
        mtl_out.push_str(&Self::mtl_assemble_struct("_Geom", &geometries, PackType::Packed, ""));
        mtl_out.push_str(&Self::mtl_assemble_struct("_Inst", &instances, PackType::Packed, ""));
        mtl_out.push_str(&Self::mtl_assemble_struct("_UniCx", &uniforms_cx, PackType::Unpacked, ""));
        mtl_out.push_str(&Self::mtl_assemble_struct("_UniVw", &uniforms_vw, PackType::Unpacked, ""));
        mtl_out.push_str(&Self::mtl_assemble_struct("_UniDr", &uniforms_dr, PackType::Unpacked, ""));
        mtl_out.push_str(&Self::mtl_assemble_struct("_Loc", &locals, PackType::Unpacked, ""));
        
        // we need to figure out which texture slots exist
        mtl_out.push_str(&Self::mtl_assemble_texture_slots(&texture_slots));
        // constants assembly is currently unused
        // mtl_out.push_str(&Self::assemble_constants(&texture_slots));
        let mut const_cx = SlCx {
            depth: 0,
            target: SlTarget::Constant,
            defargs_fn: "".to_string(),
            defargs_call: "".to_string(),
            call_prefix: "_".to_string(),
            shader_gen: sg,
            scope: Vec::new(),
            fn_deps: Vec::new(),
            fn_done: Vec::new(),
            auto_vary: Vec::new()
        };
        let consts = sg.flat_consts();
        for cnst in &consts {
            let const_init = assemble_const_init(cnst, &mut const_cx) ?;
            mtl_out.push_str("#define ");
            mtl_out.push_str(" ");
            mtl_out.push_str(&cnst.name);
            mtl_out.push_str(" (");
            mtl_out.push_str(&const_init.sl);
            mtl_out.push_str(")\n");
        }
        let mut vtx_cx = SlCx {
            depth: 0,
            target: SlTarget::Vertex,
            defargs_fn: "_Tex _tex, thread _Loc &_loc, thread _Vary &_vary, thread _Geom &_geom, thread _Inst &_inst, device _UniCx &_uni_cx, device _UniVw &_uni_vw, device _UniDr &_uni_dr".to_string(),
            defargs_call: "_tex, _loc, _vary, _geom, _inst, _uni_cx, _uni_vw, _uni_dr".to_string(),
            call_prefix: "_".to_string(),
            shader_gen: sg,
            scope: Vec::new(),
            fn_deps: vec!["vertex".to_string()],
            fn_done: Vec::new(),
            auto_vary: Vec::new()
        };
        
        let vtx_fns = assemble_fn_and_deps(sg, &mut vtx_cx) ?;
        
        let mut pix_cx = SlCx {
            depth: 0,
            target: SlTarget::Pixel,
            defargs_fn: "_Tex _tex, thread _Loc &_loc, thread _Vary &_vary, device _UniCx &_uni_cx, device _UniVw &_uni_vw, device _UniDr &_uni_dr".to_string(),
            defargs_call: "_tex, _loc, _vary, _uni_cx, _uni_vw, _uni_dr".to_string(),
            call_prefix: "_".to_string(),
            shader_gen: sg,
            scope: Vec::new(),
            fn_deps: vec!["pixel".to_string()],
            fn_done: vtx_cx.fn_done,
            auto_vary: Vec::new()
        };
        
        let pix_fns = assemble_fn_and_deps(sg, &mut pix_cx) ?;
        
        // lets add the auto_vary ones to the varyings struct
        for auto in &pix_cx.auto_vary {
            varyings.push(auto.clone());
        }
        
        mtl_out.push_str(&Self::mtl_assemble_struct("_Vary", &varyings, PackType::Unpacked, " float4 mtl_position [[position]];\n"));
        
        mtl_out.push_str("//Vertex shader\n");
mtl_out.push_str(&vtx_fns); mtl_out.push_str("//Pixel shader\n"); mtl_out.push_str(&pix_fns); // lets define the vertex shader mtl_out.push_str("vertex _Vary _vertex_shader(_Tex _tex, device _Geom *in_geometries [[buffer(0)]], device _Inst *in_instances [[buffer(1)]],\n"); mtl_out.push_str(" device _UniCx &_uni_cx [[buffer(2)]], device _UniVw &_uni_vw [[buffer(3)]], device _UniDr &_uni_dr [[buffer(4)]],\n"); mtl_out.push_str(" uint vtx_id [[vertex_id]], uint inst_id [[instance_id]]){\n"); mtl_out.push_str(" _Loc _loc;\n"); mtl_out.push_str(" _Vary _vary;\n"); mtl_out.push_str(" _Geom _geom = in_geometries[vtx_id];\n"); mtl_out.push_str(" _Inst _inst = in_instances[inst_id];\n"); mtl_out.push_str(" _vary.mtl_position = _vertex("); mtl_out.push_str(&vtx_cx.defargs_call); mtl_out.push_str(");\n\n"); for auto in pix_cx.auto_vary { if let ShVarStore::Geometry = auto.store { mtl_out.push_str(" _vary."); mtl_out.push_str(&auto.name); mtl_out.push_str(" = _geom."); mtl_out.push_str(&auto.name); mtl_out.push_str(";\n"); } else if let ShVarStore::Instance(_) = auto.store { mtl_out.push_str(" _vary."); mtl_out.push_str(&auto.name); mtl_out.push_str(" = _inst."); mtl_out.push_str(&auto.name); mtl_out.push_str(";\n"); } } mtl_out.push_str(" return _vary;\n"); mtl_out.push_str("};\n"); // then the fragment shader mtl_out.push_str("fragment float4 _fragment_shader(_Vary _vary[[stage_in]],_Tex _tex,\n"); mtl_out.push_str(" device _UniCx &_uni_cx [[buffer(0)]], device _UniVw &_uni_vw [[buffer(1)]], device _UniDr &_uni_dr [[buffer(2)]]){\n"); mtl_out.push_str(" _Loc _loc;\n"); mtl_out.push_str(" return _pixel("); mtl_out.push_str(&pix_cx.defargs_call); mtl_out.push_str(");\n};\n"); if sg.log != 0 {
let uniform_props = UniformProps::construct(sg, &uniforms_dr); Ok((mtl_out, CxShaderMapping { zbias_uniform_prop: uniform_props.find_zbias_uniform_prop(), rect_instance_props: RectInstanceProps::construct(sg, &instances), instance_props: InstanceProps::construct(sg, &instances), uniform_props: uniform_props, instances: instances, geometries: geometries, instance_slots: instance_slots, geometry_slots: geometry_slots, uniforms_dr: uniforms_dr, uniforms_vw: uniforms_vw, uniforms_cx: uniforms_cx, texture_slots: texture_slots, })) } pub fn mtl_compile_shader(sh: &mut CxShader, metal_cx: &MetalCx) -> Result<(), SlErr> { let (mtlsl, mapping) = Self::mtl_assemble_shader(&sh.shader_gen) ?; let options = CompileOptions::new(); let library = metal_cx.device.new_library_with_source(&mtlsl, &options); match library { Err(library) => return Err(SlErr {msg: library}), Ok(library) => { sh.mapping = mapping; sh.platform = Some(CxPlatformShader { pipeline_state: { let vert = library.get_function("_vertex_shader", None).unwrap(); let frag = library.get_function("_fragment_shader", None).unwrap(); let rpd = RenderPipelineDescriptor::new(); rpd.set_vertex_function(Some(&vert)); rpd.set_fragment_function(Some(&frag)); //rpd.set_sample_count(2); //rpd.set_alpha_to_coverage_enabled(false); let color = rpd.color_attachments().object_at(0).unwrap(); color.set_pixel_format(MTLPixelFormat::BGRA8Unorm); color.set_blending_enabled(true); color.set_source_rgb_blend_factor(MTLBlendFactor::One); color.set_destination_rgb_blend_factor(MTLBlendFactor::OneMinusSourceAlpha); color.set_source_alpha_blend_factor(MTLBlendFactor::One); color.set_destination_alpha_blend_factor(MTLBlendFactor::OneMinusSourceAlpha); color.set_rgb_blend_operation(MTLBlendOperation::Add); color.set_alpha_blend_operation(MTLBlendOperation::Add); rpd.set_depth_attachment_pixel_format(MTLPixelFormat::Depth32Float_Stencil8); metal_cx.device.new_render_pipeline_state(&rpd).unwrap() }, library: library, geom_ibuf: { let mut geom_ibuf = MetalBuffer {..Default::default()}; geom_ibuf.update_with_u32_data(metal_cx, &sh.shader_gen.geometry_indices); geom_ibuf }, geom_vbuf: { let mut geom_vbuf = MetalBuffer {..Default::default()}; geom_vbuf.update_with_f32_data(metal_cx, &sh.shader_gen.geometry_vertices); geom_vbuf } }); return Ok(()); } }; } } impl<'a> SlCx<'a> { pub fn map_call(&self, name: &str, args: &Vec<Sl>) -> MapCallResult { match name { "sample2d" => { // transform call to let base = &args[0]; let coord = &args[1]; return MapCallResult::Rewrite( format!("{}.sample(sampler(mag_filter::linear,min_filter::linear),{})", base.sl, coord.sl), "vec4".to_string() ) }, "color" => { let col = color(&args[0].sl); return MapCallResult::Rewrite( format!("float4({},{},{},{})", col.r, col.g, col.b, col.a), "vec4".to_string() ); }, _ => return MapCallResult::None } } pub fn mat_mul(&self, left: &str, right: &str) -> String { format!("{}*{}", left, right) } pub fn map_type(&self, ty: &str) -> String { Cx::mtl_type_to_metal(ty) } pub fn map_constructor(&self, name: &str, args: &Vec<Sl>)->String{ let mut out = String::new(); out.push_str(&self.map_type(name)); out.push_str("("); for (i,arg) in args.iter().enumerate(){ if i != 0{ out.push_str(", "); } out.push_str(&arg.sl); } out.push_str(")"); return out; } pub fn map_var(&mut self, var: &ShVar) -> String { let mty = Cx::mtl_type_to_metal(&var.ty); match var.store { ShVarStore::Uniform(_) => return format!("_uni_dr.{}", var.name), ShVarStore::UniformColor(_) => return format!("_uni_col.{}", var.name), ShVarStore::UniformVw 
=> return format!("_uni_vw.{}", var.name), ShVarStore::UniformCx => return format!("_uni_cx.{}", var.name), ShVarStore::Instance(_) => { if let SlTarget::Pixel = self.target { if self.auto_vary.iter().find( | v | v.name == var.name).is_none() { self.auto_vary.push(var.clone()); } return format!("_vary.{}", var.name); } else { return format!("{}(_inst.{})", mty, var.name); } }, ShVarStore::Geometry => { if let SlTarget::Pixel = self.target { if self.auto_vary.iter().find( | v | v.name == var.name).is_none() { self.auto_vary.push(var.clone()); } return format!("_vary.{}", var.name); } else { return format!("{}(_geom.{})", mty, var.name); } }, ShVarStore::Texture => return format!("_tex.{}", var.name), ShVarStore::Local => return format!("_loc.{}", var.name), ShVarStore::Varying => return format!("_vary.{}", var.name), } } }
println!("---- Metal shader -----\n{}", mtl_out); }
utils.py
import collections
import collections.abc
import math
import os
import random
import subprocess
from socket import gethostname
from typing import Any, Dict, Optional, Set, Tuple, Union

import numpy as np
import torch
from loguru import logger
from torch import Tensor
from torch._six import string_classes
from torch.autograd import Function
from torch.types import Number

from df.config import config
from df.model import ModelParams

try:
    from torchaudio.functional import resample as ta_resample
except ImportError:
    from torchaudio.compliance.kaldi import resample_waveform as ta_resample  # type: ignore


def get_resample_params(method: str) -> Dict[str, Any]:
    params = {
        "sinc_fast": {"resampling_method": "sinc_interpolation", "lowpass_filter_width": 16},
        "sinc_best": {"resampling_method": "sinc_interpolation", "lowpass_filter_width": 64},
        "kaiser_fast": {
            "resampling_method": "kaiser_window",
            "lowpass_filter_width": 16,
            "rolloff": 0.85,
            "beta": 8.555504641634386,
        },
        "kaiser_best": {
            "resampling_method": "kaiser_window",
            "lowpass_filter_width": 16,
            "rolloff": 0.9475937167399596,
            "beta": 14.769656459379492,
        },
    }
    assert method in params.keys(), f"method must be one of {list(params.keys())}"
    return params[method]


def resample(audio: Tensor, orig_sr: int, new_sr: int, method="sinc_fast"):
    params = get_resample_params(method)
    return ta_resample(audio, orig_sr, new_sr, **params)


def get_device():
    s = config("DEVICE", default="", section="train")
    if s == "":
        if torch.cuda.is_available():
            DEVICE = torch.device("cuda:0")
        else:
            DEVICE = torch.device("cpu")
    else:
        DEVICE = torch.device(s)
    return DEVICE


def as_complex(x: Tensor):
    if torch.is_complex(x):
        return x
    if x.shape[-1] != 2:
        raise ValueError(f"Last dimension needs to be of length 2 (re + im), but got {x.shape}")
    if x.stride(-1) != 1:
        x = x.contiguous()
    return torch.view_as_complex(x)


def as_real(x: Tensor):
    if torch.is_complex(x):
        return torch.view_as_real(x)
    return x


class angle_re_im(Function):
    """Similar to torch.angle but robustify the gradient for zero magnitude."""

    @staticmethod
    def forward(ctx, re: Tensor, im: Tensor):
        ctx.save_for_backward(re, im)
        return torch.atan2(im, re)

    @staticmethod
    def backward(ctx, grad: Tensor) -> Tuple[Tensor, Tensor]:
        re, im = ctx.saved_tensors
        grad_inv = grad / (re.square() + im.square()).clamp_min_(1e-10)
        return -im * grad_inv, re * grad_inv


class angle(Function):
    """Similar to torch.angle but robustify the gradient for zero magnitude."""

    @staticmethod
    def forward(ctx, x: Tensor):
    @staticmethod
    def backward(ctx, grad: Tensor):
        (x,) = ctx.saved_tensors
        grad_inv = grad / (x.real.square() + x.imag.square()).clamp_min_(1e-10)
        return torch.view_as_complex(torch.stack((-x.imag * grad_inv, x.real * grad_inv), dim=-1))


def check_finite_module(obj, name="Module", _raise=True) -> Set[str]:
    out: Set[str] = set()
    if isinstance(obj, torch.nn.Module):
        # use distinct loop variables so the `name` argument is not shadowed
        for child_name, child in obj.named_children():
            out = out | check_finite_module(child, child_name)
        for param_name, param in obj.named_parameters():
            out = out | check_finite_module(param, param_name)
        for buf_name, buf in obj.named_buffers():
            out = out | check_finite_module(buf, buf_name)
    elif isinstance(obj, Tensor) and not torch.isfinite(obj).all():
        # base case (missing in the original): record the name of any tensor
        # (parameter or buffer) that contains non-finite values
        out.add(name)
    if _raise and len(out) > 0:
        raise ValueError(f"{name} not finite during checkpoint writing including: {out}")
    return out


def make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:
    """Transforms a torch tensor, scalar, or numpy array into a numpy array.

    Args:
        x: An instance of a torch tensor, numpy array, or scalar

    Returns:
        numpy.array: Numpy array
    """
    if isinstance(x, np.ndarray):
        return x
    if np.isscalar(x):
        return np.array([x])
    if isinstance(x, Tensor):
        return x.detach().cpu().numpy()
    raise NotImplementedError(
        "Got {}, but numpy array, scalar, or torch tensor are expected.".format(type(x))
    )


def get_norm_alpha(log: bool = True) -> float:
    p = ModelParams()
    a_ = _calculate_norm_alpha(sr=p.sr, hop_size=p.hop_size, tau=p.norm_tau)
    precision = 3
    a = 1.0
    while a >= 1.0:
        a = round(a_, precision)
        precision += 1
    if log:
        logger.info(f"Running with normalization window alpha = '{a}'")
    return a


def _calculate_norm_alpha(sr: int, hop_size: int, tau: float):
    """Exponential decay factor alpha for a given tau (decay window size [s])."""
    dt = hop_size / sr
    return math.exp(-dt / tau)


def check_manual_seed(seed: Optional[int] = None):
    """If manual seed is not specified, choose a random one and communicate it to the user."""
    seed = seed or random.randint(1, 10000)
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    return seed


def get_git_root():
    git_local_dir = os.path.dirname(os.path.abspath(__file__))
    args = ["git", "-C", git_local_dir, "rev-parse", "--show-toplevel"]
    return subprocess.check_output(args).strip().decode()


def get_commit_hash():
    """Returns the current git commit."""
    try:
        git_dir = get_git_root()
        args = ["git", "-C", git_dir, "rev-parse", "--short", "--verify", "HEAD"]
        commit = subprocess.check_output(args).strip().decode()
    except subprocess.CalledProcessError:  # probably not in git repo
        commit = None
    return commit


def get_host() -> str:
    return gethostname()


def get_branch_name():
    try:
        git_dir = os.path.dirname(os.path.abspath(__file__))
        args = ["git", "-C", git_dir, "rev-parse", "--abbrev-ref", "HEAD"]
        branch = subprocess.check_output(args).strip().decode()
    except subprocess.CalledProcessError:  # probably not in git repo
        branch = None
    return branch


# from pytorch/ignite:
def apply_to_tensor(input_, func):
    """Apply a function on a tensor or mapping, or sequence of tensors."""
    if isinstance(input_, torch.nn.Module):
        return [apply_to_tensor(c, func) for c in input_.children()]
    elif isinstance(input_, torch.nn.Parameter):
        return func(input_.data)
    elif isinstance(input_, Tensor):
        return func(input_)
    elif isinstance(input_, string_classes):
        return input_
    elif isinstance(input_, collections.abc.Mapping):
        return {k: apply_to_tensor(sample, func) for k, sample in input_.items()}
    elif isinstance(input_, collections.abc.Iterable):
        return [apply_to_tensor(sample, func) for sample in input_]
    elif input_ is None:
        return input_
    else:
        return input_


def detach_hidden(hidden: Any) -> Any:
    """Cut backpropagation graph.
    Auxiliary function to cut the backpropagation graph by detaching the hidden vector.
    """
    return apply_to_tensor(hidden, Tensor.detach)
ctx.save_for_backward(x) return torch.atan2(x.imag, x.real)
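(Aside, not part of the source file: a minimal sketch of why the custom autograd Functions above are preferred over torch.angle. At zero magnitude, the analytic gradient of atan2 divides by re^2 + im^2 = 0 and produces NaN; the clamped backward above stays finite.)

# Minimal sketch; assumes angle_re_im from this module is in scope.
import torch

re = torch.zeros(4, requires_grad=True)
im = torch.zeros(4, requires_grad=True)
# torch.atan2(im, re).sum().backward() would yield NaN gradients here;
# the clamp_min_(1e-10) in angle_re_im.backward keeps them finite (zero).
angle_re_im.apply(re, im).sum().backward()
print(re.grad, im.grad)  # tensor([0., 0., 0., 0.]) for both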
doc.go
/** * Copyright 2021 Comcast Cable Communications Management, LLC *
* * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* Package basculechecks provides bascule validators for JWT capability checking. */ package basculechecks
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at
comments.service.ts
import debounce from 'lodash/debounce'; import flattenDeep from 'lodash/flattenDeep'; import groupBy from 'lodash/groupBy'; import { observable, computed, action } from 'mobx'; import { INJECTOR_TOKEN, Injector, Injectable, Autowired } from '@opensumi/di'; import { Disposable, IRange, URI, Emitter, AppConfig, localize, getIcon, Event, memoize, IDisposable, positionToRange, Deferred, path, LRUCache, } from '@opensumi/ide-core-browser'; import { IEditor } from '@opensumi/ide-editor'; import { IEditorDecorationCollectionService, IEditorDocumentModelService, ResourceService, WorkbenchEditorService, } from '@opensumi/ide-editor/lib/browser'; import { IMainLayoutService } from '@opensumi/ide-main-layout'; import { IIconService, IconType } from '@opensumi/ide-theme'; import * as model from '@opensumi/monaco-editor-core/esm/vs/editor/common/model'; import * as textModel from '@opensumi/monaco-editor-core/esm/vs/editor/common/model/textModel'; import * as monaco from '@opensumi/monaco-editor-core/esm/vs/editor/editor.api'; import { ICommentsService, ICommentsThread, ICommentsTreeNode, ICommentsFeatureRegistry, CommentPanelId, ICommentRangeProvider, ICommentsThreadOptions, } from '../common'; import { CommentsPanel } from './comments-panel.view'; import { CommentsThread } from './comments-thread'; const { dirname } = path; @Injectable() export class
extends Disposable implements ICommentsService {
  @Autowired(INJECTOR_TOKEN)
  private readonly injector: Injector;

  @Autowired(AppConfig)
  private readonly appConfig: AppConfig;

  @Autowired(IEditorDecorationCollectionService)
  private readonly editorDecorationCollectionService: IEditorDecorationCollectionService;

  @Autowired(IIconService)
  private readonly iconService: IIconService;

  @Autowired(ICommentsFeatureRegistry)
  private readonly commentsFeatureRegistry: ICommentsFeatureRegistry;

  @Autowired(IEditorDocumentModelService)
  private readonly documentService: IEditorDocumentModelService;

  @Autowired(IMainLayoutService)
  private readonly layoutService: IMainLayoutService;

  @Autowired(WorkbenchEditorService)
  private readonly workbenchEditorService: WorkbenchEditorService;

  @Autowired(ResourceService)
  private readonly resourceService: ResourceService;

  private decorationChangeEmitter = new Emitter<URI>();

  @observable
  private threads = new Map<string, ICommentsThread>();

  private threadsChangeEmitter = new Emitter<ICommentsThread>();

  private threadsCreatedEmitter = new Emitter<ICommentsThread>();

  private rangeProviderMap = new Map<string, ICommentRangeProvider>();

  private rangeOwner = new Map<string, IRange[]>();

  private providerDecorationCache = new LRUCache<string, Deferred<IRange[]>>(10000);

  // By default, show comment data for the file and git schemes
  private shouldShowCommentsSchemes = new Set(['file', 'git']);

  private decorationProviderDisposer = Disposable.NULL;

  @observable
  private forceUpdateCount = 0;

  @computed
  get commentsThreads() {
    return [...this.threads.values()];
  }

  @memoize
  get isMultiCommentsForSingleLine() {
    return !!this.commentsFeatureRegistry.getConfig()?.isMultiCommentsForSingleLine;
  }

  @memoize
  get currentAuthorAvatar() {
    return this.commentsFeatureRegistry.getConfig()?.author?.avatar;
  }

  @memoize
  get filterThreadDecoration() {
    return this.commentsFeatureRegistry.getConfig()?.filterThreadDecoration;
  }

  get onThreadsChanged(): Event<ICommentsThread> {
    return this.threadsChangeEmitter.event;
  }

  get onThreadsCreated(): Event<ICommentsThread> {
    return this.threadsCreatedEmitter.event;
  }

  /**
   * -------------------------------- IMPORTANT --------------------------------
   * Note the distinction between the model.IModelDecorationOptions and
   * monaco.editor.IModelDecorationOptions types.
   * Passing an object typed as model.IModelDecorationOptions to a method whose
   * signature expects monaco.editor.IModelDecorationOptions requires a type assertion.
   * This is because monaco.d.ts and vs/editor/common/model each export the enum
   * TrackedRangeStickiness, and in that case the two enum types are incompatible,
   * even though they are compiled from the same code.
   * -------------------------------- IMPORTANT --------------------------------
   * @param thread
   */
  private createThreadDecoration(thread: ICommentsThread): model.IModelDecorationOptions {
    // For a newly created, empty thread, show the current user's avatar by default;
    // otherwise use the first commenter's avatar
    const avatar =
      thread.comments.length === 0 ? this.currentAuthorAvatar : thread.comments[0].author.iconPath?.toString();
    const icon = avatar ?
this.iconService.fromIcon('', avatar, IconType.Background) : getIcon('message');
    const decorationOptions: model.IModelDecorationOptions = {
      description: 'comments-thread-decoration',
      // Render the create-comment indicator in the glyph margin
      glyphMarginClassName: ['comments-decoration', 'comments-thread', icon].join(' '),
    };
    return textModel.ModelDecorationOptions.createDynamic(decorationOptions);
  }

  private createHoverDecoration(): model.IModelDecorationOptions {
    const decorationOptions: model.IModelDecorationOptions = {
      description: 'comments-hover-decoration',
      linesDecorationsClassName: ['comments-decoration', 'comments-add', getIcon('message')].join(' '),
    };
    return textModel.ModelDecorationOptions.createDynamic(decorationOptions);
  }

  public init() {
    // Re-register the CommentDecorationProvider when an extension registers a ResourceProvider,
    // e.g. the Github Pull Request extension uses the scheme "pr"
    this.addDispose(
      this.resourceService.onRegisterResourceProvider((provider) => {
        if (provider.scheme) {
          this.shouldShowCommentsSchemes.add(provider.scheme);
          this.registerDecorationProvider();
        }
      }),
    );
    this.addDispose(
      this.resourceService.onUnregisterResourceProvider((provider) => {
        if (provider.scheme) {
          this.shouldShowCommentsSchemes.delete(provider.scheme);
          this.registerDecorationProvider();
        }
      }),
    );
    this.registerDecorationProvider();
  }

  public handleOnCreateEditor(editor: IEditor) {
    const disposer = new Disposable();

    disposer.addDispose(
      editor.monacoEditor.onMouseDown((event) => {
        if (
          event.target.type === monaco.editor.MouseTargetType.GUTTER_LINE_DECORATIONS &&
          event.target.element &&
          event.target.element.className.indexOf('comments-add') > -1
        ) {
          const { target } = event;
          if (target && target.range) {
            const { range } = target;
            // If a pending (empty) comment widget already exists, do not create a new one
            if (
              this.commentsThreads.some(
                (thread) =>
                  thread.comments.length === 0 &&
                  thread.uri.isEqual(editor.currentUri!) &&
                  thread.range.startLineNumber === range.startLineNumber,
              )
            ) {
              return;
            }
            const thread = this.createThread(editor.currentUri!, range);
            thread.show(editor);
          }
        } else if (
          event.target.type === monaco.editor.MouseTargetType.GUTTER_GLYPH_MARGIN &&
          event.target.element &&
          event.target.element.className.indexOf('comments-thread') > -1
        ) {
          const { target } = event;
          if (target && target.range) {
            const { range } = target;
            const threads = this.commentsThreads.filter(
              (thread) =>
                thread.uri.isEqual(editor.currentUri!)
&& thread.range.startLineNumber === range.startLineNumber,
            );
            if (threads.length) {
              // Check whether the widgets are currently shown
              const isShowWidget = threads.some((thread) => thread.isShowWidget(editor));
              if (isShowWidget) {
                threads.forEach((thread) => thread.hide(editor));
              } else {
                threads.forEach((thread) => thread.show(editor));
              }
            }
          }
        }
      }),
    );

    let oldDecorations: string[] = [];
    disposer.addDispose(
      editor.monacoEditor.onMouseMove(
        debounce(async (event) => {
          const uri = editor.currentUri;
          const range = event.target.range;
          if (uri && range && (await this.shouldShowHoverDecoration(uri, range))) {
            oldDecorations = editor.monacoEditor.deltaDecorations(oldDecorations, [
              {
                range: positionToRange(range.startLineNumber),
                options: this.createHoverDecoration() as unknown as monaco.editor.IModelDecorationOptions,
              },
            ]);
          } else {
            oldDecorations = editor.monacoEditor.deltaDecorations(oldDecorations, []);
          }
        }, 10),
      ),
    );

    disposer.addDispose(
      editor.monacoEditor.onMouseLeave(
        debounce(() => {
          oldDecorations = editor.monacoEditor.deltaDecorations(oldDecorations, []);
        }, 10),
      ),
    );

    return disposer;
  }

  private async shouldShowHoverDecoration(uri: URI, range: IRange) {
    if (!this.shouldShowCommentsSchemes.has(uri.scheme)) {
      return false;
    }
    const contributionRanges = await this.getContributionRanges(uri);
    const isProviderRanges = contributionRanges.some(
      (contributionRange) =>
        range.startLineNumber >= contributionRange.startLineNumber &&
        range.startLineNumber <= contributionRange.endLineNumber,
    );
    // If multiple comments on a single line are not supported, filter out
    // decorations on lines that already have a thread
    const isShowHoverToSingleLine =
      this.isMultiCommentsForSingleLine ||
      !this.commentsThreads.some(
        (thread) => thread.uri.isEqual(uri) && thread.range.startLineNumber === range.startLineNumber,
      );
    return isProviderRanges && isShowHoverToSingleLine;
  }

  public createThread(
    uri: URI,
    range: IRange,
    options: ICommentsThreadOptions = {
      comments: [],
      readOnly: false,
    },
  ) {
    // Get the providerId for the current range; it is used to generate the
    // commentController contextKey
    const providerId = this.getProviderIdsByLine(range.startLineNumber)[0];
    const thread = this.injector.get(CommentsThread, [uri, range, providerId, options]);
    thread.onDispose(() => {
      this.threads.delete(thread.id);
      this.threadsChangeEmitter.fire(thread);
      this.decorationChangeEmitter.fire(uri);
    });
    this.threads.set(thread.id, thread);
    this.addDispose(thread);
    this.threadsChangeEmitter.fire(thread);
    this.threadsCreatedEmitter.fire(thread);
    this.decorationChangeEmitter.fire(uri);
    return thread;
  }

  public getThreadsByUri(uri: URI) {
    return (
      this.commentsThreads
        .filter((thread) => thread.uri.isEqual(uri))
        // Sort ascending by range start line by default
        .sort((a, b) => a.range.startLineNumber - b.range.startLineNumber)
    );
  }

  @action
  public forceUpdateTreeNodes() {
    this.forceUpdateCount++;
  }

  @computed
  get commentsTreeNodes(): ICommentsTreeNode[] {
    let treeNodes: ICommentsTreeNode[] = [];
    const commentThreads = [...this.threads.values()].filter((thread) => thread.comments.length);
    const threadUris = groupBy(commentThreads, (thread: ICommentsThread) => thread.uri);
    Object.keys(threadUris).forEach((uri) => {
      const threads: ICommentsThread[] = threadUris[uri];
      if (threads.length === 0) {
        return;
      }
      const firstThread = threads[0];
      const firstThreadUri = firstThread.uri;
      const filePath = dirname(firstThreadUri.path.toString().replace(this.appConfig.workspaceDir, ''));
      const rootNode: ICommentsTreeNode = {
        id: uri,
        name: firstThreadUri.displayName,
        uri: firstThreadUri,
        description: filePath.replace(/^\//, ''),
        parent: undefined,
        thread: firstThread,
        ...(threads.length && {
          expanded: true,
          children: [],
        }),
        // Bypass the mobx computed cache and force the getCommentsPanelTreeNodeHandlers logic to run again
        _forceUpdateCount: this.forceUpdateCount,
      };
      treeNodes.push(rootNode);
      threads.forEach((thread) => {
        if (thread.comments.length === 0) {
          return;
        }
        const [firstComment, ...otherComments] = thread.comments;
        const firstCommentNode: ICommentsTreeNode = {
          id: firstComment.id,
          name: firstComment.author.name,
          iconStyle: {
            marginRight: 5,
            backgroundSize: '14px 14px',
          },
          icon: this.iconService.fromIcon('', firstComment.author.iconPath?.toString(), IconType.Background),
          description: firstComment.body,
          uri: thread.uri,
          parent: rootNode,
          depth: 1,
          thread,
          ...(otherComments.length && {
            expanded: true,
            children: [],
          }),
          comment: firstComment,
        };
        const firstCommentChildren = otherComments.map((comment) => {
          const otherCommentNode: ICommentsTreeNode = {
            id: comment.id,
            name: comment.author.name,
            description: comment.body,
            uri: thread.uri,
            iconStyle: {
              marginRight: 5,
              backgroundSize: '14px 14px',
            },
            icon: this.iconService.fromIcon('', comment.author.iconPath?.toString(), IconType.Background),
            parent: firstCommentNode,
            depth: 2,
            thread,
            comment,
          };
          return otherCommentNode;
        });
        treeNodes.push(firstCommentNode);
        treeNodes.push(...firstCommentChildren);
      });
    });

    for (const handler of this.commentsFeatureRegistry.getCommentsPanelTreeNodeHandlers()) {
      treeNodes = handler(treeNodes);
    }

    return treeNodes;
  }

  public async getContributionRanges(uri: URI): Promise<IRange[]> {
    // A diff editor has two uris; their rangeOwner entries must not overwrite each other
    const cache = this.providerDecorationCache.get(uri.toString());
    // Prefer the cached result
    if (cache) {
      return await cache.promise;
    }

    const model = this.documentService.getModelReference(uri);
    const rangePromise: Promise<IRange[] | undefined>[] = [];
    for (const rangeProvider of this.rangeProviderMap) {
      const [id, provider] = rangeProvider;
      rangePromise.push(
        (async () => {
          const ranges = await provider.getCommentingRanges(model?.instance!);
          if (ranges && ranges.length) {
            // FIXME: ranges can be overwritten by the two ranges of a diff uri,
            // so a provider may not be found when looking it up by line
            this.rangeOwner.set(id, ranges);
          }
          return ranges;
        })(),
      );
    }
    const deferredRes = new Deferred<IRange[]>();
    this.providerDecorationCache.set(uri.toString(), deferredRes);
    const res = await Promise.all(rangePromise);
    // Release the document reference
    model?.dispose();
    // Flatten and drop undefined entries
    const flattenRange: IRange[] = flattenDeep(res).filter(Boolean) as IRange[];
    deferredRes.resolve(flattenRange);
    return flattenRange;
  }

  private registerDecorationProvider() {
    // Dispose the previous decorationProvider
    this.decorationProviderDisposer.dispose();
    this.decorationProviderDisposer = this.editorDecorationCollectionService.registerDecorationProvider({
      schemes: [...this.shouldShowCommentsSchemes.values()],
      key: 'comments',
      onDidDecorationChange: this.decorationChangeEmitter.event,
      provideEditorDecoration: (uri: URI) =>
        this.commentsThreads
          .map((thread) => {
            if (thread.uri.isEqual(uri)) {
              // Restore the previously shown widgets
              thread.showWidgetsIfShowed();
            } else {
              // Hide temporarily; they are restored when switching back
              thread.hideWidgetsByDispose();
            }
            return thread;
          })
          .filter((thread) => {
            const isCurrentThread = thread.uri.isEqual(uri);
            if (this.filterThreadDecoration) {
              return isCurrentThread && this.filterThreadDecoration(thread);
            }
            return isCurrentThread;
          })
          .map((thread) => ({
            range: thread.range,
            options: this.createThreadDecoration(thread) as unknown as monaco.editor.IModelDecorationOptions,
          })),
    });
    this.addDispose(this.decorationProviderDisposer);
  }

  public registerCommentPanel() {
    // Register the panel only once
    if (this.layoutService.getTabbarHandler(CommentPanelId)) {
      return;
    }
    this.layoutService.collectTabbarComponent(
      [
        {
          id:
CommentPanelId,
          component: CommentsPanel,
        },
      ],
      {
        badge: this.panelBadge,
        containerId: CommentPanelId,
        title: localize('comments').toUpperCase(),
        hidden: false,
        activateKeyBinding: 'ctrlcmd+shift+c',
        ...this.commentsFeatureRegistry.getCommentsPanelOptions(),
      },
      'bottom',
    );
  }

  get panelBadge() {
    const length = this.commentsThreads.length;
    return length ? length + '' : '';
  }

  registerCommentRangeProvider(id: string, provider: ICommentRangeProvider): IDisposable {
    this.rangeProviderMap.set(id, provider);
    // Clear the previous cache after a new range provider is registered
    this.providerDecorationCache.clear();
    return Disposable.create(() => {
      this.rangeProviderMap.delete(id);
      this.rangeOwner.delete(id);
      this.providerDecorationCache.clear();
    });
  }

  forceUpdateDecoration(): void {
    // By default, force-refresh decorations for the current uri;
    // this is the main editor or the modified editor of a diff editor
    const uri = this.workbenchEditorService.currentEditor?.currentUri;
    uri && this.decorationChangeEmitter.fire(uri);
    // The diff editor's originalUri also needs its decorations updated
    const originalUri = this.workbenchEditorService.currentEditorGroup?.diffEditor.originalEditor.currentUri;
    originalUri && this.decorationChangeEmitter.fire(originalUri);
  }

  public getProviderIdsByLine(line: number): string[] {
    const result: string[] = [];
    if (this.rangeOwner.size === 1) {
      // Only one provider; return it directly
      return [this.rangeOwner.keys().next().value];
    }
    for (const rangeOwner of this.rangeOwner) {
      const [id, ranges] = rangeOwner;
      if (ranges && ranges.some((range) => range.startLineNumber <= line && line <= range.endLineNumber)) {
        result.push(id);
      }
    }
    return result;
  }
}
CommentsService
training.py
import torch
import numpy as np
import dnnutil.network as network
import time


__all__ = ['calculate_accuracy', 'Trainer', 'ClassifierTrainer', 'AutoencoderTrainer']


def calculate_accuracy(prediction, label, axis=1):
    '''calculate_accuracy(prediction, label)

    Computes the mean accuracy over a batch of predictions and corresponding
    ground-truth labels.

    Args:
        prediction (Tensor): A batch of predictions. Assumed to have shape
            [batch-size, nclasses, [d0, d1, ...]].
        label (LongTensor): A batch of labels. Assumed to have shape
            [batch-size, [d0, d1, ...]]. The number of dimensions should be
            one less than prediction.

    Returns:
        accuracy (Tensor): A single-element Tensor containing the percent of
            correct predictions in the batch as a value between 0 and 1.
    '''
    return torch.eq(prediction.argmax(axis), label).float().mean().item()


class Trainer(object):
    '''Trainer(net, optim, loss_fn, accuracy_metric=None)

    Base class for all network trainers. Network trainer classes provide
    methods to facilitate training and testing deep network models. The goal
    is to encapsulate the common functionality, to reduce the boilerplate
    code that needs to be repeated across projects.

    Args:
        net (torch.nn.Module): An instance of a network that inherits from
            torch.nn.Module.
        optim (torch.optim.Optimizer): An instance of an optimizer that
            inherits from torch.optim.Optimizer.
        loss_fn (callable): A callable that calculates and returns a loss
            value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
            an accuracy value. Usually this will be a floating point number
            in [0, 1].
    '''

    def __init__(self, net, optim, loss_fn, accuracy_metric=None):
        self.net = net
        self.loss_fn = loss_fn
        self.optim = optim
        if accuracy_metric is not None:
            self.measure_accuracy = accuracy_metric
        else:
            self.measure_accuracy = calculate_accuracy

        self.train_loss = 0.
        self.train_acc = 0.
        self.test_loss = 0.
        self.test_acc = 0.

    def _set_train_stats(self, stats):
        '''Record the (loss, accuracy) statistics of the latest training epoch.'''
        self.train_loss = stats[0]
        self.train_acc = stats[1]

    def _set_test_stats(self, stats):
        '''Record the (loss, accuracy) statistics of the latest testing epoch.'''
        self.test_loss = stats[0]
        self.test_acc = stats[1]

    def get_stats(self):
        '''Return the latest statistics as a tuple
        (train_loss, train_acc, test_loss, test_acc).
        '''
        return (self.train_loss, self.train_acc, self.test_loss, self.test_acc)

    def train(self, dataloader, epoch):
        '''Train the Trainer's network.

        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the training data.
            epoch (int): The current epoch.

        Returns:
            loss (float): The mean loss over the epoch.
            accuracy (float): The mean accuracy over the epoch (in [0, 1]).
        '''
        self.net.train()
        stats = self._run_epoch(dataloader, epoch)
        self._set_train_stats(stats)
        return stats

    def eval(self, dataloader, epoch):
        '''Evaluate the Trainer's network.

        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the testing data.
            epoch (int): The current epoch.

        Returns:
            loss (float): The mean loss over the epoch.
            accuracy (float): The mean accuracy over the epoch (in [0, 1]).
        '''
        self.net.eval()
        stats = self._run_epoch(dataloader, epoch)
        self._set_test_stats(stats)
        return stats

    def _run_epoch(self, dataloader, epoch):
        '''Perform a single epoch of either training or evaluation.

        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the testing data.
            epoch (int): The current epoch.

        Returns:
            loss (float): The mean loss over the epoch.
            accuracy (float): The mean accuracy over the epoch (in [0, 1]).
''' N = len(dataloader.batch_sampler) msg = 'train' if self.net.training else 'test' func = self.train_batch if self.net.training else self.test_batch loss = [] acc = [] at = 0 for i, batch in enumerate(dataloader): t = time.time() if self.net.training: self.update_lr(epoch * N + i + 1) batch_loss, batch_acc = func(batch) t = time.time() - t if i == 0: at = t else: at = at * i / (i + 1) + t / (i + 1) loss.append(batch_loss) acc.append(batch_acc) print(f'\rEPOCH {epoch}: {msg} ' f'batch {i + 1:04d}/{N} ' f'lr[ {self.optim.param_groups[0]["lr"]:1.3e} ] ' f'[ {t:.3f} ({at:.3f}) secs ]' f'{" "*10}', end='', flush=True) loss = np.mean(loss) acc = np.mean(acc) return loss, acc def update_lr(self, i=None): '''Update the optimizer's learning rate. Used for batch-level learning rate scheduling. If using an epoch-level scheduler, define and use it in the epoch loop. If the iteration number is not provided (None) or the Trainer has no lr_schedule attribute, this function does nothing and returns. Args: i (int): iteration number (starts at 1 for the first batch). ''' if i is None or not hasattr(self, 'lr_schedule'): return self.lr_schedule.step(i) def train_batch(self, batch): '''Train the Trainer's network on a single training batch. ''' raise NotImplementedError() def
(self, batch):
        '''Test the Trainer's network on a single testing batch.
        '''
        raise NotImplementedError()


class ClassifierTrainer(Trainer):
    '''ClassifierTrainer(net, optim, loss_fn, accuracy_metric=None)

    Trainer for training a network to do image classification.

    Args:
        net (torch.nn.Module): An instance of a network that inherits from
            torch.nn.Module.
        optim (torch.optim.Optimizer): An instance of an optimizer that
            inherits from torch.optim.Optimizer.
        loss_fn (callable): A callable that calculates and returns a loss
            value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
            an accuracy value. Usually this will be a floating point number
            in [0, 1].
    '''

    def train_batch(self, batch):
        '''Train the Trainer's network on a single training batch.

        Args:
            batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
                Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or
                more dimensions (BxLx*) which matches images in the first
                (batch) dimension. The exact dimensionality of labels will
                depend on the application and loss function chosen, but often
                consists of integer class-indexes.

        Returns:
            loss (float): The mean loss over the batch.
            accuracy (float): The mean accuracy over the batch (in [0, 1]).
        '''
        self.optim.zero_grad()
        imgs, labels = network.tocuda(batch)
        predictions = self.net(imgs)
        loss = self.loss_fn(predictions, labels)
        loss.backward()
        self.optim.step()

        loss = loss.item()
        with torch.no_grad():
            accuracy = self.measure_accuracy(predictions, labels)
        return loss, accuracy

    @torch.no_grad()
    def test_batch(self, batch):
        '''Evaluate the Trainer's network on a single testing batch.

        Args:
            batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
                Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or
                more dimensions (BxLx*) which matches images in the first
                (batch) dimension. The exact dimensionality of labels will
                depend on the application and loss function chosen, but often
                consists of integer class-indexes.

        Returns:
            loss (float): The mean loss over the batch.
            accuracy (float): The mean accuracy over the batch (in [0, 1]).
        '''
        imgs, labels = network.tocuda(batch)
        predictions = self.net(imgs)
        loss = self.loss_fn(predictions, labels).item()
        accuracy = self.measure_accuracy(predictions, labels)
        return loss, accuracy


class AutoencoderTrainer(Trainer):
    '''AutoencoderTrainer(net, optim, loss_fn)

    Trainer for training an autoencoder network.

    Args:
        net (torch.nn.Module): An instance of a network that inherits from
            torch.nn.Module.
        optim (torch.optim.Optimizer): An instance of an optimizer that
            inherits from torch.optim.Optimizer.
        loss_fn (callable): A callable that calculates and returns a loss
            value. The loss value should be a single-element Tensor.
    '''

    def __init__(self, net, optim, loss_fn):
        super(AutoencoderTrainer, self).__init__(net, optim, loss_fn, None)
        delattr(self, 'measure_accuracy')

    def train_batch(self, batch):
        '''Train the Trainer's network on a single training batch.

        Args:
            batch (Tensor): A batch of images: a 4-d Tensor of shape
                (BxCxHxW). The images serve as both the input and the
                reconstruction target, so no labels are expected.

        Returns:
            loss (float): The mean loss over the batch.
        '''
        self.optim.zero_grad()
        imgs = network.tocuda(batch)
        predictions = self.net(imgs)
        loss = self.loss_fn(predictions, imgs)
        loss.backward()
        self.optim.step()

        loss = loss.item()
        return loss

    @torch.no_grad()
    def test_batch(self, batch):
        '''Evaluate the Trainer's network on a single testing batch.

        Args:
            batch (Tensor): A batch of images: a 4-d Tensor of shape
                (BxCxHxW). The images serve as both the input and the
                reconstruction target, so no labels are expected.

        Returns:
            loss (float): The mean loss over the batch.
        '''
        imgs = network.tocuda(batch)
        predictions = self.net(imgs)
        loss = self.loss_fn(predictions, imgs).item()
        return loss

    def _run_epoch(self, dataloader, epoch):
        '''Perform a single epoch of either training or evaluation.

        Args:
            dataloader (torch.utils.data.DataLoader): An instance of a
                DataLoader, which will provide access to the testing data.
            epoch (int): The current epoch.

        Returns:
            loss (float): The mean loss over the epoch.
        '''
        N = int(np.ceil(len(dataloader.dataset) / dataloader.batch_size))
        msg = 'train' if self.net.training else 'test'
        func = self.train_batch if self.net.training else self.test_batch

        loss = []
        for i, batch in enumerate(dataloader):
            batch_loss = func(batch)
            loss.append(batch_loss)
            print(f'\rEPOCH {epoch}: {msg} batch {i + 1:04d}/{N}{" "*10}',
                  end='', flush=True)
        loss = np.mean(loss)
        return loss
test_batch
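The Trainer API above is driven by a plain epoch loop. A minimal usage sketch follows; the model, optimizer settings, and the train_loader/test_loader DataLoaders are illustrative assumptions, not part of dnnutil.

import torch
from torch import nn

# Hypothetical setup: dnnutil.network.tocuda moves batches to the GPU,
# so the model is placed there as well.
net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).cuda()
optim = torch.optim.Adam(net.parameters(), lr=1e-3)
trainer = ClassifierTrainer(net, optim, nn.CrossEntropyLoss())

for epoch in range(10):
    trainer.train(train_loader, epoch)  # train_loader: an assumed DataLoader
    trainer.eval(test_loader, epoch)    # test_loader: an assumed DataLoader
    print(trainer.get_stats())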
utils.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use crypto::ElementHasher; use math::FieldElement; use utils::{iter_mut, uninit_vector}; #[cfg(feature = "concurrent")] use rayon::prelude::*; /// Maps positions in the evaluation domain to indexes of commitment Merkle tree. pub fn
(
    positions: &[usize],
    source_domain_size: usize,
    folding_factor: usize,
    num_partitions: usize,
) -> Vec<usize> {
    // if there is only 1 partition, the order of elements in the commitment tree
    // is the same as the order of elements in the evaluation domain
    if num_partitions == 1 {
        return positions.to_vec();
    }

    let target_domain_size = source_domain_size / folding_factor;
    let partition_size = target_domain_size / num_partitions;

    let mut result = Vec::new();

    for position in positions {
        let partition_idx = position % num_partitions;
        let local_idx = (position - partition_idx) / num_partitions;
        let position = partition_idx * partition_size + local_idx;
        result.push(position);
    }

    result
}

/// Hashes each of the arrays in the provided slice and returns a vector of resulting hashes.
pub fn hash_values<H, E, const N: usize>(values: &[[E; N]]) -> Vec<H::Digest>
where
    E: FieldElement,
    H: ElementHasher<BaseField = E::BaseField>,
{
    let mut result: Vec<H::Digest> = unsafe { uninit_vector(values.len()) };
    iter_mut!(result, 1024).zip(values).for_each(|(r, v)| {
        *r = H::hash_elements(v);
    });
    result
}
map_positions_to_indexes
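For intuition, the index arithmetic in map_positions_to_indexes can be cross-checked outside Rust. A small Python sketch of the same computation (the domain sizes below are made up):

def map_positions_to_indexes(positions, source_domain_size, folding_factor, num_partitions):
    # Mirrors the Rust function above: positions are distributed round-robin
    # across partitions, so index = partition_idx * partition_size + local_idx.
    if num_partitions == 1:
        return list(positions)
    target_domain_size = source_domain_size // folding_factor
    partition_size = target_domain_size // num_partitions
    result = []
    for position in positions:
        partition_idx = position % num_partitions
        local_idx = (position - partition_idx) // num_partitions
        result.append(partition_idx * partition_size + local_idx)
    return result

print(map_positions_to_indexes([0, 1, 2, 3], 32, 4, 2))  # [0, 4, 1, 5]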
deployed_service_type_info_py3.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class DeployedServiceTypeInfo(Model): """Information about service type deployed on a node, information such as the status of the service type registration on a node. :param service_type_name: Name of the service type as specified in the service manifest. :type service_type_name: str :param service_manifest_name: The name of the service manifest in which this service type is defined. :type service_manifest_name: str :param code_package_name: The name of the code package that registered the service type. :type code_package_name: str :param status: The status of the service type registration on the node. Possible values include: 'Invalid', 'Disabled', 'Enabled', 'Registered' :type status: str or ~azure.servicefabric.models.ServiceTypeRegistrationStatus :param service_package_activation_id: The ActivationId of a deployed service package. If ServicePackageActivationMode specified at the time of creating the service is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ _attribute_map = { 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, 'status': {'key': 'Status', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } def __init__(self, *, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None:
super(DeployedServiceTypeInfo, self).__init__(**kwargs) self.service_type_name = service_type_name self.service_manifest_name = service_manifest_name self.code_package_name = code_package_name self.status = status self.service_package_activation_id = service_package_activation_id
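A minimal construction sketch for the generated model; the field values are hypothetical, and 'Registered' is one of the status strings listed in the docstring above.

info = DeployedServiceTypeInfo(
    service_type_name='MyStatelessServiceType',  # hypothetical
    service_manifest_name='MyServiceManifest',   # hypothetical
    code_package_name='Code',                    # hypothetical
    status='Registered',
    service_package_activation_id='',            # empty for SharedProcess activation
)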
pr-filters.tsx
import React from 'dom-chef'; import select from 'select-dom'; import delegate from 'delegate-it'; import cache from 'webext-storage-cache'; import CheckIcon from 'octicon/check.svg'; import features from '../libs/features'; import * as pageDetect from 'github-page-detection'; import * as api from '../libs/api'; import {getRepoGQL, getRepoURL} from '../libs/utils'; const reviewsFilterSelector = '#reviews-select-menu'; function addDropdownItem(dropdown: HTMLElement, title: string, filterCategory: string, filterValue: string): void { const filterQuery = `${filterCategory}:${filterValue}`; const searchParameter = new URLSearchParams(location.search); const currentQuerySegments = searchParameter.get('q')?.split(/\s+/) ?? []; const isSelected = currentQuerySegments.some( segment => segment.toLowerCase() === filterQuery ); const query = currentQuerySegments.filter( segment => !segment.startsWith(`${filterCategory}:`) ).join(' '); const search = new URLSearchParams({ q: query + (isSelected ? '' : ` ${filterQuery}`) }); dropdown.append( <a href={`?${String(search)}`} className="SelectMenu-item" aria-checked={isSelected ? 'true' : 'false'} role="menuitemradio" > <CheckIcon className="SelectMenu-icon SelectMenu-icon--check"/> <span>{title}</span> </a> ); } const hasDraftFilter = new WeakSet(); function addDraftFilter({delegateTarget: reviewsFilter}: delegate.Event): void { if (hasDraftFilter.has(reviewsFilter)) { return; } hasDraftFilter.add(reviewsFilter); const dropdown = select('.SelectMenu-list', reviewsFilter)!; dropdown.append( <div className="SelectMenu-divider"> Filter by draft pull requests </div> ); addDropdownItem(dropdown, 'Ready for review', 'draft', 'false'); addDropdownItem(dropdown, 'Not ready for review (Draft PR)', 'draft', 'true'); } const hasChecks = cache.function(async (): Promise<boolean> => { const {repository} = await api.v4(` repository(${getRepoGQL()}) { head: object(expression: "HEAD") { ... on Commit { history(first: 10) { nodes { statusCheckRollup { state } } } } } } `); return repository.head.history.nodes.some((commit: AnyObject) => commit.statusCheckRollup); }, { maxAge: 3, cacheKey: () => __filebasename + ':' + getRepoURL() }); async function addChecksFilter(): Promise<void> { const reviewsFilter = select(reviewsFilterSelector); if (!reviewsFilter) { return; }
// Copy existing element and adapt its content const checksFilter = reviewsFilter.cloneNode(true); checksFilter.id = ''; select('summary', checksFilter)!.firstChild!.textContent = 'Checks\u00A0'; // Only replace text node, keep caret select('.SelectMenu-title', checksFilter)!.textContent = 'Filter by checks status'; const dropdown = select('.SelectMenu-list', checksFilter)!; dropdown.textContent = ''; // Drop previous filters for (const status of ['Success', 'Failure', 'Pending']) { addDropdownItem(dropdown, status, 'status', status.toLowerCase()); } reviewsFilter.after(checksFilter); } function init(): void { delegate(document, reviewsFilterSelector, 'toggle', addDraftFilter, true); addChecksFilter(); } features.add({ id: __filebasename, description: 'Adds Checks and Draft PR dropdown filters in PR lists.', screenshot: 'https://user-images.githubusercontent.com/202916/74453250-6d9de200-4e82-11ea-8fd4-7c0de57e001a.png' }, { include: [ pageDetect.isPRList ], init });
if (!await hasChecks()) { return; }
dropmenu.js
/** * Drop Menu * author: Pavel Khoroshkov aka pgood * http://pgood.ru */ function initDropMenu(id){ var menu=todo.get(id); if(!menu)return; var _dmCookie=new Cookie('sidemenu'+id), indexes=_dmCookie.indexes?_dmCookie.indexes.split(','):[], isOpen=function(id){for(var i=0;i<indexes.length;i++)if(indexes[i]==id)return true;}, createFolderButton=function(state,parent){ var e=parent.appendChild(todo.create('span')); e._dmChangeState=function(){ this._dmState=!this._dmState; this.className=this._dmState?'open':'close'; }; e.onclick=function(){ if(this.parentNode.parentNode._dmSubmenu()) this._dmChangeState(); return false; }; e._dmState=!state; e._dmChangeState(); return e; }; _dmCookie.addIndex=function(id){ var idx=this.indexes?this.indexes.split(','):[]; for(var i=0;i<idx.length;i++)if(idx[i]==id)return; idx.push(id); this.indexes=idx.join(','); this.store(1); }; _dmCookie.removeIndex=function(id){ var idx=this.indexes?this.indexes.split(','):[]; for(var i=0;i<idx.length;i++)if(idx[i]==id){ idx.splice(i,1); this.indexes=idx.join(','); this.store(1); } }; todo.loop(menu.getElementsByTagName('dt'),function(k){ this._dmIndex=k; this._dmGetDd=function(){ return function(e){while(e=e.nextSibling)if(e.nodeType==1)return e.nodeName.toLowerCase()=='dd'?e:null;}(this); }; var dd=this._dmGetDd(), s=this.className.search(/open/)!=-1||isOpen(this._dmIndex); if(dd){ this._dmButton=createFolderButton(s,this.getElementsByTagName('a')[0]||this.getElementsByTagName('span')[0]); if(s)dd.style.height='auto'; this._dmSubmenu=function(){ var dd=this._dmGetDd(), m=function(){ var r=todo.motion(dd.style.height,dd._task.start,dd._task.finish,4); if(r.ready){ dd.style.height=dd._task.finish?'auto':0; window.clearInterval(dd._timer); dd._timer=null; }else dd.style.height=r.res+'px'; }; if(!dd._timer){ if(this._dmButton._dmState){ _dmCookie.removeIndex(this._dmIndex); dd.style.height=dd.offsetHeight+'px'; dd._task={'start':dd.offsetHeight,'finish':0}; }else{ _dmCookie.addIndex(this._dmIndex); if(isNaN(parseInt(dd.style.height)))dd.style.height=0; dd._task={'start':0,'finish':dd.getElementsByTagName('dl')[0].offsetHeight}; }; dd._timer=window.setInterval(m,50);
} return false; } } }); };
return true;
application.go
package store

func (zk *ZkStore) CreateApp(app *Application) error {
	if zk.GetApp(app.ID) != nil {
		return ErrAppAlreadyExists
	}

	op := &AtomicOp{
		Op:      OP_ADD,
		Entity:  ENTITY_APP,
		Param1:  app.ID,
		Payload: app,
	}

	return zk.Apply(op, true)
}

func (zk *ZkStore) UpdateApp(app *Application) error {
	if zk.GetApp(app.ID) == nil {
		return ErrAppNotFound
	}

	op := &AtomicOp{
		Op:      OP_UPDATE,
		Entity:  ENTITY_APP,
		Param1:  app.ID,
		Payload: app,
	}

	return zk.Apply(op, true)
}

func (zk *ZkStore) GetApp(appId string) *Application {
	zk.mu.RLock()
	defer zk.mu.RUnlock()

	appStore, found := zk.Storage.Apps[appId]
	if !found {
		return nil
	}

	return appStore.App
}

func (zk *ZkStore) ListApps() []*Application {
	zk.mu.RLock()
	defer zk.mu.RUnlock()

	apps := make([]*Application, 0)
	for _, app := range zk.Storage.Apps {
		apps = append(apps, app.App)
	}

	return apps
}

func (zk *ZkStore) DeleteApp(appId string) error {
	if zk.GetApp(appId) == nil {
		return ErrAppNotFound
	}

	op := &AtomicOp{
		Op:     OP_REMOVE,
		Entity: ENTITY_APP,
		Param1: appId,
	}

	return zk.Apply(op, true)
}
main.rs
use std::net::TcpListener;
use std::thread;
use std::io::{Read, Write};
use std::io;

fn start() -> io::Result<()> {
    let lis = TcpListener::bind("0.0.0.0:12345")?;
    println!("started on {:?}", lis);
    for stream in lis.incoming() {
        let mut stream = stream?;
        println!("{:?} connected.", stream.peer_addr()?);
        let _: thread::JoinHandle<io::Result<()>> = thread::spawn(move || {
            loop {
                let mut b = [0; 1024];
                let n = stream.read(&mut b)?;
                if n == 0 {
                    println!("{:?} closed.", stream.peer_addr()?);
                    return Ok(());
                } else {
                    // write_all avoids dropping data on a partial write
                    stream.write_all(&b[0..n])?;
                }
            }
        });
    }
    Ok(())
}

fn main()
{
    match start() {
        Ok(_) => return,
        Err(e) => println!("{:?}", e),
    }
}
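The echo server above can be exercised with any TCP client. A quick sketch in Python, assuming the server is running locally on port 12345:

import socket

with socket.create_connection(("127.0.0.1", 12345)) as conn:
    conn.sendall(b"hello")
    print(conn.recv(1024))  # b'hello'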
AddTwoInts.js
// Auto-generated. Do not edit!

// (in-package novice_tutorials.srv)

"use strict";

const _ros_msg_utils = require('ros_msg_utils');  // required for the _ros_msg_utils references below; was missing
const _serializer = _ros_msg_utils.Serialize;
const _arraySerializer = _serializer.Array;
const _deserializer = _ros_msg_utils.Deserialize;
const _arrayDeserializer = _deserializer.Array;
const _finder = _ros_msg_utils.Find;
const _getByteLength = _ros_msg_utils.getByteLength;

//-----------------------------------------------------------

//-----------------------------------------------------------

class AddTwoIntsRequest {
  constructor(initObj={}) {
    if (initObj === null) {
      // initObj === null is a special case for deserialization where we don't initialize fields
      this.a = null;
      this.b = null;
    }
    else {
      if (initObj.hasOwnProperty('a')) {
        this.a = initObj.a
      }
      else {
        this.a = 0;
      }
      if (initObj.hasOwnProperty('b')) {
        this.b = initObj.b
      }
      else {
        this.b = 0;
      }
    }
  }

  static serialize(obj, buffer, bufferOffset) {
    // Serializes a message object of type AddTwoIntsRequest
    // Serialize message field [a]
    bufferOffset = _serializer.int64(obj.a, buffer, bufferOffset);
    // Serialize message field [b]
    bufferOffset = _serializer.int64(obj.b, buffer, bufferOffset);
    return bufferOffset;
  }

  static deserialize(buffer, bufferOffset=[0]) {
    // deserializes a message object of type AddTwoIntsRequest
    let len;
    let data = new AddTwoIntsRequest(null);
    // Deserialize message field [a]
    data.a = _deserializer.int64(buffer, bufferOffset);
    // Deserialize message field [b]
    data.b = _deserializer.int64(buffer, bufferOffset);
    return data;
  }

  static getMessageSize(object) {
    return 16;
  }

  static datatype() {
    // Returns string type for a service object
    return 'novice_tutorials/AddTwoIntsRequest';
  }

  static md5sum() {
    // Returns md5sum for a message object
    return '36d09b846be0b371c5f190354dd3153e';
  }

  static messageDefinition() {
    // Returns full string definition for message
    return `
    int64 a
    int64 b
    `;
  }

  static Resolve(msg) {
    // deep-construct a valid message object instance of whatever was passed in
    if (typeof msg !== 'object' || msg === null) {
      msg = {};
    }
    const resolved = new AddTwoIntsRequest(null);
    if (msg.a !== undefined) {
      resolved.a = msg.a;
    }
    else {
      resolved.a = 0
    }
    if (msg.b !== undefined) {
      resolved.b = msg.b;
    }
    else {
      resolved.b = 0
    }
    return resolved;
    }
};

class
{ constructor(initObj={}) { if (initObj === null) { // initObj === null is a special case for deserialization where we don't initialize fields this.sum = null; } else { if (initObj.hasOwnProperty('sum')) { this.sum = initObj.sum } else { this.sum = 0; } } } static serialize(obj, buffer, bufferOffset) { // Serializes a message object of type AddTwoIntsResponse // Serialize message field [sum] bufferOffset = _serializer.int64(obj.sum, buffer, bufferOffset); return bufferOffset; } static deserialize(buffer, bufferOffset=[0]) { //deserializes a message object of type AddTwoIntsResponse let len; let data = new AddTwoIntsResponse(null); // Deserialize message field [sum] data.sum = _deserializer.int64(buffer, bufferOffset); return data; } static getMessageSize(object) { return 8; } static datatype() { // Returns string type for a service object return 'novice_tutorials/AddTwoIntsResponse'; } static md5sum() { //Returns md5sum for a message object return 'b88405221c77b1878a3cbbfff53428d7'; } static messageDefinition() { // Returns full string definition for message return ` int64 sum `; } static Resolve(msg) { // deep-construct a valid message object instance of whatever was passed in if (typeof msg !== 'object' || msg === null) { msg = {}; } const resolved = new AddTwoIntsResponse(null); if (msg.sum !== undefined) { resolved.sum = msg.sum; } else { resolved.sum = 0 } return resolved; } }; module.exports = { Request: AddTwoIntsRequest, Response: AddTwoIntsResponse, md5sum() { return '6a2e34150c00229791cc89ff309fff21'; }, datatype() { return 'novice_tutorials/AddTwoInts'; } };
AddTwoIntsResponse
termion.rs
extern crate termion; extern crate chan_signal; use self::termion::color as tcolor; use self::termion::event::Event as TEvent; use self::termion::event::Key as TKey; use self::termion::input::TermRead; use self::termion::raw::IntoRawMode; use self::termion::screen::AlternateScreen; use self::termion::style as tstyle; use backend; use chan; use event::{Event, Key}; use std::cell::Cell; use std::collections::BTreeMap; use std::fmt; use std::io::Write; use std::thread; use theme; pub struct Concrete { terminal: AlternateScreen<termion::raw::RawTerminal<::std::io::Stdout>>, current_style: Cell<theme::ColorStyle>, colors: BTreeMap<i16, (Box<tcolor::Color>, Box<tcolor::Color>)>, input: chan::Receiver<Event>, resize: chan::Receiver<chan_signal::Signal>, timeout: Option<u32>, } trait Effectable { fn on(&self); fn off(&self); } struct ColorRef<'a>(&'a tcolor::Color); impl<'a> tcolor::Color for ColorRef<'a> { #[inline] fn write_fg(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.write_fg(f) } #[inline] fn write_bg(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.write_bg(f) } } impl Effectable for theme::Effect { fn on(&self) { match *self { theme::Effect::Simple => (), theme::Effect::Reverse => print!("{}", tstyle::Invert), } } fn off(&self) { match *self { theme::Effect::Simple => (), theme::Effect::Reverse => print!("{}", tstyle::NoInvert), } } } fn apply_colors(fg: &tcolor::Color, bg: &tcolor::Color) { print!("{}{}", tcolor::Fg(ColorRef(fg)), tcolor::Bg(ColorRef(bg))); } impl Concrete { fn apply_colorstyle(&self, color_style: theme::ColorStyle) { let (ref fg, ref bg) = self.colors[&color_style.id()]; apply_colors(&**fg, &**bg); } fn apply_any_color(&self, fg_color: theme::Color, bg_color: theme::Color) { let ref fg = colour_to_termion_colour(&fg_color); let ref bg = colour_to_termion_colour(&bg_color); apply_colors(&**fg, &**bg); } } impl backend::Backend for Concrete { fn init() -> Self { print!("{}", termion::cursor::Hide); let resize = chan_signal::notify(&[chan_signal::Signal::WINCH]); let terminal = AlternateScreen::from(::std::io::stdout().into_raw_mode().unwrap()); let (sender, receiver) = chan::async(); thread::spawn(move || for key in ::std::io::stdin().events() { if let Ok(key) = key { sender.send(map_key(key)) } }); let backend = Concrete { terminal: terminal, current_style: Cell::new(theme::ColorStyle::Background), colors: BTreeMap::new(), input: receiver, resize: resize, timeout: None, }; backend } fn finish(&mut self) { print!("{}{}", termion::cursor::Show, termion::cursor::Goto(1, 1)); print!("{}[49m{}[39m{}", 27 as char, 27 as char, termion::clear::All); } fn init_color_style(&mut self, style: theme::ColorStyle, foreground: &theme::Color, background: &theme::Color) { // Step 1: convert foreground and background into proper termion Color self.colors.insert(style.id(), (colour_to_termion_colour(foreground), colour_to_termion_colour(background))); } fn with_color<F: FnOnce()>(&self, color: theme::ColorStyle, f: F) { let current_style = self.current_style.get(); self.apply_colorstyle(color); self.current_style.set(color); f(); self.current_style.set(current_style);
self.apply_colorstyle(current_style); } fn with_any_color<F: FnOnce()>(&self, fg_color: theme::Color, bg_color: theme::Color, f: F) { let current_style = self.current_style.get(); self.apply_any_color(fg_color, bg_color); f(); self.apply_colorstyle(current_style); } fn with_effect<F: FnOnce()>(&self, effect: theme::Effect, f: F) { effect.on(); f(); effect.off(); } fn has_colors(&self) -> bool { // TODO: color support detection? true } fn screen_size(&self) -> (usize, usize) { let (x, y) = termion::terminal_size().unwrap_or((1, 1)); (x as usize, y as usize) } fn clear(&self) { self.apply_colorstyle(theme::ColorStyle::Background); print!("{}", termion::clear::All); } fn refresh(&mut self) { self.terminal.flush().unwrap(); } fn print_at(&self, (x, y): (usize, usize), text: &str) { print!("{}{}", termion::cursor::Goto(1 + x as u16, 1 + y as u16), text); } fn set_refresh_rate(&mut self, fps: u32) { self.timeout = Some(1000 / fps as u32); } fn poll_event(&self) -> Event { let input = &self.input; let resize = &self.resize; if let Some(timeout) = self.timeout { let timeout = chan::after_ms(timeout); chan_select!{ timeout.recv() => return Event::Refresh, resize.recv() => return Event::WindowResize, input.recv() -> input => return input.unwrap(), } } else { chan_select!{ resize.recv() => return Event::WindowResize, input.recv() -> input => return input.unwrap(), } } } } fn map_key(event: TEvent) -> Event { match event { TEvent::Unsupported(bytes) => Event::Unknown(bytes), TEvent::Key(TKey::Esc) => Event::Key(Key::Esc), TEvent::Key(TKey::Backspace) => Event::Key(Key::Backspace), TEvent::Key(TKey::Left) => Event::Key(Key::Left), TEvent::Key(TKey::Right) => Event::Key(Key::Right), TEvent::Key(TKey::Up) => Event::Key(Key::Up), TEvent::Key(TKey::Down) => Event::Key(Key::Down), TEvent::Key(TKey::Home) => Event::Key(Key::Home), TEvent::Key(TKey::End) => Event::Key(Key::End), TEvent::Key(TKey::PageUp) => Event::Key(Key::PageUp), TEvent::Key(TKey::PageDown) => Event::Key(Key::PageDown), TEvent::Key(TKey::Delete) => Event::Key(Key::Del), TEvent::Key(TKey::Insert) => Event::Key(Key::Ins), TEvent::Key(TKey::F(i)) if i < 12 => Event::Key(Key::from_f(i)), TEvent::Key(TKey::F(j)) => Event::Unknown(vec![j]), TEvent::Key(TKey::Char('\n')) => Event::Key(Key::Enter), TEvent::Key(TKey::Char('\t')) => Event::Key(Key::Tab), TEvent::Key(TKey::Char(c)) => Event::Char(c), TEvent::Key(TKey::Ctrl('c')) => Event::Exit, TEvent::Key(TKey::Ctrl(c)) => Event::CtrlChar(c), TEvent::Key(TKey::Alt(c)) => Event::AltChar(c), _ => Event::Unknown(vec![]), } } fn colour_to_termion_colour(clr: &theme::Color) -> Box<tcolor::Color> { match *clr { theme::Color::Dark(theme::BaseColor::Black) => Box::new(tcolor::Black), theme::Color::Dark(theme::BaseColor::Red) => Box::new(tcolor::Red), theme::Color::Dark(theme::BaseColor::Green) => Box::new(tcolor::Green), theme::Color::Dark(theme::BaseColor::Yellow) => { Box::new(tcolor::Yellow) } theme::Color::Dark(theme::BaseColor::Blue) => Box::new(tcolor::Blue), theme::Color::Dark(theme::BaseColor::Magenta) => { Box::new(tcolor::Magenta) } theme::Color::Dark(theme::BaseColor::Cyan) => Box::new(tcolor::Cyan), theme::Color::Dark(theme::BaseColor::White) => Box::new(tcolor::White), theme::Color::Light(theme::BaseColor::Black) => { Box::new(tcolor::LightBlack) } theme::Color::Light(theme::BaseColor::Red) => { Box::new(tcolor::LightRed) } theme::Color::Light(theme::BaseColor::Green) => { Box::new(tcolor::LightGreen) } theme::Color::Light(theme::BaseColor::Yellow) => { Box::new(tcolor::LightYellow) } 
theme::Color::Light(theme::BaseColor::Blue) => { Box::new(tcolor::LightBlue) } theme::Color::Light(theme::BaseColor::Magenta) => { Box::new(tcolor::LightMagenta) } theme::Color::Light(theme::BaseColor::Cyan) => { Box::new(tcolor::LightCyan) } theme::Color::Light(theme::BaseColor::White) => { Box::new(tcolor::LightWhite) } theme::Color::Rgb(r, g, b) => Box::new(tcolor::Rgb(r, g, b)), theme::Color::RgbLowRes(r, g, b) => { Box::new(tcolor::AnsiValue::rgb(r, g, b)) } } }
metadata.go
// Package metadata is a way of defining message headers package metadata import ( "context" ) type metaKey struct{} // Metadata is our way of representing request headers internally. // They're used at the RPC level and translate back and forth // from Transport headers. type Metadata map[string]string // Copy makes a copy of the metadata func Copy(md Metadata) Metadata { cmd := make(Metadata) for k, v := range md { cmd[k] = v } return cmd } // FromContext returns metadata from the given context func FromContext(ctx context.Context) (Metadata, bool) {
// NewContext creates a new context with the given metadata func NewContext(ctx context.Context, md Metadata) context.Context { return context.WithValue(ctx, metaKey{}, md) } // MergeContext merges metadata to existing metadata, overwriting if specified func MergeContext(ctx context.Context, patchMd Metadata, overwrite bool) context.Context { md, _ := ctx.Value(metaKey{}).(Metadata) cmd := make(Metadata) for k, v := range md { cmd[k] = v } for k, v := range patchMd { if _, ok := cmd[k]; ok && !overwrite { // skip } else { cmd[k] = v } } return context.WithValue(ctx, metaKey{}, cmd) }
md, ok := ctx.Value(metaKey{}).(Metadata) return md, ok }
dynamic_response.rs
extern crate env_logger; #[macro_use] extern crate log; extern crate simple_test_bbarekas; use simple_test_bbarekas::{Method, Server, StatusCode}; fn main()
{ let host = "127.0.0.1"; let port = "7878"; let server = Server::new(|request, mut response| { info!("Request received. {} {}", request.method(), request.uri()); match request.method() { &Method::GET => { let body = format!("The path you requested was '{}'", request.uri().path()); Ok(response.body(body.into_bytes())?) } &Method::POST => { let data = String::from_utf8_lossy(request.body()).into_owned(); let body = format!("The data you posted was '{}'", data); Ok(response.body(body.into_bytes())?) } _ => { response.status(StatusCode::NOT_FOUND); Ok(response.body(b"<h1>404</h1><p>Not found!<p>".to_vec())?) } } }); server.listen(host, port); }
qms-control-details.model.ts
import { Moment } from 'moment'; export interface IQmsControlDetails { id?: number; inspectionCd?: string; inspectionItem?: string; technicalRequirement?: string; inspectionInstrument?: string; inspectionResultDiff?: string; flagStatus?: string; compPkid?: string; remark?: string; reserveFirst?: string; reserveSecond?: string; reserveThird?: string; makeUser?: string; makeTime?: Moment; modifyUser?: string; modifyTime?: Moment; } export class
implements IQmsControlDetails { constructor( public id?: number, public inspectionCd?: string, public inspectionItem?: string, public technicalRequirement?: string, public inspectionInstrument?: string, public inspectionResultDiff?: string, public flagStatus?: string, public compPkid?: string, public remark?: string, public reserveFirst?: string, public reserveSecond?: string, public reserveThird?: string, public makeUser?: string, public makeTime?: Moment, public modifyUser?: string, public modifyTime?: Moment ) {} }
QmsControlDetails
service_test.go
package configmanager import ( "github.com/kaanaktas/openbanking-accountinformation/internal/cache" "github.com/kaanaktas/openbanking-accountinformation/internal/store" "testing" ) func
(t *testing.T) { repo := NewRepository(store.LoadDBConnection()) type fields struct { repo Repository ch cache.Cache } type args struct { aspspId string configName string } tests := []struct { name string fields fields args args want string wantErr bool }{ { "test", fields{repo: repo, ch: cache.LoadInMemory()}, args{ aspspId: "danske", configName: "FAPI_FINANCIAL_ID", }, "0015800000jf7AeAAI", false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := service{ repository: tt.fields.repo, ch: tt.fields.ch, } got, err := s.FindByConfigName(tt.args.aspspId, tt.args.configName) if (err != nil) != tt.wantErr { t.Errorf("FindByConfigName() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { t.Errorf("FindByConfigName() got = %v, want %v", got, tt.want) } }) } }
Test_service_FindByConfigName
urls.py
try: from django.urls import path from django.contrib import admin
# django < 2.0 from django.conf.urls import include, url from django.contrib import admin urlpatterns = [url(r'^admin/', include(admin.site.urls))]
urlpatterns = [path('admin', admin.site.urls)] except ImportError:
primitive_types5.rs
// primitive_types5.rs // Destructure the `cat` tuple so that the println will work. // Scroll down for hints! fn main() { let cat = ("Furry McFurson", 3.5); println!("{} is {} years old.", cat.0, cat.1);
// Take a look at the Data Types -> The Tuple Type section of the book: // https://doc.rust-lang.org/book/ch03-02-data-types.html#the-tuple-type // Particularly the part about destructuring (second to last example in the section). // You'll need to make a pattern to bind `name` and `age` to the appropriate parts // of the tuple. You can do it!!
}
replace_nameserver_responses.go
// Code generated by go-swagger; DO NOT EDIT.

package nameserver

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"fmt"
	"io"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"

	models2 "github.com/anantadwi13/homan/internal/homan/external/api/haproxy/models"
)

// ReplaceNameserverReader is a Reader for the ReplaceNameserver structure.
type ReplaceNameserverReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *ReplaceNameserverReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewReplaceNameserverOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 202:
		result := NewReplaceNameserverAccepted()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewReplaceNameserverBadRequest()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewReplaceNameserverNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		result := NewReplaceNameserverDefault(response.Code())
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		if response.Code()/100 == 2 {
			return result, nil
		}
		return nil, result
	}
}

// NewReplaceNameserverOK creates a ReplaceNameserverOK with default headers values
func NewReplaceNameserverOK() *ReplaceNameserverOK {
	return &ReplaceNameserverOK{}
}

/* ReplaceNameserverOK describes a response with status code 200, with default header values.

Nameserver replaced
*/
type ReplaceNameserverOK struct {
	Payload *models2.Nameserver
}

func (o *ReplaceNameserverOK) Error() string {
	return fmt.Sprintf("[PUT /services/haproxy/configuration/nameservers/{name}][%d] replaceNameserverOK %+v", 200, o.Payload)
}

func (o *ReplaceNameserverOK) GetPayload() *models2.Nameserver {
	return o.Payload
}

func (o *ReplaceNameserverOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models2.Nameserver)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewReplaceNameserverAccepted creates a ReplaceNameserverAccepted with default headers values
func NewReplaceNameserverAccepted() *ReplaceNameserverAccepted {
	return &ReplaceNameserverAccepted{}
}

/* ReplaceNameserverAccepted describes a response with status code 202, with default header values.

Configuration change accepted and reload requested
*/
type ReplaceNameserverAccepted struct {
	/* ID of the requested reload */
	ReloadID string

	Payload *models2.Nameserver
}

func (o *ReplaceNameserverAccepted) Error() string {
	return fmt.Sprintf("[PUT /services/haproxy/configuration/nameservers/{name}][%d] replaceNameserverAccepted %+v", 202, o.Payload)
}

func (o *ReplaceNameserverAccepted) GetPayload() *models2.Nameserver {
	return o.Payload
}

func (o *ReplaceNameserverAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// hydrates response header Reload-ID
	hdrReloadID := response.GetHeader("Reload-ID")

	if hdrReloadID != "" {
		o.ReloadID = hdrReloadID
	}

	o.Payload = new(models2.Nameserver)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewReplaceNameserverBadRequest creates a ReplaceNameserverBadRequest with default headers values
func NewReplaceNameserverBadRequest() *ReplaceNameserverBadRequest {
	return &ReplaceNameserverBadRequest{}
}

/* ReplaceNameserverBadRequest describes a response with status code 400, with default header values.

Bad request
*/
type ReplaceNameserverBadRequest struct {
	/* Configuration file version */
	ConfigurationVersion string

	Payload *models2.Error
}

func (o *ReplaceNameserverBadRequest) Error() string {
	return fmt.Sprintf("[PUT /services/haproxy/configuration/nameservers/{name}][%d] replaceNameserverBadRequest %+v", 400, o.Payload)
}

func (o *ReplaceNameserverBadRequest) GetPayload() *models2.Error {
	return o.Payload
}

func (o *ReplaceNameserverBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// hydrates response header Configuration-Version
	hdrConfigurationVersion := response.GetHeader("Configuration-Version")

	if hdrConfigurationVersion != "" {
		o.ConfigurationVersion = hdrConfigurationVersion
	}

	o.Payload = new(models2.Error)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewReplaceNameserverNotFound creates a ReplaceNameserverNotFound with default headers values
func NewReplaceNameserverNotFound() *ReplaceNameserverNotFound {
	return &ReplaceNameserverNotFound{}
}

/* ReplaceNameserverNotFound describes a response with status code 404, with default header values.

The specified resource was not found
*/
type ReplaceNameserverNotFound struct {
	/* Configuration file version */
	ConfigurationVersion string

	Payload *models2.Error
}

func (o *ReplaceNameserverNotFound) Error() string {
	return fmt.Sprintf("[PUT /services/haproxy/configuration/nameservers/{name}][%d] replaceNameserverNotFound %+v", 404, o.Payload)
}

func (o *ReplaceNameserverNotFound) GetPayload() *models2.Error {
	return o.Payload
}

func (o *ReplaceNameserverNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// hydrates response header Configuration-Version
	hdrConfigurationVersion := response.GetHeader("Configuration-Version")

	if hdrConfigurationVersion != "" {
		o.ConfigurationVersion = hdrConfigurationVersion
	}

	o.Payload = new(models2.Error)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewReplaceNameserverDefault creates a ReplaceNameserverDefault with default headers values
func NewReplaceNameserverDefault(code int) *ReplaceNameserverDefault {
	return &ReplaceNameserverDefault{
		_statusCode: code,
	}
}

/* ReplaceNameserverDefault describes a response with status code -1, with default header values.

General Error
*/
type ReplaceNameserverDefault struct {
	_statusCode int

	/* Configuration file version */
	ConfigurationVersion string

	Payload *models2.Error
}

// Code gets the status code for the replace nameserver default response
func (o *ReplaceNameserverDefault) Code() int {
	return o._statusCode
}

func (o *ReplaceNameserverDefault) Error() string {
	return fmt.Sprintf("[PUT /services/haproxy/configuration/nameservers/{name}][%d] replaceNameserver default %+v", o._statusCode, o.Payload)
}

func (o *ReplaceNameserverDefault) GetPayload() *models2.Error {
	return o.Payload
}

func (o *ReplaceNameserverDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// hydrates response header Configuration-Version
	hdrConfigurationVersion := response.GetHeader("Configuration-Version")

	if hdrConfigurationVersion != "" {
		o.ConfigurationVersion = hdrConfigurationVersion
	}

	o.Payload = new(models2.Error)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}
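// --- usage sketch (not generated) ----------------------------------------
// A minimal sketch of driving the reader by hand; normally the generated
// go-swagger client calls ReadResponse for you. fakeResponse is hypothetical
// test scaffolding assumed to live in this package, and it additionally
// needs the "strings" import.

type fakeResponse struct {
	code int
	body string
}

func (r fakeResponse) Code() int                  { return r.code }
func (r fakeResponse) Message() string            { return "" }
func (r fakeResponse) GetHeader(string) string    { return "" }
func (r fakeResponse) GetHeaders(string) []string { return nil }
func (r fakeResponse) Body() io.ReadCloser        { return io.NopCloser(strings.NewReader(r.body)) }

func exampleReadResponse() {
	reader := &ReplaceNameserverReader{formats: strfmt.Default}
	// A 200 decodes the JSON body into *ReplaceNameserverOK and returns it
	// as the result; non-2xx codes come back as the error value instead.
	result, err := reader.ReadResponse(fakeResponse{code: 200, body: `{"name":"dns1"}`}, runtime.JSONConsumer())
	if err == nil {
		ok := result.(*ReplaceNameserverOK)
		fmt.Printf("replaced nameserver: %+v\n", ok.Payload)
	}
}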
get.rs
use crate::page::Page;
use anyhow::{bail, Result};
use pinwheel::prelude::*;
use std::{str::FromStr, sync::Arc};
use tangram_app_context::Context;
use tangram_app_core::{
	alerts::AlertModelType,
	error::{bad_request, not_found, redirect_to_login, service_unavailable},
	model::get_model_bytes,
	path_components,
	user::{authorize_user, authorize_user_for_model},
};
use tangram_app_layouts::model_layout::{model_layout_info, ModelNavItem};
use tangram_id::Id;
use tracing::error;

pub async fn get(request: &mut http::Request<hyper::Body>) -> Result<http::Response<hyper::Body>> {
	let context = request.extensions().get::<Arc<Context>>().unwrap().clone();
	let app = &context.app;
	let app_state = &app.state;
	// Extract the model id and alert id from the request path.
	let (model_id, alert_id) = if let ["repos", _, "models", model_id, "alerts", alert_id] =
		path_components(request).as_slice()
	{
		(model_id.to_owned(), alert_id.to_owned())
	} else {
		bail!("unexpected path");
	};
	let model_id: Id = match model_id.parse() {
		Ok(model_id) => model_id,
		Err(_) => return Ok(bad_request()),
	};
	let mut db = match app.begin_transaction().await {
		Ok(db) => db,
		Err(_) => return Ok(service_unavailable()),
	};
	let user = match authorize_user(request, &mut db, app_state.options.auth_enabled()).await? {
		Ok(user) => user,
		Err(_) => return Ok(redirect_to_login()),
	};
	if !authorize_user_for_model(&mut db, &user, model_id).await? {
		return Ok(not_found());
	}
	// Load the model from storage and derive its alert model type.
	let bytes = get_model_bytes(&app_state.storage, model_id).await?;
	let model = tangram_model::from_bytes(&bytes)?;
	let model_type = AlertModelType::from(model.inner());
	let model_layout_info =
		model_layout_info(&mut db, app_state, model_id, ModelNavItem::Monitors).await?;
	let alert = match app.get_alert(&mut db, Id::from_str(alert_id)?).await? {
		Some(alert) => alert,
		None => {
			error!("Alert {} not found in database", alert_id);
			return Ok(bad_request());
		}
	};
	let page = Page {
		alert,
		alert_id: alert_id.to_string(),
		model_layout_info,
		model_type,
		error: None,
	};
	app.commit_transaction(db).await?;
	let html = html(page);
	let response = http::Response::builder()
		.status(http::StatusCode::OK)
		.body(hyper::Body::from(html))
		.unwrap();
	Ok(response)
}
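// --- aside: slice-pattern routing (sketch, not part of get.rs) ------------
// The `if let ["repos", _, "models", model_id, "alerts", alert_id]` match
// above is the routing technique worth noting. Below is a self-contained
// sketch of the same idea using only the standard library; `parse_path` is
// a hypothetical stand-in for `path_components`.

fn parse_path(path: &str) -> Vec<&str> {
    path.split('/').filter(|segment| !segment.is_empty()).collect()
}

fn route(path: &str) -> Option<(String, String)> {
    // Destructure all six path segments in one slice pattern, binding the
    // model and alert ids and ignoring the repo id.
    if let ["repos", _, "models", model_id, "alerts", alert_id] = parse_path(path).as_slice() {
        Some((model_id.to_string(), alert_id.to_string()))
    } else {
        None
    }
}

fn main() {
    assert_eq!(
        route("/repos/r1/models/m1/alerts/a1"),
        Some(("m1".to_string(), "a1".to_string()))
    );
    assert_eq!(route("/health"), None);
}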
opponents.py
import ast
import os
from typing import Optional

from GLOBAL_VARIABLES import PLAYER_NAME


def opponents(parent_folder_ts: str, ts_filename: Optional[str]) -> list:
    """
    Extracts the names of the players in a tournament.

    Parameters:
        parent_folder_ts (str): the parent folder where the tournament summary file is located
        ts_filename (str): the name of the tournament summary file

    Returns:
        opp_no_duplicates (list): a list of player names, without duplicates
        and without PLAYER_NAME
    """
    if ts_filename is None:
        return []

    # Read the tournament summary file.
    with open(os.path.join(parent_folder_ts, ts_filename), 'r') as f:
        data = f.read()

    # The file stores a Python dict literal; ast.literal_eval parses it
    # safely, unlike a bare eval.
    ts = ast.literal_eval(data)

    # Collect every player name, then drop duplicates while preserving order.
    opponents = [opp['player_name'] for opp in ts['tournament_finishes_and_winnings']]
    opp_no_duplicates = list(dict.fromkeys(opponents))
    opp_no_duplicates.remove(PLAYER_NAME)
    return opp_no_duplicates
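# --- usage sketch (not part of opponents.py) -------------------------------
# A minimal example of calling opponents(); the folder and file names below
# are hypothetical, and GLOBAL_VARIABLES must define PLAYER_NAME (say, 'hero').
# A tournament summary file is assumed to hold a Python dict literal like:
#
#   {'tournament_finishes_and_winnings': [
#       {'player_name': 'hero'},
#       {'player_name': 'villain1'},
#       {'player_name': 'villain1'},
#   ]}
#
# for which the call below would return ['villain1']:
#
#   from opponents import opponents
#
#   names = opponents('data/tournament_summaries/', 'ts_2021_06_01.txt')
#   print(names)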