file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
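Each row below is one fill-in-the-middle (FIM) training record: a source file split into a `prefix`, a masked `middle`, and a `suffix`, with `fim_type` naming the kind of span that was masked (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). Within a row, the `|` separators mark the cell boundaries, so the hole sits exactly where `prefix` ends and `suffix` begins. As a minimal sketch of the record layout (the `reassemble` helper and the toy record here are illustrative, not part of the dataset), concatenating the three pieces recovers the original file text:

```python
def reassemble(record):
    """Rebuild the original source text from one FIM record.

    `record` is assumed to be a dict keyed by the column names in the
    table header above: "prefix", "middle", and "suffix" (all strings).
    The masked span ("middle") is what a model is trained to predict
    from the surrounding "prefix" and "suffix".
    """
    return record["prefix"] + record["middle"] + record["suffix"]


# Hypothetical record shaped like the first row below (gru_cell.py,
# fim_type "identifier_name", where the masked middle is "__call__"):
example = {
    "file_name": "gru_cell.py",
    "prefix": "  def ",
    "middle": "__call__",
    "suffix": "(self, inputs, state, scope=None):",
    "fim_type": "identifier_name",
}
assert reassemble(example) == "  def __call__(self, inputs, state, scope=None):"
```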
gru_cell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.rnn_cells.base_cell import BaseCell
from lib import linalg
#***************************************************************
class GRUCell(BaseCell):
""""""
#=============================================================
def | (self, inputs, state, scope=None):
""""""
with tf.variable_scope(scope or type(self).__name__):
cell_tm1, hidden_tm1 = tf.split(axis=1, num_or_size_splits=2, value=state)
with tf.variable_scope('Gates'):
linear = linalg.linear([inputs, hidden_tm1],
self.output_size,
add_bias=True,
n_splits=2,
moving_params=self.moving_params)
update_act, reset_act = linear
update_gate = linalg.sigmoid(update_act-self.forget_bias)
reset_gate = linalg.sigmoid(reset_act)
reset_state = reset_gate * hidden_tm1
with tf.variable_scope('Candidate'):
hidden_act = linalg.linear([inputs, reset_state],
self.output_size,
add_bias=True,
moving_params=self.moving_params)
hidden_tilde = self.recur_func(hidden_act)
cell_t = update_gate * cell_tm1 + (1-update_gate) * hidden_tilde
return cell_t, tf.concat(axis=1, values=[cell_t, cell_t])
#=============================================================
@property
def state_size(self):
return self.output_size * 2
| __call__ | identifier_name |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() |
// Most of this is modelled after the expansion of the `quote_expr!`
// macro ...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| {
cx.span_fatal(sp, "forged_ident takes no arguments");
} | conditional_block |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> { | }
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro ...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {} | if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i)) | random_line_split |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> |
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro ...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
} | identifier_body |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn | (reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro ...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| plugin_registrar | identifier_name |
index.d.ts | // Type definitions for tiny-async-pool 1.0
// Project: https://github.com/rxaviers/async-pool#readme
// Definitions by: Karl-Philipp Wulfert <https://github.com/krlwlfrt>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/**
* This declaration specifies that the function is the exported object from the file
*/
export = asyncPool;
/**
* Runs multiple promise-returning & async functions in a limited concurrency pool.
* It rejects immediately as soon as one of the promises rejects.
* It resolves when all the promises completes.
* It calls the iterator function as soon as possible (under concurrency limit).
*
* @param poolLimit The pool limit number (>= 1).
* @param array Input array.
* @param iteratorFn Iterator function that takes two arguments (array item and the array itself).
* The iterator function should either return a promise or be an async function.
*
* @template IN Type of the input array | */
declare function asyncPool<IN, OUT>(
poolLimit: number,
array: ReadonlyArray<IN>,
iteratorFn: (generator: IN) => Promise<OUT>
): Promise<OUT[]>; | * @template OUT Type of the resolves of the promises | random_line_split |
shadows.rs | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ip.rsh"
#pragma rs_fp_relaxed
static double shadowFilterMap[] = {
-0.00591, 0.0001,
1.16488, 0.01668,
-0.18027, -0.06791,
-0.12625, 0.09001,
0.15065, -0.03897
};
static double poly[] = {
0., 0.,
0., 0.,
0.
};
static const int ABITS = 4;
static const int HSCALE = 256;
static const int k1=255 << ABITS;
static const int k2=HSCALE << ABITS;
static double fastevalPoly(double *poly,int n, double x){
double f =x;
double sum = poly[0]+poly[1]*f;
int i;
for (i = 2; i < n; i++) {
f*=x;
sum += poly[i]*f;
}
return sum;
}
static ushort3 rgb2hsv( uchar4 rgb)
{
int iMin,iMax,chroma;
int ri = rgb.r;
int gi = rgb.g;
int bi = rgb.b;
short rv,rs,rh;
if (ri > gi) {
iMax = max (ri, bi);
iMin = min (gi, bi);
} else {
iMax = max (gi, bi);
iMin = min (ri, bi);
}
chroma = iMax - iMin;
// set value
rv = (short)( iMax << ABITS);
// set saturation
if (rv == 0)
rs = 0;
else
rs = (short)((k1*chroma)/iMax);
// set hue
if (rs == 0)
rh = 0;
else {
if ( ri == iMax ) {
rh = (short)( (k2*(6*chroma+gi - bi))/(6*chroma));
if (rh >= k2) rh -= k2;
} else if (gi == iMax)
rh = (short)( (k2*(2*chroma+bi - ri ))/(6*chroma));
else // (bi == iMax )
rh = (short)( (k2*(4*chroma+ri - gi ))/(6*chroma));
}
ushort3 out;
out.x = rv;
out.y = rs;
out.z = rh;
return out;
}
static uchar4 hsv2rgb(ushort3 hsv)
{
int ABITS = 4;
int HSCALE = 256;
int m;
int H,X,ih,is,iv;
int k1=255<<ABITS;
int k2=HSCALE<<ABITS;
int k3=1<<(ABITS-1);
int rr=0;
int rg=0;
int rb=0;
short cv = hsv.x;
short cs = hsv.y;
short ch = hsv.z;
// set chroma and min component value m
//chroma = ( cv * cs )/k1;
//m = cv - chroma;
m = ((int)cv*(k1 - (int)cs ))/k1;
// chroma == 0 <-> cs == 0 --> m=cv
if (cs == 0) {
rb = ( rg = ( rr =( cv >> ABITS) ));
} else {
ih=(int)ch;
is=(int)cs;
iv=(int)cv;
H = (6*ih)/k2;
X = ((iv*is)/k2)*(k2- abs(6*ih- 2*(H>>1)*k2 - k2)) ;
// removing additional bits --> unit8
X=( (X+iv*(k1 - is ))/k1 + k3 ) >> ABITS;
m=m >> ABITS;
// ( chroma + m ) --> cv ;
cv=(short) (cv >> ABITS);
switch (H) {
case 0:
rr = cv;
rg = X;
rb = m;
break;
case 1:
rr = X;
rg = cv;
rb = m;
break;
case 2:
rr = m;
rg = cv;
rb = X;
break;
case 3:
rr = m;
rg = X;
rb = cv;
break;
case 4:
rr = X;
rg = m;
rb = cv;
break;
case 5:
rr = cv;
rg = m ;
rb = X;
break;
}
}
uchar4 rgb;
rgb.r = rr;
rgb.g = rg;
rgb.b = rb;
return rgb;
}
void prepareShadows(float scale) {
double s = (scale>=0)?scale:scale/5;
for (int i = 0; i < 5; i++) {
poly[i] = fastevalPoly(shadowFilterMap+i*2,2 , s); | }
void shadowsKernel(const uchar4 *in, uchar4 *out) {
ushort3 hsv = rgb2hsv(*in);
double v = (fastevalPoly(poly,5,hsv.x/4080.)*4080);
if (v>4080) v = 4080;
hsv.x = (unsigned short) ((v>0)?v:0);
*out = hsv2rgb(hsv);
} | } | random_line_split |
shadows.rs | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ip.rsh"
#pragma rs_fp_relaxed
static double shadowFilterMap[] = {
-0.00591, 0.0001,
1.16488, 0.01668,
-0.18027, -0.06791,
-0.12625, 0.09001,
0.15065, -0.03897
};
static double poly[] = {
0., 0.,
0., 0.,
0.
};
static const int ABITS = 4;
static const int HSCALE = 256;
static const int k1=255 << ABITS;
static const int k2=HSCALE << ABITS;
static double fastevalPoly(double *poly,int n, double x){
double f =x;
double sum = poly[0]+poly[1]*f;
int i;
for (i = 2; i < n; i++) {
f*=x;
sum += poly[i]*f;
}
return sum;
}
static ushort3 rgb2hsv( uchar4 rgb)
{
int iMin,iMax,chroma;
int ri = rgb.r;
int gi = rgb.g;
int bi = rgb.b;
short rv,rs,rh;
if (ri > gi) {
iMax = max (ri, bi);
iMin = min (gi, bi);
} else {
iMax = max (gi, bi);
iMin = min (ri, bi);
}
chroma = iMax - iMin;
// set value
rv = (short)( iMax << ABITS);
// set saturation
if (rv == 0)
rs = 0;
else
rs = (short)((k1*chroma)/iMax);
// set hue
if (rs == 0)
rh = 0;
else {
if ( ri == iMax ) {
rh = (short)( (k2*(6*chroma+gi - bi))/(6*chroma));
if (rh >= k2) rh -= k2;
} else if (gi == iMax)
rh = (short)( (k2*(2*chroma+bi - ri ))/(6*chroma));
else // (bi == iMax )
rh = (short)( (k2*(4*chroma+ri - gi ))/(6*chroma));
}
ushort3 out;
out.x = rv;
out.y = rs;
out.z = rh;
return out;
}
static uchar4 hsv2rgb(ushort3 hsv)
{
int ABITS = 4;
int HSCALE = 256;
int m;
int H,X,ih,is,iv;
int k1=255<<ABITS;
int k2=HSCALE<<ABITS;
int k3=1<<(ABITS-1);
int rr=0;
int rg=0;
int rb=0;
short cv = hsv.x;
short cs = hsv.y;
short ch = hsv.z;
// set chroma and min component value m
//chroma = ( cv * cs )/k1;
//m = cv - chroma;
m = ((int)cv*(k1 - (int)cs ))/k1;
// chroma == 0 <-> cs == 0 --> m=cv
if (cs == 0) {
rb = ( rg = ( rr =( cv >> ABITS) ));
} else |
uchar4 rgb;
rgb.r = rr;
rgb.g = rg;
rgb.b = rb;
return rgb;
}
void prepareShadows(float scale) {
double s = (scale>=0)?scale:scale/5;
for (int i = 0; i < 5; i++) {
poly[i] = fastevalPoly(shadowFilterMap+i*2,2 , s);
}
}
void shadowsKernel(const uchar4 *in, uchar4 *out) {
ushort3 hsv = rgb2hsv(*in);
double v = (fastevalPoly(poly,5,hsv.x/4080.)*4080);
if (v>4080) v = 4080;
hsv.x = (unsigned short) ((v>0)?v:0);
*out = hsv2rgb(hsv);
}
| {
ih=(int)ch;
is=(int)cs;
iv=(int)cv;
H = (6*ih)/k2;
X = ((iv*is)/k2)*(k2- abs(6*ih- 2*(H>>1)*k2 - k2)) ;
// removing additional bits --> unit8
X=( (X+iv*(k1 - is ))/k1 + k3 ) >> ABITS;
m=m >> ABITS;
// ( chroma + m ) --> cv ;
cv=(short) (cv >> ABITS);
switch (H) {
case 0:
rr = cv;
rg = X;
rb = m;
break;
case 1:
rr = X;
rg = cv;
rb = m;
break;
case 2:
rr = m;
rg = cv;
rb = X;
break;
case 3:
rr = m;
rg = X;
rb = cv;
break;
case 4:
rr = X;
rg = m;
rb = cv;
break;
case 5:
rr = cv;
rg = m ;
rb = X;
break;
}
} | conditional_block |
main.rs | #![feature(iter_arith)]
use std::io::Read;
use std::fs::File;
fn main() | {
let mut f = File::open("../input.txt").unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
let s = s;
let answer: i32 = s.lines().map(|x| {
let char_num = x.len() as i32;
let mut chars_in_memory = 0;
let mut index = 1;
let chars = x.chars().collect::<Vec<_>>();
while index < x.len() - 1 {
match chars[index] {
'\\' => {
index += 1;
match chars[index] {
'x' => {
index += 3;
chars_in_memory += 1;
},
_ => {
index += 1;
chars_in_memory += 1;
}
}
},
_ => {
chars_in_memory += 1;
index += 1;
}
}
}
char_num - chars_in_memory
}).sum();
println!("The answer is {} :D", answer);
} | identifier_body |
|
main.rs | #![feature(iter_arith)]
use std::io::Read;
use std::fs::File;
fn | () {
let mut f = File::open("../input.txt").unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
let s = s;
let answer: i32 = s.lines().map(|x| {
let char_num = x.len() as i32;
let mut chars_in_memory = 0;
let mut index = 1;
let chars = x.chars().collect::<Vec<_>>();
while index < x.len() - 1 {
match chars[index] {
'\\' => {
index += 1;
match chars[index] {
'x' => {
index += 3;
chars_in_memory += 1;
},
_ => {
index += 1;
chars_in_memory += 1;
}
}
},
_ => {
chars_in_memory += 1;
index += 1;
}
}
}
char_num - chars_in_memory
}).sum();
println!("The answer is {} :D", answer);
}
| main | identifier_name |
main.rs | #![feature(iter_arith)]
use std::io::Read;
use std::fs::File;
fn main() {
let mut f = File::open("../input.txt").unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
let s = s;
let answer: i32 = s.lines().map(|x| {
let char_num = x.len() as i32;
let mut chars_in_memory = 0;
let mut index = 1;
let chars = x.chars().collect::<Vec<_>>();
while index < x.len() - 1 {
match chars[index] {
'\\' => { | index += 3;
chars_in_memory += 1;
},
_ => {
index += 1;
chars_in_memory += 1;
}
}
},
_ => {
chars_in_memory += 1;
index += 1;
}
}
}
char_num - chars_in_memory
}).sum();
println!("The answer is {} :D", answer);
} | index += 1;
match chars[index] {
'x' => { | random_line_split |
KarmaCukesListener.js | /**
* Karma Listener listening for CucumberJS events
*
* @author Benjamin Nowack <[email protected]>
*
* @param {module:karma} karma - Karma
* @returns {KarmaCukesListener}
*/
var KarmaCukesListener = function(karma) {
/**
* Initialises the listener
*/
this.init = function() {
this.karma = karma;
this.feature = null;
this.scenario = null;
this.step = null;
this.stepCount = 0;
// don't let errors cause Karma to exit
window.onerror = $.proxy(this.onError, this);
};
/*
* Registers event handlers for cucumber events
*
* Available events:
*
* * BeforeFeatures
* * BeforeFeature
* * Background
* * BeforeScenario
* * BeforeStep
* * StepResult
* * AfterStep
* * ScenarioResult
* * AfterScenario
* * AfterFeature
* * FeaturesResult
* * AfterFeatures
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
* @param {function} callback - Callback for cucumber's AST walker
*/
this.hear = function (event, defaultTimeout, callback) {
var eventName = event.getName();
var methodName = 'on' + eventName;
if (this[methodName]) {
this[methodName](event);
}
callback();
};
/**
* Initializes the listener before any features are run
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeFeatures = function(event) {
this.init();
};
/**
* Sets the current feature reference
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeFeature = function(event) {
var feature = event.getPayload();
this.feature = {
id: feature.getName().toLowerCase().replace(/\s+/g, '-'),
uri: feature.getUri().replace(/^(.*\/(base)\/)/, ''), // remove leading path noise
name: feature.getName(),
description: feature.getDescription(),
line: feature.getLine(),
keyword: feature.getKeyword(),
tags: feature.getTags().map(function(tag) {
return { name: tag.getName(), line: tag.getLine() };
})
};
};
/**
* Sets the current scenario reference
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeScenario = function(event) {
var scenario = event.getPayload();
this.scenario = {
id: this.feature.id + ';' + scenario.getName().toLowerCase().replace(/\s+/g, '-'),
name: scenario.getName(),
line: scenario.getLine(),
keyword: 'Scenario',
description: scenario.getDescription(),
type: 'scenario',
tags: scenario.getTags().map(function(tag) {
return { name: tag.getName(), line: tag.getLine() };
}),
examples: []// does not seem to be fillable via CucumberJS as outlines are split into individual scenarios
};
};
/**
* Sets the current step reference
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeStep = function(event) {
var step = event.getPayload();
this.step = {
keyword: step.getKeyword(),
name: step.getName(),
line: step.getLine(),
//hidden: step.isHidden(),
match: {
location: step.getUri() + ':' + step.getLine()
},
result: {
status: null,
error_message: '',
duration: 0
}
};
};
/**
* Creates a step/spec result and passes it to karma (which passes it to registered reporters)
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onStepResult = function(event) {
var stepResult = event.getPayload();
// don't report hook results
if (this.isHook(stepResult.getStep())) {
return;
}
var karmaResult = {
feature: this.feature,
scenario: this.scenario,
step: this.step,
// add some standard props for other reporters that rely on them (e.g. karma-junit-reporter)
suite: [this.feature.name + ': ' + this.scenario.name],
description: this.step.keyword + this.step.name,
log: [],
time: (stepResult.getDuration() || 0) / 1000000
};
// match.location
var stepDefinition = stepResult.getStepDefinition();
if (stepDefinition && stepDefinition.getUri() !== 'unknown') {
karmaResult.step.match.location = stepDefinition.getUri() + ':' + stepDefinition.getLine();
}
karmaResult.step.match.location = karmaResult.step.match.location
.replace(/^(.*\/(base)\/)/, '') // remove leading path noise
.replace(/^(.*\/(absolute)\/)/, '/') // remove leading path noise
.replace(/\?[^\:]+/g, '') // remove query strings
;
// result.status
karmaResult.step.result.status = stepResult.getStatus();
// result.duration
karmaResult.step.result.duration = stepResult.getDuration() || 0;
// error message
if (karmaResult.step.result.status === 'failed') {
var failureException = stepResult.getFailureException();
var stack = '';
if (failureException && typeof failureException.stack === 'string') {
stack = failureException.stack;
} else if (failureException && failureException.message && failureException instanceof Error) | else if (failureException && typeof failureException === 'string') {
stack = failureException;
}
karmaResult.step.result.error_message += stack
.replace(/^(.*\/(base)\/)/gm, '') // remove leading path noise
.replace(/^(.*\/(absolute)\/)/gm, '/') // remove leading path noise
.replace(/^(.*\/release\/cucumber.js.*$)/gm, '') // cucumberjs entries
.replace(/\?[^\:]+/g, '') // remove query strings
.replace(/\n*$/, '') // remove trailing line-breaks
;
}
// attachments
var attachments = stepResult.getAttachments();
if (attachments && attachments.length) {
karmaResult.step.embeddings = [];
attachments.forEach(function (attachment) {
karmaResult.step.embeddings.push({
mime_type: attachment.getMimeType(),
data: base64encode(attachment.getData())
});
});
}
// report step count to karma
this.karma.info({ total: ++this.stepCount });
// inject karma result keywords to trigger correct exit code
karmaResult.success = (karmaResult.step.result.status.match(/(passed)/));
karmaResult.skipped = (karmaResult.step.result.status.match(/(skipped|pending|undefined|ambiguous)/));
// pass result to all registered karma reporters
this.karma.result(karmaResult);// triggers `reporter.onSpecComplete(browser, karmaResult)`
};
/**
* Cleans up object references
*/
onAfterFeatures = function() {
this.feature = null;
this.scenario = null;
this.step = null;
this.stepCount = 0;
};
/**
* Adds script errors to step's error message
*
* @param {string} message - Error message
* @param {string} source - Error source/file
* @param {number} line - Error line
* @param {number} column - Error column
*/
this.onError = function (message, source, line, column) {
var fullMessage = message + ' at ' + source.replace(/\?.*$/, '') + ':' + line + ':' + column;
console.error(fullMessage);
this.step.result.error_message += fullMessage + "\n";
};
/**
* Checks if a step is a hook
*
* @param {module:cucumber/step} step - Step object
* @returns {boolean} TRUE for hooks, FALSE otherwise
*/
this.isHook = function(step) {
return step.getKeyword().match(/^(Before|After)/);
};
};
| {
stack = failureException.message;
} | conditional_block |
KarmaCukesListener.js | /**
* Karma Listener listening for CucumberJS events
*
* @author Benjamin Nowack <[email protected]>
*
* @param {module:karma} karma - Karma
* @returns {KarmaCukesListener}
*/
var KarmaCukesListener = function(karma) {
/**
* Initialises the listener
*/
this.init = function() {
this.karma = karma;
this.feature = null;
this.scenario = null;
this.step = null;
this.stepCount = 0;
// don't let errors cause Karma to exit
window.onerror = $.proxy(this.onError, this);
};
/*
* Registers event handlers for cucumber events
*
* Available events:
*
* * BeforeFeatures
* * BeforeFeature
* * Background
* * BeforeScenario
* * BeforeStep
* * StepResult
* * AfterStep
* * ScenarioResult
* * AfterScenario
* * AfterFeature
* * FeaturesResult
* * AfterFeatures
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
* @param {function} callback - Callback for cucumber's AST walker
*/
this.hear = function (event, defaultTimeout, callback) {
var eventName = event.getName();
var methodName = 'on' + eventName;
if (this[methodName]) {
this[methodName](event);
}
callback();
};
/**
* Initializes the listener before any features are run
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeFeatures = function(event) {
this.init();
}; | *
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeFeature = function(event) {
var feature = event.getPayload();
this.feature = {
id: feature.getName().toLowerCase().replace(/\s+/g, '-'),
uri: feature.getUri().replace(/^(.*\/(base)\/)/, ''), // remove leading path noise
name: feature.getName(),
description: feature.getDescription(),
line: feature.getLine(),
keyword: feature.getKeyword(),
tags: feature.getTags().map(function(tag) {
return { name: tag.getName(), line: tag.getLine() };
})
};
};
/**
* Sets the current scenario reference
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeScenario = function(event) {
var scenario = event.getPayload();
this.scenario = {
id: this.feature.id + ';' + scenario.getName().toLowerCase().replace(/\s+/g, '-'),
name: scenario.getName(),
line: scenario.getLine(),
keyword: 'Scenario',
description: scenario.getDescription(),
type: 'scenario',
tags: scenario.getTags().map(function(tag) {
return { name: tag.getName(), line: tag.getLine() };
}),
examples: []// does not seem to be fillable via CucumberJS as outlines are split into individual scenarios
};
};
/**
* Sets the current step reference
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onBeforeStep = function(event) {
var step = event.getPayload();
this.step = {
keyword: step.getKeyword(),
name: step.getName(),
line: step.getLine(),
//hidden: step.isHidden(),
match: {
location: step.getUri() + ':' + step.getLine()
},
result: {
status: null,
error_message: '',
duration: 0
}
};
};
/**
* Creates a step/spec result and passes it to karma (which passes it to registered reporters)
*
* @param {module:cucumber/runtime/ast_tree_walker/event.js} event - Cucumber event
*/
this.onStepResult = function(event) {
var stepResult = event.getPayload();
// don't report hook results
if (this.isHook(stepResult.getStep())) {
return;
}
var karmaResult = {
feature: this.feature,
scenario: this.scenario,
step: this.step,
// add some standard props for other reporters that rely on them (e.g. karma-junit-reporter)
suite: [this.feature.name + ': ' + this.scenario.name],
description: this.step.keyword + this.step.name,
log: [],
time: (stepResult.getDuration() || 0) / 1000000
};
// match.location
var stepDefinition = stepResult.getStepDefinition();
if (stepDefinition && stepDefinition.getUri() !== 'unknown') {
karmaResult.step.match.location = stepDefinition.getUri() + ':' + stepDefinition.getLine();
}
karmaResult.step.match.location = karmaResult.step.match.location
.replace(/^(.*\/(base)\/)/, '') // remove leading path noise
.replace(/^(.*\/(absolute)\/)/, '/') // remove leading path noise
.replace(/\?[^\:]+/g, '') // remove query strings
;
// result.status
karmaResult.step.result.status = stepResult.getStatus();
// result.duration
karmaResult.step.result.duration = stepResult.getDuration() || 0;
// error message
if (karmaResult.step.result.status === 'failed') {
var failureException = stepResult.getFailureException();
var stack = '';
if (failureException && typeof failureException.stack === 'string') {
stack = failureException.stack;
} else if (failureException && failureException.message && failureException instanceof Error) {
stack = failureException.message;
} else if (failureException && typeof failureException === 'string') {
stack = failureException;
}
karmaResult.step.result.error_message += stack
.replace(/^(.*\/(base)\/)/gm, '') // remove leading path noise
.replace(/^(.*\/(absolute)\/)/gm, '/') // remove leading path noise
.replace(/^(.*\/release\/cucumber.js.*$)/gm, '') // cucumberjs entries
.replace(/\?[^\:]+/g, '') // remove query strings
.replace(/\n*$/, '') // remove trailing line-breaks
;
}
// attachments
var attachments = stepResult.getAttachments();
if (attachments && attachments.length) {
karmaResult.step.embeddings = [];
attachments.forEach(function (attachment) {
karmaResult.step.embeddings.push({
mime_type: attachment.getMimeType(),
data: base64encode(attachment.getData())
});
});
}
// report step count to karma
this.karma.info({ total: ++this.stepCount });
// inject karma result keywords to trigger correct exit code
karmaResult.success = (karmaResult.step.result.status.match(/(passed)/));
karmaResult.skipped = (karmaResult.step.result.status.match(/(skipped|pending|undefined|ambiguous)/));
// pass result to all registered karma reporters
this.karma.result(karmaResult);// triggers `reporter.onSpecComplete(browser, karmaResult)`
};
/**
* Cleans up object references
*/
onAfterFeatures = function() {
this.feature = null;
this.scenario = null;
this.step = null;
this.stepCount = 0;
};
/**
* Adds script errors to step's error message
*
* @param {string} message - Error message
* @param {string} source - Error source/file
* @param {number} line - Error line
* @param {number} column - Error column
*/
this.onError = function (message, source, line, column) {
var fullMessage = message + ' at ' + source.replace(/\?.*$/, '') + ':' + line + ':' + column;
console.error(fullMessage);
this.step.result.error_message += fullMessage + "\n";
};
/**
* Checks if a step is a hook
*
* @param {module:cucumber/step} step - Step object
* @returns {boolean} TRUE for hooks, FALSE otherwise
*/
this.isHook = function(step) {
return step.getKeyword().match(/^(Before|After)/);
};
}; |
/**
* Sets the current feature reference | random_line_split |
to_csv_pretty.py | #!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def pretty_mean_std(data):
return uncertain_number_string(my_mean(data), my_means_std(data))
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
"""
stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
| if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
]) | conditional_block |
|
to_csv_pretty.py | #!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def | (data):
return uncertain_number_string(my_mean(data), my_means_std(data))
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
"""
stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
])
| pretty_mean_std | identifier_name |
to_csv_pretty.py | #!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def pretty_mean_std(data):
|
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
"""
stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
])
| return uncertain_number_string(my_mean(data), my_means_std(data)) | identifier_body |
to_csv_pretty.py | #!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def pretty_mean_std(data):
return uncertain_number_string(my_mean(data), my_means_std(data))
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
""" | stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
]) | random_line_split |
|
15.4.4.4-2.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
File Name: 15.4.4.3-1.js
ECMA Section: 15.4.4.3-1 Array.prototype.reverse()
Description:
The elements of the array are rearranged so as to reverse their order.
This object is returned as the result of the call.
1. Call the [[Get]] method of this object with argument "length".
2. Call ToUint32(Result(1)).
3. Compute floor(Result(2)/2).
4. Let k be 0.
5. If k equals Result(3), return this object.
6. Compute Result(2)k1.
7. Call ToString(k).
8. ToString(Result(6)).
9. Call the [[Get]] method of this object with argument Result(7).
10. Call the [[Get]] method of this object with argument Result(8).
11. If this object has a property named by Result(8), go to step 12; but
if this object has no property named by Result(8), then go to either
step 12 or step 14, depending on the implementation.
12. Call the [[Put]] method of this object with arguments Result(7) and
Result(10).
13. Go to step 15.
14. Call the [[Delete]] method on this object, providing Result(7) as the
name of the property to delete.
15. If this object has a property named by Result(7), go to step 16; but if
this object has no property named by Result(7), then go to either step 16
or step 18, depending on the implementation.
16. Call the [[Put]] method of this object with arguments Result(8) and
Result(9).
17. Go to step 19.
18. Call the [[Delete]] method on this object, providing Result(8) as the
name of the property to delete.
19. Increase k by 1.
20. Go to step 5.
Note that the reverse function is intentionally generic; it does not require
that its this value be an Array object. Therefore it can be transferred to other
kinds of objects for use as a method. Whether the reverse function can be applied
successfully to a host object is implementation dependent.
Note: Array.prototype.reverse allows some flexibility in implementation
regarding array indices that have not been populated. This test covers the
cases in which unpopulated indices are not deleted, since the JavaScript
implementation does not delete uninitialzed indices.
Author: [email protected]
Date: 7 october 1997
*/
var SECTION = "15.4.4.4-1";
var VERSION = "ECMA_1";
startTest();
writeHeaderToLog( SECTION + " Array.prototype.reverse()");
var ARR_PROTOTYPE = Array.prototype;
new TestCase( SECTION, "Array.prototype.reverse.length", 0, Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length", false, delete Array.prototype.reverse.length ); | "var A = new Array(); A.reverse(); A.length",
0,
eval("var A = new Array(); A.reverse(); A.length") );
test();
function CheckItems( R, A ) {
for ( var i = 0; i < R.length; i++ ) {
new TestCase(
SECTION,
"A["+i+ "]",
R[i],
A[i] );
}
}
test();
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = eval(this.array[i]);
}
this.join = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
function Reverse( array ) {
var r2 = array.length;
var k = 0;
var r3 = Math.floor( r2/2 );
if ( r3 == k ) {
return array;
}
for ( k = 0; k < r3; k++ ) {
var r6 = r2 - k - 1;
// var r7 = String( k );
var r7 = k;
var r8 = String( r6 );
var r9 = array[r7];
var r10 = array[r8];
array[r7] = r10;
array[r8] = r9;
}
return array;
}
function Iterate( array ) {
for ( var i = 0; i < array.length; i++ ) {
// print( i+": "+ array[String(i)] );
}
}
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = this.array[i];
}
this.reverse = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
} | new TestCase( SECTION, "delete Array.prototype.reverse.length; Array.prototype.reverse.length", 0, eval("delete Array.prototype.reverse.length; Array.prototype.reverse.length") );
// length of array is 0
new TestCase( SECTION, | random_line_split |
15.4.4.4-2.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
File Name: 15.4.4.3-1.js
ECMA Section: 15.4.4.3-1 Array.prototype.reverse()
Description:
The elements of the array are rearranged so as to reverse their order.
This object is returned as the result of the call.
1. Call the [[Get]] method of this object with argument "length".
2. Call ToUint32(Result(1)).
3. Compute floor(Result(2)/2).
4. Let k be 0.
5. If k equals Result(3), return this object.
6. Compute Result(2)k1.
7. Call ToString(k).
8. ToString(Result(6)).
9. Call the [[Get]] method of this object with argument Result(7).
10. Call the [[Get]] method of this object with argument Result(8).
11. If this object has a property named by Result(8), go to step 12; but
if this object has no property named by Result(8), then go to either
step 12 or step 14, depending on the implementation.
12. Call the [[Put]] method of this object with arguments Result(7) and
Result(10).
13. Go to step 15.
14. Call the [[Delete]] method on this object, providing Result(7) as the
name of the property to delete.
15. If this object has a property named by Result(7), go to step 16; but if
this object has no property named by Result(7), then go to either step 16
or step 18, depending on the implementation.
16. Call the [[Put]] method of this object with arguments Result(8) and
Result(9).
17. Go to step 19.
18. Call the [[Delete]] method on this object, providing Result(8) as the
name of the property to delete.
19. Increase k by 1.
20. Go to step 5.
Note that the reverse function is intentionally generic; it does not require
that its this value be an Array object. Therefore it can be transferred to other
kinds of objects for use as a method. Whether the reverse function can be applied
successfully to a host object is implementation dependent.
Note: Array.prototype.reverse allows some flexibility in implementation
regarding array indices that have not been populated. This test covers the
cases in which unpopulated indices are not deleted, since the JavaScript
implementation does not delete uninitialized indices.
Author: [email protected]
Date: 7 october 1997
*/
var SECTION = "15.4.4.4-1";
var VERSION = "ECMA_1";
startTest();
writeHeaderToLog( SECTION + " Array.prototype.reverse()");
var ARR_PROTOTYPE = Array.prototype;
new TestCase( SECTION, "Array.prototype.reverse.length", 0, Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length", false, delete Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length; Array.prototype.reverse.length", 0, eval("delete Array.prototype.reverse.length; Array.prototype.reverse.length") );
// length of array is 0
new TestCase( SECTION,
"var A = new Array(); A.reverse(); A.length",
0,
eval("var A = new Array(); A.reverse(); A.length") );
test();
function CheckItems( R, A ) {
for ( var i = 0; i < R.length; i++ ) {
new TestCase(
SECTION,
"A["+i+ "]",
R[i],
A[i] );
}
}
test();
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = eval(this.array[i]);
}
this.join = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
function Reverse( array ) {
var r2 = array.length;
var k = 0;
var r3 = Math.floor( r2/2 );
if ( r3 == k ) {
return array;
}
for ( k = 0; k < r3; k++ ) {
var r6 = r2 - k - 1;
// var r7 = String( k );
var r7 = k;
var r8 = String( r6 );
var r9 = array[r7];
var r10 = array[r8];
array[r7] = r10;
array[r8] = r9;
}
return array;
}
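// Editor's sketch (not part of the original test): Reverse() mirrors spec
// steps 3-20 above: floor(length/2) swaps of element k with element
// length-k-1. For example:
//   Reverse([0,1,2,3]) -> [3,2,1,0]  // 2 swaps
//   Reverse([0,1,2])   -> [2,1,0]    // 1 swap; the middle element stays put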
function Iterate( array ) |
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = this.array[i];
}
this.reverse = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
| {
for ( var i = 0; i < array.length; i++ ) {
// print( i+": "+ array[String(i)] );
}
} | identifier_body |
15.4.4.4-2.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
File Name: 15.4.4.4-2.js
ECMA Section: 15.4.4.4 Array.prototype.reverse()
Description:
The elements of the array are rearranged so as to reverse their order.
This object is returned as the result of the call.
1. Call the [[Get]] method of this object with argument "length".
2. Call ToUint32(Result(1)).
3. Compute floor(Result(2)/2).
4. Let k be 0.
5. If k equals Result(3), return this object.
6. Compute Result(2)-k-1.
7. Call ToString(k).
8. Call ToString(Result(6)).
9. Call the [[Get]] method of this object with argument Result(7).
10. Call the [[Get]] method of this object with argument Result(8).
11. If this object has a property named by Result(8), go to step 12; but
if this object has no property named by Result(8), then go to either
step 12 or step 14, depending on the implementation.
12. Call the [[Put]] method of this object with arguments Result(7) and
Result(10).
13. Go to step 15.
14. Call the [[Delete]] method on this object, providing Result(7) as the
name of the property to delete.
15. If this object has a property named by Result(7), go to step 16; but if
this object has no property named by Result(7), then go to either step 16
or step 18, depending on the implementation.
16. Call the [[Put]] method of this object with arguments Result(8) and
Result(9).
17. Go to step 19.
18. Call the [[Delete]] method on this object, providing Result(8) as the
name of the property to delete.
19. Increase k by 1.
20. Go to step 5.
Note that the reverse function is intentionally generic; it does not require
that its this value be an Array object. Therefore it can be transferred to other
kinds of objects for use as a method. Whether the reverse function can be applied
successfully to a host object is implementation dependent.
Note: Array.prototype.reverse allows some flexibility in implementation
regarding array indices that have not been populated. This test covers the
cases in which unpopulated indices are not deleted, since the JavaScript
implementation does not delete uninitialized indices.
Author: [email protected]
Date: 7 october 1997
*/
var SECTION = "15.4.4.4-1";
var VERSION = "ECMA_1";
startTest();
writeHeaderToLog( SECTION + " Array.prototype.reverse()");
var ARR_PROTOTYPE = Array.prototype;
new TestCase( SECTION, "Array.prototype.reverse.length", 0, Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length", false, delete Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length; Array.prototype.reverse.length", 0, eval("delete Array.prototype.reverse.length; Array.prototype.reverse.length") );
// length of array is 0
new TestCase( SECTION,
"var A = new Array(); A.reverse(); A.length",
0,
eval("var A = new Array(); A.reverse(); A.length") );
test();
function CheckItems( R, A ) {
for ( var i = 0; i < R.length; i++ ) {
new TestCase(
SECTION,
"A["+i+ "]",
R[i],
A[i] );
}
}
test();
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = eval(this.array[i]);
}
this.join = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
function Reverse( array ) {
var r2 = array.length;
var k = 0;
var r3 = Math.floor( r2/2 );
if ( r3 == k ) {
return array;
}
for ( k = 0; k < r3; k++ ) {
var r6 = r2 - k - 1;
// var r7 = String( k );
var r7 = k;
var r8 = String( r6 );
var r9 = array[r7];
var r10 = array[r8];
array[r7] = r10;
array[r8] = r9;
}
return array;
}
function Iterate( array ) {
for ( var i = 0; i < array.length; i++ ) |
}
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = this.array[i];
}
this.reverse = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
| {
// print( i+": "+ array[String(i)] );
} | conditional_block |
15.4.4.4-2.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
File Name: 15.4.4.4-2.js
ECMA Section: 15.4.4.4 Array.prototype.reverse()
Description:
The elements of the array are rearranged so as to reverse their order.
This object is returned as the result of the call.
1. Call the [[Get]] method of this object with argument "length".
2. Call ToUint32(Result(1)).
3. Compute floor(Result(2)/2).
4. Let k be 0.
5. If k equals Result(3), return this object.
6. Compute Result(2)-k-1.
7. Call ToString(k).
8. Call ToString(Result(6)).
9. Call the [[Get]] method of this object with argument Result(7).
10. Call the [[Get]] method of this object with argument Result(8).
11. If this object has a property named by Result(8), go to step 12; but
if this object has no property named by Result(8), then go to either
step 12 or step 14, depending on the implementation.
12. Call the [[Put]] method of this object with arguments Result(7) and
Result(10).
13. Go to step 15.
14. Call the [[Delete]] method on this object, providing Result(7) as the
name of the property to delete.
15. If this object has a property named by Result(7), go to step 16; but if
this object has no property named by Result(7), then go to either step 16
or step 18, depending on the implementation.
16. Call the [[Put]] method of this object with arguments Result(8) and
Result(9).
17. Go to step 19.
18. Call the [[Delete]] method on this object, providing Result(8) as the
name of the property to delete.
19. Increase k by 1.
20. Go to step 5.
Note that the reverse function is intentionally generic; it does not require
that its this value be an Array object. Therefore it can be transferred to other
kinds of objects for use as a method. Whether the reverse function can be applied
successfully to a host object is implementation dependent.
Note: Array.prototype.reverse allows some flexibility in implementation
regarding array indices that have not been populated. This test covers the
cases in which unpopulated indices are not deleted, since the JavaScript
implementation does not delete uninitialized indices.
Author: [email protected]
Date: 7 october 1997
*/
var SECTION = "15.4.4.4-1";
var VERSION = "ECMA_1";
startTest();
writeHeaderToLog( SECTION + " Array.prototype.reverse()");
var ARR_PROTOTYPE = Array.prototype;
new TestCase( SECTION, "Array.prototype.reverse.length", 0, Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length", false, delete Array.prototype.reverse.length );
new TestCase( SECTION, "delete Array.prototype.reverse.length; Array.prototype.reverse.length", 0, eval("delete Array.prototype.reverse.length; Array.prototype.reverse.length") );
// length of array is 0
new TestCase( SECTION,
"var A = new Array(); A.reverse(); A.length",
0,
eval("var A = new Array(); A.reverse(); A.length") );
test();
function CheckItems( R, A ) {
for ( var i = 0; i < R.length; i++ ) {
new TestCase(
SECTION,
"A["+i+ "]",
R[i],
A[i] );
}
}
test();
function | ( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = eval(this.array[i]);
}
this.join = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
function Reverse( array ) {
var r2 = array.length;
var k = 0;
var r3 = Math.floor( r2/2 );
if ( r3 == k ) {
return array;
}
for ( k = 0; k < r3; k++ ) {
var r6 = r2 - k - 1;
// var r7 = String( k );
var r7 = k;
var r8 = String( r6 );
var r9 = array[r7];
var r10 = array[r8];
array[r7] = r10;
array[r8] = r9;
}
return array;
}
function Iterate( array ) {
for ( var i = 0; i < array.length; i++ ) {
// print( i+": "+ array[String(i)] );
}
}
function Object_1( value ) {
this.array = value.split(",");
this.length = this.array.length;
for ( var i = 0; i < this.length; i++ ) {
this[i] = this.array[i];
}
this.reverse = Array.prototype.reverse;
this.getClass = Object.prototype.toString;
}
| Object_1 | identifier_name |
CreateNetworkDialog.tsx | import { useDispatch } from 'react-redux';
import { translate } from '@waldur/i18n';
import { closeModalDialog } from '@waldur/modal/actions';
import {
createLatinNameField,
createDescriptionField,
} from '@waldur/resource/actions/base';
import { ResourceActionDialog } from '@waldur/resource/actions/ResourceActionDialog';
import { showSuccess, showErrorResponse } from '@waldur/store/notify';
import { createNetwork } from '../../api';
export const CreateNetworkDialog = ({ resolve: { resource } }) => {
const dispatch = useDispatch();
return (
<ResourceActionDialog
dialogTitle={translate('Create network for OpenStack tenant')}
fields={[createLatinNameField(), createDescriptionField()]}
submitForm={ | (formData) => {
try {
await createNetwork(resource.uuid, formData);
dispatch(
showSuccess(translate('OpenStack networks has been created.')),
);
dispatch(closeModalDialog());
} catch (e) {
dispatch(
showErrorResponse(
e,
translate('Unable to create OpenStack networks.'),
),
);
}
}}
/>
);
};
| async | identifier_name |
CreateNetworkDialog.tsx | import { useDispatch } from 'react-redux';
import { translate } from '@waldur/i18n';
import { closeModalDialog } from '@waldur/modal/actions';
import {
createLatinNameField,
createDescriptionField,
} from '@waldur/resource/actions/base';
import { ResourceActionDialog } from '@waldur/resource/actions/ResourceActionDialog';
import { showSuccess, showErrorResponse } from '@waldur/store/notify';
import { createNetwork } from '../../api';
export const CreateNetworkDialog = ({ resolve: { resource } }) => {
const dispatch = useDispatch();
return (
<ResourceActionDialog
dialogTitle={translate('Create network for OpenStack tenant')}
fields={[createLatinNameField(), createDescriptionField()]}
submitForm={async (formData) => | }
/>
);
};
| {
try {
await createNetwork(resource.uuid, formData);
dispatch(
showSuccess(translate('OpenStack networks has been created.')),
);
dispatch(closeModalDialog());
} catch (e) {
dispatch(
showErrorResponse(
e,
translate('Unable to create OpenStack networks.'),
),
);
}
} | identifier_body |
CreateNetworkDialog.tsx | import { useDispatch } from 'react-redux';
| createLatinNameField,
createDescriptionField,
} from '@waldur/resource/actions/base';
import { ResourceActionDialog } from '@waldur/resource/actions/ResourceActionDialog';
import { showSuccess, showErrorResponse } from '@waldur/store/notify';
import { createNetwork } from '../../api';
export const CreateNetworkDialog = ({ resolve: { resource } }) => {
const dispatch = useDispatch();
return (
<ResourceActionDialog
dialogTitle={translate('Create network for OpenStack tenant')}
fields={[createLatinNameField(), createDescriptionField()]}
submitForm={async (formData) => {
try {
await createNetwork(resource.uuid, formData);
dispatch(
showSuccess(translate('OpenStack networks has been created.')),
);
dispatch(closeModalDialog());
} catch (e) {
dispatch(
showErrorResponse(
e,
translate('Unable to create OpenStack networks.'),
),
);
}
}}
/>
);
}; | import { translate } from '@waldur/i18n';
import { closeModalDialog } from '@waldur/modal/actions';
import { | random_line_split |
loadjs.js | loadjs = (function () {
/**
* Global dependencies.
* @global {Object} document - DOM
*/
var devnull = function() {},
bundleIdCache = {},
bundleResultCache = {},
bundleCallbackQueue = {};
/**
* Subscribe to bundle load event.
* @param {string[]} bundleIds - Bundle ids
* @param {Function} callbackFn - The callback function
*/
function subscribe(bundleIds, callbackFn) |
/**
* Publish bundle load event.
* @param {string} bundleId - Bundle id
* @param {string[]} pathsNotFound - List of files not found
*/
function publish(bundleId, pathsNotFound) {
// exit if id isn't defined
if (!bundleId) return;
var q = bundleCallbackQueue[bundleId];
// cache result
bundleResultCache[bundleId] = pathsNotFound;
// exit if queue is empty
if (!q) return;
// empty callback queue
while (q.length) {
q[0](bundleId, pathsNotFound);
q.splice(0, 1);
}
}
/**
* Load individual file.
* @param {string} path - The file path
* @param {Function} callbackFn - The callback function
*/
function loadFile(path, callbackFn, args, numTries) {
var doc = document,
async = args.async,
maxTries = (args.numRetries || 0) + 1,
beforeCallbackFn = args.before || devnull,
isCss,
e;
numTries = numTries || 0;
if (/\.css$/.test(path)) {
isCss = true;
// css
e = doc.createElement('link');
e.rel = 'stylesheet';
e.href = path;
} else {
// javascript
e = doc.createElement('script');
e.src = path;
e.async = async === undefined ? true : async;
}
e.onload = e.onerror = e.onbeforeload = function (ev) {
var result = ev.type[0]; // first letter of event type: 'l'(oad), 'e'(rror), 'b'(eforeload)
// Note: The following code isolates IE using `hideFocus` and treats empty
// stylesheets as failures to get around lack of onerror support
if (isCss && 'hideFocus' in e) {
try {
if (!e.sheet.cssText.length) result = 'e';
} catch (x) {
// sheets objects created from load errors don't allow access to
// `cssText`
result = 'e';
}
}
// handle retries in case of load failure
if (result == 'e') {
// increment counter
numTries += 1;
// exit function and try again
if (numTries < maxTries) {
return loadFile(path, callbackFn, args, numTries);
}
}
// execute callback
callbackFn(path, result, ev.defaultPrevented);
};
// execute before callback
beforeCallbackFn(path, e);
// add to document
doc.head.appendChild(e);
}
/**
* Load multiple files.
* @param {string[]} paths - The file paths
* @param {Function} callbackFn - The callback function
*/
function loadFiles(paths, callbackFn, args) {
// listify paths
paths = paths.push ? paths : [paths];
var numWaiting = paths.length,
x = numWaiting,
pathsNotFound = [],
fn,
i;
// define callback function
fn = function(path, result, defaultPrevented) {
// handle error
if (result == 'e') pathsNotFound.push(path);
// handle beforeload event. If defaultPrevented then that means the load
// will be blocked (ex. Ghostery/ABP on Safari)
if (result == 'b') {
if (defaultPrevented) pathsNotFound.push(path);
else return;
}
numWaiting--;
if (!numWaiting) callbackFn(pathsNotFound);
};
// load scripts
for (i=0; i < x; i++) loadFile(paths[i], fn, args);
}
/**
* Initiate script load and register bundle.
* @param {(string|string[])} paths - The file paths
* @param {(string|Function)} [arg1] - The bundleId or success callback
* @param {Function} [arg2] - The success or error callback
* @param {Function} [arg3] - The error callback
*/
function loadjs(paths, arg1, arg2) {
var bundleId,
args;
// bundleId (if string)
if (arg1 && arg1.trim) bundleId = arg1;
// args (default is {})
args = (bundleId ? arg2 : arg1) || {};
// throw error if bundle is already defined
if (bundleId) {
if (bundleId in bundleIdCache) {
throw "LoadJS";
} else {
bundleIdCache[bundleId] = true;
}
}
// load scripts
loadFiles(paths, function (pathsNotFound) {
// success and error callbacks
if (pathsNotFound.length) (args.error || devnull)(pathsNotFound);
else (args.success || devnull)();
// publish bundle load event
publish(bundleId, pathsNotFound);
}, args);
}
/**
* Execute callbacks when dependencies have been satisfied.
* @param {(string|string[])} deps - List of bundle ids
* @param {Object} args - success/error arguments
*/
loadjs.ready = function ready(deps, args) {
// subscribe to bundle load event
subscribe(deps, function (depsNotFound) {
// execute callbacks
if (depsNotFound.length) (args.error || devnull)(depsNotFound);
else (args.success || devnull)();
});
return loadjs;
};
/**
* Manually satisfy bundle dependencies.
* @param {string} bundleId - The bundle id
*/
loadjs.done = function done(bundleId) {
publish(bundleId, []);
};
/**
* Reset loadjs dependencies statuses
*/
loadjs.reset = function reset() {
bundleIdCache = {};
bundleResultCache = {};
bundleCallbackQueue = {};
};
/**
* Determine if bundle has already been defined
* @param {string} bundleId - The bundle id
*/
loadjs.isDefined = function isDefined(bundleId) {
return bundleId in bundleIdCache;
};
// export
return loadjs;
})();
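// Hypothetical usage sketch (bundle id and paths are illustrative only):
// loadjs(['/js/app.js', '/css/app.css'], 'app', {
//   numRetries: 2,
//   success: function () { /* both files loaded */ },
//   error: function (pathsNotFound) { /* at least one path failed */ }
// });
// loadjs.ready('app', { success: function () { /* dependencies satisfied */ } });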
| {
// listify
bundleIds = bundleIds.push ? bundleIds : [bundleIds];
var depsNotFound = [],
i = bundleIds.length,
numWaiting = i,
fn,
bundleId,
r,
q;
// define callback function
fn = function (bundleId, pathsNotFound) {
if (pathsNotFound.length) depsNotFound.push(bundleId);
numWaiting--;
if (!numWaiting) callbackFn(depsNotFound);
};
// register callback
while (i--) {
bundleId = bundleIds[i];
// execute callback if in result cache
r = bundleResultCache[bundleId];
if (r) {
fn(bundleId, r);
continue;
}
// add to callback queue
q = bundleCallbackQueue[bundleId] = bundleCallbackQueue[bundleId] || [];
q.push(fn);
}
} | identifier_body |
loadjs.js | loadjs = (function () {
/**
* Global dependencies.
* @global {Object} document - DOM
*/
var devnull = function() {},
bundleIdCache = {},
bundleResultCache = {},
bundleCallbackQueue = {};
/**
* Subscribe to bundle load event.
* @param {string[]} bundleIds - Bundle ids
* @param {Function} callbackFn - The callback function
*/
function subscribe(bundleIds, callbackFn) {
// listify
bundleIds = bundleIds.push ? bundleIds : [bundleIds];
var depsNotFound = [],
i = bundleIds.length,
numWaiting = i,
fn,
bundleId,
r,
q;
// define callback function
fn = function (bundleId, pathsNotFound) {
if (pathsNotFound.length) depsNotFound.push(bundleId);
numWaiting--;
if (!numWaiting) callbackFn(depsNotFound);
};
// register callback
while (i--) {
bundleId = bundleIds[i];
// execute callback if in result cache
r = bundleResultCache[bundleId];
if (r) {
fn(bundleId, r);
continue;
}
// add to callback queue
q = bundleCallbackQueue[bundleId] = bundleCallbackQueue[bundleId] || [];
q.push(fn);
}
}
/**
* Publish bundle load event.
* @param {string} bundleId - Bundle id
* @param {string[]} pathsNotFound - List of files not found
*/
function publish(bundleId, pathsNotFound) {
// exit if id isn't defined
if (!bundleId) return;
var q = bundleCallbackQueue[bundleId];
// cache result
bundleResultCache[bundleId] = pathsNotFound;
// exit if queue is empty
if (!q) return;
// empty callback queue
while (q.length) {
q[0](bundleId, pathsNotFound);
q.splice(0, 1);
}
}
/**
* Load individual file.
* @param {string} path - The file path
* @param {Function} callbackFn - The callback function
*/
function loadFile(path, callbackFn, args, numTries) {
var doc = document,
async = args.async,
maxTries = (args.numRetries || 0) + 1,
beforeCallbackFn = args.before || devnull,
isCss,
e;
numTries = numTries || 0;
if (/\.css$/.test(path)) {
isCss = true;
// css
e = doc.createElement('link');
e.rel = 'stylesheet';
e.href = path;
} else {
// javascript
e = doc.createElement('script');
e.src = path;
e.async = async === undefined ? true : async;
}
e.onload = e.onerror = e.onbeforeload = function (ev) {
var result = ev.type[0];
// Note: The following code isolates IE using `hideFocus` and treats empty
// stylesheets as failures to get around lack of onerror support
if (isCss && 'hideFocus' in e) {
try {
if (!e.sheet.cssText.length) result = 'e';
} catch (x) {
// sheets objects created from load errors don't allow access to
// `cssText`
result = 'e';
}
}
// handle retries in case of load failure
if (result == 'e') {
// increment counter
numTries += 1;
// exit function and try again
if (numTries < maxTries) {
return loadFile(path, callbackFn, args, numTries);
}
}
// execute callback
callbackFn(path, result, ev.defaultPrevented);
};
// execute before callback
beforeCallbackFn(path, e);
// add to document
doc.head.appendChild(e);
}
/**
* Load multiple files.
* @param {string[]} paths - The file paths
* @param {Function} callbackFn - The callback function
*/
function | (paths, callbackFn, args) {
// listify paths
paths = paths.push ? paths : [paths];
var numWaiting = paths.length,
x = numWaiting,
pathsNotFound = [],
fn,
i;
// define callback function
fn = function(path, result, defaultPrevented) {
// handle error
if (result == 'e') pathsNotFound.push(path);
// handle beforeload event. If defaultPrevented then that means the load
// will be blocked (ex. Ghostery/ABP on Safari)
if (result == 'b') {
if (defaultPrevented) pathsNotFound.push(path);
else return;
}
numWaiting--;
if (!numWaiting) callbackFn(pathsNotFound);
};
// load scripts
for (i=0; i < x; i++) loadFile(paths[i], fn, args);
}
/**
* Initiate script load and register bundle.
* @param {(string|string[])} paths - The file paths
* @param {(string|Function)} [arg1] - The bundleId or success callback
* @param {Function} [arg2] - The success or error callback
* @param {Function} [arg3] - The error callback
*/
function loadjs(paths, arg1, arg2) {
var bundleId,
args;
// bundleId (if string)
if (arg1 && arg1.trim) bundleId = arg1;
// args (default is {})
args = (bundleId ? arg2 : arg1) || {};
// throw error if bundle is already defined
if (bundleId) {
if (bundleId in bundleIdCache) {
throw "LoadJS";
} else {
bundleIdCache[bundleId] = true;
}
}
// load scripts
loadFiles(paths, function (pathsNotFound) {
// success and error callbacks
if (pathsNotFound.length) (args.error || devnull)(pathsNotFound);
else (args.success || devnull)();
// publish bundle load event
publish(bundleId, pathsNotFound);
}, args);
}
/**
* Execute callbacks when dependencies have been satisfied.
* @param {(string|string[])} deps - List of bundle ids
* @param {Object} args - success/error arguments
*/
loadjs.ready = function ready(deps, args) {
// subscribe to bundle load event
subscribe(deps, function (depsNotFound) {
// execute callbacks
if (depsNotFound.length) (args.error || devnull)(depsNotFound);
else (args.success || devnull)();
});
return loadjs;
};
/**
* Manually satisfy bundle dependencies.
* @param {string} bundleId - The bundle id
*/
loadjs.done = function done(bundleId) {
publish(bundleId, []);
};
/**
* Reset loadjs dependencies statuses
*/
loadjs.reset = function reset() {
bundleIdCache = {};
bundleResultCache = {};
bundleCallbackQueue = {};
};
/**
* Determine if bundle has already been defined
* @param {string} bundleId - The bundle id
*/
loadjs.isDefined = function isDefined(bundleId) {
return bundleId in bundleIdCache;
};
// export
return loadjs;
})();
| loadFiles | identifier_name |
loadjs.js | loadjs = (function () {
/**
* Global dependencies.
* @global {Object} document - DOM
*/
var devnull = function() {},
bundleIdCache = {},
bundleResultCache = {},
bundleCallbackQueue = {};
/**
* Subscribe to bundle load event.
* @param {string[]} bundleIds - Bundle ids
* @param {Function} callbackFn - The callback function
*/
function subscribe(bundleIds, callbackFn) {
// listify
bundleIds = bundleIds.push ? bundleIds : [bundleIds];
var depsNotFound = [],
i = bundleIds.length,
numWaiting = i,
fn,
bundleId,
r,
q;
// define callback function
fn = function (bundleId, pathsNotFound) {
if (pathsNotFound.length) depsNotFound.push(bundleId);
numWaiting--;
if (!numWaiting) callbackFn(depsNotFound);
};
// register callback
while (i--) {
bundleId = bundleIds[i];
// execute callback if in result cache
r = bundleResultCache[bundleId];
if (r) {
fn(bundleId, r);
continue;
}
// add to callback queue
q = bundleCallbackQueue[bundleId] = bundleCallbackQueue[bundleId] || [];
q.push(fn);
}
}
/**
* Publish bundle load event.
* @param {string} bundleId - Bundle id
* @param {string[]} pathsNotFound - List of files not found
*/
function publish(bundleId, pathsNotFound) {
// exit if id isn't defined
if (!bundleId) return;
var q = bundleCallbackQueue[bundleId];
// cache result
bundleResultCache[bundleId] = pathsNotFound;
// exit if queue is empty
if (!q) return;
// empty callback queue
while (q.length) {
q[0](bundleId, pathsNotFound);
q.splice(0, 1);
}
}
/**
* Load individual file.
* @param {string} path - The file path
* @param {Function} callbackFn - The callback function
*/
function loadFile(path, callbackFn, args, numTries) {
var doc = document,
async = args.async,
maxTries = (args.numRetries || 0) + 1,
beforeCallbackFn = args.before || devnull,
isCss,
e;
numTries = numTries || 0;
if (/\.css$/.test(path)) | else {
// javascript
e = doc.createElement('script');
e.src = path;
e.async = async === undefined ? true : async;
}
e.onload = e.onerror = e.onbeforeload = function (ev) {
var result = ev.type[0];
// Note: The following code isolates IE using `hideFocus` and treats empty
// stylesheets as failures to get around lack of onerror support
if (isCss && 'hideFocus' in e) {
try {
if (!e.sheet.cssText.length) result = 'e';
} catch (x) {
// sheets objects created from load errors don't allow access to
// `cssText`
result = 'e';
}
}
// handle retries in case of load failure
if (result == 'e') {
// increment counter
numTries += 1;
// exit function and try again
if (numTries < maxTries) {
return loadFile(path, callbackFn, args, numTries);
}
}
// execute callback
callbackFn(path, result, ev.defaultPrevented);
};
// execute before callback
beforeCallbackFn(path, e);
// add to document
doc.head.appendChild(e);
}
/**
* Load multiple files.
* @param {string[]} paths - The file paths
* @param {Function} callbackFn - The callback function
*/
function loadFiles(paths, callbackFn, args) {
// listify paths
paths = paths.push ? paths : [paths];
var numWaiting = paths.length,
x = numWaiting,
pathsNotFound = [],
fn,
i;
// define callback function
fn = function(path, result, defaultPrevented) {
// handle error
if (result == 'e') pathsNotFound.push(path);
// handle beforeload event. If defaultPrevented then that means the load
// will be blocked (ex. Ghostery/ABP on Safari)
if (result == 'b') {
if (defaultPrevented) pathsNotFound.push(path);
else return;
}
numWaiting--;
if (!numWaiting) callbackFn(pathsNotFound);
};
// load scripts
for (i=0; i < x; i++) loadFile(paths[i], fn, args);
}
/**
* Initiate script load and register bundle.
* @param {(string|string[])} paths - The file paths
* @param {(string|Function)} [arg1] - The bundleId or success callback
* @param {Function} [arg2] - The success or error callback
* @param {Function} [arg3] - The error callback
*/
function loadjs(paths, arg1, arg2) {
var bundleId,
args;
// bundleId (if string)
if (arg1 && arg1.trim) bundleId = arg1;
// args (default is {})
args = (bundleId ? arg2 : arg1) || {};
// throw error if bundle is already defined
if (bundleId) {
if (bundleId in bundleIdCache) {
throw "LoadJS";
} else {
bundleIdCache[bundleId] = true;
}
}
// load scripts
loadFiles(paths, function (pathsNotFound) {
// success and error callbacks
if (pathsNotFound.length) (args.error || devnull)(pathsNotFound);
else (args.success || devnull)();
// publish bundle load event
publish(bundleId, pathsNotFound);
}, args);
}
/**
* Execute callbacks when dependencies have been satisfied.
* @param {(string|string[])} deps - List of bundle ids
* @param {Object} args - success/error arguments
*/
loadjs.ready = function ready(deps, args) {
// subscribe to bundle load event
subscribe(deps, function (depsNotFound) {
// execute callbacks
if (depsNotFound.length) (args.error || devnull)(depsNotFound);
else (args.success || devnull)();
});
return loadjs;
};
/**
* Manually satisfy bundle dependencies.
* @param {string} bundleId - The bundle id
*/
loadjs.done = function done(bundleId) {
publish(bundleId, []);
};
/**
* Reset loadjs dependencies statuses
*/
loadjs.reset = function reset() {
bundleIdCache = {};
bundleResultCache = {};
bundleCallbackQueue = {};
};
/**
* Determine if bundle has already been defined
* @param String} bundleId - The bundle id
*/
loadjs.isDefined = function isDefined(bundleId) {
return bundleId in bundleIdCache;
};
// export
return loadjs;
})();
| {
isCss = true;
// css
e = doc.createElement('link');
e.rel = 'stylesheet';
e.href = path;
} | conditional_block |
loadjs.js | loadjs = (function () {
/**
* Global dependencies.
* @global {Object} document - DOM
*/
var devnull = function() {},
bundleIdCache = {},
bundleResultCache = {},
bundleCallbackQueue = {};
/**
* Subscribe to bundle load event.
* @param {string[]} bundleIds - Bundle ids
* @param {Function} callbackFn - The callback function
*/
function subscribe(bundleIds, callbackFn) {
// listify
bundleIds = bundleIds.push ? bundleIds : [bundleIds];
var depsNotFound = [],
i = bundleIds.length,
numWaiting = i,
fn,
bundleId,
r,
q;
// define callback function
fn = function (bundleId, pathsNotFound) {
if (pathsNotFound.length) depsNotFound.push(bundleId);
numWaiting--;
if (!numWaiting) callbackFn(depsNotFound);
};
// register callback
while (i--) {
bundleId = bundleIds[i];
// execute callback if in result cache
r = bundleResultCache[bundleId];
if (r) {
fn(bundleId, r);
continue;
}
// add to callback queue
q = bundleCallbackQueue[bundleId] = bundleCallbackQueue[bundleId] || [];
q.push(fn);
}
}
/**
* Publish bundle load event.
* @param {string} bundleId - Bundle id
* @param {string[]} pathsNotFound - List of files not found
*/
function publish(bundleId, pathsNotFound) {
// exit if id isn't defined
if (!bundleId) return;
var q = bundleCallbackQueue[bundleId];
// cache result
bundleResultCache[bundleId] = pathsNotFound;
// exit if queue is empty
if (!q) return;
// empty callback queue
while (q.length) {
q[0](bundleId, pathsNotFound);
q.splice(0, 1);
}
}
/**
* Load individual file.
* @param {string} path - The file path
* @param {Function} callbackFn - The callback function
*/
function loadFile(path, callbackFn, args, numTries) {
var doc = document,
async = args.async,
maxTries = (args.numRetries || 0) + 1,
beforeCallbackFn = args.before || devnull,
isCss,
e;
numTries = numTries || 0;
if (/\.css$/.test(path)) {
isCss = true;
// css
e = doc.createElement('link');
e.rel = 'stylesheet';
e.href = path;
} else {
// javascript
e = doc.createElement('script');
e.src = path;
e.async = async === undefined ? true : async;
}
e.onload = e.onerror = e.onbeforeload = function (ev) {
var result = ev.type[0];
// Note: The following code isolates IE using `hideFocus` and treats empty
// stylesheets as failures to get around lack of onerror support
if (isCss && 'hideFocus' in e) {
try {
if (!e.sheet.cssText.length) result = 'e';
} catch (x) {
// sheets objects created from load errors don't allow access to
// `cssText`
result = 'e';
}
}
// handle retries in case of load failure
if (result == 'e') {
// increment counter
numTries += 1;
// exit function and try again
if (numTries < maxTries) {
return loadFile(path, callbackFn, args, numTries);
}
}
// execute callback
callbackFn(path, result, ev.defaultPrevented);
};
// execute before callback
beforeCallbackFn(path, e);
// add to document
doc.head.appendChild(e);
}
/**
* Load multiple files.
* @param {string[]} paths - The file paths
* @param {Function} callbackFn - The callback function
*/
function loadFiles(paths, callbackFn, args) {
// listify paths
paths = paths.push ? paths : [paths];
var numWaiting = paths.length,
x = numWaiting,
pathsNotFound = [],
fn,
i;
// define callback function
fn = function(path, result, defaultPrevented) {
// handle error
if (result == 'e') pathsNotFound.push(path);
// handle beforeload event. If defaultPrevented then that means the load
// will be blocked (ex. Ghostery/ABP on Safari)
if (result == 'b') {
if (defaultPrevented) pathsNotFound.push(path);
else return;
}
numWaiting--;
if (!numWaiting) callbackFn(pathsNotFound);
};
// load scripts
for (i=0; i < x; i++) loadFile(paths[i], fn, args);
}
/**
* Initiate script load and register bundle.
* @param {(string|string[])} paths - The file paths
* @param {(string|Function)} [arg1] - The bundleId or success callback
* @param {Function} [arg2] - The success or error callback
* @param {Function} [arg3] - The error callback
*/
function loadjs(paths, arg1, arg2) {
var bundleId,
args;
// bundleId (if string)
if (arg1 && arg1.trim) bundleId = arg1;
// args (default is {})
args = (bundleId ? arg2 : arg1) || {};
// throw error if bundle is already defined
if (bundleId) {
if (bundleId in bundleIdCache) {
throw "LoadJS";
} else {
bundleIdCache[bundleId] = true;
}
}
// load scripts
loadFiles(paths, function (pathsNotFound) {
// success and error callbacks
if (pathsNotFound.length) (args.error || devnull)(pathsNotFound);
else (args.success || devnull)();
// publish bundle load event
publish(bundleId, pathsNotFound);
}, args);
}
/**
* Execute callbacks when dependencies have been satisfied.
* @param {(string|string[])} deps - List of bundle ids
* @param {Object} args - success/error arguments
*/
loadjs.ready = function ready(deps, args) {
// subscribe to bundle load event
subscribe(deps, function (depsNotFound) {
// execute callbacks
if (depsNotFound.length) (args.error || devnull)(depsNotFound);
else (args.success || devnull)();
});
return loadjs;
};
| */
loadjs.done = function done(bundleId) {
publish(bundleId, []);
};
/**
* Reset loadjs dependencies statuses
*/
loadjs.reset = function reset() {
bundleIdCache = {};
bundleResultCache = {};
bundleCallbackQueue = {};
};
/**
* Determine if bundle has already been defined
* @param {string} bundleId - The bundle id
*/
loadjs.isDefined = function isDefined(bundleId) {
return bundleId in bundleIdCache;
};
// export
return loadjs;
})(); | /**
* Manually satisfy bundle dependencies.
* @param {string} bundleId - The bundle id | random_line_split |
torlock.py | #VERSION: 2.1
# AUTHORS: Douman ([email protected])
# CONTRIBUTORS: Diego de las Heras ([email protected])
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser
class torlock(object):
url = "https://www.torlock.com"
name = "TorLock"
supported_categories = {'all': 'all',
'anime': 'anime',
'software': 'software',
'games': 'game',
'movies': 'movie',
'music': 'music',
'tv': 'television',
'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
class | (HTMLParser):
""" Sub-class for parsing results """
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.article_found = False # true when <article> with results is found
self.item_found = False
self.item_bad = False # set to True for malicious links
self.current_item = None # dict for found item
self.item_name = None # key's name in current_item dict
self.parser_class = {"ts": "size",
"tul": "seeds",
"tdl": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if self.item_found:
if tag == "td":
if "class" in params:
self.item_name = self.parser_class.get(params["class"], None)
if self.item_name:
self.current_item[self.item_name] = ""
elif self.article_found and tag == "a":
if "href" in params:
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
self.current_item["link"] = "".join((self.url, "/tor/",
link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
self.current_item["name"] = ""
self.item_bad = "rel" in params and params["rel"] == "nofollow"
elif tag == "article":
self.article_found = True
self.current_item = {}
def handle_data(self, data):
if self.item_name:
self.current_item[self.item_name] += data
def handle_endtag(self, tag):
if tag == "article":
self.article_found = False
elif self.item_name and (tag == "a" or tag == "td"):
self.item_name = None
elif self.item_found and tag == "tr":
self.item_found = False
if not self.item_bad:
prettyPrinter(self.current_item)
self.current_item = {}
def search(self, query, cat='all'):
""" Performs search """
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
page = "".join((self.url, "/", self.supported_categories[cat],
"/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
.format(self.supported_categories[cat], query))
list_searches = additional_pages.findall(html)[:-1]  # drop last match: the "next" link duplicates page 2
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
counter += 1
if counter > 3:
break
parser.close()
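# Hypothetical usage sketch (query and torrent URL are illustrative only):
#   engine = torlock()
#   engine.search('ubuntu%20server', cat='software')  # rows printed via prettyPrinter
#   engine.download_torrent(engine.url + '/tor/12345.torrent')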
| MyHtmlParser | identifier_name |
torlock.py | #VERSION: 2.1
# AUTHORS: Douman ([email protected])
# CONTRIBUTORS: Diego de las Heras ([email protected])
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser
class torlock(object):
url = "https://www.torlock.com"
name = "TorLock"
supported_categories = {'all': 'all',
'anime': 'anime',
'software': 'software',
'games': 'game',
'movies': 'movie',
'music': 'music',
'tv': 'television',
'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.article_found = False # true when <article> with results is found
self.item_found = False
self.item_bad = False # set to True for malicious links
self.current_item = None # dict for found item
self.item_name = None # key's name in current_item dict
self.parser_class = {"ts": "size",
"tul": "seeds",
"tdl": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if self.item_found:
if tag == "td":
if "class" in params:
|
elif self.article_found and tag == "a":
if "href" in params:
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
self.current_item["link"] = "".join((self.url, "/tor/",
link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
self.current_item["name"] = ""
self.item_bad = "rel" in params and params["rel"] == "nofollow"
elif tag == "article":
self.article_found = True
self.current_item = {}
def handle_data(self, data):
if self.item_name:
self.current_item[self.item_name] += data
def handle_endtag(self, tag):
if tag == "article":
self.article_found = False
elif self.item_name and (tag == "a" or tag == "td"):
self.item_name = None
elif self.item_found and tag == "tr":
self.item_found = False
if not self.item_bad:
prettyPrinter(self.current_item)
self.current_item = {}
def search(self, query, cat='all'):
""" Performs search """
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
page = "".join((self.url, "/", self.supported_categories[cat],
"/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
.format(self.supported_categories[cat], query))
list_searches = additional_pages.findall(html)[:-1]  # drop last match: the "next" link duplicates page 2
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
counter += 1
if counter > 3:
break
parser.close()
| self.item_name = self.parser_class.get(params["class"], None)
if self.item_name:
self.current_item[self.item_name] = "" | conditional_block |
torlock.py | #VERSION: 2.1
# AUTHORS: Douman ([email protected])
# CONTRIBUTORS: Diego de las Heras ([email protected])
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser
class torlock(object):
url = "https://www.torlock.com"
name = "TorLock"
supported_categories = {'all': 'all',
'anime': 'anime',
'software': 'software',
'games': 'game',
'movies': 'movie',
'music': 'music',
'tv': 'television',
'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.article_found = False # true when <article> with results is found
self.item_found = False
self.item_bad = False # set to True for malicious links
self.current_item = None # dict for found item
self.item_name = None # key's name in current_item dict
self.parser_class = {"ts": "size",
"tul": "seeds",
"tdl": "leech"}
def handle_starttag(self, tag, attrs):
|
def handle_data(self, data):
if self.item_name:
self.current_item[self.item_name] += data
def handle_endtag(self, tag):
if tag == "article":
self.article_found = False
elif self.item_name and (tag == "a" or tag == "td"):
self.item_name = None
elif self.item_found and tag == "tr":
self.item_found = False
if not self.item_bad:
prettyPrinter(self.current_item)
self.current_item = {}
def search(self, query, cat='all'):
""" Performs search """
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
page = "".join((self.url, "/", self.supported_categories[cat],
"/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
.format(self.supported_categories[cat], query))
list_searches = additional_pages.findall(html)[:-1]  # drop last match: the "next" link duplicates page 2
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
counter += 1
if counter > 3:
break
parser.close()
| params = dict(attrs)
if self.item_found:
if tag == "td":
if "class" in params:
self.item_name = self.parser_class.get(params["class"], None)
if self.item_name:
self.current_item[self.item_name] = ""
elif self.article_found and tag == "a":
if "href" in params:
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
self.current_item["link"] = "".join((self.url, "/tor/",
link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
self.current_item["name"] = ""
self.item_bad = "rel" in params and params["rel"] == "nofollow"
elif tag == "article":
self.article_found = True
self.current_item = {} | identifier_body |
torlock.py | #VERSION: 2.1
# AUTHORS: Douman ([email protected])
# CONTRIBUTORS: Diego de las Heras ([email protected])
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser
class torlock(object):
url = "https://www.torlock.com" | supported_categories = {'all': 'all',
'anime': 'anime',
'software': 'software',
'games': 'game',
'movies': 'movie',
'music': 'music',
'tv': 'television',
'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.article_found = False # true when <article> with results is found
self.item_found = False
self.item_bad = False # set to True for malicious links
self.current_item = None # dict for found item
self.item_name = None # key's name in current_item dict
self.parser_class = {"ts": "size",
"tul": "seeds",
"tdl": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if self.item_found:
if tag == "td":
if "class" in params:
self.item_name = self.parser_class.get(params["class"], None)
if self.item_name:
self.current_item[self.item_name] = ""
elif self.article_found and tag == "a":
if "href" in params:
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
self.current_item["link"] = "".join((self.url, "/tor/",
link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
self.current_item["name"] = ""
self.item_bad = "rel" in params and params["rel"] == "nofollow"
elif tag == "article":
self.article_found = True
self.current_item = {}
def handle_data(self, data):
if self.item_name:
self.current_item[self.item_name] += data
def handle_endtag(self, tag):
if tag == "article":
self.article_found = False
elif self.item_name and (tag == "a" or tag == "td"):
self.item_name = None
elif self.item_found and tag == "tr":
self.item_found = False
if not self.item_bad:
prettyPrinter(self.current_item)
self.current_item = {}
def search(self, query, cat='all'):
""" Performs search """
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
page = "".join((self.url, "/", self.supported_categories[cat],
"/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
.format(self.supported_categories[cat], query))
list_searches = additional_pages.findall(html)[:-1]  # drop last match: the "next" link duplicates page 2
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
counter += 1
if counter > 3:
break
parser.close() | name = "TorLock" | random_line_split |
metalib_links.js | $(document).ready(function() {
checkMetaLibLinks();
});
function | () {
var id = $.map($('.recordId'), function(i) {
var id = $(i).attr('id').substr('record'.length);
if (id.substr(0, 8) == 'metalib_') {
return id;
}
return null;
});
if (id.length) {
// set the spinner going
$('.metalib_link').addClass('ajax_fulltext_availability');
url = path + '/AJAX/JSON_MetaLib?method=getSearchLinkStatuses';
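// e.g. with a global path of '/vufind' this resolves to
// '/vufind/AJAX/JSON_MetaLib?method=getSearchLinkStatuses' (illustrative value).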
$.getJSON(url, {id:id}, function(response) {
$('.metalib_link').removeClass('ajax_fulltext_availability');
if (response.status != 'OK') {
$('.metalib_link').text("MetaLib link check failed.");
return;
}
$.each(response.data, function(i, result) {
var safeId = jqEscape(result.id);
if (result.status == 'allowed') {
$('#metalib_link_' + safeId).show();
} else {
$('#metalib_link_na_' + safeId).show();
}
});
}).error(function() {
$('.metalib_link').removeClass('ajax_fulltext_availability');
$('.metalib_link').text("MetaLib link check failed.");
});
}
}
| checkMetaLibLinks | identifier_name |
metalib_links.js | $(document).ready(function() {
checkMetaLibLinks(); | var id = $.map($('.recordId'), function(i) {
var id = $(i).attr('id').substr('record'.length);
if (id.substr(0, 8) == 'metalib_') {
return id;
}
return null;
});
if (id.length) {
// set the spinner going
$('.metalib_link').addClass('ajax_fulltext_availability');
url = path + '/AJAX/JSON_MetaLib?method=getSearchLinkStatuses';
$.getJSON(url, {id:id}, function(response) {
$('.metalib_link').removeClass('ajax_fulltext_availability');
if (response.status != 'OK') {
$('.metalib_link').text("MetaLib link check failed.");
return;
}
$.each(response.data, function(i, result) {
var safeId = jqEscape(result.id);
if (result.status == 'allowed') {
$('#metalib_link_' + safeId).show();
} else {
$('#metalib_link_na_' + safeId).show();
}
});
}).error(function() {
$('.metalib_link').removeClass('ajax_fulltext_availability');
$('.metalib_link').text("MetaLib link check failed.");
});
}
} | });
function checkMetaLibLinks() { | random_line_split |
metalib_links.js | $(document).ready(function() {
checkMetaLibLinks();
});
function checkMetaLibLinks() | {
var id = $.map($('.recordId'), function(i) {
var id = $(i).attr('id').substr('record'.length);
if (id.substr(0, 8) == 'metalib_') {
return id;
}
return null;
});
if (id.length) {
// set the spinner going
$('.metalib_link').addClass('ajax_fulltext_availability');
url = path + '/AJAX/JSON_MetaLib?method=getSearchLinkStatuses';
$.getJSON(url, {id:id}, function(response) {
$('.metalib_link').removeClass('ajax_fulltext_availability');
if (response.status != 'OK') {
$('.metalib_link').text("MetaLib link check failed.");
return;
}
$.each(response.data, function(i, result) {
var safeId = jqEscape(result.id);
if (result.status == 'allowed') {
$('#metalib_link_' + safeId).show();
} else {
$('#metalib_link_na_' + safeId).show();
}
});
}).error(function() {
$('.metalib_link').removeClass('ajax_fulltext_availability');
$('.metalib_link').text("MetaLib link check failed.");
});
}
} | identifier_body |
|
metalib_links.js | $(document).ready(function() {
checkMetaLibLinks();
});
function checkMetaLibLinks() {
var id = $.map($('.recordId'), function(i) {
var id = $(i).attr('id').substr('record'.length);
if (id.substr(0, 8) == 'metalib_') {
return id;
}
return null;
});
if (id.length) {
// set the spinner going
$('.metalib_link').addClass('ajax_fulltext_availability');
url = path + '/AJAX/JSON_MetaLib?method=getSearchLinkStatuses';
$.getJSON(url, {id:id}, function(response) {
$('.metalib_link').removeClass('ajax_fulltext_availability');
if (response.status != 'OK') {
$('.metalib_link').text("MetaLib link check failed.");
return;
}
$.each(response.data, function(i, result) {
var safeId = jqEscape(result.id);
if (result.status == 'allowed') | else {
$('#metalib_link_na_' + safeId).show();
}
});
}).error(function() {
$('.metalib_link').removeClass('ajax_fulltext_availability');
$('.metalib_link').text("MetaLib link check failed.");
});
}
}
| {
$('#metalib_link_' + safeId).show();
} | conditional_block |
ziv_service.py | from requests import post
import io
import base64
class ZivService(object):
| def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
def send_cycle(self, filename, cycle_filedata):
"""Send a cycle file to the concentrator service
Keyword arguments:
filename -- the name of our file (doesn't matter)
cycle_filedata -- the file to send, encoded as a base64 string
"""
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
result = post(url, files={'file': (filename, filecontent)}, auth=self.auth)
else:
result = post(url, files={'file': (filename, filecontent)})
return result | identifier_body |
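# Hypothetical usage sketch (URL and credentials are illustrative only):
#   svc = ZivService('http://cnc.example.com', user='user', password='secret')
#   resp = svc.send_cycle('cycle.xml', payload_b64)  # returns a requests.Response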
|
ziv_service.py | from requests import post
import io
import base64
class ZivService(object):
def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
def | (self, filename, cycle_filedata):
"""Send a cycle file to the concentrator service
Keyword arguments:
filename -- the name of our file (doesn't matter)
cycle_filedata -- the file to send, encoded as a base64 string
"""
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
result = post(url, files={'file': (filename, filecontent)}, auth=self.auth)
else:
result = post(url, files={'file': (filename, filecontent)})
return result
| send_cycle | identifier_name |
ziv_service.py | from requests import post | import base64
class ZivService(object):
def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
def send_cycle(self, filename, cycle_filedata):
"""Send a cycle file to the concentrator service
Keyword arguments:
filename -- the name of our file (doesn't matter)
cycle_filedata -- the file to send, encoded as a base64 string
"""
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
result = post(url, files={'file': (filename, filecontent)}, auth=self.auth)
else:
result = post(url, files={'file': (filename, filecontent)})
return result | import io | random_line_split |
ziv_service.py | from requests import post
import io
import base64
class ZivService(object):
def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
def send_cycle(self, filename, cycle_filedata):
"""Send a cycle file to the concentrator service
Keyword arguments:
filename -- the name of our file (doesn't matter)
cycle_filedata -- the file to send, encoded as a base64 string
"""
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
|
else:
result = post(url, files={'file': (filename, filecontent)})
return result
| result = post(url, files={'file': (filename, filecontent)}, auth=self.auth) | conditional_block |
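Usage sketch for the ZivService rows above (not part of the dataset; the concentrator URL, user, and password are hypothetical, and the module is assumed importable under its file_name):

import base64
from ziv_service import ZivService

svc = ZivService('http://cnc.example.com', user='meter', password='secret')
# send_cycle() expects the payload already encoded as a base64 string
payload = base64.b64encode(b'raw cycle file bytes').decode('ascii')
resp = svc.send_cycle('cycle_001.csv', payload)
print(resp.status_code)  # requests Response returned by post()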
admin.component.ts | import { Observable } from 'rxjs/Rx';
import { Component } from '@angular/core';
import { AuthService } from '../authentication';
import { Router } from '@angular/router';
import { MettAppointmentModel, MettOrder, AppointmentService } from './../mett-appointment';
@Component({
selector: 'app-admin',
templateUrl: './admin.component.html',
styleUrls: ['./admin.component.css']
})
export class | {
public appointments: Observable<any[]>;
public selectedItem: MettOrder[];
constructor(private appointmentService: AppointmentService, private authService: AuthService, private router: Router){
this.appointments = this.appointmentService.appointments.map((item: MettAppointmentModel[]) => {
let mapped = item.map(x => {
return {
Date: x.Date,
CreatedBy: x.CreatedBy,
Orders: x.Orders.length,
Buns: x.Orders.reduce((prev, cur, curIndex) => prev += cur.value, 0 ),
Id: x.Id,
OrderList: x.Orders
};
});
return mapped;
});
this.appointmentService.loadAppointments(this.authService.id);
}
selectItem(item: any) {
this.selectedItem = item.OrderList;
}
}
| AdminComponent | identifier_name |
admin.component.ts | import { Observable } from 'rxjs/Rx';
import { Component } from '@angular/core';
import { AuthService } from '../authentication';
import { Router } from '@angular/router';
import { MettAppointmentModel, MettOrder, AppointmentService } from './../mett-appointment';
@Component({
selector: 'app-admin',
templateUrl: './admin.component.html',
styleUrls: ['./admin.component.css']
}) | constructor(private appointmentService: AppointmentService, private authService: AuthService, private router: Router){
this.appointments = this.appointmentService.appointments.map((item: MettAppointmentModel[]) => {
let mapped = item.map(x => {
return {
Date: x.Date,
CreatedBy: x.CreatedBy,
Orders: x.Orders.length,
Buns: x.Orders.reduce((prev, cur, curIndex) => prev += cur.value, 0 ),
Id: x.Id,
OrderList: x.Orders
};
});
return mapped;
});
this.appointmentService.loadAppointments(this.authService.id);
}
selectItem(item: any) {
this.selectedItem = item.OrderList;
}
} | export class AdminComponent{
public appointments: Observable<any[]>;
public selectedItem: MettOrder[];
| random_line_split |
opendata.footer.js | import React, { Component } from 'react';
import { connect } from 'react-redux';
export class | extends Component {
render() {
return (
<footer className="opendata-footer footer">
<nav className="navbar bottom navbar-expand-lg navbar-dark bg-primary">
<div>
<span style={{ color: "#fff" }}>{this.props.text}:</span>
<a href="http://www.vectr.consulting"><img src="logos/VectrConsulting.svg" height={30} /></a>
<a href="http://www.vereenvoudiging.be/" ><img src="logos/DAV.png" height={30} /></a>
<a href="http://www.dekamer.be/" ><img src="logos/dekamer.jpg" height={30} /></a>
<a href="https://neo4j.com/" ><img src="logos/Neo4j.png" height={30} /></a>
</div>
</nav>
</footer>
);
}
}
export default connect(
state => ({ text: state.locale.translation.footer })
)(Footer) | Footer | identifier_name |
opendata.footer.js | import React, { Component } from 'react';
import { connect } from 'react-redux';
export class Footer extends Component {
render() {
return (
<footer className="opendata-footer footer">
<nav className="navbar bottom navbar-expand-lg navbar-dark bg-primary">
<div>
<span style={{ color: "#fff" }}>{this.props.text}:</span>
<a href="http://www.vectr.consulting"><img src="logos/VectrConsulting.svg" height={30} /></a>
<a href="http://www.vereenvoudiging.be/" ><img src="logos/DAV.png" height={30} /></a>
<a href="http://www.dekamer.be/" ><img src="logos/dekamer.jpg" height={30} /></a>
<a href="https://neo4j.com/" ><img src="logos/Neo4j.png" height={30} /></a>
</div>
</nav> | }
export default connect(
state => ({ text: state.locale.translation.footer })
)(Footer) | </footer>
);
} | random_line_split |
header.component.ts | import { Component, AfterViewInit, ElementRef } from '@angular/core';
declare const fabric: any;
/**
* This class represents the header component.
*/
@Component({
moduleId: module.id,
selector: 'header',
templateUrl: 'header.component.html',
styleUrls: ['header.component.css']
})
export class HeaderComponent implements AfterViewInit {
constructor(private element: ElementRef) {
}
ngAfterViewInit() {
// let CommandBarElements = this.element.nativeElement.querySelectorAll(".ms-CommandBar");
// for (var i = 0; i < CommandBarElements.length; i++) {
// new fabric['CommandBar'](CommandBarElements[i]);
// }
let ContextualMenuElement = this.element.nativeElement.querySelector(".ms-ContextualMenu-basic .ms-ContextualMenu");
let ButtonElement = this.element.nativeElement.querySelector(".ms-ContextualMenu-basic .Header-button"); |
} | let contextualMenu = new fabric['ContextualMenu'](ContextualMenuElement, ButtonElement);
} | random_line_split |
header.component.ts | import { Component, AfterViewInit, ElementRef } from '@angular/core';
declare const fabric: any;
/**
* This class represents the header component.
*/
@Component({
moduleId: module.id,
selector: 'header',
templateUrl: 'header.component.html',
styleUrls: ['header.component.css']
})
export class HeaderComponent implements AfterViewInit {
constructor(private element: ElementRef) |
ngAfterViewInit() {
// let CommandBarElements = this.element.nativeElement.querySelectorAll(".ms-CommandBar");
// for (var i = 0; i < CommandBarElements.length; i++) {
// new fabric['CommandBar'](CommandBarElements[i]);
// }
let ContextualMenuElement = this.element.nativeElement.querySelector(".ms-ContextualMenu-basic .ms-ContextualMenu");
let ButtonElement = this.element.nativeElement.querySelector(".ms-ContextualMenu-basic .Header-button");
let contextualMenu = new fabric['ContextualMenu'](ContextualMenuElement, ButtonElement);
}
}
| {
} | identifier_body |
header.component.ts | import { Component, AfterViewInit, ElementRef } from '@angular/core';
declare const fabric: any;
/**
* This class represents the header component.
*/
@Component({
moduleId: module.id,
selector: 'header',
templateUrl: 'header.component.html',
styleUrls: ['header.component.css']
})
export class | implements AfterViewInit {
constructor(private element: ElementRef) {
}
ngAfterViewInit() {
// let CommandBarElements = this.element.nativeElement.querySelectorAll(".ms-CommandBar");
// for (var i = 0; i < CommandBarElements.length; i++) {
// new fabric['CommandBar'](CommandBarElements[i]);
// }
let ContextualMenuElement = this.element.nativeElement.querySelector(".ms-ContextualMenu-basic .ms-ContextualMenu");
let ButtonElement = this.element.nativeElement.querySelector(".ms-ContextualMenu-basic .Header-button");
let contextualMenu = new fabric['ContextualMenu'](ContextualMenuElement, ButtonElement);
}
}
| HeaderComponent | identifier_name |
main_waveform_20170517.py | # coding: utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def time_series(file, i_ch = 0):
|
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get filtered signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter order
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid()
# get fft and plot
T = 1.0 / fs # time interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlap rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show()
| with wave.open(file,'r') as wav_file:
# Extract Raw Audio from Wav File
signal = wav_file.readframes(-1)
signal = np.fromstring(signal, 'Int16')
# Split the data into channels
channels = [[] for channel in range(wav_file.getnchannels())]
for index, datum in enumerate(signal):
channels[index%len(channels)].append(datum)
#Get time from indices
fs = wav_file.getframerate()
Time = np.linspace(0, len(signal)/len(channels)/fs, num=len(signal)/len(channels))
# return
return fs, Time, channels[i_ch] | identifier_body |
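Sanity check for the Butterworth design above, which normalizes the cutoff to Nyquist via Wn = cutoff / (fs * 0.5) (not part of the dataset; fs and cutoff are illustrative values):

import numpy as np
from scipy.signal import butter, freqz

fs, cutoff = 44100, 500
b, a = butter(4, cutoff / (fs * 0.5), btype='low', analog=False)
w, h = freqz(b, a, worN=8000)
hz = w * fs / (2 * np.pi)
# a Butterworth low-pass is -3 dB (|H| = 1/sqrt(2)) at its cutoff, ~500 Hz here
print(hz[np.argmin(np.abs(np.abs(h) - 1 / np.sqrt(2)))])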
main_waveform_20170517.py | # coding: utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def | (file, i_ch = 0):
with wave.open(file,'r') as wav_file:
# Extract Raw Audio from Wav File
signal = wav_file.readframes(-1)
signal = np.fromstring(signal, 'Int16')
# Split the data into channels
channels = [[] for channel in range(wav_file.getnchannels())]
for index, datum in enumerate(signal):
channels[index%len(channels)].append(datum)
#Get time from indices
fs = wav_file.getframerate()
Time = np.linspace(0, len(signal)/len(channels)/fs, num=len(signal)/len(channels))
# return
return fs, Time, channels[i_ch]
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get filtered signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter order
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid()
# get fft and plot
T = 1.0 / fs # time interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlap rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show()
| time_series | identifier_name |
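Sketch of the frequency resolution implied by the welch() parameters above (not part of the dataset; fs is an illustrative value):

import numpy as np
from scipy.signal import welch

fs = 8000
x = np.random.randn(4 * fs)              # 4 s of white noise
nperseg = fs // 4                        # 0.25 s segments
f, Pxx = welch(x, fs=fs, nperseg=nperseg, noverlap=nperseg // 100 * 90)
print(f[1] - f[0])                       # bin spacing = fs / nperseg = 4.0 Hz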
main_waveform_20170517.py | # coding: utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def time_series(file, i_ch = 0):
with wave.open(file,'r') as wav_file:
# Extract Raw Audio from Wav File
signal = wav_file.readframes(-1)
signal = np.fromstring(signal, 'Int16')
# Split the data into channels
channels = [[] for channel in range(wav_file.getnchannels())]
for index, datum in enumerate(signal):
|
#Get time from indices
fs = wav_file.getframerate()
Time = np.linspace(0, len(signal)/len(channels)/fs, num=len(signal)/len(channels))
# return
return fs, Time, channels[i_ch]
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get filtered signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter order
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid()
# get fft and plot
T = 1.0 / fs # time interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlap rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show()
| channels[index%len(channels)].append(datum) | conditional_block |
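Shape sketch for the spectrogram() call above, which produces one power column per overlapping segment (not part of the dataset; sizes are illustrative):

import numpy as np
from scipy.signal import spectrogram

fs = 8000
x = np.random.randn(2 * fs)
nperseg = fs // 4
f, t, Sxx = spectrogram(x, fs=fs, nperseg=nperseg, noverlap=nperseg // 100 * 90)
print(f.shape, t.shape, Sxx.shape)       # Sxx is (len(f), len(t))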
main_waveform_20170517.py | # coding: utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def time_series(file, i_ch = 0):
with wave.open(file,'r') as wav_file:
# Extract Raw Audio from Wav File
signal = wav_file.readframes(-1)
signal = np.fromstring(signal, 'Int16')
# Split the data into channels
channels = [[] for channel in range(wav_file.getnchannels())]
for index, datum in enumerate(signal):
channels[index%len(channels)].append(datum)
#Get time from indices
fs = wav_file.getframerate()
Time = np.linspace(0, len(signal)/len(channels)/fs, num=len(signal)/len(channels))
# return
return fs, Time, channels[i_ch]
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get filtered signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter order
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid() |
# get fft and plot
T = 1.0 / fs # time interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlap rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show() | random_line_split |
|
PlatformDropbox.js | // (C) Copyright 2014-2015 Hewlett Packard Enterprise Development LP
import React, { Component, PropTypes } from 'react';
import classnames from 'classnames';
import CSSClassnames from '../../../utils/CSSClassnames';
import Intl from '../../../utils/Intl';
const CLASS_ROOT = CSSClassnames.CONTROL_ICON;
const COLOR_INDEX = CSSClassnames.COLOR_INDEX;
export default class Icon extends Component {
render () |
};
Icon.contextTypes = {
intl: PropTypes.object
};
Icon.defaultProps = {
responsive: true
};
Icon.displayName = 'PlatformDropbox';
Icon.icon = true;
Icon.propTypes = {
a11yTitle: PropTypes.string,
colorIndex: PropTypes.string,
size: PropTypes.oneOf(['small', 'medium', 'large', 'xlarge', 'huge']),
responsive: PropTypes.bool
};
| {
const { className, colorIndex } = this.props;
let { a11yTitle, size, responsive } = this.props;
let { intl } = this.context;
const classes = classnames(
CLASS_ROOT,
`${CLASS_ROOT}-platform-dropbox`,
className,
{
[`${CLASS_ROOT}--${size}`]: size,
[`${CLASS_ROOT}--responsive`]: responsive,
[`${COLOR_INDEX}-${colorIndex}`]: colorIndex
}
);
a11yTitle = a11yTitle || Intl.getMessage(intl, 'platform-dropbox');
return <svg version="1.1" viewBox="0 0 24 24" width="24px" height="24px" role="img" className={classes} aria-label={a11yTitle}><path fill="#000000" fillRule="evenodd" d="M11.9998861,5.17831096 L4.95498114,9.52849791 L11.9998164,13.8787284 L19.0451308,9.52856802 L11.9998861,5.17831096 Z M11.9998865,5.09510125 L7.09366507,1 L0,5.6316555 L4.86638378,9.52849941 L0,13.425799 L7.09366507,18.0574545 L12.0001376,13.9621062 L16.9063349,18.0572249 L24,13.4255694 L19.1337313,9.52840727 L24,5.6316555 L16.9063349,1 L11.9998865,5.09510125 Z M11.9315598,14.7799043 L6.95333971,18.91089 L4.82296651,17.5200383 L4.82296651,19.0792344 L11.9315598,23.3420478 L19.0401531,19.0792344 L19.0401531,17.5200383 L16.9097799,18.91089 L11.9315598,14.7799043 Z" stroke="none"/></svg>;
} | identifier_body |
PlatformDropbox.js | // (C) Copyright 2014-2015 Hewlett Packard Enterprise Development LP
import React, { Component, PropTypes } from 'react';
import classnames from 'classnames';
import CSSClassnames from '../../../utils/CSSClassnames';
import Intl from '../../../utils/Intl';
const CLASS_ROOT = CSSClassnames.CONTROL_ICON;
const COLOR_INDEX = CSSClassnames.COLOR_INDEX;
export default class Icon extends Component {
| () {
const { className, colorIndex } = this.props;
let { a11yTitle, size, responsive } = this.props;
let { intl } = this.context;
const classes = classnames(
CLASS_ROOT,
`${CLASS_ROOT}-platform-dropbox`,
className,
{
[`${CLASS_ROOT}--${size}`]: size,
[`${CLASS_ROOT}--responsive`]: responsive,
[`${COLOR_INDEX}-${colorIndex}`]: colorIndex
}
);
a11yTitle = a11yTitle || Intl.getMessage(intl, 'platform-dropbox');
return <svg version="1.1" viewBox="0 0 24 24" width="24px" height="24px" role="img" className={classes} aria-label={a11yTitle}><path fill="#000000" fillRule="evenodd" d="M11.9998861,5.17831096 L4.95498114,9.52849791 L11.9998164,13.8787284 L19.0451308,9.52856802 L11.9998861,5.17831096 Z M11.9998865,5.09510125 L7.09366507,1 L0,5.6316555 L4.86638378,9.52849941 L0,13.425799 L7.09366507,18.0574545 L12.0001376,13.9621062 L16.9063349,18.0572249 L24,13.4255694 L19.1337313,9.52840727 L24,5.6316555 L16.9063349,1 L11.9998865,5.09510125 Z M11.9315598,14.7799043 L6.95333971,18.91089 L4.82296651,17.5200383 L4.82296651,19.0792344 L11.9315598,23.3420478 L19.0401531,19.0792344 L19.0401531,17.5200383 L16.9097799,18.91089 L11.9315598,14.7799043 Z" stroke="none"/></svg>;
}
};
Icon.contextTypes = {
intl: PropTypes.object
};
Icon.defaultProps = {
responsive: true
};
Icon.displayName = 'PlatformDropbox';
Icon.icon = true;
Icon.propTypes = {
a11yTitle: PropTypes.string,
colorIndex: PropTypes.string,
size: PropTypes.oneOf(['small', 'medium', 'large', 'xlarge', 'huge']),
responsive: PropTypes.bool
};
| render | identifier_name |
PlatformDropbox.js | // (C) Copyright 2014-2015 Hewlett Packard Enterprise Development LP
import React, { Component, PropTypes } from 'react';
import classnames from 'classnames';
import CSSClassnames from '../../../utils/CSSClassnames';
import Intl from '../../../utils/Intl';
const CLASS_ROOT = CSSClassnames.CONTROL_ICON;
const COLOR_INDEX = CSSClassnames.COLOR_INDEX;
export default class Icon extends Component {
render () {
const { className, colorIndex } = this.props;
let { a11yTitle, size, responsive } = this.props;
let { intl } = this.context;
const classes = classnames(
CLASS_ROOT,
`${CLASS_ROOT}-platform-dropbox`,
className,
{
[`${CLASS_ROOT}--${size}`]: size,
[`${CLASS_ROOT}--responsive`]: responsive,
[`${COLOR_INDEX}-${colorIndex}`]: colorIndex
}
);
a11yTitle = a11yTitle || Intl.getMessage(intl, 'platform-dropbox');
return <svg version="1.1" viewBox="0 0 24 24" width="24px" height="24px" role="img" className={classes} aria-label={a11yTitle}><path fill="#000000" fillRule="evenodd" d="M11.9998861,5.17831096 L4.95498114,9.52849791 L11.9998164,13.8787284 L19.0451308,9.52856802 L11.9998861,5.17831096 Z M11.9998865,5.09510125 L7.09366507,1 L0,5.6316555 L4.86638378,9.52849941 L0,13.425799 L7.09366507,18.0574545 L12.0001376,13.9621062 L16.9063349,18.0572249 L24,13.4255694 L19.1337313,9.52840727 L24,5.6316555 L16.9063349,1 L11.9998865,5.09510125 Z M11.9315598,14.7799043 L6.95333971,18.91089 L4.82296651,17.5200383 L4.82296651,19.0792344 L11.9315598,23.3420478 L19.0401531,19.0792344 L19.0401531,17.5200383 L16.9097799,18.91089 L11.9315598,14.7799043 Z" stroke="none"/></svg>;
}
};
Icon.contextTypes = {
intl: PropTypes.object
};
Icon.defaultProps = {
responsive: true
};
Icon.displayName = 'PlatformDropbox';
Icon.icon = true;
Icon.propTypes = { | a11yTitle: PropTypes.string,
colorIndex: PropTypes.string,
size: PropTypes.oneOf(['small', 'medium', 'large', 'xlarge', 'huge']),
responsive: PropTypes.bool
}; | random_line_split |
|
spark_webservice_demo.py | # Copyright 2015 David Wang. All rights reserved.
# Use of this source code is governed by MIT license.
# Please see LICENSE file
# WebSpark
# Spark web service demo
# version 0.2
# use REPL or define sc SparkContext
import urllib2, urllib
import math
import time
import traceback
# Spark Web Application demo with parallel processing
# see the demo function
ServerAddr="http://<enter WebSpark IP address here>:8001"
RegisterURL=ServerAddr + "/addapi?"
RespondURL=ServerAddr + "/respond?"
errwaitseconds = 3
element = '<li class="list-group-item">first prime above %d is %d</li>'
with open('template.html') as f:
template = f.read()
def slow_isprime(num):
if num<2:
return False
for i in range(2, int(math.sqrt(num))+1):
if num%i==0:
return False
return True
def firstprimeabove(num):
i=num+1
while True:
if slow_isprime(i):
return i
i+=1
servicename = 'demo'
# Spark Web Application demo
def | (url):
rawdata = range(1000, 20000, 1100)
data = sc.parallelize(rawdata)
above=data.map(lambda x: (x, firstprimeabove(x))).collect()
primelist=[element%x for x in above]
response = template % ' '.join(primelist)
return response
def parserequest(rawrequest):
lines = rawrequest.split('\n')
if len(lines)<4:
print 'incorrect WebSpark request'
else:
name = lines[0]
url = lines[1]
remoteaddr = lines[2]
header = lines[3:]
return name, url, remoteaddr, header
st =''
# publish web service with WebSpark
while True:
try:
url = RegisterURL + urllib.urlencode({'name': servicename})
conn = urllib2.urlopen(url)
data = conn.read()
conn.close()
name, clienturl, remoteaddr, header = parserequest(data)
print name, clienturl, remoteaddr, header
response = demo(clienturl)
url = RespondURL + urllib.urlencode({'name': name})
conn = urllib2.urlopen(url, response)
conn.close()
except Exception as ex:
print 'error connecting to WebSpark at', ServerAddr
traceback.print_exc()
time.sleep(errwaitseconds)
continue
| demo | identifier_name |
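Local sanity check for demo(), which fans firstprimeabove() out over sc.parallelize (not part of the dataset; assumes slow_isprime and firstprimeabove as defined above, no cluster needed):

rawdata = range(1000, 20000, 1100)
above = [(x, firstprimeabove(x)) for x in rawdata]
print(above[:3])  # [(1000, 1009), (2100, 2111), (3200, 3203)]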
spark_webservice_demo.py | # Copyright 2015 David Wang. All rights reserved.
# Use of this source code is governed by MIT license.
# Please see LICENSE file
# WebSpark
# Spark web service demo
# version 0.2
# use REPL or define sc SparkContext
import urllib2, urllib
import math
import time
import traceback
# Spark Web Application demo with parallel processing
# see the demo function
ServerAddr="http://<enter WebSpark IP address here>:8001"
RegisterURL=ServerAddr + "/addapi?"
RespondURL=ServerAddr + "/respond?"
errwaitseconds = 3
element = '<li class="list-group-item">first prime above %d is %d</li>'
with open('template.html') as f:
template = f.read()
def slow_isprime(num):
if num<2:
return False
for i in range(2, int(math.sqrt(num))+1):
if num%i==0:
return False
return True
def firstprimeabove(num):
i=num+1
while True:
if slow_isprime(i):
return i
i+=1
servicename = 'demo'
# Spark Web Application demo
def demo(url):
rawdata = range(1000, 20000, 1100)
data = sc.parallelize(rawdata)
above=data.map(lambda x: (x, firstprimeabove(x))).collect()
primelist=[element%x for x in above]
response = template % ' '.join(primelist)
return response
def parserequest(rawrequest):
lines = rawrequest.split('\n')
if len(lines)<4:
|
else:
name = lines[0]
url = lines[1]
remoteaddr = lines[2]
header = lines[3:]
return name, url, remoteaddr, header
st =''
# publish web service with WebSpark
while True:
try:
url = RegisterURL + urllib.urlencode({'name': servicename})
conn = urllib2.urlopen(url)
data = conn.read()
conn.close()
name, clienturl, remoteaddr, header = parserequest(data)
print name, clienturl, remoteaddr, header
response = demo(clienturl)
url = RespondURL + urllib.urlencode({'name': name})
conn = urllib2.urlopen(url, response)
conn.close()
except Exception as ex:
print 'error connecting to WebSpark at', ServerAddr
traceback.print_exc()
time.sleep(errwaitseconds)
continue
| print 'incorrect WebSpark request' | conditional_block |
spark_webservice_demo.py | # Copyright 2015 David Wang. All rights reserved.
# Use of this source code is governed by MIT license.
# Please see LICENSE file
# WebSpark
# Spark web service demo
# version 0.2
# use REPL or define sc SparkContext
import urllib2, urllib
import math
import time
import traceback
# Spark Web Application demo with parallel processing
# see the demo function
ServerAddr="http://<enter WebSpark IP address here>:8001"
RegisterURL=ServerAddr + "/addapi?"
RespondURL=ServerAddr + "/respond?"
errwaitseconds = 3
element = '<li class="list-group-item">first prime above %d is %d</li>'
with open('template.html') as f:
template = f.read()
def slow_isprime(num):
|
def firstprimeabove(num):
i=num+1
while True:
if slow_isprime(i):
return i
i+=1
servicename = 'demo'
# Spark Web Application demo
def demo(url):
rawdata = range(1000, 20000, 1100)
data = sc.parallelize(rawdata)
above=data.map(lambda x: (x, firstprimeabove(x))).collect()
primelist=[element%x for x in above]
response = template % ' '.join(primelist)
return response
def parserequest(rawrequest):
lines = rawrequest.split('\n')
if len(lines)<4:
print 'incorrect WebSpark request'
else:
name = lines[0]
url = lines[1]
remoteaddr = lines[2]
header = lines[3:]
return name, url, remoteaddr, header
st =''
# publish web service with WebSpark
while True:
try:
url = RegisterURL + urllib.urlencode({'name': servicename})
conn = urllib2.urlopen(url)
data = conn.read()
conn.close()
name, clienturl, remoteaddr, header = parserequest(data)
print name, clienturl, remoteaddr, header
response = demo(clienturl)
url = RespondURL + urllib.urlencode({'name': name})
conn = urllib2.urlopen(url, response)
conn.close()
except Exception as ex:
print 'error connecting to WebSpark at', ServerAddr
traceback.print_exc()
time.sleep(errwaitseconds)
continue
| if num<2:
return False
for i in range(2, int(math.sqrt(num))+1):
if num%i==0:
return False
return True | identifier_body |
spark_webservice_demo.py | # Copyright 2015 David Wang. All rights reserved.
# Use of this source code is governed by MIT license.
# Please see LICENSE file
# WebSpark
# Spark web service demo
# version 0.2
# use REPL or define sc SparkContext
import urllib2, urllib
import math
import time
import traceback
|
ServerAddr="http://<enter WebSpark IP address here>:8001"
RegisterURL=ServerAddr + "/addapi?"
RespondURL=ServerAddr + "/respond?"
errwaitseconds = 3
element = '<li class="list-group-item">first prime above %d is %d</li>'
with open('template.html') as f:
template = f.read()
def slow_isprime(num):
if num<2:
return False
for i in range(2, int(math.sqrt(num))+1):
if num%i==0:
return False
return True
def firstprimeabove(num):
i=num+1
while True:
if slow_isprime(i):
return i
i+=1
servicename = 'demo'
# Spark Web Application demo
def demo(url):
rawdata = range(1000, 20000, 1100)
data = sc.parallelize(rawdata)
above=data.map(lambda x: (x, firstprimeabove(x))).collect()
primelist=[element%x for x in above]
response = template % ' '.join(primelist)
return response
def parserequest(rawrequest):
lines = rawrequest.split('\n')
if len(lines)<4:
print 'incorrect WebSpark request'
else:
name = lines[0]
url = lines[1]
remoteaddr = lines[2]
header = lines[3:]
return name, url, remoteaddr, header
st =''
# publish web service with WebSpark
while True:
try:
url = RegisterURL + urllib.urlencode({'name': servicename})
conn = urllib2.urlopen(url)
data = conn.read()
conn.close()
name, clienturl, remoteaddr, header = parserequest(data)
print name, clienturl, remoteaddr, header
response = demo(clienturl)
url = RespondURL + urllib.urlencode({'name': name})
conn = urllib2.urlopen(url, response)
conn.close()
except Exception as ex:
print 'error connecting to WebSpark at', ServerAddr
traceback.print_exc()
time.sleep(errwaitseconds)
continue |
# Spark Web Application demo with parallel processing
# see demoservice function | random_line_split |
habhub.view.ts | namespace $.$$ { |
export class $mol_app_habhub extends $.$mol_app_habhub {
uriSource(){
return 'https://api.github.com/search/issues?q=label:HabHub+is:open&sort=reactions'
}
gists() {
return $mol_github_search_issues.item( this.uriSource() ).items()
}
gists_dict() {
const dict = {} as { [ key : string ] : $mol_github_issue }
for( let gist of this.gists() ) {
dict[ gist.uri() ] = gist
}
return dict
}
gist( id : number ) {
return this.gists_dict()[ id ]
}
gist_current() {
return $mol_maybe( $mol_state_arg.value( 'gist' ) ).map( uri => this.gists_dict()[ uri ] )[0] || null
}
pages() {
return [
this.Menu_page() ,
... this.gist_current() ? [ this.Details() ] : []
]
}
Placeholder() {
return this.gist_current() ? null : super.Placeholder()
}
menu_rows() : $mol_view[] {
return this.gists().map( ( gist , index ) => this.Menu_row( gist.uri() ) )
}
gist_title( id : number ) {
return this.gist( id ).title()
}
gist_arg( id : number ) {
return { gist : id }
}
gist_current_title() {
return this.gist_current().title()
}
gist_current_content() {
return this.gist_current().text()
}
gist_current_issue() {
return this.gist_current()
}
details_scroll_top( next? : number ) {
const current = this.gist_current()
return $mol_state_session.value( `${ this }.details_scroll_top(${ current.uri() })` , next )
}
}
} | random_line_split |
|
habhub.view.ts | namespace $.$$ {
export class $mol_app_habhub extends $.$mol_app_habhub {
uriSource(){
return 'https://api.github.com/search/issues?q=label:HabHub+is:open&sort=reactions'
}
gists() {
return $mol_github_search_issues.item( this.uriSource() ).items()
}
| () {
const dict = {} as { [ key : string ] : $mol_github_issue }
for( let gist of this.gists() ) {
dict[ gist.uri() ] = gist
}
return dict
}
gist( id : number ) {
return this.gists_dict()[ id ]
}
gist_current() {
return $mol_maybe( $mol_state_arg.value( 'gist' ) ).map( uri => this.gists_dict()[ uri ] )[0] || null
}
pages() {
return [
this.Menu_page() ,
... this.gist_current() ? [ this.Details() ] : []
]
}
Placeholder() {
return this.gist_current() ? null : super.Placeholder()
}
menu_rows() : $mol_view[] {
return this.gists().map( ( gist , index ) => this.Menu_row( gist.uri() ) )
}
gist_title( id : number ) {
return this.gist( id ).title()
}
gist_arg( id : number ) {
return { gist : id }
}
gist_current_title() {
return this.gist_current().title()
}
gist_current_content() {
return this.gist_current().text()
}
gist_current_issue() {
return this.gist_current()
}
details_scroll_top( next? : number ) {
const current = this.gist_current()
return $mol_state_session.value( `${ this }.details_scroll_top(${ current.uri() })` , next )
}
}
}
| gists_dict | identifier_name |
data.py | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import str
import h5py
import numpy as np
def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for data captured per minibatch.
Arguments:
minibatches (int): how many total minibatches
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((minibatches,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
x[last_e:e] = e_idx + (np.arange(float(e_minibatches)) / e_minibatches)
last_e = e
else:
x = np.arange(minibatches)
return x
def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
|
def h5_cost_data(filename, epoch_axis=True):
"""
Read cost data from hdf5 file. Generate x axis data for each cost line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, x data, y data)
"""
ret = list()
with h5py.File(filename, "r") as f:
config, cost, time_markers = [f[x] for x in ['config', 'cost', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
minibatch_markers = time_markers['minibatch']
for name, ydata in cost.items():
y = ydata[...]
if ydata.attrs['time_markers'] == 'epoch_freq':
y_epoch_freq = ydata.attrs['epoch_freq']
assert len(y) == total_epochs // y_epoch_freq
x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
elif ydata.attrs['time_markers'] == 'minibatch':
assert len(y) == total_minibatches
x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
else:
raise TypeError('Unsupported data format for h5_cost_data')
ret.append((name, x, y))
return ret
def h5_hist_data(filename, epoch_axis=True):
"""
Read histogram data from hdf5 file. Generate x axis data for each hist line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, data, dh, dw, bins, offset)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'hist' in f:
hists, config = [f[x] for x in ['hist', 'config']]
bins, offset, time_markers = [hists.attrs[x]
for x in ['bins', 'offset', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
for hname, hdata in hists.items():
dw = total_epochs if (time_markers == 'epoch_freq') else total_minibatches
dh = bins
ret.append((hname, hdata[...], dh, dw, bins, offset))
return ret
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
img_data: img (ndarray, shape: [N, M, 3], dtype: uint8): image data
downsample (int): downsampling factor applied along both image axes
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * (C + 1)).view(np.uint32)
return final_image
def h5_deconv_data(filename):
"""
Read deconv visualization data from hdf5 file.
Arguments:
filename (str): Filename with hdf5 deconv data
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'deconv' not in list(f.keys()):
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in list(act_data.keys()):
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
| """
Helper function to build x axis for points captured per epoch.
Arguments:
points (int): how many data points need corresponding x-axis points
epoch_freq (int): are points once an epoch or once every n epochs?
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((points,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
if (e_idx + 1) % epoch_freq == 0:
x[e_idx // epoch_freq] = e_idx + ((e_minibatches - 1) // e_minibatches)
last_e = e
else:
x = minibatch_markers[(epoch_freq - 1)::epoch_freq] - 1
return x | identifier_body |
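Worked example for create_minibatch_x (not part of the dataset): two epochs containing 3 and 2 minibatches land each minibatch at a fractional epoch position on the epoch axis.

import numpy as np

markers = np.array([3, 5])              # cumulative minibatch counts per epoch
x = create_minibatch_x(5, markers, epoch_axis=True)
print(x)                                # [0. 0.333 0.667 1. 1.5] (rounded)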
data.py | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import str
import h5py
import numpy as np
def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for data captured per minibatch.
Arguments:
minibatches (int): how many total minibatches
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((minibatches,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
x[last_e:e] = e_idx + (np.arange(float(e_minibatches)) / e_minibatches)
last_e = e
else:
x = np.arange(minibatches)
return x
def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for points captured per epoch.
Arguments:
points (int): how many data points need corresponding x-axis points
epoch_freq (int): are points once an epoch or once every n epochs?
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((points,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
if (e_idx + 1) % epoch_freq == 0:
|
last_e = e
else:
x = minibatch_markers[(epoch_freq - 1)::epoch_freq] - 1
return x
def h5_cost_data(filename, epoch_axis=True):
"""
Read cost data from hdf5 file. Generate x axis data for each cost line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, x data, y data)
"""
ret = list()
with h5py.File(filename, "r") as f:
config, cost, time_markers = [f[x] for x in ['config', 'cost', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
minibatch_markers = time_markers['minibatch']
for name, ydata in cost.items():
y = ydata[...]
if ydata.attrs['time_markers'] == 'epoch_freq':
y_epoch_freq = ydata.attrs['epoch_freq']
assert len(y) == total_epochs // y_epoch_freq
x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
elif ydata.attrs['time_markers'] == 'minibatch':
assert len(y) == total_minibatches
x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
else:
raise TypeError('Unsupported data format for h5_cost_data')
ret.append((name, x, y))
return ret
def h5_hist_data(filename, epoch_axis=True):
"""
Read histogram data from hdf5 file. Generate x axis data for each hist line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, data, dh, dw, bins, offset)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'hist' in f:
hists, config = [f[x] for x in ['hist', 'config']]
bins, offset, time_markers = [hists.attrs[x]
for x in ['bins', 'offset', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
for hname, hdata in hists.items():
dw = total_epochs if (time_markers == 'epoch_freq') else total_minibatches
dh = bins
ret.append((hname, hdata[...], dh, dw, bins, offset))
return ret
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
img_data: img (ndarray, shape: [N, M, 3], dtype: uint8): image data
downsample (int): downsampling factor applied along both image axes
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * (C + 1)).view(np.uint32)
return final_image
def h5_deconv_data(filename):
"""
Read deconv visualization data from hdf5 file.
Arguments:
filename (str): Filename with hdf5 deconv data
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'deconv' not in list(f.keys()):
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in list(act_data.keys()):
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
| x[e_idx // epoch_freq] = e_idx + ((e_minibatches - 1) // e_minibatches) | conditional_block |
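Companion example for create_epoch_x (not part of the dataset): with epoch_axis=False it returns the index of the last minibatch of every epoch_freq-th epoch.

import numpy as np

markers = np.array([3, 5])
print(create_epoch_x(2, 1, markers, epoch_axis=False))  # [2 4]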
data.py | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import str
import h5py
import numpy as np
def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for data captured per minibatch.
Arguments:
minibatches (int): how many total minibatches
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((minibatches,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
x[last_e:e] = e_idx + (np.arange(float(e_minibatches)) / e_minibatches)
last_e = e
else:
x = np.arange(minibatches)
return x
def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for points captured per epoch.
Arguments:
points (int): how many data points need corresponding x-axis points
epoch_freq (int): are points once an epoch or once every n epochs?
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((points,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
if (e_idx + 1) % epoch_freq == 0:
x[e_idx // epoch_freq] = e_idx + ((e_minibatches - 1) // e_minibatches)
last_e = e
else:
x = minibatch_markers[(epoch_freq - 1)::epoch_freq] - 1
return x
def h5_cost_data(filename, epoch_axis=True):
"""
Read cost data from hdf5 file. Generate x axis data for each cost line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, x data, y data)
"""
ret = list()
with h5py.File(filename, "r") as f:
config, cost, time_markers = [f[x] for x in ['config', 'cost', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
minibatch_markers = time_markers['minibatch']
for name, ydata in cost.items():
y = ydata[...]
if ydata.attrs['time_markers'] == 'epoch_freq':
y_epoch_freq = ydata.attrs['epoch_freq']
assert len(y) == total_epochs // y_epoch_freq
x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
elif ydata.attrs['time_markers'] == 'minibatch':
assert len(y) == total_minibatches
x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
else:
raise TypeError('Unsupported data format for h5_cost_data')
ret.append((name, x, y))
return ret
def | (filename, epoch_axis=True):
"""
Read histogram data from hdf5 file. Generate x axis data for each hist line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, data, dh, dw, bins, offset)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'hist' in f:
hists, config = [f[x] for x in ['hist', 'config']]
bins, offset, time_markers = [hists.attrs[x]
for x in ['bins', 'offset', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
for hname, hdata in hists.items():
dw = total_epochs if (time_markers == 'epoch_freq') else total_minibatches
dh = bins
ret.append((hname, hdata[...], dh, dw, bins, offset))
return ret
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
img_data: img (ndarray, shape: [N, M, 3], dtype: uint8): image data
downsample (int): downsampling factor applied along both image axes
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * (C + 1)).view(np.uint32)
return final_image
def h5_deconv_data(filename):
"""
Read deconv visualization data from hdf5 file.
Arguments:
filename (str): Filename with hdf5 deconv data
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'deconv' not in list(f.keys()):
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in list(act_data.keys()):
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
| h5_hist_data | identifier_name |
data.py | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import str
import h5py
import numpy as np
def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for data captured per minibatch.
Arguments:
minibatches (int): how many total minibatches
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((minibatches,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
x[last_e:e] = e_idx + (np.arange(float(e_minibatches)) / e_minibatches)
last_e = e
else:
x = np.arange(minibatches)
return x
def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for points captured per epoch.
Arguments:
points (int): how many data points need corresponding x-axis points
epoch_freq (int): are points once an epoch or once every n epochs?
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((points,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
if (e_idx + 1) % epoch_freq == 0:
                x[e_idx // epoch_freq] = e_idx + ((e_minibatches - 1) / e_minibatches)
last_e = e
else:
x = minibatch_markers[(epoch_freq - 1)::epoch_freq] - 1
return x
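A quick worked example of the two axis builders (numbers chosen purely for illustration): with three minibatches per epoch over two epochs,

    import numpy as np
    markers = np.array([3, 6])                        # cumulative minibatches per epoch
    x_mb = create_minibatch_x(6, markers, epoch_axis=True)
    # -> [0., 1/3, 2/3, 1., 4/3, 5/3]: each epoch spread over fractional steps
    x_ep = create_epoch_x(2, 1, markers, epoch_axis=True)
    # -> one x value per epoch, placed toward the end of that epoch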
def h5_cost_data(filename, epoch_axis=True):
"""
Read cost data from hdf5 file. Generate x axis data for each cost line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, x data, y data)
"""
ret = list()
with h5py.File(filename, "r") as f:
config, cost, time_markers = [f[x] for x in ['config', 'cost', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
minibatch_markers = time_markers['minibatch']
for name, ydata in cost.items():
y = ydata[...]
if ydata.attrs['time_markers'] == 'epoch_freq':
y_epoch_freq = ydata.attrs['epoch_freq']
assert len(y) == total_epochs // y_epoch_freq
x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
elif ydata.attrs['time_markers'] == 'minibatch': | x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
else:
raise TypeError('Unsupported data format for h5_cost_data')
ret.append((name, x, y))
return ret
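The tuples from h5_cost_data drop straight into a bokeh line plot; a sketch (filename and styling are illustrative, not part of the original module):

    from bokeh.plotting import figure, show

    fig = figure(title='training cost')
    for name, x, y in h5_cost_data('data.h5'):
        fig.line(x, y, legend_label=name)  # older bokeh used 'legend' instead
    show(fig)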
def h5_hist_data(filename, epoch_axis=True):
"""
Read histogram data from hdf5 file. Generate x axis data for each hist line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, data, dh, dw, bins, offset)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'hist' in f:
hists, config = [f[x] for x in ['hist', 'config']]
bins, offset, time_markers = [hists.attrs[x]
for x in ['bins', 'offset', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
for hname, hdata in hists.items():
dw = total_epochs if (time_markers == 'epoch_freq') else total_minibatches
dh = bins
ret.append((hname, hdata[...], dh, dw, bins, offset))
return ret
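Each h5_hist_data tuple carries a flattened histogram plus the dh/dw extents for rendering it as a heat map. A consumption sketch -- the (dh, dw) reshape is an assumption based on the sizing above, since the writer's exact layout is not shown here:

    import numpy as np

    for hname, hdata, dh, dw, bins, offset in h5_hist_data('data.h5'):
        grid = np.asarray(hdata).reshape(dh, dw)  # assumed bins-by-time layout
        print(hname, grid.shape, 'bin offset:', offset)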
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
        img_data (ndarray, shape: [N, M, 3], dtype: uint8): RGB image data
        downsample (int, optional): keep every downsample-th pixel in each
            spatial dimension (default 1)
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * (C + 1)).view(np.uint32)
return final_image
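The final view(np.uint32) packs each [R, G, B, A] byte quadruple into a single 32-bit pixel, which is what bokeh's image_rgba expects. A tiny worked example of the packing:

    import numpy as np

    rgba = np.array([[[10, 20, 30, 255]]], dtype=np.uint8)  # one RGBA pixel
    packed = rgba.reshape(1, 4).view(np.uint32)
    # on a little-endian machine: A<<24 | B<<16 | G<<8 | R
    assert int(packed[0, 0]) == (255 << 24) | (30 << 16) | (20 << 8) | 10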
def h5_deconv_data(filename):
"""
Read deconv visualization data from hdf5 file.
Arguments:
filename (str): Filename with hdf5 deconv data
Returns:
list of lists. Each inner list represents one layer, and consists of
        tuples (fm, deconv_data, img_data)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'deconv' not in list(f.keys()):
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in list(act_data.keys()):
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret | assert len(y) == total_minibatches | random_line_split |
mastakilla_spider.py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
| name = "mastakilla" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Masta_Killa", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Masta_Killa.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
return item | identifier_body |
|
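The spider fills a LyricWikiItem with four fields; the items module is not included in this dump, but a matching definition would look like the sketch below (old-style scrapy API, to match the imports above):

    # lyricwiki/items.py (assumed -- the original file is not shown here)
    from scrapy.item import Item, Field

    class LyricWikiItem(Item):
        title = Field()
        artist = Field()
        album = Field()
        lyrics = Field()

    # run from the project root, e.g.:
    #   scrapy crawl mastakilla -o lyrics.json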
mastakilla_spider.py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
name = "mastakilla" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Masta_Killa", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Masta_Killa.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
def | (self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
return item | parse_item | identifier_name |
mastakilla_spider.py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
|
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
name = "mastakilla" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Masta_Killa", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Masta_Killa.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
return item | from scrapy.selector import Selector
| random_line_split |
mount.ts | import { DEBUG } from '@glimmer/env';
import { ComponentCapabilities } from '@glimmer/interfaces';
import { CONSTANT_TAG, Tag, VersionedPathReference } from '@glimmer/reference';
import { ComponentDefinition, Invocation, WithDynamicLayout } from '@glimmer/runtime';
import { Destroyable, Opaque, Option } from '@glimmer/util';
import { generateControllerFactory } from '@ember/-internals/routing';
import { OwnedTemplateMeta } from '@ember/-internals/views';
import { EMBER_ENGINES_MOUNT_PARAMS } from '@ember/canary-features';
import Environment from '../environment';
import RuntimeResolver from '../resolver';
import { OwnedTemplate } from '../template';
import { RootReference } from '../utils/references';
import AbstractManager from './abstract';
// TODO: remove these stubbed interfaces when better typing is in place
interface Engine {
boot(): void;
destroy(): void;
lookup(name: string): any;
factoryFor(name: string): any;
}
interface EngineState {
engine: Engine;
controller: any;
self: RootReference<any>;
tag: Tag;
}
interface EngineWithModelState extends EngineState {
modelRef: VersionedPathReference<Opaque>;
modelRev: number;
}
interface EngineDefinitionState {
name: string;
modelRef: VersionedPathReference<Opaque> | undefined;
}
const CAPABILITIES = {
dynamicLayout: true,
dynamicTag: false,
prepareArgs: false,
createArgs: false,
attributeHook: false,
elementHook: false,
createCaller: true,
dynamicScope: true,
updateHook: true,
createInstance: true,
};
class MountManager
extends AbstractManager<EngineState | EngineWithModelState, EngineDefinitionState>
implements
WithDynamicLayout<EngineState | EngineWithModelState, OwnedTemplateMeta, RuntimeResolver> {
getDynamicLayout(state: EngineState, _: RuntimeResolver): Invocation {
let template = state.engine.lookup('template:application') as OwnedTemplate;
let layout = template.asLayout();
return {
handle: layout.compile(),
symbolTable: layout.symbolTable,
};
}
getCapabilities(): ComponentCapabilities {
return CAPABILITIES;
}
create(environment: Environment, state: EngineDefinitionState) {
if (DEBUG) {
this._pushEngineToDebugStack(`engine:${state.name}`, environment);
}
// TODO
// mount is a runtime helper, this shouldn't use dynamic layout
// we should resolve the engine app template in the helper
// it also should use the owner that looked up the mount helper.
let engine = environment.owner.buildChildEngineInstance<Engine>(state.name);
engine.boot();
let applicationFactory = engine.factoryFor(`controller:application`);
let controllerFactory = applicationFactory || generateControllerFactory(engine, 'application');
let controller: any;
let self: RootReference<any>;
let bucket: EngineState | EngineWithModelState;
let tag: Tag;
if (EMBER_ENGINES_MOUNT_PARAMS) {
let modelRef = state.modelRef;
if (modelRef === undefined) {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
} else {
let model = modelRef.value();
let modelRev = modelRef.tag.value();
controller = controllerFactory.create({ model });
self = new RootReference(controller);
tag = modelRef.tag;
bucket = { engine, controller, self, tag, modelRef, modelRev };
}
} else |
return bucket;
}
getSelf({ self }: EngineState): VersionedPathReference<Opaque> {
return self;
}
getTag(state: EngineState | EngineWithModelState): Tag {
return state.tag;
}
getDestructor({ engine }: EngineState): Option<Destroyable> {
return engine;
}
didRenderLayout(): void {
if (DEBUG) {
this.debugStack.pop();
}
}
update(bucket: EngineWithModelState): void {
if (EMBER_ENGINES_MOUNT_PARAMS) {
let { controller, modelRef, modelRev } = bucket;
if (!modelRef.tag.validate(modelRev!)) {
let model = modelRef.value();
bucket.modelRev = modelRef.tag.value();
controller.set('model', model);
}
}
}
}
const MOUNT_MANAGER = new MountManager();
export class MountDefinition implements ComponentDefinition {
public state: EngineDefinitionState;
public manager = MOUNT_MANAGER;
constructor(name: string, modelRef: VersionedPathReference<Opaque> | undefined) {
this.state = { name, modelRef };
}
}
| {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
} | conditional_block |
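For orientation, a sketch of the caller side: Ember's mount helper (not shown here) resolves an engine name plus an optional model reference and hands Glimmer a MountDefinition. The wiring below is hypothetical:

    import { VersionedPathReference } from '@glimmer/reference';
    import { Opaque } from '@glimmer/util';
    import { MountDefinition } from './mount';

    // Glimmer later calls definition.manager.create(env, definition.state),
    // which boots the child engine as in MountManager.create above.
    function curryMount(name: string, modelRef?: VersionedPathReference<Opaque>) {
      return new MountDefinition(name, modelRef);
    }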
mount.ts | import { DEBUG } from '@glimmer/env';
import { ComponentCapabilities } from '@glimmer/interfaces';
import { CONSTANT_TAG, Tag, VersionedPathReference } from '@glimmer/reference';
import { ComponentDefinition, Invocation, WithDynamicLayout } from '@glimmer/runtime';
import { Destroyable, Opaque, Option } from '@glimmer/util';
import { generateControllerFactory } from '@ember/-internals/routing';
import { OwnedTemplateMeta } from '@ember/-internals/views';
import { EMBER_ENGINES_MOUNT_PARAMS } from '@ember/canary-features';
import Environment from '../environment';
import RuntimeResolver from '../resolver';
import { OwnedTemplate } from '../template';
import { RootReference } from '../utils/references';
import AbstractManager from './abstract';
// TODO: remove these stubbed interfaces when better typing is in place
interface Engine {
boot(): void;
destroy(): void;
lookup(name: string): any;
factoryFor(name: string): any;
}
interface EngineState {
engine: Engine;
controller: any;
self: RootReference<any>;
tag: Tag;
}
interface EngineWithModelState extends EngineState {
modelRef: VersionedPathReference<Opaque>;
modelRev: number;
}
interface EngineDefinitionState {
name: string;
modelRef: VersionedPathReference<Opaque> | undefined;
}
const CAPABILITIES = {
dynamicLayout: true,
dynamicTag: false,
prepareArgs: false,
createArgs: false,
attributeHook: false,
elementHook: false,
createCaller: true,
dynamicScope: true,
updateHook: true,
createInstance: true,
};
class MountManager
extends AbstractManager<EngineState | EngineWithModelState, EngineDefinitionState>
implements
WithDynamicLayout<EngineState | EngineWithModelState, OwnedTemplateMeta, RuntimeResolver> {
getDynamicLayout(state: EngineState, _: RuntimeResolver): Invocation {
let template = state.engine.lookup('template:application') as OwnedTemplate;
let layout = template.asLayout();
return {
handle: layout.compile(),
symbolTable: layout.symbolTable,
};
}
getCapabilities(): ComponentCapabilities {
return CAPABILITIES;
}
create(environment: Environment, state: EngineDefinitionState) {
if (DEBUG) {
this._pushEngineToDebugStack(`engine:${state.name}`, environment);
}
// TODO
// mount is a runtime helper, this shouldn't use dynamic layout
// we should resolve the engine app template in the helper
// it also should use the owner that looked up the mount helper.
let engine = environment.owner.buildChildEngineInstance<Engine>(state.name);
engine.boot();
let applicationFactory = engine.factoryFor(`controller:application`);
let controllerFactory = applicationFactory || generateControllerFactory(engine, 'application');
let controller: any;
let self: RootReference<any>;
let bucket: EngineState | EngineWithModelState;
let tag: Tag;
if (EMBER_ENGINES_MOUNT_PARAMS) {
let modelRef = state.modelRef;
if (modelRef === undefined) {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
} else {
let model = modelRef.value();
let modelRev = modelRef.tag.value();
controller = controllerFactory.create({ model });
self = new RootReference(controller);
tag = modelRef.tag;
bucket = { engine, controller, self, tag, modelRef, modelRev };
}
} else {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
}
return bucket;
}
getSelf({ self }: EngineState): VersionedPathReference<Opaque> {
return self;
}
getTag(state: EngineState | EngineWithModelState): Tag {
return state.tag;
}
getDestructor({ engine }: EngineState): Option<Destroyable> |
didRenderLayout(): void {
if (DEBUG) {
this.debugStack.pop();
}
}
update(bucket: EngineWithModelState): void {
if (EMBER_ENGINES_MOUNT_PARAMS) {
let { controller, modelRef, modelRev } = bucket;
if (!modelRef.tag.validate(modelRev!)) {
let model = modelRef.value();
bucket.modelRev = modelRef.tag.value();
controller.set('model', model);
}
}
}
}
const MOUNT_MANAGER = new MountManager();
export class MountDefinition implements ComponentDefinition {
public state: EngineDefinitionState;
public manager = MOUNT_MANAGER;
constructor(name: string, modelRef: VersionedPathReference<Opaque> | undefined) {
this.state = { name, modelRef };
}
}
| {
return engine;
} | identifier_body |
mount.ts | import { DEBUG } from '@glimmer/env';
import { ComponentCapabilities } from '@glimmer/interfaces';
import { CONSTANT_TAG, Tag, VersionedPathReference } from '@glimmer/reference';
import { ComponentDefinition, Invocation, WithDynamicLayout } from '@glimmer/runtime';
import { Destroyable, Opaque, Option } from '@glimmer/util';
import { generateControllerFactory } from '@ember/-internals/routing';
import { OwnedTemplateMeta } from '@ember/-internals/views';
import { EMBER_ENGINES_MOUNT_PARAMS } from '@ember/canary-features';
import Environment from '../environment';
import RuntimeResolver from '../resolver';
import { OwnedTemplate } from '../template';
import { RootReference } from '../utils/references';
import AbstractManager from './abstract';
// TODO: remove these stubbed interfaces when better typing is in place
interface Engine {
boot(): void;
destroy(): void;
lookup(name: string): any;
factoryFor(name: string): any;
}
interface EngineState {
engine: Engine;
controller: any;
self: RootReference<any>;
tag: Tag;
}
interface EngineWithModelState extends EngineState {
modelRef: VersionedPathReference<Opaque>;
modelRev: number;
}
interface EngineDefinitionState {
name: string;
modelRef: VersionedPathReference<Opaque> | undefined;
}
const CAPABILITIES = {
dynamicLayout: true,
dynamicTag: false,
prepareArgs: false,
createArgs: false,
attributeHook: false,
elementHook: false,
createCaller: true,
dynamicScope: true,
updateHook: true,
createInstance: true,
};
class MountManager
extends AbstractManager<EngineState | EngineWithModelState, EngineDefinitionState>
implements
WithDynamicLayout<EngineState | EngineWithModelState, OwnedTemplateMeta, RuntimeResolver> {
| (state: EngineState, _: RuntimeResolver): Invocation {
let template = state.engine.lookup('template:application') as OwnedTemplate;
let layout = template.asLayout();
return {
handle: layout.compile(),
symbolTable: layout.symbolTable,
};
}
getCapabilities(): ComponentCapabilities {
return CAPABILITIES;
}
create(environment: Environment, state: EngineDefinitionState) {
if (DEBUG) {
this._pushEngineToDebugStack(`engine:${state.name}`, environment);
}
// TODO
// mount is a runtime helper, this shouldn't use dynamic layout
// we should resolve the engine app template in the helper
// it also should use the owner that looked up the mount helper.
let engine = environment.owner.buildChildEngineInstance<Engine>(state.name);
engine.boot();
let applicationFactory = engine.factoryFor(`controller:application`);
let controllerFactory = applicationFactory || generateControllerFactory(engine, 'application');
let controller: any;
let self: RootReference<any>;
let bucket: EngineState | EngineWithModelState;
let tag: Tag;
if (EMBER_ENGINES_MOUNT_PARAMS) {
let modelRef = state.modelRef;
if (modelRef === undefined) {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
} else {
let model = modelRef.value();
let modelRev = modelRef.tag.value();
controller = controllerFactory.create({ model });
self = new RootReference(controller);
tag = modelRef.tag;
bucket = { engine, controller, self, tag, modelRef, modelRev };
}
} else {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
}
return bucket;
}
getSelf({ self }: EngineState): VersionedPathReference<Opaque> {
return self;
}
getTag(state: EngineState | EngineWithModelState): Tag {
return state.tag;
}
getDestructor({ engine }: EngineState): Option<Destroyable> {
return engine;
}
didRenderLayout(): void {
if (DEBUG) {
this.debugStack.pop();
}
}
update(bucket: EngineWithModelState): void {
if (EMBER_ENGINES_MOUNT_PARAMS) {
let { controller, modelRef, modelRev } = bucket;
if (!modelRef.tag.validate(modelRev!)) {
let model = modelRef.value();
bucket.modelRev = modelRef.tag.value();
controller.set('model', model);
}
}
}
}
const MOUNT_MANAGER = new MountManager();
export class MountDefinition implements ComponentDefinition {
public state: EngineDefinitionState;
public manager = MOUNT_MANAGER;
constructor(name: string, modelRef: VersionedPathReference<Opaque> | undefined) {
this.state = { name, modelRef };
}
}
| getDynamicLayout | identifier_name |
mount.ts | import { DEBUG } from '@glimmer/env';
import { ComponentCapabilities } from '@glimmer/interfaces';
import { CONSTANT_TAG, Tag, VersionedPathReference } from '@glimmer/reference';
import { ComponentDefinition, Invocation, WithDynamicLayout } from '@glimmer/runtime';
import { Destroyable, Opaque, Option } from '@glimmer/util';
import { generateControllerFactory } from '@ember/-internals/routing';
import { OwnedTemplateMeta } from '@ember/-internals/views';
import { EMBER_ENGINES_MOUNT_PARAMS } from '@ember/canary-features';
import Environment from '../environment';
import RuntimeResolver from '../resolver';
import { OwnedTemplate } from '../template';
import { RootReference } from '../utils/references';
import AbstractManager from './abstract';
// TODO: remove these stubbed interfaces when better typing is in place
interface Engine {
boot(): void;
destroy(): void;
lookup(name: string): any;
factoryFor(name: string): any;
}
interface EngineState {
engine: Engine;
controller: any;
self: RootReference<any>;
tag: Tag;
}
interface EngineWithModelState extends EngineState {
modelRef: VersionedPathReference<Opaque>;
modelRev: number;
}
interface EngineDefinitionState {
name: string;
modelRef: VersionedPathReference<Opaque> | undefined;
}
const CAPABILITIES = {
dynamicLayout: true,
dynamicTag: false,
prepareArgs: false,
createArgs: false,
attributeHook: false,
elementHook: false,
createCaller: true,
dynamicScope: true,
updateHook: true,
createInstance: true,
};
class MountManager | let template = state.engine.lookup('template:application') as OwnedTemplate;
let layout = template.asLayout();
return {
handle: layout.compile(),
symbolTable: layout.symbolTable,
};
}
getCapabilities(): ComponentCapabilities {
return CAPABILITIES;
}
create(environment: Environment, state: EngineDefinitionState) {
if (DEBUG) {
this._pushEngineToDebugStack(`engine:${state.name}`, environment);
}
// TODO
// mount is a runtime helper, this shouldn't use dynamic layout
// we should resolve the engine app template in the helper
// it also should use the owner that looked up the mount helper.
let engine = environment.owner.buildChildEngineInstance<Engine>(state.name);
engine.boot();
let applicationFactory = engine.factoryFor(`controller:application`);
let controllerFactory = applicationFactory || generateControllerFactory(engine, 'application');
let controller: any;
let self: RootReference<any>;
let bucket: EngineState | EngineWithModelState;
let tag: Tag;
if (EMBER_ENGINES_MOUNT_PARAMS) {
let modelRef = state.modelRef;
if (modelRef === undefined) {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
} else {
let model = modelRef.value();
let modelRev = modelRef.tag.value();
controller = controllerFactory.create({ model });
self = new RootReference(controller);
tag = modelRef.tag;
bucket = { engine, controller, self, tag, modelRef, modelRev };
}
} else {
controller = controllerFactory.create();
self = new RootReference(controller);
tag = CONSTANT_TAG;
bucket = { engine, controller, self, tag };
}
return bucket;
}
getSelf({ self }: EngineState): VersionedPathReference<Opaque> {
return self;
}
getTag(state: EngineState | EngineWithModelState): Tag {
return state.tag;
}
getDestructor({ engine }: EngineState): Option<Destroyable> {
return engine;
}
didRenderLayout(): void {
if (DEBUG) {
this.debugStack.pop();
}
}
update(bucket: EngineWithModelState): void {
if (EMBER_ENGINES_MOUNT_PARAMS) {
let { controller, modelRef, modelRev } = bucket;
if (!modelRef.tag.validate(modelRev!)) {
let model = modelRef.value();
bucket.modelRev = modelRef.tag.value();
controller.set('model', model);
}
}
}
}
const MOUNT_MANAGER = new MountManager();
export class MountDefinition implements ComponentDefinition {
public state: EngineDefinitionState;
public manager = MOUNT_MANAGER;
constructor(name: string, modelRef: VersionedPathReference<Opaque> | undefined) {
this.state = { name, modelRef };
}
} | extends AbstractManager<EngineState | EngineWithModelState, EngineDefinitionState>
implements
WithDynamicLayout<EngineState | EngineWithModelState, OwnedTemplateMeta, RuntimeResolver> {
getDynamicLayout(state: EngineState, _: RuntimeResolver): Invocation { | random_line_split |
de.js | /**
* @license Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or http://ckeditor.com/license
*/
/**
* @fileOverview Defines the {@link CKEDITOR.lang} object, for the
* German language.
*/
/**#@+
@type String
@example
*/
/**
* Contains the dictionary of language entries.
* @namespace
*/
CKEDITOR.lang[ 'de' ] = {
// ARIA description.
editor: 'WYSIWYG-Editor',
editorPanel: 'WYSIWYG-Editor-Leiste',
// Common messages and labels. | editorHelp: 'Drücken Sie ALT 0 für Hilfe',
browseServer: 'Server durchsuchen',
url: 'URL',
protocol: 'Protokoll',
upload: 'Hochladen',
uploadSubmit: 'Zum Server senden',
image: 'Bild',
flash: 'Flash',
form: 'Formular',
checkbox: 'Checkbox',
radio: 'Radiobutton',
textField: 'Textfeld einzeilig',
textarea: 'Textfeld mehrzeilig',
hiddenField: 'Verstecktes Feld',
button: 'Klickbutton',
select: 'Auswahlfeld',
imageButton: 'Bildbutton',
notSet: '<nichts>',
id: 'ID',
name: 'Name',
langDir: 'Schreibrichtung',
langDirLtr: 'Links nach Rechts (LTR)',
langDirRtl: 'Rechts nach Links (RTL)',
langCode: 'Sprachenkürzel',
longDescr: 'Langform URL',
cssClass: 'Stylesheet Klasse',
advisoryTitle: 'Titel Beschreibung',
cssStyle: 'Style',
ok: 'OK',
cancel: 'Abbrechen',
close: 'Schließen',
preview: 'Vorschau',
resize: 'Zum Vergrößern ziehen',
generalTab: 'Allgemein',
advancedTab: 'Erweitert',
validateNumberFailed: 'Dieser Wert ist keine Nummer.',
	confirmNewPage: 'Alle nicht gespeicherten Änderungen gehen verloren. Sind Sie sicher, dass Sie die neue Seite laden wollen?',
confirmCancel: 'Einige Optionen wurden geändert. Wollen Sie den Dialog dennoch schließen?',
options: 'Optionen',
target: 'Zielseite',
targetNew: 'Neues Fenster (_blank)',
targetTop: 'Oberstes Fenster (_top)',
targetSelf: 'Gleiches Fenster (_self)',
targetParent: 'Oberes Fenster (_parent)',
langDirLTR: 'Links nach Rechts (LNR)',
langDirRTL: 'Rechts nach Links (RNL)',
styles: 'Style',
cssClasses: 'Stylesheet Klasse',
width: 'Breite',
height: 'Höhe',
align: 'Ausrichtung',
alignLeft: 'Links',
alignRight: 'Rechts',
alignCenter: 'Zentriert',
alignTop: 'Oben',
alignMiddle: 'Mitte',
alignBottom: 'Unten',
invalidValue : 'Ungültiger Wert.',
invalidHeight: 'Höhe muss eine Zahl sein.',
invalidWidth: 'Breite muss eine Zahl sein.',
	invalidCssLength: 'Der Wert für das Feld "%1" muss eine positive Zahl sein, mit oder ohne gültige CSS-Maßeinheit (px, %, in, cm, mm, em, ex, pt oder pc).',
	invalidHtmlLength: 'Der Wert für das Feld "%1" muss eine positive Zahl sein, mit oder ohne gültige HTML-Maßeinheit (px oder %).',
	invalidInlineStyle: 'Der Wert für den Inline-Stil muss aus einem oder mehreren Tupeln im Format "Name : Wert" bestehen, getrennt durch Semikolons.',
	cssLengthTooltip: 'Geben Sie eine Zahl für einen Wert in Pixeln ein oder eine Zahl mit einer gültigen CSS-Maßeinheit (px, %, in, cm, mm, em, ex, pt oder pc).',
// Put the voice-only part of the label in the span.
unavailable: '%1<span class="cke_accessibility">, nicht verfügbar</span>'
}
}; | common: {
// Screenreader titles. Please note that screenreaders are not always capable
// of reading non-English words. So be careful while translating it. | random_line_split |
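At runtime, plugins read these strings through the editor instance once the language file is loaded; a CKEditor 4-style sketch (element id is illustrative):

    // assumes CKEDITOR is loaded and lang/de.js is available
    CKEDITOR.replace('editor1', { language: 'de' });
    CKEDITOR.on('instanceReady', function(evt) {
        console.log(evt.editor.lang.common.ok);      // 'OK'
        console.log(evt.editor.lang.common.cancel);  // 'Abbrechen'
    });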
info.service.ts | import {Injectable} from "@angular/core";
import {Http} from "@angular/http";
import {Observable} from "rxjs/Observable";
import {Info} from "../model/info";
import {BaseService} from "./base.service"; | isLoginSuccess: boolean = false;
redirectUrl: string;
constructor(private http:Http) {
super()
}
getInfo(): Observable<Info>{
return this.http.get(Constants.BASE_URL+"/info")
.map(res => {
console.log('getInfo: ', res.text());
return res.json();
})
.catch(this.handleError);
}
getResume(): Observable<string>{
return this.http.get(Constants.BASE_URL+"/about")
.map(res => {
console.log('getResume: ', res.text());
return res.text();
})
.catch(this.handleError);
}
getResumeMd(): Observable<string>{
return this.http.get(Constants.ADMIN_BASE_URL+"/resume")
.map(res => {
console.log('getResumeMd: ', res.text());
return res.text();
})
.catch(this.handleError);
}
doLogin(info: Info): Observable<boolean>{
return this.http.post(Constants.BASE_URL+'/login.action',
JSON.stringify(info), {headers: Constants.HEADERS_JSON})
.map( res => {
console.log('doLogin: ', res.text());
        if (res.text() == 'true') this.isLoginSuccess = true;
        else this.isLoginSuccess = false;
        return this.isLoginSuccess;
})
.catch(this.handleError);
}
logout(){
return this.http.get(Constants.BASE_URL+"/logout")
.map(res => {
console.log('logout: ', res.status);
this.isLoginSuccess = false;
})
.catch(this.handleError);
}
} | import {Constants} from "../constants";
@Injectable()
export class InfoService extends BaseService{
| random_line_split |
info.service.ts | import {Injectable} from "@angular/core";
import {Http} from "@angular/http";
import {Observable} from "rxjs/Observable";
import {Info} from "../model/info";
import {BaseService} from "./base.service";
import {Constants} from "../constants";
@Injectable()
export class InfoService extends BaseService{
isLoginSuccess: boolean = false;
redirectUrl: string;
constructor(private http:Http) {
super()
}
getInfo(): Observable<Info>{
return this.http.get(Constants.BASE_URL+"/info")
.map(res => {
console.log('getInfo: ', res.text());
return res.json();
})
.catch(this.handleError);
}
getResume(): Observable<string>{
return this.http.get(Constants.BASE_URL+"/about")
.map(res => {
console.log('getResume: ', res.text());
return res.text();
})
.catch(this.handleError);
}
getResumeMd(): Observable<string>{
return this.http.get(Constants.ADMIN_BASE_URL+"/resume")
.map(res => {
console.log('getResumeMd: ', res.text());
return res.text();
})
.catch(this.handleError);
}
doLogin(info: Info): Observable<boolean>{
return this.http.post(Constants.BASE_URL+'/login.action',
JSON.stringify(info), {headers: Constants.HEADERS_JSON})
.map( res => {
console.log('doLogin: ', res.text());
        if (res.text() == 'true') this.isLoginSuccess = true;
        else this.isLoginSuccess = false;
        return this.isLoginSuccess;
})
.catch(this.handleError);
}
logout() |
} | {
return this.http.get(Constants.BASE_URL+"/logout")
.map(res => {
console.log('logout: ', res.status);
this.isLoginSuccess = false;
})
.catch(this.handleError);
} | identifier_body |
info.service.ts | import {Injectable} from "@angular/core";
import {Http} from "@angular/http";
import {Observable} from "rxjs/Observable";
import {Info} from "../model/info";
import {BaseService} from "./base.service";
import {Constants} from "../constants";
@Injectable()
export class InfoService extends BaseService{
isLoginSuccess: boolean = false;
redirectUrl: string;
constructor(private http:Http) {
super()
}
getInfo(): Observable<Info>{
return this.http.get(Constants.BASE_URL+"/info")
.map(res => {
console.log('getInfo: ', res.text());
return res.json();
})
.catch(this.handleError);
}
getResume(): Observable<string>{
return this.http.get(Constants.BASE_URL+"/about")
.map(res => {
console.log('getResume: ', res.text());
return res.text();
})
.catch(this.handleError);
}
getResumeMd(): Observable<string>{
return this.http.get(Constants.ADMIN_BASE_URL+"/resume")
.map(res => {
console.log('getResumeMd: ', res.text());
return res.text();
})
.catch(this.handleError);
}
| (info: Info): Observable<boolean>{
return this.http.post(Constants.BASE_URL+'/login.action',
JSON.stringify(info), {headers: Constants.HEADERS_JSON})
.map( res => {
console.log('doLogin: ', res.text());
        if (res.text() == 'true') this.isLoginSuccess = true;
        else this.isLoginSuccess = false;
        return this.isLoginSuccess;
})
.catch(this.handleError);
}
logout(){
return this.http.get(Constants.BASE_URL+"/logout")
.map(res => {
console.log('logout: ', res.status);
this.isLoginSuccess = false;
})
.catch(this.handleError);
}
} | doLogin | identifier_name |
MagicWordManagerAI.py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from otp.ai.MagicWordGlobal import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.MsgTypes import *
class MagicWordManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("MagicWordManagerAI")
def sendMagicWord(self, word, targetId):
invokerId = self.air.getAvatarIdFromSender()
invoker = self.air.doId2do.get(invokerId)
        if 'DistributedToonAI' not in str(self.air.doId2do.get(targetId)):
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['Target is not a toon object!'])
return
if not invoker:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing invoker'])
return
if invoker.getAdminAccess() < MINIMUM_MAGICWORD_ACCESS:
self.air.writeServerEvent('suspicious', invokerId, 'Attempted to issue magic word: %s' % word)
dg = PyDatagram()
dg.addServerHeader(self.GetPuppetConnectionChannel(invokerId), self.air.ourChannel, CLIENTAGENT_EJECT)
dg.addUint16(126)
dg.addString('Magic Words are reserved for administrators only!')
self.air.send(dg)
return | return
response = spellbook.process(invoker, target, word)
if response:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', [response])
self.air.writeServerEvent('magic-word',
invokerId, invoker.getAdminAccess(),
targetId, target.getAdminAccess(),
word, response) |
target = self.air.doId2do.get(targetId)
if not target:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing target']) | random_line_split |
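spellbook.process comes from otp.ai.MagicWordGlobal, which is not shown in this dump. Conceptually it is a word-to-handler registry; a toy stand-in for that dispatch:

    # toy illustration only -- not the real otp implementation
    class Spellbook:
        def __init__(self):
            self.words = {}

        def register(self, name):
            def decorator(fn):
                self.words[name.lower()] = fn
                return fn
            return decorator

        def process(self, invoker, target, word):
            name = word.lstrip('~').split(' ', 1)[0].lower()
            handler = self.words.get(name)
            return handler(invoker, target) if handler else 'Unknown magic word!'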
MagicWordManagerAI.py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from otp.ai.MagicWordGlobal import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.MsgTypes import *
class | (DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("MagicWordManagerAI")
def sendMagicWord(self, word, targetId):
invokerId = self.air.getAvatarIdFromSender()
invoker = self.air.doId2do.get(invokerId)
        if 'DistributedToonAI' not in str(self.air.doId2do.get(targetId)):
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['Target is not a toon object!'])
return
if not invoker:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing invoker'])
return
if invoker.getAdminAccess() < MINIMUM_MAGICWORD_ACCESS:
self.air.writeServerEvent('suspicious', invokerId, 'Attempted to issue magic word: %s' % word)
dg = PyDatagram()
dg.addServerHeader(self.GetPuppetConnectionChannel(invokerId), self.air.ourChannel, CLIENTAGENT_EJECT)
dg.addUint16(126)
dg.addString('Magic Words are reserved for administrators only!')
self.air.send(dg)
return
target = self.air.doId2do.get(targetId)
if not target:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing target'])
return
response = spellbook.process(invoker, target, word)
if response:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', [response])
self.air.writeServerEvent('magic-word',
invokerId, invoker.getAdminAccess(),
targetId, target.getAdminAccess(),
word, response)
| MagicWordManagerAI | identifier_name |
MagicWordManagerAI.py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from otp.ai.MagicWordGlobal import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.MsgTypes import *
class MagicWordManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("MagicWordManagerAI")
def sendMagicWord(self, word, targetId):
invokerId = self.air.getAvatarIdFromSender()
invoker = self.air.doId2do.get(invokerId)
        if 'DistributedToonAI' not in str(self.air.doId2do.get(targetId)):
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['Target is not a toon object!'])
return
if not invoker:
|
if invoker.getAdminAccess() < MINIMUM_MAGICWORD_ACCESS:
self.air.writeServerEvent('suspicious', invokerId, 'Attempted to issue magic word: %s' % word)
dg = PyDatagram()
dg.addServerHeader(self.GetPuppetConnectionChannel(invokerId), self.air.ourChannel, CLIENTAGENT_EJECT)
dg.addUint16(126)
dg.addString('Magic Words are reserved for administrators only!')
self.air.send(dg)
return
target = self.air.doId2do.get(targetId)
if not target:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing target'])
return
response = spellbook.process(invoker, target, word)
if response:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', [response])
self.air.writeServerEvent('magic-word',
invokerId, invoker.getAdminAccess(),
targetId, target.getAdminAccess(),
word, response)
| self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing invoker'])
return | conditional_block |
MagicWordManagerAI.py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from otp.ai.MagicWordGlobal import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.MsgTypes import *
class MagicWordManagerAI(DistributedObjectAI):
| notify = DirectNotifyGlobal.directNotify.newCategory("MagicWordManagerAI")
def sendMagicWord(self, word, targetId):
invokerId = self.air.getAvatarIdFromSender()
invoker = self.air.doId2do.get(invokerId)
        if 'DistributedToonAI' not in str(self.air.doId2do.get(targetId)):
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['Target is not a toon object!'])
return
if not invoker:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing invoker'])
return
if invoker.getAdminAccess() < MINIMUM_MAGICWORD_ACCESS:
self.air.writeServerEvent('suspicious', invokerId, 'Attempted to issue magic word: %s' % word)
dg = PyDatagram()
dg.addServerHeader(self.GetPuppetConnectionChannel(invokerId), self.air.ourChannel, CLIENTAGENT_EJECT)
dg.addUint16(126)
dg.addString('Magic Words are reserved for administrators only!')
self.air.send(dg)
return
target = self.air.doId2do.get(targetId)
if not target:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing target'])
return
response = spellbook.process(invoker, target, word)
if response:
self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', [response])
self.air.writeServerEvent('magic-word',
invokerId, invoker.getAdminAccess(),
targetId, target.getAdminAccess(),
word, response) | identifier_body |
|
test_combiner.py | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
# | #
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from cStringIO import StringIO
from path import path
from dryice.combiner import Package, combine_files
from dryice.plugins import Plugin
def test_package_index_generation():
p = path(__file__).dirname() / "noindexapp"
output = StringIO()
plugin = Plugin("noindexapp", p, dict(name="testing"))
combine_files(output, StringIO(), plugin, p)
combined = output.getvalue()
print combined
assert 'tiki.module("noindexapp:index"' in combined
assert 'tiki.main' not in combined | # The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved. | random_line_split |
test_combiner.py | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
#
# The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from cStringIO import StringIO
from path import path
from dryice.combiner import Package, combine_files
from dryice.plugins import Plugin
def test_package_index_generation():
| p = path(__file__).dirname() / "noindexapp"
output = StringIO()
plugin = Plugin("noindexapp", p, dict(name="testing"))
combine_files(output, StringIO(), plugin, p)
combined = output.getvalue()
print combined
assert 'tiki.module("noindexapp:index"' in combined
assert 'tiki.main' not in combined | identifier_body |
|
test_combiner.py | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
#
# The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from cStringIO import StringIO
from path import path
from dryice.combiner import Package, combine_files
from dryice.plugins import Plugin
def | ():
p = path(__file__).dirname() / "noindexapp"
output = StringIO()
plugin = Plugin("noindexapp", p, dict(name="testing"))
combine_files(output, StringIO(), plugin, p)
combined = output.getvalue()
print combined
assert 'tiki.module("noindexapp:index"' in combined
assert 'tiki.main' not in combined
| test_package_index_generation | identifier_name |
item.ts | import { Map } from 'immutable';
import { ItemException } from '../exceptions/index';
// This can be removed when Symbol is known to TypeScript
declare const Symbol;
/**
* Interfaces
*/
export interface ItemData {
[x: string]: any;
}
export interface ItemInterface {
create(data: ItemData): this;
fill(data: ItemData): this;
set(key: string, value: any): this;
get(keys: string[] | string): any;
equals(key: string, value: any): boolean;
uid(): symbol;
isUid(symbol: symbol): boolean;
toObject(): any;
}
/**
* Base Item which can be expanded as required for unique models.
*/
export class Item implements ItemInterface {
/**
     * Each item has its own 'symbol' property which is defined upon construction.
*/
protected symbol : symbol;
/**
     * Immutable object for the Item's data.
*/
protected data : Map<string, any>;
/**
* Constructor.
*
* Defines this.symbol.
*/
constructor() {
this.symbol = Symbol();
}
/**
* Creates / replaces this.data with immutable object containing data provided.
*
* @param data
* @returns {Item}
*/
create(data : ItemData) : this {
if(!Object.keys(data).length) {
throw new ItemException('Unable to create item from empty object.');
}
this.data = Map(data);
return this;
}
/**
* Merges data provided with data contained in this.data.
*
* @param data
* @returns {Item}
*/
fill(data : ItemData) : this {
this.data = this.data.merge(data);
return this;
}
/**
* Sets the given key / value pair in this.data. Will overwrite any existing value for key.
*
* @param key
* @param value
* @returns {Item}
*/
set(key : string, value : any) {
this.data = this.data.set(key, value);
return this;
}
/**
* Return single or array of values corresponding to string / array of keys provided.
*
* @param keys
* @returns {*[]}
*/
get(keys : string[] | string) : any | any[] {
const values = this.getFromArray(this.convertToArray(keys));
if(values.length === 0) |
return values.length === 1 ? values[0] : values;
}
/**
* Gets all values mapping back to array of keys.
*
* @param keys
* @returns {Array<any>}
*/
protected getFromArray(keys : string[]) : any[] {
return this.data.filter((val, key) => keys.includes(key)).toArray();
}
/**
* Determines whether value provided equals known value for key.
*
* @param key
* @param value
* @returns {boolean}
*/
equals(key : string, value : any) : boolean {
return this.data.get(key) === value;
}
/**
* Returns unique symbol for Item.
*
* @returns {symbol}
*/
uid() : symbol {
return this.symbol;
}
/**
* Returns true if provided symbol equals known symbol for Item.
*
* @param symbol
* @returns {boolean}
*/
isUid(symbol : symbol) : boolean {
return this.symbol === symbol;
}
/**
* Returns plain js object of Item's data.
*
* @returns {{}}
*/
toObject() : any {
return this.data.toObject();
}
protected convertToArray<T>(data : T | T[]) : T[] {
return Array.isArray(data) ? data : [data];
}
}
| {
return;
} | conditional_block |
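A usage sketch of the Item class above (Immutable.js Map semantics: every write produces a new map under the hood):

    import { Item } from './item';  // path assumed

    const item = new Item().create({ id: 1, name: 'widget' });
    item.fill({ price: 9.99 });               // merged into the internal Map
    console.log(item.get('name'));            // 'widget'
    console.log(item.get(['id', 'price']));   // [1, 9.99]
    console.log(item.equals('id', 1));        // true
    console.log(item.isUid(item.uid()));      // true -- the symbol is per instance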
item.ts | import { Map } from 'immutable';
import { ItemException } from '../exceptions/index';
// This can be removed when Symbol is known to TypeScript
declare const Symbol;
/**
* Interfaces
*/
export interface ItemData {
[x: string]: any;
}
export interface ItemInterface {
create(data: ItemData): this;
fill(data: ItemData): this;
set(key: string, value: any): this;
get(keys: string[] | string): any;
equals(key: string, value: any): boolean;
uid(): symbol;
isUid(symbol: symbol): boolean;
toObject(): any;
}
/**
* Base Item which can be expanded as required for unique models.
*/
export class Item implements ItemInterface {
/**
     * Each item has its own 'symbol' property which is defined upon construction.
*/
protected symbol : symbol;
/**
     * Immutable object for the Item's data.
*/
protected data : Map<string, any>;
/**
* Constructor.
*
* Defines this.symbol.
*/
constructor() {
this.symbol = Symbol();
}
/**
* Creates / replaces this.data with immutable object containing data provided.
*
* @param data
* @returns {Item}
*/
create(data : ItemData) : this {
if(!Object.keys(data).length) {
throw new ItemException('Unable to create item from empty object.');
}
this.data = Map(data);
return this;
}
/**
* Merges data provided with data contained in this.data.
*
* @param data
* @returns {Item}
*/
| (data : ItemData) : this {
this.data = this.data.merge(data);
return this;
}
/**
* Sets the given key / value pair in this.data. Will overwrite any existing value for key.
*
* @param key
* @param value
* @returns {Item}
*/
set(key : string, value : any) {
this.data = this.data.set(key, value);
return this;
}
/**
* Return single or array of values corresponding to string / array of keys provided.
*
* @param keys
* @returns {*[]}
*/
get(keys : string[] | string) : any | any[] {
const values = this.getFromArray(this.convertToArray(keys));
if(values.length === 0) {
return;
}
return values.length === 1 ? values[0] : values;
}
/**
* Gets all values mapping back to array of keys.
*
* @param keys
* @returns {Array<any>}
*/
protected getFromArray(keys : string[]) : any[] {
return this.data.filter((val, key) => keys.includes(key)).toArray();
}
/**
* Determines whether value provided equals known value for key.
*
* @param key
* @param value
* @returns {boolean}
*/
equals(key : string, value : any) : boolean {
return this.data.get(key) === value;
}
/**
* Returns unique symbol for Item.
*
* @returns {symbol}
*/
uid() : symbol {
return this.symbol;
}
/**
* Returns true if provided symbol equals known symbol for Item.
*
* @param symbol
* @returns {boolean}
*/
isUid(symbol : symbol) : boolean {
return this.symbol === symbol;
}
/**
* Returns plain js object of Item's data.
*
* @returns {{}}
*/
toObject() : any {
return this.data.toObject();
}
protected convertToArray<T>(data : T | T[]) : T[] {
return Array.isArray(data) ? data : [data];
}
}
| fill | identifier_name |
item.ts | import { Map } from 'immutable';
import { ItemException } from '../exceptions/index';
// This can be removed when Symbol is known to TypeScript
declare const Symbol;
/**
* Interfaces
*/
export interface ItemData {
[x: string]: any;
}
export interface ItemInterface {
create(data: ItemData): this;
fill(data: ItemData): this;
set(key: string, value: any): this;
get(keys: string[] | string): any;
equals(key: string, value: any): boolean;
uid(): symbol;
isUid(symbol: symbol): boolean;
toObject(): any;
}
/**
* Base Item which can be expanded as required for unique models.
*/
export class Item implements ItemInterface {
/**
     * Each item has its own 'symbol' property which is defined upon construction.
*/
protected symbol : symbol;
/**
     * Immutable object for the Item's data.
*/
protected data : Map<string, any>;
/**
* Constructor.
*
* Defines this.symbol.
*/
constructor() {
this.symbol = Symbol();
}
/**
* Creates / replaces this.data with immutable object containing data provided.
*
* @param data
* @returns {Item}
*/
create(data : ItemData) : this {
if(!Object.keys(data).length) {
throw new ItemException('Unable to create item from empty object.');
}
this.data = Map(data);
return this;
}
/**
* Merges data provided with data contained in this.data.
*
* @param data
* @returns {Item}
*/
fill(data : ItemData) : this {
this.data = this.data.merge(data);
return this; | *
* @param key
* @param value
* @returns {Item}
*/
set(key : string, value : any) {
this.data = this.data.set(key, value);
return this;
}
/**
* Return single or array of values corresponding to string / array of keys provided.
*
* @param keys
* @returns {*[]}
*/
get(keys : string[] | string) : any | any[] {
const values = this.getFromArray(this.convertToArray(keys));
if(values.length === 0) {
return;
}
return values.length === 1 ? values[0] : values;
}
/**
* Gets all values mapping back to array of keys.
*
* @param keys
* @returns {Array<any>}
*/
protected getFromArray(keys : string[]) : any[] {
return this.data.filter((val, key) => keys.includes(key)).toArray();
}
/**
* Determines whether value provided equals known value for key.
*
* @param key
* @param value
* @returns {boolean}
*/
equals(key : string, value : any) : boolean {
return this.data.get(key) === value;
}
/**
* Returns unique symbol for Item.
*
* @returns {symbol}
*/
uid() : symbol {
return this.symbol;
}
/**
* Returns true if provided symbol equals known symbol for Item.
*
* @param symbol
* @returns {boolean}
*/
isUid(symbol : symbol) : boolean {
return this.symbol === symbol;
}
/**
* Returns plain js object of Item's data.
*
* @returns {{}}
*/
toObject() : any {
return this.data.toObject();
}
protected convertToArray<T>(data : T | T[]) : T[] {
return Array.isArray(data) ? data : [data];
}
} | }
/**
* Sets the given key / value pair in this.data. Will overwrite any existing value for key. | random_line_split |
item.ts | import { Map } from 'immutable';
import { ItemException } from '../exceptions/index';
// This can be removed when Symbol is known to TypeScript
declare const Symbol;
/**
* Interfaces
*/
export interface ItemData {
[x: string]: any;
}
export interface ItemInterface {
create(data: ItemData): this;
fill(data: ItemData): this;
set(key: string, value: any): this;
get(keys: string[] | string): any;
equals(key: string, value: any): boolean;
uid(): symbol;
isUid(symbol: symbol): boolean;
toObject(): any;
}
/**
* Base Item which can be expanded as required for unique models.
*/
export class Item implements ItemInterface {
/**
     * Each item has its own 'symbol' property which is defined upon construction.
*/
protected symbol : symbol;
/**
     * Immutable object for the Item's data.
*/
protected data : Map<string, any>;
/**
* Constructor.
*
* Defines this.symbol.
*/
constructor() {
this.symbol = Symbol();
}
/**
* Creates / replaces this.data with immutable object containing data provided.
*
* @param data
* @returns {Item}
*/
create(data : ItemData) : this {
if(!Object.keys(data).length) {
throw new ItemException('Unable to create item from empty object.');
}
this.data = Map(data);
return this;
}
/**
* Merges data provided with data contained in this.data.
*
* @param data
* @returns {Item}
*/
fill(data : ItemData) : this {
this.data = this.data.merge(data);
return this;
}
/**
* Sets the given key / value pair in this.data. Will overwrite any existing value for key.
*
* @param key
* @param value
* @returns {Item}
*/
set(key : string, value : any) {
this.data = this.data.set(key, value);
return this;
}
/**
* Return single or array of values corresponding to string / array of keys provided.
*
* @param keys
* @returns {*[]}
*/
get(keys : string[] | string) : any | any[] {
const values = this.getFromArray(this.convertToArray(keys));
if(values.length === 0) {
return;
}
return values.length === 1 ? values[0] : values;
}
/**
* Gets all values mapping back to array of keys.
*
* @param keys
* @returns {Array<any>}
*/
protected getFromArray(keys : string[]) : any[] {
return this.data.filter((val, key) => keys.includes(key)).toArray();
}
/**
* Determines whether value provided equals known value for key.
*
* @param key
* @param value
* @returns {boolean}
*/
equals(key : string, value : any) : boolean |
/**
* Returns unique symbol for Item.
*
* @returns {symbol}
*/
uid() : symbol {
return this.symbol;
}
/**
* Returns true if provided symbol equals known symbol for Item.
*
* @param symbol
* @returns {boolean}
*/
isUid(symbol : symbol) : boolean {
return this.symbol === symbol;
}
/**
* Returns plain js object of Item's data.
*
* @returns {{}}
*/
toObject() : any {
return this.data.toObject();
}
protected convertToArray<T>(data : T | T[]) : T[] {
return Array.isArray(data) ? data : [data];
}
}
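A short usage sketch for the Item class above (values are hypothetical; this assumes the Immutable.js-backed implementation shown in this file):

// Hypothetical usage of Item (not part of the original file)
const item = new Item().create({ name: 'Book', price: 10 });
item.fill({ price: 12, stock: 3 });          // merges: keeps 'name', overwrites 'price'
item.set('price', 15);                       // overwrites a single key
console.log(item.get('name'));               // 'Book'
console.log(item.get(['price', 'stock']));   // [15, 3]
console.log(item.equals('price', 15));       // true
console.log(item.isUid(item.uid()));         // true
console.log(item.toObject());                // plain JS object of the Item's data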
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
if *secs >= one |
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
}
| {
let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
} | conditional_block |
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
if *secs >= one {
let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
}
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() | {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
} | identifier_body |
|
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
if *secs >= one {
let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
}
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
}
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
	let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
		if *secs >= one {
			let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
}
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
}
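One behavior worth pinning down with a test (an added observation, not from the original file): with secs == 0 no unit ever satisfies *secs >= one, so the function returns an empty string.

#[test]
fn zero_seconds_returns_empty_string() {
    // No component is appended because 0 >= one is never true for any unit.
    assert_eq!(seconds_to_compound(0), "");
}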
pm.rs | use core::marker::PhantomData;
use volatile::*;
#[repr(C, packed)]
pub struct PowerManager {
pub control: ReadWrite<u8>, // Offset: 0x00 (R/W 8) Control
pub sleep: ReadWrite<u8>, // Offset: 0x01 (R/W 8) Sleep Mode
pub external_control: ReadWrite<u8>, // Offset: 0x02 (R/W 8) External Reset Controller
reserved_1: [u8; 5],
pub cpu_select: ReadWrite<u8>, // Offset: 0x08 (R/W 8) CPU Clock Select
pub apba_select: ReadWrite<u8>, // Offset: 0x09 (R/W 8) APBA Clock Select
pub apbb_select: ReadWrite<u8>, // Offset: 0x0A (R/W 8) APBB Clock Select
pub apbc_select: ReadWrite<u8>, // Offset: 0x0B (R/W 8) APBC Clock Select
reserved_2: [u8; 8],
pub ahb_mask: ReadWrite<u32>, // Offset: 0x14 (R/W 32) AHB Mask
pub apba_mask: ReadWrite<u32>, // Offset: 0x18 (R/W 32) APBA Mask
pub apbb_mask: ReadWrite<u32>, // Offset: 0x1C (R/W 32) APBB Mask
pub apbc_mask: ReadWrite<u32>, // Offset: 0x20 (R/W 32) APBC Mask
reserved_3: [u8; 10],
pub interrupt_clear: ReadWrite<u8>, // Offset: 0x34 (R/W 8) Interrupt Enable Clear
pub interrupt_set: ReadWrite<u8>, // Offset: 0x35 (R/W 8) Interrupt Enable Set
pub interrupt_flag: ReadWrite<u8>, // Offset: 0x36 (R/W 8) Interrupt Flag Status and Clear
reserved_4: [u8; 1],
pub reset_cause: ReadOnly<u8>, // Offset: 0x38 (R/ 8) Reset Cause
private: PhantomData<()>, // This type cannot be constructed.
}
pub const PM_APBAMASK_GCLK: u32 = 1 << 3;
impl PowerManager {
pub unsafe fn set_cpu_and_bus_blocks(&mut self) {
// Now that all system clocks are configured, we can set CPU and APBx BUS clocks.
// There[sic] values are normally the one present after Reset.
let PM_CPUSEL_CPUDIV_POS = 0; // (PM_CPUSEL) CPU Prescaler Selection
let PM_CPUSEL_CPUDIV_DIV1_VAL = 0x0; // (PM_CPUSEL) Divide by 1
let PM_CPUSEL_CPUDIV_DIV1 = PM_CPUSEL_CPUDIV_DIV1_VAL << PM_CPUSEL_CPUDIV_POS;
let PM_APBASEL_APBADIV_DIV1_VAL = 0x0; // (PM_APBASEL) Divide by 1
let PM_APBBSEL_APBBDIV_DIV1_VAL = 0x0; // (PM_APBBSEL) Divide by 1
let PM_APBCSEL_APBCDIV_DIV1_VAL = 0x0; // (PM_APBCSEL) Divide by 1
self.cpu_select.write(PM_CPUSEL_CPUDIV_DIV1);
self.apba_select.write(PM_APBASEL_APBADIV_DIV1_VAL);
self.apbb_select.write(PM_APBBSEL_APBBDIV_DIV1_VAL);
		self.apbc_select.write(PM_APBCSEL_APBCDIV_DIV1_VAL);
	}
}
|
pm.rs | use core::marker::PhantomData;
use volatile::*;
#[repr(C, packed)]
pub struct PowerManager {
pub control: ReadWrite<u8>, // Offset: 0x00 (R/W 8) Control
pub sleep: ReadWrite<u8>, // Offset: 0x01 (R/W 8) Sleep Mode
pub external_control: ReadWrite<u8>, // Offset: 0x02 (R/W 8) External Reset Controller
reserved_1: [u8; 5],
pub cpu_select: ReadWrite<u8>, // Offset: 0x08 (R/W 8) CPU Clock Select
pub apba_select: ReadWrite<u8>, // Offset: 0x09 (R/W 8) APBA Clock Select
pub apbb_select: ReadWrite<u8>, // Offset: 0x0A (R/W 8) APBB Clock Select
pub apbc_select: ReadWrite<u8>, // Offset: 0x0B (R/W 8) APBC Clock Select
reserved_2: [u8; 8],
pub ahb_mask: ReadWrite<u32>, // Offset: 0x14 (R/W 32) AHB Mask
pub apba_mask: ReadWrite<u32>, // Offset: 0x18 (R/W 32) APBA Mask
pub apbb_mask: ReadWrite<u32>, // Offset: 0x1C (R/W 32) APBB Mask
pub apbc_mask: ReadWrite<u32>, // Offset: 0x20 (R/W 32) APBC Mask
reserved_3: [u8; 10],
pub interrupt_clear: ReadWrite<u8>, // Offset: 0x34 (R/W 8) Interrupt Enable Clear
pub interrupt_set: ReadWrite<u8>, // Offset: 0x35 (R/W 8) Interrupt Enable Set
pub interrupt_flag: ReadWrite<u8>, // Offset: 0x36 (R/W 8) Interrupt Flag Status and Clear
reserved_4: [u8; 1],
pub reset_cause: ReadOnly<u8>, // Offset: 0x38 (R/ 8) Reset Cause
private: PhantomData<()>, // This type cannot be constructed.
}
pub const PM_APBAMASK_GCLK: u32 = 1 << 3;
impl PowerManager {
pub unsafe fn set_cpu_and_bus_blocks(&mut self) {
// Now that all system clocks are configured, we can set CPU and APBx BUS clocks.
// There[sic] values are normally the one present after Reset.
let PM_CPUSEL_CPUDIV_POS = 0; // (PM_CPUSEL) CPU Prescaler Selection
let PM_CPUSEL_CPUDIV_DIV1_VAL = 0x0; // (PM_CPUSEL) Divide by 1
let PM_CPUSEL_CPUDIV_DIV1 = PM_CPUSEL_CPUDIV_DIV1_VAL << PM_CPUSEL_CPUDIV_POS;
let PM_APBASEL_APBADIV_DIV1_VAL = 0x0; // (PM_APBASEL) Divide by 1
let PM_APBBSEL_APBBDIV_DIV1_VAL = 0x0; // (PM_APBBSEL) Divide by 1
let PM_APBCSEL_APBCDIV_DIV1_VAL = 0x0; // (PM_APBCSEL) Divide by 1
self.cpu_select.write(PM_CPUSEL_CPUDIV_DIV1);
self.apba_select.write(PM_APBASEL_APBADIV_DIV1_VAL);
self.apbb_select.write(PM_APBBSEL_APBBDIV_DIV1_VAL);
self.apbc_select.write(PM_APBCSEL_APBCDIV_DIV1_VAL);
}
}
pm.rs | use core::marker::PhantomData;
use volatile::*;
#[repr(C, packed)]
pub struct PowerManager {
pub control: ReadWrite<u8>, // Offset: 0x00 (R/W 8) Control
pub sleep: ReadWrite<u8>, // Offset: 0x01 (R/W 8) Sleep Mode
pub external_control: ReadWrite<u8>, // Offset: 0x02 (R/W 8) External Reset Controller
reserved_1: [u8; 5],
pub cpu_select: ReadWrite<u8>, // Offset: 0x08 (R/W 8) CPU Clock Select
pub apba_select: ReadWrite<u8>, // Offset: 0x09 (R/W 8) APBA Clock Select
pub apbb_select: ReadWrite<u8>, // Offset: 0x0A (R/W 8) APBB Clock Select
pub apbc_select: ReadWrite<u8>, // Offset: 0x0B (R/W 8) APBC Clock Select
reserved_2: [u8; 8],
pub ahb_mask: ReadWrite<u32>, // Offset: 0x14 (R/W 32) AHB Mask
pub apba_mask: ReadWrite<u32>, // Offset: 0x18 (R/W 32) APBA Mask
pub apbb_mask: ReadWrite<u32>, // Offset: 0x1C (R/W 32) APBB Mask
pub apbc_mask: ReadWrite<u32>, // Offset: 0x20 (R/W 32) APBC Mask
reserved_3: [u8; 10],
pub interrupt_clear: ReadWrite<u8>, // Offset: 0x34 (R/W 8) Interrupt Enable Clear
pub interrupt_set: ReadWrite<u8>, // Offset: 0x35 (R/W 8) Interrupt Enable Set
pub interrupt_flag: ReadWrite<u8>, // Offset: 0x36 (R/W 8) Interrupt Flag Status and Clear
reserved_4: [u8; 1],
pub reset_cause: ReadOnly<u8>, // Offset: 0x38 (R/ 8) Reset Cause
private: PhantomData<()>, // This type cannot be constructed.
}
pub const PM_APBAMASK_GCLK: u32 = 1 << 3;
impl PowerManager {
	pub unsafe fn set_cpu_and_bus_blocks(&mut self) {
// Now that all system clocks are configured, we can set CPU and APBx BUS clocks.
// There[sic] values are normally the one present after Reset.
let PM_CPUSEL_CPUDIV_POS = 0; // (PM_CPUSEL) CPU Prescaler Selection
let PM_CPUSEL_CPUDIV_DIV1_VAL = 0x0; // (PM_CPUSEL) Divide by 1
let PM_CPUSEL_CPUDIV_DIV1 = PM_CPUSEL_CPUDIV_DIV1_VAL << PM_CPUSEL_CPUDIV_POS;
let PM_APBASEL_APBADIV_DIV1_VAL = 0x0; // (PM_APBASEL) Divide by 1
let PM_APBBSEL_APBBDIV_DIV1_VAL = 0x0; // (PM_APBBSEL) Divide by 1
let PM_APBCSEL_APBCDIV_DIV1_VAL = 0x0; // (PM_APBCSEL) Divide by 1
self.cpu_select.write(PM_CPUSEL_CPUDIV_DIV1);
self.apba_select.write(PM_APBASEL_APBADIV_DIV1_VAL);
self.apbb_select.write(PM_APBBSEL_APBBDIV_DIV1_VAL);
self.apbc_select.write(PM_APBCSEL_APBCDIV_DIV1_VAL);
	}
}
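A minimal usage sketch for PowerManager (assumptions: a SAMD21-class target where the PM block is memory-mapped; the base address below is an assumption taken from SAMD21 documentation, verify it for your part):

// Hypothetical accessor, not part of the original module.
const PM_BASE: usize = 0x4000_0400; // assumed SAMD21 Power Manager base address

unsafe fn power_manager() -> &'static mut PowerManager {
    &mut *(PM_BASE as *mut PowerManager)
}

fn enable_gclk_bus_clock() {
    unsafe {
        let pm = power_manager();
        pm.set_cpu_and_bus_blocks(); // CPU/APBA/APBB/APBC prescalers to DIV1
        // Enable the GCLK interface clock on the APBA bus.
        let mask = pm.apba_mask.read() | PM_APBAMASK_GCLK;
        pm.apba_mask.write(mask);
    }
}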
payments.js | "use strict";
const express = require('express');
const router = express.Router();
const mongoose = require('mongoose');
const PaymentSchema = require("./../schemas/payment");
const PaymentModel = mongoose.model('Payment', PaymentSchema);
const parser = require("./../parser/parser");
const _ = require("lodash");
const moment = require("moment");
router.get('/get', (req, res) => {
PaymentModel
.filter(req)
.then(result => res.json(result))
.catch(error => {
res.json({
success: false,
error: error
});
});
});
router.post('/add', (req, res) => {
PaymentModel.addOne(req, (err, result) => {
res.json(result);
});
});
router.post('/addmany', (req, res) => {
if (req.body.data) {
for (const item of req.body.data) {
delete item._id;
}
}
PaymentModel
.addMany(req)
.then(payments => res.json(payments))
.catch(error => res.json({error}));
});
/*router.post('/importcollection', (req, res) => {
const collection = require("./../parser/payments.json");
const request = _.chain(collection)
.map(item => {
delete item._id;
return item;
})
.thru(value => {
return {
body: {
data: value
}
}
})
.value();
PaymentModel
.addMany(request)
.then(payments => res.json(payments))
.catch(error => res.json({error}));
});*/
router.post('/importxls', (req, res) => {
const collection = parser.readAmroReport(req.files.myfile.data);
const mappedCollection = _.map(collection, item => {
const payment = {
name: item.description,
category: "",
date: moment(item.valuedate, "YYYYMMDD").utc().format(),
			value: Math.abs(item.amount),
		};
		parser.prePopulateCategoryForPayment(payment, item.description);
		return payment;
	});
res.json(mappedCollection);
});
router.put('/:id', (req, res) => {
PaymentModel
.updateOnePayment(req)
.then(() => res.json({}))
.catch(error => res.json({error}));
});
router.delete('/:id', (req, res) => {
PaymentModel
.deleteOnePayment(req)
.then(() => res.send({}))
.catch(error => res.json({error}));
});
router.get('/report/groups', (req, res) => {
PaymentModel
.getGroupedReport()
.then(docs => res.send(docs))
.catch(error => res.send({error}));
});
module.exports = router;
payments.js | "use strict";
const express = require('express');
const router = express.Router();
const mongoose = require('mongoose');
const PaymentSchema = require("./../schemas/payment");
const PaymentModel = mongoose.model('Payment', PaymentSchema);
const parser = require("./../parser/parser");
const _ = require("lodash");
const moment = require("moment");
router.get('/get', (req, res) => {
PaymentModel
.filter(req)
.then(result => res.json(result))
.catch(error => {
res.json({
success: false,
error: error
});
});
});
router.post('/add', (req, res) => {
PaymentModel.addOne(req, (err, result) => {
res.json(result);
});
});
router.post('/addmany', (req, res) => {
	if (req.body.data) {
		for (const item of req.body.data) {
			delete item._id;
		}
	}
PaymentModel
.addMany(req)
.then(payments => res.json(payments))
.catch(error => res.json({error}));
});
/*router.post('/importcollection', (req, res) => {
const collection = require("./../parser/payments.json");
const request = _.chain(collection)
.map(item => {
delete item._id;
return item;
})
.thru(value => {
return {
body: {
data: value
}
}
})
.value();
PaymentModel
.addMany(request)
.then(payments => res.json(payments))
.catch(error => res.json({error}));
});*/
router.post('/importxls', (req, res) => {
const collection = parser.readAmroReport(req.files.myfile.data);
const mappedCollection = _.map(collection, item => {
const payment = {
name: item.description,
category: "",
date: moment(item.valuedate, "YYYYMMDD").utc().format(),
value: Math.abs(item.amount),
};
parser.prePopulateCategoryForPayment(payment, item.description);
return payment;
});
res.json(mappedCollection);
});
router.put('/:id', (req, res) => {
PaymentModel
.updateOnePayment(req)
.then(() => res.json({}))
.catch(error => res.json({error}));
});
router.delete('/:id', (req, res) => {
PaymentModel
.deleteOnePayment(req)
.then(() => res.send({}))
.catch(error => res.json({error}));
});
router.get('/report/groups', (req, res) => {
PaymentModel
.getGroupedReport()
.then(docs => res.send(docs))
.catch(error => res.send({error}));
});
module.exports = router;
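A small wiring sketch for this router (assumptions: an Express app entry point, an established mongoose connection, and the express-fileupload middleware, which is what populates req.files for the /importxls route):

// app.js (hypothetical entry point, not part of the original file)
const express = require('express');
const fileUpload = require('express-fileupload');
const paymentsRouter = require('./routes/payments'); // assumed path to this file

const app = express();
app.use(express.json());  // parses req.body for /add and /addmany
app.use(fileUpload());    // populates req.files.myfile for /importxls
app.use('/payments', paymentsRouter);
app.listen(3000, () => console.log('listening on 3000'));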
log_parser.py | #==============================================================================
# Copyright 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
def parse_logs(log_lines, verbose=False):
"""
Returns ngraph metrics parsed out of the specified log output.
Regular log parsing will return:
- Number of nodes in the graph
- Number of nodes marked for clustering
- Number of ngraph clusters
Verbose log parsing will return all of the above, in addition to:
- Percentage of nodes clustered
- Has deadness issues
- Has static input issues
- Reasons why edge connected clusters did not merge
- Reasons why edge connected encapsulates did not merge
- Nodes per cluster
- Types of edges
- Op not supported
- Op failed type constraint
"""
if type(log_lines) == type(''):
log_lines = log_lines.split('\n')
else:
assert type(log_lines) == type(
[]
), "If log_lines if not a string, it should have been a list, but instead it is a " + type(
log_lines)
assert all([
type(i) == type('') and '\n' not in i for i in log_lines
]), 'Each element of the list should be a string and not contain new lines'
all_results = {}
curr_result = {}
ctr = 0
prev_line = ""
for line in log_lines:
start_of_subgraph = "NGTF_SUMMARY: Op_not_supported:" in line
# If logs of a new sub-graph is starting, save the old one
if start_of_subgraph:
if len(curr_result) > 0:
all_results[str(ctr)] = curr_result
curr_result = {}
ctr += 1
# keep collecting information in curr_result
if line.startswith('NGTF_SUMMARY'):
if 'Number of nodes in the graph' in line:
curr_result['num_nodes_in_graph'] = int(
line.split(':')[-1].strip())
elif 'Number of nodes marked for clustering' in line:
curr_result['num_nodes_marked_for_clustering'] = int(
line.split(':')[-1].strip().split(' ')[0].strip())
if verbose:
# get percentage of total nodes
match = re.search("(\d+(\.\d+)?%)", line)
nodes_clustered = ""
if match:
nodes_clustered = match.group(0)
curr_result["percentage_nodes_clustered"] = nodes_clustered
elif 'Number of ngraph clusters' in line:
curr_result['num_ng_clusters'] = int(
line.split(':')[-1].strip())
if verbose and ('DEADNESS' in line and 'STATICINPUT' in line):
line = line[len("NGTF_SUMMARY:"):]
reasons = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
if "reasons why a pair of edge connected encapsulates did not merge" in prev_line:
curr_result[
'why_edge_connected_encapsulates_did_not_merge'] = reasons
elif "reasons why a pair of edge connected clusters did not merge" in prev_line:
curr_result[
'why_edge_connected_clusters_did_not_merge'] = reasons
# default has_deadness_issues and has_static_input_issues to 'No'
if 'has_deadness_issues' not in curr_result.keys():
curr_result['has_deadness_issues'] = "No"
if 'has_static_input_issues' not in curr_result.keys():
curr_result['has_static_input_issues'] = "No"
# set has deadness/static input issues to 'Yes' if the value is > 0
if int(reasons['DEADNESS']) > 0:
curr_result['has_deadness_issues'] = "Yes"
if int(reasons['STATICINPUT']) > 0:
curr_result['has_static_input_issues'] = "Yes"
elif verbose and 'Nodes per cluster' in line:
curr_result['nodes_per_cluster'] = float(
line.split(':')[-1].strip())
elif verbose and 'Types of edges::' in line:
line = line[len("NGTF_SUMMARY: Types of edges:: "):]
edge_types = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
curr_result["types_of_edges"] = edge_type
s
elif verbose and 'Op_not_supported' in line:
curr_result["op_not_supported"] = \
[i.strip() for i in line[len("NGTF_SUMMARY: Op_not_supported: "):].split(",")]
elif verbose and 'Op_failed_type_constraint' in line:
curr_result["op_failed_type_constraint"] = \
[i.strip() for i in line[len(
"NGTF_SUMMARY: Op_failed_type_constraint: "):].split(",")]
prev_line = line
# add the last section to the results
all_results[str(ctr)] = curr_result
return all_results
def compare_parsed_values(parsed_vals, expected_vals):
# Both inputs are expected to be 2 dictionaries (representing jsons)
# The constraints in expected is <= parsed_vals. Parsed_vals should have all possible values that the parser can spit out. However expected_vals can be relaxed (even empty) and choose to only verify/match certain fields
| match = lambda current, expected: all(
[expected[k] == current[k] for k in expected])
for graph_id_1 in expected_vals:
# The ordering is not important and could be different, hence search through all elements of parsed_vals
matching_id = None
for graph_id_2 in parsed_vals:
if match(expected_vals[graph_id_1], parsed_vals[graph_id_2]):
matching_id = graph_id_2
break
if matching_id is None:
return False, 'Failed to match expected graph info ' + graph_id_1 + " which was: " + str(
expected_vals[graph_id_1]
) + "\n. Got the following parsed results: " + str(parsed_vals)
else:
parsed_vals.pop(matching_id)
    return True, ''
|
log_parser.py | #==============================================================================
# Copyright 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
def parse_logs(log_lines, verbose=False):
"""
Returns ngraph metrics parsed out of the specified log output.
Regular log parsing will return:
- Number of nodes in the graph
- Number of nodes marked for clustering
- Number of ngraph clusters
Verbose log parsing will return all of the above, in addition to:
- Percentage of nodes clustered
- Has deadness issues
- Has static input issues
- Reasons why edge connected clusters did not merge
- Reasons why edge connected encapsulates did not merge
- Nodes per cluster
- Types of edges
- Op not supported
- Op failed type constraint
"""
if type(log_lines) == type(''):
log_lines = log_lines.split('\n')
else:
assert type(log_lines) == type(
[]
), "If log_lines if not a string, it should have been a list, but instead it is a " + type(
log_lines)
assert all([
type(i) == type('') and '\n' not in i for i in log_lines
]), 'Each element of the list should be a string and not contain new lines'
all_results = {}
curr_result = {}
ctr = 0
prev_line = ""
for line in log_lines:
start_of_subgraph = "NGTF_SUMMARY: Op_not_supported:" in line
# If logs of a new sub-graph is starting, save the old one
if start_of_subgraph:
if len(curr_result) > 0:
all_results[str(ctr)] = curr_result
curr_result = {}
ctr += 1
# keep collecting information in curr_result
if line.startswith('NGTF_SUMMARY'):
if 'Number of nodes in the graph' in line:
curr_result['num_nodes_in_graph'] = int(
line.split(':')[-1].strip())
elif 'Number of nodes marked for clustering' in line:
curr_result['num_nodes_marked_for_clustering'] = int(
line.split(':')[-1].strip().split(' ')[0].strip())
if verbose:
# get percentage of total nodes
match = re.search("(\d+(\.\d+)?%)", line)
nodes_clustered = ""
if match:
nodes_clustered = match.group(0)
curr_result["percentage_nodes_clustered"] = nodes_clustered
elif 'Number of ngraph clusters' in line:
curr_result['num_ng_clusters'] = int(
line.split(':')[-1].strip())
if verbose and ('DEADNESS' in line and 'STATICINPUT' in line):
line = line[len("NGTF_SUMMARY:"):]
reasons = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
if "reasons why a pair of edge connected encapsulates did not merge" in prev_line:
curr_result[
'why_edge_connected_encapsulates_did_not_merge'] = reasons
elif "reasons why a pair of edge connected clusters did not merge" in prev_line:
curr_result[
'why_edge_connected_clusters_did_not_merge'] = reasons
# default has_deadness_issues and has_static_input_issues to 'No'
if 'has_deadness_issues' not in curr_result.keys():
curr_result['has_deadness_issues'] = "No"
if 'has_static_input_issues' not in curr_result.keys():
curr_result['has_static_input_issues'] = "No"
# set has deadness/static input issues to 'Yes' if the value is > 0
if int(reasons['DEADNESS']) > 0:
curr_result['has_deadness_issues'] = "Yes"
if int(reasons['STATICINPUT']) > 0:
curr_result['has_static_input_issues'] = "Yes"
elif verbose and 'Nodes per cluster' in line:
curr_result['nodes_per_cluster'] = float(
line.split(':')[-1].strip())
elif verbose and 'Types of edges::' in line:
line = line[len("NGTF_SUMMARY: Types of edges:: "):]
edge_types = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
curr_result["types_of_edges"] = edge_type
s
elif verbose and 'Op_not_supported' in line:
curr_result["op_not_supported"] = \
[i.strip() for i in line[len("NGTF_SUMMARY: Op_not_supported: "):].split(",")]
elif verbose and 'Op_failed_type_constraint' in line:
curr_result["op_failed_type_constraint"] = \
[i.strip() for i in line[len(
"NGTF_SUMMARY: Op_failed_type_constraint: "):].split(",")]
        prev_line = line
    # add the last section to the results
    all_results[str(ctr)] = curr_result
    return all_results
return all_results
def compare_parsed_values(parsed_vals, expected_vals):
# Both inputs are expected to be 2 dictionaries (representing jsons)
# The constraints in expected is <= parsed_vals. Parsed_vals should have all possible values that the parser can spit out. However expected_vals can be relaxed (even empty) and choose to only verify/match certain fields
match = lambda current, expected: all(
[expected[k] == current[k] for k in expected])
for graph_id_1 in expected_vals:
# The ordering is not important and could be different, hence search through all elements of parsed_vals
matching_id = None
for graph_id_2 in parsed_vals:
if match(expected_vals[graph_id_1], parsed_vals[graph_id_2]):
matching_id = graph_id_2
break
if matching_id is None:
return False, 'Failed to match expected graph info ' + graph_id_1 + " which was: " + str(
expected_vals[graph_id_1]
) + "\n. Got the following parsed results: " + str(parsed_vals)
else:
parsed_vals.pop(matching_id)
return True, '' | # add the last section to the results
all_results[str(ctr)] = curr_result | random_line_split |
log_parser.py | #==============================================================================
# Copyright 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
def parse_logs(log_lines, verbose=False):
"""
Returns ngraph metrics parsed out of the specified log output.
Regular log parsing will return:
- Number of nodes in the graph
- Number of nodes marked for clustering
- Number of ngraph clusters
Verbose log parsing will return all of the above, in addition to:
- Percentage of nodes clustered
- Has deadness issues
- Has static input issues
- Reasons why edge connected clusters did not merge
- Reasons why edge connected encapsulates did not merge
- Nodes per cluster
- Types of edges
- Op not supported
- Op failed type constraint
"""
if type(log_lines) == type(''):
log_lines = log_lines.split('\n')
else:
assert type(log_lines) == type(
[]
), "If log_lines if not a string, it should have been a list, but instead it is a " + type(
log_lines)
assert all([
type(i) == type('') and '\n' not in i for i in log_lines
]), 'Each element of the list should be a string and not contain new lines'
all_results = {}
curr_result = {}
ctr = 0
prev_line = ""
for line in log_lines:
start_of_subgraph = "NGTF_SUMMARY: Op_not_supported:" in line
# If logs of a new sub-graph is starting, save the old one
if start_of_subgraph:
if len(curr_result) > 0:
all_results[str(ctr)] = curr_result
curr_result = {}
ctr += 1
# keep collecting information in curr_result
if line.startswith('NGTF_SUMMARY'):
            if 'Number of nodes in the graph' in line:
                curr_result['num_nodes_in_graph'] = int(
                    line.split(':')[-1].strip())
elif 'Number of nodes marked for clustering' in line:
curr_result['num_nodes_marked_for_clustering'] = int(
line.split(':')[-1].strip().split(' ')[0].strip())
if verbose:
# get percentage of total nodes
match = re.search("(\d+(\.\d+)?%)", line)
nodes_clustered = ""
if match:
nodes_clustered = match.group(0)
curr_result["percentage_nodes_clustered"] = nodes_clustered
elif 'Number of ngraph clusters' in line:
curr_result['num_ng_clusters'] = int(
line.split(':')[-1].strip())
if verbose and ('DEADNESS' in line and 'STATICINPUT' in line):
line = line[len("NGTF_SUMMARY:"):]
reasons = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
if "reasons why a pair of edge connected encapsulates did not merge" in prev_line:
curr_result[
'why_edge_connected_encapsulates_did_not_merge'] = reasons
elif "reasons why a pair of edge connected clusters did not merge" in prev_line:
curr_result[
'why_edge_connected_clusters_did_not_merge'] = reasons
# default has_deadness_issues and has_static_input_issues to 'No'
if 'has_deadness_issues' not in curr_result.keys():
curr_result['has_deadness_issues'] = "No"
if 'has_static_input_issues' not in curr_result.keys():
curr_result['has_static_input_issues'] = "No"
# set has deadness/static input issues to 'Yes' if the value is > 0
if int(reasons['DEADNESS']) > 0:
curr_result['has_deadness_issues'] = "Yes"
if int(reasons['STATICINPUT']) > 0:
curr_result['has_static_input_issues'] = "Yes"
elif verbose and 'Nodes per cluster' in line:
curr_result['nodes_per_cluster'] = float(
line.split(':')[-1].strip())
elif verbose and 'Types of edges::' in line:
line = line[len("NGTF_SUMMARY: Types of edges:: "):]
edge_types = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
curr_result["types_of_edges"] = edge_type
s
elif verbose and 'Op_not_supported' in line:
curr_result["op_not_supported"] = \
[i.strip() for i in line[len("NGTF_SUMMARY: Op_not_supported: "):].split(",")]
elif verbose and 'Op_failed_type_constraint' in line:
curr_result["op_failed_type_constraint"] = \
[i.strip() for i in line[len(
"NGTF_SUMMARY: Op_failed_type_constraint: "):].split(",")]
prev_line = line
# add the last section to the results
all_results[str(ctr)] = curr_result
return all_results
def compare_parsed_values(parsed_vals, expected_vals):
# Both inputs are expected to be 2 dictionaries (representing jsons)
# The constraints in expected is <= parsed_vals. Parsed_vals should have all possible values that the parser can spit out. However expected_vals can be relaxed (even empty) and choose to only verify/match certain fields
match = lambda current, expected: all(
[expected[k] == current[k] for k in expected])
for graph_id_1 in expected_vals:
# The ordering is not important and could be different, hence search through all elements of parsed_vals
matching_id = None
for graph_id_2 in parsed_vals:
if match(expected_vals[graph_id_1], parsed_vals[graph_id_2]):
matching_id = graph_id_2
break
if matching_id is None:
return False, 'Failed to match expected graph info ' + graph_id_1 + " which was: " + str(
expected_vals[graph_id_1]
) + "\n. Got the following parsed results: " + str(parsed_vals)
else:
parsed_vals.pop(matching_id)
return True, ''
log_parser.py | #==============================================================================
# Copyright 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
def parse_logs(log_lines, verbose=False):
"""
Returns ngraph metrics parsed out of the specified log output.
Regular log parsing will return:
- Number of nodes in the graph
- Number of nodes marked for clustering
- Number of ngraph clusters
Verbose log parsing will return all of the above, in addition to:
- Percentage of nodes clustered
- Has deadness issues
- Has static input issues
- Reasons why edge connected clusters did not merge
- Reasons why edge connected encapsulates did not merge
- Nodes per cluster
- Types of edges
- Op not supported
- Op failed type constraint
"""
if type(log_lines) == type(''):
log_lines = log_lines.split('\n')
else:
assert type(log_lines) == type(
[]
), "If log_lines if not a string, it should have been a list, but instead it is a " + type(
log_lines)
assert all([
type(i) == type('') and '\n' not in i for i in log_lines
]), 'Each element of the list should be a string and not contain new lines'
all_results = {}
curr_result = {}
ctr = 0
prev_line = ""
for line in log_lines:
start_of_subgraph = "NGTF_SUMMARY: Op_not_supported:" in line
# If logs of a new sub-graph is starting, save the old one
if start_of_subgraph:
if len(curr_result) > 0:
all_results[str(ctr)] = curr_result
curr_result = {}
ctr += 1
# keep collecting information in curr_result
if line.startswith('NGTF_SUMMARY'):
if 'Number of nodes in the graph' in line:
curr_result['num_nodes_in_graph'] = int(
line.split(':')[-1].strip())
elif 'Number of nodes marked for clustering' in line:
curr_result['num_nodes_marked_for_clustering'] = int(
line.split(':')[-1].strip().split(' ')[0].strip())
if verbose:
# get percentage of total nodes
match = re.search("(\d+(\.\d+)?%)", line)
nodes_clustered = ""
if match:
nodes_clustered = match.group(0)
curr_result["percentage_nodes_clustered"] = nodes_clustered
elif 'Number of ngraph clusters' in line:
curr_result['num_ng_clusters'] = int(
line.split(':')[-1].strip())
if verbose and ('DEADNESS' in line and 'STATICINPUT' in line):
line = line[len("NGTF_SUMMARY:"):]
reasons = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
if "reasons why a pair of edge connected encapsulates did not merge" in prev_line:
curr_result[
'why_edge_connected_encapsulates_did_not_merge'] = reasons
elif "reasons why a pair of edge connected clusters did not merge" in prev_line:
curr_result[
'why_edge_connected_clusters_did_not_merge'] = reasons
# default has_deadness_issues and has_static_input_issues to 'No'
if 'has_deadness_issues' not in curr_result.keys():
curr_result['has_deadness_issues'] = "No"
if 'has_static_input_issues' not in curr_result.keys():
curr_result['has_static_input_issues'] = "No"
# set has deadness/static input issues to 'Yes' if the value is > 0
if int(reasons['DEADNESS']) > 0:
curr_result['has_deadness_issues'] = "Yes"
if int(reasons['STATICINPUT']) > 0:
curr_result['has_static_input_issues'] = "Yes"
elif verbose and 'Nodes per cluster' in line:
curr_result['nodes_per_cluster'] = float(
line.split(':')[-1].strip())
elif verbose and 'Types of edges::' in line:
line = line[len("NGTF_SUMMARY: Types of edges:: "):]
edge_types = dict([i.strip()
for i in item.split(":")]
for item in line.split(","))
curr_result["types_of_edges"] = edge_type
s
elif verbose and 'Op_not_supported' in line:
curr_result["op_not_supported"] = \
[i.strip() for i in line[len("NGTF_SUMMARY: Op_not_supported: "):].split(",")]
elif verbose and 'Op_failed_type_constraint' in line:
curr_result["op_failed_type_constraint"] = \
[i.strip() for i in line[len(
"NGTF_SUMMARY: Op_failed_type_constraint: "):].split(",")]
prev_line = line
# add the last section to the results
all_results[str(ctr)] = curr_result
return all_results
def compare_parsed_values(parsed_vals, expected_vals):
# Both inputs are expected to be 2 dictionaries (representing jsons)
# The constraints in expected is <= parsed_vals. Parsed_vals should have all possible values that the parser can spit out. However expected_vals can be relaxed (even empty) and choose to only verify/match certain fields
match = lambda current, expected: all(
[expected[k] == current[k] for k in expected])
for graph_id_1 in expected_vals:
# The ordering is not important and could be different, hence search through all elements of parsed_vals
matching_id = None
for graph_id_2 in parsed_vals:
if match(expected_vals[graph_id_1], parsed_vals[graph_id_2]):
matching_id = graph_id_2
break
if matching_id is None:
return False, 'Failed to match expected graph info ' + graph_id_1 + " which was: " + str(
expected_vals[graph_id_1]
) + "\n. Got the following parsed results: " + str(parsed_vals)
else:
parsed_vals.pop(matching_id)
return True, ''
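A quick smoke test for the two helpers above (the NGTF_SUMMARY lines are invented here purely to exercise the parser):

if __name__ == '__main__':
    sample_log = '\n'.join([
        'NGTF_SUMMARY: Op_not_supported: Abs, Sinh',
        'NGTF_SUMMARY: Number of nodes in the graph: 250',
        'NGTF_SUMMARY: Number of nodes marked for clustering: 200 (80.0% of total nodes)',
        'NGTF_SUMMARY: Number of ngraph clusters: 3',
    ])
    parsed = parse_logs(sample_log)
    # Only constrain the fields we care about; extra parsed fields are allowed.
    ok, message = compare_parsed_values(parsed, {'0': {'num_nodes_in_graph': 250,
                                                       'num_ng_clusters': 3}})
    print(ok, message)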
pyrarcr-0.2.py | #!/usr/bin/env python3
##### ##### ##### ##### #### ####
# # # # # # # # # # #### #### # # #
##### #### ##### ##### ##### # # # # ####
# # # # # # # # # # # # #
# # # # # # # # #### # #### # ####
#finds the password of a desired rar or zip file using a brute-force algorithm
##will fail to find the password if the password has a character that isnt in
##the english alphabet or isnt a number (you can change the char. list though)
#now using itertools!
#importing needed modules
import time,os,sys,shutil,itertools
#checking if the user has unrar/p7zip installed
for which in ["unrar","p7zip"]:
if not shutil.which(which):
print("ERROR:",which,"isn't installed.\nExiting...")
sys.exit(-1)
#defining the function
def rc(rf):
alphabet="aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ1234567890"
start=time.time()
tryn=0
for a in range(1,len(alphabet)+1):
for b in itertools.product(alphabet,repeat=a):
k="".join(b)
if rf[-4:]==".rar":
print("Trying:",k)
kf=os.popen("unrar t -y -p%s %s 2>&1|grep 'All OK'"%(k,rf))
tryn+=1
for rkf in kf.readlines():
if rkf=="All OK\n":
print("Found password:",repr(k))
print("Tried combination count:",tryn)
print("It took",round(time.time()-start,3),"seconds")
print("Exiting...")
time.sleep(2)
sys.exit(1)
elif rf[-4:]==".zip" or rf[-3:]==".7z":
print("Trying:",k)
kf=os.popen("7za t -p%s %s 2>&1|grep 'Everything is Ok'"%(k,rf))
tryn+=1
for rkf in kf.readlines():
if rkf=="Everything is Ok\n":
print("Found password:",repr(k))
print("Tried combination count:",tryn)
print("It took",round(time.time()-start,3),"seconds")
print("Exiting...")
time.sleep(2)
sys.exit(1)
else:
print("ERROR: File isnt a RAR, ZIP or 7z file.\nExiting...")
#checking if the file exists/running the function
if len(sys.argv)==2:
	if os.path.exists(sys.argv[1]):
		rc(sys.argv[1])
else:
print("ERROR: File doesn't exist.\nExiting...")
else:
print("Usage:",os.path.basename(__file__),"[rar file]")
print("Example:",os.path.basename(__file__),"foobar.rar")