file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
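Each row below stores one source file split into `prefix`, `middle`, and `suffix` (concatenating them in that order restores the file), with `fim_type` labelling the kind of span that was held out (`random_line_split`, `identifier_name`, `identifier_body`, `conditional_block`). The sketch below shows one way such a row could be reassembled or turned into a fill-in-the-middle prompt; the sentinel tokens and the example row are illustrative assumptions, not fields of the dataset.

```python
# Minimal sketch (not part of the dataset): reassemble one row and build a
# fill-in-the-middle prompt from it. The sentinel strings are an assumption;
# the dataset itself only stores raw prefix/middle/suffix text plus a fim_type label.

def reconstruct_source(row):
    """Rebuild the original file contents from a prefix/middle/suffix row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row):
    """Arrange the fields in prefix-suffix-middle order, as FIM training commonly does."""
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])

# Hypothetical row mirroring the list.rs / identifier_name example further down.
example = {
    "file_name": "list.rs",
    "prefix": "pub fn ",
    "middle": "get",
    "suffix": "(x: &ArgMatches) -> Operation { /* ... */ }",
    "fim_type": "identifier_name",
}

assert reconstruct_source(example) == "pub fn get(x: &ArgMatches) -> Operation { /* ... */ }"
print(to_fim_prompt(example))
```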
processOps.ts | const BASE_PATH = 'editor.composition.i'
const TOOLBOX_PATH = 'editor.$toolbox'
declare var global: any
export function processOps
( { state, props } ) {
const { ops } = props
if ( ! ops ) {
return
}
let newselection
ops.forEach
( op => {
const path = op.path && `${BASE_PATH}.${op.path.join('.i.')}`
switch ( op.op ) {
case 'update':
state.set ( path, op.value )
break
case 'delete':
state.unset ( path )
break
case 'select':
newselection = op.value
newselection.stringPath = `${BASE_PATH}.${newselection.anchorPath.join('.i.')}`
break
case 'toolbox':
state.set ( TOOLBOX_PATH, op.value )
break
default:
throw new Error ( `Unknown operation '${op.op}'` )
}
}
)
if ( newselection ) {
// FIXME: global.selection is bad find another way
global.selection = newselection | return { selection: newselection }
}
} | random_line_split |
|
list.rs |
use super::{print_size, io, Header, Operation, App, Arg, ArgMatches, SubCommand, PathBuf, Regex,
RegexFault};
fn valid_path(x: String) -> Result<(), String> {
let p = PathBuf::from(&x);
match (p.exists(), p.is_file()) {
(true, true) => Ok(()),
(false, _) => Err(format!("Cannot process {} it does not exist", &x)),
(true, false) => Err(format!("Cannot process {} it is a directory or link", &x)),
}
}
fn valid_regex(x: String) -> Result<(), String> {
match Regex::new(&x) {
Ok(_) => Ok(()),
Err(RegexFault::CompiledTooBig(val)) => Err(format!(
"Input regex is too large. Set size limit {:?}",
val
)),
Err(RegexFault::Syntax(s)) => Err(format!("Regex Syntax Error: {}", s)),
Err(_) => Err(format!("Regex Syntax Error. Source undocumented :(")),
}
}
pub fn build<'a>() -> App<'static, 'a> {
SubCommand::with_name("list")
.about("lists contents of a regex")
.arg(
Arg::with_name("group")
.long("groupname")
.takes_value(false)
.next_line_help(true)
.help("display group name"),
)
.arg(
Arg::with_name("user")
.long("username")
.takes_value(false)
.next_line_help(true)
.help("display username"),
)
.arg(
Arg::with_name("uid")
.long("uid")
.takes_value(false)
.next_line_help(true)
.help("display uid"),
)
.arg(
Arg::with_name("gid")
.long("gid")
.takes_value(false)
.next_line_help(true)
.help("display gid"),
)
.arg(
Arg::with_name("size")
.long("size")
.takes_value(false)
.next_line_help(true)
.help("display file size"),
)
.arg(
Arg::with_name("file")
.short("f")
.long("file")
.takes_value(true)
.multiple(false)
.value_name("INFILE")
.required(true)
.validator(valid_path)
.next_line_help(true)
.help("file to read"),
)
.arg(
Arg::with_name("regex")
.short("r")
.long("regex")
.takes_value(true)
.multiple(false)
.value_name("REGEX")
.validator(valid_regex)
.next_line_help(true)
.help("regex to filter list by"),
)
}
/// print data
pub fn exec(
header: &Header,
regex: &Option<Regex>,
group: bool,
user: bool,
gid: bool,
uid: bool,
size: bool,
) -> io::Result<()> {
let flag = match regex {
&Option::None => true,
&Option::Some(ref regex) => {
let path = header.path()?;
match path.file_name() {
Option::None => false,
Option::Some(f_name) => {
match f_name.to_str() {
Option::None => false,
Option::Some(f_name_str) => regex.is_match(f_name_str),
}
}
}
}
};
if flag {
if group {
let g = match header.groupname() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No groupname",
Err(_) => "UTF8 ERROR",
};
println!("\tGroup Name: {}", g);
}
let path = header.path()?;
println!("{:?}", path);
if user {
let u = match header.username() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No username",
Err(_) => "UTF8 ERROR",
};
println!("\tUser Name: {}", u);
}
if gid {
println!("\tUser Group ID (gid): 0x{:X}", header.gid()?);
}
if uid {
println!("\tUser ID (uid): 0x{:X}", header.uid()?);
}
if size {
println!("\tSize: {}", print_size(header.size()?));
}
}
Ok(())
}
pub fn | (x: &ArgMatches) -> Operation {
Operation::List(
PathBuf::from(x.value_of("file").unwrap()),
match x.value_of("regex") {
Option::None => None,
Option::Some(r) => Regex::new(&r).ok(),
},
x.is_present("group"),
x.is_present("user"),
x.is_present("gid"),
x.is_present("uid"),
x.is_present("mtime"),
x.is_present("size"),
)
}
| get | identifier_name |
list.rs |
use super::{print_size, io, Header, Operation, App, Arg, ArgMatches, SubCommand, PathBuf, Regex,
RegexFault};
fn valid_path(x: String) -> Result<(), String> {
let p = PathBuf::from(&x);
match (p.exists(), p.is_file()) {
(true, true) => Ok(()),
(false, _) => Err(format!("Cannot process {} it does not exist", &x)),
(true, false) => Err(format!("Cannot process {} it is a directory or link", &x)),
}
}
fn valid_regex(x: String) -> Result<(), String> {
match Regex::new(&x) {
Ok(_) => Ok(()),
Err(RegexFault::CompiledTooBig(val)) => Err(format!(
"Input regex is too large. Set size limit {:?}",
val
)),
Err(RegexFault::Syntax(s)) => Err(format!("Regex Syntax Error: {}", s)),
Err(_) => Err(format!("Regex Syntax Error. Source undocumented :(")),
}
}
pub fn build<'a>() -> App<'static, 'a> |
/// print data
pub fn exec(
header: &Header,
regex: &Option<Regex>,
group: bool,
user: bool,
gid: bool,
uid: bool,
size: bool,
) -> io::Result<()> {
let flag = match regex {
&Option::None => true,
&Option::Some(ref regex) => {
let path = header.path()?;
match path.file_name() {
Option::None => false,
Option::Some(f_name) => {
match f_name.to_str() {
Option::None => false,
Option::Some(f_name_str) => regex.is_match(f_name_str),
}
}
}
}
};
if flag {
if group {
let g = match header.groupname() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No groupname",
Err(_) => "UTF8 ERROR",
};
println!("\tGroup Name: {}", g);
}
let path = header.path()?;
println!("{:?}", path);
if user {
let u = match header.username() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No username",
Err(_) => "UTF8 ERROR",
};
println!("\tUser Name: {}", u);
}
if gid {
println!("\tUser Group ID (gid): 0x{:X}", header.gid()?);
}
if uid {
println!("\tUser ID (uid): 0x{:X}", header.uid()?);
}
if size {
println!("\tSize: {}", print_size(header.size()?));
}
}
Ok(())
}
pub fn get(x: &ArgMatches) -> Operation {
Operation::List(
PathBuf::from(x.value_of("file").unwrap()),
match x.value_of("regex") {
Option::None => None,
Option::Some(r) => Regex::new(&r).ok(),
},
x.is_present("group"),
x.is_present("user"),
x.is_present("gid"),
x.is_present("uid"),
x.is_present("mtime"),
x.is_present("size"),
)
}
| {
SubCommand::with_name("list")
.about("lists contents of a regex")
.arg(
Arg::with_name("group")
.long("groupname")
.takes_value(false)
.next_line_help(true)
.help("display group name"),
)
.arg(
Arg::with_name("user")
.long("username")
.takes_value(false)
.next_line_help(true)
.help("display username"),
)
.arg(
Arg::with_name("uid")
.long("uid")
.takes_value(false)
.next_line_help(true)
.help("display uid"),
)
.arg(
Arg::with_name("gid")
.long("gid")
.takes_value(false)
.next_line_help(true)
.help("display gid"),
)
.arg(
Arg::with_name("size")
.long("size")
.takes_value(false)
.next_line_help(true)
.help("display file size"),
)
.arg(
Arg::with_name("file")
.short("f")
.long("file")
.takes_value(true)
.multiple(false)
.value_name("INFILE")
.required(true)
.validator(valid_path)
.next_line_help(true)
.help("file to read"),
)
.arg(
Arg::with_name("regex")
.short("r")
.long("regex")
.takes_value(true)
.multiple(false)
.value_name("REGEX")
.validator(valid_regex)
.next_line_help(true)
.help("regex to filter list by"),
)
} | identifier_body |
list.rs | use super::{print_size, io, Header, Operation, App, Arg, ArgMatches, SubCommand, PathBuf, Regex,
RegexFault};
fn valid_path(x: String) -> Result<(), String> {
let p = PathBuf::from(&x);
match (p.exists(), p.is_file()) {
(true, true) => Ok(()),
(false, _) => Err(format!("Cannot process {} it does not exist", &x)),
(true, false) => Err(format!("Cannot process {} it is a directory or link", &x)),
}
}
fn valid_regex(x: String) -> Result<(), String> {
match Regex::new(&x) {
Ok(_) => Ok(()),
Err(RegexFault::CompiledTooBig(val)) => Err(format!(
"Input regex is too large. Set size limit {:?}",
val
)),
Err(RegexFault::Syntax(s)) => Err(format!("Regex Syntax Error: {}", s)),
Err(_) => Err(format!("Regex Syntax Error. Source undocumented :(")),
}
}
pub fn build<'a>() -> App<'static, 'a> {
SubCommand::with_name("list")
.about("lists contents of a regex")
.arg(
Arg::with_name("group")
.long("groupname")
.takes_value(false)
.next_line_help(true)
.help("display group name"),
)
.arg(
Arg::with_name("user")
.long("username")
.takes_value(false)
.next_line_help(true)
.help("display username"),
)
.arg(
Arg::with_name("uid")
.long("uid")
.takes_value(false)
.next_line_help(true)
.help("display uid"),
)
.arg(
Arg::with_name("gid")
.long("gid")
.takes_value(false)
.next_line_help(true)
.help("display gid"),
)
.arg(
Arg::with_name("size")
.long("size")
.takes_value(false)
.next_line_help(true)
.help("display file size"),
)
.arg(
Arg::with_name("file")
.short("f")
.long("file")
.takes_value(true)
.multiple(false)
.value_name("INFILE")
.required(true)
.validator(valid_path)
.next_line_help(true)
.help("file to read"),
)
.arg( | .takes_value(true)
.multiple(false)
.value_name("REGEX")
.validator(valid_regex)
.next_line_help(true)
.help("regex to filter list by"),
)
}
/// print data
pub fn exec(
header: &Header,
regex: &Option<Regex>,
group: bool,
user: bool,
gid: bool,
uid: bool,
size: bool,
) -> io::Result<()> {
let flag = match regex {
&Option::None => true,
&Option::Some(ref regex) => {
let path = header.path()?;
match path.file_name() {
Option::None => false,
Option::Some(f_name) => {
match f_name.to_str() {
Option::None => false,
Option::Some(f_name_str) => regex.is_match(f_name_str),
}
}
}
}
};
if flag {
if group {
let g = match header.groupname() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No groupname",
Err(_) => "UTF8 ERROR",
};
println!("\tGroup Name: {}", g);
}
let path = header.path()?;
println!("{:?}", path);
if user {
let u = match header.username() {
Ok(Option::Some(x)) => x,
Ok(Option::None) => "No username",
Err(_) => "UTF8 ERROR",
};
println!("\tUser Name: {}", u);
}
if gid {
println!("\tUser Group ID (gid): 0x{:X}", header.gid()?);
}
if uid {
println!("\tUser ID (uid): 0x{:X}", header.uid()?);
}
if size {
println!("\tSize: {}", print_size(header.size()?));
}
}
Ok(())
}
pub fn get(x: &ArgMatches) -> Operation {
Operation::List(
PathBuf::from(x.value_of("file").unwrap()),
match x.value_of("regex") {
Option::None => None,
Option::Some(r) => Regex::new(&r).ok(),
},
x.is_present("group"),
x.is_present("user"),
x.is_present("gid"),
x.is_present("uid"),
x.is_present("mtime"),
x.is_present("size"),
)
} | Arg::with_name("regex")
.short("r")
.long("regex") | random_line_split |
syndicate-create-write-read.py | #!/usr/bin/env python
"""
Copyright 2016 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import subprocess
import random
import time
import testlib
import testconf
import shutil
PUT_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-put")
WRITE_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-write")
READ_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-read")
RG_PATH = os.path.join(testconf.SYNDICATE_RG_ROOT, "syndicate-rg")
RG_DRIVER = os.path.join(testconf.SYNDICATE_PYTHON_ROOT, "syndicate/rg/drivers/disk" )
| testlib.save_output( output_dir, save_name, out )
return exitcode, out
def overlay( expected_data, buf, offset ):
expected_data_list = list(expected_data)
i = offset
for c in list(buf):
if i >= len(expected_data_list):
padlen = i - len(expected_data_list) + 1
for j in xrange(0, padlen):
expected_data_list.append('\0')
expected_data_list[i] = c
i += 1
return "".join(expected_data_list)
if __name__ == "__main__":
local_path = testlib.make_random_file(16384)
expected_data = None
with open(local_path, "r") as f:
expected_data = f.read()
config_dir, output_dir = testlib.test_setup()
volume_name = testlib.add_test_volume( config_dir, blocksize=1024 )
RG_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "RG", caps="NONE", email=testconf.SYNDICATE_ADMIN )
testlib.update_gateway( config_dir, RG_gateway_name, "port=31112", "driver=%s" % RG_DRIVER )
rg_proc, rg_out_path = testlib.start_gateway( config_dir, RG_PATH, testconf.SYNDICATE_ADMIN, volume_name, RG_gateway_name )
if not testlib.gateway_ping( 31112, 15 ):
raise Exception("%s exited %s" % (RG_PATH, rg_proc.poll()))
# should cause the RG to get updated that there's a new gateway
gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_info = testlib.read_gateway( config_dir, read_gateway_name )
read_gateway_id = read_gateway_info['g_id']
volume_info = testlib.read_volume( config_dir, volume_name )
volume_id = volume_info['volume_id']
random_part = hex(random.randint(0, 2**32-1))[2:]
output_path = "/put-%s" % random_part
exitcode, out = testlib.run( PUT_PATH, '-d3', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'), '-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name, local_path, output_path )
testlib.save_output( output_dir, "syndicate-put", out )
if exitcode != 0:
raise Exception("%s exited %s" % (PUT_PATH, exitcode))
# try reading and writing various ranges (these are (start, end) absolute ranges, not offset/length)
ranges = [
(5000, 16000),
(0, 1), # 1 block, tail unaligned
(1, 200), # 1 block, unaligned head and tail
(0, 4096), # 4 block, aligned
(0, 8192), # 8 blocks, aligned
(0, 1000), # 1 block, tail unaligned
(0, 6000),
(100, 4000),
(5000, 10000),
(4096, 10000),
(5000, 8192),
(4096, 16834),
(5000, 16384),
(4096, 16000),
(5000, 16000),
]
# write each range
for (start, end) in ranges:
range_file_path = testlib.make_random_file( end - start )
range_fd = open(range_file_path, "r")
range_data = range_fd.read()
range_fd.close()
exitcode, out = testlib.run( WRITE_PATH, '-d3', '-f', '-c', os.path.join(config_dir, "syndicate.conf"),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name,
output_path, range_file_path, start, valgrind=True )
testlib.save_output( output_dir, "syndicate-write-%s-%s" % (start, end), out)
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (WRITE_PATH, exitcode))
expected_data = overlay( expected_data, range_data, start )
# read each range back--cached and uncached
for (start, end) in ranges:
# only clear reader's cache
testlib.clear_cache( config_dir, volume_id=volume_id, gateway_id=read_gateway_id )
# do each read twice--once uncached, and one cached
for i in xrange(0, 2):
exitcode, out = testlib.run( READ_PATH, '-d2', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', read_gateway_name,
output_path, start, end - start, valgrind=True )
outname = "uncached"
if i > 0:
outname = "cached"
testlib.save_output( output_dir, 'syndicate-read-%s-%s-%s' % (start, end, outname), out )
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (READ_PATH, exitcode))
# correctness
if expected_data[start:end] not in out:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
print >> sys.stderr, "Missing data\n%s\n" % expected_data[start:end]
raise Exception("Missing data for %s-%s" % (start, end))
rg_exitcode, rg_out = testlib.stop_gateway( rg_proc, rg_out_path )
testlib.save_output( output_dir, "syndicate-rg", rg_out )
if rg_exitcode != 0:
raise Exception("%s exited %s" % (RG_PATH, rg_exitcode))
sys.exit(0) | def stop_and_save( output_dir, proc, out_path, save_name ):
exitcode, out = testlib.stop_gateway( proc, out_path ) | random_line_split |
syndicate-create-write-read.py | #!/usr/bin/env python
"""
Copyright 2016 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import subprocess
import random
import time
import testlib
import testconf
import shutil
PUT_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-put")
WRITE_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-write")
READ_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-read")
RG_PATH = os.path.join(testconf.SYNDICATE_RG_ROOT, "syndicate-rg")
RG_DRIVER = os.path.join(testconf.SYNDICATE_PYTHON_ROOT, "syndicate/rg/drivers/disk" )
def | ( output_dir, proc, out_path, save_name ):
exitcode, out = testlib.stop_gateway( proc, out_path )
testlib.save_output( output_dir, save_name, out )
return exitcode, out
def overlay( expected_data, buf, offset ):
expected_data_list = list(expected_data)
i = offset
for c in list(buf):
if i >= len(expected_data_list):
padlen = i - len(expected_data_list) + 1
for j in xrange(0, padlen):
expected_data_list.append('\0')
expected_data_list[i] = c
i += 1
return "".join(expected_data_list)
if __name__ == "__main__":
local_path = testlib.make_random_file(16384)
expected_data = None
with open(local_path, "r") as f:
expected_data = f.read()
config_dir, output_dir = testlib.test_setup()
volume_name = testlib.add_test_volume( config_dir, blocksize=1024 )
RG_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "RG", caps="NONE", email=testconf.SYNDICATE_ADMIN )
testlib.update_gateway( config_dir, RG_gateway_name, "port=31112", "driver=%s" % RG_DRIVER )
rg_proc, rg_out_path = testlib.start_gateway( config_dir, RG_PATH, testconf.SYNDICATE_ADMIN, volume_name, RG_gateway_name )
if not testlib.gateway_ping( 31112, 15 ):
raise Exception("%s exited %s" % (RG_PATH, rg_proc.poll()))
# should cause the RG to get updated that there's a new gateway
gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_info = testlib.read_gateway( config_dir, read_gateway_name )
read_gateway_id = read_gateway_info['g_id']
volume_info = testlib.read_volume( config_dir, volume_name )
volume_id = volume_info['volume_id']
random_part = hex(random.randint(0, 2**32-1))[2:]
output_path = "/put-%s" % random_part
exitcode, out = testlib.run( PUT_PATH, '-d3', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'), '-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name, local_path, output_path )
testlib.save_output( output_dir, "syndicate-put", out )
if exitcode != 0:
raise Exception("%s exited %s" % (PUT_PATH, exitcode))
# try reading and writing various ranges (these are (start, end) absolute ranges, not offset/length)
ranges = [
(5000, 16000),
(0, 1), # 1 block, tail unaligned
(1, 200), # 1 block, unaligned head and tail
(0, 4096), # 4 block, aligned
(0, 8192), # 8 blocks, aligned
(0, 1000), # 1 block, tail unaligned
(0, 6000),
(100, 4000),
(5000, 10000),
(4096, 10000),
(5000, 8192),
(4096, 16834),
(5000, 16384),
(4096, 16000),
(5000, 16000),
]
# write each range
for (start, end) in ranges:
range_file_path = testlib.make_random_file( end - start )
range_fd = open(range_file_path, "r")
range_data = range_fd.read()
range_fd.close()
exitcode, out = testlib.run( WRITE_PATH, '-d3', '-f', '-c', os.path.join(config_dir, "syndicate.conf"),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name,
output_path, range_file_path, start, valgrind=True )
testlib.save_output( output_dir, "syndicate-write-%s-%s" % (start, end), out)
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (WRITE_PATH, exitcode))
expected_data = overlay( expected_data, range_data, start )
# read each range back--cached and uncached
for (start, end) in ranges:
# only clear reader's cache
testlib.clear_cache( config_dir, volume_id=volume_id, gateway_id=read_gateway_id )
# do each read twice--once uncached, and one cached
for i in xrange(0, 2):
exitcode, out = testlib.run( READ_PATH, '-d2', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', read_gateway_name,
output_path, start, end - start, valgrind=True )
outname = "uncached"
if i > 0:
outname = "cached"
testlib.save_output( output_dir, 'syndicate-read-%s-%s-%s' % (start, end, outname), out )
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (READ_PATH, exitcode))
# correctness
if expected_data[start:end] not in out:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
print >> sys.stderr, "Missing data\n%s\n" % expected_data[start:end]
raise Exception("Missing data for %s-%s" % (start, end))
rg_exitcode, rg_out = testlib.stop_gateway( rg_proc, rg_out_path )
testlib.save_output( output_dir, "syndicate-rg", rg_out )
if rg_exitcode != 0:
raise Exception("%s exited %s" % (RG_PATH, rg_exitcode))
sys.exit(0)
| stop_and_save | identifier_name |
syndicate-create-write-read.py | #!/usr/bin/env python
"""
Copyright 2016 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import subprocess
import random
import time
import testlib
import testconf
import shutil
PUT_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-put")
WRITE_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-write")
READ_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-read")
RG_PATH = os.path.join(testconf.SYNDICATE_RG_ROOT, "syndicate-rg")
RG_DRIVER = os.path.join(testconf.SYNDICATE_PYTHON_ROOT, "syndicate/rg/drivers/disk" )
def stop_and_save( output_dir, proc, out_path, save_name ):
exitcode, out = testlib.stop_gateway( proc, out_path )
testlib.save_output( output_dir, save_name, out )
return exitcode, out
def overlay( expected_data, buf, offset ):
|
if __name__ == "__main__":
local_path = testlib.make_random_file(16384)
expected_data = None
with open(local_path, "r") as f:
expected_data = f.read()
config_dir, output_dir = testlib.test_setup()
volume_name = testlib.add_test_volume( config_dir, blocksize=1024 )
RG_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "RG", caps="NONE", email=testconf.SYNDICATE_ADMIN )
testlib.update_gateway( config_dir, RG_gateway_name, "port=31112", "driver=%s" % RG_DRIVER )
rg_proc, rg_out_path = testlib.start_gateway( config_dir, RG_PATH, testconf.SYNDICATE_ADMIN, volume_name, RG_gateway_name )
if not testlib.gateway_ping( 31112, 15 ):
raise Exception("%s exited %s" % (RG_PATH, rg_proc.poll()))
# should cause the RG to get updated that there's a new gateway
gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_info = testlib.read_gateway( config_dir, read_gateway_name )
read_gateway_id = read_gateway_info['g_id']
volume_info = testlib.read_volume( config_dir, volume_name )
volume_id = volume_info['volume_id']
random_part = hex(random.randint(0, 2**32-1))[2:]
output_path = "/put-%s" % random_part
exitcode, out = testlib.run( PUT_PATH, '-d3', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'), '-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name, local_path, output_path )
testlib.save_output( output_dir, "syndicate-put", out )
if exitcode != 0:
raise Exception("%s exited %s" % (PUT_PATH, exitcode))
# try reading and writing various ranges (these are (start, end) absolute ranges, not offset/length)
ranges = [
(5000, 16000),
(0, 1), # 1 block, tail unaligned
(1, 200), # 1 block, unaligned head and tail
(0, 4096), # 4 block, aligned
(0, 8192), # 8 blocks, aligned
(0, 1000), # 1 block, tail unaligned
(0, 6000),
(100, 4000),
(5000, 10000),
(4096, 10000),
(5000, 8192),
(4096, 16834),
(5000, 16384),
(4096, 16000),
(5000, 16000),
]
# write each range
for (start, end) in ranges:
range_file_path = testlib.make_random_file( end - start )
range_fd = open(range_file_path, "r")
range_data = range_fd.read()
range_fd.close()
exitcode, out = testlib.run( WRITE_PATH, '-d3', '-f', '-c', os.path.join(config_dir, "syndicate.conf"),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name,
output_path, range_file_path, start, valgrind=True )
testlib.save_output( output_dir, "syndicate-write-%s-%s" % (start, end), out)
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (WRITE_PATH, exitcode))
expected_data = overlay( expected_data, range_data, start )
# read each range back--cached and uncached
for (start, end) in ranges:
# only clear reader's cache
testlib.clear_cache( config_dir, volume_id=volume_id, gateway_id=read_gateway_id )
# do each read twice--once uncached, and one cached
for i in xrange(0, 2):
exitcode, out = testlib.run( READ_PATH, '-d2', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', read_gateway_name,
output_path, start, end - start, valgrind=True )
outname = "uncached"
if i > 0:
outname = "cached"
testlib.save_output( output_dir, 'syndicate-read-%s-%s-%s' % (start, end, outname), out )
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (READ_PATH, exitcode))
# correctness
if expected_data[start:end] not in out:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
print >> sys.stderr, "Missing data\n%s\n" % expected_data[start:end]
raise Exception("Missing data for %s-%s" % (start, end))
rg_exitcode, rg_out = testlib.stop_gateway( rg_proc, rg_out_path )
testlib.save_output( output_dir, "syndicate-rg", rg_out )
if rg_exitcode != 0:
raise Exception("%s exited %s" % (RG_PATH, rg_exitcode))
sys.exit(0)
| expected_data_list = list(expected_data)
i = offset
for c in list(buf):
if i >= len(expected_data_list):
padlen = i - len(expected_data_list) + 1
for j in xrange(0, padlen):
expected_data_list.append('\0')
expected_data_list[i] = c
i += 1
return "".join(expected_data_list) | identifier_body |
syndicate-create-write-read.py | #!/usr/bin/env python
"""
Copyright 2016 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import subprocess
import random
import time
import testlib
import testconf
import shutil
PUT_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-put")
WRITE_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-write")
READ_PATH = os.path.join(testconf.SYNDICATE_UG_ROOT, "syndicate-read")
RG_PATH = os.path.join(testconf.SYNDICATE_RG_ROOT, "syndicate-rg")
RG_DRIVER = os.path.join(testconf.SYNDICATE_PYTHON_ROOT, "syndicate/rg/drivers/disk" )
def stop_and_save( output_dir, proc, out_path, save_name ):
exitcode, out = testlib.stop_gateway( proc, out_path )
testlib.save_output( output_dir, save_name, out )
return exitcode, out
def overlay( expected_data, buf, offset ):
expected_data_list = list(expected_data)
i = offset
for c in list(buf):
if i >= len(expected_data_list):
padlen = i - len(expected_data_list) + 1
for j in xrange(0, padlen):
expected_data_list.append('\0')
expected_data_list[i] = c
i += 1
return "".join(expected_data_list)
if __name__ == "__main__":
local_path = testlib.make_random_file(16384)
expected_data = None
with open(local_path, "r") as f:
expected_data = f.read()
config_dir, output_dir = testlib.test_setup()
volume_name = testlib.add_test_volume( config_dir, blocksize=1024 )
RG_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "RG", caps="NONE", email=testconf.SYNDICATE_ADMIN )
testlib.update_gateway( config_dir, RG_gateway_name, "port=31112", "driver=%s" % RG_DRIVER )
rg_proc, rg_out_path = testlib.start_gateway( config_dir, RG_PATH, testconf.SYNDICATE_ADMIN, volume_name, RG_gateway_name )
if not testlib.gateway_ping( 31112, 15 ):
raise Exception("%s exited %s" % (RG_PATH, rg_proc.poll()))
# should cause the RG to get updated that there's a new gateway
gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_name = testlib.add_test_gateway( config_dir, volume_name, "UG", caps="ALL", email=testconf.SYNDICATE_ADMIN )
read_gateway_info = testlib.read_gateway( config_dir, read_gateway_name )
read_gateway_id = read_gateway_info['g_id']
volume_info = testlib.read_volume( config_dir, volume_name )
volume_id = volume_info['volume_id']
random_part = hex(random.randint(0, 2**32-1))[2:]
output_path = "/put-%s" % random_part
exitcode, out = testlib.run( PUT_PATH, '-d3', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'), '-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name, local_path, output_path )
testlib.save_output( output_dir, "syndicate-put", out )
if exitcode != 0:
raise Exception("%s exited %s" % (PUT_PATH, exitcode))
# try reading and writing various ranges (these are (start, end) absolute ranges, not offset/length)
ranges = [
(5000, 16000),
(0, 1), # 1 block, tail unaligned
(1, 200), # 1 block, unaligned head and tail
(0, 4096), # 4 block, aligned
(0, 8192), # 8 blocks, aligned
(0, 1000), # 1 block, tail unaligned
(0, 6000),
(100, 4000),
(5000, 10000),
(4096, 10000),
(5000, 8192),
(4096, 16834),
(5000, 16384),
(4096, 16000),
(5000, 16000),
]
# write each range
for (start, end) in ranges:
range_file_path = testlib.make_random_file( end - start )
range_fd = open(range_file_path, "r")
range_data = range_fd.read()
range_fd.close()
exitcode, out = testlib.run( WRITE_PATH, '-d3', '-f', '-c', os.path.join(config_dir, "syndicate.conf"),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', gateway_name,
output_path, range_file_path, start, valgrind=True )
testlib.save_output( output_dir, "syndicate-write-%s-%s" % (start, end), out)
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (WRITE_PATH, exitcode))
expected_data = overlay( expected_data, range_data, start )
# read each range back--cached and uncached
for (start, end) in ranges:
# only clear reader's cache
|
rg_exitcode, rg_out = testlib.stop_gateway( rg_proc, rg_out_path )
testlib.save_output( output_dir, "syndicate-rg", rg_out )
if rg_exitcode != 0:
raise Exception("%s exited %s" % (RG_PATH, rg_exitcode))
sys.exit(0)
| testlib.clear_cache( config_dir, volume_id=volume_id, gateway_id=read_gateway_id )
# do each read twice--once uncached, and one cached
for i in xrange(0, 2):
exitcode, out = testlib.run( READ_PATH, '-d2', '-f', '-c', os.path.join(config_dir, 'syndicate.conf'),
'-u', testconf.SYNDICATE_ADMIN, '-v', volume_name, '-g', read_gateway_name,
output_path, start, end - start, valgrind=True )
outname = "uncached"
if i > 0:
outname = "cached"
testlib.save_output( output_dir, 'syndicate-read-%s-%s-%s' % (start, end, outname), out )
if exitcode != 0:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
raise Exception("%s exited %s" % (READ_PATH, exitcode))
# correctness
if expected_data[start:end] not in out:
stop_and_save( output_dir, rg_proc, rg_out_path, "syndicate-rg" )
print >> sys.stderr, "Missing data\n%s\n" % expected_data[start:end]
raise Exception("Missing data for %s-%s" % (start, end)) | conditional_block |
hero-detail.component.ts | import { OnInit,Input, Component} from '@angular/core';
import { ActivatedRoute, Params } from '@angular/router';
import { Location } from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { HeroService } from './hero.service';
@Component({
moduleId: module.id,
selector: 'my-hero-detail',
templateUrl: 'hero-detail.component.html',
styleUrls: ['hero-detail.component.css']
})
export class HeroDetailComponent implements OnInit{
@Input() hero: Hero;
constructor(
private heroService: HeroService,
private route: ActivatedRoute,
private location: Location
){}
ngOnInit(): void {
this.route.params
.switchMap((params: Params) => this.heroService.getHero(+params['id']))
.subscribe(hero => this.hero = hero);
}
goBack(): void{
this.location.back();
}
save(): void {
this.heroService.update(this.hero)
.then(() => this.goBack());
}
} | import {Hero} from './hero'; | random_line_split |
|
hero-detail.component.ts | import {Hero} from './hero';
import { OnInit,Input, Component} from '@angular/core';
import { ActivatedRoute, Params } from '@angular/router';
import { Location } from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { HeroService } from './hero.service';
@Component({
moduleId: module.id,
selector: 'my-hero-detail',
templateUrl: 'hero-detail.component.html',
styleUrls: ['hero-detail.component.css']
})
export class HeroDetailComponent implements OnInit{
@Input() hero: Hero;
constructor(
private heroService: HeroService,
private route: ActivatedRoute,
private location: Location
){}
| (): void {
this.route.params
.switchMap((params: Params) => this.heroService.getHero(+params['id']))
.subscribe(hero => this.hero = hero);
}
goBack(): void{
this.location.back();
}
save(): void {
this.heroService.update(this.hero)
.then(() => this.goBack());
}
}
| ngOnInit | identifier_name |
hero-detail.component.ts | import {Hero} from './hero';
import { OnInit,Input, Component} from '@angular/core';
import { ActivatedRoute, Params } from '@angular/router';
import { Location } from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { HeroService } from './hero.service';
@Component({
moduleId: module.id,
selector: 'my-hero-detail',
templateUrl: 'hero-detail.component.html',
styleUrls: ['hero-detail.component.css']
})
export class HeroDetailComponent implements OnInit{
@Input() hero: Hero;
constructor(
private heroService: HeroService,
private route: ActivatedRoute,
private location: Location
) |
ngOnInit(): void {
this.route.params
.switchMap((params: Params) => this.heroService.getHero(+params['id']))
.subscribe(hero => this.hero = hero);
}
goBack(): void{
this.location.back();
}
save(): void {
this.heroService.update(this.hero)
.then(() => this.goBack());
}
}
| {} | identifier_body |
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn foo(x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
}
fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() | {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else {
cnt -= 1;
}
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
} | identifier_body |
|
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn foo(x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
} | fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else {
cnt -= 1;
}
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
} | random_line_split |
|
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn foo(x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
}
fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else |
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
}
| {
cnt -= 1;
} | conditional_block |
long-live-the-unsized-temporary.rs | #![allow(incomplete_features)]
#![feature(unsized_locals, unsized_fn_params)]
use std::fmt;
fn gen_foo() -> Box<fmt::Display> {
Box::new(Box::new("foo"))
}
fn | (x: fmt::Display) {
assert_eq!(x.to_string(), "foo");
}
fn foo_indirect(x: fmt::Display) {
foo(x);
}
fn main() {
foo(*gen_foo());
foo_indirect(*gen_foo());
{
let x: fmt::Display = *gen_foo();
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let y: fmt::Display = *gen_foo();
foo(x);
foo(y);
}
{
let mut cnt: usize = 3;
let x = loop {
let x: fmt::Display = *gen_foo();
if cnt == 0 {
break x;
} else {
cnt -= 1;
}
};
foo(x);
}
{
let x: fmt::Display = *gen_foo();
let x = if true { x } else { *gen_foo() };
foo(x);
}
}
| foo | identifier_name |
test.js | /**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var tape = require( 'tape' );
var abs = require( '@stdlib/math/base/special/abs' );
var isnan = require( '@stdlib/math/base/assert/is-nan' );
var PINF = require( '@stdlib/constants/float64/pinf' );
var NINF = require( '@stdlib/constants/float64/ninf' );
var EPS = require( '@stdlib/constants/float64/eps' );
var variance = require( './../lib' );
// FIXTURES //
var data = require( './fixtures/julia/data.json' );
// TESTS //
tape( 'main export is a function', function test( t ) {
t.ok( true, __filename );
t.equal( typeof variance, 'function', 'main export is a function' );
t.end();
});
tape( 'if provided `NaN` for any parameter, the function returns `NaN`', function test( t ) {
var v = variance( NaN, 0.5 );
t.equal( isnan( v ), true, 'returns NaN' );
v = variance( 10.0, NaN );
t.equal( isnan( v ), true, 'returns NaN' );
t.end();
});
tape( 'if provided `k <= 0`, the function returns `NaN`', function test( t ) {
var y;
y = variance( -1.0, 2.0 );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, 1.0 );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, PINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, NaN );
t.equal( isnan( y ), true, 'returns NaN' );
t.end();
});
tape( 'if provided `lambda <= 0`, the function returns `NaN`', function test( t ) {
var y;
y = variance( 2.0, -1.0 );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( 1.0, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( PINF, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NaN, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
t.end();
});
tape( 'the function returns the variance of a Weibull distribution', function test( t ) {
var expected;
var lambda;
var delta;
var tol;
var k;
var i;
var y;
expected = data.expected;
k = data.k;
lambda = data.lambda;
for ( i = 0; i < expected.length; i++ ) {
y = variance( k[i], lambda[i] );
if ( y === expected[i] ) | else {
delta = abs( y - expected[ i ] );
tol = 300.0 * EPS * abs( expected[ i ] );
t.ok( delta <= tol, 'within tolerance. k: '+k[i]+'. lambda: '+lambda[i]+'. y: '+y+'. E: '+expected[ i ]+'. Δ: '+delta+'. tol: '+tol+'.' );
}
}
t.end();
});
| {
t.equal( y, expected[i], 'k: '+k[i]+', lambda: '+lambda[i]+', y: '+y+', expected: '+expected[i] );
} | conditional_block |
test.js | /**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
|
// MODULES //
var tape = require( 'tape' );
var abs = require( '@stdlib/math/base/special/abs' );
var isnan = require( '@stdlib/math/base/assert/is-nan' );
var PINF = require( '@stdlib/constants/float64/pinf' );
var NINF = require( '@stdlib/constants/float64/ninf' );
var EPS = require( '@stdlib/constants/float64/eps' );
var variance = require( './../lib' );
// FIXTURES //
var data = require( './fixtures/julia/data.json' );
// TESTS //
tape( 'main export is a function', function test( t ) {
t.ok( true, __filename );
t.equal( typeof variance, 'function', 'main export is a function' );
t.end();
});
tape( 'if provided `NaN` for any parameter, the function returns `NaN`', function test( t ) {
var v = variance( NaN, 0.5 );
t.equal( isnan( v ), true, 'returns NaN' );
v = variance( 10.0, NaN );
t.equal( isnan( v ), true, 'returns NaN' );
t.end();
});
tape( 'if provided `k <= 0`, the function returns `NaN`', function test( t ) {
var y;
y = variance( -1.0, 2.0 );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, 1.0 );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, PINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, NaN );
t.equal( isnan( y ), true, 'returns NaN' );
t.end();
});
tape( 'if provided `lambda <= 0`, the function returns `NaN`', function test( t ) {
var y;
y = variance( 2.0, -1.0 );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( 1.0, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( PINF, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NINF, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
y = variance( NaN, NINF );
t.equal( isnan( y ), true, 'returns NaN' );
t.end();
});
tape( 'the function returns the variance of a Weibull distribution', function test( t ) {
var expected;
var lambda;
var delta;
var tol;
var k;
var i;
var y;
expected = data.expected;
k = data.k;
lambda = data.lambda;
for ( i = 0; i < expected.length; i++ ) {
y = variance( k[i], lambda[i] );
if ( y === expected[i] ) {
t.equal( y, expected[i], 'k: '+k[i]+', lambda: '+lambda[i]+', y: '+y+', expected: '+expected[i] );
} else {
delta = abs( y - expected[ i ] );
tol = 300.0 * EPS * abs( expected[ i ] );
t.ok( delta <= tol, 'within tolerance. k: '+k[i]+'. lambda: '+lambda[i]+'. y: '+y+'. E: '+expected[ i ]+'. Δ: '+delta+'. tol: '+tol+'.' );
}
}
t.end();
}); | 'use strict'; | random_line_split |
hash-navigation.js | import { window, document } from 'ssr-window';
import $ from '../../utils/dom';
import Utils from '../../utils/utils';
const HashNavigation = {
onHashCange() {
const swiper = this;
const newHash = document.location.hash.replace('#', '');
const activeSlideHash = swiper.slides.eq(swiper.activeIndex).attr('data-hash');
if (newHash !== activeSlideHash) {
const newIndex = swiper.$wrapperEl.children(`.${swiper.params.slideClass}[data-hash="${newHash}"]`).index();
if (typeof newIndex === 'undefined') return;
swiper.slideTo(newIndex);
}
},
| () {
const swiper = this;
if (!swiper.hashNavigation.initialized || !swiper.params.hashNavigation.enabled) return;
if (swiper.params.hashNavigation.replaceState && window.history && window.history.replaceState) {
window.history.replaceState(null, null, (`#${swiper.slides.eq(swiper.activeIndex).attr('data-hash')}` || ''));
} else {
const slide = swiper.slides.eq(swiper.activeIndex);
const hash = slide.attr('data-hash') || slide.attr('data-history');
document.location.hash = hash || '';
}
},
init() {
const swiper = this;
if (!swiper.params.hashNavigation.enabled || (swiper.params.history && swiper.params.history.enabled)) return;
swiper.hashNavigation.initialized = true;
const hash = document.location.hash.replace('#', '');
if (hash) {
const speed = 0;
for (let i = 0, length = swiper.slides.length; i < length; i += 1) {
const slide = swiper.slides.eq(i);
const slideHash = slide.attr('data-hash') || slide.attr('data-history');
if (slideHash === hash && !slide.hasClass(swiper.params.slideDuplicateClass)) {
const index = slide.index();
swiper.slideTo(index, speed, swiper.params.runCallbacksOnInit, true);
}
}
}
if (swiper.params.hashNavigation.watchState) {
$(window).on('hashchange', swiper.hashNavigation.onHashCange);
}
},
destroy() {
const swiper = this;
if (swiper.params.hashNavigation.watchState) {
$(window).off('hashchange', swiper.hashNavigation.onHashCange);
}
},
};
export default {
name: 'hash-navigation',
params: {
hashNavigation: {
enabled: false,
replaceState: false,
watchState: false,
},
},
create() {
const swiper = this;
Utils.extend(swiper, {
hashNavigation: {
initialized: false,
init: HashNavigation.init.bind(swiper),
destroy: HashNavigation.destroy.bind(swiper),
setHash: HashNavigation.setHash.bind(swiper),
onHashCange: HashNavigation.onHashCange.bind(swiper),
},
});
},
on: {
init() {
const swiper = this;
if (swiper.params.hashNavigation.enabled) {
swiper.hashNavigation.init();
}
},
destroy() {
const swiper = this;
if (swiper.params.hashNavigation.enabled) {
swiper.hashNavigation.destroy();
}
},
transitionEnd() {
const swiper = this;
if (swiper.hashNavigation.initialized) {
swiper.hashNavigation.setHash();
}
},
},
};
| setHash | identifier_name |
hash-navigation.js | import { window, document } from 'ssr-window';
import $ from '../../utils/dom';
import Utils from '../../utils/utils';
const HashNavigation = {
onHashCange() {
const swiper = this;
const newHash = document.location.hash.replace('#', '');
const activeSlideHash = swiper.slides.eq(swiper.activeIndex).attr('data-hash');
if (newHash !== activeSlideHash) {
const newIndex = swiper.$wrapperEl.children(`.${swiper.params.slideClass}[data-hash="${newHash}"]`).index();
if (typeof newIndex === 'undefined') return;
swiper.slideTo(newIndex);
}
},
setHash() {
const swiper = this;
if (!swiper.hashNavigation.initialized || !swiper.params.hashNavigation.enabled) return;
if (swiper.params.hashNavigation.replaceState && window.history && window.history.replaceState) {
window.history.replaceState(null, null, (`#${swiper.slides.eq(swiper.activeIndex).attr('data-hash')}` || ''));
} else {
const slide = swiper.slides.eq(swiper.activeIndex);
const hash = slide.attr('data-hash') || slide.attr('data-history');
document.location.hash = hash || '';
}
},
init() {
const swiper = this;
if (!swiper.params.hashNavigation.enabled || (swiper.params.history && swiper.params.history.enabled)) return;
swiper.hashNavigation.initialized = true;
const hash = document.location.hash.replace('#', '');
if (hash) {
const speed = 0;
for (let i = 0, length = swiper.slides.length; i < length; i += 1) {
const slide = swiper.slides.eq(i);
const slideHash = slide.attr('data-hash') || slide.attr('data-history');
if (slideHash === hash && !slide.hasClass(swiper.params.slideDuplicateClass)) {
const index = slide.index();
swiper.slideTo(index, speed, swiper.params.runCallbacksOnInit, true);
}
}
}
if (swiper.params.hashNavigation.watchState) {
$(window).on('hashchange', swiper.hashNavigation.onHashCange);
}
},
destroy() {
const swiper = this;
if (swiper.params.hashNavigation.watchState) {
$(window).off('hashchange', swiper.hashNavigation.onHashCange);
}
},
};
export default {
name: 'hash-navigation',
params: {
hashNavigation: {
enabled: false,
replaceState: false,
watchState: false,
},
},
create() {
const swiper = this;
Utils.extend(swiper, {
hashNavigation: {
initialized: false,
init: HashNavigation.init.bind(swiper),
destroy: HashNavigation.destroy.bind(swiper),
setHash: HashNavigation.setHash.bind(swiper),
onHashCange: HashNavigation.onHashCange.bind(swiper),
},
});
},
on: {
init() {
const swiper = this;
if (swiper.params.hashNavigation.enabled) {
swiper.hashNavigation.init(); | const swiper = this;
if (swiper.params.hashNavigation.enabled) {
swiper.hashNavigation.destroy();
}
},
transitionEnd() {
const swiper = this;
if (swiper.hashNavigation.initialized) {
swiper.hashNavigation.setHash();
}
},
},
}; | }
},
destroy() { | random_line_split |
hash-navigation.js | import { window, document } from 'ssr-window';
import $ from '../../utils/dom';
import Utils from '../../utils/utils';
const HashNavigation = {
onHashCange() {
const swiper = this;
const newHash = document.location.hash.replace('#', '');
const activeSlideHash = swiper.slides.eq(swiper.activeIndex).attr('data-hash');
if (newHash !== activeSlideHash) {
const newIndex = swiper.$wrapperEl.children(`.${swiper.params.slideClass}[data-hash="${newHash}"]`).index();
if (typeof newIndex === 'undefined') return;
swiper.slideTo(newIndex);
}
},
setHash() | ,
init() {
const swiper = this;
if (!swiper.params.hashNavigation.enabled || (swiper.params.history && swiper.params.history.enabled)) return;
swiper.hashNavigation.initialized = true;
const hash = document.location.hash.replace('#', '');
if (hash) {
const speed = 0;
for (let i = 0, length = swiper.slides.length; i < length; i += 1) {
const slide = swiper.slides.eq(i);
const slideHash = slide.attr('data-hash') || slide.attr('data-history');
if (slideHash === hash && !slide.hasClass(swiper.params.slideDuplicateClass)) {
const index = slide.index();
swiper.slideTo(index, speed, swiper.params.runCallbacksOnInit, true);
}
}
}
if (swiper.params.hashNavigation.watchState) {
$(window).on('hashchange', swiper.hashNavigation.onHashCange);
}
},
destroy() {
const swiper = this;
if (swiper.params.hashNavigation.watchState) {
$(window).off('hashchange', swiper.hashNavigation.onHashCange);
}
},
};
export default {
name: 'hash-navigation',
params: {
hashNavigation: {
enabled: false,
replaceState: false,
watchState: false,
},
},
create() {
const swiper = this;
Utils.extend(swiper, {
hashNavigation: {
initialized: false,
init: HashNavigation.init.bind(swiper),
destroy: HashNavigation.destroy.bind(swiper),
setHash: HashNavigation.setHash.bind(swiper),
onHashCange: HashNavigation.onHashCange.bind(swiper),
},
});
},
on: {
init() {
const swiper = this;
if (swiper.params.hashNavigation.enabled) {
swiper.hashNavigation.init();
}
},
destroy() {
const swiper = this;
if (swiper.params.hashNavigation.enabled) {
swiper.hashNavigation.destroy();
}
},
transitionEnd() {
const swiper = this;
if (swiper.hashNavigation.initialized) {
swiper.hashNavigation.setHash();
}
},
},
};
| {
const swiper = this;
if (!swiper.hashNavigation.initialized || !swiper.params.hashNavigation.enabled) return;
if (swiper.params.hashNavigation.replaceState && window.history && window.history.replaceState) {
window.history.replaceState(null, null, (`#${swiper.slides.eq(swiper.activeIndex).attr('data-hash')}` || ''));
} else {
const slide = swiper.slides.eq(swiper.activeIndex);
const hash = slide.attr('data-hash') || slide.attr('data-history');
document.location.hash = hash || '';
}
} | identifier_body |
loginRedirectProvider.ts | module app.security {
"use strict";
class LoginRedirectProvider implements ng.IServiceProvider {
constructor() {
}
public loginUrl:string = "/login";
public lastPath:string;
public defaultPath:string = "/";
public setLoginUrl = (value) => {
this.loginUrl = value;
};
public setDefaultUrl = (value) => {
this.defaultPath = value;
};
public $get = ["$q","$location",($q,$location) => {
return {
responseError: (response) => {
if (response.status == 401) {
this.lastPath = $location.path();
$location.path(this.loginUrl);
}
return $q.reject(response);
},
redirectPreLogin: () => {
if (this.lastPath) {
$location.path(this.lastPath);
this.lastPath = "";
} else {
$location.path(this.defaultPath);
}
}
};
}]
}
angular.module("app.security").provider("loginRedirect", [LoginRedirectProvider])
.config(["$httpProvider", config]);
function co | httpProvider) {
$httpProvider.interceptors.push("loginRedirect");
}
} | nfig($ | identifier_name |
loginRedirectProvider.ts | module app.security {
"use strict";
class LoginRedirectProvider implements ng.IServiceProvider {
constructor() {
}
public loginUrl:string = "/login";
public lastPath:string;
public defaultPath:string = "/";
public setLoginUrl = (value) => {
this.loginUrl = value;
};
public setDefaultUrl = (value) => {
this.defaultPath = value;
};
public $get = ["$q","$location",($q,$location) => {
return {
responseError: (response) => {
if (response.status == 401) {
this.lastPath = $location.path();
$location.path(this.loginUrl);
}
return $q.reject(response);
}, | $location.path(this.lastPath);
this.lastPath = "";
} else {
$location.path(this.defaultPath);
}
}
};
}]
}
angular.module("app.security").provider("loginRedirect", [LoginRedirectProvider])
.config(["$httpProvider", config]);
function config($httpProvider) {
$httpProvider.interceptors.push("loginRedirect");
}
} |
redirectPreLogin: () => {
if (this.lastPath) { | random_line_split |
loginRedirectProvider.ts | module app.security {
"use strict";
class LoginRedirectProvider implements ng.IServiceProvider {
constructor() {
}
public loginUrl:string = "/login";
public lastPath:string;
public defaultPath:string = "/";
public setLoginUrl = (value) => {
this.loginUrl = value;
};
public setDefaultUrl = (value) => {
this.defaultPath = value;
};
public $get = ["$q","$location",($q,$location) => {
return {
responseError: (response) => {
if (response.status == 401) {
this.lastPath = $location.path();
$location.path(this.loginUrl);
}
return $q.reject(response);
},
redirectPreLogin: () => {
if (this.lastPath) {
| lse {
$location.path(this.defaultPath);
}
}
};
}]
}
angular.module("app.security").provider("loginRedirect", [LoginRedirectProvider])
.config(["$httpProvider", config]);
function config($httpProvider) {
$httpProvider.interceptors.push("loginRedirect");
}
} | $location.path(this.lastPath);
this.lastPath = "";
} e | conditional_block |
loginRedirectProvider.ts | module app.security {
"use strict";
class LoginRedirectProvider implements ng.IServiceProvider {
constructor() {
}
public loginUrl:string = "/login";
public lastPath:string;
public defaultPath:string = "/";
public setLoginUrl = (value) => {
this.loginUrl = value;
};
public setDefaultUrl = (value) => {
this.defaultPath = value;
};
public $get = ["$q","$location",($q,$location) => {
return {
responseError: (response) => {
if (response.status == 401) {
this.lastPath = $location.path();
$location.path(this.loginUrl);
}
return $q.reject(response);
},
redirectPreLogin: () => {
if (this.lastPath) {
$location.path(this.lastPath);
this.lastPath = "";
} else {
$location.path(this.defaultPath);
}
}
};
}]
}
angular.module("app.security").provider("loginRedirect", [LoginRedirectProvider])
.config(["$httpProvider", config]);
function config($httpProvider) {
| $httpProvider.interceptors.push("loginRedirect");
}
} | identifier_body |
|
jy_serv.py | #!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf)
#print new_pdf
#print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
|
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
def pdf_merge(pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
t0=time.time()
pdf=pdf1
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return pdf
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
| try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
except Exception,e:
raise Exception("Field %s: %s"%(k,str(e))) | conditional_block |
jy_serv.py | #!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf)
#print new_pdf
#print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
except Exception,e:
raise Exception("Field %s: %s"%(k,str(e)))
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
def | (pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
t0=time.time()
pdf=pdf1
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return pdf
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
| pdf_merge | identifier_name |
jy_serv.py | #!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf) | #print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
except Exception,e:
raise Exception("Field %s: %s"%(k,str(e)))
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
def pdf_merge(pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
t0=time.time()
pdf=pdf1
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return pdf
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever() | #print new_pdf | random_line_split |
jy_serv.py | #!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf)
#print new_pdf
#print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
except Exception,e:
raise Exception("Field %s: %s"%(k,str(e)))
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
def pdf_merge(pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
|
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
| t0=time.time()
pdf=pdf1
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return pdf | identifier_body |
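# --- Usage sketch ---
# A minimal Python 2 client for the XML-RPC service above. The template name, output
# name and form-field values are placeholders; only the endpoint and the
# pdf_fill(orig_pdf, new_pdf, vals) signature come from the server code.
import xmlrpclib

proxy = xmlrpclib.ServerProxy("http://localhost:9999")

# Keys are AcroForm field names in the template, values are UTF-8 encoded strings.
vals = {
    "customer_name": "Example Customer",
    "invoice_no": "INV-0001",
}
ok = proxy.pdf_fill("template.pdf", "filled.pdf", vals)
print "pdf_fill returned:", ok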
humancapitalsearch.py | import functools
import os
import numpy as np
import pygmo as pg
from simulation import simulate, statistical
from solving import value_function_list
from util import constants as cs, param_type
class HumanCapitalSearchProblem(object):
|
def calculate_criterion(simulate_coefficients,
data_coefficients, weights):
cutoff = 155
try:
squared_coeff_diffs = (data_coefficients[:cutoff]
- simulate_coefficients[:cutoff])**2
return np.sum(squared_coeff_diffs * weights[:cutoff]**2)
except ValueError:
return 1000000.0
return None
def convert_hours_to_seconds(hours):
seconds = 60 * 60 * hours
return int(seconds)
if __name__ == '__main__':
x0 = np.array([ 3.50002199e-03, 6.51848176e-03, 1.51129690e-02,
5.44408669e-01, 4.00993663e-01, 6.55844833e-02,
6.07802957e+00, 1.60167206e+00, 5.01869425e+00,
4.72961572e+00, 9.38466921e+00, 5.05588161e+00,
8.19033636e+00, 2.75929445e+00, 2.85635433e+00,
1.75737616e+00, 7.80585097e-01, 7.75955256e-01,
7.84082645e-01, 7.84472240e-01, 7.88595353e-01,
7.56837829e-01, 7.95899147e-01, 1.00607895e-01,
9.54173933e-02, 1.01830970e-01, 2.35455817e-01,
4.34618429e-01, 5.05177886e-01, 4.97754216e-01,
1.33424724e+00, 1.33335481e+00, 5.14048248e-01,
5.31256998e-01, 2.72639929e-02, 2.06973106e-01,
7.44039604e-01, 3.35103286e-01, 8.64058736e-01,
2.01314260e-01, 7.48161453e-01, 1.98923666e-01,
7.49378943e-01, 1.96135026e-01, 7.52297629e-01,
2.03848678e-01, 7.48561095e-01, 1.98618489e-01,
9.17364498e-01, 1.97851509e-01, 7.42171336e-01,
1.98302575e-01, 7.41711271e-01]) #array([ 149.86359966])
hc_problem = HumanCapitalSearchProblem()
hc_problem.fitness(x0)
np.set_printoptions(threshold=10000)
np.random.seed(128)
prob = pg.problem(hc_problem)
pop = pg.population(prob=prob, size=0)
pop.push_back(x0, f=[149.86359966])
archi = pg.archipelago(pop=pop)
methods = ["bobyqa", 'neldermead', 'praxis', "newuoa_bound", "sbplx"]
algo_list = []
for method in methods:
nl= pg.nlopt(method)
nl.stopval = 1e-8
nl.ftol_rel = 1e-8
nl.xtol_rel = 1e-8
nl.xtol_abs = 1e-8
nl.ftol_abs = 1e-8
nl.maxtime = convert_hours_to_seconds(12)
algo = pg.algorithm(nl)
archi.push_back(algo=algo, pop=pop)
print(archi)
archi.evolve()
import time
while archi.status is pg.core._evolve_status.busy:
time.sleep(120)
print(archi)
archi.wait_check()
print(archi.get_champions_x())
print(archi.get_champions_f())
| def fitness(self, params_nparray, gradient_eval=False):
params_paramtype = param_type.transform_array_to_paramstype(params_nparray)
optimal_value_functions = value_function_list.backwards_iterate(params_paramtype)
# if display_plots:
# plotting.plot_valuefunctions(optimal_value_functions)
panel_data = simulate.simulate_data(params_paramtype, optimal_value_functions)
# if save_panel_data:
# np.savetxt('simulated_data.csv', panel_data.T, delimiter=',')
simulated_coefficients, _, _, _ = statistical.calculate_coefficients(panel_data)
criterion_value = calculate_criterion(
simulated_coefficients, cs.data_coeffs, cs.weights)
if not gradient_eval:
print('within_val {0}:'.format(os.getpid()), repr(params_nparray), repr(np.array([criterion_value])))
return np.array([criterion_value])
def get_name(self):
return 'Human Capital Search Problem'
def get_bounds(self):
lowerbounds, upperbounds, _ = param_type.gen_initial_point()
return (lowerbounds, upperbounds)
def gradient(self, x):
grad_fitness = functools.partial(self.fitness, gradient_eval=True)
return pg.estimate_gradient(grad_fitness, x) | identifier_body |
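# --- Worked example ---
# calculate_criterion() above is a weighted sum of squared coefficient differences over
# the first 155 coefficients. The tiny arrays below are made up purely to show the
# arithmetic; the real inputs come from cs.data_coeffs and cs.weights.
import numpy as np

data_coeffs = np.array([0.10, 0.25, 0.40])
sim_coeffs = np.array([0.12, 0.20, 0.43])
weights = np.array([1.0, 2.0, 0.5])

criterion = np.sum((data_coeffs - sim_coeffs) ** 2 * weights ** 2)
print(criterion)  # 0.02**2 * 1 + 0.05**2 * 4 + 0.03**2 * 0.25 = 0.010625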
humancapitalsearch.py | import functools
import os
import numpy as np
import pygmo as pg
from simulation import simulate, statistical
from solving import value_function_list
from util import constants as cs, param_type
class HumanCapitalSearchProblem(object):
def fitness(self, params_nparray, gradient_eval=False):
params_paramtype = param_type.transform_array_to_paramstype(params_nparray)
optimal_value_functions = value_function_list.backwards_iterate(params_paramtype)
# if display_plots:
# plotting.plot_valuefunctions(optimal_value_functions)
panel_data = simulate.simulate_data(params_paramtype, optimal_value_functions)
# if save_panel_data:
# np.savetxt('simulated_data.csv', panel_data.T, delimiter=',')
simulated_coefficients, _, _, _ = statistical.calculate_coefficients(panel_data)
criterion_value = calculate_criterion(
simulated_coefficients, cs.data_coeffs, cs.weights)
if not gradient_eval:
|
return np.array([criterion_value])
def get_name(self):
return 'Human Capital Search Problem'
def get_bounds(self):
lowerbounds, upperbounds, _ = param_type.gen_initial_point()
return (lowerbounds, upperbounds)
def gradient(self, x):
grad_fitness = functools.partial(self.fitness, gradient_eval=True)
return pg.estimate_gradient(grad_fitness, x)
def calculate_criterion(simulate_coefficients,
data_coefficients, weights):
cutoff = 155
try:
squared_coeff_diffs = (data_coefficients[:cutoff]
- simulate_coefficients[:cutoff])**2
return np.sum(squared_coeff_diffs * weights[:cutoff]**2)
except ValueError:
return 1000000.0
return None
def convert_hours_to_seconds(hours):
seconds = 60 * 60 * hours
return int(seconds)
if __name__ == '__main__':
x0 = np.array([ 3.50002199e-03, 6.51848176e-03, 1.51129690e-02,
5.44408669e-01, 4.00993663e-01, 6.55844833e-02,
6.07802957e+00, 1.60167206e+00, 5.01869425e+00,
4.72961572e+00, 9.38466921e+00, 5.05588161e+00,
8.19033636e+00, 2.75929445e+00, 2.85635433e+00,
1.75737616e+00, 7.80585097e-01, 7.75955256e-01,
7.84082645e-01, 7.84472240e-01, 7.88595353e-01,
7.56837829e-01, 7.95899147e-01, 1.00607895e-01,
9.54173933e-02, 1.01830970e-01, 2.35455817e-01,
4.34618429e-01, 5.05177886e-01, 4.97754216e-01,
1.33424724e+00, 1.33335481e+00, 5.14048248e-01,
5.31256998e-01, 2.72639929e-02, 2.06973106e-01,
7.44039604e-01, 3.35103286e-01, 8.64058736e-01,
2.01314260e-01, 7.48161453e-01, 1.98923666e-01,
7.49378943e-01, 1.96135026e-01, 7.52297629e-01,
2.03848678e-01, 7.48561095e-01, 1.98618489e-01,
9.17364498e-01, 1.97851509e-01, 7.42171336e-01,
1.98302575e-01, 7.41711271e-01]) #array([ 149.86359966])
hc_problem = HumanCapitalSearchProblem()
hc_problem.fitness(x0)
np.set_printoptions(threshold=10000)
np.random.seed(128)
prob = pg.problem(hc_problem)
pop = pg.population(prob=prob, size=0)
pop.push_back(x0, f=[149.86359966])
archi = pg.archipelago(pop=pop)
methods = ["bobyqa", 'neldermead', 'praxis', "newuoa_bound", "sbplx"]
algo_list = []
for method in methods:
nl= pg.nlopt(method)
nl.stopval = 1e-8
nl.ftol_rel = 1e-8
nl.xtol_rel = 1e-8
nl.xtol_abs = 1e-8
nl.ftol_abs = 1e-8
nl.maxtime = convert_hours_to_seconds(12)
algo = pg.algorithm(nl)
archi.push_back(algo=algo, pop=pop)
print(archi)
archi.evolve()
import time
while archi.status is pg.core._evolve_status.busy:
time.sleep(120)
print(archi)
archi.wait_check()
print(archi.get_champions_x())
print(archi.get_champions_f())
| print('within_val {0}:'.format(os.getpid()), repr(params_nparray), repr(np.array([criterion_value]))) | conditional_block |
humancapitalsearch.py | import functools
import os
import numpy as np
import pygmo as pg
from simulation import simulate, statistical
from solving import value_function_list
from util import constants as cs, param_type
class HumanCapitalSearchProblem(object):
def fitness(self, params_nparray, gradient_eval=False):
params_paramtype = param_type.transform_array_to_paramstype(params_nparray)
optimal_value_functions = value_function_list.backwards_iterate(params_paramtype)
# if display_plots:
# plotting.plot_valuefunctions(optimal_value_functions)
panel_data = simulate.simulate_data(params_paramtype, optimal_value_functions)
# if save_panel_data:
# np.savetxt('simulated_data.csv', panel_data.T, delimiter=',')
simulated_coefficients, _, _, _ = statistical.calculate_coefficients(panel_data)
criterion_value = calculate_criterion(
simulated_coefficients, cs.data_coeffs, cs.weights)
if not gradient_eval:
print('within_val {0}:'.format(os.getpid()), repr(params_nparray), repr(np.array([criterion_value])))
return np.array([criterion_value])
def get_name(self):
return 'Human Capital Search Problem'
def get_bounds(self):
lowerbounds, upperbounds, _ = param_type.gen_initial_point()
return (lowerbounds, upperbounds)
def gradient(self, x):
grad_fitness = functools.partial(self.fitness, gradient_eval=True)
return pg.estimate_gradient(grad_fitness, x)
def calculate_criterion(simulate_coefficients,
data_coefficients, weights):
cutoff = 155
try:
squared_coeff_diffs = (data_coefficients[:cutoff]
- simulate_coefficients[:cutoff])**2
return np.sum(squared_coeff_diffs * weights[:cutoff]**2)
except ValueError:
return 1000000.0
return None
def | (hours):
seconds = 60 * 60 * hours
return int(seconds)
if __name__ == '__main__':
x0 = np.array([ 3.50002199e-03, 6.51848176e-03, 1.51129690e-02,
5.44408669e-01, 4.00993663e-01, 6.55844833e-02,
6.07802957e+00, 1.60167206e+00, 5.01869425e+00,
4.72961572e+00, 9.38466921e+00, 5.05588161e+00,
8.19033636e+00, 2.75929445e+00, 2.85635433e+00,
1.75737616e+00, 7.80585097e-01, 7.75955256e-01,
7.84082645e-01, 7.84472240e-01, 7.88595353e-01,
7.56837829e-01, 7.95899147e-01, 1.00607895e-01,
9.54173933e-02, 1.01830970e-01, 2.35455817e-01,
4.34618429e-01, 5.05177886e-01, 4.97754216e-01,
1.33424724e+00, 1.33335481e+00, 5.14048248e-01,
5.31256998e-01, 2.72639929e-02, 2.06973106e-01,
7.44039604e-01, 3.35103286e-01, 8.64058736e-01,
2.01314260e-01, 7.48161453e-01, 1.98923666e-01,
7.49378943e-01, 1.96135026e-01, 7.52297629e-01,
2.03848678e-01, 7.48561095e-01, 1.98618489e-01,
9.17364498e-01, 1.97851509e-01, 7.42171336e-01,
1.98302575e-01, 7.41711271e-01]) #array([ 149.86359966])
hc_problem = HumanCapitalSearchProblem()
hc_problem.fitness(x0)
np.set_printoptions(threshold=10000)
np.random.seed(128)
prob = pg.problem(hc_problem)
pop = pg.population(prob=prob, size=0)
pop.push_back(x0, f=[149.86359966])
archi = pg.archipelago(pop=pop)
methods = ["bobyqa", 'neldermead', 'praxis', "newuoa_bound", "sbplx"]
algo_list = []
for method in methods:
nl= pg.nlopt(method)
nl.stopval = 1e-8
nl.ftol_rel = 1e-8
nl.xtol_rel = 1e-8
nl.xtol_abs = 1e-8
nl.ftol_abs = 1e-8
nl.maxtime = convert_hours_to_seconds(12)
algo = pg.algorithm(nl)
archi.push_back(algo=algo, pop=pop)
print(archi)
archi.evolve()
import time
while archi.status is pg.core._evolve_status.busy:
time.sleep(120)
print(archi)
archi.wait_check()
print(archi.get_champions_x())
print(archi.get_champions_f())
| convert_hours_to_seconds | identifier_name |
humancapitalsearch.py | import functools
import os
import numpy as np
import pygmo as pg
from simulation import simulate, statistical
from solving import value_function_list
from util import constants as cs, param_type
class HumanCapitalSearchProblem(object):
def fitness(self, params_nparray, gradient_eval=False):
params_paramtype = param_type.transform_array_to_paramstype(params_nparray)
optimal_value_functions = value_function_list.backwards_iterate(params_paramtype)
# if display_plots:
# plotting.plot_valuefunctions(optimal_value_functions)
panel_data = simulate.simulate_data(params_paramtype, optimal_value_functions)
# if save_panel_data:
# np.savetxt('simulated_data.csv', panel_data.T, delimiter=',')
simulated_coefficients, _, _, _ = statistical.calculate_coefficients(panel_data)
criterion_value = calculate_criterion(
simulated_coefficients, cs.data_coeffs, cs.weights)
if not gradient_eval:
print('within_val {0}:'.format(os.getpid()), repr(params_nparray), repr(np.array([criterion_value])))
return np.array([criterion_value])
def get_name(self):
return 'Human Capital Search Problem'
def get_bounds(self):
lowerbounds, upperbounds, _ = param_type.gen_initial_point()
return (lowerbounds, upperbounds)
def gradient(self, x):
grad_fitness = functools.partial(self.fitness, gradient_eval=True)
return pg.estimate_gradient(grad_fitness, x)
def calculate_criterion(simulate_coefficients,
data_coefficients, weights):
cutoff = 155
try:
squared_coeff_diffs = (data_coefficients[:cutoff]
- simulate_coefficients[:cutoff])**2
return np.sum(squared_coeff_diffs * weights[:cutoff]**2)
except ValueError:
return 1000000.0
return None
def convert_hours_to_seconds(hours):
seconds = 60 * 60 * hours
return int(seconds)
if __name__ == '__main__':
x0 = np.array([ 3.50002199e-03, 6.51848176e-03, 1.51129690e-02,
5.44408669e-01, 4.00993663e-01, 6.55844833e-02,
6.07802957e+00, 1.60167206e+00, 5.01869425e+00,
4.72961572e+00, 9.38466921e+00, 5.05588161e+00,
8.19033636e+00, 2.75929445e+00, 2.85635433e+00,
1.75737616e+00, 7.80585097e-01, 7.75955256e-01,
7.84082645e-01, 7.84472240e-01, 7.88595353e-01,
7.56837829e-01, 7.95899147e-01, 1.00607895e-01,
9.54173933e-02, 1.01830970e-01, 2.35455817e-01,
4.34618429e-01, 5.05177886e-01, 4.97754216e-01,
1.33424724e+00, 1.33335481e+00, 5.14048248e-01,
5.31256998e-01, 2.72639929e-02, 2.06973106e-01,
7.44039604e-01, 3.35103286e-01, 8.64058736e-01,
2.01314260e-01, 7.48161453e-01, 1.98923666e-01,
7.49378943e-01, 1.96135026e-01, 7.52297629e-01,
2.03848678e-01, 7.48561095e-01, 1.98618489e-01,
9.17364498e-01, 1.97851509e-01, 7.42171336e-01,
1.98302575e-01, 7.41711271e-01]) #array([ 149.86359966])
hc_problem = HumanCapitalSearchProblem()
hc_problem.fitness(x0)
np.set_printoptions(threshold=10000)
np.random.seed(128)
prob = pg.problem(hc_problem)
pop = pg.population(prob=prob, size=0)
pop.push_back(x0, f=[149.86359966])
archi = pg.archipelago(pop=pop)
methods = ["bobyqa", 'neldermead', 'praxis', "newuoa_bound", "sbplx"]
algo_list = []
for method in methods:
nl= pg.nlopt(method)
nl.stopval = 1e-8
nl.ftol_rel = 1e-8
nl.xtol_rel = 1e-8
nl.xtol_abs = 1e-8
nl.ftol_abs = 1e-8
nl.maxtime = convert_hours_to_seconds(12)
algo = pg.algorithm(nl)
archi.push_back(algo=algo, pop=pop)
print(archi)
archi.evolve()
import time | print(archi.get_champions_x())
print(archi.get_champions_f()) | while archi.status is pg.core._evolve_status.busy:
time.sleep(120)
print(archi)
archi.wait_check() | random_line_split |
building.d.ts | declare namespace AMap {
namespace Buildings {
interface Options extends Layer.Options {
/**
             * Zoom level range within which the layer is visible
*/
zooms?: [number, number];
/**
             * Opacity
*/
opacity?: number;
/**
             * Height scale factor; controls the building height in the 3D view
*/
heightFactor?: number;
/**
             * Whether the layer is visible
*/
visible?: boolean;
/**
             * Layer z-index
*/
zIndex?: number;
// inner
merge?: boolean;
sort?: boolean;
}
interface AreaStyle {
color1: string;
path: LocationValue[];
color2?: string;
visible?: boolean;
rejectTexture?: boolean;
}
interface Style {
hideWithoutStyle?: boolean;
areas: AreaStyle[];
}
}
class Buildings extends Layer {
/**
* 楼块图层,单独展示矢量化的楼块图层
| @param opts 图层选项
*/
constructor(opts?: Buildings.Options);
/**
         * Set the building colors by area
         * @param style Color settings
*/
setStyle(style: Buildings.Style): void;
}
}
| * | identifier_name |
building.d.ts | declare namespace AMap {
namespace Buildings {
interface Options extends Layer.Options {
/**
             * Zoom level range within which the layer is visible
*/
zooms?: [number, number];
/**
             * Opacity
*/
opacity?: number;
/** | * 是否可见
*/
visible?: boolean;
/**
             * Layer z-index
*/
zIndex?: number;
// inner
merge?: boolean;
sort?: boolean;
}
interface AreaStyle {
color1: string;
path: LocationValue[];
color2?: string;
visible?: boolean;
rejectTexture?: boolean;
}
interface Style {
hideWithoutStyle?: boolean;
areas: AreaStyle[];
}
}
class Buildings extends Layer {
/**
         * Buildings layer; renders the vectorized building blocks as a standalone layer
         * @param opts Layer options
*/
constructor(opts?: Buildings.Options);
/**
         * Set the building colors by area
         * @param style Color settings
*/
setStyle(style: Buildings.Style): void;
}
} | * 高度比例系数,可控制3D视图下的楼块高度
*/
heightFactor?: number;
/** | random_line_split |
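// --- Usage sketch ---
// A minimal example of using the Buildings layer declared above. The zoom range,
// coordinates and colours are placeholders; only the option and style fields come
// from the typings.
const buildings = new AMap.Buildings({
    zooms: [15, 20],
    opacity: 0.8,
    heightFactor: 2,
    zIndex: 10,
});
buildings.setStyle({
    hideWithoutStyle: false,
    areas: [{
        color1: "ffff0000",   // gradient colours given as 8-digit hex strings
        color2: "fffa8072",
        path: [[116.40, 39.92], [116.41, 39.92], [116.41, 39.91], [116.40, 39.91]],
    }],
});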
word_cluster.py | from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
class WordCluster(object):
"""
WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.
: param embedding : Word2Vec object
: param n_clusters, int, number of clusters in output
"""
def __init__(self):
self.word_to_cluster_dict = {}
self.cluster_dict = {}
def cluster(self, embedding, n_clusters=None):
vocab = list(embedding.wv.vocab.keys())
vocab_vectors = np.array([embedding[word] for word in vocab])
if not n_clusters:
# number of clusters = 10% of embedding vocabulary
# if larger than 1000, limit to 1000
n_clusters = int(len(vocab) * 0.1)
if n_clusters > 1000:
n_clusters = 1000
clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
cluster_labels = clustering.labels_
for i,cluster_label in enumerate(cluster_labels):
|
return True
def query(self, word):
try:
return self.cluster_dict[self.word_to_cluster_dict[word]]
except:
return []
def text_to_clusters(self, text):
text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
return ' '.join(text)
def save(self, file_path):
try:
data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
with open(file_path, 'w') as fh:
fh.write(json.dumps(data))
return True
except:
return False
def load(self, unique_id, task_type='train_tagger'):
file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
try:
with open(file_path) as fh:
data = json.loads(fh.read())
self.cluster_dict = data["cluster_dict"]
self.word_to_cluster_dict = data["word_to_cluster_dict"]
except:
return False
| word = vocab[i]
etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
if etalon not in self.cluster_dict:
self.cluster_dict[etalon] = []
self.cluster_dict[etalon].append(word)
self.word_to_cluster_dict[word] = etalon | conditional_block |
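# --- Usage sketch ---
# A minimal example of how WordCluster might be used. The toy corpus and output path
# are placeholders, and it assumes the same pre-4.0 gensim API that the class itself
# relies on (embedding.wv.vocab, embedding[word]).
from gensim.models import Word2Vec

sentences = [
    ["the", "cat", "sat", "on", "the", "mat"],
    ["the", "dog", "sat", "on", "the", "rug"],
]
embedding = Word2Vec(sentences, min_count=1, size=50)

clusterer = WordCluster()
clusterer.cluster(embedding, n_clusters=5)

print(clusterer.query("cat"))                             # words sharing a cluster with "cat"
print(clusterer.text_to_clusters(["the", "dog", "sat"]))  # cluster etalons joined into a string
clusterer.save("/tmp/word_clusters.json")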
word_cluster.py | from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
class WordCluster(object):
"""
WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.
: param embedding : Word2Vec object
: param n_clusters, int, number of clusters in output
"""
def __init__(self):
self.word_to_cluster_dict = {}
self.cluster_dict = {}
def cluster(self, embedding, n_clusters=None):
|
def query(self, word):
try:
return self.cluster_dict[self.word_to_cluster_dict[word]]
except:
return []
def text_to_clusters(self, text):
text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
return ' '.join(text)
def save(self, file_path):
try:
data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
with open(file_path, 'w') as fh:
fh.write(json.dumps(data))
return True
except:
return False
def load(self, unique_id, task_type='train_tagger'):
file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
try:
with open(file_path) as fh:
data = json.loads(fh.read())
self.cluster_dict = data["cluster_dict"]
self.word_to_cluster_dict = data["word_to_cluster_dict"]
except:
return False
| vocab = list(embedding.wv.vocab.keys())
vocab_vectors = np.array([embedding[word] for word in vocab])
if not n_clusters:
# number of clusters = 10% of embedding vocabulary
# if larger than 1000, limit to 1000
n_clusters = int(len(vocab) * 0.1)
if n_clusters > 1000:
n_clusters = 1000
clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
cluster_labels = clustering.labels_
for i,cluster_label in enumerate(cluster_labels):
word = vocab[i]
etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
if etalon not in self.cluster_dict:
self.cluster_dict[etalon] = []
self.cluster_dict[etalon].append(word)
self.word_to_cluster_dict[word] = etalon
return True | identifier_body |
word_cluster.py | from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
| class WordCluster(object):
"""
WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.
: param embedding : Word2Vec object
: param n_clusters, int, number of clusters in output
"""
def __init__(self):
self.word_to_cluster_dict = {}
self.cluster_dict = {}
def cluster(self, embedding, n_clusters=None):
vocab = list(embedding.wv.vocab.keys())
vocab_vectors = np.array([embedding[word] for word in vocab])
if not n_clusters:
# number of clusters = 10% of embedding vocabulary
# if larger than 1000, limit to 1000
n_clusters = int(len(vocab) * 0.1)
if n_clusters > 1000:
n_clusters = 1000
clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
cluster_labels = clustering.labels_
for i,cluster_label in enumerate(cluster_labels):
word = vocab[i]
etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
if etalon not in self.cluster_dict:
self.cluster_dict[etalon] = []
self.cluster_dict[etalon].append(word)
self.word_to_cluster_dict[word] = etalon
return True
def query(self, word):
try:
return self.cluster_dict[self.word_to_cluster_dict[word]]
except:
return []
def text_to_clusters(self, text):
text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
return ' '.join(text)
def save(self, file_path):
try:
data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
with open(file_path, 'w') as fh:
fh.write(json.dumps(data))
return True
except:
return False
def load(self, unique_id, task_type='train_tagger'):
file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
try:
with open(file_path) as fh:
data = json.loads(fh.read())
self.cluster_dict = data["cluster_dict"]
self.word_to_cluster_dict = data["word_to_cluster_dict"]
except:
return False | random_line_split |
|
word_cluster.py | from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
class WordCluster(object):
"""
WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.
: param embedding : Word2Vec object
: param n_clusters, int, number of clusters in output
"""
def __init__(self):
self.word_to_cluster_dict = {}
self.cluster_dict = {}
def cluster(self, embedding, n_clusters=None):
vocab = list(embedding.wv.vocab.keys())
vocab_vectors = np.array([embedding[word] for word in vocab])
if not n_clusters:
# number of clusters = 10% of embedding vocabulary
# if larger than 1000, limit to 1000
n_clusters = int(len(vocab) * 0.1)
if n_clusters > 1000:
n_clusters = 1000
clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
cluster_labels = clustering.labels_
for i,cluster_label in enumerate(cluster_labels):
word = vocab[i]
etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
if etalon not in self.cluster_dict:
self.cluster_dict[etalon] = []
self.cluster_dict[etalon].append(word)
self.word_to_cluster_dict[word] = etalon
return True
def query(self, word):
try:
return self.cluster_dict[self.word_to_cluster_dict[word]]
except:
return []
def | (self, text):
text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
return ' '.join(text)
def save(self, file_path):
try:
data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
with open(file_path, 'w') as fh:
fh.write(json.dumps(data))
return True
except:
return False
def load(self, unique_id, task_type='train_tagger'):
file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
try:
with open(file_path) as fh:
data = json.loads(fh.read())
self.cluster_dict = data["cluster_dict"]
self.word_to_cluster_dict = data["word_to_cluster_dict"]
except:
return False
| text_to_clusters | identifier_name |
auth.service.ts | import { Injectable } from '@angular/core';
import { Http, Headers } from '@angular/http';
import 'rxjs/add/operator/map';
import { tokenNotExpired } from 'angular2-jwt';
import { environment } from '../../environments/environment';
const apiUrl = environment.apiUrl;
@Injectable()
export class AuthService {
authToken: any;
user: any;
| (private http: Http) { }
registerUser (user) {
let headers = new Headers();
headers.append('Content-Type', 'application/json');
return this.http.post(apiUrl + 'users/register', user, {headers: headers})
.map(res => res.json());
}
authenticateUser (user) {
let headers = new Headers();
headers.append('Content-Type', 'application/json');
return this.http.post(apiUrl + 'users/authenticate', user, {headers: headers})
.map(res => res.json());
}
getProfile () {
let headers = new Headers();
this.loadToken();
headers.append('Authorization',this.authToken);
headers.append('Content-Type', 'application/json');
return this.http.get(apiUrl + 'users/profile', {headers: headers})
.map(res => res.json());
}
storeUserData (token, user) {
localStorage.setItem('id_token', token);
localStorage.setItem('user', JSON.stringify(user));
this.authToken = token;
this.user = user;
}
loadToken () {
const token = localStorage.getItem('id_token');
this.authToken = token;
}
loggedIn () {
// console.log(tokenNotExpired());
// return true;
return tokenNotExpired('id_token');
}
logout () {
this.authToken = null;
this.user = null;
localStorage.clear();
}
}
| constructor | identifier_name |
auth.service.ts | import { Injectable } from '@angular/core';
import { Http, Headers } from '@angular/http';
import 'rxjs/add/operator/map';
import { tokenNotExpired } from 'angular2-jwt';
import { environment } from '../../environments/environment';
const apiUrl = environment.apiUrl;
@Injectable()
export class AuthService {
authToken: any;
user: any;
constructor(private http: Http) { }
registerUser (user) {
let headers = new Headers();
headers.append('Content-Type', 'application/json');
return this.http.post(apiUrl + 'users/register', user, {headers: headers})
.map(res => res.json());
}
authenticateUser (user) {
let headers = new Headers();
headers.append('Content-Type', 'application/json');
return this.http.post(apiUrl + 'users/authenticate', user, {headers: headers})
.map(res => res.json());
}
getProfile () {
let headers = new Headers();
this.loadToken();
headers.append('Authorization',this.authToken);
headers.append('Content-Type', 'application/json');
return this.http.get(apiUrl + 'users/profile', {headers: headers})
.map(res => res.json());
}
storeUserData (token, user) {
localStorage.setItem('id_token', token);
localStorage.setItem('user', JSON.stringify(user));
this.authToken = token;
this.user = user;
}
loadToken () {
const token = localStorage.getItem('id_token');
this.authToken = token;
}
loggedIn () {
// console.log(tokenNotExpired());
// return true;
return tokenNotExpired('id_token');
} | this.user = null;
localStorage.clear();
}
} |
logout () {
this.authToken = null; | random_line_split |
feed.ts | import { Comment } from './comment'
export class | {
id:number;
body:string;
mlink:string;
is_system:string;
is_edited:boolean;
is_ack:boolean;
feed_type:string;
feed_status_type:string;
category:string;
full_details_mobile_url:string;
group_id:string;
group_name:string;
group_privacy:string;
group_sub_type:string;
has_guest:boolean;
like_count:number;
superlike_count:number;
haha_count:number;
yay_count:number;
wow_count:number;
sad_count:number;
comment_count:number;
attachment_count:number;
platform:string;
liked:boolean;
superliked:boolean;
haha:boolean;
yay:boolean;
wow:boolean;
sad:boolean;
watched:boolean;
unread:boolean;
comments: Array<Comment>;
mention_tags: Array<any>;
attachments: Array<any>;
attachment_references: Array<any>;
liked_list: Array<any>;
from_user: any;
to_user: any;
feed_property: any;
}
| Feed | identifier_name |
feed.ts | import { Comment } from './comment'
export class Feed {
id:number;
body:string;
mlink:string;
is_system:string;
is_edited:boolean;
is_ack:boolean;
feed_type:string;
feed_status_type:string;
category:string;
full_details_mobile_url:string;
group_id:string;
group_name:string;
group_privacy:string;
group_sub_type:string; | superlike_count:number;
haha_count:number;
yay_count:number;
wow_count:number;
sad_count:number;
comment_count:number;
attachment_count:number;
platform:string;
liked:boolean;
superliked:boolean;
haha:boolean;
yay:boolean;
wow:boolean;
sad:boolean;
watched:boolean;
unread:boolean;
comments: Array<Comment>;
mention_tags: Array<any>;
attachments: Array<any>;
attachment_references: Array<any>;
liked_list: Array<any>;
from_user: any;
to_user: any;
feed_property: any;
} | has_guest:boolean;
like_count:number; | random_line_split |
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response;
pub mod client;
// Registered port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
    /// SSL context to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
    /// Enable UTF8 mailboxes in envelope or headers
smtp_utf8: bool,
    /// List of authentication mechanisms, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
            None => Err(From::from("Could not resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder {
self.security_level = level;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()``
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn | (mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
    /// It does not connect to the server, but only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if !$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
    /// It does not connect to the server, but only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if !self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if !found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) {
self.reset();
}
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
}
| connection_reuse | identifier_name |
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response;
pub mod client;
// Registered port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
    /// SSL context to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
    /// Enable UTF8 mailboxes in envelope or headers
smtp_utf8: bool,
    /// List of authentication mechanisms, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
            None => Err(From::from("Could not resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder |
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()``
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn connection_reuse(mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
    /// It does not connect to the server, but only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if !$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
    /// It does not connect to the server, but only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if !self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if !found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) {
self.reset();
}
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
}
| {
self.security_level = level;
self
} | identifier_body |
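// --- Usage sketch ---
// A minimal example of building a transport and sending a mail with it. The host,
// credentials and the concrete `SendableEmail` value are placeholders; paths are
// written as seen from inside this crate.
fn send_example<E: SendableEmail>(email: E) {
    // Resolve the server address; bail out quietly if the hostname cannot be resolved.
    let builder = match SmtpTransportBuilder::new(("smtp.example.com", SUBMISSION_PORT)) {
        Ok(builder) => builder,
        Err(_) => return,
    };
    // Require STARTTLS, authenticate, and keep the connection open between sends.
    let mut mailer = builder
        .hello_name("client.example.com")
        .credentials("user", "password")
        .security_level(SecurityLevel::AlwaysEncrypt)
        .connection_reuse(true)
        .build();
    match mailer.send(email) {
        Ok(_) => println!("email accepted by the server"),
        Err(_) => println!("sending failed"),
    }
    mailer.close();
}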
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response; | pub mod client;
// Registrated port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
/// SSL contexyt to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
/// Enable UTF8 mailboxes in enveloppe or headers
smtp_utf8: bool,
/// List of authentication mechanism, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
None => Err(From::from("Could not resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder {
self.security_level = level;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()`
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using a TLS-wrapped connection (without STARTTLS)
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn connection_reuse(mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
/// It does not connect to the server; it only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
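// A minimal usage sketch of the builder above (illustrative only, not part of
// the original module): the host, hello name and credentials are placeholders,
// and `email` stands for some value implementing `SendableEmail`.
//
//     let mut mailer = SmtpTransportBuilder::new(("smtp.example.org", SUBMISSION_PORT))
//         .unwrap()
//         .hello_name("my-host.example.org")
//         .credentials("user", "password")
//         .connection_reuse(true)
//         .build();
//     let result = mailer.send(email);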
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if !$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
/// It does not connect to the server; it only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if !self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if !found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) {
self.reset();
}
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
} | random_line_split |
|
mod.rs | //! Sends an email using the client
use std::string::String;
use std::net::{SocketAddr, ToSocketAddrs};
use openssl::ssl::{SslContext, SslMethod};
use transport::error::{EmailResult, Error};
use transport::smtp::extension::{Extension, ServerInfo};
use transport::smtp::client::Client;
use transport::smtp::authentication::Mechanism;
use transport::EmailTransport;
use email::SendableEmail;
pub mod extension;
pub mod authentication;
pub mod response;
pub mod client;
// Registered port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub static SMTP_PORT: u16 = 25;
/// Default submission port
pub static SUBMISSION_PORT: u16 = 587;
// Useful strings and characters
/// The word separator for SMTP transactions
pub static SP: &'static str = " ";
/// The line ending for SMTP transactions (carriage return + line feed)
pub static CRLF: &'static str = "\r\n";
/// Colon
pub static COLON: &'static str = ":";
/// The ending of message content
pub static MESSAGE_ENDING: &'static str = "\r\n.\r\n";
/// NUL unicode character
pub static NUL: &'static str = "\0";
/// TLS security level
#[derive(Debug)]
pub enum SecurityLevel {
/// Use a TLS wrapped connection
///
/// Non RFC-compliant, should only be used if the server does not support STARTTLS.
EncryptedWrapper,
/// Only send an email on encrypted connection (with STARTTLS)
///
/// Recommended mode, prevents MITM when used with verified certificates.
AlwaysEncrypt,
/// Use TLS when available (with STARTTLS)
///
/// Default mode.
Opportunistic,
/// Never use TLS
NeverEncrypt,
}
/// Contains client configuration
pub struct SmtpTransportBuilder {
/// Maximum connection reuse
///
/// Zero means no limitation
connection_reuse_count_limit: u16,
/// Enable connection reuse
connection_reuse: bool,
/// Name sent during HELO or EHLO
hello_name: String,
/// Credentials
credentials: Option<(String, String)>,
/// Socket we are connecting to
server_addr: SocketAddr,
/// SSL context to use
ssl_context: SslContext,
/// TLS security level
security_level: SecurityLevel,
/// Enable UTF8 mailboxes in envelope or headers
smtp_utf8: bool,
/// List of authentication mechanisms, sorted by priority
authentication_mechanisms: Vec<Mechanism>,
}
/// Builder for the SMTP SmtpTransport
impl SmtpTransportBuilder {
/// Creates a new local SMTP client
pub fn new<A: ToSocketAddrs>(addr: A) -> Result<SmtpTransportBuilder, Error> {
let mut addresses = try!(addr.to_socket_addrs());
match addresses.next() {
Some(addr) => Ok(SmtpTransportBuilder {
server_addr: addr,
ssl_context: SslContext::new(SslMethod::Tlsv1).unwrap(),
security_level: SecurityLevel::Opportunistic,
smtp_utf8: false,
credentials: None,
connection_reuse_count_limit: 100,
connection_reuse: false,
hello_name: "localhost".to_string(),
authentication_mechanisms: vec![Mechanism::CramMd5, Mechanism::Plain],
}),
None => Err(From::from("Could not resolve hostname")),
}
}
/// Creates a new local SMTP client to port 25
pub fn localhost() -> Result<SmtpTransportBuilder, Error> {
SmtpTransportBuilder::new(("localhost", SMTP_PORT))
}
/// Use STARTTLS with a specific context
pub fn ssl_context(mut self, ssl_context: SslContext) -> SmtpTransportBuilder {
self.ssl_context = ssl_context;
self
}
/// Set the security level for SSL/TLS
pub fn security_level(mut self, level: SecurityLevel) -> SmtpTransportBuilder {
self.security_level = level;
self
}
/// Require SSL/TLS using STARTTLS
///
/// Incompatible with `ssl_wrapper()`
pub fn encrypt(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::AlwaysEncrypt;
self
}
/// Require SSL/TLS using a TLS-wrapped connection (without STARTTLS)
///
/// Incompatible with `encrypt()`
pub fn ssl_wrapper(mut self) -> SmtpTransportBuilder {
self.security_level = SecurityLevel::EncryptedWrapper;
self
}
/// Enable SMTPUTF8 if the server supports it
pub fn smtp_utf8(mut self, enabled: bool) -> SmtpTransportBuilder {
self.smtp_utf8 = enabled;
self
}
/// Set the name used during HELO or EHLO
pub fn hello_name(mut self, name: &str) -> SmtpTransportBuilder {
self.hello_name = name.to_string();
self
}
/// Enable connection reuse
pub fn connection_reuse(mut self, enable: bool) -> SmtpTransportBuilder {
self.connection_reuse = enable;
self
}
/// Set the maximum number of emails sent using one connection
pub fn connection_reuse_count_limit(mut self, limit: u16) -> SmtpTransportBuilder {
self.connection_reuse_count_limit = limit;
self
}
/// Set the client credentials
pub fn credentials(mut self, username: &str, password: &str) -> SmtpTransportBuilder {
self.credentials = Some((username.to_string(), password.to_string()));
self
}
/// Set the authentication mechanisms
pub fn authentication_mechanisms(mut self, mechanisms: Vec<Mechanism>) -> SmtpTransportBuilder {
self.authentication_mechanisms = mechanisms;
self
}
/// Build the SMTP client
///
/// It does not connect to the server; it only creates the `SmtpTransport`
pub fn build(self) -> SmtpTransport {
SmtpTransport::new(self)
}
}
/// Represents the state of a client
#[derive(Debug)]
struct State {
/// Panic state
pub panic: bool,
/// Connection reuse counter
pub connection_reuse_count: u16,
}
/// Structure that implements the high level SMTP client
pub struct SmtpTransport {
/// Information about the server
/// Value is None before HELO/EHLO
server_info: Option<ServerInfo>,
/// SmtpTransport variable states
state: State,
/// Information about the client
client_info: SmtpTransportBuilder,
/// Low level client
client: Client,
}
macro_rules! try_smtp (
($err: expr, $client: ident) => ({
match $err {
Ok(val) => val,
Err(err) => {
if !$client.state.panic {
$client.state.panic = true;
$client.reset();
}
return Err(From::from(err))
},
}
})
);
impl SmtpTransport {
/// Creates a new SMTP client
///
/// It does not connect to the server; it only creates the `SmtpTransport`
pub fn new(builder: SmtpTransportBuilder) -> SmtpTransport {
let client = Client::new();
SmtpTransport {
client: client,
server_info: None,
client_info: builder,
state: State {
panic: false,
connection_reuse_count: 0,
},
}
}
/// Reset the client state
fn reset(&mut self) {
// Close the SMTP transaction if needed
self.close();
// Reset the client state
self.server_info = None;
self.state.panic = false;
self.state.connection_reuse_count = 0;
}
/// Gets the EHLO response and updates server information
pub fn get_ehlo(&mut self) -> EmailResult {
// Extended Hello
let ehlo_response = try_smtp!(self.client.ehlo(&self.client_info.hello_name), self);
self.server_info = Some(try_smtp!(ServerInfo::from_response(&ehlo_response), self));
// Print server information
debug!("server {}", self.server_info.as_ref().unwrap());
Ok(ehlo_response)
}
}
impl EmailTransport for SmtpTransport {
/// Sends an email
fn send<T: SendableEmail>(&mut self, email: T) -> EmailResult {
// Extract email information
let message_id = email.message_id();
let from_address = email.from_address();
let to_addresses = email.to_addresses();
let message = email.message();
// Check if the connection is still available
if self.state.connection_reuse_count > 0 {
if !self.client.is_connected() {
self.reset();
}
}
if self.state.connection_reuse_count == 0 {
try!(self.client.connect(&self.client_info.server_addr,
match &self.client_info.security_level {
&SecurityLevel::EncryptedWrapper =>
Some(&self.client_info.ssl_context),
_ => None,
}));
// Log the connection
info!("connection established to {}", self.client_info.server_addr);
try!(self.get_ehlo());
match (&self.client_info.security_level,
self.server_info.as_ref().unwrap().supports_feature(&Extension::StartTls)) {
(&SecurityLevel::AlwaysEncrypt, false) =>
return Err(From::from("Could not encrypt connection, aborting")),
(&SecurityLevel::Opportunistic, false) => (),
(&SecurityLevel::NeverEncrypt, _) => (),
(&SecurityLevel::EncryptedWrapper, _) => (),
(_, true) => {
try_smtp!(self.client.starttls(), self);
try_smtp!(self.client.upgrade_tls_stream(&self.client_info.ssl_context),
self);
debug!("connection encrypted");
// Send EHLO again
try!(self.get_ehlo());
}
}
if self.client_info.credentials.is_some() {
let (username, password) = self.client_info.credentials.clone().unwrap();
let mut found = false;
for mechanism in self.client_info.authentication_mechanisms.clone() {
if self.server_info.as_ref().unwrap().supports_auth_mechanism(mechanism) {
found = true;
try_smtp!(self.client.auth(mechanism, &username, &password), self);
break;
}
}
if !found {
info!("No supported authentication mechanisms available");
}
}
}
// Mail
let mail_options = match (self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::EightBitMime),
self.server_info
.as_ref()
.unwrap()
.supports_feature(&Extension::SmtpUtfEight)) {
(true, true) => Some("BODY=8BITMIME SMTPUTF8"),
(true, false) => Some("BODY=8BITMIME"),
(false, _) => None,
};
try_smtp!(self.client.mail(&from_address, mail_options), self);
// Log the mail command
info!("{}: from=<{}>", message_id, from_address);
// Recipient
for to_address in to_addresses.iter() {
try_smtp!(self.client.rcpt(&to_address), self);
// Log the rcpt command
info!("{}: to=<{}>", message_id, to_address);
}
// Data
try_smtp!(self.client.data(), self);
// Message content
let result = self.client.message(&message);
if result.is_ok() {
// Increment the connection reuse counter
self.state.connection_reuse_count = self.state.connection_reuse_count + 1;
// Log the message
info!("{}: conn_use={}, size={}, status=sent ({})",
message_id,
self.state.connection_reuse_count,
message.len(),
result.as_ref()
.ok()
.unwrap()
.message()
.iter()
.next()
.unwrap_or(&"no response".to_string()));
}
// Test if we can reuse the existing connection
if (!self.client_info.connection_reuse) ||
(self.state.connection_reuse_count >= self.client_info.connection_reuse_count_limit) |
result
}
/// Closes the inner connection
fn close(&mut self) {
self.client.close();
}
}
| {
self.reset();
} | conditional_block |
test_benchmark_mem.py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import unittest
from unittest import mock
from hardware.benchmark import mem
from hardware.benchmark import utils
SYSBENCH_OUTPUT = """Operations performed: 1957354 (391412.04 ops/sec)
1911.48 MB transferred (382.24 MB/sec)
Test execution summary:
total time: 5.0008s
total number of events: 1957354
total time taken by event execution: 3.0686
per-request statistics:
min: 0.00ms
avg: 0.00ms
max: 0.23ms
approx. 95 percentile: 0.00ms
Threads fairness:
events (avg/stddev): 1957354.0000/0.00
execution time (avg/stddev): 3.0686/0.00"""
EXPECTED_RESULT = [
('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2),
('cpu', 'logical_0', 'bandwidth_1K', '382'),
('cpu', 'logical_0', 'bandwidth_4K', '382'),
('cpu', 'logical_0', 'bandwidth_1M', '382'),
('cpu', 'logical_0', 'bandwidth_16M', '382'),
('cpu', 'logical_0', 'bandwidth_128M', '382'),
('cpu', 'logical_0', 'bandwidth_1G', '382'),
('cpu', 'logical_0', 'bandwidth_2G', '382'),
('cpu', 'logical_1', 'bandwidth_1K', '382'),
('cpu', 'logical_1', 'bandwidth_4K', '382'),
('cpu', 'logical_1', 'bandwidth_1M', '382'),
('cpu', 'logical_1', 'bandwidth_16M', '382'),
('cpu', 'logical_1', 'bandwidth_128M', '382'),
('cpu', 'logical_1', 'bandwidth_1G', '382'),
('cpu', 'logical_1', 'bandwidth_2G', '382'),
('cpu', 'logical', 'threaded_bandwidth_1K', '382'),
('cpu', 'logical', 'threaded_bandwidth_4K', '382'),
('cpu', 'logical', 'threaded_bandwidth_1M', '382'),
('cpu', 'logical', 'threaded_bandwidth_16M', '382'),
('cpu', 'logical', 'threaded_bandwidth_128M', '382'),
('cpu', 'logical', 'threaded_bandwidth_1G', '382'),
('cpu', 'logical', 'threaded_bandwidth_2G', '382'),
('cpu', 'logical', 'forked_bandwidth_1K', '382'),
('cpu', 'logical', 'forked_bandwidth_4K', '382'),
('cpu', 'logical', 'forked_bandwidth_1M', '382'),
('cpu', 'logical', 'forked_bandwidth_16M', '382'),
('cpu', 'logical', 'forked_bandwidth_128M', '382'),
('cpu', 'logical', 'forked_bandwidth_1G', '382'),
('cpu', 'logical', 'forked_bandwidth_2G', '382')
]
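# The '382' bandwidth values in EXPECTED_RESULT correspond to the
# "382.24 MB/sec" figure in SYSBENCH_OUTPUT; the code under test presumably
# reports the parsed MB/sec value truncated to a whole number.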
@mock.patch.object(mem, 'get_available_memory')
@mock.patch.object(utils, 'get_one_cpu_per_socket')
@mock.patch.object(subprocess, 'Popen')
class TestBenchmarkMem(unittest.TestCase):
def setUp(self):
|
def test_mem_perf_bytes(self, mock_popen, mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
mock_cpu_socket.return_value = range(2)
mem.mem_perf(self.hw_data)
expected = EXPECTED_RESULT
self.assertEqual(sorted(expected), sorted(self.hw_data))
def test_check_mem_size(self, mock_popen, mock_cpu_socket,
mock_get_memory):
block_size_list = ('1K', '4K', '1M', '16M', '128M', '1G', '2G')
mock_get_memory.return_value = 123456789012
for block_size in block_size_list:
self.assertTrue(mem.check_mem_size(block_size, 2))
# Low memory
mock_get_memory.return_value = 1
for block_size in block_size_list:
self.assertFalse(mem.check_mem_size(block_size, 2))
def test_run_sysbench_memory_forked_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_forked(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'forked_bandwidth_1K', '382')],
hw_data)
def test_run_sysbench_memory_threaded_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_threaded(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'threaded_bandwidth_1K', '382')],
hw_data)
| super(TestBenchmarkMem, self).setUp()
self.hw_data = [('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2)] | identifier_body |
test_benchmark_mem.py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import unittest
from unittest import mock
from hardware.benchmark import mem
from hardware.benchmark import utils
SYSBENCH_OUTPUT = """Operations performed: 1957354 (391412.04 ops/sec)
1911.48 MB transferred (382.24 MB/sec)
Test execution summary:
total time: 5.0008s
total number of events: 1957354
total time taken by event execution: 3.0686
per-request statistics:
min: 0.00ms
avg: 0.00ms
max: 0.23ms
approx. 95 percentile: 0.00ms
Threads fairness:
events (avg/stddev): 1957354.0000/0.00
execution time (avg/stddev): 3.0686/0.00"""
EXPECTED_RESULT = [
('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2),
('cpu', 'logical_0', 'bandwidth_1K', '382'),
('cpu', 'logical_0', 'bandwidth_4K', '382'),
('cpu', 'logical_0', 'bandwidth_1M', '382'),
('cpu', 'logical_0', 'bandwidth_16M', '382'),
('cpu', 'logical_0', 'bandwidth_128M', '382'),
('cpu', 'logical_0', 'bandwidth_1G', '382'),
('cpu', 'logical_0', 'bandwidth_2G', '382'),
('cpu', 'logical_1', 'bandwidth_1K', '382'),
('cpu', 'logical_1', 'bandwidth_4K', '382'),
('cpu', 'logical_1', 'bandwidth_1M', '382'),
('cpu', 'logical_1', 'bandwidth_16M', '382'),
('cpu', 'logical_1', 'bandwidth_128M', '382'),
('cpu', 'logical_1', 'bandwidth_1G', '382'),
('cpu', 'logical_1', 'bandwidth_2G', '382'),
('cpu', 'logical', 'threaded_bandwidth_1K', '382'),
('cpu', 'logical', 'threaded_bandwidth_4K', '382'),
('cpu', 'logical', 'threaded_bandwidth_1M', '382'),
('cpu', 'logical', 'threaded_bandwidth_16M', '382'),
('cpu', 'logical', 'threaded_bandwidth_128M', '382'),
('cpu', 'logical', 'threaded_bandwidth_1G', '382'),
('cpu', 'logical', 'threaded_bandwidth_2G', '382'),
('cpu', 'logical', 'forked_bandwidth_1K', '382'),
('cpu', 'logical', 'forked_bandwidth_4K', '382'),
('cpu', 'logical', 'forked_bandwidth_1M', '382'),
('cpu', 'logical', 'forked_bandwidth_16M', '382'),
('cpu', 'logical', 'forked_bandwidth_128M', '382'),
('cpu', 'logical', 'forked_bandwidth_1G', '382'),
('cpu', 'logical', 'forked_bandwidth_2G', '382')
]
@mock.patch.object(mem, 'get_available_memory')
@mock.patch.object(utils, 'get_one_cpu_per_socket')
@mock.patch.object(subprocess, 'Popen')
class TestBenchmarkMem(unittest.TestCase):
def setUp(self):
super(TestBenchmarkMem, self).setUp()
self.hw_data = [('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2)]
def test_mem_perf_bytes(self, mock_popen, mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
mock_cpu_socket.return_value = range(2)
mem.mem_perf(self.hw_data)
expected = EXPECTED_RESULT
self.assertEqual(sorted(expected), sorted(self.hw_data))
def test_check_mem_size(self, mock_popen, mock_cpu_socket,
mock_get_memory):
block_size_list = ('1K', '4K', '1M', '16M', '128M', '1G', '2G')
mock_get_memory.return_value = 123456789012
for block_size in block_size_list:
self.assertTrue(mem.check_mem_size(block_size, 2))
# Low memory
mock_get_memory.return_value = 1
for block_size in block_size_list:
self.assertFalse(mem.check_mem_size(block_size, 2))
def test_run_sysbench_memory_forked_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_forked(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'forked_bandwidth_1K', '382')],
hw_data)
def | (self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_threaded(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'threaded_bandwidth_1K', '382')],
hw_data)
| test_run_sysbench_memory_threaded_bytes | identifier_name |
test_benchmark_mem.py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import unittest
from unittest import mock
from hardware.benchmark import mem
from hardware.benchmark import utils
SYSBENCH_OUTPUT = """Operations performed: 1957354 (391412.04 ops/sec)
1911.48 MB transferred (382.24 MB/sec)
Test execution summary:
total time: 5.0008s
total number of events: 1957354
total time taken by event execution: 3.0686
per-request statistics:
min: 0.00ms
avg: 0.00ms
max: 0.23ms
approx. 95 percentile: 0.00ms
Threads fairness:
events (avg/stddev): 1957354.0000/0.00
execution time (avg/stddev): 3.0686/0.00"""
EXPECTED_RESULT = [
('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2),
('cpu', 'logical_0', 'bandwidth_1K', '382'),
('cpu', 'logical_0', 'bandwidth_4K', '382'),
('cpu', 'logical_0', 'bandwidth_1M', '382'),
('cpu', 'logical_0', 'bandwidth_16M', '382'),
('cpu', 'logical_0', 'bandwidth_128M', '382'),
('cpu', 'logical_0', 'bandwidth_1G', '382'),
('cpu', 'logical_0', 'bandwidth_2G', '382'),
('cpu', 'logical_1', 'bandwidth_1K', '382'),
('cpu', 'logical_1', 'bandwidth_4K', '382'),
('cpu', 'logical_1', 'bandwidth_1M', '382'),
('cpu', 'logical_1', 'bandwidth_16M', '382'),
('cpu', 'logical_1', 'bandwidth_128M', '382'),
('cpu', 'logical_1', 'bandwidth_1G', '382'),
('cpu', 'logical_1', 'bandwidth_2G', '382'),
('cpu', 'logical', 'threaded_bandwidth_1K', '382'),
('cpu', 'logical', 'threaded_bandwidth_4K', '382'),
('cpu', 'logical', 'threaded_bandwidth_1M', '382'),
('cpu', 'logical', 'threaded_bandwidth_16M', '382'),
('cpu', 'logical', 'threaded_bandwidth_128M', '382'),
('cpu', 'logical', 'threaded_bandwidth_1G', '382'),
('cpu', 'logical', 'threaded_bandwidth_2G', '382'),
('cpu', 'logical', 'forked_bandwidth_1K', '382'),
('cpu', 'logical', 'forked_bandwidth_4K', '382'),
('cpu', 'logical', 'forked_bandwidth_1M', '382'),
('cpu', 'logical', 'forked_bandwidth_16M', '382'),
('cpu', 'logical', 'forked_bandwidth_128M', '382'),
('cpu', 'logical', 'forked_bandwidth_1G', '382'),
('cpu', 'logical', 'forked_bandwidth_2G', '382')
]
@mock.patch.object(mem, 'get_available_memory')
@mock.patch.object(utils, 'get_one_cpu_per_socket')
@mock.patch.object(subprocess, 'Popen')
class TestBenchmarkMem(unittest.TestCase):
def setUp(self):
super(TestBenchmarkMem, self).setUp()
self.hw_data = [('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2)]
def test_mem_perf_bytes(self, mock_popen, mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
mock_cpu_socket.return_value = range(2)
mem.mem_perf(self.hw_data)
expected = EXPECTED_RESULT
self.assertEqual(sorted(expected), sorted(self.hw_data))
def test_check_mem_size(self, mock_popen, mock_cpu_socket,
mock_get_memory):
block_size_list = ('1K', '4K', '1M', '16M', '128M', '1G', '2G')
mock_get_memory.return_value = 123456789012
for block_size in block_size_list:
self.assertTrue(mem.check_mem_size(block_size, 2))
# Low memory
mock_get_memory.return_value = 1
for block_size in block_size_list:
self.assertFalse(mem.check_mem_size(block_size, 2))
def test_run_sysbench_memory_forked_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_forked(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'forked_bandwidth_1K', '382')], |
def test_run_sysbench_memory_threaded_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_threaded(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'threaded_bandwidth_1K', '382')],
hw_data) | hw_data) | random_line_split |
test_benchmark_mem.py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import unittest
from unittest import mock
from hardware.benchmark import mem
from hardware.benchmark import utils
SYSBENCH_OUTPUT = """Operations performed: 1957354 (391412.04 ops/sec)
1911.48 MB transferred (382.24 MB/sec)
Test execution summary:
total time: 5.0008s
total number of events: 1957354
total time taken by event execution: 3.0686
per-request statistics:
min: 0.00ms
avg: 0.00ms
max: 0.23ms
approx. 95 percentile: 0.00ms
Threads fairness:
events (avg/stddev): 1957354.0000/0.00
execution time (avg/stddev): 3.0686/0.00"""
EXPECTED_RESULT = [
('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2),
('cpu', 'logical_0', 'bandwidth_1K', '382'),
('cpu', 'logical_0', 'bandwidth_4K', '382'),
('cpu', 'logical_0', 'bandwidth_1M', '382'),
('cpu', 'logical_0', 'bandwidth_16M', '382'),
('cpu', 'logical_0', 'bandwidth_128M', '382'),
('cpu', 'logical_0', 'bandwidth_1G', '382'),
('cpu', 'logical_0', 'bandwidth_2G', '382'),
('cpu', 'logical_1', 'bandwidth_1K', '382'),
('cpu', 'logical_1', 'bandwidth_4K', '382'),
('cpu', 'logical_1', 'bandwidth_1M', '382'),
('cpu', 'logical_1', 'bandwidth_16M', '382'),
('cpu', 'logical_1', 'bandwidth_128M', '382'),
('cpu', 'logical_1', 'bandwidth_1G', '382'),
('cpu', 'logical_1', 'bandwidth_2G', '382'),
('cpu', 'logical', 'threaded_bandwidth_1K', '382'),
('cpu', 'logical', 'threaded_bandwidth_4K', '382'),
('cpu', 'logical', 'threaded_bandwidth_1M', '382'),
('cpu', 'logical', 'threaded_bandwidth_16M', '382'),
('cpu', 'logical', 'threaded_bandwidth_128M', '382'),
('cpu', 'logical', 'threaded_bandwidth_1G', '382'),
('cpu', 'logical', 'threaded_bandwidth_2G', '382'),
('cpu', 'logical', 'forked_bandwidth_1K', '382'),
('cpu', 'logical', 'forked_bandwidth_4K', '382'),
('cpu', 'logical', 'forked_bandwidth_1M', '382'),
('cpu', 'logical', 'forked_bandwidth_16M', '382'),
('cpu', 'logical', 'forked_bandwidth_128M', '382'),
('cpu', 'logical', 'forked_bandwidth_1G', '382'),
('cpu', 'logical', 'forked_bandwidth_2G', '382')
]
@mock.patch.object(mem, 'get_available_memory')
@mock.patch.object(utils, 'get_one_cpu_per_socket')
@mock.patch.object(subprocess, 'Popen')
class TestBenchmarkMem(unittest.TestCase):
def setUp(self):
super(TestBenchmarkMem, self).setUp()
self.hw_data = [('cpu', 'logical', 'number', 2),
('cpu', 'physical', 'number', 2)]
def test_mem_perf_bytes(self, mock_popen, mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
mock_cpu_socket.return_value = range(2)
mem.mem_perf(self.hw_data)
expected = EXPECTED_RESULT
self.assertEqual(sorted(expected), sorted(self.hw_data))
def test_check_mem_size(self, mock_popen, mock_cpu_socket,
mock_get_memory):
block_size_list = ('1K', '4K', '1M', '16M', '128M', '1G', '2G')
mock_get_memory.return_value = 123456789012
for block_size in block_size_list:
|
# Low memory
mock_get_memory.return_value = 1
for block_size in block_size_list:
self.assertFalse(mem.check_mem_size(block_size, 2))
def test_run_sysbench_memory_forked_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_forked(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'forked_bandwidth_1K', '382')],
hw_data)
def test_run_sysbench_memory_threaded_bytes(self, mock_popen,
mock_cpu_socket,
mock_get_memory):
mock_get_memory.return_value = 123456789012
mock_popen.return_value = mock.Mock(
stdout=SYSBENCH_OUTPUT.encode().splitlines())
hw_data = []
mem.run_sysbench_memory_threaded(hw_data, 10, '1K', 2)
self.assertEqual([('cpu', 'logical', 'threaded_bandwidth_1K', '382')],
hw_data)
| self.assertTrue(mem.check_mem_size(block_size, 2)) | conditional_block |
index.tsx | /**
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
import * as React from 'react';
import {createStore, applyMiddleware, compose} from 'redux';
import {Provider, connect} from 'react-redux';
import thunk from 'redux-thunk';
import {WarbandMember, hasClientAPI} from 'camelot-unchained';
import PlayerStatusComponent from '../../components/PlayerStatusComponent';
import reducer, {SessionState} from './services/session';
import {PlayerState, doThing, initializePlayerSession} from './services/session/player';
import {PlayerStatus, BodyParts} from '../../lib/PlayerStatus';
const composeEnhancers = (window as any).__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const store = createStore(reducer, composeEnhancers(applyMiddleware(thunk)));
export interface ContainerProps {
containerClass?: string;
isMini?: boolean;
}
export interface PlayerHealthProps extends ContainerProps {
dispatch?: (action: any) => any;
player?: PlayerState;
}
export interface PlayerHealthState {
}
function select(state: SessionState): PlayerHealthProps {
return {
player: state.player,
};
}
class PlayerHealth extends React.Component<PlayerHealthProps, PlayerHealthState> {
constructor(props: PlayerHealthProps) {
super(props);
}
public | () {
const hide = this.props.player.playerStatus.name === '';
if (hide) return null;
const dead = this.props.player.playerStatus.blood.current <= 0 ||
this.props.player.playerStatus.health[BodyParts.Torso].current <= 0;
return (
<div className={`Playerhealth ${this.props.containerClass}`}
onClick={() => hasClientAPI() || dead ? '' : this.props.dispatch(doThing())}>
<PlayerStatusComponent
containerClass='PlayerHealth'
playerStatus={this.props.player.playerStatus}
events={this.props.player.events}
/>
</div>
);
}
public componentDidMount() {
this.props.dispatch(initializePlayerSession());
}
}
const PlayerComp = connect(select)(PlayerHealth);
class Container extends React.Component<ContainerProps,{}> {
public render() {
return (
<Provider store={store}>
<PlayerComp {...(this.props as any)}/>
</Provider>
);
}
}
export default Container;
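// Minimal usage sketch (illustrative only); the class name is a placeholder:
//   <Container containerClass="player-health-widget" isMini={false} />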
| render | identifier_name |
index.tsx | /**
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
import * as React from 'react';
import {createStore, applyMiddleware, compose} from 'redux';
import {Provider, connect} from 'react-redux';
import thunk from 'redux-thunk';
import {WarbandMember, hasClientAPI} from 'camelot-unchained';
import PlayerStatusComponent from '../../components/PlayerStatusComponent';
import reducer, {SessionState} from './services/session';
import {PlayerState, doThing, initializePlayerSession} from './services/session/player';
import {PlayerStatus, BodyParts} from '../../lib/PlayerStatus';
const composeEnhancers = (window as any).__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const store = createStore(reducer, composeEnhancers(applyMiddleware(thunk)));
export interface ContainerProps {
containerClass?: string;
isMini?: boolean;
}
export interface PlayerHealthProps extends ContainerProps {
dispatch?: (action: any) => any;
player?: PlayerState;
}
export interface PlayerHealthState {
}
function select(state: SessionState): PlayerHealthProps {
return {
player: state.player,
};
}
class PlayerHealth extends React.Component<PlayerHealthProps, PlayerHealthState> {
constructor(props: PlayerHealthProps) {
super(props);
}
public render() |
onClick={() => hasClientAPI() || dead ? '' : this.props.dispatch(doThing())}>
<PlayerStatusComponent
containerClass='PlayerHealth'
playerStatus={this.props.player.playerStatus}
events={this.props.player.events}
/>
</div>
);
}
public componentDidMount() {
this.props.dispatch(initializePlayerSession());
}
}
const PlayerComp = connect(select)(PlayerHealth);
class Container extends React.Component<ContainerProps,{}> {
public render() {
return (
<Provider store={store}>
<PlayerComp {...(this.props as any)}/>
</Provider>
);
}
}
export default Container;
| {
const hide = this.props.player.playerStatus.name === '';
if (hide) return null;
const dead = this.props.player.playerStatus.blood.current <= 0 ||
this.props.player.playerStatus.health[BodyParts.Torso].current <= 0;
return (
<div className={`Playerhealth ${this.props.containerClass}`} | identifier_body |
index.tsx | /**
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
import * as React from 'react';
import {createStore, applyMiddleware, compose} from 'redux';
import {Provider, connect} from 'react-redux';
import thunk from 'redux-thunk';
import {WarbandMember, hasClientAPI} from 'camelot-unchained';
import PlayerStatusComponent from '../../components/PlayerStatusComponent';
import reducer, {SessionState} from './services/session';
import {PlayerState, doThing, initializePlayerSession} from './services/session/player';
import {PlayerStatus, BodyParts} from '../../lib/PlayerStatus';
const composeEnhancers = (window as any).__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const store = createStore(reducer, composeEnhancers(applyMiddleware(thunk)));
export interface ContainerProps {
containerClass?: string;
isMini?: boolean;
}
export interface PlayerHealthProps extends ContainerProps {
dispatch?: (action: any) => any;
player?: PlayerState;
}
export interface PlayerHealthState {
}
function select(state: SessionState): PlayerHealthProps {
return {
player: state.player,
};
}
class PlayerHealth extends React.Component<PlayerHealthProps, PlayerHealthState> {
constructor(props: PlayerHealthProps) {
super(props);
}
| const dead = this.props.player.playerStatus.blood.current <= 0 ||
this.props.player.playerStatus.health[BodyParts.Torso].current <= 0;
return (
<div className={`Playerhealth ${this.props.containerClass}`}
onClick={() => hasClientAPI() || dead ? '' : this.props.dispatch(doThing())}>
<PlayerStatusComponent
containerClass='PlayerHealth'
playerStatus={this.props.player.playerStatus}
events={this.props.player.events}
/>
</div>
);
}
public componentDidMount() {
this.props.dispatch(initializePlayerSession());
}
}
const PlayerComp = connect(select)(PlayerHealth);
class Container extends React.Component<ContainerProps,{}> {
public render() {
return (
<Provider store={store}>
<PlayerComp {...(this.props as any)}/>
</Provider>
);
}
}
export default Container; | public render() {
const hide = this.props.player.playerStatus.name === '';
if (hide) return null; | random_line_split |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right);
let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else { key2 };
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region != ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn | (
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id() != leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleeps an election timeout. The new leader needs enough time to gather
// all followers progress, in cause the merge request is reject by the
// log gap too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer does not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We can not use `get_region_with` to get the latest info of reigon 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region != ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case running reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to transfer leader with filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 get the latest writes by message append
// instead of snapshot, so that transfer leader to peer 3 can 100% success.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contains an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try reading k1 on peer 1, which is being destroyed.
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for raftstore receives the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In the previous implementation, the leader lease was suspected at the position of the
/// `leader_commit_prepare_merge` failpoint, i.e. when the `PrepareMerge` log is committed,
/// which is too late to prevent stale reads.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In the previous implementation, the result was Ok and the value was "v",
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
}
| must_not_eq_on_key | identifier_name |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right);
let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else | ;
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region != ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn must_not_eq_on_key(
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id() != leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleep for an election timeout. The new leader needs enough time to gather
// all followers' progress, in case the merge request is rejected because the
// log gap is too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer does not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We cannot use `get_region_with` to get the latest info of region 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region != ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case run reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to make transferring the leader with a filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 gets the latest writes by message append
// instead of snapshot, so that transferring the leader to peer 3 always succeeds.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contain an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try reading k1 from peer 1.
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for the raftstore to receive the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In the previous implementation, the leader lease was suspected at the position of the `leader_commit_prepare_merge`
/// failpoint, i.e. when the `PrepareMerge` log is committed, which is too late to prevent stale reads.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
}
| { key2 } | conditional_block |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right); | .get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else { key2 };
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region != ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn must_not_eq_on_key(
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id() != leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleep for an election timeout. The new leader needs enough time to gather
// all followers' progress, in case the merge request is rejected because the
// log gap is too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer does not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We cannot use `get_region_with` to get the latest info of region 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region != ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case run reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to make transferring the leader with a filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 gets the latest writes by message append
// instead of snapshot, so that transferring the leader to peer 3 always succeeds.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contain an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try reading k1 from peer 1.
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for the raftstore to receive the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
}
/// In the previous implementation, the leader lease was suspected at the position of the `leader_commit_prepare_merge`
/// failpoint, i.e. when the `PrepareMerge` log is committed, which is too late to prevent stale reads.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
} | let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1 | random_line_split |
test_stale_read.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::atomic::*;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Duration;
use std::{mem, thread};
use kvproto::metapb::{Peer, Region};
use raft::eraftpb::MessageType;
use pd_client::PdClient;
use raftstore::store::Callback;
use test_raftstore::*;
use tikv_util::config::*;
use tikv_util::HandyRwLock;
fn stale_read_during_splitting(right_derive: bool) {
let count = 3;
let mut cluster = new_node_cluster(0, count);
cluster.cfg.raft_store.right_derive_when_split = right_derive;
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.run();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
// Get the first region.
let region_left = cluster.get_region(key1);
let region_right = cluster.get_region(key2);
assert_eq!(region_left, region_right);
let region1 = region_left;
assert_eq!(region1.get_id(), 1);
let peer3 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 3)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), peer3.clone());
// Get the current leader.
let leader1 = peer3;
// Pause the apply worker of peer 3.
let apply_split = "apply_before_split_1_3";
fail::cfg(apply_split, "pause").unwrap();
// Split the first region.
cluster.split_region(®ion1, key2, Callback::write(Box::new(move |_| {})));
// Sleep for a while.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 2 X L F
// Note: A has the peer 3,
// L: leader, F: follower, X: peer is not ready.
thread::sleep(election_timeout);
// A key that is covered by the old region and the new region.
let stale_key = if right_derive { key1 } else { key2 };
// Get the new region.
let region2 = cluster.get_region_with(stale_key, |region| region != ®ion1);
// Get the leader of the new region.
let leader2 = cluster.leader_of_region(region2.get_id()).unwrap();
assert_ne!(leader1.get_store_id(), leader2.get_store_id());
must_not_stale_read(
&mut cluster,
stale_key,
®ion1,
&leader1,
®ion2,
&leader2,
apply_split,
);
}
fn must_not_stale_read(
cluster: &mut Cluster<NodeCluster>,
stale_key: &[u8],
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
fp: &str,
) {
// A new value for stale_key.
let v3 = b"v3";
let mut request = new_request(
new_region.get_id(),
new_region.get_region_epoch().clone(),
vec![new_put_cf_cmd("default", stale_key, v3)],
false,
);
request.mut_header().set_peer(new_leader.clone());
cluster
.call_command_on_node(new_leader.get_store_id(), request, Duration::from_secs(5))
.unwrap();
// LocalRead.
let read_quorum = false;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// ReadIndex.
let read_quorum = true;
must_not_eq_on_key(
cluster,
stale_key,
v3,
read_quorum,
old_region,
old_leader,
new_region,
new_leader,
);
// Leaders can always propose read index despite split/merge.
let propose_readindex = "before_propose_readindex";
fail::cfg(propose_readindex, "return(true)").unwrap();
// Can not execute reads that are queued.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
value1.unwrap_err(); // Error::Timeout
// Remove the fp.
fail::remove(fp);
// It should read an error instead of timeout.
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
stale_key,
read_quorum,
Duration::from_secs(5),
);
debug!("stale_key: {:?}, {:?}", stale_key, value1);
assert!(value1.unwrap().get_header().has_error());
// Clean up.
fail::remove(propose_readindex);
}
fn must_not_eq_on_key(
cluster: &mut Cluster<NodeCluster>,
key: &[u8],
value: &[u8],
read_quorum: bool,
old_region: &Region,
old_leader: &Peer,
new_region: &Region,
new_leader: &Peer,
) {
let value1 = read_on_peer(
cluster,
old_leader.clone(),
old_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
let value2 = read_on_peer(
cluster,
new_leader.clone(),
new_region.clone(),
key,
read_quorum,
Duration::from_secs(1),
);
debug!("stale_key: {:?}, {:?} vs {:?}", key, value1, value2);
assert_eq!(must_get_value(value2.as_ref().unwrap()).as_slice(), value);
// The old leader should return an error.
assert!(
value1.as_ref().unwrap().get_header().has_error(),
"{:?}",
value1
);
}
#[test]
fn test_node_stale_read_during_splitting_left_derive() {
stale_read_during_splitting(false);
}
#[test]
fn test_node_stale_read_during_splitting_right_derive() {
stale_read_during_splitting(true);
}
#[test]
fn test_stale_read_during_merging() {
let count = 3;
let mut cluster = new_node_cluster(0, count);
configure_for_merge(&mut cluster);
let election_timeout = configure_for_lease_read(&mut cluster, None, None);
cluster.cfg.raft_store.right_derive_when_split = false;
cluster.cfg.raft_store.pd_heartbeat_tick_interval =
cluster.cfg.raft_store.raft_base_tick_interval;
debug!("max leader lease: {:?}", election_timeout);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
cluster.run_conf_change();
// Write the initial values.
let key1 = b"k1";
let v1 = b"v1";
cluster.must_put(key1, v1);
let key2 = b"k2";
let v2 = b"v2";
cluster.must_put(key2, v2);
let region = pd_client.get_region(b"k1").unwrap();
pd_client.must_add_peer(region.get_id(), new_peer(2, 4));
pd_client.must_add_peer(region.get_id(), new_peer(3, 5));
cluster.must_split(®ion, b"k2");
let mut region1 = cluster.get_region(key1);
let mut region1000 = cluster.get_region(key2);
assert_ne!(region1, region1000);
assert_eq!(region1.get_id(), 1); // requires disable right_derive.
let leader1 = region1
.get_peers()
.iter()
.find(|p| p.get_id() == 4)
.unwrap()
.clone();
cluster.must_transfer_leader(region1.get_id(), leader1.clone());
let leader1000 = region1000
.get_peers()
.iter()
.find(|p| p.get_store_id() != leader1.get_store_id())
.unwrap()
.clone();
cluster.must_transfer_leader(region1000.get_id(), leader1000.clone());
assert_ne!(leader1.get_store_id(), leader1000.get_store_id());
// Sleep for an election timeout. The new leader needs enough time to gather
// all followers' progress, in case the merge request is rejected because the
// log gap is too large (min_progress == 0).
thread::sleep(election_timeout);
// merge into
// region1000 ------------> region1
cluster.must_try_merge(region1000.get_id(), region1.get_id());
// Pause the apply workers except for the peer 4.
let apply_commit_merge = "apply_before_commit_merge_except_1_4";
fail::cfg(apply_commit_merge, "pause").unwrap();
// Wait for commit merge.
// The TiKVs that have followers of the old region will elect a leader
// of the new region.
// TiKV A TiKV B TiKV C
// Region 1 L F F
// Region 1000 F L F
// after wait
// Region 1 L F F
// Region 1000 X L F
// Note: L: leader, F: follower, X: peer does not exist.
// TODO: what if cluster runs slow and lease is expired.
// Epoch changed by prepare merge.
// We cannot use `get_region_with` to get the latest info of region 1000,
// because leader1 is not paused, it executes commit merge very fast
// and reports pd, its range covers region1000.
//
// region1000 does prepare merge, it increases ver and conf_ver by 1.
debug!("before merge: {:?} | {:?}", region1000, region1);
let region1000_version = region1000.get_region_epoch().get_version() + 1;
region1000
.mut_region_epoch()
.set_version(region1000_version);
let region1000_conf_version = region1000.get_region_epoch().get_conf_ver() + 1;
region1000
.mut_region_epoch()
.set_conf_ver(region1000_conf_version);
// Epoch changed by commit merge.
region1 = cluster.get_region_with(key1, |region| region != ®ion1);
debug!("after merge: {:?} | {:?}", region1000, region1);
// A key that is covered by region 1000 and region 1.
let stale_key = key2;
must_not_stale_read(
&mut cluster,
stale_key,
®ion1000,
&leader1000,
®ion1,
&leader1,
apply_commit_merge,
);
}
#[test]
fn test_read_index_when_transfer_leader_2() {
let mut cluster = new_node_cluster(0, 3);
// Increase the election tick to make this test case run reliably.
configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
// Stop log compaction to make transferring the leader with a filter easier.
configure_for_request_snapshot(&mut cluster);
let max_lease = Duration::from_secs(2);
cluster.cfg.raft_store.raft_store_max_leader_lease = ReadableDuration(max_lease);
// Add peer 2 and 3 and wait them to apply it.
cluster.pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
cluster.must_put(b"k0", b"v0");
cluster.pd_client.must_add_peer(r1, new_peer(2, 2));
cluster.pd_client.must_add_peer(r1, new_peer(3, 3));
must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
must_get_equal(&cluster.get_engine(3), b"k0", b"v0");
// Put and test again to ensure that peer 3 gets the latest writes by message append
// instead of snapshot, so that transferring the leader to peer 3 always succeeds.
cluster.must_put(b"k1", b"v1");
must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
let r1 = cluster.get_region(b"k1");
let old_leader = cluster.leader_of_region(r1.get_id()).unwrap();
// Use a macro instead of a closure to avoid any capture of local variables.
macro_rules! read_on_old_leader {
() => {{
let (tx, rx) = mpsc::sync_channel(1);
let mut read_request = new_request(
r1.get_id(),
r1.get_region_epoch().clone(),
vec![new_get_cmd(b"k1")],
true, // read quorum
);
read_request.mut_header().set_peer(new_peer(1, 1));
let sim = cluster.sim.wl();
sim.async_command_on_node(
old_leader.get_id(),
read_request,
Callback::Read(Box::new(move |resp| tx.send(resp.response).unwrap())),
)
.unwrap();
rx
}};
}
// Delay all raft messages to peer 1.
let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
let filter = Box::new(
RegionPacketFilter::new(r1.get_id(), old_leader.get_store_id())
.direction(Direction::Recv)
.skip(MessageType::MsgTransferLeader)
.when(Arc::new(AtomicBool::new(true)))
.reserve_dropped(Arc::clone(&dropped_msgs)),
);
cluster
.sim
.wl()
.add_recv_filter(old_leader.get_id(), filter);
let resp1 = read_on_old_leader!();
cluster.must_transfer_leader(r1.get_id(), new_peer(3, 3));
let resp2 = read_on_old_leader!();
// Unpark all pending messages and clear all filters.
let router = cluster.sim.wl().get_router(old_leader.get_id()).unwrap();
let mut reserved_msgs = Vec::new();
'LOOP: loop {
for raft_msg in mem::replace(dropped_msgs.lock().unwrap().as_mut(), vec![]) {
let msg_type = raft_msg.get_message().get_msg_type();
if msg_type == MessageType::MsgHeartbeatResponse || msg_type == MessageType::MsgAppend {
reserved_msgs.push(raft_msg);
if msg_type == MessageType::MsgAppend {
break 'LOOP;
}
}
}
}
// Resume reserved messages in one batch to make sure the old leader can get read and role
// change in one `Ready`.
fail::cfg("pause_on_peer_collect_message", "pause").unwrap();
for raft_msg in reserved_msgs {
router.send_raft_message(raft_msg).unwrap();
}
fail::cfg("pause_on_peer_collect_message", "off").unwrap();
cluster.sim.wl().clear_recv_filters(old_leader.get_id());
let resp1 = resp1.recv().unwrap();
assert!(resp1.get_header().get_error().has_stale_command());
// Response 2 should contain an error.
let resp2 = resp2.recv().unwrap();
assert!(resp2.get_header().get_error().has_stale_command());
drop(cluster);
fail::remove("pause_on_peer_collect_message");
}
#[test]
fn test_read_after_peer_destroyed() |
/// In the previous implementation, the leader lease was suspected at the position of the `leader_commit_prepare_merge`
/// failpoint, i.e. when the `PrepareMerge` log is committed, which is too late to prevent stale reads.
#[test]
fn test_stale_read_during_merging_2() {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
pd_client.disable_default_operator();
configure_for_merge(&mut cluster);
configure_for_lease_read(&mut cluster, Some(50), Some(20));
cluster.run();
for i in 0..10 {
cluster.must_put(format!("k{}", i).as_bytes(), b"v");
}
let region = pd_client.get_region(b"k1").unwrap();
cluster.must_split(®ion, b"k2");
let left = pd_client.get_region(b"k1").unwrap();
let right = pd_client.get_region(b"k2").unwrap();
let left_peer_1 = find_peer(&left, 1).unwrap().to_owned();
cluster.must_transfer_leader(left.get_id(), left_peer_1.clone());
let right_peer_3 = find_peer(&right, 3).unwrap().to_owned();
cluster.must_transfer_leader(right.get_id(), right_peer_3);
let leader_commit_prepare_merge_fp = "leader_commit_prepare_merge";
fail::cfg(leader_commit_prepare_merge_fp, "pause").unwrap();
pd_client.must_merge(left.get_id(), right.get_id());
cluster.must_put(b"k1", b"v1");
let value = read_on_peer(
&mut cluster,
left_peer_1,
left,
b"k1",
false,
Duration::from_millis(200),
);
// The leader lease must be suspected so the local read is forbidden.
// The result should be Error::Timeout because the leader is paused at
// the position of `leader_commit_prepare_merge` failpoint.
// In previous implementation, the result is ok and the value is "v"
// but the right answer is "v1".
value.unwrap_err();
fail::remove(leader_commit_prepare_merge_fp);
}
| {
let mut cluster = new_node_cluster(0, 3);
let pd_client = cluster.pd_client.clone();
// Disable default max peer number check.
pd_client.disable_default_operator();
let r1 = cluster.run_conf_change();
// Add 2 peers.
for i in 2..4 {
pd_client.must_add_peer(r1, new_peer(i, i));
}
// Make sure peer 1 leads the region.
cluster.must_transfer_leader(r1, new_peer(1, 1));
let (key, value) = (b"k1", b"v1");
cluster.must_put(key, value);
assert_eq!(cluster.get(key), Some(value.to_vec()));
let destroy_peer_fp = "destroy_peer";
fail::cfg(destroy_peer_fp, "pause").unwrap();
pd_client.must_remove_peer(r1, new_peer(1, 1));
sleep_ms(300);
// Try reading k1 from peer 1.
let mut request = new_request(
r1,
cluster.pd_client.get_region_epoch(r1),
vec![new_get_cmd(b"k1")],
false,
);
request.mut_header().set_peer(new_peer(1, 1));
let (cb, rx) = make_cb(&request);
cluster
.sim
.rl()
.async_command_on_node(1, request, cb)
.unwrap();
// Wait for the raftstore to receive the read request.
sleep_ms(200);
fail::remove(destroy_peer_fp);
let resp = rx.recv_timeout(Duration::from_millis(200)).unwrap();
assert!(
resp.get_header().get_error().has_region_not_found(),
"{:?}",
resp
);
} | identifier_body |
singleCourse.ts | import { ActivatedRoute, Router } from '@angular/router';
import { Store } from '@ngrx/store';
import { coursesActions } from '../../Store/actions';
export abstract class AbstractCourseDetailed {
public abstract pageType : string;
public currentCourse$ : any;
protected abstract get activatedRoute() : ActivatedRoute;
protected abstract get router() : Router;
protected abstract get store(): Store<any>
ngOnInit() {
let sub = this.activatedRoute.params.subscribe(({id}) => {
if(id) {
this.store.dispatch(new coursesActions.FetchingSingleCourse({id}))
} else {
this.store.dispatch(new coursesActions.EmptyCourse())
}
});
this.store.select('currentCourse').subscribe(course => {
this.currentCourse$ = course;
});
}
get prettyCourseDuration(): string {
const {duration} = this.currentCourse$,
hours = Math.floor(duration / 60),
minutes = duration % 60; |
return duration ? `${hours || ''} ${hours ? 'hour' : ''}${hours > 1 ? 's' : ''} ${minutes || '0'} min.` : 'not specified';
}
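// Illustrative note, not part of the original file: assuming `duration` holds a
// number of minutes, the getter above formats it roughly like this:
//   duration = 90 -> "1 hour 30 min."
//   duration = 0  -> "not specified"
// A minimal standalone sketch of the same logic for reference:
//   const pretty = (duration: number): string => {
//     const hours = Math.floor(duration / 60), minutes = duration % 60;
//     return duration ? `${hours || ''} ${hours ? 'hour' : ''}${hours > 1 ? 's' : ''} ${minutes || '0'} min.` : 'not specified';
//   };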
} | random_line_split |
|
singleCourse.ts | import { ActivatedRoute, Router } from '@angular/router';
import { Store } from '@ngrx/store';
import { coursesActions } from '../../Store/actions';
export abstract class AbstractCourseDetailed {
public abstract pageType : string;
public currentCourse$ : any;
protected abstract get activatedRoute() : ActivatedRoute;
protected abstract get router() : Router;
protected abstract get store(): Store<any>
| () {
let sub = this.activatedRoute.params.subscribe(({id}) => {
if(id) {
this.store.dispatch(new coursesActions.FetchingSingleCourse({id}))
} else {
this.store.dispatch(new coursesActions.EmptyCourse())
}
});
this.store.select('currentCourse').subscribe(course => {
this.currentCourse$ = course;
});
}
get prettyCourseDuration(): string {
const {duration} = this.currentCourse$,
hours = Math.floor(duration / 60),
minutes = duration % 60;
return duration ? `${hours || ''} ${hours ? 'hour' : ''}${hours > 1 ? 's' : ''} ${minutes || '0'} min.` : 'not specified';
}
}
| ngOnInit | identifier_name |
singleCourse.ts | import { ActivatedRoute, Router } from '@angular/router';
import { Store } from '@ngrx/store';
import { coursesActions } from '../../Store/actions';
export abstract class AbstractCourseDetailed {
public abstract pageType : string;
public currentCourse$ : any;
protected abstract get activatedRoute() : ActivatedRoute;
protected abstract get router() : Router;
protected abstract get store(): Store<any>
ngOnInit() {
let sub = this.activatedRoute.params.subscribe(({id}) => {
if(id) | else {
this.store.dispatch(new coursesActions.EmptyCourse())
}
});
this.store.select('currentCourse').subscribe(course => {
this.currentCourse$ = course;
});
}
get prettyCourseDuration(): string {
const {duration} = this.currentCourse$,
hours = Math.floor(duration / 60),
minutes = duration % 60;
return duration ? `${hours || ''} ${hours ? 'hour' : ''}${hours > 1 ? 's' : ''} ${minutes || '0'} min.` : 'not specified';
}
}
| {
this.store.dispatch(new coursesActions.FetchingSingleCourse({id}))
} | conditional_block |
singleCourse.ts | import { ActivatedRoute, Router } from '@angular/router';
import { Store } from '@ngrx/store';
import { coursesActions } from '../../Store/actions';
export abstract class AbstractCourseDetailed {
public abstract pageType : string;
public currentCourse$ : any;
protected abstract get activatedRoute() : ActivatedRoute;
protected abstract get router() : Router;
protected abstract get store(): Store<any>
ngOnInit() |
get prettyCourseDuration(): string {
const {duration} = this.currentCourse$,
hours = Math.floor(duration / 60),
minutes = duration % 60;
return duration ? `${hours || ''} ${hours ? 'hour' : ''}${hours > 1 ? 's' : ''} ${minutes || '0'} min.` : 'not specified';
}
}
| {
let sub = this.activatedRoute.params.subscribe(({id}) => {
if(id) {
this.store.dispatch(new coursesActions.FetchingSingleCourse({id}))
} else {
this.store.dispatch(new coursesActions.EmptyCourse())
}
});
this.store.select('currentCourse').subscribe(course => {
this.currentCourse$ = course;
});
} | identifier_body |
test_del_10gig_hardware.py | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing commands that remove virtual hardware."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDel10GigHardware(TestBrokerCommand):
def test_200_del_hosts(self):
for i in range(0, 8) + range(9, 17):
hostname = "ivirt%d.aqd-unittest.ms.com" % (1 + i)
command = "del_host --hostname %s" % hostname
if i < 9:
net_index = (i % 4) + 2
usable_index = i / 4
else:
net_index = ((i - 9) % 4) + 6
usable_index = (i - 9) / 4
ip = self.net.unknown[net_index].usable[usable_index]
self.dsdb_expect_delete(ip)
(out, err) = self.successtest(command.split(" "))
self.assertEmptyOut(out, command)
self.dsdb_verify()
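# Illustrative note, not part of the original test: with Python 2 integer
# division, the index arithmetic above maps each host to a network/address, e.g.
#   i = 5  -> net_index = (5 % 4) + 2 = 3,       usable_index = 5 / 4 = 1
#   i = 16 -> net_index = ((16 - 9) % 4) + 6 = 9, usable_index = (16 - 9) / 4 = 1
# so ivirt6 would resolve to self.net.unknown[3].usable[1] under these assumptions.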
def test_300_delaux(self):
|
def test_700_delmachines(self):
for i in range(0, 8) + range(9, 17):
machine = "evm%d" % (10 + i)
self.noouttest(["del", "machine", "--machine", machine])
def test_800_verifydelmachines(self):
for i in range(0, 18):
machine = "evm%d" % (10 + i)
command = "show machine --machine %s" % machine
self.notfoundtest(command.split(" "))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDel10GigHardware)
unittest.TextTestRunner(verbosity=2).run(suite)
| for i in range(1, 25):
hostname = "evh%d-e1.aqd-unittest.ms.com" % (i + 50)
self.dsdb_expect_delete(self.net.vm_storage_net[0].usable[i - 1])
command = ["del", "auxiliary", "--auxiliary", hostname]
(out, err) = self.successtest(command)
self.assertEmptyOut(out, command)
self.dsdb_verify() | identifier_body |
test_del_10gig_hardware.py | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing commands that remove virtual hardware."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDel10GigHardware(TestBrokerCommand):
def test_200_del_hosts(self):
for i in range(0, 8) + range(9, 17):
hostname = "ivirt%d.aqd-unittest.ms.com" % (1 + i)
command = "del_host --hostname %s" % hostname
if i < 9:
net_index = (i % 4) + 2
usable_index = i / 4
else:
net_index = ((i - 9) % 4) + 6
usable_index = (i - 9) / 4
ip = self.net.unknown[net_index].usable[usable_index]
self.dsdb_expect_delete(ip)
(out, err) = self.successtest(command.split(" "))
self.assertEmptyOut(out, command)
self.dsdb_verify()
def test_300_delaux(self):
for i in range(1, 25):
hostname = "evh%d-e1.aqd-unittest.ms.com" % (i + 50)
self.dsdb_expect_delete(self.net.vm_storage_net[0].usable[i - 1])
command = ["del", "auxiliary", "--auxiliary", hostname]
(out, err) = self.successtest(command)
self.assertEmptyOut(out, command)
self.dsdb_verify()
def test_700_delmachines(self):
for i in range(0, 8) + range(9, 17):
machine = "evm%d" % (10 + i)
self.noouttest(["del", "machine", "--machine", machine]) | command = "show machine --machine %s" % machine
self.notfoundtest(command.split(" "))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDel10GigHardware)
unittest.TextTestRunner(verbosity=2).run(suite) |
def test_800_verifydelmachines(self):
for i in range(0, 18):
machine = "evm%d" % (10 + i) | random_line_split |
test_del_10gig_hardware.py | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing commands that remove virtual hardware."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDel10GigHardware(TestBrokerCommand):
def test_200_del_hosts(self):
for i in range(0, 8) + range(9, 17):
hostname = "ivirt%d.aqd-unittest.ms.com" % (1 + i)
command = "del_host --hostname %s" % hostname
if i < 9:
|
else:
net_index = ((i - 9) % 4) + 6
usable_index = (i - 9) / 4
ip = self.net.unknown[net_index].usable[usable_index]
self.dsdb_expect_delete(ip)
(out, err) = self.successtest(command.split(" "))
self.assertEmptyOut(out, command)
self.dsdb_verify()
def test_300_delaux(self):
for i in range(1, 25):
hostname = "evh%d-e1.aqd-unittest.ms.com" % (i + 50)
self.dsdb_expect_delete(self.net.vm_storage_net[0].usable[i - 1])
command = ["del", "auxiliary", "--auxiliary", hostname]
(out, err) = self.successtest(command)
self.assertEmptyOut(out, command)
self.dsdb_verify()
def test_700_delmachines(self):
for i in range(0, 8) + range(9, 17):
machine = "evm%d" % (10 + i)
self.noouttest(["del", "machine", "--machine", machine])
def test_800_verifydelmachines(self):
for i in range(0, 18):
machine = "evm%d" % (10 + i)
command = "show machine --machine %s" % machine
self.notfoundtest(command.split(" "))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDel10GigHardware)
unittest.TextTestRunner(verbosity=2).run(suite)
| net_index = (i % 4) + 2
usable_index = i / 4 | conditional_block |
test_del_10gig_hardware.py | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing commands that remove virtual hardware."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class | (TestBrokerCommand):
def test_200_del_hosts(self):
for i in range(0, 8) + range(9, 17):
hostname = "ivirt%d.aqd-unittest.ms.com" % (1 + i)
command = "del_host --hostname %s" % hostname
if i < 9:
net_index = (i % 4) + 2
usable_index = i / 4
else:
net_index = ((i - 9) % 4) + 6
usable_index = (i - 9) / 4
ip = self.net.unknown[net_index].usable[usable_index]
self.dsdb_expect_delete(ip)
(out, err) = self.successtest(command.split(" "))
self.assertEmptyOut(out, command)
self.dsdb_verify()
def test_300_delaux(self):
for i in range(1, 25):
hostname = "evh%d-e1.aqd-unittest.ms.com" % (i + 50)
self.dsdb_expect_delete(self.net.vm_storage_net[0].usable[i - 1])
command = ["del", "auxiliary", "--auxiliary", hostname]
(out, err) = self.successtest(command)
self.assertEmptyOut(out, command)
self.dsdb_verify()
def test_700_delmachines(self):
for i in range(0, 8) + range(9, 17):
machine = "evm%d" % (10 + i)
self.noouttest(["del", "machine", "--machine", machine])
def test_800_verifydelmachines(self):
for i in range(0, 18):
machine = "evm%d" % (10 + i)
command = "show machine --machine %s" % machine
self.notfoundtest(command.split(" "))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDel10GigHardware)
unittest.TextTestRunner(verbosity=2).run(suite)
| TestDel10GigHardware | identifier_name |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate call
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Additional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize,
/// Amount of additional gas to pay when SUICIDE credits a non-existent account
pub suicide_to_new_account_cost: usize,
/// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS ? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else {10},
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn | () {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}
| schedule_evm_assumptions | identifier_name |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate call
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Additional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize,
/// Amount of additional gas to pay when SUICIDE credits a non-existent account
pub suicide_to_new_account_cost: usize,
/// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS ? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else | ,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}
| {10} | conditional_block |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate call
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Additional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize, | /// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS ? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else {10},
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
}
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
} | /// Amount of additional gas to pay when SUICIDE credits a non-existant account
pub suicide_to_new_account_cost: usize, | random_line_split |
schedule.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Cost schedule and other parameterisations for the EVM.
/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate call
pub have_delegate_call: bool,
/// VM stack limit
pub stack_limit: usize,
/// Max number of nested calls/creates
pub max_depth: usize,
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// Gas price for loading from storage
pub sload_gas: usize,
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// Gas price for `LOG*`
pub log_gas: usize,
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// Gas for used memory
pub memory_gas: usize,
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// Maximum code size when creating a contract.
pub create_data_limit: usize,
/// Transaction cost
pub tx_gas: usize,
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// Additional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// Gas price for copying memory
pub copy_gas: usize,
/// Price of EXTCODESIZE
pub extcodesize_gas: usize,
/// Base price of EXTCODECOPY
pub extcodecopy_base_gas: usize,
/// Price of BALANCE
pub balance_gas: usize,
/// Price of SUICIDE
pub suicide_gas: usize,
/// Amount of additional gas to pay when SUICIDE credits a non-existent account
pub suicide_to_new_account_cost: usize,
/// If Some(x): let limit = GAS * (x - 1) / x; let CALL's gas = min(requested, limit). let CREATE's gas = limit.
/// If None: let CALL's gas = (requested > GAS ? [OOG] : GAS). let CREATE's gas = GAS
pub sub_gas_cap_divisor: Option<usize>,
/// Don't ever make empty accounts; contracts start with nonce=1. Also, don't charge 25k when sending/suicide zero-value.
pub no_empty: bool,
/// Kill empty accounts if touched.
pub kill_empty: bool,
}
impl Schedule {
/// Schedule for the Frontier-era of the Ethereum main net.
pub fn new_frontier() -> Schedule {
Self::new(false, false, 21000)
}
/// Schedule for the Homestead-era of the Ethereum main net.
pub fn new_homestead() -> Schedule {
Self::new(true, true, 53000)
}
/// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule {
Schedule {
exceptional_failed_code_deposit: true,
have_delegate_call: true,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: if fix_exp {50} else {10},
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 200,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 700,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: max_code_size,
tx_gas: 21000,
tx_create_gas: 53000,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 700,
extcodecopy_base_gas: 700,
balance_gas: 400,
suicide_gas: 5000,
suicide_to_new_account_cost: 25000,
sub_gas_cap_divisor: Some(64),
no_empty: no_empty,
kill_empty: kill_empty,
}
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule |
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}
| {
Schedule {
exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc,
stack_limit: 1024,
max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
exp_gas: 10,
exp_byte_gas: 10,
sha3_gas: 30,
sha3_word_gas: 6,
sload_gas: 50,
sstore_set_gas: 20000,
sstore_reset_gas: 5000,
sstore_refund_gas: 15000,
jumpdest_gas: 1,
log_gas: 375,
log_data_gas: 8,
log_topic_gas: 375,
create_gas: 32000,
call_gas: 40,
call_stipend: 2300,
call_value_transfer_gas: 9000,
call_new_account_gas: 25000,
suicide_refund_gas: 24000,
memory_gas: 3,
quad_coeff_div: 512,
create_data_gas: 200,
create_data_limit: usize::max_value(),
tx_gas: 21000,
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
extcodesize_gas: 20,
extcodecopy_base_gas: 20,
balance_gas: 20,
suicide_gas: 0,
suicide_to_new_account_cost: 0,
sub_gas_cap_divisor: None,
no_empty: false,
kill_empty: false,
}
} | identifier_body |
__init__.py | # -*- coding: UTF-8 -*-
# COPYRIGHT (c) 2016 Cristóbal Ganter
#
# GNU AFFERO GENERAL PUBLIC LICENSE
# Version 3, 19 November 2007
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | # along with this program. If not, see <http://www.gnu.org/licenses/>.
from controller import MSGHandler
from src.load import load_wsclasses
load_wsclasses(__name__, MSGHandler) | # GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License | random_line_split |
lib.rs | #[derive(Debug)]
pub struct Rectangle {
length: u32,
width: u32,
}
impl Rectangle {
pub fn can_hold(&self, other: &Rectangle) -> bool {
self.length > other.length && self.width > other.width
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(larger.can_hold(&smaller));
}
#[test]
fn smaller_can_hold_larger() {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(!smaller.can_hold(&larger));
}
}
| larger_can_hold_smaller | identifier_name |
lib.rs | #[derive(Debug)]
pub struct Rectangle {
length: u32,
width: u32,
}
impl Rectangle {
pub fn can_hold(&self, other: &Rectangle) -> bool {
self.length > other.length && self.width > other.width
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn larger_can_hold_smaller() {
let larger = Rectangle {
length: 8,
width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(larger.can_hold(&smaller));
}
#[test]
fn smaller_can_hold_larger() {
let larger = Rectangle {
length: 8, | width: 7,
};
let smaller = Rectangle {
length: 5,
width: 1,
};
assert!(!smaller.can_hold(&larger));
}
} | random_line_split |
|
test.rs | extern crate couch;
extern crate http;
extern crate serialize;
#[cfg(test)]
mod test {
use couch::{Server,Document};
#[deriving(Encodable,Decodable)]
struct TestDocument {
_id: String,
body: String
}
impl Document for TestDocument {
fn id(&self) -> String |
}
#[test]
fn speak_to_the_couch() {
let server = Server::new(String::from_str("http://localhost:5984"));
let info = server.info();
assert_eq!(info.message(), "Welcome".to_owned());
}
#[test]
fn create_database() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("created_by_couch".to_owned());
let database = server.create_database("created_by_couch".to_owned());
assert_eq!(database.name(), "created_by_couch".to_owned());
}
#[test]
fn create_document() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("create_doc".to_owned());
let mut database = server.create_database("create_doc".to_owned());
let test_doc = &TestDocument { _id: "test".to_owned(), body: "test".to_owned() };
database.put(test_doc);
}
} | {
self._id.clone()
} | identifier_body |
test.rs | extern crate couch;
extern crate http;
extern crate serialize;
#[cfg(test)]
mod test {
use couch::{Server,Document};
#[deriving(Encodable,Decodable)]
struct TestDocument {
_id: String,
body: String
}
impl Document for TestDocument {
fn id(&self) -> String {
self._id.clone()
}
}
#[test]
fn speak_to_the_couch() {
let server = Server::new(String::from_str("http://localhost:5984"));
let info = server.info();
assert_eq!(info.message(), "Welcome".to_owned());
}
#[test]
fn | () {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("created_by_couch".to_owned());
let database = server.create_database("created_by_couch".to_owned());
assert_eq!(database.name(), "created_by_couch".to_owned());
}
#[test]
fn create_document() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("create_doc".to_owned());
let mut database = server.create_database("create_doc".to_owned());
let test_doc = &TestDocument { _id: "test".to_owned(), body: "test".to_owned() };
database.put(test_doc);
}
} | create_database | identifier_name |
test.rs | extern crate couch;
extern crate http;
extern crate serialize;
|
#[deriving(Encodable,Decodable)]
struct TestDocument {
_id: String,
body: String
}
impl Document for TestDocument {
fn id(&self) -> String {
self._id.clone()
}
}
#[test]
fn speak_to_the_couch() {
let server = Server::new(String::from_str("http://localhost:5984"));
let info = server.info();
assert_eq!(info.message(), "Welcome".to_owned());
}
#[test]
fn create_database() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("created_by_couch".to_owned());
let database = server.create_database("created_by_couch".to_owned());
assert_eq!(database.name(), "created_by_couch".to_owned());
}
#[test]
fn create_document() {
let mut server = Server::new(String::from_str("http://localhost:5984"));
server.delete_database("create_doc".to_owned());
let mut database = server.create_database("create_doc".to_owned());
let test_doc = &TestDocument { _id: "test".to_owned(), body: "test".to_owned() };
database.put(test_doc);
}
} | #[cfg(test)]
mod test {
use couch::{Server,Document}; | random_line_split |
Add.tsx | import { randomUUID } from 'node:crypto';
import {
Fragment,
useState,
type ReactElement,
type ReactNode,
type Dispatch as D,
type SetStateAction as S,
type ChangeEvent,
type MouseEvent
} from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Input, Button, Modal, message, Select } from 'antd';
import style from './add.sass';
import { requestDouyinVideoHtml, type DouyinVideo } from '../services/douyin';
import { setAddDownloadList } from '../reducers/douyin';
import type { AwemeDetail, ScriptRendedData, DownloadUrlItem, C0Obj, CVersionObj } from '../types';
/* render the select options */
function sele | nloadUrl: Array<DownloadUrlItem>): Array<ReactNode> {
return downloadUrl.map((item: DownloadUrlItem, index: number): ReactElement => {
return <Select.Option key={ item.label + item.value } value={ item.value }>{ item.label }</Select.Option>;
});
}
/* fetch and download links */
function Add(props: {}): ReactElement {
const dispatch: Dispatch = useDispatch();
const [urlValue, setUrlValue]: [string, D<S<string>>] = useState('');
const [getUrlLoading, setGetUrlLoading]: [boolean, D<S<boolean>>] = useState(false);
const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false); // show/hide the modal
const [downloadUrl, setDownloadUrl]: [DownloadUrlItem[], D<S<DownloadUrlItem[]>>] = useState([]); // video download URLs
const [selectedUrl, setSelectedUrl]: [string, D<S<string>>] = useState(''); // the selected download URL
const [title, setTitle]: [string, D<S<string>>] = useState(''); // video title
// clear state after the modal closes
function afterClose(): void {
setDownloadUrl([]);
setSelectedUrl('');
setTitle('');
}
// add a new download URL
function handleAddClick(event: MouseEvent<HTMLButtonElement>): void {
dispatch(setAddDownloadList({
qid: randomUUID(),
url: selectedUrl,
title
}));
setVisible(false);
}
// fetch the download URLs
async function handleGetVideoUrlClick(event: MouseEvent<HTMLButtonElement>): Promise<void> {
if (/^\s*$/.test(urlValue)) return;
setGetUrlLoading(true);
try {
let html: string = '';
const res: DouyinVideo = await requestDouyinVideoHtml(urlValue);
if (res.type === 'html') {
// fetch the html directly
html = res.value;
} else {
// compute __ac_signature, then fetch the html
const acSignature: string = Reflect.get(Reflect.get(globalThis, 'byted_acrawler'), 'sign')
.call(undefined, '', res.value);
const secondCookie: string = ` __ac_nonce=${ res.value }; __ac_signature=${ acSignature }`;
const secondRes: DouyinVideo = await requestDouyinVideoHtml(urlValue, secondCookie);
html = secondRes.value;
}
const document: Document = new DOMParser().parseFromString(html, 'text/html');
const rendedData: HTMLElement | null = document.getElementById('RENDER_DATA');
if (rendedData) {
const data: string = decodeURIComponent(rendedData.innerText);
const json: ScriptRendedData = JSON.parse(data);
const cVersion: CVersionObj | undefined = Object.values(json).find(
(o: C0Obj | CVersionObj): o is CVersionObj => typeof o === 'object' && ('aweme' in o));
if (cVersion) {
const awemeDetail: AwemeDetail = cVersion.aweme.detail;
const urls: DownloadUrlItem[] = [];
urls.push(
{ label: '有水印', value: awemeDetail.download.url },
{ label: '无水印', value: `https:${ awemeDetail.video.playApi }` }
);
let i: number = 1;
for (const item of awemeDetail.video.bitRateList) {
for (const item2 of item.playAddr) {
urls.push({
label: '下载地址-' + i++,
value: `https:${ item2.src }`
});
}
}
setDownloadUrl(urls);
setTitle(awemeDetail.desc);
setVisible(true);
} else {
message.error('视频相关信息解析失败!');
}
} else {
message.error('找不到视频相关信息!');
}
} catch (err) {
console.error(err);
message.error('视频地址解析失败!');
}
setGetUrlLoading(false);
}
return (
<Fragment>
<Input className={ style.input }
value={ urlValue }
placeholder="请输入视频ID"
onChange={ (event: ChangeEvent<HTMLInputElement>): void => setUrlValue(event.target.value) }
/>
<Button loading={ getUrlLoading } onClick={ handleGetVideoUrlClick }>获取下载地址</Button>
{/* download URLs */}
<Modal title="选择下载地址"
visible={ visible }
width={ 400 }
centered={ true }
destroyOnClose={ true }
closable={ false }
afterClose={ afterClose }
onOk={ handleAddClick }
onCancel={ (event: MouseEvent<HTMLButtonElement>): void => setVisible(false) }
>
<Select className={ style.urlSelect }
value={ selectedUrl }
onSelect={ (value: string): void => setSelectedUrl(value) }
>
{ selectOptionsRender(downloadUrl) }
</Select>
</Modal>
</Fragment>
);
}
export default Add; | ctOptionsRender(dow | identifier_name |
Add.tsx | import { randomUUID } from 'node:crypto';
import {
Fragment,
useState,
type ReactElement,
type ReactNode,
type Dispatch as D,
type SetStateAction as S,
type ChangeEvent,
type MouseEvent
} from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Input, Button, Modal, message, Select } from 'antd';
import style from './add.sass';
import { requestDouyinVideoHtml, type DouyinVideo } from '../services/douyin';
import { setAddDownloadList } from '../reducers/douyin';
import type { AwemeDetail, ScriptRendedData, DownloadUrlItem, C0Obj, CVersionObj } from '../types';
/* render the select options */
function selectOptionsRender(downloadUrl: Array<DownloadUrlItem>): Array<ReactNode> {
return downloadUrl.map((item: DownloadUrlItem, index: number): ReactElement => {
return <Select.Option key={ item.label + item.value } value={ item.value }>{ item.label }</Select.Option>;
});
}
/* fetch and download links */
function Add(props: {}): ReactElement {
const dispatch | : Dispatch = useDispatch();
const [urlValue, setUrlValue]: [string, D<S<string>>] = useState('');
const [getUrlLoading, setGetUrlLoading]: [boolean, D<S<boolean>>] = useState(false);
const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false); // show/hide the modal
const [downloadUrl, setDownloadUrl]: [DownloadUrlItem[], D<S<DownloadUrlItem[]>>] = useState([]); // video download URLs
const [selectedUrl, setSelectedUrl]: [string, D<S<string>>] = useState(''); // the selected download URL
const [title, setTitle]: [string, D<S<string>>] = useState(''); // video title
// clear state after the modal closes
function afterClose(): void {
setDownloadUrl([]);
setSelectedUrl('');
setTitle('');
}
// add a new download URL
function handleAddClick(event: MouseEvent<HTMLButtonElement>): void {
dispatch(setAddDownloadList({
qid: randomUUID(),
url: selectedUrl,
title
}));
setVisible(false);
}
// fetch the download URLs
async function handleGetVideoUrlClick(event: MouseEvent<HTMLButtonElement>): Promise<void> {
if (/^\s*$/.test(urlValue)) return;
setGetUrlLoading(true);
try {
let html: string = '';
const res: DouyinVideo = await requestDouyinVideoHtml(urlValue);
if (res.type === 'html') {
// fetch the html directly
html = res.value;
} else {
// compute __ac_signature, then fetch the html
const acSignature: string = Reflect.get(Reflect.get(globalThis, 'byted_acrawler'), 'sign')
.call(undefined, '', res.value);
const secondCookie: string = ` __ac_nonce=${ res.value }; __ac_signature=${ acSignature }`;
const secondRes: DouyinVideo = await requestDouyinVideoHtml(urlValue, secondCookie);
html = secondRes.value;
}
const document: Document = new DOMParser().parseFromString(html, 'text/html');
const rendedData: HTMLElement | null = document.getElementById('RENDER_DATA');
if (rendedData) {
const data: string = decodeURIComponent(rendedData.innerText);
const json: ScriptRendedData = JSON.parse(data);
const cVersion: CVersionObj | undefined = Object.values(json).find(
(o: C0Obj | CVersionObj): o is CVersionObj => typeof o === 'object' && ('aweme' in o));
if (cVersion) {
const awemeDetail: AwemeDetail = cVersion.aweme.detail;
const urls: DownloadUrlItem[] = [];
urls.push(
{ label: '有水印', value: awemeDetail.download.url },
{ label: '无水印', value: `https:${ awemeDetail.video.playApi }` }
);
let i: number = 1;
for (const item of awemeDetail.video.bitRateList) {
for (const item2 of item.playAddr) {
urls.push({
label: '下载地址-' + i++,
value: `https:${ item2.src }`
});
}
}
setDownloadUrl(urls);
setTitle(awemeDetail.desc);
setVisible(true);
} else {
message.error('视频相关信息解析失败!');
}
} else {
message.error('找不到视频相关信息!');
}
} catch (err) {
console.error(err);
message.error('视频地址解析失败!');
}
setGetUrlLoading(false);
}
return (
<Fragment>
<Input className={ style.input }
value={ urlValue }
placeholder="请输入视频ID"
onChange={ (event: ChangeEvent<HTMLInputElement>): void => setUrlValue(event.target.value) }
/>
<Button loading={ getUrlLoading } onClick={ handleGetVideoUrlClick }>获取下载地址</Button>
{/* download URLs */}
<Modal title="选择下载地址"
visible={ visible }
width={ 400 }
centered={ true }
destroyOnClose={ true }
closable={ false }
afterClose={ afterClose }
onOk={ handleAddClick }
onCancel={ (event: MouseEvent<HTMLButtonElement>): void => setVisible(false) }
>
<Select className={ style.urlSelect }
value={ selectedUrl }
onSelect={ (value: string): void => setSelectedUrl(value) }
>
{ selectOptionsRender(downloadUrl) }
</Select>
</Modal>
</Fragment>
);
}
export default Add; | identifier_body |
|
Add.tsx | import { randomUUID } from 'node:crypto';
import {
Fragment,
useState,
type ReactElement,
type ReactNode,
type Dispatch as D,
type SetStateAction as S,
type ChangeEvent,
type MouseEvent
} from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Input, Button, Modal, message, Select } from 'antd';
import style from './add.sass';
import { requestDouyinVideoHtml, type DouyinVideo } from '../services/douyin';
import { setAddDownloadList } from '../reducers/douyin';
import type { AwemeDetail, ScriptRendedData, DownloadUrlItem, C0Obj, CVersionObj } from '../types';
/* render the select options */
function selectOptionsRender(downloadUrl: Array<DownloadUrlItem>): Array<ReactNode> {
return downloadUrl.map((item: DownloadUrlItem, index: number): ReactElement => {
return <Select.Option key={ item.label + item.value } value={ item.value }>{ item.label }</Select.Option>;
});
}
/* fetch and download links */
function Add(props: {}): ReactElement {
const dispatch: Dispatch = useDispatch();
const [urlValue, setUrlValue]: [string, D<S<string>>] = useState('');
const [getUrlLoading, setGetUrlLoading]: [boolean, D<S<boolean>>] = useState(false);
const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false); // show/hide the modal
const [downloadUrl, setDownloadUrl]: [DownloadUrlItem[], D<S<DownloadUrlItem[]>>] = useState([]); // video download URLs
const [selectedUrl, setSelectedUrl]: [string, D<S<string>>] = useState(''); // the selected download URL
const [title, setTitle]: [string, D<S<string>>] = useState(''); // video title
// clear state after the modal closes
function afterClose(): void {
setDownloadUrl([]);
setSelectedUrl('');
setTitle('');
}
// add a new download URL
function handleAddClick(event: MouseEvent<HTMLButtonElement>): void {
dispatch(setAddDownloadList({
qid: randomUUID(),
url: selectedUrl,
title
}));
setVisible(false);
}
// fetch the download URLs
async function handleGetVideoUrlClick(event: MouseEvent<HTMLButtonElement>): Promise<void> {
if (/^\s*$/.test(urlValue)) return;
setGetUrlLoading(true);
try {
let html: string = '';
const res: DouyinVideo = await requestDouyinVideoHtml(urlValue);
if (res.type === 'html') {
// fetch the html directly
html = res.value;
} else {
// compute __ac_signature, then fetch the html
const acSignature: string = Reflect.get(Reflect.get(globalThis, 'byted_acrawler'), 'sign')
.call(undefined, '', res.value);
const secondCookie: string = ` __ac_nonce=${ res.value }; __ac_signature=${ acSignature }`;
const secondRes: DouyinVideo = await requestDouyinVideoHtml(urlValue, secondCookie);
html = secondRes.value;
}
const document: Document = new DOMParser().parseFromString(html, 'text/html');
const rendedData: HTMLElement | null = document.getElementById('RENDER_DATA');
if (rendedData) {
const data: string = decodeURIComponent(rendedData.innerText);
const json: ScriptRendedData = JSON.parse(data);
const cVersion: CVersionObj | undefined = Object.values(json).find(
(o: C0Obj | CVersionObj): o is CVersionObj => typeof o === 'object' && ('aweme' in o));
if (cVersion) {
const awemeDetail: AwemeDetail = cVersion.aweme.detail;
const urls: DownloadUrlItem[] = [];
urls.push(
{ label: '有水印', value: awemeDetail.download.url },
{ label: '无水印', value: `https:${ awemeDetail.video.playApi }` }
);
let i: number = 1;
for (const item of awemeDetail.video.bitRateList) {
for (const item2 of item.playAddr) {
urls.push({
label: '下载地址-' + i++,
value: `https:${ item2.src }`
});
}
}
setDownloadUrl(urls);
setTitle(awemeDetail.desc);
setVisible(true);
} else {
message.error('视频相关信息解析失败!');
}
} else {
message.error('找不到视频相关信息!');
}
} catch (err) {
console.er | alse);
}
return (
<Fragment>
<Input className={ style.input }
value={ urlValue }
placeholder="请输入视频ID"
onChange={ (event: ChangeEvent<HTMLInputElement>): void => setUrlValue(event.target.value) }
/>
<Button loading={ getUrlLoading } onClick={ handleGetVideoUrlClick }>获取下载地址</Button>
{/* download URLs */}
<Modal title="选择下载地址"
visible={ visible }
width={ 400 }
centered={ true }
destroyOnClose={ true }
closable={ false }
afterClose={ afterClose }
onOk={ handleAddClick }
onCancel={ (event: MouseEvent<HTMLButtonElement>): void => setVisible(false) }
>
<Select className={ style.urlSelect }
value={ selectedUrl }
onSelect={ (value: string): void => setSelectedUrl(value) }
>
{ selectOptionsRender(downloadUrl) }
</Select>
</Modal>
</Fragment>
);
}
export default Add; | ror(err);
message.error('视频地址解析失败!');
}
setGetUrlLoading(f | conditional_block |
Add.tsx | import { randomUUID } from 'node:crypto';
import {
Fragment,
useState,
type ReactElement,
type ReactNode,
type Dispatch as D,
type SetStateAction as S,
type ChangeEvent,
type MouseEvent
} from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Input, Button, Modal, message, Select } from 'antd';
import style from './add.sass';
import { requestDouyinVideoHtml, type DouyinVideo } from '../services/douyin';
import { setAddDownloadList } from '../reducers/douyin';
import type { AwemeDetail, ScriptRendedData, DownloadUrlItem, C0Obj, CVersionObj } from '../types';
/* render the select options */
function selectOptionsRender(downloadUrl: Array<DownloadUrlItem>): Array<ReactNode> {
return downloadUrl.map((item: DownloadUrlItem, index: number): ReactElement => {
return <Select.Option key={ item.label + item.value } value={ item.value }>{ item.label }</Select.Option>;
});
}
/* fetch and download links */
function Add(props: {}): ReactElement {
const dispatch: Dispatch = useDispatch();
const [urlValue, setUrlValue]: [string, D<S<string>>] = useState('');
const [getUrlLoading, setGetUrlLoading]: [boolean, D<S<boolean>>] = useState(false);
const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false); // show/hide the modal
const [downloadUrl, setDownloadUrl]: [DownloadUrlItem[], D<S<DownloadUrlItem[]>>] = useState([]); // video download URLs
const [selectedUrl, setSelectedUrl]: [string, D<S<string>>] = useState(''); // the selected download URL
const [title, setTitle]: [string, D<S<string>>] = useState(''); // video title
// clear state after the modal closes
function afterClose(): void {
setDownloadUrl([]);
setSelectedUrl('');
setTitle('');
}
// add a new download URL
function handleAddClick(event: MouseEvent<HTMLButtonElement>): void {
dispatch(setAddDownloadList({
qid: randomUUID(),
url: selectedUrl,
title
}));
setVisible(false);
}
// fetch the download URLs
async function handleGetVideoUrlClick(event: MouseEvent<HTMLButtonElement>): Promise<void> {
if (/^\s*$/.test(urlValue)) return;
setGetUrlLoading(true);
try {
let html: string = '';
const res: DouyinVideo = await requestDouyinVideoHtml(urlValue);
if (res.type === 'html') {
// fetch the html directly
html = res.value;
} else {
// compute __ac_signature, then fetch the html
const acSignature: string = Reflect.get(Reflect.get(globalThis, 'byted_acrawler'), 'sign')
.call(undefined, '', res.value);
const secondCookie: string = ` __ac_nonce=${ res.value }; __ac_signature=${ acSignature }`;
const secondRes: DouyinVideo = await requestDouyinVideoHtml(urlValue, secondCookie);
html = secondRes.value;
}
const document: Document = new DOMParser().parseFromString(html, 'text/html');
const rendedData: HTMLElement | null = document.getElementById('RENDER_DATA');
if (rendedData) {
const data: string = decodeURIComponent(rendedData.innerText);
const json: ScriptRendedData = JSON.parse(data);
const cVersion: CVersionObj | undefined = Object.values(json).find(
(o: C0Obj | CVersionObj): o is CVersionObj => typeof o === 'object' && ('aweme' in o));
if (cVersion) {
const awemeDetail: AwemeDetail = cVersion.aweme.detail;
const urls: DownloadUrlItem[] = [];
urls.push(
{ label: '有水印', value: awemeDetail.download.url },
{ label: '无水印', value: `https:${ awemeDetail.video.playApi }` }
);
let i: number = 1;
for (const item of awemeDetail.video.bitRateList) {
for (const item2 of item.playAddr) {
urls.push({
label: '下载地址-' + i++,
value: `https:${ item2.src }`
});
}
}
setDownloadUrl(urls);
setTitle(awemeDetail.desc);
setVisible(true);
} else {
message.error('视频相关信息解析失败!');
}
} else {
message.error('找不到视频相关信息!');
}
} catch (err) {
console.error(err);
message.error('视频地址解析失败!');
}
setGetUrlLoading(false);
}
return (
<Fragment>
<Input className={ style.input }
value={ urlValue }
placeholder="请输入视频ID"
onChange={ (event: ChangeEvent<HTMLInputElement>): void => setUrlValue(event.target.value) }
/>
<Button loading={ getUrlLoading } onClick={ handleGetVideoUrlClick }>获取下载地址</Button>
{/* download URLs */}
<Modal title="选择下载地址"
visible={ visible }
width={ 400 }
centered={ true } | >
<Select className={ style.urlSelect }
value={ selectedUrl }
onSelect={ (value: string): void => setSelectedUrl(value) }
>
{ selectOptionsRender(downloadUrl) }
</Select>
</Modal>
</Fragment>
);
}
export default Add; | destroyOnClose={ true }
closable={ false }
afterClose={ afterClose }
onOk={ handleAddClick }
onCancel={ (event: MouseEvent<HTMLButtonElement>): void => setVisible(false) } | random_line_split |
test_availability_zone.py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.tests.functional.api_sample_tests import test_servers
CONF = nova.conf.CONF
class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
ADMIN_API = True
extension_name = "os-availability-zone"
def _get_flags(self):
f = super(AvailabilityZoneJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.availability_zone.'
'Availability_zone')
return f
def | (self):
response = self._do_get('os-availability-zone')
self._verify_response('availability-zone-list-resp', {}, response, 200)
def test_availability_zone_detail(self):
response = self._do_get('os-availability-zone/detail')
self._verify_response('availability-zone-detail-resp', {}, response,
200)
def test_availability_zone_post(self):
self._post_server()
| test_availability_zone_list | identifier_name |
test_availability_zone.py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.tests.functional.api_sample_tests import test_servers
CONF = nova.conf.CONF
class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
ADMIN_API = True
extension_name = "os-availability-zone"
def _get_flags(self):
|
def test_availability_zone_list(self):
response = self._do_get('os-availability-zone')
self._verify_response('availability-zone-list-resp', {}, response, 200)
def test_availability_zone_detail(self):
response = self._do_get('os-availability-zone/detail')
self._verify_response('availability-zone-detail-resp', {}, response,
200)
def test_availability_zone_post(self):
self._post_server()
| f = super(AvailabilityZoneJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.availability_zone.'
'Availability_zone')
return f | identifier_body |
test_availability_zone.py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.tests.functional.api_sample_tests import test_servers
CONF = nova.conf.CONF
class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
ADMIN_API = True
extension_name = "os-availability-zone"
def _get_flags(self):
f = super(AvailabilityZoneJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.availability_zone.'
'Availability_zone')
return f
| def test_availability_zone_list(self):
response = self._do_get('os-availability-zone')
self._verify_response('availability-zone-list-resp', {}, response, 200)
def test_availability_zone_detail(self):
response = self._do_get('os-availability-zone/detail')
self._verify_response('availability-zone-detail-resp', {}, response,
200)
def test_availability_zone_post(self):
self._post_server() | random_line_split |
|
stage_spec.ts | /*
* Copyright 2019 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Job} from "models/pipeline_configs/job";
import {Stage} from "models/pipeline_configs/stage";
import {ExecTask} from "models/pipeline_configs/task";
describe("Stage model", () => {
function validJob() {
return new Job("name", [new ExecTask("ls", ["-lA"])]);
}
it("should include a name", () => {
let stage = new Stage("foo", [validJob()]);
expect(stage.isValid()).toBe(true);
expect(stage.errors().count()).toBe(0);
stage = new Stage("", [validJob()]);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["name"]);
});
it("validates name format", () => {
const stage = new Stage("my awesome stage that has a terrible name", [validJob()]);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["name"]);
expect(stage.errors().errorsForDisplay("name")).toBe("Invalid name. This must be alphanumeric and can contain hyphens, underscores and periods (however, it cannot start with a period). The maximum allowed length is 255 characters.");
});
it("should include a job", () => {
const stage = new Stage("foo", []);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["jobs"]);
expect(stage.errors().errorsForDisplay("jobs")).toBe("A stage must have at least one job.");
});
it("approval state allows toggling between automatic and manual approval types", () => {
const stage = new Stage("foo", [validJob()]);
expect(stage.toApiPayload().approval.type).toBe("success"); // default setting
stage.approval().state(false); | expect(stage.toApiPayload().approval.type).toBe("manual");
stage.approval().state(true);
expect(stage.toApiPayload().approval.type).toBe("success");
});
it("should serialize correctly", () => {
const stage = new Stage("foo", [validJob()]);
expect(stage.toApiPayload()).toEqual({
name: "foo",
approval: {
type: "success",
authorization: {}
},
jobs: [
{
name: "name",
tasks: [{
type: "exec",
attributes: {
command: "ls",
arguments: ["-lA"],
run_if: []
}
}]
}
]
});
});
}); | random_line_split |
|
stage_spec.ts | /*
* Copyright 2019 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Job} from "models/pipeline_configs/job";
import {Stage} from "models/pipeline_configs/stage";
import {ExecTask} from "models/pipeline_configs/task";
describe("Stage model", () => {
function validJob() |
it("should include a name", () => {
let stage = new Stage("foo", [validJob()]);
expect(stage.isValid()).toBe(true);
expect(stage.errors().count()).toBe(0);
stage = new Stage("", [validJob()]);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["name"]);
});
it("validates name format", () => {
const stage = new Stage("my awesome stage that has a terrible name", [validJob()]);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["name"]);
expect(stage.errors().errorsForDisplay("name")).toBe("Invalid name. This must be alphanumeric and can contain hyphens, underscores and periods (however, it cannot start with a period). The maximum allowed length is 255 characters.");
});
it("should include a job", () => {
const stage = new Stage("foo", []);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["jobs"]);
expect(stage.errors().errorsForDisplay("jobs")).toBe("A stage must have at least one job.");
});
it("approval state allows toggling between automatic and manual approval types", () => {
const stage = new Stage("foo", [validJob()]);
expect(stage.toApiPayload().approval.type).toBe("success"); // default setting
stage.approval().state(false);
expect(stage.toApiPayload().approval.type).toBe("manual");
stage.approval().state(true);
expect(stage.toApiPayload().approval.type).toBe("success");
});
it("should serialize correctly", () => {
const stage = new Stage("foo", [validJob()]);
expect(stage.toApiPayload()).toEqual({
name: "foo",
approval: {
type: "success",
authorization: {}
},
jobs: [
{
name: "name",
tasks: [{
type: "exec",
attributes: {
command: "ls",
arguments: ["-lA"],
run_if: []
}
}]
}
]
});
});
});
| {
return new Job("name", [new ExecTask("ls", ["-lA"])]);
} | identifier_body |
stage_spec.ts | /*
* Copyright 2019 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Job} from "models/pipeline_configs/job";
import {Stage} from "models/pipeline_configs/stage";
import {ExecTask} from "models/pipeline_configs/task";
describe("Stage model", () => {
function | () {
return new Job("name", [new ExecTask("ls", ["-lA"])]);
}
it("should include a name", () => {
let stage = new Stage("foo", [validJob()]);
expect(stage.isValid()).toBe(true);
expect(stage.errors().count()).toBe(0);
stage = new Stage("", [validJob()]);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["name"]);
});
it("validates name format", () => {
const stage = new Stage("my awesome stage that has a terrible name", [validJob()]);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["name"]);
expect(stage.errors().errorsForDisplay("name")).toBe("Invalid name. This must be alphanumeric and can contain hyphens, underscores and periods (however, it cannot start with a period). The maximum allowed length is 255 characters.");
});
it("should include a job", () => {
const stage = new Stage("foo", []);
expect(stage.isValid()).toBe(false);
expect(stage.errors().count()).toBe(1);
expect(stage.errors().keys()).toEqual(["jobs"]);
expect(stage.errors().errorsForDisplay("jobs")).toBe("A stage must have at least one job.");
});
it("approval state allows toggling between automatic and manual approval types", () => {
const stage = new Stage("foo", [validJob()]);
expect(stage.toApiPayload().approval.type).toBe("success"); // default setting
stage.approval().state(false);
expect(stage.toApiPayload().approval.type).toBe("manual");
stage.approval().state(true);
expect(stage.toApiPayload().approval.type).toBe("success");
});
it("should serialize correctly", () => {
const stage = new Stage("foo", [validJob()]);
expect(stage.toApiPayload()).toEqual({
name: "foo",
approval: {
type: "success",
authorization: {}
},
jobs: [
{
name: "name",
tasks: [{
type: "exec",
attributes: {
command: "ls",
arguments: ["-lA"],
run_if: []
}
}]
}
]
});
});
});
| validJob | identifier_name |
utils.py | """Helper functions and view derivers for spynl.main."""
import json
import logging
import traceback
import sys
import os
import contextlib
from functools import wraps
from inspect import isclass, getfullargspec
import yaml
from tld import get_tld
from tld.exceptions import TldBadUrl, TldDomainNotFound
from pyramid.response import Response
from pyramid.renderers import json_renderer_factory
from pyramid.exceptions import Forbidden
from pyramid import threadlocal
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound
from spynl.main import urlson
from spynl.main.exceptions import SpynlException, MissingParameter, BadOrigin
from spynl.main.version import __version__ as spynl_version
from spynl.main.locale import SpynlTranslationString as _
def get_request():
"""
Retrieve current request.
Use with care, though:
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
"""
return threadlocal.get_current_request()
def get_settings(setting=None):
"""
Get settings (from .ini file [app:main] section)
If setting is given, get its value from the application settings and return it.
Can also be accessed from the request object: request.registry.settings
For more info on the way we do it here, consult
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
Our policy is to not edit the settings during a request/response cycle.
"""
registry_settings = threadlocal.get_current_registry().settings or {}
if setting is not None:
return registry_settings.get(setting)
return registry_settings
def check_origin(endpoint, info):
"""Check if origin is allowed"""
def wrapper_view(context, request):
"""raise HTTPForbidden if origin isn't allowed"""
origin = request.headers.get('Origin', '')
if not is_origin_allowed(origin):
# because this is a wrapper, the bad origin will not be properly
# escalated to forbidden, so it needs to be done like this.
raise Forbidden(
detail=BadOrigin(origin).message.translate(request.localizer)
)
return endpoint(context, request)
return wrapper_view
# NOTE this has NOTHING to do with the check options view deriver. But we
# need to register it somewhere.
check_origin.options = ('is_error_view',)
def validate_locale(locale):
"""Validate a locale against our supported languages."""
supported_languages = [
lang.strip().lower()
for lang in get_settings().get('spynl.languages', 'en').split(',')
]
language = None
if not locale:
return
# we're only looking for languages here, not dialects.
language = str(locale)[:2].lower()
if language in supported_languages:
return language
def handle_pre_flight_request(endpoint, info):
"""
"pre-flight-request": return custom response with some information on
what we allow. Used by browsers before they send XMLHttpRequests.
"""
def wrapper(context, request):
"""Call the endpoint if not an OPTION (pre-flight) request,
otherwise return a custom Response."""
if request.method != 'OPTIONS':
return endpoint(context, request)
else:
headerlist = []
origin = request.headers.get('Origin')
if origin: # otherwise we are on localhost or are called directly
if is_origin_allowed(origin):
headerlist.append(('Access-Control-Allow-Origin', origin))
else:
headerlist.append(('Access-Control-Allow-Origin', 'null'))
headerlist.extend(
[
('Access-Control-Allow-Methods', 'GET,POST'),
('Access-Control-Max-Age', '86400'),
('Access-Control-Allow-Credentials', 'true'),
('Content-Length', '0'),
('Content-Type', 'text/plain'),
]
)
# you can send any headers to Spynl, basically
if 'Access-Control-Request-Headers' in request.headers:
headerlist.append(
(
'Access-Control-Allow-Headers',
request.headers['Access-Control-Request-Headers'],
)
)
# returning a generic and resource-agnostic pre-flight response
return Response(headerlist=headerlist)
return wrapper
def is_origin_allowed(origin):
"""
Check request origin for matching our whitelists.
First tries dev whitelists (that list is expected to hold
either complete URLs or mere protocols, e.g. "chrome-extension://").
Then the tld whitelist is tried, which is expected to hold
only the top-level domains.
Returns True if origin is allowed, False otherwise.
"""
if not origin:
return True
settings = get_settings()
dev_whitelist = parse_csv_list(settings.get('spynl.dev_origin_whitelist', ''))
dev_list_urls = [url for url in dev_whitelist if not url.endswith('://')]
origin_allowed = origin in dev_list_urls
dev_list_protocols = [url for url in dev_whitelist if url.endswith('://')]
for protocol in dev_list_protocols:
if origin.startswith(protocol):
origin_allowed = True
if not origin_allowed:
try:
tld = get_tld(origin)
except (TldBadUrl, TldDomainNotFound):
tld = origin # dev domains like e.g. 0.0.0.0:9000 will fall here
tld_whitelist = parse_csv_list(settings.get('spynl.tld_origin_whitelist', ''))
if tld in tld_whitelist:
origin_allowed = True
return origin_allowed
def get_header_args(request):
"""Return a dictionary with arguments passed as headers."""
# these require a spynl-specific prefix to be recognized
headers = {
key: value
for key, value in request.headers.items()
if key.lower().startswith('x-spynl-')
}
# We might also get the session id and client IP address with the headers
for key in request.headers.keys():
if key.lower() == 'sid':
headers['sid'] = request.headers[key]
if key == 'X-Forwarded-For':
headers['X-Forwarded-For'] = request.headers[key]
return headers
def get_parsed_body(request):
"""Return the body of the request parsed if request was POST or PUT."""
settings = get_settings()
body_parser = settings.get('spynl.post_parser')
if request.method in ('POST', 'PUT'):
if body_parser:
request.parsed_body = body_parser(request)
else:
request.parsed_body = {} if not request.body else json.loads(request.body)
else:
# disregard any body content if not a POST or PUT request
request.parsed_body = {}
return request.parsed_body
def unify_args(request):
"""
Make one giant args dictionary from GET, POST, headers and cookies and
return it. On the way, create r.parsed_body and r.parsed_get as well.
It is possible to provide a custom parser for the POST body in the
settings. Complex data can be given via GET as a JSON string.
GET would overwrite POST when parameter names collide.
"""
args = {}
# get headers first, they might be useful for parsing the body
args.update(get_header_args(request))
# get POST data
args.update(get_parsed_body(request))
# get GET args, can be written in JSON style
# args.update(urlson.loads_dict(request.GET))
# TODO: needs some refactoring - maybe urlson can actually do this parsing
# for us. We don't know the context yet.
from spynl.main.serial import objects
context = hasattr(request, 'context') and request.context or None
args.update(
json.loads(
json.dumps(urlson.loads_dict(request.GET)),
object_hook=objects.SpynlDecoder(context=context),
)
)
request.endpoint_method = find_view_name(request)
# get cookies, but do not overwrite explicitly given settings
for key in request.cookies:
if key not in args:
args[key] = request.cookies[key]
# we actually want the sid to live as a header from here on out.
# It can come in other ways as well (e.g. in GET) for convenience,
# but we agree for it to live in one place.
if args.get('sid'):
request.headers['sid'] = args['sid']
del args['sid']
return args
def find_view_name(request):
"""find the view name
TODO: I believe this is not completely generic.
"""
name = None
if request.matchdict and 'method' in request.matchdict: # a route was matched
name = request.matchdict['method']
else:
name = request.path_info
if name.startswith('/'):
name = name[1:]
if hasattr(request, 'matched_route') and request.matched_route:
if name in request.matched_route.name:
# method name was not in the URL
if request.method == 'POST':
name = 'edit'
elif request.method == 'GET':
name = 'get'
return name
def get_user_info(request, purpose=None):
"""
Spynl.main has no user model. This function allows the use of a
user_info function defined in a plugin, by setting it to the
'user_info' setting in the plugger.py of the plugin. If no
other function is defined, it uses _get_user_info instead.
The user_info function should return a dictionary with
information about the (authenticated) user. If no information is
available it should return an empty dictionary.
"""
try:
return request.registry.settings['user_info_function'](request, purpose)
except (KeyError, AttributeError, TypeError):
return _get_user_info(request)
def _get_user_info(request):
"""
Function to get user information as a dictionary. In spynl.main the
only user information we can get is the ip address.
"""
ipaddress = get_user_ip(request)
return dict(ipaddress=ipaddress)
def get_user_ip(request):
""" Get the ipaddress of the user """
ipaddress = request.environ.get('REMOTE_ADDR', None)
# Load balancers overwrite ipaddress,
# so we prefer the forward header EBS sets
if 'X-Forwarded-For' in request.headers.keys():
ipaddress = request.headers['X-Forwarded-For']
return ipaddress
def get_err_source(original_traceback=None):
"""Use this when an error is handled to get info on where it occured"""
try: # carefully try to get the actual place where the error happened
if not original_traceback:
original_traceback = sys.exc_info()[2] # class, exc, traceback
first_call = traceback.extract_tb(original_traceback)[-1]
return dict(
module=first_call[0],
linenr=first_call[1],
method=first_call[2],
src_code=first_call[3],
)
except Exception:
return 'I was unable to retrieve error source information.'
def renderer_factory(info):
"""
Normally responses are rendered as bare JSON, but this factory will look
into the settings for other requested renderers first.
"""
if hasattr(info, 'settings'):
settings = info.settings
if settings and 'spynl.renderer' in settings:
return settings['spynl.renderer']
return json_renderer_factory(None)
def get_logger(name=None):
"""Return the Logger object with the given name."""
if not name:
name = __name__
return logging.getLogger(name)
def parse_value(value, class_info):
'''
Parse a value. class_info is expected to be a class or a list
of classes to try in order.
Raises SpynlException exception if no parsing was possible.
'''
if isclass(class_info):
try:
return class_info(value)
except Exception:
raise SpynlException(
_(
'parse-value-exception-as-class',
mapping={'value': value, 'class': class_info.__name__},
)
)
if hasattr(class_info, '__iter__'):
for cl in class_info:
if not isclass(cl):
raise SpynlException(
_(
'parse-value-exception-not-class',
mapping={'class': cl, 'value': value},
)
)
try:
return cl(value)
except Exception:
pass
raise SpynlException(
_(
'parse-value-exception-any-class',
mapping={'value': value, 'classes': [cl.__name__ for cl in class_info]},
)
)
def parse_csv_list(csv_list):
"""Parse a list of CSV values."""
return [i.strip() for i in csv_list.split(',')]
def | (doc_str, load_yaml=True):
"""
Load the YAML part (after "---") from the docstring of a Spynl view.
If load_yaml is True, return the result of yaml.load; otherwise return
the YAML part as a string.
"""
if doc_str:
yaml_sep = doc_str.find('---')
else:
yaml_sep = -1
if yaml_sep != -1:
yaml_str = doc_str[yaml_sep:]
if load_yaml:
return yaml.load(yaml_str)
else:
return yaml_str
return None
def required_args(*arguments):
"""Call the decorator that checks if required args passed in request."""
def outer_wrapper(func):
"""Return the decorator."""
@wraps(func)
def inner_wrapper(*args):
"""
Raise if a required argument is missing or is empty.
Decorator checks if request.args were the expected <*arguments> of
the current endpoint.
"""
request = args[-1] # request is always the last argument
for required_arg in arguments:
if request.args.get(required_arg, None) is None:
raise MissingParameter(required_arg)
if len(getfullargspec(func).args) == 1:
return func(request)
else:
return func(*args)
return inner_wrapper
return outer_wrapper
def report_to_sentry(exception, request):
"""Send exception info to online services for better monitoring
The user_info param can be added so the services can display which user
was involved.
The exc_info parameter should only be passed in if a
different exception than the current one on the stack should be sent.
The metadata parameter can be used for any extra information.
The endpoint parameter is sent under the tags Sentry parameter so
exceptions can be filtered in their website by endpoint.
"""
log = get_logger()
settings = get_settings()
try:
import raven
dsn = 'https://{}@app.getsentry.com/{}'.format(
settings['spynl.sentry_key'], settings['spynl.sentry_project']
)
client = raven.Client(
dsn=dsn,
release=spynl_version,
site='Spynl',
environment=settings.get('spynl.ops.environment', 'dev'),
processors=('raven.processors.SanitizePasswordsProcessor',),
)
except (ImportError, KeyError):
# if raven package is not installed or sentry key or project don't exist move on
return
except raven.exceptions.InvalidDsn:
log.warning('Invalid Sentry DSN')
return
user_info = get_user_info(request, purpose='error_view')
if user_info:
client.user_context(user_info)
client.captureException(
tags=dict(endpoint=request.path),
extra=dict(
url=request.path_url,
debug_message=getattr(exception, 'debug_message', None),
developer_message=getattr(exception, 'developer_message', None),
detail=getattr(exception, 'detail', None),
),
)
def report_to_newrelic(user_info):
# tell NewRelic about user information if the newrelic package is installed
# (the rest of the configuration of NewRelic is ini-file-based)
try:
import newrelic.agent
except ImportError:
return
if not user_info:
return
log = get_logger()
for key, value in user_info.items():
# do not include ipaddress for privacy
if key == 'ipaddress':
continue
if not newrelic.agent.add_custom_parameter(key, value):
log.warning('Could not add user info to NewRelic on exception: %s', key)
break
def log_error(exc, request, top_msg, error_type=None, error_msg=None):
"""
Log the error from an error view to the log, and to external monitoring.
Make sure the __cause__ of the exception is used.
"""
log = get_logger()
if not error_type:
error_type = exc.__class__.__name__
if error_type.endswith('Exception'):
error_type = error_type[: -len('Exception')]
if not error_msg:
try:
error_msg = exc.message
except AttributeError:
error_msg = str(exc)
if not error_msg:
error_msg = _('no-message-available')
user_info = get_user_info(request, purpose='error_view')
debug_message = (getattr(exc, 'debug_message', 'No debug message'),)
developer_message = (getattr(exc, 'developer_message', 'No developer message'),)
metadata = dict(
user=user_info,
url=request.path_url,
debug_message=debug_message,
developer_message=developer_message,
err_source=get_err_source(exc.__traceback__),
detail=getattr(exc, 'detail', None),
)
if developer_message:
top_msg += " developer_message: %s" % developer_message
if debug_message:
top_msg += " debug_message: %s" % debug_message
log.error(
top_msg,
error_type,
str(error_msg),
exc_info=sys.exc_info(),
extra=dict(meta=metadata),
)
if getattr(exc, 'monitor', None) is True or not isinstance(
exc, (HTTPForbidden, HTTPNotFound)
):
report_to_sentry(exc, request)
report_to_newrelic(user_info)
@contextlib.contextmanager
def chdir(dirname=None):
"""Change to this directory during this context"""
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def add_jinja2_filters(config, new_filters):
"""
A helper function to add jinja filters in a plugger in such a
way that previously added filters are not removed.
"""
filters = config.get_settings().get('jinja2.filters', {})
filters.update(new_filters)
config.add_settings({'jinja2.filters': filters})
| get_yaml_from_docstring | identifier_name |
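The parse_value helper defined in the utils.py row above tries each given class in order and raises SpynlException when the value fits none of them. Below is a minimal usage sketch; the import path and argument values are illustrative assumptions, not taken from the dataset.
# Illustrative sketch (assumed import path):
from spynl.main.utils import parse_value
price = parse_value("19.95", float)        # float("19.95") succeeds -> 19.95
count = parse_value("42", [int, float])    # int("42") succeeds first -> 42
# parse_value("abc", int) would raise SpynlException.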
utils.py | """Helper functions and view derivers for spynl.main."""
import json
import logging
import traceback
import sys
import os
import contextlib
from functools import wraps
from inspect import isclass, getfullargspec
import yaml
from tld import get_tld
from tld.exceptions import TldBadUrl, TldDomainNotFound
from pyramid.response import Response
from pyramid.renderers import json_renderer_factory
from pyramid.exceptions import Forbidden
from pyramid import threadlocal
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound
from spynl.main import urlson
from spynl.main.exceptions import SpynlException, MissingParameter, BadOrigin
from spynl.main.version import __version__ as spynl_version
from spynl.main.locale import SpynlTranslationString as _
def get_request():
"""
Retrieve current request.
Use with care, though:
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
"""
return threadlocal.get_current_request()
def get_settings(setting=None):
"""
Get settings (from .ini file [app:main] section)
If setting is given, get its value from the application settings and return it.
Can also be accessed from the request object: request.registry.settings
For more info on the way we do it here, consult
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
Our policy is to not edit the settings during a request/response cycle.
"""
registry_settings = threadlocal.get_current_registry().settings or {}
if setting is not None:
return registry_settings.get(setting)
return registry_settings
def check_origin(endpoint, info):
"""Check if origin is allowed"""
def wrapper_view(context, request):
"""raise HTTPForbidden if origin isn't allowed"""
origin = request.headers.get('Origin', '')
if not is_origin_allowed(origin):
# because this is a wrapper, the bad origin will not be properly
# escalated to forbidden, so it needs to be done like this.
raise Forbidden(
detail=BadOrigin(origin).message.translate(request.localizer)
)
return endpoint(context, request)
return wrapper_view
# NOTE this has NOTHING to do with the check options view deriver. But we
# need to register it somewhere.
check_origin.options = ('is_error_view',)
def validate_locale(locale):
"""Validate a locale against our supported languages."""
supported_languages = [
lang.strip().lower()
for lang in get_settings().get('spynl.languages', 'en').split(',')
]
language = None
if not locale:
return
# we're only looking for languages here, not dialects.
language = str(locale)[:2].lower()
if language in supported_languages:
return language
def handle_pre_flight_request(endpoint, info):
"""
"pre-flight-request": return custom response with some information on
what we allow. Used by browsers before they send XMLHttpRequests.
"""
def wrapper(context, request):
"""Call the endpoint if not an OPTION (pre-flight) request,
otherwise return a custom Response."""
if request.method != 'OPTIONS':
return endpoint(context, request)
else:
headerlist = []
origin = request.headers.get('Origin')
if origin: # otherwise we are on localhost or are called directly
if is_origin_allowed(origin):
headerlist.append(('Access-Control-Allow-Origin', origin))
else:
headerlist.append(('Access-Control-Allow-Origin', 'null'))
headerlist.extend(
[
('Access-Control-Allow-Methods', 'GET,POST'),
('Access-Control-Max-Age', '86400'),
('Access-Control-Allow-Credentials', 'true'),
('Content-Length', '0'),
('Content-Type', 'text/plain'),
]
)
# you can send any headers to Spynl, basically
if 'Access-Control-Request-Headers' in request.headers:
headerlist.append(
(
'Access-Control-Allow-Headers',
request.headers['Access-Control-Request-Headers'],
)
)
# returning a generic and resource-agnostic pre-flight response
return Response(headerlist=headerlist)
return wrapper
def is_origin_allowed(origin):
"""
Check request origin for matching our whitelists.
First tries dev whitelists (that list is expected to hold
either complete URLs or mere protocols, e.g. "chrome-extension://").
Then the tld whitelist is tried, which is expected to hold
only the top-level domains.
Returns True if origin is allowed, False otherwise.
"""
if not origin:
return True
settings = get_settings()
dev_whitelist = parse_csv_list(settings.get('spynl.dev_origin_whitelist', ''))
dev_list_urls = [url for url in dev_whitelist if not url.endswith('://')]
origin_allowed = origin in dev_list_urls
dev_list_protocols = [url for url in dev_whitelist if url.endswith('://')]
for protocol in dev_list_protocols:
if origin.startswith(protocol):
origin_allowed = True
if not origin_allowed:
try:
tld = get_tld(origin)
except (TldBadUrl, TldDomainNotFound):
tld = origin # dev domains like e.g. 0.0.0.0:9000 will fall here
tld_whitelist = parse_csv_list(settings.get('spynl.tld_origin_whitelist', ''))
if tld in tld_whitelist:
origin_allowed = True
return origin_allowed
def get_header_args(request):
"""Return a dictionary with arguments passed as headers."""
# these require a spynl-specific prefix to be recognized
headers = {
key: value
for key, value in request.headers.items()
if key.lower().startswith('x-spynl-')
}
# We might also get the session id and client IP address with the headers
for key in request.headers.keys():
if key.lower() == 'sid':
headers['sid'] = request.headers[key]
if key == 'X-Forwarded-For':
headers['X-Forwarded-For'] = request.headers[key]
return headers
def get_parsed_body(request):
"""Return the body of the request parsed if request was POST or PUT."""
settings = get_settings()
body_parser = settings.get('spynl.post_parser')
if request.method in ('POST', 'PUT'):
if body_parser:
request.parsed_body = body_parser(request)
else:
request.parsed_body = {} if not request.body else json.loads(request.body)
else:
# disregard any body content if not a POST or PUT request
request.parsed_body = {}
return request.parsed_body
def unify_args(request):
"""
Make one giant args dictionary from GET, POST, headers and cookies and
return it. On the way, create r.parsed_body and r.parsed_get as well.
It is possible to provide a custom parser for the POST body in the
settings. Complex data can be given via GET as a JSON string.
GET would overwrite POST when parameter names collide.
"""
args = {}
# get headers first, they might be useful for parsing the body
args.update(get_header_args(request))
# get POST data
args.update(get_parsed_body(request))
# get GET args, can be written in JSON style
# args.update(urlson.loads_dict(request.GET))
# TODO: needs some refactoring - maybe urlson can actually do this parsing
# for us. We don't know the context yet.
from spynl.main.serial import objects
context = hasattr(request, 'context') and request.context or None
args.update(
json.loads(
json.dumps(urlson.loads_dict(request.GET)),
object_hook=objects.SpynlDecoder(context=context),
)
)
request.endpoint_method = find_view_name(request)
# get cookies, but do not overwrite explicitly given settings
for key in request.cookies:
if key not in args:
args[key] = request.cookies[key]
# we actually want the sid to live as a header from here on out.
# It can come in other ways as well (e.g. in GET) for convenience,
# but we agree for it to live in one place.
if args.get('sid'):
request.headers['sid'] = args['sid']
del args['sid']
return args
def find_view_name(request):
"""find the view name
TODO: I believe this is not completely generic.
"""
name = None
if request.matchdict and 'method' in request.matchdict: # a route was matched
name = request.matchdict['method']
else:
name = request.path_info
if name.startswith('/'):
name = name[1:]
if hasattr(request, 'matched_route') and request.matched_route:
if name in request.matched_route.name:
# method name was not in the URL
if request.method == 'POST':
name = 'edit'
elif request.method == 'GET':
name = 'get'
return name
def get_user_info(request, purpose=None):
"""
Spynl.main has no user model. This function allows the use of a
user_info function defined in a plugin, by setting it to the
'user_info' setting in the plugger.py of the plugin. If no
other function is defined, it uses _get_user_info instead.
The user_info function should return a dictionary with
information about the (authenticated) user. If no information is
available it should return an empty dictionary.
"""
try:
return request.registry.settings['user_info_function'](request, purpose)
except (KeyError, AttributeError, TypeError):
return _get_user_info(request)
def _get_user_info(request):
"""
Function to get user information as a dictionary. In spynl.main the
only user information we can get is the ip address.
"""
ipaddress = get_user_ip(request)
return dict(ipaddress=ipaddress)
def get_user_ip(request):
""" Get the ipaddress of the user """
ipaddress = request.environ.get('REMOTE_ADDR', None)
# Load balancers overwrite ipaddress,
# so we prefer the forward header EBS sets
if 'X-Forwarded-For' in request.headers.keys():
ipaddress = request.headers['X-Forwarded-For']
return ipaddress
def get_err_source(original_traceback=None):
"""Use this when an error is handled to get info on where it occured"""
try: # carefully try to get the actual place where the error happened
if not original_traceback:
original_traceback = sys.exc_info()[2] # class, exc, traceback
first_call = traceback.extract_tb(original_traceback)[-1]
return dict(
module=first_call[0],
linenr=first_call[1],
method=first_call[2],
src_code=first_call[3],
)
except Exception:
return 'I was unable to retrieve error source information.'
def renderer_factory(info):
"""
Normally responses are rendered as bare JSON, but this factory will look
into the settings for other requested renderers first.
"""
if hasattr(info, 'settings'):
settings = info.settings
if settings and 'spynl.renderer' in settings:
return settings['spynl.renderer']
return json_renderer_factory(None)
def get_logger(name=None):
"""Return the Logger object with the given name."""
if not name:
name = __name__
return logging.getLogger(name)
def parse_value(value, class_info):
'''
Parse a value. class_info is expected to be a class or a list
of classes to try in order.
Raises SpynlException exception if no parsing was possible.
'''
if isclass(class_info):
try:
return class_info(value)
except Exception:
raise SpynlException(
_(
'parse-value-exception-as-class',
mapping={'value': value, 'class': class_info.__name__},
)
)
if hasattr(class_info, '__iter__'):
for cl in class_info:
if not isclass(cl):
raise SpynlException(
_(
'parse-value-exception-not-class',
mapping={'class': cl, 'value': value},
)
)
try:
return cl(value)
except Exception:
pass
raise SpynlException(
_(
'parse-value-exception-any-class',
mapping={'value': value, 'classes': [cl.__name__ for cl in class_info]},
)
)
def parse_csv_list(csv_list):
"""Parse a list of CSV values."""
return [i.strip() for i in csv_list.split(',')]
def get_yaml_from_docstring(doc_str, load_yaml=True):
"""
Load the YAML part (after "---") from the docstring of a Spynl view.
If load_yaml is True, return the result of yaml.load; otherwise return
the YAML part as a string.
"""
if doc_str:
yaml_sep = doc_str.find('---')
else:
yaml_sep = -1
if yaml_sep != -1:
yaml_str = doc_str[yaml_sep:]
if load_yaml:
return yaml.load(yaml_str)
else:
return yaml_str
return None
def required_args(*arguments):
"""Call the decorator that checks if required args passed in request."""
def outer_wrapper(func):
"""Return the decorator."""
@wraps(func)
def inner_wrapper(*args):
"""
Raise if a required argument is missing or is empty.
Decorator checks if request.args were the expected <*arguments> of
the current endpoint.
"""
request = args[-1] # request is always the last argument
for required_arg in arguments:
if request.args.get(required_arg, None) is None:
raise MissingParameter(required_arg)
if len(getfullargspec(func).args) == 1:
return func(request)
else:
return func(*args)
return inner_wrapper
return outer_wrapper
def report_to_sentry(exception, request):
"""Send exception info to online services for better monitoring
The user_info param can be added so the services can display which user
was involved.
The exc_info parameter should only be passed in if a
different exception than the current one on the stack should be sent.
The metadata parameter can be used for any extra information.
The endpoint parameter is sent under the tags Sentry parameter so
exceptions can be filtered in their website by endpoint.
"""
log = get_logger()
settings = get_settings()
try:
import raven
dsn = 'https://{}@app.getsentry.com/{}'.format(
settings['spynl.sentry_key'], settings['spynl.sentry_project']
)
client = raven.Client(
dsn=dsn,
release=spynl_version,
site='Spynl',
environment=settings.get('spynl.ops.environment', 'dev'),
processors=('raven.processors.SanitizePasswordsProcessor',),
)
except (ImportError, KeyError):
# if raven package is not installed or sentry key or project don't exist move on
return
except raven.exceptions.InvalidDsn:
log.warning('Invalid Sentry DSN')
return
user_info = get_user_info(request, purpose='error_view')
if user_info:
client.user_context(user_info)
client.captureException(
tags=dict(endpoint=request.path),
extra=dict(
url=request.path_url,
debug_message=getattr(exception, 'debug_message', None),
developer_message=getattr(exception, 'developer_message', None),
detail=getattr(exception, 'detail', None),
),
)
def report_to_newrelic(user_info):
# tell NewRelic about user information if the newrelic package is installed
# (the rest of the configuration of NewRelic is ini-file-based)
try:
import newrelic.agent
except ImportError:
return
if not user_info:
return
log = get_logger()
for key, value in user_info.items():
# do not include ipaddress for privacy
if key == 'ipaddress':
continue
if not newrelic.agent.add_custom_parameter(key, value):
log.warning('Could not add user info to NewRelic on exception: %s', key)
break
def log_error(exc, request, top_msg, error_type=None, error_msg=None):
"""
Log the error from an error view to the log, and to external monitoring.
Make sure the __cause__ of the exception is used.
"""
log = get_logger()
if not error_type:
error_type = exc.__class__.__name__
if error_type.endswith('Exception'):
error_type = error_type[: -len('Exception')]
if not error_msg:
try:
error_msg = exc.message
except AttributeError:
error_msg = str(exc)
if not error_msg:
error_msg = _('no-message-available')
user_info = get_user_info(request, purpose='error_view')
debug_message = (getattr(exc, 'debug_message', 'No debug message'),)
developer_message = (getattr(exc, 'developer_message', 'No developer message'),)
metadata = dict(
user=user_info,
url=request.path_url,
debug_message=debug_message,
developer_message=developer_message,
err_source=get_err_source(exc.__traceback__),
detail=getattr(exc, 'detail', None),
)
if developer_message:
top_msg += " developer_message: %s" % developer_message
if debug_message:
top_msg += " debug_message: %s" % debug_message
log.error(
top_msg,
error_type,
str(error_msg),
exc_info=sys.exc_info(),
extra=dict(meta=metadata),
)
if getattr(exc, 'monitor', None) is True or not isinstance(
exc, (HTTPForbidden, HTTPNotFound)
):
report_to_sentry(exc, request)
report_to_newrelic(user_info)
@contextlib.contextmanager
def chdir(dirname=None):
"""Change to this directory during this context"""
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def add_jinja2_filters(config, new_filters):
"""
A helper function to add jinja filters in a plugger in such a
way that previously added filters are not removed.
""" | filters = config.get_settings().get('jinja2.filters', {})
filters.update(new_filters)
config.add_settings({'jinja2.filters': filters}) | random_line_split |
|
utils.py | """Helper functions and view derivers for spynl.main."""
import json
import logging
import traceback
import sys
import os
import contextlib
from functools import wraps
from inspect import isclass, getfullargspec
import yaml
from tld import get_tld
from tld.exceptions import TldBadUrl, TldDomainNotFound
from pyramid.response import Response
from pyramid.renderers import json_renderer_factory
from pyramid.exceptions import Forbidden
from pyramid import threadlocal
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound
from spynl.main import urlson
from spynl.main.exceptions import SpynlException, MissingParameter, BadOrigin
from spynl.main.version import __version__ as spynl_version
from spynl.main.locale import SpynlTranslationString as _
def get_request():
"""
Retrieve current request.
Use with care, though:
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
"""
return threadlocal.get_current_request()
def get_settings(setting=None):
"""
Get settings (from .ini file [app:main] section)
If setting is given, get its value from the application settings and return it.
Can also be accessed from the request object: request.registry.settings
For more info on the way we do it here, consult
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
Our policy is to not edit the settings during a request/response cycle.
"""
registry_settings = threadlocal.get_current_registry().settings or {}
if setting is not None:
return registry_settings.get(setting)
return registry_settings
def check_origin(endpoint, info):
"""Check if origin is allowed"""
def wrapper_view(context, request):
"""raise HTTPForbidden if origin isn't allowed"""
origin = request.headers.get('Origin', '')
if not is_origin_allowed(origin):
# because this is a wrapper, the bad origin will not be properly
# escalated to forbidden, so it needs to be done like this.
raise Forbidden(
detail=BadOrigin(origin).message.translate(request.localizer)
)
return endpoint(context, request)
return wrapper_view
# NOTE this has NOTHING to do with the check options view deriver. But we
# need to register it somewhere.
check_origin.options = ('is_error_view',)
def validate_locale(locale):
"""Validate a locale against our supported languages."""
supported_languages = [
lang.strip().lower()
for lang in get_settings().get('spynl.languages', 'en').split(',')
]
language = None
if not locale:
return
# we're only looking for languages here, not dialects.
language = str(locale)[:2].lower()
if language in supported_languages:
return language
def handle_pre_flight_request(endpoint, info):
"""
"pre-flight-request": return custom response with some information on
what we allow. Used by browsers before they send XMLHttpRequests.
"""
def wrapper(context, request):
"""Call the endpoint if not an OPTION (pre-flight) request,
otherwise return a custom Response."""
if request.method != 'OPTIONS':
return endpoint(context, request)
else:
headerlist = []
origin = request.headers.get('Origin')
if origin: # otherwise we are on localhost or are called directly
if is_origin_allowed(origin):
headerlist.append(('Access-Control-Allow-Origin', origin))
else:
headerlist.append(('Access-Control-Allow-Origin', 'null'))
headerlist.extend(
[
('Access-Control-Allow-Methods', 'GET,POST'),
('Access-Control-Max-Age', '86400'),
('Access-Control-Allow-Credentials', 'true'),
('Content-Length', '0'),
('Content-Type', 'text/plain'),
]
)
# you can send any headers to Spynl, basically
if 'Access-Control-Request-Headers' in request.headers:
headerlist.append(
(
'Access-Control-Allow-Headers',
request.headers['Access-Control-Request-Headers'],
)
)
# returning a generic and resource-agnostic pre-flight response
return Response(headerlist=headerlist)
return wrapper
def is_origin_allowed(origin):
"""
Check request origin for matching our whitelists.
First tries dev whitelists (that list is expected to hold
either complete URLs or mere protocols, e.g. "chrome-extension://").
Then the tld whitelist is tried, which is expected to hold
only the top-level domains.
Returns True if origin is allowed, False otherwise.
"""
if not origin:
return True
settings = get_settings()
dev_whitelist = parse_csv_list(settings.get('spynl.dev_origin_whitelist', ''))
dev_list_urls = [url for url in dev_whitelist if not url.endswith('://')]
origin_allowed = origin in dev_list_urls
dev_list_protocols = [url for url in dev_whitelist if url.endswith('://')]
for protocol in dev_list_protocols:
if origin.startswith(protocol):
origin_allowed = True
if not origin_allowed:
try:
tld = get_tld(origin)
except (TldBadUrl, TldDomainNotFound):
tld = origin # dev domains like e.g. 0.0.0.0:9000 will fall here
tld_whitelist = parse_csv_list(settings.get('spynl.tld_origin_whitelist', ''))
if tld in tld_whitelist:
origin_allowed = True
return origin_allowed
def get_header_args(request):
"""Return a dictionary with arguments passed as headers."""
# these require a spynl-specific prefix to be recognized
headers = {
key: value
for key, value in request.headers.items()
if key.lower().startswith('x-spynl-')
}
# We might also get the session id and client IP address with the headers
for key in request.headers.keys():
if key.lower() == 'sid':
headers['sid'] = request.headers[key]
if key == 'X-Forwarded-For':
headers['X-Forwarded-For'] = request.headers[key]
return headers
def get_parsed_body(request):
"""Return the body of the request parsed if request was POST or PUT."""
settings = get_settings()
body_parser = settings.get('spynl.post_parser')
if request.method in ('POST', 'PUT'):
if body_parser:
|
else:
request.parsed_body = {} if not request.body else json.loads(request.body)
else:
# disregard any body content if not a POST or PUT request
request.parsed_body = {}
return request.parsed_body
def unify_args(request):
"""
Make one giant args dictionary from GET, POST, headers and cookies and
return it. On the way, create r.parsed_body and r.parsed_get as well.
It is possible to provide a custom parser for the POST body in the
settings. Complex data can be given via GET as a JSON string.
GET would overwrite POST when parameter names collide.
"""
args = {}
# get headers first, they might be useful for parsing the body
args.update(get_header_args(request))
# get POST data
args.update(get_parsed_body(request))
# get GET args, can be written in JSON style
# args.update(urlson.loads_dict(request.GET))
# TODO: needs some refactoring - maybe urlson can actually do this parsing
# for us. We don't know the context yet.
from spynl.main.serial import objects
context = hasattr(request, 'context') and request.context or None
args.update(
json.loads(
json.dumps(urlson.loads_dict(request.GET)),
object_hook=objects.SpynlDecoder(context=context),
)
)
request.endpoint_method = find_view_name(request)
# get cookies, but do not overwrite explicitly given settings
for key in request.cookies:
if key not in args:
args[key] = request.cookies[key]
# we actually want the sid to live as a header from here on out.
# It can come in other ways as well (e.g. in GET) for convenience,
# but we agree for it to live in one place.
if args.get('sid'):
request.headers['sid'] = args['sid']
del args['sid']
return args
def find_view_name(request):
"""find the view name
TODO: I believe this is not completely generic.
"""
name = None
if request.matchdict and 'method' in request.matchdict: # a route was matched
name = request.matchdict['method']
else:
name = request.path_info
if name.startswith('/'):
name = name[1:]
if hasattr(request, 'matched_route') and request.matched_route:
if name in request.matched_route.name:
# method name was not in the URL
if request.method == 'POST':
name = 'edit'
elif request.method == 'GET':
name = 'get'
return name
def get_user_info(request, purpose=None):
"""
Spynl.main has no user model. This function allows the use of a
user_info function defined in a plugin, by setting it to the
'user_info' setting in the plugger.py of the plugin. If no
other function is defined, it uses _get_user_info instead.
The user_info function should return a dictionary with
information about the (authenticated) user. If no information is
available it should return an empty dictionary.
"""
try:
return request.registry.settings['user_info_function'](request, purpose)
except (KeyError, AttributeError, TypeError):
return _get_user_info(request)
def _get_user_info(request):
"""
Function to get user information as a dictionary. In spynl.main the
only user information we can get is the ip address.
"""
ipaddress = get_user_ip(request)
return dict(ipaddress=ipaddress)
def get_user_ip(request):
""" Get the ipaddress of the user """
ipaddress = request.environ.get('REMOTE_ADDR', None)
# Load balancers overwrite ipaddress,
# so we prefer the forward header EBS sets
if 'X-Forwarded-For' in request.headers.keys():
ipaddress = request.headers['X-Forwarded-For']
return ipaddress
def get_err_source(original_traceback=None):
"""Use this when an error is handled to get info on where it occured"""
try: # carefully try to get the actual place where the error happened
if not original_traceback:
original_traceback = sys.exc_info()[2] # class, exc, traceback
first_call = traceback.extract_tb(original_traceback)[-1]
return dict(
module=first_call[0],
linenr=first_call[1],
method=first_call[2],
src_code=first_call[3],
)
except Exception:
return 'I was unable to retrieve error source information.'
def renderer_factory(info):
"""
Normally responses are rendered as bare JSON, but this factory will look
into the settings for other requested renderers first.
"""
if hasattr(info, 'settings'):
settings = info.settings
if settings and 'spynl.renderer' in settings:
return settings['spynl.renderer']
return json_renderer_factory(None)
def get_logger(name=None):
"""Return the Logger object with the given name."""
if not name:
name = __name__
return logging.getLogger(name)
def parse_value(value, class_info):
'''
Parse a value. class_info is expected to be a class or a list
of classes to try in order.
Raises SpynlException exception if no parsing was possible.
'''
if isclass(class_info):
try:
return class_info(value)
except Exception:
raise SpynlException(
_(
'parse-value-exception-as-class',
mapping={'value': value, 'class': class_info.__name__},
)
)
if hasattr(class_info, '__iter__'):
for cl in class_info:
if not isclass(cl):
raise SpynlException(
_(
'parse-value-exception-not-class',
mapping={'class': cl, 'value': value},
)
)
try:
return cl(value)
except Exception:
pass
raise SpynlException(
_(
'parse-value-exception-any-class',
mapping={'value': value, 'classes': [cl.__name__ for cl in class_info]},
)
)
def parse_csv_list(csv_list):
"""Parse a list of CSV values."""
return [i.strip() for i in csv_list.split(',')]
def get_yaml_from_docstring(doc_str, load_yaml=True):
"""
Load the YAML part (after "---") from the docstring of a Spynl view.
If load_yaml is True, return the result of yaml.load; otherwise return
the YAML part as a string.
"""
if doc_str:
yaml_sep = doc_str.find('---')
else:
yaml_sep = -1
if yaml_sep != -1:
yaml_str = doc_str[yaml_sep:]
if load_yaml:
return yaml.load(yaml_str)
else:
return yaml_str
return None
def required_args(*arguments):
"""Call the decorator that checks if required args passed in request."""
def outer_wrapper(func):
"""Return the decorator."""
@wraps(func)
def inner_wrapper(*args):
"""
Raise if a required argument is missing or is empty.
Decorator checks if request.args were the expected <*arguments> of
the current endpoint.
"""
request = args[-1] # request is always the last argument
for required_arg in arguments:
if request.args.get(required_arg, None) is None:
raise MissingParameter(required_arg)
if len(getfullargspec(func).args) == 1:
return func(request)
else:
return func(*args)
return inner_wrapper
return outer_wrapper
def report_to_sentry(exception, request):
"""Send exception info to online services for better monitoring
The user_info param can be added so the services can display which user
was involved.
The exc_info parameter should only be passed in if a
different exception than the current one on the stack should be sent.
The metadata parameter can be used for any extra information.
The endpoint parameter is sent under the tags Sentry parameter so
exceptions can be filtered in their website by endpoint.
"""
log = get_logger()
settings = get_settings()
try:
import raven
dsn = 'https://{}@app.getsentry.com/{}'.format(
settings['spynl.sentry_key'], settings['spynl.sentry_project']
)
client = raven.Client(
dsn=dsn,
release=spynl_version,
site='Spynl',
environment=settings.get('spynl.ops.environment', 'dev'),
processors=('raven.processors.SanitizePasswordsProcessor',),
)
except (ImportError, KeyError):
# if raven package is not installed or sentry key or project don't exist move on
return
except raven.exceptions.InvalidDsn:
log.warning('Invalid Sentry DSN')
return
user_info = get_user_info(request, purpose='error_view')
if user_info:
client.user_context(user_info)
client.captureException(
tags=dict(endpoint=request.path),
extra=dict(
url=request.path_url,
debug_message=getattr(exception, 'debug_message', None),
developer_message=getattr(exception, 'developer_message', None),
detail=getattr(exception, 'detail', None),
),
)
def report_to_newrelic(user_info):
# tell NewRelic about user information if the newrelic package is installed
# (the rest of the configuration of NewRelic is ini-file-based)
try:
import newrelic.agent
except ImportError:
return
if not user_info:
return
log = get_logger()
for key, value in user_info.items():
# do not include ipaddress for privacy
if key == 'ipaddress':
continue
if not newrelic.agent.add_custom_parameter(key, value):
log.warning('Could not add user info to NewRelic on exception: %s', key)
break
def log_error(exc, request, top_msg, error_type=None, error_msg=None):
"""
Log the error from an error view to the log, and to external monitoring.
Make sure the __cause__ of the exception is used.
"""
log = get_logger()
if not error_type:
error_type = exc.__class__.__name__
if error_type.endswith('Exception'):
error_type = error_type[: -len('Exception')]
if not error_msg:
try:
error_msg = exc.message
except AttributeError:
error_msg = str(exc)
if not error_msg:
error_msg = _('no-message-available')
user_info = get_user_info(request, purpose='error_view')
debug_message = (getattr(exc, 'debug_message', 'No debug message'),)
developer_message = (getattr(exc, 'developer_message', 'No developer message'),)
metadata = dict(
user=user_info,
url=request.path_url,
debug_message=debug_message,
developer_message=developer_message,
err_source=get_err_source(exc.__traceback__),
detail=getattr(exc, 'detail', None),
)
if developer_message:
top_msg += " developer_message: %s" % developer_message
if debug_message:
top_msg += " debug_message: %s" % debug_message
log.error(
top_msg,
error_type,
str(error_msg),
exc_info=sys.exc_info(),
extra=dict(meta=metadata),
)
if getattr(exc, 'monitor', None) is True or not isinstance(
exc, (HTTPForbidden, HTTPNotFound)
):
report_to_sentry(exc, request)
report_to_newrelic(user_info)
@contextlib.contextmanager
def chdir(dirname=None):
"""Change to this directory during this context"""
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def add_jinja2_filters(config, new_filters):
"""
A helper function to add jinja filters in a plugger in such a
way that previously added filters are not removed.
"""
filters = config.get_settings().get('jinja2.filters', {})
filters.update(new_filters)
config.add_settings({'jinja2.filters': filters})
| request.parsed_body = body_parser(request) | conditional_block |
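The required_args decorator in the row above raises MissingParameter when a named argument is absent from request.args, and otherwise passes the request through to the wrapped view. A minimal sketch of how a view might apply it; the import path, view name and argument name are assumptions for illustration.
# Illustrative sketch (assumed import path and names):
from spynl.main.utils import required_args

@required_args('tenant_id')
def get_orders(request):
    # only reached when request.args['tenant_id'] is present
    return {'status': 'ok', 'tenant_id': request.args['tenant_id']}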
utils.py | """Helper functions and view derivers for spynl.main."""
import json
import logging
import traceback
import sys
import os
import contextlib
from functools import wraps
from inspect import isclass, getfullargspec
import yaml
from tld import get_tld
from tld.exceptions import TldBadUrl, TldDomainNotFound
from pyramid.response import Response
from pyramid.renderers import json_renderer_factory
from pyramid.exceptions import Forbidden
from pyramid import threadlocal
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound
from spynl.main import urlson
from spynl.main.exceptions import SpynlException, MissingParameter, BadOrigin
from spynl.main.version import __version__ as spynl_version
from spynl.main.locale import SpynlTranslationString as _
def get_request():
"""
Retrieve current request.
Use with care, though:
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
"""
return threadlocal.get_current_request()
def get_settings(setting=None):
"""
Get settings (from .ini file [app:main] section)
If setting is given, get its value from the application settings and return it.
Can also be accessed from the request object: request.registry.settings
For more info on the way we do it here, consult
http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/threadlocals.html
Our policy is to not edit the settings during a request/response cycle.
"""
registry_settings = threadlocal.get_current_registry().settings or {}
if setting is not None:
return registry_settings.get(setting)
return registry_settings
def check_origin(endpoint, info):
"""Check if origin is allowed"""
def wrapper_view(context, request):
"""raise HTTPForbidden if origin isn't allowed"""
origin = request.headers.get('Origin', '')
if not is_origin_allowed(origin):
# because this is a wrapper, the bad origin will not be properly
# escalated to forbidden, so it needs to be done like this.
raise Forbidden(
detail=BadOrigin(origin).message.translate(request.localizer)
)
return endpoint(context, request)
return wrapper_view
# NOTE this has NOTHING to do with the check options view deriver. But we
# need to register it somewhere.
check_origin.options = ('is_error_view',)
def validate_locale(locale):
"""Validate a locale against our supported languages."""
supported_languages = [
lang.strip().lower()
for lang in get_settings().get('spynl.languages', 'en').split(',')
]
language = None
if not locale:
return
# we're only looking for languages here, not dialects.
language = str(locale)[:2].lower()
if language in supported_languages:
return language
def handle_pre_flight_request(endpoint, info):
"""
"pre-flight-request": return custom response with some information on
what we allow. Used by browsers before they send XMLHttpRequests.
"""
def wrapper(context, request):
"""Call the endpoint if not an OPTION (pre-flight) request,
otherwise return a custom Response."""
if request.method != 'OPTIONS':
return endpoint(context, request)
else:
headerlist = []
origin = request.headers.get('Origin')
if origin: # otherwise we are on localhost or are called directly
if is_origin_allowed(origin):
headerlist.append(('Access-Control-Allow-Origin', origin))
else:
headerlist.append(('Access-Control-Allow-Origin', 'null'))
headerlist.extend(
[
('Access-Control-Allow-Methods', 'GET,POST'),
('Access-Control-Max-Age', '86400'),
('Access-Control-Allow-Credentials', 'true'),
('Content-Length', '0'),
('Content-Type', 'text/plain'),
]
)
# you can send any headers to Spynl, basically
if 'Access-Control-Request-Headers' in request.headers:
headerlist.append(
(
'Access-Control-Allow-Headers',
request.headers['Access-Control-Request-Headers'],
)
)
# returning a generic and resource-agnostic pre-flight response
return Response(headerlist=headerlist)
return wrapper
def is_origin_allowed(origin):
"""
Check request origin for matching our whitelists.
First tries dev whitelists (that list is expected to hold
either complete URLs or mere protocols, e.g. "chrome-extension://").
Then the tld whitelist is tried, which is expected to hold
only the top-level domains.
Returns True if origin is allowed, False otherwise.
"""
if not origin:
return True
settings = get_settings()
dev_whitelist = parse_csv_list(settings.get('spynl.dev_origin_whitelist', ''))
dev_list_urls = [url for url in dev_whitelist if not url.endswith('://')]
origin_allowed = origin in dev_list_urls
dev_list_protocols = [url for url in dev_whitelist if url.endswith('://')]
for protocol in dev_list_protocols:
if origin.startswith(protocol):
origin_allowed = True
if not origin_allowed:
try:
tld = get_tld(origin)
except (TldBadUrl, TldDomainNotFound):
tld = origin # dev domains like e.g. 0.0.0.0:9000 will fall here
tld_whitelist = parse_csv_list(settings.get('spynl.tld_origin_whitelist', ''))
if tld in tld_whitelist:
origin_allowed = True
return origin_allowed
def get_header_args(request):
"""Return a dictionary with arguments passed as headers."""
# these require a spynl-specific prefix to be recognized
headers = {
key: value
for key, value in request.headers.items()
if key.lower().startswith('x-spynl-')
}
# We might also get the session id and client IP address with the headers
for key in request.headers.keys():
if key.lower() == 'sid':
headers['sid'] = request.headers[key]
if key == 'X-Forwarded-For':
headers['X-Forwarded-For'] = request.headers[key]
return headers
def get_parsed_body(request):
|
def unify_args(request):
"""
Make one giant args dictionary from GET, POST, headers and cookies and
return it. On the way, create r.parsed_body and r.parsed_get as well.
It is possible to provide a custom parser for the POST body in the
settings. Complex data can be given via GET as a JSON string.
GET would overwrite POST when parameter names collide.
"""
args = {}
# get headers first, they might be useful for parsing the body
args.update(get_header_args(request))
# get POST data
args.update(get_parsed_body(request))
# get GET args, can be written in JSON style
# args.update(urlson.loads_dict(request.GET))
# TODO: needs some refactoring - maybe urlson can actually do this parsing
# for us. We don't know the context yet.
from spynl.main.serial import objects
context = hasattr(request, 'context') and request.context or None
args.update(
json.loads(
json.dumps(urlson.loads_dict(request.GET)),
object_hook=objects.SpynlDecoder(context=context),
)
)
request.endpoint_method = find_view_name(request)
# get cookies, but do not overwrite explicitly given settings
for key in request.cookies:
if key not in args:
args[key] = request.cookies[key]
# we actually want the sid to live as a header from here on out.
# It can come in other ways as well (e.g. in GET) for convenience,
# but we agree for it to live in one place.
if args.get('sid'):
request.headers['sid'] = args['sid']
del args['sid']
return args
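# Illustrative sketch (not part of the original module): for a hypothetical
# POST to /products/get?filter={"active": true} with body {"limit": 10} and a
# 'sid' cookie, the merged result would be roughly
#     {'limit': 10, 'filter': {'active': True}, ...plus any x-spynl-* headers}
# GET values win over POST on name collisions, cookies never overwrite
# explicit arguments, and the sid ends up in request.headers['sid'].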
def find_view_name(request):
"""find the view name
TODO: I believe this is not completely generic.
"""
name = None
if request.matchdict and 'method' in request.matchdict: # a route was matched
name = request.matchdict['method']
else:
name = request.path_info
if name.startswith('/'):
name = name[1:]
if hasattr(request, 'matched_route') and request.matched_route:
if name in request.matched_route.name:
# method name was not in the URL
if request.method == 'POST':
name = 'edit'
elif request.method == 'GET':
name = 'get'
return name
def get_user_info(request, purpose=None):
"""
Spynl.main has no user model. This function allows the use of a
user_info function defined in a plugin, by setting it to the
'user_info' setting in the plugger.py of the plugin. If no
other function is defined, it uses _user_info instead.
The user_info function should return a dictionary with
information about the (authenticated) user. If no information is
available it should return an empty dictionary.
"""
try:
return request.registry.settings['user_info_function'](request, purpose)
except (KeyError, AttributeError, TypeError):
return _get_user_info(request)
def _get_user_info(request):
"""
Function to get user information as a dictionary. In spynl.main the
only user information we can get is the ip address.
"""
ipaddress = get_user_ip(request)
return dict(ipaddress=ipaddress)
def get_user_ip(request):
""" Get the ipaddress of the user """
ipaddress = request.environ.get('REMOTE_ADDR', None)
# Load balancers overwrite ipaddress,
# so we prefer the forward header EBS sets
if 'X-Forwarded-For' in request.headers.keys():
ipaddress = request.headers['X-Forwarded-For']
return ipaddress
def get_err_source(original_traceback=None):
"""Use this when an error is handled to get info on where it occured"""
try: # carefully try to get the actual place where the error happened
if not original_traceback:
original_traceback = sys.exc_info()[2] # class, exc, traceback
first_call = traceback.extract_tb(original_traceback)[-1]
return dict(
module=first_call[0],
linenr=first_call[1],
method=first_call[2],
src_code=first_call[3],
)
except Exception:
return 'I was unable to retrieve error source information.'
def renderer_factory(info):
"""
Normally responses are rendered as bare JSON, but this factory will look
into the settings for other requested renderers first.
"""
if hasattr(info, 'settings'):
settings = info.settings
if settings and 'spynl.renderer' in settings:
return settings['spynl.renderer']
return json_renderer_factory(None)
def get_logger(name=None):
"""Return the Logger object with the given name."""
if not name:
name = __name__
return logging.getLogger(name)
def parse_value(value, class_info):
'''
Parse a value. class_info is expected to be a class or a list
of classes to try in order.
Raises SpynlException exception if no parsing was possible.
'''
if isclass(class_info):
try:
return class_info(value)
except Exception:
raise SpynlException(
_(
'parse-value-exception-as-class',
mapping={'value': value, 'class': class_info.__name__},
)
)
if hasattr(class_info, '__iter__'):
for cl in class_info:
if not isclass(cl):
raise SpynlException(
_(
'parse-value-exception-not-class',
mapping={'class': cl, 'value': value},
)
)
try:
return cl(value)
except Exception:
pass
raise SpynlException(
_(
'parse-value-exception-any-class',
mapping={'value': value, 'classes': [cl.__name__ for cl in class_info]},
)
)
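# Illustrative sketch (not part of the original module): class_info may be a
# single class or an iterable of classes that are tried in order.
#
#     parse_value('7', int)             # -> 7
#     parse_value('2.5', [int, float])  # -> 2.5 (int('2.5') fails, float works)
#     parse_value('abc', [int, float])  # -> raises SpynlException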
def parse_csv_list(csv_list):
"""Parse a list of CSV values."""
return [i.strip() for i in csv_list.split(',')]
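# Illustrative example (not part of the original module):
#     parse_csv_list('a.com, b.org ,c')  # -> ['a.com', 'b.org', 'c']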
def get_yaml_from_docstring(doc_str, load_yaml=True):
"""
Load the YAML part (after "---") from the docstring of a Spynl view.
if load_yaml is True, return the result of yaml.load, otherwise return
as string.
"""
if doc_str:
yaml_sep = doc_str.find('---')
else:
yaml_sep = -1
if yaml_sep != -1:
yaml_str = doc_str[yaml_sep:]
if load_yaml:
return yaml.load(yaml_str)
else:
return yaml_str
return None
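# Illustrative sketch (not part of the original module): a view docstring with
# a YAML part after the "---" separator.
#
#     def my_view(request):
#         """
#         Describe the endpoint here.
#         ---
#         get:
#           description: hypothetical swagger-style metadata
#         """
#
# get_yaml_from_docstring(my_view.__doc__) returns the parsed YAML dict;
# with load_yaml=False it returns the raw string starting at "---".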
def required_args(*arguments):
"""Call the decorator that checks if required args passed in request."""
def outer_wrapper(func):
"""Return the decorator."""
@wraps(func)
def inner_wrapper(*args):
"""
Raise if a required argument is missing or is empty.
Decorator checks if request.args were the expected <*arguments> of
the current endpoint.
"""
request = args[-1] # request is always the last argument
for required_arg in arguments:
if request.args.get(required_arg, None) is None:
raise MissingParameter(required_arg)
if len(getfullargspec(func).args) == 1:
return func(request)
else:
return func(*args)
return inner_wrapper
return outer_wrapper
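# Illustrative usage sketch (not part of the original module), assuming a
# hypothetical view that needs 'username' and 'email' in request.args:
#
#     @required_args('username', 'email')
#     def edit_user(ctx, request):
#         ...
#
# A request missing either argument raises MissingParameter before the view runs.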
def report_to_sentry(exception, request):
"""Send exception info to online services for better monitoring
The user_info param can be added so the services can display which user
was involved.
The exc_info parameter should only be passed in if a
different exception than the current one on the stack should be sent.
The metadata parameter can be used for any extra information.
The endpoint parameter is sent under the tags Sentry parameter so
exceptions can be filtered in their website by endpoint.
"""
log = get_logger()
settings = get_settings()
try:
import raven
dsn = 'https://{}@app.getsentry.com/{}'.format(
settings['spynl.sentry_key'], settings['spynl.sentry_project']
)
client = raven.Client(
dsn=dsn,
release=spynl_version,
site='Spynl',
environment=settings.get('spynl.ops.environment', 'dev'),
processors=('raven.processors.SanitizePasswordsProcessor',),
)
except (ImportError, KeyError):
# if raven package is not installed or sentry key or project don't exist move on
return
except raven.exceptions.InvalidDsn:
log.warning('Invalid Sentry DSN')
return
user_info = get_user_info(request, purpose='error_view')
if user_info:
client.user_context(user_info)
client.captureException(
tags=dict(endpoint=request.path),
extra=dict(
url=request.path_url,
debug_message=getattr(exception, 'debug_message', None),
developer_message=getattr(exception, 'developer_message', None),
detail=getattr(exception, 'detail', None),
),
)
def report_to_newrelic(user_info):
# tell NewRelic about user information if the newrelic package is installed
# (the rest of the configuration of NewRelic is ini-file-based)
try:
import newrelic.agent
except ImportError:
return
if not user_info:
return
log = get_logger()
for key, value in user_info.items():
# do not include ipaddress for privacy
if key == 'ipaddress':
continue
if not newrelic.agent.add_custom_parameter(key, value):
log.warning('Could not add user info to NewRelic on exception: %s', key)
break
def log_error(exc, request, top_msg, error_type=None, error_msg=None):
"""
Log the error from an error view to the log, and to external monitoring.
Make sure the __cause__ of the exception is used.
"""
log = get_logger()
if not error_type:
error_type = exc.__class__.__name__
if error_type.endswith('Exception'):
error_type = error_type[: -len('Exception')]
if not error_msg:
try:
error_msg = exc.message
except AttributeError:
error_msg = str(exc)
if not error_msg:
error_msg = _('no-message-available')
user_info = get_user_info(request, purpose='error_view')
debug_message = getattr(exc, 'debug_message', 'No debug message')
developer_message = getattr(exc, 'developer_message', 'No developer message')
metadata = dict(
user=user_info,
url=request.path_url,
debug_message=debug_message,
developer_message=developer_message,
err_source=get_err_source(exc.__traceback__),
detail=getattr(exc, 'detail', None),
)
if developer_message:
top_msg += " developer_message: %s" % developer_message
if debug_message:
top_msg += " debug_message: %s" % debug_message
log.error(
top_msg,
error_type,
str(error_msg),
exc_info=sys.exc_info(),
extra=dict(meta=metadata),
)
if getattr(exc, 'monitor', None) is True or not isinstance(
exc, (HTTPForbidden, HTTPNotFound)
):
report_to_sentry(exc, request)
report_to_newrelic(user_info)
@contextlib.contextmanager
def chdir(dirname=None):
"""Change to this directory during this context"""
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
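# Illustrative usage sketch (not part of the original module):
#
#     with chdir('/tmp'):
#         ...  # os.getcwd() == '/tmp' inside the block
#     # the previous working directory is restored afterwards, even on error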
def add_jinja2_filters(config, new_filters):
"""
A helper function to add jinja filters in a plugger in such a
way that previously added filters are not removed.
"""
filters = config.get_settings().get('jinja2.filters', {})
filters.update(new_filters)
config.add_settings({'jinja2.filters': filters})
objpool.rs
/*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::slice::{Iter, IterMut};
use serde_derive::{Deserialize, Serialize};
use slog;
#[derive(Debug, Serialize, Deserialize)]
pub struct ObjPoolIndex<T> {
i: usize,
type_marker: PhantomData<T>
}
impl<T> Copy for ObjPoolIndex<T> { }
impl<T> Clone for ObjPoolIndex<T> {
fn clone(&self) -> ObjPoolIndex<T> {
*self
}
}
impl<T> PartialEq for ObjPoolIndex<T> {
fn eq(&self, other: &ObjPoolIndex<T>) -> bool {
self.i == other.i
}
}
impl<T> Eq for ObjPoolIndex<T> { }
impl<T> Hash for ObjPoolIndex<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.i.hash(state);
}
}
impl<T> ObjPoolIndex<T> {
pub fn get_raw_i(&self) -> usize {
self.i
}
}
impl<T> slog::Value for ObjPoolIndex<T> {
fn serialize(&self, _record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer) -> slog::Result {
serializer.emit_usize(key, self.i)
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub struct ObjPool<T> {
storage: Vec<T>
}
impl<T> ObjPool<T> {
pub fn new() -> ObjPool<T> {
ObjPool {storage: Vec::new()}
}
pub fn insert(&mut self, item: T) -> ObjPoolIndex<T> {
let i = self.storage.len();
self.storage.push(item);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
pub fn get(&self, i: ObjPoolIndex<T>) -> &T {
&self.storage[i.i]
}
pub fn get_mut(&mut self, i: ObjPoolIndex<T>) -> &mut T {
&mut self.storage[i.i]
}
pub fn iter_idx(&self) -> ObjPoolIdxIterator<T> {
ObjPoolIdxIterator {
pool: self,
current_idx: 0,
}
}
pub fn iter(&self) -> Iter<T> {
self.storage.iter()
}
pub fn iter_mut(&mut self) -> IterMut<T> {
self.storage.iter_mut()
}
pub fn iter_mut_idx(&mut self) -> ObjPoolMutIdxIterator<T> {
ObjPoolMutIdxIterator {
inner_iter: self.storage.iter_mut(),
current_idx: 0,
}
}
pub fn len(&self) -> usize {
self.storage.len()
}
}
impl<T: Default> ObjPool<T> {
pub fn alloc(&mut self) -> ObjPoolIndex<T> {
let i = self.storage.len();
let o = T::default();
self.storage.push(o);
ObjPoolIndex::<T> {i: i, type_marker: PhantomData}
}
}
pub struct ObjPoolIdxIterator<'a, T: 'a> {
pool: &'a ObjPool<T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolIdxIterator<'a, T> {
type Item = ObjPoolIndex<T>;
fn next(&mut self) -> Option<ObjPoolIndex<T>> {
if self.current_idx == self.pool.storage.len() {
None
} else {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some(ret)
}
}
}
pub struct ObjPoolMutIdxIterator<'a, T: 'a> {
inner_iter: IterMut<'a, T>,
current_idx: usize,
}
impl<'a, T> Iterator for ObjPoolMutIdxIterator<'a, T> {
type Item = (ObjPoolIndex<T>, &'a mut T);
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner_iter.next();
match next {
None => None,
Some(x) => {
let ret = ObjPoolIndex::<T> {i: self.current_idx, type_marker: PhantomData};
self.current_idx += 1;
Some((ret, x))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Default)]
struct ObjPoolTestObject {
foo: u32
}
#[test]
fn objpool_basic_works() {
let mut pool = ObjPool::<ObjPoolTestObject>::new();
let x = pool.alloc();
let y = pool.alloc();
{
let o = pool.get_mut(x);
o.foo = 123;
}
{
let o = pool.get_mut(y);
o.foo = 456;
}
let ox = pool.get(x);
let oy = pool.get(y);
assert_eq!(ox.foo, 123);
assert_eq!(oy.foo, 456);
}
}
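// Illustrative usage sketch (not part of the original file): indices returned
// by insert()/alloc() stay valid because the pool only ever grows.
//
//     let mut pool: ObjPool<String> = ObjPool::new();
//     let idx = pool.insert("hello".to_string());
//     assert_eq!(pool.get(idx).as_str(), "hello");
//     for (i, s) in pool.iter_mut_idx() {
//         s.push_str(" world");
//         println!("{}: {}", i.get_raw_i(), s);
//     }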
sysoptions.py
"""Options list for system config."""
import os
from collections import OrderedDict
from lutris import runners
from lutris.util import display, system
def get_optirun_choices():
"""Return menu choices (label, value) for Optimus"""
choices = [("Off", "off")]
if system.find_executable("primusrun"):
choices.append(("primusrun", "primusrun"))
if system.find_executable("optirun"):
choices.append(("optirun/virtualgl", "optirun"))
return choices
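# Illustrative example (not part of the original module): on a system where
# only primusrun is installed, the function above returns
#     [("Off", "off"), ("primusrun", "primusrun")]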
system_options = [ # pylint: disable=invalid-name
{
"option": "game_path",
"type": "directory_chooser",
"label": "Default installation folder",
"default": os.path.expanduser("~/Games"),
"scope": ["runner", "system"],
"help": "The default folder where you install your games."
},
{
"type": "bool",
"label": "Disable Lutris Runtime",
"default": False,
"help": (
"The Lutris Runtime loads some libraries before running the "
"game. Which can cause some incompatibilities in some cases. "
"Check this option to disable it."
),
},
{
"option": "prefer_system_libs",
"type": "bool",
"label": "Prefer system libraries",
"default": True,
"help": (
"When the runtime is enabled, prioritize the system libraries"
" over the provided ones."
),
},
{
"option": "reset_desktop",
"type": "bool",
"label": "Restore resolution on game exit",
"default": False,
"help": (
"Some games don't restore your screen resolution when \n"
"closed or when they crash. This is when this option comes \n"
"into play to save your bacon."
),
},
{
"option": "single_cpu",
"type": "bool",
"label": "Restrict to single core",
"advanced": True,
"default": False,
"help": "Restrict the game to a single CPU core.",
},
{
"option": "restore_gamma",
"type": "bool",
"default": False,
"label": "Restore gamma on game exit",
"advanced": True,
"help": (
"Some games don't correctly restores gamma on exit, making "
"your display too bright. Select this option to correct it."
),
},
{
"option": "disable_compositor",
"label": "Disable desktop effects",
"type": "bool",
"default": False,
"advanced": True,
"help": (
"Disable desktop effects while game is running, "
"reducing stuttering and increasing performance"
),
},
{
"option": "reset_pulse",
"type": "bool",
"label": "Reset PulseAudio",
"default": False,
"advanced": True,
"condition": system.find_executable("pulseaudio"),
"help": "Restart PulseAudio before launching the game.",
},
{
"option": "pulse_latency",
"type": "bool",
"label": "Reduce PulseAudio latency",
"default": False,
"advanced": True,
"condition": system.find_executable("pulseaudio"),
"help": (
"Set the environment variable PULSE_LATENCY_MSEC=60 "
"to improve audio quality on some games"
),
},
{
"option": "use_us_layout",
"type": "bool",
"label": "Switch to US keyboard layout",
"default": False,
"advanced": True,
"help": "Switch to US keyboard qwerty layout while game is running",
},
{
"option": "optimus",
"type": "choice",
"default": "off",
"choices": get_optirun_choices,
"label": "Optimus launcher (NVIDIA Optimus laptops)",
"advanced": True,
"help": (
"If you have installed the primus or bumblebee packages, "
"select what launcher will run the game with the command, "
"activating your NVIDIA graphic chip for high 3D "
"performance. primusrun normally has better performance, but"
"optirun/virtualgl works better for more games."
),
},
{
"option": "fps_limit",
"type": "string",
"size": "small",
"label": "Fps limit",
"advanced": True,
"condition": bool(system.find_executable("strangle")),
"help": "Limit the game's fps to desired number",
},
{
"option": "gamemode",
"type": "bool",
"default": system.LINUX_SYSTEM.is_feature_supported("GAMEMODE"),
"condition": system.LINUX_SYSTEM.is_feature_supported("GAMEMODE"),
"label": "Enable Feral gamemode",
"help": "Request a set of optimisations be temporarily applied to the host OS",
},
{
"option": "dri_prime",
"type": "bool",
"default": False,
"condition": display.USE_DRI_PRIME,
"label": "Use PRIME (hybrid graphics on laptops)",
"advanced": True,
"help": (
"If you have open source graphic drivers (Mesa), selecting this "
"option will run the game with the 'DRI_PRIME=1' environment variable, "
"activating your discrete graphic chip for high 3D "
"performance."
),
},
{
"option": "sdl_video_fullscreen",
"type": "choice",
"label": "SDL 1.2 Fullscreen Monitor",
"choices": display.get_output_list,
"default": "off",
"advanced": True,
"help": (
"Hint SDL 1.2 games to use a specific monitor when going "
"fullscreen by setting the SDL_VIDEO_FULLSCREEN "
"environment variable"
),
},
{
"option": "display",
"type": "choice",
"label": "Turn off monitors except",
"choices": display.get_output_choices,
"default": "off",
"advanced": True,
"help": (
"Only keep the selected screen active while the game is "
"running. \n"
"This is useful if you have a dual-screen setup, and are \n"
"having display issues when running a game in fullscreen."
),
},
{
"option": "resolution",
"type": "choice",
"label": "Switch resolution to",
"choices": display.get_resolution_choices,
"default": "off",
"help": "Switch to this screen resolution while the game is running.",
},
{
"option": "terminal",
"label": "Run in a terminal",
"type": "bool",
"default": False,
"advanced": True,
"help": "Run the game in a new terminal window.",
},
{
"option": "terminal_app",
"label": "Terminal application",
"type": "choice_with_entry",
"choices": system.get_terminal_apps,
"default": system.get_default_terminal(),
"advanced": True,
"help": (
"The terminal emulator to be run with the previous option."
"Choose from the list of detected terminal apps or enter "
"the terminal's command or path."
"Note: Not all terminal emulators are guaranteed to work."
),
},
{
"option": "env",
"type": "mapping",
"label": "Environment variables",
"help": "Environment variables loaded at run time",
},
{
"option": "prefix_command",
"type": "string",
"label": "Command prefix",
"advanced": True,
"help": (
"Command line instructions to add in front of the game's "
"execution command."
),
},
{
"option": "manual_command",
"type": "file",
"label": "Manual command",
"advanced": True,
"help": ("Script to execute from the game's contextual menu"),
},
{
"option": "prelaunch_command",
"type": "file",
"label": "Pre-launch command",
"advanced": True,
"help": "Script to execute before the game starts",
},
{
"option": "prelaunch_wait",
"type": "bool",
"label": "Wait for pre-launch command completion",
"advanced": True,
"default": False,
"help": "Run the game only once the pre-launch command has exited",
},
{
"option": "postexit_command",
"type": "file",
"label": "Post-exit command",
"advanced": True,
"help": "Script to execute when the game exits",
},
{
"option": "include_processes",
"type": "string",
"label": "Include processes",
"advanced": True,
"help": (
"What processes to include in process monitoring. "
"This is to override the built-in exclude list.\n"
"Space-separated list, processes including spaces "
"can be wrapped in quotation marks."
),
},
{
"option": "exclude_processes",
"type": "string",
"label": "Exclude processes",
"advanced": True,
"help": (
"What processes to exclude in process monitoring. "
"For example background processes that stick around "
"after the game has been closed.\n"
"Space-separated list, processes including spaces "
"can be wrapped in quotation marks."
),
},
{
"option": "killswitch",
"type": "string",
"label": "Killswitch file",
"advanced": True,
"help": (
"Path to a file which will stop the game when deleted \n"
"(usually /dev/input/js0 to stop the game on joystick "
"unplugging)"
),
},
{
"option": "xboxdrv",
"type": "string",
"label": "xboxdrv config",
"advanced": True,
"condition": system.find_executable("xboxdrv"),
"help": (
"Command line options for xboxdrv, a driver for XBOX 360 "
"controllers. Requires the xboxdrv package installed."
),
},
{
"option": "sdl_gamecontrollerconfig",
"type": "string",
"label": "SDL2 gamepad mapping",
"advanced": True,
"help": (
"SDL_GAMECONTROLLERCONFIG mapping string or path to a custom "
"gamecontrollerdb.txt file containing mappings."
),
},
{
"option": "xephyr",
"label": "Use Xephyr",
"type": "choice",
"choices": (
("Off", "off"),
("8BPP (256 colors)", "8bpp"),
("16BPP (65536 colors)", "16bpp"),
("24BPP (16M colors)", "24bpp"),
),
"default": "off",
"advanced": True,
"help": "Run program in Xephyr to support 8BPP and 16BPP color modes",
},
{
"option": "xephyr_resolution",
"type": "string",
"label": "Xephyr resolution",
"advanced": True,
"help": "Screen resolution of the Xephyr server",
},
{
"option": "xephyr_fullscreen",
"type": "bool",
"label": "Xephyr Fullscreen",
"default": True,
"advanced": True,
"help": "Open Xephyr in fullscreen (at the desktop resolution)",
},
]
def with_runner_overrides(runner_slug):
"""Return system options updated with overrides from given runner."""
options = system_options
try:
runner = runners.import_runner(runner_slug)
except runners.InvalidRunner:
return options
if not getattr(runner, "system_options_override"):
runner = runner()
if runner.system_options_override:
opts_dict = OrderedDict((opt["option"], opt) for opt in options)
for option in runner.system_options_override:
key = option["option"]
if opts_dict.get(key):
opts_dict[key] = opts_dict[key].copy()
opts_dict[key].update(option)
else:
opts_dict[key] = option
options = [opt for opt in list(opts_dict.values())]
return options
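# Illustrative sketch (not part of the original module): if a hypothetical
# runner defined
#     system_options_override = [{"option": "disable_runtime", "default": True}]
# then with_runner_overrides(<that runner's slug>) would return the same list
# of options, with the "disable_runtime" entry's default flipped to True.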
"choices": display.get_output_choices,
"default": "off",
"advanced": True,
"help": (
"Only keep the selected screen active while the game is "
"running. \n"
"This is useful if you have a dual-screen setup, and are \n"
"having display issues when running a game in fullscreen."
),
},
{
"option": "resolution",
"type": "choice",
"label": "Switch resolution to",
"choices": display.get_resolution_choices,
"default": "off",
"help": "Switch to this screen resolution while the game is running.",
},
{
"option": "terminal",
"label": "Run in a terminal",
"type": "bool",
"default": False,
"advanced": True,
"help": "Run the game in a new terminal window.",
},
{
"option": "terminal_app",
"label": "Terminal application",
"type": "choice_with_entry",
"choices": system.get_terminal_apps,
"default": system.get_default_terminal(),
"advanced": True,
"help": (
"The terminal emulator to be run with the previous option."
"Choose from the list of detected terminal apps or enter "
"the terminal's command or path."
"Note: Not all terminal emulators are guaranteed to work."
),
},
{
"option": "env",
"type": "mapping",
"label": "Environment variables",
"help": "Environment variables loaded at run time",
},
{
"option": "prefix_command",
"type": "string",
"label": "Command prefix",
"advanced": True,
"help": (
"Command line instructions to add in front of the game's "
"execution command."
),
},
{
"option": "manual_command",
"type": "file",
"label": "Manual command",
"advanced": True,
"help": ("Script to execute from the game's contextual menu"),
},
{
"option": "prelaunch_command",
"type": "file",
"label": "Pre-launch command",
"advanced": True,
"help": "Script to execute before the game starts",
},
{
"option": "prelaunch_wait",
"type": "bool",
"label": "Wait for pre-launch command completion",
"advanced": True,
"default": False,
"help": "Run the game only once the pre-launch command has exited",
},
{
"option": "postexit_command",
"type": "file",
"label": "Post-exit command",
"advanced": True,
"help": "Script to execute when the game exits",
},
{
"option": "include_processes",
"type": "string",
"label": "Include processes",
"advanced": True,
"help": (
"What processes to include in process monitoring. "
"This is to override the built-in exclude list.\n"
"Space-separated list, processes including spaces "
"can be wrapped in quotation marks."
),
},
{
"option": "exclude_processes",
"type": "string",
"label": "Exclude processes",
"advanced": True,
"help": (
"What processes to exclude in process monitoring. "
"For example background processes that stick around "
"after the game has been closed.\n"
"Space-separated list, processes including spaces "
"can be wrapped in quotation marks."
),
},
{
"option": "killswitch",
"type": "string",
"label": "Killswitch file",
"advanced": True,
"help": (
"Path to a file which will stop the game when deleted \n"
"(usually /dev/input/js0 to stop the game on joystick "
"unplugging)"
),
},
{
"option": "xboxdrv",
"type": "string",
"label": "xboxdrv config",
"advanced": True,
"condition": system.find_executable("xboxdrv"),
"help": (
"Command line options for xboxdrv, a driver for XBOX 360 "
"controllers. Requires the xboxdrv package installed."
),
},
{
"option": "sdl_gamecontrollerconfig",
"type": "string",
"label": "SDL2 gamepad mapping",
"advanced": True,
"help": (
"SDL_GAMECONTROLLERCONFIG mapping string or path to a custom "
"gamecontrollerdb.txt file containing mappings."
),
},
{
"option": "xephyr",
"label": "Use Xephyr",
"type": "choice",
"choices": (
("Off", "off"),
("8BPP (256 colors)", "8bpp"),
("16BPP (65536 colors)", "16bpp"),
("24BPP (16M colors)", "24bpp"),
),
"default": "off",
"advanced": True,
"help": "Run program in Xephyr to support 8BPP and 16BPP color modes",
},
{
"option": "xephyr_resolution",
"type": "string",
"label": "Xephyr resolution",
"advanced": True,
"help": "Screen resolution of the Xephyr server",
},
{
"option": "xephyr_fullscreen",
"type": "bool",
"label": "Xephyr Fullscreen",
"default": True,
"advanced": True,
"help": "Open Xephyr in fullscreen (at the desktop resolution)",
},
]
def | (runner_slug):
"""Return system options updated with overrides from given runner."""
options = system_options
try:
runner = runners.import_runner(runner_slug)
except runners.InvalidRunner:
return options
if not getattr(runner, "system_options_override"):
runner = runner()
if runner.system_options_override:
opts_dict = OrderedDict((opt["option"], opt) for opt in options)
for option in runner.system_options_override:
key = option["option"]
if opts_dict.get(key):
opts_dict[key] = opts_dict[key].copy()
opts_dict[key].update(option)
else:
opts_dict[key] = option
options = [opt for opt in list(opts_dict.values())]
return options
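# Minimal usage sketch for with_runner_overrides (hedged: "wine" is only an
# example runner slug, and this assumes the lutris package and its runner
# modules are importable in the current environment).
if __name__ == "__main__":
    # Merge the base system options with the wine runner's overrides,
    # then print each option key alongside its default value.
    for merged_opt in with_runner_overrides("wine"):
        print(merged_opt["option"], merged_opt.get("default"))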
| with_runner_overrides | identifier_name |
trait-inheritance2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | trait Bar { fn g(&self) -> int; }
trait Baz { fn h(&self) -> int; }
trait Quux: Foo + Bar + Baz { }
struct A { x: int }
impl Foo for A { fn f(&self) -> int { 10 } }
impl Bar for A { fn g(&self) -> int { 20 } }
impl Baz for A { fn h(&self) -> int { 30 } }
impl Quux for A {}
fn f<T:Quux + Foo + Bar + Baz>(a: &T) {
assert_eq!(a.f(), 10);
assert_eq!(a.g(), 20);
assert_eq!(a.h(), 30);
}
pub fn main() {
let a = &A { x: 3 };
f(a);
} | // option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; } | random_line_split |
trait-inheritance2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; }
trait Bar { fn g(&self) -> int; }
trait Baz { fn h(&self) -> int; }
trait Quux: Foo + Bar + Baz { }
struct A { x: int }
impl Foo for A { fn f(&self) -> int { 10 } }
impl Bar for A { fn g(&self) -> int { 20 } }
impl Baz for A { fn | (&self) -> int { 30 } }
impl Quux for A {}
fn f<T:Quux + Foo + Bar + Baz>(a: &T) {
assert_eq!(a.f(), 10);
assert_eq!(a.g(), 20);
assert_eq!(a.h(), 30);
}
pub fn main() {
let a = &A { x: 3 };
f(a);
}
| h | identifier_name |
trait-inheritance2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo { fn f(&self) -> int; }
trait Bar { fn g(&self) -> int; }
trait Baz { fn h(&self) -> int; }
trait Quux: Foo + Bar + Baz { }
struct A { x: int }
impl Foo for A { fn f(&self) -> int | }
impl Bar for A { fn g(&self) -> int { 20 } }
impl Baz for A { fn h(&self) -> int { 30 } }
impl Quux for A {}
fn f<T:Quux + Foo + Bar + Baz>(a: &T) {
assert_eq!(a.f(), 10);
assert_eq!(a.g(), 20);
assert_eq!(a.h(), 30);
}
pub fn main() {
let a = &A { x: 3 };
f(a);
}
| { 10 } | identifier_body |