file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
test_list.py
|
import sys
import unittest
from test import test_support, list_tests
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size this also assumes that the address size is at
# least 4 bytes with 8 byte addresses, the bug is not well
# tested
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, xrange(sys.maxint // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_truth(self):
super(ListTest, self).test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super(ListTest, self).test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
@unittest.expectedFailure
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_main(verbose=None):
|
if __name__ == "__main__":
test_main(verbose=True)
|
test_support.run_unittest(ListTest)
# verify reference counting
# import sys
# if verbose and hasattr(sys, "gettotalrefcount"):
# import gc
# counts = [None] * 5
# for i in xrange(len(counts)):
# test_support.run_unittest(ListTest)
# gc.collect()
# counts[i] = sys.gettotalrefcount()
# print counts
|
identifier_body
|
test_list.py
|
import sys
import unittest
from test import test_support, list_tests
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size this also assumes that the address size is at
# least 4 bytes with 8 byte addresses, the bug is not well
# tested
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, xrange(sys.maxint // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_truth(self):
super(ListTest, self).test_truth()
self.assertTrue(not [])
|
self.assertTrue([] is not [])
def test_len(self):
super(ListTest, self).test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
@unittest.expectedFailure
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_main(verbose=None):
test_support.run_unittest(ListTest)
# verify reference counting
# import sys
# if verbose and hasattr(sys, "gettotalrefcount"):
# import gc
# counts = [None] * 5
# for i in xrange(len(counts)):
# test_support.run_unittest(ListTest)
# gc.collect()
# counts[i] = sys.gettotalrefcount()
# print counts
if __name__ == "__main__":
test_main(verbose=True)
|
self.assertTrue([42])
def test_identity(self):
|
random_line_split
|
test_list.py
|
import sys
import unittest
from test import test_support, list_tests
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size this also assumes that the address size is at
# least 4 bytes with 8 byte addresses, the bug is not well
# tested
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, xrange(sys.maxint // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_truth(self):
super(ListTest, self).test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def
|
(self):
super(ListTest, self).test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
@unittest.expectedFailure
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_main(verbose=None):
test_support.run_unittest(ListTest)
# verify reference counting
# import sys
# if verbose and hasattr(sys, "gettotalrefcount"):
# import gc
# counts = [None] * 5
# for i in xrange(len(counts)):
# test_support.run_unittest(ListTest)
# gc.collect()
# counts[i] = sys.gettotalrefcount()
# print counts
if __name__ == "__main__":
test_main(verbose=True)
|
test_len
|
identifier_name
|
main.rs
|
// Author: Alex Chernyakhovsky ([email protected])
// TODO(achernya): Per the documentation on doc.rust-lang.org, std::io
// is not yet ready, so use old_io until 1.0-final.
#![feature(old_io)]
// std::env contains a lot of nice functions that otherwise would
// require std::os to use; std::os has lots of deprecated functions.
#![feature(env)]
// TODO(achernya): Remove this feature when std::env moves over to
// std::path.
#![feature(old_path)]
use std::env;
use std::old_io as io;
// println_stderr is like println, but to stderr.
macro_rules! println_stderr(
($($arg:tt)*) => (
match writeln!(&mut io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
// ShellCommand is a trait that defines a runnable POSIX shell
// command. An implementation is an abstract representation of shell
// commands such as simple invocations, invocations with redirection,
// and even shell pipelines.
trait ShellCommand {
fn run(&self);
}
fn shell_loop() {
let mut stdin = io::stdin();
loop {
print!("$ ");
let line = stdin.read_line();
match line {
Ok(expr) => handle_command(&expr),
Err(_) => break,
}
}
}
fn handle_command(user_expr: &str) {
// Clean up the string by removing the newline at the end
let expr = user_expr.trim_matches('\n');
let components: Vec<&str> = expr.split(' ').collect();
if builtins(&components) {
return;
}
}
fn builtins(command: &Vec<&str>) -> bool {
match command[0] {
"cd" => cd(command),
"pwd" => pwd(),
_ => return false,
}
true
}
fn cd(command: &Vec<&str>) {
// cd is the "change directory" command. It can take either 0 or 1
// arguments. If given no arguments, then the $HOME directory is
// chosen.
let dir: Option<Path> = match command.len() {
0 => panic!("invalid cd invocation"),
1 => env::home_dir(),
_ => Some(Path::new(command[1]))
|
let directory = dir.unwrap();
let result = env::set_current_dir(&directory);
match result {
Err(err) => {
println_stderr!("cd: {}: {}", directory.display(), err);
},
_ => {},
}
}
fn pwd() {
let p = env::current_dir().unwrap_or(Path::new("/"));
println!("{}", p.display());
}
fn main() {
// TODO(achernya): is there any initialization we want to do
// before we enter the shell loop?
shell_loop();
}
|
};
if dir.is_none() {
println_stderr!("cd: no directory to change to");
return;
}
|
random_line_split
|
main.rs
|
// Author: Alex Chernyakhovsky ([email protected])
// TODO(achernya): Per the documentation on doc.rust-lang.org, std::io
// is not yet ready, so use old_io until 1.0-final.
#![feature(old_io)]
// std::env contains a lot of nice functions that otherwise would
// require std::os to use; std::os has lots of deprecated functions.
#![feature(env)]
// TODO(achernya): Remove this feature when std::env moves over to
// std::path.
#![feature(old_path)]
use std::env;
use std::old_io as io;
// println_stderr is like println, but to stderr.
macro_rules! println_stderr(
($($arg:tt)*) => (
match writeln!(&mut io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
// ShellCommand is a trait that defines a runnable POSIX shell
// command. An implementation is an abstract representation of shell
// commands such as simple invocations, invocations with redirection,
// and even shell pipelines.
trait ShellCommand {
fn run(&self);
}
fn shell_loop() {
let mut stdin = io::stdin();
loop {
print!("$ ");
let line = stdin.read_line();
match line {
Ok(expr) => handle_command(&expr),
Err(_) => break,
}
}
}
fn handle_command(user_expr: &str) {
// Clean up the string by removing the newline at the end
let expr = user_expr.trim_matches('\n');
let components: Vec<&str> = expr.split(' ').collect();
if builtins(&components) {
return;
}
}
fn
|
(command: &Vec<&str>) -> bool {
match command[0] {
"cd" => cd(command),
"pwd" => pwd(),
_ => return false,
}
true
}
fn cd(command: &Vec<&str>) {
// cd is the "change directory" command. It can take either 0 or 1
// arguments. If given no arguments, then the $HOME directory is
// chosen.
let dir: Option<Path> = match command.len() {
0 => panic!("invalid cd invocation"),
1 => env::home_dir(),
_ => Some(Path::new(command[1]))
};
if dir.is_none() {
println_stderr!("cd: no directory to change to");
return;
}
let directory = dir.unwrap();
let result = env::set_current_dir(&directory);
match result {
Err(err) => {
println_stderr!("cd: {}: {}", directory.display(), err);
},
_ => {},
}
}
fn pwd() {
let p = env::current_dir().unwrap_or(Path::new("/"));
println!("{}", p.display());
}
fn main() {
// TODO(achernya): is there any initialization we want to do
// before we enter the shell loop?
shell_loop();
}
|
builtins
|
identifier_name
|
main.rs
|
// Author: Alex Chernyakhovsky ([email protected])
// TODO(achernya): Per the documentation on doc.rust-lang.org, std::io
// is not yet ready, so use old_io until 1.0-final.
#![feature(old_io)]
// std::env contains a lot of nice functions that otherwise would
// require std::os to use; std::os has lots of deprecated functions.
#![feature(env)]
// TODO(achernya): Remove this feature when std::env moves over to
// std::path.
#![feature(old_path)]
use std::env;
use std::old_io as io;
// println_stderr is like println, but to stderr.
macro_rules! println_stderr(
($($arg:tt)*) => (
match writeln!(&mut io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
// ShellCommand is a trait that defines a runnable POSIX shell
// command. An implementation is an abstract representation of shell
// commands such as simple invocations, invocations with redirection,
// and even shell pipelines.
trait ShellCommand {
fn run(&self);
}
fn shell_loop() {
let mut stdin = io::stdin();
loop {
print!("$ ");
let line = stdin.read_line();
match line {
Ok(expr) => handle_command(&expr),
Err(_) => break,
}
}
}
fn handle_command(user_expr: &str)
|
fn builtins(command: &Vec<&str>) -> bool {
match command[0] {
"cd" => cd(command),
"pwd" => pwd(),
_ => return false,
}
true
}
fn cd(command: &Vec<&str>) {
// cd is the "change directory" command. It can take either 0 or 1
// arguments. If given no arguments, then the $HOME directory is
// chosen.
let dir: Option<Path> = match command.len() {
0 => panic!("invalid cd invocation"),
1 => env::home_dir(),
_ => Some(Path::new(command[1]))
};
if dir.is_none() {
println_stderr!("cd: no directory to change to");
return;
}
let directory = dir.unwrap();
let result = env::set_current_dir(&directory);
match result {
Err(err) => {
println_stderr!("cd: {}: {}", directory.display(), err);
},
_ => {},
}
}
fn pwd() {
let p = env::current_dir().unwrap_or(Path::new("/"));
println!("{}", p.display());
}
fn main() {
// TODO(achernya): is there any initialization we want to do
// before we enter the shell loop?
shell_loop();
}
|
{
// Clean up the string by removing the newline at the end
let expr = user_expr.trim_matches('\n');
let components: Vec<&str> = expr.split(' ').collect();
if builtins(&components) {
return;
}
}
|
identifier_body
|
index.js
|
/*
* Network Access Status v2.1.0
*Copyright 2017
*Authors: Venkatesh Chinthakindi.
*All Rights Reserved.
*Use, reproduction, distribution, and modification of this code are subject to the terms and conditions of the MIT license
*
*/
debugger;
var networkAccessStatus=(function(){
return{
check: function() {
var newEvent;
var w;
var status = null;
if (typeof (Worker) !== "undefined") {
if (typeof (w) == "undefined") {
|
try{
setInterval(()=>{startNetworkCheck.callback()},interval);
}
catch(error){
console.log('inner file console catch'+error)
}
},
callback:function(){
guid = (startNetworkCheck.S4() + startNetworkCheck.S4() + "-" +
startNetworkCheck.S4() + "-4" +
startNetworkCheck.S4().substr(0,3) + "-" + startNetworkCheck.S4()
+ "-" + startNetworkCheck.S4() +
startNetworkCheck.S4() + startNetworkCheck.S4()).toLowerCase();
self.postMessage("https://www.google.co.in/images/branding/product/ico/googleg_lodp.ico?ID="+guid);
},
S4:function(){
return (((1+Math.random())*0x10000)|0).toString(16).substring(1);
}
}
})((startNetworkCheck||{}))
startNetworkCheck.checkNetwork();`])
var blobURL = window.URL.createObjectURL(blob);
w = new Worker(blobURL);
}
var img=new Image();
w.onmessage = function(event) {
img.src=event.data;
img.onload = function() {
if(status!=true)
{
status=true;
newEvent = new CustomEvent('networkStatusChanged', { detail:true});
window.dispatchEvent(newEvent);
}
};
img.onerror = function() {
if(status!=false)
{
status=false;
newEvent = new CustomEvent('networkStatusChanged', { detail: false });
window.dispatchEvent(newEvent);
}
};
};
};
}
}
})((networkAccessStatus||{}));
module.exports=networkAccessStatus;
|
var blob=new Blob([
`var startNetworkCheck=(function(){
var interval=3000;
return{
checkNetwork:function(){
|
random_line_split
|
index.js
|
/*
* Network Access Status v2.1.0
*Copyright 2017
*Authors: Venkatesh Chinthakindi.
*All Rights Reserved.
*Use, reproduction, distribution, and modification of this code are subject to the terms and conditions of the MIT license
*
*/
debugger;
var networkAccessStatus=(function(){
return{
check: function() {
var newEvent;
var w;
var status = null;
if (typeof (Worker) !== "undefined") {
if (typeof (w) == "undefined") {
var blob=new Blob([
`var startNetworkCheck=(function(){
var interval=3000;
return{
checkNetwork:function(){
try{
setInterval(()=>{startNetworkCheck.callback()},interval);
}
catch(error){
console.log('inner file console catch'+error)
}
},
callback:function(){
guid = (startNetworkCheck.S4() + startNetworkCheck.S4() + "-" +
startNetworkCheck.S4() + "-4" +
startNetworkCheck.S4().substr(0,3) + "-" + startNetworkCheck.S4()
+ "-" + startNetworkCheck.S4() +
startNetworkCheck.S4() + startNetworkCheck.S4()).toLowerCase();
self.postMessage("https://www.google.co.in/images/branding/product/ico/googleg_lodp.ico?ID="+guid);
},
S4:function(){
return (((1+Math.random())*0x10000)|0).toString(16).substring(1);
}
}
})((startNetworkCheck||{}))
startNetworkCheck.checkNetwork();`])
var blobURL = window.URL.createObjectURL(blob);
w = new Worker(blobURL);
}
var img=new Image();
w.onmessage = function(event) {
img.src=event.data;
img.onload = function() {
if(status!=true)
|
};
img.onerror = function() {
if(status!=false)
{
status=false;
newEvent = new CustomEvent('networkStatusChanged', { detail: false });
window.dispatchEvent(newEvent);
}
};
};
};
}
}
})((networkAccessStatus||{}));
module.exports=networkAccessStatus;
|
{
status=true;
newEvent = new CustomEvent('networkStatusChanged', { detail:true});
window.dispatchEvent(newEvent);
}
|
conditional_block
|
test_record_test.py
|
# Lint as: python3
"""Unit tests for test_record module."""
import sys
import unittest
from openhtf.core import test_record
def _get_obj_size(obj):
|
class TestRecordTest(unittest.TestCase):
def test_attachment_data(self):
expected_data = b'test attachment data'
attachment = test_record.Attachment(expected_data, 'text')
data = attachment.data
self.assertEqual(data, expected_data)
def test_attachment_memory_safety(self):
empty_attachment = test_record.Attachment(b'', 'text')
expected_obj_size = _get_obj_size(empty_attachment)
large_data = b'test attachment data' * 1000
attachment = test_record.Attachment(large_data, 'text')
obj_size = _get_obj_size(attachment)
self.assertEqual(obj_size, expected_obj_size)
|
size = 0
for attr in obj.__slots__: # pytype: disable=attribute-error
size += sys.getsizeof(attr)
size += sys.getsizeof(getattr(obj, attr))
return size
|
identifier_body
|
test_record_test.py
|
# Lint as: python3
"""Unit tests for test_record module."""
import sys
import unittest
from openhtf.core import test_record
def _get_obj_size(obj):
size = 0
for attr in obj.__slots__: # pytype: disable=attribute-error
size += sys.getsizeof(attr)
size += sys.getsizeof(getattr(obj, attr))
return size
class
|
(unittest.TestCase):
def test_attachment_data(self):
expected_data = b'test attachment data'
attachment = test_record.Attachment(expected_data, 'text')
data = attachment.data
self.assertEqual(data, expected_data)
def test_attachment_memory_safety(self):
empty_attachment = test_record.Attachment(b'', 'text')
expected_obj_size = _get_obj_size(empty_attachment)
large_data = b'test attachment data' * 1000
attachment = test_record.Attachment(large_data, 'text')
obj_size = _get_obj_size(attachment)
self.assertEqual(obj_size, expected_obj_size)
|
TestRecordTest
|
identifier_name
|
test_record_test.py
|
# Lint as: python3
"""Unit tests for test_record module."""
import sys
import unittest
from openhtf.core import test_record
def _get_obj_size(obj):
size = 0
for attr in obj.__slots__: # pytype: disable=attribute-error
|
return size
class TestRecordTest(unittest.TestCase):
def test_attachment_data(self):
expected_data = b'test attachment data'
attachment = test_record.Attachment(expected_data, 'text')
data = attachment.data
self.assertEqual(data, expected_data)
def test_attachment_memory_safety(self):
empty_attachment = test_record.Attachment(b'', 'text')
expected_obj_size = _get_obj_size(empty_attachment)
large_data = b'test attachment data' * 1000
attachment = test_record.Attachment(large_data, 'text')
obj_size = _get_obj_size(attachment)
self.assertEqual(obj_size, expected_obj_size)
|
size += sys.getsizeof(attr)
size += sys.getsizeof(getattr(obj, attr))
|
conditional_block
|
test_record_test.py
|
# Lint as: python3
"""Unit tests for test_record module."""
import sys
import unittest
from openhtf.core import test_record
def _get_obj_size(obj):
size = 0
for attr in obj.__slots__: # pytype: disable=attribute-error
|
size += sys.getsizeof(getattr(obj, attr))
return size
class TestRecordTest(unittest.TestCase):
def test_attachment_data(self):
expected_data = b'test attachment data'
attachment = test_record.Attachment(expected_data, 'text')
data = attachment.data
self.assertEqual(data, expected_data)
def test_attachment_memory_safety(self):
empty_attachment = test_record.Attachment(b'', 'text')
expected_obj_size = _get_obj_size(empty_attachment)
large_data = b'test attachment data' * 1000
attachment = test_record.Attachment(large_data, 'text')
obj_size = _get_obj_size(attachment)
self.assertEqual(obj_size, expected_obj_size)
|
size += sys.getsizeof(attr)
|
random_line_split
|
paper-item.js
|
/**
* @module ember-paper
*/
import Ember from 'ember';
import RippleMixin from '../mixins/ripple-mixin';
import ProxyMixin from 'ember-paper/mixins/proxy-mixin';
const {
get,
set,
isEmpty,
computed,
run,
Component
} = Ember;
/**
* @class PaperItem
* @extends Ember.Component
* @uses ProxyMixin
* @uses RippleMixin
*/
export default Component.extend(RippleMixin, ProxyMixin, {
tagName: 'md-list-item',
// Ripple Overrides
rippleContainerSelector: '.md-no-style',
center: false,
dimBackground: true,
outline: false,
classNameBindings: ['shouldBeClickable:md-clickable', 'hasProxiedComponent:md-proxy-focus'],
attributeBindings: ['role', 'tabindex'],
role: 'listitem',
tabindex: '-1',
hasProxiedComponent: computed.bool('proxiedComponents.length'),
hasPrimaryAction: computed.notEmpty('onClick'),
hasSecondaryAction: computed('secondaryItem', 'onClick', function() {
let secondaryItem = get(this, 'secondaryItem');
if (!isEmpty(secondaryItem)) {
let hasClickAction = get(secondaryItem, 'onClick');
let hasChangeAction = get(secondaryItem, 'onChange');
return hasClickAction || hasChangeAction;
} else {
return false;
}
}),
secondaryItem: computed('proxiedComponents.[]', function() {
let proxiedComponents = get(this, 'proxiedComponents');
return proxiedComponents.find((component)=> {
return get(component, 'isSecondary');
});
}),
shouldBeClickable: computed.or('proxiedComponents.length', 'onClick'),
|
(ev) {
this.get('proxiedComponents').forEach((component)=> {
if (component.processProxy && !get(component, 'disabled') && (get(component, 'bubbles') | !get(this, 'hasPrimaryAction'))) {
component.processProxy();
}
});
this.sendAction('onClick', ev);
},
setupProxiedComponent() {
let tEl = this.$();
let proxiedComponents = get(this, 'proxiedComponents');
proxiedComponents.forEach((component)=> {
let isProxyHandlerSet = get(component, 'isProxyHandlerSet');
// we run init only once for each component.
if (!isProxyHandlerSet) {
// Allow proxied component to propagate ripple hammer event
if (!get(component, 'onClick') && !get(component, 'propagateRipple')) {
set(component, 'propagateRipple', true);
}
// ripple
let el = component.$();
set(this, 'mouseActive', false);
el.on('mousedown', ()=> {
set(this, 'mouseActive', true);
run.later(()=> {
set(this, 'mouseActive', false);
}, 100);
});
el.on('focus', ()=> {
if (!get(this, 'mouseActive')) {
tEl.addClass('md-focused');
}
el.on('blur', function proxyOnBlur() {
tEl.removeClass('md-focused');
el.off('blur', proxyOnBlur);
});
});
// If we don't have primary action then
// no need to bubble
if (!get(this, 'hasPrimaryAction')) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
} else if (get(proxiedComponents, 'length')) {
// primary action exists. Make sure child
// that has separate action won't bubble.
proxiedComponents.forEach((component)=> {
let hasClickAction = get(component, 'onClick');
let hasChangeAction = get(component, 'onChange');
if (hasClickAction || hasChangeAction) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
}
});
}
// Init complete. We don't want it to run again
// for that particular component.
set(component, 'isProxyHandlerSet', true);
}
});
}
});
|
click
|
identifier_name
|
paper-item.js
|
/**
* @module ember-paper
*/
import Ember from 'ember';
import RippleMixin from '../mixins/ripple-mixin';
import ProxyMixin from 'ember-paper/mixins/proxy-mixin';
const {
get,
set,
isEmpty,
computed,
run,
Component
} = Ember;
/**
* @class PaperItem
* @extends Ember.Component
* @uses ProxyMixin
* @uses RippleMixin
*/
export default Component.extend(RippleMixin, ProxyMixin, {
tagName: 'md-list-item',
// Ripple Overrides
rippleContainerSelector: '.md-no-style',
center: false,
dimBackground: true,
outline: false,
classNameBindings: ['shouldBeClickable:md-clickable', 'hasProxiedComponent:md-proxy-focus'],
attributeBindings: ['role', 'tabindex'],
role: 'listitem',
tabindex: '-1',
hasProxiedComponent: computed.bool('proxiedComponents.length'),
hasPrimaryAction: computed.notEmpty('onClick'),
hasSecondaryAction: computed('secondaryItem', 'onClick', function() {
let secondaryItem = get(this, 'secondaryItem');
if (!isEmpty(secondaryItem)) {
let hasClickAction = get(secondaryItem, 'onClick');
let hasChangeAction = get(secondaryItem, 'onChange');
return hasClickAction || hasChangeAction;
} else {
return false;
}
}),
secondaryItem: computed('proxiedComponents.[]', function() {
let proxiedComponents = get(this, 'proxiedComponents');
return proxiedComponents.find((component)=> {
return get(component, 'isSecondary');
});
}),
shouldBeClickable: computed.or('proxiedComponents.length', 'onClick'),
click(ev) {
this.get('proxiedComponents').forEach((component)=> {
if (component.processProxy && !get(component, 'disabled') && (get(component, 'bubbles') | !get(this, 'hasPrimaryAction'))) {
component.processProxy();
}
});
this.sendAction('onClick', ev);
},
setupProxiedComponent() {
let tEl = this.$();
let proxiedComponents = get(this, 'proxiedComponents');
proxiedComponents.forEach((component)=> {
let isProxyHandlerSet = get(component, 'isProxyHandlerSet');
// we run init only once for each component.
if (!isProxyHandlerSet) {
// Allow proxied component to propagate ripple hammer event
if (!get(component, 'onClick') && !get(component, 'propagateRipple')) {
set(component, 'propagateRipple', true);
}
// ripple
let el = component.$();
set(this, 'mouseActive', false);
el.on('mousedown', ()=> {
set(this, 'mouseActive', true);
run.later(()=> {
set(this, 'mouseActive', false);
}, 100);
});
el.on('focus', ()=> {
if (!get(this, 'mouseActive'))
|
el.on('blur', function proxyOnBlur() {
tEl.removeClass('md-focused');
el.off('blur', proxyOnBlur);
});
});
// If we don't have primary action then
// no need to bubble
if (!get(this, 'hasPrimaryAction')) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
} else if (get(proxiedComponents, 'length')) {
// primary action exists. Make sure child
// that has separate action won't bubble.
proxiedComponents.forEach((component)=> {
let hasClickAction = get(component, 'onClick');
let hasChangeAction = get(component, 'onChange');
if (hasClickAction || hasChangeAction) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
}
});
}
// Init complete. We don't want it to run again
// for that particular component.
set(component, 'isProxyHandlerSet', true);
}
});
}
});
|
{
tEl.addClass('md-focused');
}
|
conditional_block
|
paper-item.js
|
/**
* @module ember-paper
*/
import Ember from 'ember';
import RippleMixin from '../mixins/ripple-mixin';
import ProxyMixin from 'ember-paper/mixins/proxy-mixin';
const {
get,
set,
isEmpty,
computed,
run,
Component
} = Ember;
/**
* @class PaperItem
* @extends Ember.Component
* @uses ProxyMixin
* @uses RippleMixin
*/
export default Component.extend(RippleMixin, ProxyMixin, {
tagName: 'md-list-item',
|
outline: false,
classNameBindings: ['shouldBeClickable:md-clickable', 'hasProxiedComponent:md-proxy-focus'],
attributeBindings: ['role', 'tabindex'],
role: 'listitem',
tabindex: '-1',
hasProxiedComponent: computed.bool('proxiedComponents.length'),
hasPrimaryAction: computed.notEmpty('onClick'),
hasSecondaryAction: computed('secondaryItem', 'onClick', function() {
let secondaryItem = get(this, 'secondaryItem');
if (!isEmpty(secondaryItem)) {
let hasClickAction = get(secondaryItem, 'onClick');
let hasChangeAction = get(secondaryItem, 'onChange');
return hasClickAction || hasChangeAction;
} else {
return false;
}
}),
secondaryItem: computed('proxiedComponents.[]', function() {
let proxiedComponents = get(this, 'proxiedComponents');
return proxiedComponents.find((component)=> {
return get(component, 'isSecondary');
});
}),
shouldBeClickable: computed.or('proxiedComponents.length', 'onClick'),
click(ev) {
this.get('proxiedComponents').forEach((component)=> {
if (component.processProxy && !get(component, 'disabled') && (get(component, 'bubbles') | !get(this, 'hasPrimaryAction'))) {
component.processProxy();
}
});
this.sendAction('onClick', ev);
},
setupProxiedComponent() {
let tEl = this.$();
let proxiedComponents = get(this, 'proxiedComponents');
proxiedComponents.forEach((component)=> {
let isProxyHandlerSet = get(component, 'isProxyHandlerSet');
// we run init only once for each component.
if (!isProxyHandlerSet) {
// Allow proxied component to propagate ripple hammer event
if (!get(component, 'onClick') && !get(component, 'propagateRipple')) {
set(component, 'propagateRipple', true);
}
// ripple
let el = component.$();
set(this, 'mouseActive', false);
el.on('mousedown', ()=> {
set(this, 'mouseActive', true);
run.later(()=> {
set(this, 'mouseActive', false);
}, 100);
});
el.on('focus', ()=> {
if (!get(this, 'mouseActive')) {
tEl.addClass('md-focused');
}
el.on('blur', function proxyOnBlur() {
tEl.removeClass('md-focused');
el.off('blur', proxyOnBlur);
});
});
// If we don't have primary action then
// no need to bubble
if (!get(this, 'hasPrimaryAction')) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
} else if (get(proxiedComponents, 'length')) {
// primary action exists. Make sure child
// that has separate action won't bubble.
proxiedComponents.forEach((component)=> {
let hasClickAction = get(component, 'onClick');
let hasChangeAction = get(component, 'onChange');
if (hasClickAction || hasChangeAction) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
}
});
}
// Init complete. We don't want it to run again
// for that particular component.
set(component, 'isProxyHandlerSet', true);
}
});
}
});
|
// Ripple Overrides
rippleContainerSelector: '.md-no-style',
center: false,
dimBackground: true,
|
random_line_split
|
paper-item.js
|
/**
* @module ember-paper
*/
import Ember from 'ember';
import RippleMixin from '../mixins/ripple-mixin';
import ProxyMixin from 'ember-paper/mixins/proxy-mixin';
const {
get,
set,
isEmpty,
computed,
run,
Component
} = Ember;
/**
* @class PaperItem
* @extends Ember.Component
* @uses ProxyMixin
* @uses RippleMixin
*/
export default Component.extend(RippleMixin, ProxyMixin, {
tagName: 'md-list-item',
// Ripple Overrides
rippleContainerSelector: '.md-no-style',
center: false,
dimBackground: true,
outline: false,
classNameBindings: ['shouldBeClickable:md-clickable', 'hasProxiedComponent:md-proxy-focus'],
attributeBindings: ['role', 'tabindex'],
role: 'listitem',
tabindex: '-1',
hasProxiedComponent: computed.bool('proxiedComponents.length'),
hasPrimaryAction: computed.notEmpty('onClick'),
hasSecondaryAction: computed('secondaryItem', 'onClick', function() {
let secondaryItem = get(this, 'secondaryItem');
if (!isEmpty(secondaryItem)) {
let hasClickAction = get(secondaryItem, 'onClick');
let hasChangeAction = get(secondaryItem, 'onChange');
return hasClickAction || hasChangeAction;
} else {
return false;
}
}),
secondaryItem: computed('proxiedComponents.[]', function() {
let proxiedComponents = get(this, 'proxiedComponents');
return proxiedComponents.find((component)=> {
return get(component, 'isSecondary');
});
}),
shouldBeClickable: computed.or('proxiedComponents.length', 'onClick'),
click(ev) {
this.get('proxiedComponents').forEach((component)=> {
if (component.processProxy && !get(component, 'disabled') && (get(component, 'bubbles') | !get(this, 'hasPrimaryAction'))) {
component.processProxy();
}
});
this.sendAction('onClick', ev);
},
setupProxiedComponent()
|
});
|
{
let tEl = this.$();
let proxiedComponents = get(this, 'proxiedComponents');
proxiedComponents.forEach((component)=> {
let isProxyHandlerSet = get(component, 'isProxyHandlerSet');
// we run init only once for each component.
if (!isProxyHandlerSet) {
// Allow proxied component to propagate ripple hammer event
if (!get(component, 'onClick') && !get(component, 'propagateRipple')) {
set(component, 'propagateRipple', true);
}
// ripple
let el = component.$();
set(this, 'mouseActive', false);
el.on('mousedown', ()=> {
set(this, 'mouseActive', true);
run.later(()=> {
set(this, 'mouseActive', false);
}, 100);
});
el.on('focus', ()=> {
if (!get(this, 'mouseActive')) {
tEl.addClass('md-focused');
}
el.on('blur', function proxyOnBlur() {
tEl.removeClass('md-focused');
el.off('blur', proxyOnBlur);
});
});
// If we don't have primary action then
// no need to bubble
if (!get(this, 'hasPrimaryAction')) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
} else if (get(proxiedComponents, 'length')) {
// primary action exists. Make sure child
// that has separate action won't bubble.
proxiedComponents.forEach((component)=> {
let hasClickAction = get(component, 'onClick');
let hasChangeAction = get(component, 'onChange');
if (hasClickAction || hasChangeAction) {
let bubbles = get(component, 'bubbles');
if (isEmpty(bubbles)) {
set(component, 'bubbles', false);
}
}
});
}
// Init complete. We don't want it to run again
// for that particular component.
set(component, 'isProxyHandlerSet', true);
}
});
}
|
identifier_body
|
games-reducer.ts
|
import {ADD_ALL_GAMES_ACTION, ADD_GAME_ACTION, CLEAR_GAMES_ACTION, LOAD_GAMES_FINISHED_ACTION} from './games-actions';
import {TournamentGame} from '../../../shared/model/tournament-game';
import * as _ from 'lodash';
export interface GamesState {
allGames: TournamentGame[];
loadGames: boolean;
}
const initialState: GamesState = {
allGames: [],
loadGames: true
};
export function gamesReducer(state: GamesState = initialState, action): GamesState {
switch (action.type) {
case ADD_ALL_GAMES_ACTION:
return addAllGames(state, action);
case ADD_GAME_ACTION:
return addGame(state, action);
case LOAD_GAMES_FINISHED_ACTION:
return handleLoadGames(state);
case CLEAR_GAMES_ACTION:
return clearGames(state);
default:
return state;
}
}
function addAllGames(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined) {
newStoreState.allGames = action.payload;
}
return newStoreState;
}
function addGame(state: GamesState, action): GamesState
|
function clearGames(state: GamesState): GamesState {
const newState: GamesState = _.cloneDeep(state);
newState.allGames = [];
return newState;
}
function handleLoadGames(state: GamesState): GamesState {
const newStoreState = _.cloneDeep(state);
newStoreState.loadGames = false;
return newStoreState;
}
|
{
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined ) {
newStoreState.allGames.push(action.payload);
}
return newStoreState;
}
|
identifier_body
|
games-reducer.ts
|
import {ADD_ALL_GAMES_ACTION, ADD_GAME_ACTION, CLEAR_GAMES_ACTION, LOAD_GAMES_FINISHED_ACTION} from './games-actions';
import {TournamentGame} from '../../../shared/model/tournament-game';
import * as _ from 'lodash';
export interface GamesState {
allGames: TournamentGame[];
loadGames: boolean;
}
const initialState: GamesState = {
allGames: [],
loadGames: true
};
export function gamesReducer(state: GamesState = initialState, action): GamesState {
switch (action.type) {
case ADD_ALL_GAMES_ACTION:
return addAllGames(state, action);
case ADD_GAME_ACTION:
return addGame(state, action);
case LOAD_GAMES_FINISHED_ACTION:
return handleLoadGames(state);
case CLEAR_GAMES_ACTION:
return clearGames(state);
default:
return state;
}
|
function addAllGames(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined) {
newStoreState.allGames = action.payload;
}
return newStoreState;
}
function addGame(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined ) {
newStoreState.allGames.push(action.payload);
}
return newStoreState;
}
function clearGames(state: GamesState): GamesState {
const newState: GamesState = _.cloneDeep(state);
newState.allGames = [];
return newState;
}
function handleLoadGames(state: GamesState): GamesState {
const newStoreState = _.cloneDeep(state);
newStoreState.loadGames = false;
return newStoreState;
}
|
}
|
random_line_split
|
games-reducer.ts
|
import {ADD_ALL_GAMES_ACTION, ADD_GAME_ACTION, CLEAR_GAMES_ACTION, LOAD_GAMES_FINISHED_ACTION} from './games-actions';
import {TournamentGame} from '../../../shared/model/tournament-game';
import * as _ from 'lodash';
export interface GamesState {
allGames: TournamentGame[];
loadGames: boolean;
}
const initialState: GamesState = {
allGames: [],
loadGames: true
};
export function gamesReducer(state: GamesState = initialState, action): GamesState {
switch (action.type) {
case ADD_ALL_GAMES_ACTION:
return addAllGames(state, action);
case ADD_GAME_ACTION:
return addGame(state, action);
case LOAD_GAMES_FINISHED_ACTION:
return handleLoadGames(state);
case CLEAR_GAMES_ACTION:
return clearGames(state);
default:
return state;
}
}
function addAllGames(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined) {
newStoreState.allGames = action.payload;
}
return newStoreState;
}
function addGame(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined ) {
newStoreState.allGames.push(action.payload);
}
return newStoreState;
}
function clearGames(state: GamesState): GamesState {
const newState: GamesState = _.cloneDeep(state);
newState.allGames = [];
return newState;
}
function
|
(state: GamesState): GamesState {
const newStoreState = _.cloneDeep(state);
newStoreState.loadGames = false;
return newStoreState;
}
|
handleLoadGames
|
identifier_name
|
games-reducer.ts
|
import {ADD_ALL_GAMES_ACTION, ADD_GAME_ACTION, CLEAR_GAMES_ACTION, LOAD_GAMES_FINISHED_ACTION} from './games-actions';
import {TournamentGame} from '../../../shared/model/tournament-game';
import * as _ from 'lodash';
export interface GamesState {
allGames: TournamentGame[];
loadGames: boolean;
}
const initialState: GamesState = {
allGames: [],
loadGames: true
};
export function gamesReducer(state: GamesState = initialState, action): GamesState {
switch (action.type) {
case ADD_ALL_GAMES_ACTION:
return addAllGames(state, action);
case ADD_GAME_ACTION:
return addGame(state, action);
case LOAD_GAMES_FINISHED_ACTION:
return handleLoadGames(state);
case CLEAR_GAMES_ACTION:
return clearGames(state);
default:
return state;
}
}
function addAllGames(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined) {
newStoreState.allGames = action.payload;
}
return newStoreState;
}
function addGame(state: GamesState, action): GamesState {
const newStoreState: GamesState = _.cloneDeep(state);
if (action.payload !== undefined )
|
return newStoreState;
}
function clearGames(state: GamesState): GamesState {
const newState: GamesState = _.cloneDeep(state);
newState.allGames = [];
return newState;
}
function handleLoadGames(state: GamesState): GamesState {
const newStoreState = _.cloneDeep(state);
newStoreState.loadGames = false;
return newStoreState;
}
|
{
newStoreState.allGames.push(action.payload);
}
|
conditional_block
|
Exp6_LineFollowing_IRSensors.py
|
"""//***********************************************************************
* Exp6_LineFollowing_IRSensors -- RedBot Experiment 6
*
* This code reads the three line following sensors on A3, A6, and A7
* and prints them out to the Serial Monitor. Upload this example to your
* RedBot and open up the Serial Monitor by clicking the magnifying glass
* in the upper-right hand corner.
|
* Revised, 31 Oct 2014 B. Huang
* Revised, 2 Oct 2015 L Mathews
***********************************************************************/"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotSensor
WIFLY_IP_ADDRESS = None # Leave set as None if not using WiFly
WIFLY_IP_ADDRESS = "10.0.1.18" # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
# Use a USB cable to RedBot or an XBee connection instead of WiFly.
COM_PORT = None # Use None for automatic com port detection, or set if needed i.e. "COM7"
board = PyMata3(com_port=COM_PORT)
LEFT_LINE_FOLLOWER = 3 # pin number assignments for each IR sensor
CENTRE_LINE_FOLLOWER = 6
RIGHT_LINE_FOLLOWER = 7
IR_sensor_1 = RedBotSensor(board, LEFT_LINE_FOLLOWER)
IR_sensor_2 = RedBotSensor(board, CENTRE_LINE_FOLLOWER)
IR_sensor_3 = RedBotSensor(board, RIGHT_LINE_FOLLOWER)
def signal_handler(sig, frame):
"""Helper method to shutdown the RedBot if Ctrl-c is pressed"""
print('\nYou pressed Ctrl+C')
if board is not None:
board.send_reset()
board.shutdown()
sys.exit(0)
def setup():
signal.signal(signal.SIGINT, signal_handler)
print("Welcome to Experiment 6!")
print("------------------------")
def loop():
board.sleep(0.1)
print("IR Sensor Readings: {}, {}, {}".format(IR_sensor_1.read(), IR_sensor_2.read(), IR_sensor_3.read()))
if __name__ == "__main__":
setup()
while True:
loop()
|
*
* This sketch was written by SparkFun Electronics,with lots of help from
* the Arduino community. This code is completely free for any use.
*
* 8 Oct 2013 M. Hord
|
random_line_split
|
Exp6_LineFollowing_IRSensors.py
|
"""//***********************************************************************
* Exp6_LineFollowing_IRSensors -- RedBot Experiment 6
*
* This code reads the three line following sensors on A3, A6, and A7
* and prints them out to the Serial Monitor. Upload this example to your
* RedBot and open up the Serial Monitor by clicking the magnifying glass
* in the upper-right hand corner.
*
* This sketch was written by SparkFun Electronics,with lots of help from
* the Arduino community. This code is completely free for any use.
*
* 8 Oct 2013 M. Hord
* Revised, 31 Oct 2014 B. Huang
* Revised, 2 Oct 2015 L Mathews
***********************************************************************/"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotSensor
WIFLY_IP_ADDRESS = None # Leave set as None if not using WiFly
WIFLY_IP_ADDRESS = "10.0.1.18" # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
# Use a USB cable to RedBot or an XBee connection instead of WiFly.
COM_PORT = None # Use None for automatic com port detection, or set if needed i.e. "COM7"
board = PyMata3(com_port=COM_PORT)
LEFT_LINE_FOLLOWER = 3 # pin number assignments for each IR sensor
CENTRE_LINE_FOLLOWER = 6
RIGHT_LINE_FOLLOWER = 7
IR_sensor_1 = RedBotSensor(board, LEFT_LINE_FOLLOWER)
IR_sensor_2 = RedBotSensor(board, CENTRE_LINE_FOLLOWER)
IR_sensor_3 = RedBotSensor(board, RIGHT_LINE_FOLLOWER)
def signal_handler(sig, frame):
"""Helper method to shutdown the RedBot if Ctrl-c is pressed"""
print('\nYou pressed Ctrl+C')
if board is not None:
board.send_reset()
board.shutdown()
sys.exit(0)
def setup():
|
def loop():
board.sleep(0.1)
print("IR Sensor Readings: {}, {}, {}".format(IR_sensor_1.read(), IR_sensor_2.read(), IR_sensor_3.read()))
if __name__ == "__main__":
setup()
while True:
loop()
|
signal.signal(signal.SIGINT, signal_handler)
print("Welcome to Experiment 6!")
print("------------------------")
|
identifier_body
|
Exp6_LineFollowing_IRSensors.py
|
"""//***********************************************************************
* Exp6_LineFollowing_IRSensors -- RedBot Experiment 6
*
* This code reads the three line following sensors on A3, A6, and A7
* and prints them out to the Serial Monitor. Upload this example to your
* RedBot and open up the Serial Monitor by clicking the magnifying glass
* in the upper-right hand corner.
*
* This sketch was written by SparkFun Electronics,with lots of help from
* the Arduino community. This code is completely free for any use.
*
* 8 Oct 2013 M. Hord
* Revised, 31 Oct 2014 B. Huang
* Revised, 2 Oct 2015 L Mathews
***********************************************************************/"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotSensor
WIFLY_IP_ADDRESS = None # Leave set as None if not using WiFly
WIFLY_IP_ADDRESS = "10.0.1.18" # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
# Use a USB cable to RedBot or an XBee connection instead of WiFly.
COM_PORT = None # Use None for automatic com port detection, or set if needed i.e. "COM7"
board = PyMata3(com_port=COM_PORT)
LEFT_LINE_FOLLOWER = 3 # pin number assignments for each IR sensor
CENTRE_LINE_FOLLOWER = 6
RIGHT_LINE_FOLLOWER = 7
IR_sensor_1 = RedBotSensor(board, LEFT_LINE_FOLLOWER)
IR_sensor_2 = RedBotSensor(board, CENTRE_LINE_FOLLOWER)
IR_sensor_3 = RedBotSensor(board, RIGHT_LINE_FOLLOWER)
def
|
(sig, frame):
"""Helper method to shutdown the RedBot if Ctrl-c is pressed"""
print('\nYou pressed Ctrl+C')
if board is not None:
board.send_reset()
board.shutdown()
sys.exit(0)
def setup():
signal.signal(signal.SIGINT, signal_handler)
print("Welcome to Experiment 6!")
print("------------------------")
def loop():
board.sleep(0.1)
print("IR Sensor Readings: {}, {}, {}".format(IR_sensor_1.read(), IR_sensor_2.read(), IR_sensor_3.read()))
if __name__ == "__main__":
setup()
while True:
loop()
|
signal_handler
|
identifier_name
|
Exp6_LineFollowing_IRSensors.py
|
"""//***********************************************************************
* Exp6_LineFollowing_IRSensors -- RedBot Experiment 6
*
* This code reads the three line following sensors on A3, A6, and A7
* and prints them out to the Serial Monitor. Upload this example to your
* RedBot and open up the Serial Monitor by clicking the magnifying glass
* in the upper-right hand corner.
*
* This sketch was written by SparkFun Electronics,with lots of help from
* the Arduino community. This code is completely free for any use.
*
* 8 Oct 2013 M. Hord
* Revised, 31 Oct 2014 B. Huang
* Revised, 2 Oct 2015 L Mathews
***********************************************************************/"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotSensor
WIFLY_IP_ADDRESS = None # Leave set as None if not using WiFly
WIFLY_IP_ADDRESS = "10.0.1.18" # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
# Use a USB cable to RedBot or an XBee connection instead of WiFly.
COM_PORT = None # Use None for automatic com port detection, or set if needed i.e. "COM7"
board = PyMata3(com_port=COM_PORT)
LEFT_LINE_FOLLOWER = 3 # pin number assignments for each IR sensor
CENTRE_LINE_FOLLOWER = 6
RIGHT_LINE_FOLLOWER = 7
IR_sensor_1 = RedBotSensor(board, LEFT_LINE_FOLLOWER)
IR_sensor_2 = RedBotSensor(board, CENTRE_LINE_FOLLOWER)
IR_sensor_3 = RedBotSensor(board, RIGHT_LINE_FOLLOWER)
def signal_handler(sig, frame):
"""Helper method to shutdown the RedBot if Ctrl-c is pressed"""
print('\nYou pressed Ctrl+C')
if board is not None:
board.send_reset()
board.shutdown()
sys.exit(0)
def setup():
signal.signal(signal.SIGINT, signal_handler)
print("Welcome to Experiment 6!")
print("------------------------")
def loop():
board.sleep(0.1)
print("IR Sensor Readings: {}, {}, {}".format(IR_sensor_1.read(), IR_sensor_2.read(), IR_sensor_3.read()))
if __name__ == "__main__":
setup()
while True:
|
loop()
|
conditional_block
|
|
bigtable_input.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
flags.DEFINE_string('cbt_project', None,
'The project used to connect to the cloud bigtable ')
# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
'The identifier of the cloud bigtable instance in cbt_project')
# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
'The table within the cloud bigtable instance to use')
FLAGS = flags.FLAGS
# Constants
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'
# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"
# Name of model
MODEL_NAME = b'model'
# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100
# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'
# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'
# Patterns
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')
# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
'BigtableSpec',
['project', 'instance', 'table'])
# Information needed to create a mix of two Game queues.
# r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
'GameMix',
['games_r', 'moves_r',
'games_c', 'moves_c',
'selection'])
def cbt_intvalue(value):
"""Decode a big-endian uint64.
Cloud Bigtable stores integers as big-endian uint64,
and performs this translation when integers are being
set. But when being read, the values need to be
decoded.
"""
return int(struct.unpack('>q', value)[0])
def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
"""Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
"""
ds = ds.batch(batch_size)
# Turns 'g_0000001234_m_133' into 'g_0000001234'
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
# NOTE: Cannot be truly sure the count is right till the end.
return h
def _game_keys_as_array(ds):
"""Turn keys of a Bigtable dataset into an array.
Take g_GGG_m_MMM and create GGG.MMM numbers.
Valuable when visualizing the distribution of a given dataset in
the game keyspace.
"""
ds = ds.map(lambda row_key, cell: row_key)
# want 'g_0000001234_m_133' is '0000001234.133' and so forth
ds = ds.map(lambda x:
tf.strings.to_number(tf.strings.substr(x, 2, 10) +
'.' +
tf.strings.substr(x, 15, 3),
out_type=tf.float64))
return make_single_array(ds)
def _delete_rows(args):
"""Delete the given row keys from the given Bigtable.
The args are (BigtableSpec, row_keys), but are passed
as a single argument in order to work with
multiprocessing.Pool.map. This is also the reason why this is a
top-level function instead of a method.
"""
btspec, row_keys = args
bt_table = bigtable.Client(btspec.project).instance(
btspec.instance).table(btspec.table)
rows = [bt_table.row(k) for k in row_keys]
for r in rows:
r.delete()
bt_table.mutate_rows(rows)
return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the columns `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes are writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
def trim_games_since(self, t, max_games=500000):
"""Trim off the games since the given time.
Search back no more than max_games for this time point, locate
the game there, and remove all games since that game,
resetting the latest game counter.
If `t` is a `datetime.timedelta`, then the target time will be
found by subtracting that delta from the time of the last
game. Otherwise, it will be the target time.
"""
latest = self.latest_game_number
earliest = int(latest - max_games)
gbt = self.games_by_time(earliest, latest)
if not gbt:
utils.dbg('No games between %d and %d' % (earliest, latest))
return
most_recent = gbt[-1]
if isinstance(t, datetime.timedelta):
target = most_recent[0] - t
else:
target = t
i = bisect.bisect_right(gbt, (target,))
if i >= len(gbt):
utils.dbg('Last game is already at %s' % gbt[-1][0])
return
when, which = gbt[i]
utils.dbg('Most recent: %s %s' % most_recent)
utils.dbg(' Target: %s %s' % (when, which))
which = int(which)
self.delete_row_range(ROW_PREFIX, which, latest)
self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def
|
(r):
rk = str(r.row_key, 'utf-8')
g, m = _game_row_key.match(rk).groups()
q = r.cell_value(METADATA, bleak)
return int(g), int(m), float(q)
return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
def require_fresh_games(self, number_fresh):
"""Require a given number of fresh games to be played.
Args:
number_fresh: integer, number of new fresh games needed
Increments the cell `table_state=metadata:wait_for_game_number`
by the given number of games. This will cause
`self.wait_for_fresh_games()` to block until the game
counter has reached this number.
"""
latest = self.latest_game_number
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))
table_state.commit()
print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
def wait_for_fresh_games(self, poll_interval=15.0):
"""Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter` is at least the value
in that cell.
"""
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
last_latest = latest_game
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
def read_wait_cell(self):
"""Read the value of the cell holding the 'wait' value.
Returns the int value of whatever it has, or None if the cell doesn't
exist.
"""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, WAIT_CELL, WAIT_CELL))
if table_state is None:
utils.dbg('No waiting for new games needed; '
'wait_for_game_number column not in table_state')
return None
value = table_state.cell_value(METADATA, WAIT_CELL)
if not value:
utils.dbg('No waiting for new games needed; '
'no value in wait_for_game_number cell '
'in table_state')
return None
return cbt_intvalue(value)
def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
def moves_from_games(self, start_game, end_game, moves, shuffle,
column_family, column):
"""Dataset of samples and/or shuffled moves from game range.
Args:
start_game: integer, the game number at which to start sampling.
end_game: integer, the game number at which to stop sampling.
moves: an integer indicating how many moves should be sampled
from those games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the given game range.
"""
start_row = ROW_PREFIX.format(start_game)
end_row = ROW_PREFIX.format(end_game)
# NOTE: Choose a probability high enough to guarantee at least the
# required number of moves, by using a slightly lower estimate
# of the total moves, then trimming the result.
total_moves = self.count_moves_in_game_range(start_game, end_game)
probability = moves / (total_moves * 0.99)
utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; moves %d' % (
start_row, end_row, total_moves, probability, moves))
ds = self.tf_table.parallel_scan_range(start_row, end_row,
probability=probability,
columns=[(column_family, column)])
if shuffle:
utils.dbg('Doing a complete shuffle of %d moves' % moves)
ds = ds.shuffle(moves)
ds = ds.take(moves)
return ds
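# Rough worked example (illustrative, values assumed): if the range holds
# 1,000,000 moves and moves=500,000, then probability = 500000 / 990000 ~ 0.505,
# so the parallel scan slightly oversamples and ds.take(moves) trims the excess.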
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
"""Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
"""
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
def _write_move_counts(self, sess, h):
"""Add move counts from the given histogram to the table.
Used to update the move counts in an existing table. Should
not be needed except for backfill or repair.
Args:
sess: TF session to use for doing a Bigtable write; the write itself
goes through `self.tf_table`, the TF Cloud Bigtable table.
h: a dictionary keyed by game row prefix ("g_0023561") whose values
are the move counts for each game.
"""
def gen():
for k, v in h.items():
# The keys in the histogram may be of type 'bytes'
k = str(k, 'utf-8')
vs = str(v)
yield (k.replace('g_', 'ct_') + '_%d' % v, vs)
yield (k + '_m_000', vs)
mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string))
wr_op = self.tf_table.write(mc,
column_families=[METADATA],
columns=[MOVE_COUNT])
sess.run(wr_op)
def update_move_counts(self, start_game, end_game, interval=1000):
"""Used to update the move_count cell for older games.
Should not be needed except for backfill or repair.
move_count cells will be updated in both g_<game_id>_m_000 rows
and ct_<game_id>_<move_count> rows.
"""
for g in range(start_game, end_game, interval):
with tf.Session() as sess:
start_row = ROW_PREFIX.format(g)
end_row = ROW_PREFIX.format(g + interval)
print('Range:', start_row, end_row)
start_time = time.time()
ds = self.tf_table.keys_by_range_dataset(start_row, end_row)
h = _histogram_move_keys_by_game(sess, ds)
self._write_move_counts(sess, h)
end_time = time.time()
elapsed = end_time - start_time
print(' games/sec:', len(h)/elapsed)
def set_fresh_watermark(game_queue, count_from, window_size,
fresh_fraction=0.05, minimum_fresh=20000):
"""Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games already played is measured relative to the 'count_from' parameter.
Args:
game_queue: a GameQueue object whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
"""
already_played = game_queue.latest_game_number - count_from
print("== already_played: ", already_played, flush=True)
if window_size > count_from: # handle the case where the window is not yet 'full'
game_queue.require_fresh_games(int(minimum_fresh * .9))
else:
num_to_play = max(0, math.ceil(window_size * .9 * fresh_fraction) - already_played)
print("== Num to play: ", num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play)
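# Hypothetical usage sketch (numbers assumed): with a 500,000-game window
# counted from game 3,000,000,
#   set_fresh_watermark(game_queue, 3000000, 500000)
# takes the else branch and requires roughly ceil(500000 * 0.9 * 0.05) = 22,500
# new games, less any games already played past game 3,000,000 (floored at zero).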
def mix_by_decile(games, moves, deciles=9):
"""Compute a mix of regular and calibration games by decile.
deciles should be an integer between 0 and 10 inclusive.
"""
assert 0 <= deciles <= 10
# The prefixes and suffixes below have the following meanings:
# ct_: count
# fr_: fraction
# _r: resign (ordinary)
# _nr: no-resign
ct_total = 10
lesser = ct_total - math.floor(deciles)
greater = ct_total - lesser
ct_r, ct_nr = greater, lesser
fr_r = ct_r / ct_total
fr_nr = ct_nr / ct_total
games_r = math.ceil(games * fr_r)
moves_r = math.ceil(moves * fr_r)
games_c = math.floor(games * fr_nr)
moves_c = math.floor(moves * fr_nr)
selection = np.array([0] * ct_r + [1] * ct_nr, dtype=np.int64)
return GameMix(games_r, moves_r,
games_c, moves_c,
selection)
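# Rough worked example (illustrative): with the default deciles=9, ct_r=9 and
# ct_nr=1, so mix_by_decile(1000, 100) yields 900 regular games / 90 regular
# moves and 100 calibration games / 10 calibration moves, with a selection
# pattern of nine 0s followed by one 1.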
def get_unparsed_moves_from_last_n_games(games, games_nr, n,
moves=2**21,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from the last N games.
Args:
games, games_nr: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
n: an integer indicating how many past games should be sourced.
moves: an integer indicating how many moves should be sampled
from those N games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the last `n` games in the table.
"""
mix = mix_by_decile(n, moves, 9)
resign = games.moves_from_last_n_games(
mix.games_r,
mix.moves_r,
shuffle,
column_family, column)
no_resign = games_nr.moves_from_last_n_games(
mix.games_c,
mix.moves_c,
shuffle,
column_family, column)
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, no_resign], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
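# Hypothetical usage sketch (argument values assumed): a training job might
# build its input pipeline with
#   ds = get_unparsed_moves_from_last_n_games(games, games_nr, n=500000)
# yielding serialized tf.Example strings drawn ~90% from resign games and
# ~10% from calibration games, per mix_by_decile(n, moves, 9).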
def get_unparsed_moves_from_games(games_r, games_c,
start_r, start_c,
mix,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from a given start point.
Args:
games_r, games_c: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
start_r: an integer indicating the game number to start at in games_r.
start_c: an integer indicating the game number to start at in games_c.
mix: the result of mix_by_decile()
shuffle: if True, shuffle the selected move examples.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than the moves implied by `mix`,
sampled randomly from the game ranges implied.
"""
resign = games_r.moves_from_games(
start_r, start_r + mix.games_r, mix.moves_r, shuffle, column_family, column)
calibrated = games_c.moves_from_games(
start_c, start_c + mix.games_c, mix.moves_c, shuffle, column_family, column)
moves = mix.moves_r + mix.moves_c
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, calibrated], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8):
"""Count and return all the elements in the given dataset.
Debugging function. The elements in a dataset cannot be counted
without enumerating all of them. By counting in batch and in
parallel, this method allows rapid traversal of the dataset.
Args:
ds: The dataset whose elements should be counted.
batch_size: the number of elements to count at a time.
parallel_batch: how many batches to count in parallel.
Returns:
The number of elements in the dataset.
"""
with tf.Session() as sess:
dsc = ds.apply(tf.data.experimental.enumerate_dataset())
dsc = dsc.apply(tf.data.experimental.map_and_batch(
lambda c, v: c, batch_size, num_parallel_batches=parallel_batch))
iterator = dsc.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
counted = 0
try:
while True:
# The numbers in the tensors are 0-based indices,
# so add 1 to get the number counted.
counted = sess.run(tf.reduce_max(get_next)) + 1
utils.dbg('Counted so far: %d' % counted)
except tf.errors.OutOfRangeError:
pass
utils.dbg('Counted total: %d' % counted)
return counted
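# Debugging sketch (illustrative): this can confirm how many moves a sampled
# dataset actually produced, e.g.
#   n = count_elements_in_dataset(games.moves_from_last_n_games(
#       500000, 2**21, True, TFEXAMPLE, 'example'))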
|
parse
|
identifier_name
|
bigtable_input.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
flags.DEFINE_string('cbt_project', None,
'The project used to connect to the cloud bigtable ')
# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
'The identifier of the cloud bigtable instance in cbt_project')
# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
'The table within the cloud bigtable instance to use')
FLAGS = flags.FLAGS
# Constants
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'
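# Illustrative (derived from the format specs above): ROW_PREFIX.format(1234)
# gives 'g_0000001234_' and ROWCOUNT_PREFIX.format(1234) gives 'ct_0000001234_'.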
# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"
# Name of model
MODEL_NAME = b'model'
# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100
# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'
# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'
# Patterns
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')
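# Illustrative matches (assumed from the key formats above):
#   _game_row_key.match('g_0000001234_m_133').groups() -> ('0000001234', '133')
#   _game_from_counter.match('ct_0000001234_').groups() -> ('0000001234',)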
# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
'BigtableSpec',
['project', 'instance', 'table'])
# Information needed to create a mix of two Game queues.
# r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
'GameMix',
['games_r', 'moves_r',
'games_c', 'moves_c',
'selection'])
def cbt_intvalue(value):
"""Decode a big-endian uint64.
Cloud Bigtable stores integers as big-endian uint64,
and performs this translation when integers are being
set. But when being read, the values need to be
decoded.
"""
return int(struct.unpack('>q', value)[0])
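# Minimal sketch (not part of the original module): the decoding is just the
# inverse of big-endian 8-byte packing, e.g.
#   assert cbt_intvalue(struct.pack('>q', 12345)) == 12345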
def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
"""Given a dataset of key names, return a histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
"""
ds = ds.batch(batch_size)
# Turns 'g_0000001234_m_133' into 'g_0000001234'
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
# NOTE: Cannot be truly sure the count is right till the end.
return h
def _game_keys_as_array(ds):
"""Turn keys of a Bigtable dataset into an array.
Take g_GGG_m_MMM and create GGG.MMM numbers.
Valuable when visualizing the distribution of a given dataset in
the game keyspace.
"""
ds = ds.map(lambda row_key, cell: row_key)
# want 'g_0000001234_m_133' to become '0000001234.133' and so forth
ds = ds.map(lambda x:
tf.strings.to_number(tf.strings.substr(x, 2, 10) +
'.' +
tf.strings.substr(x, 15, 3),
out_type=tf.float64))
return make_single_array(ds)
def _delete_rows(args):
"""Delete the given row keys from the given Bigtable.
The args are (BigtableSpec, row_keys), but are passed
as a single argument in order to work with
multiprocessing.Pool.map. This is also the reason why this is a
top-level function instead of a method.
"""
btspec, row_keys = args
bt_table = bigtable.Client(btspec.project).instance(
btspec.instance).table(btspec.table)
rows = [bt_table.row(k) for k in row_keys]
for r in rows:
r.delete()
bt_table.mutate_rows(rows)
return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the columns `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes is writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
|
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
def trim_games_since(self, t, max_games=500000):
"""Trim off the games since the given time.
Search back no more than max_games for this time point, locate
the game there, and remove all games since that game,
resetting the latest game counter.
If `t` is a `datetime.timedelta`, then the target time will be
found by subtracting that delta from the time of the last
game. Otherwise, it will be the target time.
"""
latest = self.latest_game_number
earliest = int(latest - max_games)
gbt = self.games_by_time(earliest, latest)
if not gbt:
utils.dbg('No games between %d and %d' % (earliest, latest))
return
most_recent = gbt[-1]
if isinstance(t, datetime.timedelta):
target = most_recent[0] - t
else:
target = t
i = bisect.bisect_right(gbt, (target,))
if i >= len(gbt):
utils.dbg('Last game is already at %s' % gbt[-1][0])
return
when, which = gbt[i]
utils.dbg('Most recent: %s %s' % most_recent)
utils.dbg(' Target: %s %s' % (when, which))
which = int(which)
self.delete_row_range(ROW_PREFIX, which, latest)
self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def parse(r):
rk = str(r.row_key, 'utf-8')
g, m = _game_row_key.match(rk).groups()
q = r.cell_value(METADATA, bleak)
return int(g), int(m), float(q)
return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
def require_fresh_games(self, number_fresh):
"""Require a given number of fresh games to be played.
Args:
number_fresh: integer, number of new fresh games needed
Increments the cell `table_state=metadata:wait_for_game_number`
by the given number of games. This will cause
`self.wait_for_fresh_games()` to block until the game
counter has reached this number.
"""
latest = self.latest_game_number
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))
table_state.commit()
print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
def wait_for_fresh_games(self, poll_interval=15.0):
"""Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter` is at least the value
in that cell.
"""
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
last_latest = latest_game
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
def read_wait_cell(self):
"""Read the value of the cell holding the 'wait' value.
Returns the int value of whatever it has, or None if the cell doesn't
exist.
"""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, WAIT_CELL, WAIT_CELL))
if table_state is None:
utils.dbg('No waiting for new games needed; '
'wait_for_game_number column not in table_state')
return None
value = table_state.cell_value(METADATA, WAIT_CELL)
if not value:
utils.dbg('No waiting for new games needed; '
'no value in wait_for_game_number cell '
'in table_state')
return None
return cbt_intvalue(value)
def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
def moves_from_games(self, start_game, end_game, moves, shuffle,
column_family, column):
"""Dataset of samples and/or shuffled moves from game range.
Args:
start_game: integer, the game number at which to start sampling.
end_game: integer, the game number at which to stop sampling.
moves: an integer indicating how many moves should be sampled
from those games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the given game range.
"""
start_row = ROW_PREFIX.format(start_game)
end_row = ROW_PREFIX.format(end_game)
# NOTE: Choose a probability high enough to guarantee at least the
# required number of moves, by using a slightly lower estimate
# of the total moves, then trimming the result.
total_moves = self.count_moves_in_game_range(start_game, end_game)
probability = moves / (total_moves * 0.99)
utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; moves %d' % (
start_row, end_row, total_moves, probability, moves))
ds = self.tf_table.parallel_scan_range(start_row, end_row,
probability=probability,
columns=[(column_family, column)])
if shuffle:
utils.dbg('Doing a complete shuffle of %d moves' % moves)
ds = ds.shuffle(moves)
ds = ds.take(moves)
return ds
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
"""Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
"""
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
def _write_move_counts(self, sess, h):
"""Add move counts from the given histogram to the table.
Used to update the move counts in an existing table. Should
not be needed except for backfill or repair.
Args:
sess: TF session to use for doing a Bigtable write; the write itself
goes through `self.tf_table`, the TF Cloud Bigtable table.
h: a dictionary keyed by game row prefix ("g_0023561") whose values
are the move counts for each game.
"""
def gen():
for k, v in h.items():
# The keys in the histogram may be of type 'bytes'
k = str(k, 'utf-8')
vs = str(v)
yield (k.replace('g_', 'ct_') + '_%d' % v, vs)
yield (k + '_m_000', vs)
mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string))
wr_op = self.tf_table.write(mc,
column_families=[METADATA],
columns=[MOVE_COUNT])
sess.run(wr_op)
def update_move_counts(self, start_game, end_game, interval=1000):
"""Used to update the move_count cell for older games.
Should not be needed except for backfill or repair.
move_count cells will be updated in both g_<game_id>_m_000 rows
and ct_<game_id>_<move_count> rows.
"""
for g in range(start_game, end_game, interval):
with tf.Session() as sess:
start_row = ROW_PREFIX.format(g)
end_row = ROW_PREFIX.format(g + interval)
print('Range:', start_row, end_row)
start_time = time.time()
ds = self.tf_table.keys_by_range_dataset(start_row, end_row)
h = _histogram_move_keys_by_game(sess, ds)
self._write_move_counts(sess, h)
end_time = time.time()
elapsed = end_time - start_time
print(' games/sec:', len(h)/elapsed)
def set_fresh_watermark(game_queue, count_from, window_size,
fresh_fraction=0.05, minimum_fresh=20000):
"""Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games already played is measured relative to the 'count_from' parameter.
Args:
game_queue: a GameQueue object whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
"""
already_played = game_queue.latest_game_number - count_from
print("== already_played: ", already_played, flush=True)
if window_size > count_from: # handle the case where the window is not yet 'full'
game_queue.require_fresh_games(int(minimum_fresh * .9))
else:
num_to_play = max(0, math.ceil(window_size * .9 * fresh_fraction) - already_played)
print("== Num to play: ", num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play)
def mix_by_decile(games, moves, deciles=9):
"""Compute a mix of regular and calibration games by decile.
deciles should be an integer between 0 and 10 inclusive.
"""
assert 0 <= deciles <= 10
# The prefixes and suffixes below have the following meanings:
# ct_: count
# fr_: fraction
# _r: resign (ordinary)
# _nr: no-resign
ct_total = 10
lesser = ct_total - math.floor(deciles)
greater = ct_total - lesser
ct_r, ct_nr = greater, lesser
fr_r = ct_r / ct_total
fr_nr = ct_nr / ct_total
games_r = math.ceil(games * fr_r)
moves_r = math.ceil(moves * fr_r)
games_c = math.floor(games * fr_nr)
moves_c = math.floor(moves * fr_nr)
selection = np.array([0] * ct_r + [1] * ct_nr, dtype=np.int64)
return GameMix(games_r, moves_r,
games_c, moves_c,
selection)
def get_unparsed_moves_from_last_n_games(games, games_nr, n,
moves=2**21,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from the last N games.
Args:
games, games_nr: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
n: an integer indicating how many past games should be sourced.
moves: an integer indicating how many moves should be sampled
from those N games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the last `n` games in the table.
"""
mix = mix_by_decile(n, moves, 9)
resign = games.moves_from_last_n_games(
mix.games_r,
mix.moves_r,
shuffle,
column_family, column)
no_resign = games_nr.moves_from_last_n_games(
mix.games_c,
mix.moves_c,
shuffle,
column_family, column)
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, no_resign], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def get_unparsed_moves_from_games(games_r, games_c,
start_r, start_c,
mix,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from a given start point.
Args:
games_r, games_c: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
start_r: an integer indicating the game number to start at in games_r.
start_c: an integer indicating the game number to start at in games_c.
mix: the result of mix_by_decile()
shuffle: if True, shuffle the selected move examples.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than the moves implied by `mix`,
sampled randomly from the game ranges implied.
"""
resign = games_r.moves_from_games(
start_r, start_r + mix.games_r, mix.moves_r, shuffle, column_family, column)
calibrated = games_c.moves_from_games(
start_c, start_c + mix.games_c, mix.moves_c, shuffle, column_family, column)
moves = mix.moves_r + mix.moves_c
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, calibrated], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8):
"""Count and return all the elements in the given dataset.
Debugging function. The elements in a dataset cannot be counted
without enumerating all of them. By counting in batch and in
parallel, this method allows rapid traversal of the dataset.
Args:
ds: The dataset whose elements should be counted.
batch_size: the number of elements to count at a time.
parallel_batch: how many batches to count in parallel.
Returns:
The number of elements in the dataset.
"""
with tf.Session() as sess:
dsc = ds.apply(tf.data.experimental.enumerate_dataset())
dsc = dsc.apply(tf.data.experimental.map_and_batch(
lambda c, v: c, batch_size, num_parallel_batches=parallel_batch))
iterator = dsc.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
counted = 0
try:
while True:
# The numbers in the tensors are 0-based indices,
# so add 1 to get the number counted.
counted = sess.run(tf.reduce_max(get_next)) + 1
utils.dbg('Counted so far: %d' % counted)
except tf.errors.OutOfRangeError:
pass
utils.dbg('Counted total: %d' % counted)
return counted
|
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
|
identifier_body
|
bigtable_input.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
flags.DEFINE_string('cbt_project', None,
'The project used to connect to the cloud bigtable ')
# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
'The identifier of the cloud bigtable instance in cbt_project')
# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
'The table within the cloud bigtable instance to use')
FLAGS = flags.FLAGS
# Constants
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'
# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"
# Name of model
MODEL_NAME = b'model'
# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100
# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'
# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'
# Patterns
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')
# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
'BigtableSpec',
['project', 'instance', 'table'])
# Information needed to create a mix of two Game queues.
# r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
'GameMix',
['games_r', 'moves_r',
'games_c', 'moves_c',
'selection'])
def cbt_intvalue(value):
"""Decode a big-endian uint64.
Cloud Bigtable stores integers as big-endian uint64,
and performs this translation when integers are being
set. But when being read, the values need to be
decoded.
"""
return int(struct.unpack('>q', value)[0])
def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
"""Given a dataset of key names, return a histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
"""
ds = ds.batch(batch_size)
# Turns 'g_0000001234_m_133' into 'g_0000001234'
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
# NOTE: Cannot be truly sure the count is right till the end.
return h
def _game_keys_as_array(ds):
"""Turn keys of a Bigtable dataset into an array.
Take g_GGG_m_MMM and create GGG.MMM numbers.
Valuable when visualizing the distribution of a given dataset in
the game keyspace.
"""
ds = ds.map(lambda row_key, cell: row_key)
# want 'g_0000001234_m_133' to become '0000001234.133' and so forth
ds = ds.map(lambda x:
tf.strings.to_number(tf.strings.substr(x, 2, 10) +
'.' +
tf.strings.substr(x, 15, 3),
out_type=tf.float64))
return make_single_array(ds)
def _delete_rows(args):
"""Delete the given row keys from the given Bigtable.
The args are (BigtableSpec, row_keys), but are passed
as a single argument in order to work with
multiprocessing.Pool.map. This is also the reason why this is a
top-level function instead of a method.
"""
btspec, row_keys = args
bt_table = bigtable.Client(btspec.project).instance(
btspec.instance).table(btspec.table)
rows = [bt_table.row(k) for k in row_keys]
for r in rows:
r.delete()
bt_table.mutate_rows(rows)
return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the columns `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes is writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
def trim_games_since(self, t, max_games=500000):
"""Trim off the games since the given time.
Search back no more than max_games for this time point, locate
the game there, and remove all games since that game,
resetting the latest game counter.
If `t` is a `datetime.timedelta`, then the target time will be
found by subtracting that delta from the time of the last
game. Otherwise, it will be the target time.
"""
latest = self.latest_game_number
earliest = int(latest - max_games)
gbt = self.games_by_time(earliest, latest)
if not gbt:
utils.dbg('No games between %d and %d' % (earliest, latest))
return
most_recent = gbt[-1]
if isinstance(t, datetime.timedelta):
target = most_recent[0] - t
else:
target = t
i = bisect.bisect_right(gbt, (target,))
if i >= len(gbt):
utils.dbg('Last game is already at %s' % gbt[-1][0])
return
when, which = gbt[i]
utils.dbg('Most recent: %s %s' % most_recent)
utils.dbg(' Target: %s %s' % (when, which))
which = int(which)
self.delete_row_range(ROW_PREFIX, which, latest)
self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def parse(r):
rk = str(r.row_key, 'utf-8')
g, m = _game_row_key.match(rk).groups()
q = r.cell_value(METADATA, bleak)
return int(g), int(m), float(q)
return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
def require_fresh_games(self, number_fresh):
"""Require a given number of fresh games to be played.
Args:
number_fresh: integer, number of new fresh games needed
Increments the cell `table_state=metadata:wait_for_game_number`
by the given number of games. This will cause
`self.wait_for_fresh_games()` to block until the game
counter has reached this number.
"""
latest = self.latest_game_number
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))
table_state.commit()
print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
def wait_for_fresh_games(self, poll_interval=15.0):
"""Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter` is at least the value
in that cell.
"""
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
last_latest = latest_game
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
def read_wait_cell(self):
"""Read the value of the cell holding the 'wait' value.
Returns the int value of whatever it has, or None if the cell doesn't
exist.
"""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, WAIT_CELL, WAIT_CELL))
if table_state is None:
utils.dbg('No waiting for new games needed; '
'wait_for_game_number column not in table_state')
return None
value = table_state.cell_value(METADATA, WAIT_CELL)
if not value:
utils.dbg('No waiting for new games needed; '
'no value in wait_for_game_number cell '
'in table_state')
return None
return cbt_intvalue(value)
def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
def moves_from_games(self, start_game, end_game, moves, shuffle,
column_family, column):
"""Dataset of samples and/or shuffled moves from game range.
Args:
start_game: integer, the game number at which to start sampling.
end_game: integer, the game number at which to stop sampling.
moves: an integer indicating how many moves should be sampled
from those games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the given game range.
"""
start_row = ROW_PREFIX.format(start_game)
end_row = ROW_PREFIX.format(end_game)
# NOTE: Choose a probability high enough to guarantee at least the
# required number of moves, by using a slightly lower estimate
# of the total moves, then trimming the result.
total_moves = self.count_moves_in_game_range(start_game, end_game)
probability = moves / (total_moves * 0.99)
utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; moves %d' % (
start_row, end_row, total_moves, probability, moves))
ds = self.tf_table.parallel_scan_range(start_row, end_row,
probability=probability,
columns=[(column_family, column)])
if shuffle:
|
ds = ds.take(moves)
return ds
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
"""Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
"""
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
def _write_move_counts(self, sess, h):
"""Add move counts from the given histogram to the table.
Used to update the move counts in an existing table. Should
not be needed except for backfill or repair.
Args:
sess: TF session to use for doing a Bigtable write; the write itself
goes through `self.tf_table`, the TF Cloud Bigtable table.
h: a dictionary keyed by game row prefix ("g_0023561") whose values
are the move counts for each game.
"""
def gen():
for k, v in h.items():
# The keys in the histogram may be of type 'bytes'
k = str(k, 'utf-8')
vs = str(v)
yield (k.replace('g_', 'ct_') + '_%d' % v, vs)
yield (k + '_m_000', vs)
mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string))
wr_op = self.tf_table.write(mc,
column_families=[METADATA],
columns=[MOVE_COUNT])
sess.run(wr_op)
def update_move_counts(self, start_game, end_game, interval=1000):
"""Used to update the move_count cell for older games.
Should not be needed except for backfill or repair.
move_count cells will be updated in both g_<game_id>_m_000 rows
and ct_<game_id>_<move_count> rows.
"""
for g in range(start_game, end_game, interval):
with tf.Session() as sess:
start_row = ROW_PREFIX.format(g)
end_row = ROW_PREFIX.format(g + interval)
print('Range:', start_row, end_row)
start_time = time.time()
ds = self.tf_table.keys_by_range_dataset(start_row, end_row)
h = _histogram_move_keys_by_game(sess, ds)
self._write_move_counts(sess, h)
end_time = time.time()
elapsed = end_time - start_time
print(' games/sec:', len(h)/elapsed)
def set_fresh_watermark(game_queue, count_from, window_size,
fresh_fraction=0.05, minimum_fresh=20000):
"""Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games already played is measured relative to the `count_from` parameter.
Args:
game_queue: a GameQueue object whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
"""
already_played = game_queue.latest_game_number - count_from
print("== already_played: ", already_played, flush=True)
if window_size > count_from:  # Handle the case where the window is not yet 'full'
game_queue.require_fresh_games(int(minimum_fresh * .9))
else:
num_to_play = max(0, math.ceil(window_size * .9 * fresh_fraction) - already_played)
print("== Num to play: ", num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play)
def mix_by_decile(games, moves, deciles=9):
"""Compute a mix of regular and calibration games by decile.
deciles should be an integer between 0 and 10 inclusive.
"""
assert 0 <= deciles <= 10
# The prefixes and suffixes below have the following meanings:
# ct_: count
# fr_: fraction
# _r: resign (ordinary)
# _nr: no-resign
ct_total = 10
lesser = ct_total - math.floor(deciles)
greater = ct_total - lesser
ct_r, ct_nr = greater, lesser
fr_r = ct_r / ct_total
fr_nr = ct_nr / ct_total
games_r = math.ceil(games * fr_r)
moves_r = math.ceil(moves * fr_r)
games_c = math.floor(games * fr_nr)
moves_c = math.floor(moves * fr_nr)
selection = np.array([0] * ct_r + [1] * ct_nr, dtype=np.int64)
return GameMix(games_r, moves_r,
games_c, moves_c,
selection)
def get_unparsed_moves_from_last_n_games(games, games_nr, n,
moves=2**21,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from the last N games.
Args:
games, games_nr: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
n: an integer indicating how many past games should be sourced.
moves: an integer indicating how many moves should be sampled
from those N games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the last `n` games in the table.
"""
mix = mix_by_decile(n, moves, 9)
resign = games.moves_from_last_n_games(
mix.games_r,
mix.moves_r,
shuffle,
column_family, column)
no_resign = games_nr.moves_from_last_n_games(
mix.games_c,
mix.moves_c,
shuffle,
column_family, column)
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, no_resign], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def get_unparsed_moves_from_games(games_r, games_c,
start_r, start_c,
mix,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from a given start point.
Args:
games_r, games_c: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
start_r: an integer indicating the game number to start at in games_r.
start_c: an integer indicating the game number to start at in games_c.
mix: the result of mix_by_decile()
shuffle: if True, shuffle the selected move examples.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than the moves implied by `mix`,
sampled randomly from the game ranges implied.
"""
resign = games_r.moves_from_games(
start_r, start_r + mix.games_r, mix.moves_r, shuffle, column_family, column)
calibrated = games_c.moves_from_games(
start_c, start_c + mix.games_c, mix.moves_c, shuffle, column_family, column)
moves = mix.moves_r + mix.moves_c
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, calibrated], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8):
"""Count and return all the elements in the given dataset.
Debugging function. The elements in a dataset cannot be counted
without enumerating all of them. By counting in batch and in
parallel, this method allows rapid traversal of the dataset.
Args:
ds: The dataset whose elements should be counted.
batch_size: the number of elements to count at a time.
parallel_batch: how many batches to count in parallel.
Returns:
The number of elements in the dataset.
"""
with tf.Session() as sess:
dsc = ds.apply(tf.data.experimental.enumerate_dataset())
dsc = dsc.apply(tf.data.experimental.map_and_batch(
lambda c, v: c, batch_size, num_parallel_batches=parallel_batch))
iterator = dsc.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
counted = 0
try:
while True:
# The numbers in the tensors are 0-based indices,
# so add 1 to get the number counted.
counted = sess.run(tf.reduce_max(get_next)) + 1
utils.dbg('Counted so far: %d' % counted)
except tf.errors.OutOfRangeError:
pass
utils.dbg('Counted total: %d' % counted)
return counted
|
utils.dbg('Doing a complete shuffle of %d moves' % moves)
ds = ds.shuffle(moves)
|
conditional_block
|
bigtable_input.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
flags.DEFINE_string('cbt_project', None,
'The project used to connect to the cloud bigtable ')
# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
'The identifier of the cloud bigtable instance in cbt_project')
# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
'The table within the cloud bigtable instance to use')
FLAGS = flags.FLAGS
# Constants
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'
# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"
# Name of model
MODEL_NAME = b'model'
# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100
# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'
# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'
# Patterns
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')
# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
'BigtableSpec',
['project', 'instance', 'table'])
# Information needed to create a mix of two Game queues.
# r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
'GameMix',
['games_r', 'moves_r',
'games_c', 'moves_c',
'selection'])
def cbt_intvalue(value):
"""Decode a big-endian uint64.
Cloud Bigtable stores integers as big-endian uint64,
and performs this translation when integers are being
set. But when being read, the values need to be
decoded.
"""
return int(struct.unpack('>q', value)[0])
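# --- Illustrative sketch, not part of the original module: a round-trip of
# the big-endian 64-bit encoding that cbt_intvalue() decodes. The literal
# 12345 is an arbitrary example value.
def _example_cbt_intvalue_roundtrip():
    encoded = struct.pack('>q', 12345)  # 8-byte big-endian integer
    return cbt_intvalue(encoded) == 12345  # expected: True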
def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, i.e. the length of its
`output_shapes` and `output_types` is 1, and its output shape must be
`[]`, meaning every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype)
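# --- Illustrative sketch, not part of the original module: make_single_array()
# applied to a small in-memory dataset of scalars (the values 0..4 are
# arbitrary example data).
def _example_make_single_array():
    ds = tf.data.Dataset.from_tensor_slices(np.arange(5, dtype=np.int64))
    return make_single_array(ds)  # expected: array([0, 1, 2, 3, 4])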
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
"""Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
"""
ds = ds.batch(batch_size)
# Turns 'g_0000001234_m_133' into 'g_0000001234'
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
# NOTE: Cannot be truly sure the count is right till the end.
return h
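# --- Illustrative sketch, not part of the original module: the same per-game
# grouping done in plain Python on a few hypothetical row keys, to show why
# truncating keys to 12 characters yields one bucket per game.
def _example_histogram_move_keys():
    keys = [b'g_0000001234_m_001', b'g_0000001234_m_002', b'g_0000005678_m_001']
    return collections.Counter(k[:12] for k in keys)
    # expected: Counter({b'g_0000001234': 2, b'g_0000005678': 1})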
def _game_keys_as_array(ds):
"""Turn keys of a Bigtable dataset into an array.
Take g_GGG_m_MMM and create GGG.MMM numbers.
Valuable when visualizing the distribution of a given dataset in
the game keyspace.
"""
ds = ds.map(lambda row_key, cell: row_key)
# turn 'g_0000001234_m_133' into '0000001234.133' and so forth
ds = ds.map(lambda x:
tf.strings.to_number(tf.strings.substr(x, 2, 10) +
'.' +
tf.strings.substr(x, 15, 3),
out_type=tf.float64))
return make_single_array(ds)
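# --- Illustrative sketch, not part of the original module: the key-to-number
# conversion performed above, in plain Python for one hypothetical row key.
def _example_game_key_to_number(row_key='g_0000001234_m_133'):
    return float(row_key[2:12] + '.' + row_key[15:18])  # expected: 1234.133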
def _delete_rows(args):
"""Delete the given row keys from the given Bigtable.
The args are (BigtableSpec, row_keys), but are passed
as a single argument in order to work with
multiprocessing.Pool.map. This is also the reason why this is a
top-level function instead of a method.
"""
btspec, row_keys = args
bt_table = bigtable.Client(btspec.project).instance(
btspec.instance).table(btspec.table)
rows = [bt_table.row(k) for k in row_keys]
for r in rows:
r.delete()
bt_table.mutate_rows(rows)
return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the column `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes are writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
def delete_row_range(self, format_str, start_game, end_game):
"""Delete rows related to the given game range.
Args:
format_str: a string to `.format()` by the game numbers
in order to create the row prefixes.
start_game: the starting game number of the deletion.
end_game: the ending game number of the deletion.
"""
row_keys = make_single_array(
self.tf_table.keys_by_range_dataset(
format_str.format(start_game),
format_str.format(end_game)))
row_keys = list(row_keys)
if not row_keys:
utils.dbg('No rows left for games %d..%d' % (
start_game, end_game))
return
utils.dbg('Deleting %d rows: %s..%s' % (
len(row_keys), row_keys[0], row_keys[-1]))
# Reverse the keys so that the queue is left in a more
# sensible end state if you change your mind (say, due to a
# mistake in the timestamp) and abort the process: there will
# be a bit trimmed from the end, rather than a bit
# trimmed out of the middle.
row_keys.reverse()
total_keys = len(row_keys)
utils.dbg('Deleting total of %d keys' % total_keys)
concurrency = min(MAX_BT_CONCURRENCY,
multiprocessing.cpu_count() * 2)
with multiprocessing.Pool(processes=concurrency) as pool:
batches = []
with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
row_keys):
pbar.update(len(b))
batches.append((self.btspec, b))
if len(batches) >= concurrency:
pool.map(_delete_rows, batches)
batches = []
pool.map(_delete_rows, batches)
batches = []
def trim_games_since(self, t, max_games=500000):
"""Trim off the games since the given time.
Search back no more than max_games for this time point, locate
the game there, and remove all games since that game,
resetting the latest game counter.
If `t` is a `datetime.timedelta`, then the target time will be
found by subtracting that delta from the time of the last
game. Otherwise, it will be the target time.
"""
latest = self.latest_game_number
earliest = int(latest - max_games)
gbt = self.games_by_time(earliest, latest)
if not gbt:
utils.dbg('No games between %d and %d' % (earliest, latest))
return
most_recent = gbt[-1]
if isinstance(t, datetime.timedelta):
target = most_recent[0] - t
else:
target = t
i = bisect.bisect_right(gbt, (target,))
if i >= len(gbt):
utils.dbg('Last game is already at %s' % gbt[-1][0])
return
when, which = gbt[i]
utils.dbg('Most recent: %s %s' % most_recent)
utils.dbg(' Target: %s %s' % (when, which))
which = int(which)
self.delete_row_range(ROW_PREFIX, which, latest)
self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def parse(r):
rk = str(r.row_key, 'utf-8')
g, m = _game_row_key.match(rk).groups()
q = r.cell_value(METADATA, bleak)
return int(g), int(m), float(q)
return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
def require_fresh_games(self, number_fresh):
"""Require a given number of fresh games to be played.
Args:
number_fresh: integer, number of new fresh games needed
Increments the cell `table_state=metadata:wait_for_game_number`
by the given number of games. This will cause
`self.wait_for_fresh_games()` to block until the game
counter has reached this number.
"""
latest = self.latest_game_number
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))
table_state.commit()
print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
def wait_for_fresh_games(self, poll_interval=15.0):
"""Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter` is at least the value
in that cell.
"""
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
|
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
def read_wait_cell(self):
"""Read the value of the cell holding the 'wait' value,
Returns the int value of whatever it has, or None if the cell doesn't
exist.
"""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, WAIT_CELL, WAIT_CELL))
if table_state is None:
utils.dbg('No waiting for new games needed; '
'wait_for_game_number column not in table_state')
return None
value = table_state.cell_value(METADATA, WAIT_CELL)
if not value:
utils.dbg('No waiting for new games needed; '
'no value in wait_for_game_number cell '
'in table_state')
return None
return cbt_intvalue(value)
def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
def moves_from_games(self, start_game, end_game, moves, shuffle,
column_family, column):
"""Dataset of samples and/or shuffled moves from game range.
Args:
start_game: an integer indicating the first game in the range.
end_game: an integer indicating the game at which the range ends.
moves: an integer indicating how many moves should be sampled
from the games in that range.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the games in the given range.
"""
start_row = ROW_PREFIX.format(start_game)
end_row = ROW_PREFIX.format(end_game)
# NOTE: Choose a probability high enough to guarantee at least the
# required number of moves, by using a slightly lower estimate
# of the total moves, then trimming the result.
total_moves = self.count_moves_in_game_range(start_game, end_game)
probability = moves / (total_moves * 0.99)
utils.dbg('Row range: %s - %s; total moves: %d; probability %.3f; moves %d' % (
start_row, end_row, total_moves, probability, moves))
ds = self.tf_table.parallel_scan_range(start_row, end_row,
probability=probability,
columns=[(column_family, column)])
if shuffle:
utils.dbg('Doing a complete shuffle of %d moves' % moves)
ds = ds.shuffle(moves)
ds = ds.take(moves)
return ds
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
"""Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
"""
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
def _write_move_counts(self, sess, h):
"""Add move counts from the given histogram to the table.
Used to update the move counts in an existing table. Should
not be needed except for backfill or repair.
Args:
sess: TF session to use for doing a Bigtable write.
tf_table: TF Cloud Bigtable to use for writing.
h: a dictionary keyed by game row prefix ("g_0023561") whose values
are the move counts for each game.
"""
def gen():
for k, v in h.items():
# The keys in the histogram may be of type 'bytes'
k = str(k, 'utf-8')
vs = str(v)
yield (k.replace('g_', 'ct_') + '_%d' % v, vs)
yield (k + '_m_000', vs)
mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string))
wr_op = self.tf_table.write(mc,
column_families=[METADATA],
columns=[MOVE_COUNT])
sess.run(wr_op)
def update_move_counts(self, start_game, end_game, interval=1000):
"""Used to update the move_count cell for older games.
Should not be needed except for backfill or repair.
move_count cells will be updated in both g_<game_id>_m_000 rows
and ct_<game_id>_<move_count> rows.
"""
for g in range(start_game, end_game, interval):
with tf.Session() as sess:
start_row = ROW_PREFIX.format(g)
end_row = ROW_PREFIX.format(g + interval)
print('Range:', start_row, end_row)
start_time = time.time()
ds = self.tf_table.keys_by_range_dataset(start_row, end_row)
h = _histogram_move_keys_by_game(sess, ds)
self._write_move_counts(sess, h)
end_time = time.time()
elapsed = end_time - start_time
print(' games/sec:', len(h)/elapsed)
def set_fresh_watermark(game_queue, count_from, window_size,
fresh_fraction=0.05, minimum_fresh=20000):
"""Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games already played is measured relative to the `count_from` parameter.
Args:
game_queue: a GameQueue object whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
"""
already_played = game_queue.latest_game_number - count_from
print("== already_played: ", already_played, flush=True)
if window_size > count_from:  # Handle the case where the window is not yet 'full'
game_queue.require_fresh_games(int(minimum_fresh * .9))
else:
num_to_play = max(0, math.ceil(window_size * .9 * fresh_fraction) - already_played)
print("== Num to play: ", num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play)
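# --- Illustrative sketch, not part of the original module: the arithmetic
# set_fresh_watermark() performs for one hypothetical set of inputs (all the
# numbers below are made up for the example).
def _example_fresh_watermark_math(latest_game=510000, count_from=500000,
                                  window_size=250000, fresh_fraction=0.05):
    already_played = latest_game - count_from                 # 10000
    required = math.ceil(window_size * .9 * fresh_fraction)   # 11250
    return max(0, required - already_played)                  # 1250 games still needed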
def mix_by_decile(games, moves, deciles=9):
"""Compute a mix of regular and calibration games by decile.
deciles should be an integer between 0 and 10 inclusive.
"""
assert 0 <= deciles <= 10
# The prefixes and suffixes below have the following meanings:
# ct_: count
# fr_: fraction
# _r: resign (ordinary)
# _nr: no-resign
ct_total = 10
lesser = ct_total - math.floor(deciles)
greater = ct_total - lesser
ct_r, ct_nr = greater, lesser
fr_r = ct_r / ct_total
fr_nr = ct_nr / ct_total
games_r = math.ceil(games * fr_r)
moves_r = math.ceil(moves * fr_r)
games_c = math.floor(games * fr_nr)
moves_c = math.floor(moves * fr_nr)
selection = np.array([0] * ct_r + [1] * ct_nr, dtype=np.int64)
return GameMix(games_r, moves_r,
games_c, moves_c,
selection)
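# --- Illustrative sketch, not part of the original module: what mix_by_decile()
# returns for 1000 games and 100000 moves at the default 9 deciles (values
# chosen only for the example).
def _example_mix_by_decile():
    mix = mix_by_decile(1000, 100000, 9)
    # expected: games_r=900, moves_r=90000, games_c=100, moves_c=10000,
    # selection=[0]*9 + [1]
    return mix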
def get_unparsed_moves_from_last_n_games(games, games_nr, n,
moves=2**21,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from the last N games.
Args:
games, games_nr: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
n: an integer indicating how many past games should be sourced.
moves: an integer indicating how many moves should be sampled
from those N games.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
shuffle: if True, shuffle the selected move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than `moves` examples, sampled
randomly from the last `n` games in the table.
"""
mix = mix_by_decile(n, moves, 9)
resign = games.moves_from_last_n_games(
mix.games_r,
mix.moves_r,
shuffle,
column_family, column)
no_resign = games_nr.moves_from_last_n_games(
mix.games_c,
mix.moves_c,
shuffle,
column_family, column)
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, no_resign], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
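# --- Illustrative sketch, not part of the original module: how a selection
# vector like the one from mix_by_decile() drives choose_from_datasets() to
# interleave nine regular moves for every calibration move (tiny made-up data).
def _example_choose_interleave():
    regular = tf.data.Dataset.from_tensor_slices(['r'] * 18)
    calibration = tf.data.Dataset.from_tensor_slices(['c'] * 2)
    choice = tf.data.Dataset.from_tensor_slices(
        np.array([0] * 9 + [1], dtype=np.int64)).repeat().take(20)
    return tf.data.experimental.choose_from_datasets([regular, calibration], choice)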
def get_unparsed_moves_from_games(games_r, games_c,
start_r, start_c,
mix,
shuffle=True,
column_family=TFEXAMPLE,
column='example',
values_only=True):
"""Get a dataset of serialized TFExamples from a given start point.
Args:
games_r, games_c: GameQueues of the regular selfplay and calibration
(aka 'no resign') games to sample from.
start_r: an integer indicating the game number to start at in games_r.
start_c: an integer indicating the game number to start at in games_c.
mix: the result of mix_by_decile()
shuffle: if True, shuffle the selected move examples.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
values_only: if True, return only column values, no row keys.
Returns:
A dataset containing no more than the moves implied by `mix`,
sampled randomly from the game ranges implied.
"""
resign = games_r.moves_from_games(
start_r, start_r + mix.games_r, mix.moves_r, shuffle, column_family, column)
calibrated = games_c.moves_from_games(
start_c, start_c + mix.games_c, mix.moves_c, shuffle, column_family, column)
moves = mix.moves_r + mix.moves_c
choice = tf.data.Dataset.from_tensor_slices(mix.selection).repeat().take(moves)
ds = tf.data.experimental.choose_from_datasets([resign, calibrated], choice)
if shuffle:
ds = ds.shuffle(len(mix.selection) * 2)
if values_only:
ds = ds.map(lambda row_name, s: s)
return ds
def count_elements_in_dataset(ds, batch_size=1*1024, parallel_batch=8):
"""Count and return all the elements in the given dataset.
Debugging function. The elements in a dataset cannot be counted
without enumerating all of them. By counting in batch and in
parallel, this method allows rapid traversal of the dataset.
Args:
ds: The dataset whose elements should be counted.
batch_size: the number of elements to count at a time.
parallel_batch: how many batches to count in parallel.
Returns:
The number of elements in the dataset.
"""
with tf.Session() as sess:
dsc = ds.apply(tf.data.experimental.enumerate_dataset())
dsc = dsc.apply(tf.data.experimental.map_and_batch(
lambda c, v: c, batch_size, num_parallel_batches=parallel_batch))
iterator = dsc.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
counted = 0
try:
while True:
# The numbers in the tensors are 0-based indices,
# so add 1 to get the number counted.
counted = sess.run(tf.reduce_max(get_next)) + 1
utils.dbg('Counted so far: %d' % counted)
except tf.errors.OutOfRangeError:
pass
utils.dbg('Counted total: %d' % counted)
return counted
|
last_latest = latest_game
|
random_line_split
|
event.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.core.index.base import IOIndex, Index, ElementNotFoundException
from indico.core.index.adapter import IIndexableByStartDateTime
from indico.util.date_time import utc_timestamp
from BTrees.OOBTree import OOSet, OOBTree
class CategoryEventStartDateIndex(Index):
def __init__(self):
self._container = OOBTree()
# add home category by default
self.add_category('0')
def __getitem__(self, key):
return self._container[key]
def __setitem__(self, key, value):
self._container[key] = value
def getCategory(self, categId, create=False):
if categId not in self._container:
if create:
self.add_category(categId)
else:
raise KeyError(categId)
return self._container[categId]
def add_category(self, categId):
self._container[categId] = IOIndex(IIndexableByStartDateTime)
def index_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
category.index_obj(obj)
def unindex_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
try:
category.unindex_obj(obj)
except ElementNotFoundException:
# some legacy events are not in this index...
pass
def remove_category(self, categId):
del self._container[categId]
def _initializeSubIndex(self, cset):
tsIndex = IOIndex(IIndexableByStartDateTime)
for conf in cset:
tsIndex.index_obj(conf)
return tsIndex
def initialize(self, dbi=None):
from MaKaC.conference import CategoryManager
for cid, categ in CategoryManager()._getIdx().iteritems():
self[cid] = self._initializeSubIndex(categ.conferences)
if dbi:
dbi.commit()
def _check(self, dbi=None):
|
from MaKaC.conference import CategoryManager, ConferenceHolder
confIdx = ConferenceHolder()._getIdx()
categIdx = CategoryManager()._getIdx()
i = 0
for cid, index in self._container.iteritems():
# simple data structure check
for problem in index._check():
yield problem
# consistency with CategoryManager
if cid not in categIdx:
yield "Category '%s' not in CategoryManager" % cid
# consistency with ConferenceHolder
for ts, conf in index.iteritems():
if conf.getId() not in confIdx:
yield "[%s] Conference '%s'(%s) not in ConferenceHolder" \
% (cid, conf.getId(), ts)
if dbi and i % 100 == 99:
dbi.abort()
i += 1
|
identifier_body
|
|
event.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.core.index.base import IOIndex, Index, ElementNotFoundException
from indico.core.index.adapter import IIndexableByStartDateTime
from indico.util.date_time import utc_timestamp
from BTrees.OOBTree import OOSet, OOBTree
class CategoryEventStartDateIndex(Index):
def __init__(self):
self._container = OOBTree()
# add home category by default
self.add_category('0')
def __getitem__(self, key):
return self._container[key]
def __setitem__(self, key, value):
self._container[key] = value
def getCategory(self, categId, create=False):
if categId not in self._container:
if create:
self.add_category(categId)
else:
|
return self._container[categId]
def add_category(self, categId):
self._container[categId] = IOIndex(IIndexableByStartDateTime)
def index_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
category.index_obj(obj)
def unindex_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
try:
category.unindex_obj(obj)
except ElementNotFoundException:
# some legacy events are not in this index...
pass
def remove_category(self, categId):
del self._container[categId]
def _initializeSubIndex(self, cset):
tsIndex = IOIndex(IIndexableByStartDateTime)
for conf in cset:
tsIndex.index_obj(conf)
return tsIndex
def initialize(self, dbi=None):
from MaKaC.conference import CategoryManager
for cid, categ in CategoryManager()._getIdx().iteritems():
self[cid] = self._initializeSubIndex(categ.conferences)
if dbi:
dbi.commit()
def _check(self, dbi=None):
from MaKaC.conference import CategoryManager, ConferenceHolder
confIdx = ConferenceHolder()._getIdx()
categIdx = CategoryManager()._getIdx()
i = 0
for cid, index in self._container.iteritems():
# simple data structure check
for problem in index._check():
yield problem
# consistency with CategoryManager
if cid not in categIdx:
yield "Category '%s' not in CategoryManager" % cid
# consistency with ConferenceHolder
for ts, conf in index.iteritems():
if conf.getId() not in confIdx:
yield "[%s] Conference '%s'(%s) not in ConferenceHolder" \
% (cid, conf.getId(), ts)
if dbi and i % 100 == 99:
dbi.abort()
i += 1
|
raise KeyError(categId)
|
conditional_block
|
event.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.core.index.base import IOIndex, Index, ElementNotFoundException
from indico.core.index.adapter import IIndexableByStartDateTime
from indico.util.date_time import utc_timestamp
from BTrees.OOBTree import OOSet, OOBTree
class CategoryEventStartDateIndex(Index):
def __init__(self):
self._container = OOBTree()
# add home category by default
self.add_category('0')
def __getitem__(self, key):
return self._container[key]
def __setitem__(self, key, value):
self._container[key] = value
def getCategory(self, categId, create=False):
if categId not in self._container:
if create:
self.add_category(categId)
else:
raise KeyError(categId)
return self._container[categId]
def add_category(self, categId):
self._container[categId] = IOIndex(IIndexableByStartDateTime)
def index_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
category.index_obj(obj)
def unindex_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
try:
category.unindex_obj(obj)
except ElementNotFoundException:
# some legacy events are not in this index...
pass
def remove_category(self, categId):
del self._container[categId]
def
|
(self, cset):
tsIndex = IOIndex(IIndexableByStartDateTime)
for conf in cset:
tsIndex.index_obj(conf)
return tsIndex
def initialize(self, dbi=None):
from MaKaC.conference import CategoryManager
for cid, categ in CategoryManager()._getIdx().iteritems():
self[cid] = self._initializeSubIndex(categ.conferences)
if dbi:
dbi.commit()
def _check(self, dbi=None):
from MaKaC.conference import CategoryManager, ConferenceHolder
confIdx = ConferenceHolder()._getIdx()
categIdx = CategoryManager()._getIdx()
i = 0
for cid, index in self._container.iteritems():
# simple data structure check
for problem in index._check():
yield problem
# consistency with CategoryManager
if cid not in categIdx:
yield "Category '%s' not in CategoryManager" % cid
# consistency with ConferenceHolder
for ts, conf in index.iteritems():
if conf.getId() not in confIdx:
yield "[%s] Conference '%s'(%s) not in ConferenceHolder" \
% (cid, conf.getId(), ts)
if dbi and i % 100 == 99:
dbi.abort()
i += 1
|
_initializeSubIndex
|
identifier_name
|
event.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.core.index.base import IOIndex, Index, ElementNotFoundException
from indico.core.index.adapter import IIndexableByStartDateTime
from indico.util.date_time import utc_timestamp
from BTrees.OOBTree import OOSet, OOBTree
class CategoryEventStartDateIndex(Index):
def __init__(self):
self._container = OOBTree()
# add home category by default
self.add_category('0')
def __getitem__(self, key):
return self._container[key]
def __setitem__(self, key, value):
self._container[key] = value
def getCategory(self, categId, create=False):
if categId not in self._container:
if create:
self.add_category(categId)
else:
raise KeyError(categId)
return self._container[categId]
def add_category(self, categId):
self._container[categId] = IOIndex(IIndexableByStartDateTime)
def index_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
category.index_obj(obj)
def unindex_obj(self, obj):
try:
category = self.getCategory(obj.getOwner().getId())
except KeyError:
# some legacy events are in categories that don't exist anymore...
return
try:
category.unindex_obj(obj)
except ElementNotFoundException:
# some legacy events are not in this index...
pass
def remove_category(self, categId):
|
def _initializeSubIndex(self, cset):
tsIndex = IOIndex(IIndexableByStartDateTime)
for conf in cset:
tsIndex.index_obj(conf)
return tsIndex
def initialize(self, dbi=None):
from MaKaC.conference import CategoryManager
for cid, categ in CategoryManager()._getIdx().iteritems():
self[cid] = self._initializeSubIndex(categ.conferences)
if dbi:
dbi.commit()
def _check(self, dbi=None):
from MaKaC.conference import CategoryManager, ConferenceHolder
confIdx = ConferenceHolder()._getIdx()
categIdx = CategoryManager()._getIdx()
i = 0
for cid, index in self._container.iteritems():
# simple data structure check
for problem in index._check():
yield problem
# consistency with CategoryManager
if cid not in categIdx:
yield "Category '%s' not in CategoryManager" % cid
# consistency with ConferenceHolder
for ts, conf in index.iteritems():
if conf.getId() not in confIdx:
yield "[%s] Conference '%s'(%s) not in ConferenceHolder" \
% (cid, conf.getId(), ts)
if dbi and i % 100 == 99:
dbi.abort()
i += 1
|
del self._container[categId]
|
random_line_split
|
file_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use about_loader;
use mime_classifier::MIMEClassifier;
use mime_guess::guess_mime_type;
use net_traits::ProgressMsg::{Done, Payload};
use net_traits::{LoadConsumer, LoadData, Metadata};
use resource_thread::{CancellationListener, ProgressSender};
use resource_thread::{send_error, start_sending_sniffed_opt};
use std::borrow::ToOwned;
use std::error::Error;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use url::Url;
use util::thread::spawn_named;
static READ_SIZE: usize = 8192;
enum ReadStatus {
Partial(Vec<u8>),
EOF,
}
enum LoadResult {
Cancelled,
Finished,
}
fn read_block(reader: &mut File) -> Result<ReadStatus, String> {
let mut buf = vec![0; READ_SIZE];
match reader.read(&mut buf) {
Ok(0) => Ok(ReadStatus::EOF),
Ok(n) => {
buf.truncate(n);
Ok(ReadStatus::Partial(buf))
}
Err(e) => Err(e.description().to_owned()),
}
}
fn read_all(reader: &mut File, progress_chan: &ProgressSender, cancel_listener: &CancellationListener)
-> Result<LoadResult, String> {
loop {
if cancel_listener.is_cancelled() {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
return Ok(LoadResult::Cancelled);
}
match try!(read_block(reader)) {
ReadStatus::Partial(buf) => progress_chan.send(Payload(buf)).unwrap(),
ReadStatus::EOF => return Ok(LoadResult::Finished),
}
}
}
fn get_progress_chan(load_data: LoadData, file_path: PathBuf,
senders: LoadConsumer, classifier: Arc<MIMEClassifier>, buf: &[u8])
-> Result<ProgressSender, ()> {
let mut metadata = Metadata::default(load_data.url);
let mime_type = guess_mime_type(file_path.as_path());
metadata.set_content_type(Some(&mime_type));
return start_sending_sniffed_opt(senders, metadata, classifier, buf, load_data.context);
}
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MIMEClassifier>,
|
cancel_listener: CancellationListener) {
assert!(&*load_data.url.scheme == "file");
spawn_named("file_loader".to_owned(), move || {
let file_path: Result<PathBuf, ()> = load_data.url.to_file_path();
match file_path {
Ok(file_path) => {
match File::open(&file_path) {
Ok(ref mut reader) => {
if cancel_listener.is_cancelled() {
if let Ok(progress_chan) = get_progress_chan(load_data, file_path,
senders, classifier, &[]) {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
}
return;
}
match read_block(reader) {
Ok(ReadStatus::Partial(buf)) => {
let progress_chan = get_progress_chan(load_data, file_path,
senders, classifier, &buf).ok().unwrap();
progress_chan.send(Payload(buf)).unwrap();
let read_result = read_all(reader, &progress_chan, &cancel_listener);
if let Ok(load_result) = read_result {
match load_result {
LoadResult::Cancelled => return,
LoadResult::Finished => progress_chan.send(Done(Ok(()))).unwrap(),
}
}
}
Ok(ReadStatus::EOF) => {
if let Ok(chan) = get_progress_chan(load_data, file_path,
senders, classifier, &[]) {
let _ = chan.send(Done(Ok(())));
}
}
Err(e) => {
send_error(load_data.url, e, senders);
}
};
}
Err(_) => {
// this should be one of the three errors listed in
// http://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.open
// but, we'll go for a "file not found!"
let url = Url::parse("about:not-found").unwrap();
let load_data_404 = LoadData::new(load_data.context, url, None);
about_loader::factory(load_data_404, senders, classifier, cancel_listener)
}
}
}
Err(_) => {
send_error(load_data.url, "Could not parse path".to_owned(), senders);
}
}
});
}
|
random_line_split
|
|
file_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use about_loader;
use mime_classifier::MIMEClassifier;
use mime_guess::guess_mime_type;
use net_traits::ProgressMsg::{Done, Payload};
use net_traits::{LoadConsumer, LoadData, Metadata};
use resource_thread::{CancellationListener, ProgressSender};
use resource_thread::{send_error, start_sending_sniffed_opt};
use std::borrow::ToOwned;
use std::error::Error;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use url::Url;
use util::thread::spawn_named;
static READ_SIZE: usize = 8192;
enum ReadStatus {
Partial(Vec<u8>),
EOF,
}
enum LoadResult {
Cancelled,
Finished,
}
fn read_block(reader: &mut File) -> Result<ReadStatus, String> {
let mut buf = vec![0; READ_SIZE];
match reader.read(&mut buf) {
Ok(0) => Ok(ReadStatus::EOF),
Ok(n) => {
buf.truncate(n);
Ok(ReadStatus::Partial(buf))
}
Err(e) => Err(e.description().to_owned()),
}
}
fn read_all(reader: &mut File, progress_chan: &ProgressSender, cancel_listener: &CancellationListener)
-> Result<LoadResult, String> {
loop {
if cancel_listener.is_cancelled() {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
return Ok(LoadResult::Cancelled);
}
match try!(read_block(reader)) {
ReadStatus::Partial(buf) => progress_chan.send(Payload(buf)).unwrap(),
ReadStatus::EOF => return Ok(LoadResult::Finished),
}
}
}
fn get_progress_chan(load_data: LoadData, file_path: PathBuf,
senders: LoadConsumer, classifier: Arc<MIMEClassifier>, buf: &[u8])
-> Result<ProgressSender, ()>
|
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MIMEClassifier>,
cancel_listener: CancellationListener) {
assert!(&*load_data.url.scheme == "file");
spawn_named("file_loader".to_owned(), move || {
let file_path: Result<PathBuf, ()> = load_data.url.to_file_path();
match file_path {
Ok(file_path) => {
match File::open(&file_path) {
Ok(ref mut reader) => {
if cancel_listener.is_cancelled() {
if let Ok(progress_chan) = get_progress_chan(load_data, file_path,
senders, classifier, &[]) {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
}
return;
}
match read_block(reader) {
Ok(ReadStatus::Partial(buf)) => {
let progress_chan = get_progress_chan(load_data, file_path,
senders, classifier, &buf).ok().unwrap();
progress_chan.send(Payload(buf)).unwrap();
let read_result = read_all(reader, &progress_chan, &cancel_listener);
if let Ok(load_result) = read_result {
match load_result {
LoadResult::Cancelled => return,
LoadResult::Finished => progress_chan.send(Done(Ok(()))).unwrap(),
}
}
}
Ok(ReadStatus::EOF) => {
if let Ok(chan) = get_progress_chan(load_data, file_path,
senders, classifier, &[]) {
let _ = chan.send(Done(Ok(())));
}
}
Err(e) => {
send_error(load_data.url, e, senders);
}
};
}
Err(_) => {
// this should be one of the three errors listed in
// http://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.open
// but, we'll go for a "file not found!"
let url = Url::parse("about:not-found").unwrap();
let load_data_404 = LoadData::new(load_data.context, url, None);
about_loader::factory(load_data_404, senders, classifier, cancel_listener)
}
}
}
Err(_) => {
send_error(load_data.url, "Could not parse path".to_owned(), senders);
}
}
});
}
|
{
let mut metadata = Metadata::default(load_data.url);
let mime_type = guess_mime_type(file_path.as_path());
metadata.set_content_type(Some(&mime_type));
return start_sending_sniffed_opt(senders, metadata, classifier, buf, load_data.context);
}
|
identifier_body
|
file_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use about_loader;
use mime_classifier::MIMEClassifier;
use mime_guess::guess_mime_type;
use net_traits::ProgressMsg::{Done, Payload};
use net_traits::{LoadConsumer, LoadData, Metadata};
use resource_thread::{CancellationListener, ProgressSender};
use resource_thread::{send_error, start_sending_sniffed_opt};
use std::borrow::ToOwned;
use std::error::Error;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use url::Url;
use util::thread::spawn_named;
static READ_SIZE: usize = 8192;
enum ReadStatus {
Partial(Vec<u8>),
EOF,
}
enum LoadResult {
Cancelled,
Finished,
}
fn read_block(reader: &mut File) -> Result<ReadStatus, String> {
let mut buf = vec![0; READ_SIZE];
match reader.read(&mut buf) {
Ok(0) => Ok(ReadStatus::EOF),
Ok(n) => {
buf.truncate(n);
Ok(ReadStatus::Partial(buf))
}
Err(e) => Err(e.description().to_owned()),
}
}
fn
|
(reader: &mut File, progress_chan: &ProgressSender, cancel_listener: &CancellationListener)
-> Result<LoadResult, String> {
loop {
if cancel_listener.is_cancelled() {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
return Ok(LoadResult::Cancelled);
}
match try!(read_block(reader)) {
ReadStatus::Partial(buf) => progress_chan.send(Payload(buf)).unwrap(),
ReadStatus::EOF => return Ok(LoadResult::Finished),
}
}
}
fn get_progress_chan(load_data: LoadData, file_path: PathBuf,
senders: LoadConsumer, classifier: Arc<MIMEClassifier>, buf: &[u8])
-> Result<ProgressSender, ()> {
let mut metadata = Metadata::default(load_data.url);
let mime_type = guess_mime_type(file_path.as_path());
metadata.set_content_type(Some(&mime_type));
return start_sending_sniffed_opt(senders, metadata, classifier, buf, load_data.context);
}
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MIMEClassifier>,
cancel_listener: CancellationListener) {
assert!(&*load_data.url.scheme == "file");
spawn_named("file_loader".to_owned(), move || {
let file_path: Result<PathBuf, ()> = load_data.url.to_file_path();
match file_path {
Ok(file_path) => {
match File::open(&file_path) {
Ok(ref mut reader) => {
if cancel_listener.is_cancelled() {
if let Ok(progress_chan) = get_progress_chan(load_data, file_path,
senders, classifier, &[]) {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
}
return;
}
match read_block(reader) {
Ok(ReadStatus::Partial(buf)) => {
let progress_chan = get_progress_chan(load_data, file_path,
senders, classifier, &buf).ok().unwrap();
progress_chan.send(Payload(buf)).unwrap();
let read_result = read_all(reader, &progress_chan, &cancel_listener);
if let Ok(load_result) = read_result {
match load_result {
LoadResult::Cancelled => return,
LoadResult::Finished => progress_chan.send(Done(Ok(()))).unwrap(),
}
}
}
Ok(ReadStatus::EOF) => {
if let Ok(chan) = get_progress_chan(load_data, file_path,
senders, classifier, &[]) {
let _ = chan.send(Done(Ok(())));
}
}
Err(e) => {
send_error(load_data.url, e, senders);
}
};
}
Err(_) => {
// this should be one of the three errors listed in
// http://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.open
// but, we'll go for a "file not found!"
let url = Url::parse("about:not-found").unwrap();
let load_data_404 = LoadData::new(load_data.context, url, None);
about_loader::factory(load_data_404, senders, classifier, cancel_listener)
}
}
}
Err(_) => {
send_error(load_data.url, "Could not parse path".to_owned(), senders);
}
}
});
}
|
read_all
|
identifier_name
|
inward-entries.client.controller.js
|
'use strict';
// InwardEntry controller
angular.module('inward-entries').controller('InwardEntriesController', ['$scope', '$stateParams', '$location', 'Authentication', 'InwardEntries',
function($scope, $stateParams, $location, Authentication, InwardEntries) {
$scope.authentication = Authentication;
// Create new InwardEntry
$scope.create = function() {
// Create new InwardEntry object
var inwardentry = new InwardEntries ({
receivedFrom: this.receivedFrom,
grnno: this.grnno,
date: this.date,
deliveryChallanNo: this.deliveryChallanNo,
sNo: this.sNo,
nameoftheItem: this.nameoftheItem,
uom: this.uom,
rate: this.rate,
received: this.received,
jobNo: this.jobNo,
created: Date.now
});
// Redirect after save
inwardentry.$save(function(response) {
$location.path('inward-entries/' + response._id);
// Clear form fields
$scope.name = '';
}, function(errorResponse) {
$scope.error = errorResponse.data.message;
});
};
// Remove existing InwardEntry
$scope.remove = function(inwardentry) {
if ( inwardentry ) {
inwardentry.$remove();
for (var i in $scope.inwardentries) {
if ($scope.inwardentries[i] === inwardentry)
|
}
} else {
$scope.inwardentry.$remove(function() {
$location.path('inward-entries');
});
}
};
// Update existing InwardEntry
$scope.update = function() {
var inwardentry = $scope.inwardentry;
inwardentry.$update(function() {
$location.path('inward-entries/' + inwardentry._id);
}, function(errorResponse) {
$scope.error = errorResponse.data.message;
});
};
// Find a list of InwardEntry
$scope.find = function() {
$scope.inwardentries = InwardEntries.query();
};
// Find existing InwardEntry
$scope.findOne = function() {
InwardEntries.get({
inwardEntryId: $stateParams.inwardEntryId
})
.$promise.then(function(data) {
data.date = moment(data.date).format('YYYY-MM-DD');
$scope.inwardentry = data;
}, function(reason) {
console.log('Failed: ' + reason);
});
};
}
]);
|
{
$scope.inwardentries.splice(i, 1);
}
|
conditional_block
|
inward-entries.client.controller.js
|
'use strict';
// InwardEntry controller
angular.module('inward-entries').controller('InwardEntriesController', ['$scope', '$stateParams', '$location', 'Authentication', 'InwardEntries',
function($scope, $stateParams, $location, Authentication, InwardEntries) {
$scope.authentication = Authentication;
// Create new InwardEntry
$scope.create = function() {
// Create new InwardEntry object
var inwardentry = new InwardEntries ({
receivedFrom: this.receivedFrom,
grnno: this.grnno,
date: this.date,
deliveryChallanNo: this.deliveryChallanNo,
sNo: this.sNo,
nameoftheItem: this.nameoftheItem,
uom: this.uom,
rate: this.rate,
received: this.received,
jobNo: this.jobNo,
created: Date.now
});
// Redirect after save
inwardentry.$save(function(response) {
$location.path('inward-entries/' + response._id);
// Clear form fields
$scope.name = '';
}, function(errorResponse) {
$scope.error = errorResponse.data.message;
});
};
// Remove existing InwardEntry
$scope.remove = function(inwardentry) {
if ( inwardentry ) {
inwardentry.$remove();
|
for (var i in $scope.inwardentries) {
if ($scope.inwardentries[i] === inwardentry) {
$scope.inwardentries.splice(i, 1);
}
}
} else {
$scope.inwardentry.$remove(function() {
$location.path('inward-entries');
});
}
};
// Update existing InwardEntry
$scope.update = function() {
var inwardentry = $scope.inwardentry;
inwardentry.$update(function() {
$location.path('inward-entries/' + inwardentry._id);
}, function(errorResponse) {
$scope.error = errorResponse.data.message;
});
};
// Find a list of InwardEntry
$scope.find = function() {
$scope.inwardentries = InwardEntries.query();
};
// Find existing InwardEntry
$scope.findOne = function() {
InwardEntries.get({
inwardEntryId: $stateParams.inwardEntryId
})
.$promise.then(function(data) {
data.date = moment(data.date).format('YYYY-MM-DD');
$scope.inwardentry = data;
}, function(reason) {
console.log('Failed: ' + reason);
});
};
}
]);
|
random_line_split
|
|
iptables_manager.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class
|
(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components. We set it as the last
# chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
# fewer than 3 lines means this is a fake/empty iptables listing
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
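# A minimal usage sketch, assuming a stubbed-out execute callable so nothing is
# actually run as root; the chain and rule names below are purely illustrative.
def _fake_execute(args, process_input=None, root_helper=None):
    return ''  # pretend iptables-save produced an empty ruleset
manager = IptablesManager(_execute=_fake_execute, state_less=True)
manager.ipv4['filter'].add_chain('demo')
manager.ipv4['filter'].add_rule('demo', '-p tcp --dport 22 -j ACCEPT')
# '$demo' is expanded to '<binary_name>-demo' as described in add_rule()
manager.ipv4['filter'].add_rule('INPUT', '-j $demo')
manager.apply()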
|
IptablesTable
|
identifier_name
|
iptables_manager.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
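# A small sketch of the name wrapping that IptablesTable applies, assuming an
# illustrative binary name; both chain names fit the 11-character wrapped limit.
table = IptablesTable(binary_name='neutron-l3-agent')
table.add_chain('snat')
table.add_rule('snat', '-j $float-snat')
print(table.rules[0])
# -> -A neutron-l3-agent-snat -j neutron-l3-agent-float-snat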
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components. We set it as the last
# chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
|
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
# fewer than 3 lines means this is a fake/empty iptables listing
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
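# Sketch of the chain bookkeeping that IptablesManager.__init__ leaves behind in
# the default (stateful) case; the no-op execute stub is an assumption so that
# nothing is executed while building the example.
mgr = IptablesManager(_execute=lambda *args, **kwargs: '')
assert 'neutron-filter-top' in mgr.ipv4['filter'].unwrapped_chains
assert {'INPUT', 'OUTPUT', 'FORWARD', 'local'} <= mgr.ipv4['filter'].chains
assert {'snat', 'float-snat'} <= mgr.ipv4['nat'].chains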
|
def defer_apply_on(self):
self.iptables_apply_deferred = True
|
random_line_split
|
iptables_manager.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components. We set it as the last
# chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
|
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
# fewer than 3 lines means this is a fake/empty iptables listing
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
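# Sketch of how get_traffic_counters() sums per-rule counters, feeding it a
# canned 'iptables -t filter -L ... -n -v -x' listing (an assumed example of the
# output format) instead of running any command.
_canned = (
    'Chain neutron-l3-agent-demo (1 references)\n'
    '    pkts      bytes target     prot opt in     out     source               destination\n'
    '       3      180 ACCEPT     tcp  --  *      *       0.0.0.0/0            0.0.0.0/0\n'
    '       7      420 ACCEPT     tcp  --  *      *       0.0.0.0/0            0.0.0.0/0\n'
)
mgr2 = IptablesManager(_execute=lambda *args, **kwargs: _canned, state_less=True)
mgr2.ipv4['filter'].add_chain('demo')
assert mgr2.get_traffic_counters('demo') == {'pkts': 10, 'bytes': 600}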
|
self.iptables_apply_deferred = False
self._apply()
|
identifier_body
|
iptables_manager.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
import re
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# A length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
|
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components. We set it as the last
# chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
            # And then we add a float-snat chain and make it the first jump in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
return self._apply_synchronized()
finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
            # only a fake/empty iptables dump is shorter than 3 lines
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
        # the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
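        # Illustration (editor addition; the example rule strings are hypothetical):
        # if new_filter holds '[0:0] -A foo -j ACCEPT' and, later on, the same rule
        # as '[5:60] -A foo -j ACCEPT', reversing and filtering keeps only the
        # '[5:60]' entry, i.e. the counters of the last occurrence survive.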
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
        # clear remove_rules in place (removing items while iterating would skip entries)
        del remove_rules[:]
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
|
self.execute = _execute
|
conditional_block
|
p081.py
|
# https://projecteuler.net/problem=81
from projecteuler.FileReader import file_to_2D_array_of_ints
# this problem uses a similar solution to problem 18, "Maximum Path Sum 1."
# this problem uses a rectangular grid instead of a pyramid
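# (editor note, read off the loop below) the recurrence is
#   matrix[y][x] += min(matrix[y][x + 1], matrix[y + 1][x])
# so after the sweep, matrix[0][0] holds the minimal right/down path sum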
matrix = file_to_2D_array_of_ints("p081.txt", ",")
y_max = len(matrix) - 1
x_max = len(matrix[0]) - 1
for y in range(y_max, -1, -1):
for x in range(x_max, -1, -1):
if y == y_max and x == x_max:
continue
elif y == y_max:
matrix[y][x] += matrix[y][x + 1]
elif x == x_max:
matrix[y][x] += matrix[y + 1][x]
else:
matrix[y][x] += min(matrix[y][x + 1], matrix[y + 1][x])
|
print(matrix[0][0])
|
random_line_split
|
|
p081.py
|
# https://projecteuler.net/problem=81
from projecteuler.FileReader import file_to_2D_array_of_ints
# this problem uses a similar solution to problem 18, "Maximum Path Sum 1."
# this problem uses a rectangular grid instead of a pyramid
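# (editor note, read off the loop below) the recurrence is
#   matrix[y][x] += min(matrix[y][x + 1], matrix[y + 1][x])
# so after the sweep, matrix[0][0] holds the minimal right/down path sum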
matrix = file_to_2D_array_of_ints("p081.txt", ",")
y_max = len(matrix) - 1
x_max = len(matrix[0]) - 1
for y in range(y_max, -1, -1):
for x in range(x_max, -1, -1):
if y == y_max and x == x_max:
continue
elif y == y_max:
matrix[y][x] += matrix[y][x + 1]
elif x == x_max:
|
else:
matrix[y][x] += min(matrix[y][x + 1], matrix[y + 1][x])
print(matrix[0][0])
|
matrix[y][x] += matrix[y + 1][x]
|
conditional_block
|
test_param_methods.py
|
"""
Testing for enumerate_param, enumerate_params, and enumerate_keyed_param
"""
import unittest
import mws
# pylint: disable=invalid-name
class TestParamsRaiseExceptions(unittest.TestCase):
"""
Simple test that asserts a ValueError is raised by an improper entry to
`utils.enumerate_keyed_param`.
"""
def test_keyed_param_fails_without_dict(self):
"""
Should raise ValueError for values not being a dict.
"""
param = "something"
values = ["this is not a dict like it should be!"]
with self.assertRaises(ValueError):
mws.utils.enumerate_keyed_param(param, values)
def test_single_param_default():
"""
Test each method type for their default empty dicts.
"""
# Single
assert mws.utils.enumerate_param("something", []) == {}
# Multi
assert mws.utils.enumerate_params() == {}
assert mws.utils.enumerate_params("antler") == {}
# Keyed
assert mws.utils.enumerate_keyed_param("acorn", []) == {}
def test_single_param_not_dotted_list_values():
"""
A param string with no dot at the end and a list of ints.
List should be ingested in order.
"""
param = "SomethingOrOther"
values = (123, 765, 3512, 756437, 3125)
result = mws.utils.enumerate_param(param, values)
assert result == {
"SomethingOrOther.1": 123,
"SomethingOrOther.2": 765,
"SomethingOrOther.3": 3512,
"SomethingOrOther.4": 756437,
"SomethingOrOther.5": 3125,
}
def test_single_param_dotted_single_value():
"""
A param string with a dot at the end and a single string value.
Values that are not list, tuple, or set should coerce to a list and provide a single output.
"""
param = "FooBar."
|
}
def test_multi_params():
"""
A series of params sent as a list of dicts to enumerate_params.
Each param should generate a unique set of keys and values.
Final result should be a flat dict.
"""
param1 = "Summat."
values1 = ("colorful", "cheery", "turkey")
param2 = "FooBaz.what"
values2 = "singular"
param3 = "hot_dog"
values3 = ["something", "or", "other"]
    # We could test with values as a set, but we cannot be 100% sure of the order of the output,
# and I don't feel it necessary to flesh this out enough to account for it.
result = mws.utils.enumerate_params({
param1: values1,
param2: values2,
param3: values3,
})
assert result == {
"Summat.1": "colorful",
"Summat.2": "cheery",
"Summat.3": "turkey",
"FooBaz.what.1": "singular",
"hot_dog.1": "something",
"hot_dog.2": "or",
"hot_dog.3": "other",
}
def test_keyed_params():
"""
    Assert that the result of enumerate_keyed_param is as expected.
"""
# Example:
# param = "InboundShipmentPlanRequestItems.member"
# values = [
# {'SellerSKU': 'Football2415',
# 'Quantity': 3},
# {'SellerSKU': 'TeeballBall3251',
# 'Quantity': 5},
# ...
# ]
# Returns:
# {
# 'InboundShipmentPlanRequestItems.member.1.SellerSKU': 'Football2415',
# 'InboundShipmentPlanRequestItems.member.1.Quantity': 3,
# 'InboundShipmentPlanRequestItems.member.2.SellerSKU': 'TeeballBall3251',
# 'InboundShipmentPlanRequestItems.member.2.Quantity': 5,
# ...
# }
param = "AthingToKeyUp.member"
item1 = {
"thing": "stuff",
"foo": "baz",
}
item2 = {
"thing": 123,
"foo": 908,
"bar": "hello",
}
item3 = {
"stuff": "foobarbazmatazz",
"stuff2": "foobarbazmatazz5",
}
result = mws.utils.enumerate_keyed_param(param, [item1, item2, item3])
assert result == {
"AthingToKeyUp.member.1.thing": "stuff",
"AthingToKeyUp.member.1.foo": "baz",
"AthingToKeyUp.member.2.thing": 123,
"AthingToKeyUp.member.2.foo": 908,
"AthingToKeyUp.member.2.bar": "hello",
"AthingToKeyUp.member.3.stuff": "foobarbazmatazz",
"AthingToKeyUp.member.3.stuff2": "foobarbazmatazz5",
}
|
values = "eleven"
result = mws.utils.enumerate_param(param, values)
assert result == {
"FooBar.1": "eleven",
|
random_line_split
|
test_param_methods.py
|
"""
Testing for enumerate_param, enumerate_params, and enumerate_keyed_param
"""
import unittest
import mws
# pylint: disable=invalid-name
class TestParamsRaiseExceptions(unittest.TestCase):
"""
Simple test that asserts a ValueError is raised by an improper entry to
`utils.enumerate_keyed_param`.
"""
def test_keyed_param_fails_without_dict(self):
"""
Should raise ValueError for values not being a dict.
"""
param = "something"
values = ["this is not a dict like it should be!"]
with self.assertRaises(ValueError):
mws.utils.enumerate_keyed_param(param, values)
def test_single_param_default():
"""
Test each method type for their default empty dicts.
"""
# Single
assert mws.utils.enumerate_param("something", []) == {}
# Multi
assert mws.utils.enumerate_params() == {}
assert mws.utils.enumerate_params("antler") == {}
# Keyed
assert mws.utils.enumerate_keyed_param("acorn", []) == {}
def
|
():
"""
A param string with no dot at the end and a list of ints.
List should be ingested in order.
"""
param = "SomethingOrOther"
values = (123, 765, 3512, 756437, 3125)
result = mws.utils.enumerate_param(param, values)
assert result == {
"SomethingOrOther.1": 123,
"SomethingOrOther.2": 765,
"SomethingOrOther.3": 3512,
"SomethingOrOther.4": 756437,
"SomethingOrOther.5": 3125,
}
def test_single_param_dotted_single_value():
"""
A param string with a dot at the end and a single string value.
Values that are not list, tuple, or set should coerce to a list and provide a single output.
"""
param = "FooBar."
values = "eleven"
result = mws.utils.enumerate_param(param, values)
assert result == {
"FooBar.1": "eleven",
}
def test_multi_params():
"""
A series of params sent as a list of dicts to enumerate_params.
Each param should generate a unique set of keys and values.
Final result should be a flat dict.
"""
param1 = "Summat."
values1 = ("colorful", "cheery", "turkey")
param2 = "FooBaz.what"
values2 = "singular"
param3 = "hot_dog"
values3 = ["something", "or", "other"]
    # We could test with values as a set, but we cannot be 100% sure of the order of the output,
# and I don't feel it necessary to flesh this out enough to account for it.
result = mws.utils.enumerate_params({
param1: values1,
param2: values2,
param3: values3,
})
assert result == {
"Summat.1": "colorful",
"Summat.2": "cheery",
"Summat.3": "turkey",
"FooBaz.what.1": "singular",
"hot_dog.1": "something",
"hot_dog.2": "or",
"hot_dog.3": "other",
}
def test_keyed_params():
"""
    Assert that the result of enumerate_keyed_param is as expected.
"""
# Example:
# param = "InboundShipmentPlanRequestItems.member"
# values = [
# {'SellerSKU': 'Football2415',
# 'Quantity': 3},
# {'SellerSKU': 'TeeballBall3251',
# 'Quantity': 5},
# ...
# ]
# Returns:
# {
# 'InboundShipmentPlanRequestItems.member.1.SellerSKU': 'Football2415',
# 'InboundShipmentPlanRequestItems.member.1.Quantity': 3,
# 'InboundShipmentPlanRequestItems.member.2.SellerSKU': 'TeeballBall3251',
# 'InboundShipmentPlanRequestItems.member.2.Quantity': 5,
# ...
# }
param = "AthingToKeyUp.member"
item1 = {
"thing": "stuff",
"foo": "baz",
}
item2 = {
"thing": 123,
"foo": 908,
"bar": "hello",
}
item3 = {
"stuff": "foobarbazmatazz",
"stuff2": "foobarbazmatazz5",
}
result = mws.utils.enumerate_keyed_param(param, [item1, item2, item3])
assert result == {
"AthingToKeyUp.member.1.thing": "stuff",
"AthingToKeyUp.member.1.foo": "baz",
"AthingToKeyUp.member.2.thing": 123,
"AthingToKeyUp.member.2.foo": 908,
"AthingToKeyUp.member.2.bar": "hello",
"AthingToKeyUp.member.3.stuff": "foobarbazmatazz",
"AthingToKeyUp.member.3.stuff2": "foobarbazmatazz5",
}
|
test_single_param_not_dotted_list_values
|
identifier_name
|
test_param_methods.py
|
"""
Testing for enumerate_param, enumerate_params, and enumerate_keyed_param
"""
import unittest
import mws
# pylint: disable=invalid-name
class TestParamsRaiseExceptions(unittest.TestCase):
|
def test_single_param_default():
"""
Test each method type for their default empty dicts.
"""
# Single
assert mws.utils.enumerate_param("something", []) == {}
# Multi
assert mws.utils.enumerate_params() == {}
assert mws.utils.enumerate_params("antler") == {}
# Keyed
assert mws.utils.enumerate_keyed_param("acorn", []) == {}
def test_single_param_not_dotted_list_values():
"""
A param string with no dot at the end and a list of ints.
List should be ingested in order.
"""
param = "SomethingOrOther"
values = (123, 765, 3512, 756437, 3125)
result = mws.utils.enumerate_param(param, values)
assert result == {
"SomethingOrOther.1": 123,
"SomethingOrOther.2": 765,
"SomethingOrOther.3": 3512,
"SomethingOrOther.4": 756437,
"SomethingOrOther.5": 3125,
}
def test_single_param_dotted_single_value():
"""
A param string with a dot at the end and a single string value.
Values that are not list, tuple, or set should coerce to a list and provide a single output.
"""
param = "FooBar."
values = "eleven"
result = mws.utils.enumerate_param(param, values)
assert result == {
"FooBar.1": "eleven",
}
def test_multi_params():
"""
A series of params sent as a list of dicts to enumerate_params.
Each param should generate a unique set of keys and values.
Final result should be a flat dict.
"""
param1 = "Summat."
values1 = ("colorful", "cheery", "turkey")
param2 = "FooBaz.what"
values2 = "singular"
param3 = "hot_dog"
values3 = ["something", "or", "other"]
    # We could test with values as a set, but we cannot be 100% sure of the order of the output,
# and I don't feel it necessary to flesh this out enough to account for it.
result = mws.utils.enumerate_params({
param1: values1,
param2: values2,
param3: values3,
})
assert result == {
"Summat.1": "colorful",
"Summat.2": "cheery",
"Summat.3": "turkey",
"FooBaz.what.1": "singular",
"hot_dog.1": "something",
"hot_dog.2": "or",
"hot_dog.3": "other",
}
def test_keyed_params():
"""
    Assert that the result of enumerate_keyed_param is as expected.
"""
# Example:
# param = "InboundShipmentPlanRequestItems.member"
# values = [
# {'SellerSKU': 'Football2415',
# 'Quantity': 3},
# {'SellerSKU': 'TeeballBall3251',
# 'Quantity': 5},
# ...
# ]
# Returns:
# {
# 'InboundShipmentPlanRequestItems.member.1.SellerSKU': 'Football2415',
# 'InboundShipmentPlanRequestItems.member.1.Quantity': 3,
# 'InboundShipmentPlanRequestItems.member.2.SellerSKU': 'TeeballBall3251',
# 'InboundShipmentPlanRequestItems.member.2.Quantity': 5,
# ...
# }
param = "AthingToKeyUp.member"
item1 = {
"thing": "stuff",
"foo": "baz",
}
item2 = {
"thing": 123,
"foo": 908,
"bar": "hello",
}
item3 = {
"stuff": "foobarbazmatazz",
"stuff2": "foobarbazmatazz5",
}
result = mws.utils.enumerate_keyed_param(param, [item1, item2, item3])
assert result == {
"AthingToKeyUp.member.1.thing": "stuff",
"AthingToKeyUp.member.1.foo": "baz",
"AthingToKeyUp.member.2.thing": 123,
"AthingToKeyUp.member.2.foo": 908,
"AthingToKeyUp.member.2.bar": "hello",
"AthingToKeyUp.member.3.stuff": "foobarbazmatazz",
"AthingToKeyUp.member.3.stuff2": "foobarbazmatazz5",
}
|
"""
Simple test that asserts a ValueError is raised by an improper entry to
`utils.enumerate_keyed_param`.
"""
def test_keyed_param_fails_without_dict(self):
"""
Should raise ValueError for values not being a dict.
"""
param = "something"
values = ["this is not a dict like it should be!"]
with self.assertRaises(ValueError):
mws.utils.enumerate_keyed_param(param, values)
|
identifier_body
|
ws_server.js
|
/** CONFIG **/
var debug = true;
var log_level = 1; // 1: No, 3: verbose
var encoding = 'utf-8';
var public_html_path = 'public_html';
var http_port = 8000;
var host_ip = '127.0.0.1';
if (process.argv.length > 2) {
host_ip = process.argv[2];
}
if (process.argv.length > 3) {
http_port = process.argv[3];
}
/** IMPORT MODULES **/
var http = require('http');
var fs = require('fs');
var static = require('node-static');
/** DATA **/
var blank = {
count : 0,
poll : [0, 0, 0, 0, 0],
};
/** INIT SERVICES **/
var file = new(static.Server)(public_html_path);
var http_server = http.createServer(function(req, res) {
req.addListener('end', function() {
file.serve(req, res);
});
});
/** LOGGERS & ETC **/
var do_nothing = function() {};
var ngv_error_logger = function() {
console.log(Array.prototype.join.call(arguments, ", "));
};
var ngv_disconn_logger = function(con) {
if (debug)
|
};
var ngv_client_logger = function(con, data) {
if (debug) {
console.log("[Client:"+con.id+"]");
console.log(data);
}
};
/** MAIN **/
/** START SERVER **/
// io = io.listen(http_server);
// io.set('log level', log_level);
http_server.listen(http_port, host_ip);
console.log('[Message]: HTTP file server is running at http://'+host_ip+':'+http_port);
|
{
console.log('[Message]: '+con.id+' disconnected');
}
|
conditional_block
|
ws_server.js
|
/** CONFIG **/
var debug = true;
var log_level = 1; // 1: No, 3: verbose
var encoding = 'utf-8';
var public_html_path = 'public_html';
var http_port = 8000;
var host_ip = '127.0.0.1';
if (process.argv.length > 2) {
host_ip = process.argv[2];
}
if (process.argv.length > 3) {
http_port = process.argv[3];
}
|
/** DATA **/
var blank = {
count : 0,
poll : [0, 0, 0, 0, 0],
};
/** INIT SERVICES **/
var file = new(static.Server)(public_html_path);
var http_server = http.createServer(function(req, res) {
req.addListener('end', function() {
file.serve(req, res);
});
});
/** LOGGERS & ETC **/
var do_nothing = function() {};
var ngv_error_logger = function() {
console.log(Array.prototype.join.call(arguments, ", "));
};
var ngv_disconn_logger = function(con) {
if (debug) {
console.log('[Message]: '+con.id+' disconnected');
}
};
var ngv_client_logger = function(con, data) {
if (debug) {
console.log("[Client:"+con.id+"]");
console.log(data);
}
};
/** MAIN **/
/** START SERVER **/
// io = io.listen(http_server);
// io.set('log level', log_level);
http_server.listen(http_port, host_ip);
console.log('[Message]: HTTP file server is running at http://'+host_ip+':'+http_port);
|
/** IMPORT MODULES **/
var http = require('http');
var fs = require('fs');
var static = require('node-static');
|
random_line_split
|
UserManager.js
|
import UserInfo from "./UserInfo";
class UserManager{
/****************************************************
* static
****************************************************/
static _instance;
static getInstance() {
if (UserManager._instance)
return UserManager._instance;
else
return new UserManager();
|
/****************************************************
* private instance variables
****************************************************/
_info;
_users;
// _reloadLevelsCallback;
// _reloadSettingsCallback;
// _reloadMainCallback;
_reloadCallbacks;
/****************************************************
* instance method
****************************************************/
constructor() {
if (!UserManager._instance) {
UserManager._instance = this;
} else {
            throw new Error('UserManager is a Singleton class !!');
}
this._reloadCallbacks = [];
this.initUser();
}
reloadUserList(){
return this.getUserListData();
}
createNewUser(options){
this._info = new UserInfo(options);
this.saveUser();
}
changeUser(id){
localStorage.setItem('currentUser', id);
this.initUser();
this.getUserListData();
// if(this._reloadLevelsCallback)
// this._reloadLevelsCallback();
// if(this._reloadSettingsCallback)
// this._reloadSettingsCallback();
// if(this._reloadMainCallback)
// this._reloadMainCallback();
for(let i = 0; i < this._reloadCallbacks.length; ++i){
this._reloadCallbacks[i]();
}
return this._info;
}
requestDeleteUser(id){
console.log('requestDeleteUser: >>', id);
delete this._users[id];
localStorage.removeItem(id);
return this.reloadUserList();
}
requestProgressLevelId(){
return (this.info) ? this.info.progress : this.getCurrentUserInfo().progress;
}
saveLevelData(level){
this._info.saveData[level.id] = level.result;
this._info.saveProgress(level.id);
this.saveUser(this._info);
}
/*setReloadLevelsCallback(callback){
// this._reloadLevelsCallback = callback;
this.setUserReloadCallback(callback);
}
setReloadSettingsCallback(callback){
// this._reloadSettingsCallback = callback;
this.setUserReloadCallback(callback);
}
setReloadMainCallback(callback){
// this._reloadMainCallback = callback;
this.setUserReloadCallback(callback);
}*/
setUserReloadCallback(callback){
this._reloadCallbacks.push(callback);
}
removeSaveData(){
this._info.removeSaveData();
this.saveUser();
}
saveSettings(key, value){
this._info.saveSettings(key, value);
this.saveUser();
}
/****************************************************
* private method
****************************************************/
initUser(){
// localStorage.clear();
if (typeof(Storage) !== "undefined") {
this.reloadUserList();
let data = this.getCurrentUserInfo();
            // if data is undefined, UserInfo automatically creates an unnamed user.
this._info = new UserInfo(data);
this.saveUser(this._info);
} else {
// TODO: No Web Storage support..
}
}
getUserListData(){
if(!this._users)
this._users = {};
for(let uid in localStorage){
if(uid.substr(0, 6) === '_USER_'){
this._users[uid] = this.getUserInfo(uid);
}
}
return this._users;
}
getUserInfo(id){
return JSON.parse(localStorage.getItem(id));
}
getCurrentUserInfo(){
let currentUserId = localStorage.getItem('currentUser');
return this.getUserInfo(currentUserId);
}
saveUser(userInfo){
let info = (userInfo) ? userInfo : this._info;
this.saveCurrentUserId(info.id);
localStorage.setItem(info.id, JSON.stringify(info.data));
console.log('UserManager.saveUser() > localStorage :', localStorage);
}
saveCurrentUserId(id){
localStorage.setItem('currentUser', id);
}
/****************************************************
* GETTER
****************************************************/
get info(){
return this._info;
}
get users(){
return this._users;
}
}
export default UserManager;
|
}
|
random_line_split
|
UserManager.js
|
import UserInfo from "./UserInfo";
class UserManager{
/****************************************************
* static
****************************************************/
static _instance;
static getInstance() {
if (UserManager._instance)
return UserManager._instance;
else
return new UserManager();
}
/****************************************************
* private instance variables
****************************************************/
_info;
_users;
// _reloadLevelsCallback;
// _reloadSettingsCallback;
// _reloadMainCallback;
_reloadCallbacks;
/****************************************************
* instance method
****************************************************/
constructor() {
if (!UserManager._instance) {
UserManager._instance = this;
} else {
            throw new Error('UserManager is a Singleton class !!');
}
this._reloadCallbacks = [];
this.initUser();
}
reloadUserList(){
return this.getUserListData();
}
createNewUser(options){
this._info = new UserInfo(options);
this.saveUser();
}
changeUser(id){
localStorage.setItem('currentUser', id);
this.initUser();
this.getUserListData();
// if(this._reloadLevelsCallback)
// this._reloadLevelsCallback();
// if(this._reloadSettingsCallback)
// this._reloadSettingsCallback();
// if(this._reloadMainCallback)
// this._reloadMainCallback();
for(let i = 0; i < this._reloadCallbacks.length; ++i){
this._reloadCallbacks[i]();
}
return this._info;
}
requestDeleteUser(id){
console.log('requestDeleteUser: >>', id);
delete this._users[id];
localStorage.removeItem(id);
return this.reloadUserList();
}
requestProgressLevelId(){
return (this.info) ? this.info.progress : this.getCurrentUserInfo().progress;
}
saveLevelData(level){
this._info.saveData[level.id] = level.result;
this._info.saveProgress(level.id);
this.saveUser(this._info);
}
/*setReloadLevelsCallback(callback){
// this._reloadLevelsCallback = callback;
this.setUserReloadCallback(callback);
}
setReloadSettingsCallback(callback){
// this._reloadSettingsCallback = callback;
this.setUserReloadCallback(callback);
}
setReloadMainCallback(callback){
// this._reloadMainCallback = callback;
this.setUserReloadCallback(callback);
}*/
setUserReloadCallback(callback){
this._reloadCallbacks.push(callback);
}
removeSaveData(){
this._info.removeSaveData();
this.saveUser();
}
saveSettings(key, value){
this._info.saveSettings(key, value);
this.saveUser();
}
/****************************************************
* private method
****************************************************/
initUser(){
// localStorage.clear();
if (typeof(Storage) !== "undefined") {
this.reloadUserList();
let data = this.getCurrentUserInfo();
            // if data is undefined, UserInfo automatically creates an unnamed user.
this._info = new UserInfo(data);
this.saveUser(this._info);
} else {
// TODO: No Web Storage support..
}
}
getUserListData(){
if(!this._users)
this._users = {};
for(let uid in localStorage){
if(uid.substr(0, 6) === '_USER_'){
this._users[uid] = this.getUserInfo(uid);
}
}
return this._users;
}
getUserInfo(id){
return JSON.parse(localStorage.getItem(id));
}
getCurrentUserInfo(){
let currentUserId = localStorage.getIt
|
nfo) ? userInfo : this._info;
this.saveCurrentUserId(info.id);
localStorage.setItem(info.id, JSON.stringify(info.data));
console.log('UserManager.saveUser() > localStorage :', localStorage);
}
saveCurrentUserId(id){
localStorage.setItem('currentUser', id);
}
/****************************************************
* GETTER
****************************************************/
get info(){
return this._info;
}
get users(){
return this._users;
}
}
export default UserManager;
|
em('currentUser');
return this.getUserInfo(currentUserId);
}
saveUser(userInfo){
let info = (userI
|
identifier_body
|
UserManager.js
|
import UserInfo from "./UserInfo";
class UserManager{
/****************************************************
* static
****************************************************/
static _instance;
static getInstance() {
if (UserManager._instance)
return UserManager._instance;
else
return new UserManager();
}
/****************************************************
* private instance variables
****************************************************/
_info;
_users;
// _reloadLevelsCallback;
// _reloadSettingsCallback;
// _reloadMainCallback;
_reloadCallbacks;
/****************************************************
* instance method
****************************************************/
constructor() {
if (!UserManager._instance) {
UserManager._instance = this;
} else {
            throw new Error('UserManager is a Singleton class !!');
}
this._reloadCallbacks = [];
this.initUser();
}
reloadUserList(){
return this.getUserListData();
}
createNewUser(options){
this._info = new UserInfo(options);
this.saveUser();
}
changeUser(id){
localStorage.setItem('currentUser', id);
this.initUser();
this.getUserListData();
// if(this._reloadLevelsCallback)
// this._reloadLevelsCallback();
// if(this._reloadSettingsCallback)
// this._reloadSettingsCallback();
// if(this._reloadMainCallback)
// this._reloadMainCallback();
for(let i = 0; i < this._reloadCallbacks.length; ++i){
this._reloadCallbacks[i]();
}
return this._info;
}
requestDeleteUser(id){
console.log('requestDeleteUser: >>', id);
delete this._users[id];
localStorage.removeItem(id);
return this.reloadUserList();
}
requestProgressLevelId(){
return (this.info) ? this.info.progress : this.getCurrentUserInfo().progress;
}
saveLevelData(level){
this._info.saveData[level.id] = level.result;
this._info.saveProgress(level.id);
this.saveUser(this._info);
}
/*setReloadLevelsCallback(callback){
// this._reloadLevelsCallback = callback;
this.setUserReloadCallback(callback);
}
setReloadSettingsCallback(callback){
// this._reloadSettingsCallback = callback;
this.setUserReloadCallback(callback);
}
setReloadMainCallback(callback){
// this._reloadMainCallback = callback;
this.setUserReloadCallback(callback);
}*/
setUserReloadCallback(callback){
this._reloadCallbacks.push(callback);
}
removeSaveData(){
this._info.removeSaveData();
this.saveUser();
}
saveSettings(key, value){
this._info.saveSettings(key, value);
this.saveUser();
}
/****************************************************
* private method
****************************************************/
initUser(){
// localStorage.clear();
if (typeof(Storage) !== "undefined") {
this.reloadUserList();
let data = this.getCurrentUserInfo();
            // if data is undefined, UserInfo automatically creates an unnamed user.
this._info = new UserInfo(data);
this.saveUser(this._info);
} else {
// TODO: No Web Storage support..
}
}
getUserListData(){
if(!this._users)
this._users = {};
for(let uid in localStorage){
if(uid.substr(0, 6) === '_USER_'){
this._users[uid] = this.getUserInfo(uid);
}
}
return this._users;
}
getUserInfo(id){
return JSON.parse(localStorage.getItem(id));
}
getCurrentUserInfo(){
let currentUserId = localStorage.getItem('currentUser');
return this.getUserInfo(currentUserId);
}
saveUser(userInfo){
let info = (userInfo
|
Info : this._info;
this.saveCurrentUserId(info.id);
localStorage.setItem(info.id, JSON.stringify(info.data));
console.log('UserManager.saveUser() > localStorage :', localStorage);
}
saveCurrentUserId(id){
localStorage.setItem('currentUser', id);
}
/****************************************************
* GETTER
****************************************************/
get info(){
return this._info;
}
get users(){
return this._users;
}
}
export default UserManager;
|
) ? user
|
identifier_name
|
Rx.KitchenSink.d.ts
|
import { Subject } from './Subject';
import { Observable } from './Observable';
import { CoreOperators } from './CoreOperators';
import { Scheduler as IScheduler } from './Scheduler';
export interface KitchenSinkOperators<T> extends CoreOperators<T> {
isEmpty?: () => Observable<boolean>;
elementAt?: (index: number, defaultValue?: any) => Observable<T>;
distinctUntilKeyChanged?: (key: string, compare?: (x: any, y: any) => boolean) => Observable<T>;
find?: (predicate: (value: T, index: number, source: Observable<T>) => boolean, thisArg?: any) => Observable<T>;
findIndex?: (predicate: (value: T, index: number, source: Observable<T>) => boolean, thisArg?: any) => Observable<number>;
max?: <T, R>(comparer?: (x: R, y: T) => R) => Observable<R>;
min?: <T, R>(comparer?: (x: R, y: T) => R) => Observable<R>;
pairwise?: <R>() => Observable<R>;
timeInterval?: <T>(scheduler?: IScheduler) => Observable<T>;
mergeScan?: <T, R>(project: (acc: R, x: T) => Observable<R>, seed: R, concurrent?: number) => Observable<R>;
exhaust?: () => Observable<T>;
exhaustMap?: <R>(project: ((x: T, ix: number) => Observable<any>), projectResult?: (x: T, y: any, ix: number, iy: number) => R) => Observable<R>;
}
import { Observer } from './Observer';
import { Subscription } from './Subscription';
import { Subscriber } from './Subscriber';
import { AsyncSubject } from './subject/AsyncSubject';
import { ReplaySubject } from './subject/ReplaySubject';
import { BehaviorSubject } from './subject/BehaviorSubject';
import { ConnectableObservable } from './observable/ConnectableObservable';
import { Notification } from './Notification';
import { EmptyError } from './util/EmptyError';
import { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';
import { ArgumentOutOfRangeError } from './util/ArgumentOutOfRangeError';
import { AsapScheduler } from './scheduler/AsapScheduler';
import { QueueScheduler } from './scheduler/QueueScheduler';
import { TimeInterval } from './operator/timeInterval';
import { TestScheduler } from './testing/TestScheduler';
import { VirtualTimeScheduler } from './scheduler/VirtualTimeScheduler';
declare var Scheduler: {
|
asap: AsapScheduler;
queue: QueueScheduler;
};
declare var Symbol: {
rxSubscriber: any;
};
export { Subject, Scheduler, Observable, Observer, Subscriber, Subscription, AsyncSubject, ReplaySubject, BehaviorSubject, ConnectableObservable, Notification, EmptyError, ArgumentOutOfRangeError, ObjectUnsubscribedError, TestScheduler, VirtualTimeScheduler, TimeInterval, Symbol };
|
random_line_split
|
|
penalizer.js
|
function initialise_sliders(){
$( "div.slider" ).slider({
|
step: 0.000000001,
change: changed
});
populate();
};
function slid(event, ui){
update_indicator(event.target.id, ui.value);
}
function update_indicator(slider_id, penalty){
var indicator_id = "#" + slider_id + "Penalty";
$(indicator_id).text(penalty);
}
function changed(event, ui){
var new_constraint = {"type":event.target.id,"penalty":ui.value};
//old constraint value not important - ignored in DB, but stub inserted for consistency with db/ResourceUpdater.pm
var change = {"type":"edition", "old":"stub", "new":new_constraint};
var changes = [change];
var stringified_changes = JSON.stringify(changes);
$.post( "../db/ConstraintUpdater.pl",{"changes": stringified_changes});
}
function populate(){
$.get("../db/ConstraintList.pl", get_process_input);
}
function get_process_input(constraints_list){
    for(var i=0; i<constraints_list.length; i++){
var constraint = constraints_list[i];
var slider_id = constraint.type;
var penalty = constraint.penalty;
$("#" + slider_id).slider("value",penalty);
update_indicator(slider_id, penalty);
}
}
|
slide: slid,
|
random_line_split
|
penalizer.js
|
function initialise_sliders(){
$( "div.slider" ).slider({
slide: slid,
step: 0.000000001,
change: changed
});
populate();
};
function slid(event, ui){
update_indicator(event.target.id, ui.value);
}
function update_indicator(slider_id, penalty){
var indicator_id = "#" + slider_id + "Penalty";
$(indicator_id).text(penalty);
}
function changed(event, ui){
var new_constraint = {"type":event.target.id,"penalty":ui.value};
//old constraint value not important - ignored in DB, but stub inserted for consistency with db/ResourceUpdater.pm
var change = {"type":"edition", "old":"stub", "new":new_constraint};
var changes = [change];
var stringified_changes = JSON.stringify(changes);
$.post( "../db/ConstraintUpdater.pl",{"changes": stringified_changes});
}
function populate(){
$.get("../db/ConstraintList.pl", get_process_input);
}
function
|
(constraints_list){
    for(var i=0; i<constraints_list.length; i++){
var constraint = constraints_list[i];
var slider_id = constraint.type;
var penalty = constraint.penalty;
$("#" + slider_id).slider("value",penalty);
update_indicator(slider_id, penalty);
}
}
|
get_process_input
|
identifier_name
|
penalizer.js
|
function initialise_sliders(){
$( "div.slider" ).slider({
slide: slid,
step: 0.000000001,
change: changed
});
populate();
};
function slid(event, ui){
update_indicator(event.target.id, ui.value);
}
function update_indicator(slider_id, penalty){
var indicator_id = "#" + slider_id + "Penalty";
$(indicator_id).text(penalty);
}
function changed(event, ui){
var new_constraint = {"type":event.target.id,"penalty":ui.value};
//old constraint value not important - ignored in DB, but stub inserted for consistency with db/ResourceUpdater.pm
var change = {"type":"edition", "old":"stub", "new":new_constraint};
var changes = [change];
var stringified_changes = JSON.stringify(changes);
$.post( "../db/ConstraintUpdater.pl",{"changes": stringified_changes});
}
function populate(){
$.get("../db/ConstraintList.pl", get_process_input);
}
function get_process_input(constraints_list){
    for(var i=0; i<constraints_list.length; i++)
|
}
|
{
var constraint = constraints_list[i];
var slider_id = constraint.type;
var penalty = constraint.penalty;
$("#" + slider_id).slider("value",penalty);
update_indicator(slider_id, penalty);
}
|
conditional_block
|
penalizer.js
|
function initialise_sliders(){
$( "div.slider" ).slider({
slide: slid,
step: 0.000000001,
change: changed
});
populate();
};
function slid(event, ui){
update_indicator(event.target.id, ui.value);
}
function update_indicator(slider_id, penalty)
|
function changed(event, ui){
var new_constraint = {"type":event.target.id,"penalty":ui.value};
//old constraint value not important - ignored in DB, but stub inserted for consistency with db/ResourceUpdater.pm
var change = {"type":"edition", "old":"stub", "new":new_constraint};
var changes = [change];
var stringified_changes = JSON.stringify(changes);
$.post( "../db/ConstraintUpdater.pl",{"changes": stringified_changes});
}
function populate(){
$.get("../db/ConstraintList.pl", get_process_input);
}
function get_process_input(constraints_list){
    for(var i=0; i<constraints_list.length; i++){
var constraint = constraints_list[i];
var slider_id = constraint.type;
var penalty = constraint.penalty;
$("#" + slider_id).slider("value",penalty);
update_indicator(slider_id, penalty);
}
}
|
{
var indicator_id = "#" + slider_id + "Penalty";
$(indicator_id).text(penalty);
}
|
identifier_body
|
gpio.rs
|
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use erased_serde::Serialize;
use std::any::Any;
use structopt::StructOpt;
use opentitanlib::app::command::CommandDispatch;
use opentitanlib::app::TransportWrapper;
use opentitanlib::io::gpio::{PinMode, PullMode};
use opentitanlib::transport::Capability;
#[derive(Debug, StructOpt)]
/// Reads a GPIO pin.
pub struct GpioRead {
#[structopt(name = "PIN", help = "The GPIO pin to read")]
pub pin: String,
}
#[derive(serde::Serialize)]
pub struct GpioReadResult {
pub pin: String,
pub value: bool,
}
impl CommandDispatch for GpioRead {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
let value = gpio_pin.read()?;
Ok(Some(Box::new(GpioReadResult {
pin: self.pin.clone(),
value,
})))
}
}
#[derive(Debug, StructOpt)]
/// Writes a GPIO pin.
pub struct GpioWrite {
#[structopt(name = "PIN", help = "The GPIO pin to write")]
pub pin: String,
#[structopt(
name = "VALUE",
parse(try_from_str),
help = "The value to write to the pin"
)]
pub value: bool,
}
impl CommandDispatch for GpioWrite {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
gpio_pin.write(self.value)?;
Ok(None)
}
}
#[derive(Debug, StructOpt)]
/// Set the I/O mode of a GPIO pin (Input/OpenDrain/PushPull).
pub struct GpioSetMode {
#[structopt(name = "PIN", help = "The GPIO pin to modify")]
pub pin: String,
#[structopt(
name = "MODE",
possible_values = &PinMode::variants(),
case_insensitive=true,
help = "The I/O mode of the pin"
)]
pub mode: PinMode,
}
impl CommandDispatch for GpioSetMode {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
gpio_pin.set_mode(self.mode)?;
Ok(None)
}
}
#[derive(Debug, StructOpt)]
/// Set the I/O weak pull mode of a GPIO pin (PullUp/PullDown/None).
pub struct GpioSetPullMode {
#[structopt(name = "PIN", help = "The GPIO pin to modify")]
pub pin: String,
#[structopt(
name = "PULLMODE",
possible_values = &PullMode::variants(),
case_insensitive=true,
help = "The weak pull mode of the pin"
)]
pub pull_mode: PullMode,
}
impl CommandDispatch for GpioSetPullMode {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
gpio_pin.set_pull_mode(self.pull_mode)?;
Ok(None)
}
|
Read(GpioRead),
Write(GpioWrite),
SetMode(GpioSetMode),
SetPullMode(GpioSetPullMode),
}
|
}
/// Commands for manipulating GPIO pins.
#[derive(Debug, StructOpt, CommandDispatch)]
pub enum GpioCommand {
|
random_line_split
|
gpio.rs
|
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use erased_serde::Serialize;
use std::any::Any;
use structopt::StructOpt;
use opentitanlib::app::command::CommandDispatch;
use opentitanlib::app::TransportWrapper;
use opentitanlib::io::gpio::{PinMode, PullMode};
use opentitanlib::transport::Capability;
#[derive(Debug, StructOpt)]
/// Reads a GPIO pin.
pub struct GpioRead {
#[structopt(name = "PIN", help = "The GPIO pin to read")]
pub pin: String,
}
#[derive(serde::Serialize)]
pub struct GpioReadResult {
pub pin: String,
pub value: bool,
}
impl CommandDispatch for GpioRead {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
let value = gpio_pin.read()?;
Ok(Some(Box::new(GpioReadResult {
pin: self.pin.clone(),
value,
})))
}
}
#[derive(Debug, StructOpt)]
/// Writes a GPIO pin.
pub struct GpioWrite {
#[structopt(name = "PIN", help = "The GPIO pin to write")]
pub pin: String,
#[structopt(
name = "VALUE",
parse(try_from_str),
help = "The value to write to the pin"
)]
pub value: bool,
}
impl CommandDispatch for GpioWrite {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
gpio_pin.write(self.value)?;
Ok(None)
}
}
#[derive(Debug, StructOpt)]
/// Set the I/O mode of a GPIO pin (Input/OpenDrain/PushPull).
pub struct GpioSetMode {
#[structopt(name = "PIN", help = "The GPIO pin to modify")]
pub pin: String,
#[structopt(
name = "MODE",
possible_values = &PinMode::variants(),
case_insensitive=true,
help = "The I/O mode of the pin"
)]
pub mode: PinMode,
}
impl CommandDispatch for GpioSetMode {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
gpio_pin.set_mode(self.mode)?;
Ok(None)
}
}
#[derive(Debug, StructOpt)]
/// Set the I/O weak pull mode of a GPIO pin (PullUp/PullDown/None).
pub struct
|
{
#[structopt(name = "PIN", help = "The GPIO pin to modify")]
pub pin: String,
#[structopt(
name = "PULLMODE",
possible_values = &PullMode::variants(),
case_insensitive=true,
help = "The weak pull mode of the pin"
)]
pub pull_mode: PullMode,
}
impl CommandDispatch for GpioSetPullMode {
fn run(
&self,
_context: &dyn Any,
transport: &TransportWrapper,
) -> Result<Option<Box<dyn Serialize>>> {
transport.capabilities()?.request(Capability::GPIO).ok()?;
let gpio_pin = transport.gpio_pin(&self.pin)?;
gpio_pin.set_pull_mode(self.pull_mode)?;
Ok(None)
}
}
/// Commands for manipulating GPIO pins.
#[derive(Debug, StructOpt, CommandDispatch)]
pub enum GpioCommand {
Read(GpioRead),
Write(GpioWrite),
SetMode(GpioSetMode),
SetPullMode(GpioSetPullMode),
}
|
GpioSetPullMode
|
identifier_name
|
log_usingThreadedStream.py
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from time import time
from mo_future import text_type
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Thread, THREAD_STOP, Till
DEBUG_LOGGING = False
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
use_UTF8 = False
if isinstance(stream, text_type):
if stream.startswith("sys."):
use_UTF8 = True # sys.* ARE OLD AND CAN NOT HANDLE unicode
self.stream = eval(stream)
name = stream
else:
self.stream = stream
name = "stream"
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from mo_threads import Queue
if use_UTF8:
def utf8_appender(value):
if isinstance(value, text_type):
value = value.encode('utf8')
self.stream.write(value)
appender = utf8_appender
else:
appender = self.stream.write
self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
self.thread.parent.remove_child(self.thread) # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
self.thread.start()
def write(self, template, params):
try:
self.queue.add({"template": template, "params": params})
return self
except Exception as e:
raise e # OH NO!
def stop(self):
try:
self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception as e:
if DEBUG_LOGGING:
raise e
try:
self.queue.close()
except Exception as f:
if DEBUG_LOGGING:
raise f
def time_delta_pusher(please_stop, appender, queue, interval):
"""
appender - THE FUNCTION THAT ACCEPTS A STRING
queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
interval - timedelta
USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
"""
next_run = time() + interval
while not please_stop:
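        # Sleep until the next flush time, or wake early if asked to stop.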
(Till(till=next_run) | please_stop).wait()
next_run = time() + interval
|
logs = queue.pop_all()
if not logs:
continue
lines = []
for log in logs:
try:
if log is THREAD_STOP:
please_stop.go()
next_run = time()
else:
expanded = expand_template(log.get("template"), log.get("params"))
lines.append(expanded)
except Exception as e:
location = log.get('params', {}).get('location', {})
Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
# SWALLOW ERROR, GOT TO KEEP RUNNING
try:
appender(u"\n".join(lines) + u"\n")
except Exception as e:
sys.stderr.write(b"Trouble with appender: " + str(e.__class__.__name__) + b"\n")
# SWALLOW ERROR, MUST KEEP RUNNING
|
random_line_split
|
|
log_usingThreadedStream.py
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from time import time
from mo_future import text_type
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Thread, THREAD_STOP, Till
DEBUG_LOGGING = False
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
use_UTF8 = False
if isinstance(stream, text_type):
|
else:
self.stream = stream
name = "stream"
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from mo_threads import Queue
if use_UTF8:
def utf8_appender(value):
if isinstance(value, text_type):
value = value.encode('utf8')
self.stream.write(value)
appender = utf8_appender
else:
appender = self.stream.write
self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
self.thread.parent.remove_child(self.thread) # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
self.thread.start()
def write(self, template, params):
try:
self.queue.add({"template": template, "params": params})
return self
except Exception as e:
raise e # OH NO!
def stop(self):
try:
self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception as e:
if DEBUG_LOGGING:
raise e
try:
self.queue.close()
except Exception as f:
if DEBUG_LOGGING:
raise f
def time_delta_pusher(please_stop, appender, queue, interval):
"""
appender - THE FUNCTION THAT ACCEPTS A STRING
queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
interval - timedelta
USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
"""
next_run = time() + interval
while not please_stop:
(Till(till=next_run) | please_stop).wait()
next_run = time() + interval
logs = queue.pop_all()
if not logs:
continue
lines = []
for log in logs:
try:
if log is THREAD_STOP:
please_stop.go()
next_run = time()
else:
expanded = expand_template(log.get("template"), log.get("params"))
lines.append(expanded)
except Exception as e:
location = log.get('params', {}).get('location', {})
Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
# SWALLOW ERROR, GOT TO KEEP RUNNING
try:
appender(u"\n".join(lines) + u"\n")
except Exception as e:
sys.stderr.write(b"Trouble with appender: " + str(e.__class__.__name__) + b"\n")
# SWALLOW ERROR, MUST KEEP RUNNING
|
if stream.startswith("sys."):
use_UTF8 = True # sys.* ARE OLD AND CAN NOT HANDLE unicode
self.stream = eval(stream)
name = stream
|
conditional_block
|
log_usingThreadedStream.py
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from time import time
from mo_future import text_type
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Thread, THREAD_STOP, Till
DEBUG_LOGGING = False
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
use_UTF8 = False
if isinstance(stream, text_type):
if stream.startswith("sys."):
use_UTF8 = True # sys.* ARE OLD AND CAN NOT HANDLE unicode
self.stream = eval(stream)
name = stream
else:
self.stream = stream
name = "stream"
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from mo_threads import Queue
if use_UTF8:
def utf8_appender(value):
|
appender = utf8_appender
else:
appender = self.stream.write
self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
self.thread.parent.remove_child(self.thread) # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
self.thread.start()
def write(self, template, params):
try:
self.queue.add({"template": template, "params": params})
return self
except Exception as e:
raise e # OH NO!
def stop(self):
try:
self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception as e:
if DEBUG_LOGGING:
raise e
try:
self.queue.close()
except Exception as f:
if DEBUG_LOGGING:
raise f
def time_delta_pusher(please_stop, appender, queue, interval):
"""
appender - THE FUNCTION THAT ACCEPTS A STRING
queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
interval - timedelta
USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
"""
next_run = time() + interval
while not please_stop:
(Till(till=next_run) | please_stop).wait()
next_run = time() + interval
logs = queue.pop_all()
if not logs:
continue
lines = []
for log in logs:
try:
if log is THREAD_STOP:
please_stop.go()
next_run = time()
else:
expanded = expand_template(log.get("template"), log.get("params"))
lines.append(expanded)
except Exception as e:
location = log.get('params', {}).get('location', {})
Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
# SWALLOW ERROR, GOT TO KEEP RUNNING
try:
appender(u"\n".join(lines) + u"\n")
except Exception as e:
sys.stderr.write(b"Trouble with appender: " + str(e.__class__.__name__) + b"\n")
# SWALLOW ERROR, MUST KEEP RUNNING
|
if isinstance(value, text_type):
value = value.encode('utf8')
self.stream.write(value)
|
identifier_body
|
log_usingThreadedStream.py
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from time import time
from mo_future import text_type
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Thread, THREAD_STOP, Till
DEBUG_LOGGING = False
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
# WHICH WILL eval() TO ONE
def __init__(self, stream):
assert stream
use_UTF8 = False
if isinstance(stream, text_type):
if stream.startswith("sys."):
use_UTF8 = True # sys.* ARE OLD AND CAN NOT HANDLE unicode
self.stream = eval(stream)
name = stream
else:
self.stream = stream
name = "stream"
# WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
from mo_threads import Queue
if use_UTF8:
def utf8_appender(value):
if isinstance(value, text_type):
value = value.encode('utf8')
self.stream.write(value)
appender = utf8_appender
else:
appender = self.stream.write
self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
self.thread.parent.remove_child(self.thread) # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
self.thread.start()
def write(self, template, params):
try:
self.queue.add({"template": template, "params": params})
return self
except Exception as e:
raise e # OH NO!
def stop(self):
try:
self.queue.add(THREAD_STOP) # BE PATIENT, LET REST OF MESSAGE BE SENT
self.thread.join()
except Exception as e:
if DEBUG_LOGGING:
raise e
try:
self.queue.close()
except Exception as f:
if DEBUG_LOGGING:
raise f
def
|
(please_stop, appender, queue, interval):
"""
appender - THE FUNCTION THAT ACCEPTS A STRING
queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
interval - timedelta
USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
"""
next_run = time() + interval
while not please_stop:
(Till(till=next_run) | please_stop).wait()
next_run = time() + interval
logs = queue.pop_all()
if not logs:
continue
lines = []
for log in logs:
try:
if log is THREAD_STOP:
please_stop.go()
next_run = time()
else:
expanded = expand_template(log.get("template"), log.get("params"))
lines.append(expanded)
except Exception as e:
location = log.get('params', {}).get('location', {})
Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
# SWALLOW ERROR, GOT TO KEEP RUNNING
try:
appender(u"\n".join(lines) + u"\n")
except Exception as e:
sys.stderr.write(b"Trouble with appender: " + str(e.__class__.__name__) + b"\n")
# SWALLOW ERROR, MUST KEEP RUNNING
|
time_delta_pusher
|
identifier_name
|
mod.rs
|
pub use self::stack::Stack;
pub use self::word::Word;
use memory;
use memory::MemoryMap;
use self::opcode::{Opcode, PartialAddr, IWord};
mod alu;
mod opcode;
mod stack;
mod word;
pub struct CPU {
p: Word,
i: IWord,
a: Word,
b: Word,
dat: Stack,
ret: Stack,
mem: MemoryMap
}
impl CPU {
fn jump(&mut self, address: PartialAddr) {
let PartialAddr(partial, bits) = address;
let bits_word = Word(bits as i32);
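        // Clear the low `bits` bits of P, then merge in the partial address.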
self.p = (self.p >> bits_word << bits_word) | partial;
}
fn exec(&mut self, opcode: Opcode) {
use self::opcode::Opcode::*;
match opcode {
Unary(op) => alu::unary(op, &mut self.dat),
Binary(op) => alu::binary(op, &mut self.dat, &mut self.ret),
Stack(op) => alu::stack(op, &mut self.dat, &mut self.ret),
Register(op, inc) => self.exec_register(op, inc),
Control(op, addr) => self.exec_control(op, addr)
}
}
fn
|
(&mut self, op: u8, increment: bool) {
let (reg, reg_op) = match (op, increment) {
(3, true) => (&mut self.p, 1), // @p
(7, true) => (&mut self.p, 2), // !p
(0...3, _) => (&mut self.a, op & 3), // a @ ! a!
(4...7, _) => (&mut self.b, op & 3), // b @b !b b!
(n, f) => panic!("register op out of range: {} {}", n, f)
};
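        // reg_op: 0 = push register, 1 = push memory at register, 2 = store to memory at register, 3 = load register from stack.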
match reg_op {
0 => self.dat.push(*reg), // a b
1 => self.dat.push(self.mem.read(*reg)), // @ @b @p
2 => self.mem.write(*reg, self.dat.pop()), // ! !b !p
3 => { *reg = self.dat.pop() }, // a! b!
_ => unreachable!()
}
if increment {
*reg = memory::inc_address(*reg);
}
}
fn exec_control(&mut self, operation: u8, address: PartialAddr) {
let t = self.dat.peek();
match operation {
0 => self.jump(address), // (jump)
1 => { // (call)
self.ret.push(self.p);
self.jump(address);
}
2 => if t == Word(0) { self.jump(address) }, // if
3 => if t >= Word(0) { self.jump(address) }, // -if
4 => if self.ret.next() { self.jump(address) }, // next
5 => if self.ret.next() { self.i.restart() }, // unext
6 => { // ex
let temp = self.p;
self.p = self.ret.pop();
self.ret.push(temp);
},
7 => { self.p = self.ret.pop(); }, // ;
n => panic!("control flow op out of range: {}", n)
}
}
}
|
exec_register
|
identifier_name
|
mod.rs
|
pub use self::stack::Stack;
pub use self::word::Word;
use memory;
use memory::MemoryMap;
use self::opcode::{Opcode, PartialAddr, IWord};
mod alu;
mod opcode;
mod stack;
mod word;
pub struct CPU {
p: Word,
i: IWord,
a: Word,
b: Word,
dat: Stack,
ret: Stack,
mem: MemoryMap
}
impl CPU {
fn jump(&mut self, address: PartialAddr) {
let PartialAddr(partial, bits) = address;
let bits_word = Word(bits as i32);
self.p = (self.p >> bits_word << bits_word) | partial;
}
fn exec(&mut self, opcode: Opcode)
|
fn exec_register(&mut self, op: u8, increment: bool) {
let (reg, reg_op) = match (op, increment) {
(3, true) => (&mut self.p, 1), // @p
(7, true) => (&mut self.p, 2), // !p
(0...3, _) => (&mut self.a, op & 3), // a @ ! a!
(4...7, _) => (&mut self.b, op & 3), // b @b !b b!
(n, f) => panic!("register op out of range: {} {}", n, f)
};
match reg_op {
0 => self.dat.push(*reg), // a b
1 => self.dat.push(self.mem.read(*reg)), // @ @b @p
2 => self.mem.write(*reg, self.dat.pop()), // ! !b !p
3 => { *reg = self.dat.pop() }, // a! b!
_ => unreachable!()
}
if increment {
*reg = memory::inc_address(*reg);
}
}
fn exec_control(&mut self, operation: u8, address: PartialAddr) {
let t = self.dat.peek();
match operation {
0 => self.jump(address), // (jump)
1 => { // (call)
self.ret.push(self.p);
self.jump(address);
}
2 => if t == Word(0) { self.jump(address) }, // if
3 => if t >= Word(0) { self.jump(address) }, // -if
4 => if self.ret.next() { self.jump(address) }, // next
5 => if self.ret.next() { self.i.restart() }, // unext
6 => { // ex
let temp = self.p;
self.p = self.ret.pop();
self.ret.push(temp);
},
7 => { self.p = self.ret.pop(); }, // ;
n => panic!("control flow op out of range: {}", n)
}
}
}
|
{
use self::opcode::Opcode::*;
match opcode {
Unary(op) => alu::unary(op, &mut self.dat),
Binary(op) => alu::binary(op, &mut self.dat, &mut self.ret),
Stack(op) => alu::stack(op, &mut self.dat, &mut self.ret),
Register(op, inc) => self.exec_register(op, inc),
Control(op, addr) => self.exec_control(op, addr)
}
}
|
identifier_body
|
mod.rs
|
pub use self::stack::Stack;
pub use self::word::Word;
use memory;
use memory::MemoryMap;
use self::opcode::{Opcode, PartialAddr, IWord};
mod alu;
mod opcode;
mod stack;
mod word;
pub struct CPU {
p: Word,
i: IWord,
a: Word,
b: Word,
dat: Stack,
ret: Stack,
mem: MemoryMap
}
impl CPU {
fn jump(&mut self, address: PartialAddr) {
let PartialAddr(partial, bits) = address;
let bits_word = Word(bits as i32);
self.p = (self.p >> bits_word << bits_word) | partial;
}
fn exec(&mut self, opcode: Opcode) {
use self::opcode::Opcode::*;
match opcode {
Unary(op) => alu::unary(op, &mut self.dat),
Binary(op) => alu::binary(op, &mut self.dat, &mut self.ret),
|
}
}
fn exec_register(&mut self, op: u8, increment: bool) {
let (reg, reg_op) = match (op, increment) {
(3, true) => (&mut self.p, 1), // @p
(7, true) => (&mut self.p, 2), // !p
(0...3, _) => (&mut self.a, op & 3), // a @ ! a!
(4...7, _) => (&mut self.b, op & 3), // b @b !b b!
(n, f) => panic!("register op out of range: {} {}", n, f)
};
match reg_op {
0 => self.dat.push(*reg), // a b
1 => self.dat.push(self.mem.read(*reg)), // @ @b @p
2 => self.mem.write(*reg, self.dat.pop()), // ! !b !p
3 => { *reg = self.dat.pop() }, // a! b!
_ => unreachable!()
}
if increment {
*reg = memory::inc_address(*reg);
}
}
fn exec_control(&mut self, operation: u8, address: PartialAddr) {
let t = self.dat.peek();
match operation {
0 => self.jump(address), // (jump)
1 => { // (call)
self.ret.push(self.p);
self.jump(address);
}
2 => if t == Word(0) { self.jump(address) }, // if
3 => if t >= Word(0) { self.jump(address) }, // -if
4 => if self.ret.next() { self.jump(address) }, // next
5 => if self.ret.next() { self.i.restart() }, // unext
6 => { // ex
let temp = self.p;
self.p = self.ret.pop();
self.ret.push(temp);
},
7 => { self.p = self.ret.pop(); }, // ;
n => panic!("control flow op out of range: {}", n)
}
}
}
|
Stack(op) => alu::stack(op, &mut self.dat, &mut self.ret),
Register(op, inc) => self.exec_register(op, inc),
Control(op, addr) => self.exec_control(op, addr)
|
random_line_split
|
test_missing_function_pycode.py
|
"""
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
import shutil
from contextlib import redirect_stdout
from io import StringIO
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pycode import process_standard_options_for_setup_help, get_temp_folder
from pyquickhelper.texthelper import compare_module_version
from pyquickhelper.texthelper.version_helper import numeric_module_version
from pyquickhelper.pycode.setup_helper import (
clean_notebooks_for_numbers, hash_list, process_argv_for_unittest,
process_standard_options_for_setup)
class TestMissingFunctionsPycode(ExtTestCase):
def test_process_standard_options_for_setup_help(self):
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help('--help-commands')
self.assertIn('Commands processed by pyquickhelper:', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'unittests'])
self.assertIn('-f file', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'clean_space'])
self.assertIn('clean unnecessary spaces', f.getvalue())
@unittest.skipIf(sys.platform != 'win32', reason="not available")
def test_process_standard_options_for_setup(self):
temp = get_temp_folder(
__file__, "temp_process_standard_options_for_setup")
os.mkdir(os.path.join(temp, '_unittests'))
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup(
['build_script'], file_or_folder=temp, project_var_name="debug",
fLOG=print)
text = f.getvalue()
self.assertIn('[process_standard_options_for_setup]', text)
self.assertExists(os.path.join(temp, 'bin'))
def test_numeric_module_version(self):
self.assertEqual(numeric_module_version((4, 5)), (4, 5))
self.assertEqual(numeric_module_version("4.5.e"), (4, 5, 'e'))
self.assertEqual(compare_module_version(("4.5.e"), (4, 5, 'e')), 0)
self.assertEqual(compare_module_version(("4.5.e"), None), -1)
self.assertEqual(compare_module_version(None, ("4.5.e")), 1)
self.assertEqual(compare_module_version(None, None), 0)
self.assertEqual(compare_module_version(
("4.5.e"), (4, 5, 'e', 'b')), -1)
def test_clean_notebooks_for_numbers(self):
temp = get_temp_folder(__file__, "temp_clean_notebooks_for_numbers")
nb = os.path.join(temp, "..", "data", "notebook_with_svg.ipynb")
fold = os.path.join(temp, '_doc', 'notebooks')
self.assertNotExists(fold)
os.makedirs(fold)
shutil.copy(nb, fold)
res = clean_notebooks_for_numbers(temp)
self.assertEqual(len(res), 1)
with open(res[0], 'r') as f:
content = f.read()
self.assertIn('"execution_count": 1,', content)
def test_hash_list(self):
li = [4, '5']
res = hash_list(li)
self.assertEqual(res, "1402b9d4")
li = []
res = hash_list(li)
self.assertEqual(res, "d41d8cd9")
def test_process_argv_for_unittest(self):
|
if __name__ == "__main__":
unittest.main()
|
li = ['unittests', '-d', '5']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests']
res = process_argv_for_unittest(li, None)
self.assertEmpty(res)
li = ['unittests', '-e', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-g', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-f', 'test.py']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
|
identifier_body
|
test_missing_function_pycode.py
|
"""
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
import shutil
from contextlib import redirect_stdout
from io import StringIO
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pycode import process_standard_options_for_setup_help, get_temp_folder
from pyquickhelper.texthelper import compare_module_version
from pyquickhelper.texthelper.version_helper import numeric_module_version
from pyquickhelper.pycode.setup_helper import (
clean_notebooks_for_numbers, hash_list, process_argv_for_unittest,
process_standard_options_for_setup)
class TestMissingFunctionsPycode(ExtTestCase):
def test_process_standard_options_for_setup_help(self):
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help('--help-commands')
self.assertIn('Commands processed by pyquickhelper:', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'unittests'])
self.assertIn('-f file', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'clean_space'])
self.assertIn('clean unnecessary spaces', f.getvalue())
@unittest.skipIf(sys.platform != 'win32', reason="not available")
def test_process_standard_options_for_setup(self):
temp = get_temp_folder(
__file__, "temp_process_standard_options_for_setup")
os.mkdir(os.path.join(temp, '_unittests'))
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup(
['build_script'], file_or_folder=temp, project_var_name="debug",
fLOG=print)
text = f.getvalue()
self.assertIn('[process_standard_options_for_setup]', text)
self.assertExists(os.path.join(temp, 'bin'))
def test_numeric_module_version(self):
self.assertEqual(numeric_module_version((4, 5)), (4, 5))
self.assertEqual(numeric_module_version("4.5.e"), (4, 5, 'e'))
self.assertEqual(compare_module_version(("4.5.e"), (4, 5, 'e')), 0)
self.assertEqual(compare_module_version(("4.5.e"), None), -1)
self.assertEqual(compare_module_version(None, ("4.5.e")), 1)
self.assertEqual(compare_module_version(None, None), 0)
self.assertEqual(compare_module_version(
("4.5.e"), (4, 5, 'e', 'b')), -1)
def test_clean_notebooks_for_numbers(self):
temp = get_temp_folder(__file__, "temp_clean_notebooks_for_numbers")
nb = os.path.join(temp, "..", "data", "notebook_with_svg.ipynb")
fold = os.path.join(temp, '_doc', 'notebooks')
self.assertNotExists(fold)
os.makedirs(fold)
shutil.copy(nb, fold)
res = clean_notebooks_for_numbers(temp)
self.assertEqual(len(res), 1)
with open(res[0], 'r') as f:
content = f.read()
self.assertIn('"execution_count": 1,', content)
def
|
(self):
li = [4, '5']
res = hash_list(li)
self.assertEqual(res, "1402b9d4")
li = []
res = hash_list(li)
self.assertEqual(res, "d41d8cd9")
def test_process_argv_for_unittest(self):
li = ['unittests', '-d', '5']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests']
res = process_argv_for_unittest(li, None)
self.assertEmpty(res)
li = ['unittests', '-e', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-g', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-f', 'test.py']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
if __name__ == "__main__":
unittest.main()
|
test_hash_list
|
identifier_name
|
test_missing_function_pycode.py
|
"""
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
import shutil
from contextlib import redirect_stdout
from io import StringIO
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pycode import process_standard_options_for_setup_help, get_temp_folder
from pyquickhelper.texthelper import compare_module_version
from pyquickhelper.texthelper.version_helper import numeric_module_version
from pyquickhelper.pycode.setup_helper import (
clean_notebooks_for_numbers, hash_list, process_argv_for_unittest,
process_standard_options_for_setup)
class TestMissingFunctionsPycode(ExtTestCase):
def test_process_standard_options_for_setup_help(self):
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help('--help-commands')
self.assertIn('Commands processed by pyquickhelper:', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'unittests'])
self.assertIn('-f file', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'clean_space'])
self.assertIn('clean unnecessary spaces', f.getvalue())
@unittest.skipIf(sys.platform != 'win32', reason="not available")
def test_process_standard_options_for_setup(self):
temp = get_temp_folder(
__file__, "temp_process_standard_options_for_setup")
os.mkdir(os.path.join(temp, '_unittests'))
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup(
['build_script'], file_or_folder=temp, project_var_name="debug",
fLOG=print)
|
text = f.getvalue()
self.assertIn('[process_standard_options_for_setup]', text)
self.assertExists(os.path.join(temp, 'bin'))
def test_numeric_module_version(self):
self.assertEqual(numeric_module_version((4, 5)), (4, 5))
self.assertEqual(numeric_module_version("4.5.e"), (4, 5, 'e'))
self.assertEqual(compare_module_version(("4.5.e"), (4, 5, 'e')), 0)
self.assertEqual(compare_module_version(("4.5.e"), None), -1)
self.assertEqual(compare_module_version(None, ("4.5.e")), 1)
self.assertEqual(compare_module_version(None, None), 0)
self.assertEqual(compare_module_version(
("4.5.e"), (4, 5, 'e', 'b')), -1)
def test_clean_notebooks_for_numbers(self):
temp = get_temp_folder(__file__, "temp_clean_notebooks_for_numbers")
nb = os.path.join(temp, "..", "data", "notebook_with_svg.ipynb")
fold = os.path.join(temp, '_doc', 'notebooks')
self.assertNotExists(fold)
os.makedirs(fold)
shutil.copy(nb, fold)
res = clean_notebooks_for_numbers(temp)
self.assertEqual(len(res), 1)
with open(res[0], 'r') as f:
content = f.read()
self.assertIn('"execution_count": 1,', content)
def test_hash_list(self):
li = [4, '5']
res = hash_list(li)
self.assertEqual(res, "1402b9d4")
li = []
res = hash_list(li)
self.assertEqual(res, "d41d8cd9")
def test_process_argv_for_unittest(self):
li = ['unittests', '-d', '5']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests']
res = process_argv_for_unittest(li, None)
self.assertEmpty(res)
li = ['unittests', '-e', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-g', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-f', 'test.py']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
if __name__ == "__main__":
unittest.main()
|
random_line_split
|
|
test_missing_function_pycode.py
|
"""
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
import shutil
from contextlib import redirect_stdout
from io import StringIO
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pycode import process_standard_options_for_setup_help, get_temp_folder
from pyquickhelper.texthelper import compare_module_version
from pyquickhelper.texthelper.version_helper import numeric_module_version
from pyquickhelper.pycode.setup_helper import (
clean_notebooks_for_numbers, hash_list, process_argv_for_unittest,
process_standard_options_for_setup)
class TestMissingFunctionsPycode(ExtTestCase):
def test_process_standard_options_for_setup_help(self):
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help('--help-commands')
self.assertIn('Commands processed by pyquickhelper:', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'unittests'])
self.assertIn('-f file', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'clean_space'])
self.assertIn('clean unnecessary spaces', f.getvalue())
@unittest.skipIf(sys.platform != 'win32', reason="not available")
def test_process_standard_options_for_setup(self):
temp = get_temp_folder(
__file__, "temp_process_standard_options_for_setup")
os.mkdir(os.path.join(temp, '_unittests'))
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup(
['build_script'], file_or_folder=temp, project_var_name="debug",
fLOG=print)
text = f.getvalue()
self.assertIn('[process_standard_options_for_setup]', text)
self.assertExists(os.path.join(temp, 'bin'))
def test_numeric_module_version(self):
self.assertEqual(numeric_module_version((4, 5)), (4, 5))
self.assertEqual(numeric_module_version("4.5.e"), (4, 5, 'e'))
self.assertEqual(compare_module_version(("4.5.e"), (4, 5, 'e')), 0)
self.assertEqual(compare_module_version(("4.5.e"), None), -1)
self.assertEqual(compare_module_version(None, ("4.5.e")), 1)
self.assertEqual(compare_module_version(None, None), 0)
self.assertEqual(compare_module_version(
("4.5.e"), (4, 5, 'e', 'b')), -1)
def test_clean_notebooks_for_numbers(self):
temp = get_temp_folder(__file__, "temp_clean_notebooks_for_numbers")
nb = os.path.join(temp, "..", "data", "notebook_with_svg.ipynb")
fold = os.path.join(temp, '_doc', 'notebooks')
self.assertNotExists(fold)
os.makedirs(fold)
shutil.copy(nb, fold)
res = clean_notebooks_for_numbers(temp)
self.assertEqual(len(res), 1)
with open(res[0], 'r') as f:
content = f.read()
self.assertIn('"execution_count": 1,', content)
def test_hash_list(self):
li = [4, '5']
res = hash_list(li)
self.assertEqual(res, "1402b9d4")
li = []
res = hash_list(li)
self.assertEqual(res, "d41d8cd9")
def test_process_argv_for_unittest(self):
li = ['unittests', '-d', '5']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests']
res = process_argv_for_unittest(li, None)
self.assertEmpty(res)
li = ['unittests', '-e', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-g', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-f', 'test.py']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
if __name__ == "__main__":
|
unittest.main()
|
conditional_block
|
|
easy-session-tests.ts
|
/// <reference path="./easy-session.d.ts" />
/// <reference path="../express-session/express-session.d.ts" />
/// <reference path="../cookie-parser/cookie-parser.d.ts" />
import express = require('express');
import session = require('express-session');
import cookieParser = require('cookie-parser');
import easySession = require('easy-session'); // Require the module : line 1
var app = express();
app.use(cookieParser('secret'));
app.use(session({
secret: 'keyboard cat',
resave: false,
saveUninitialized: true
}));
app.use(easySession.main(session));
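// The routes below exercise the session helpers (login/logout, role and freshness checks) added by easy-session.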
app.get('/login', function (req, res, next) {
req.session.login('user', {email: 'email'}, function (err) {
if(err) {
res.send(500);
return;
}
res.send(200);
});
});
|
req.session.logout(function (err) {
if(err) {
res.send(500);
return;
}
res.send(200);
});
});
app.get('/isloggedin', function (req, res, next) {
res.send(req.session.isLoggedIn('user'));
});
app.get('/isfresh', function (req, res, next) {
res.send(req.session.isFresh());
});
app.get('/hasrole', function (req, res, next) {
res.send(req.session.hasRole('user'));
});
app.post('/setrole', function (req, res, next) {
req.session.setRole(req.query.role);
res.send(200);
});
app.get('/getrole', function (req, res, next) {
res.send(req.session.getRole());
});
app.use(easySession.isLoggedIn());
app.use(easySession.isFresh());
app.use(easySession.checkRole('user'));
|
app.post('/logout', function (req, res, next) {
|
random_line_split
|
easy-session-tests.ts
|
/// <reference path="./easy-session.d.ts" />
/// <reference path="../express-session/express-session.d.ts" />
/// <reference path="../cookie-parser/cookie-parser.d.ts" />
import express = require('express');
import session = require('express-session');
import cookieParser = require('cookie-parser');
import easySession = require('easy-session'); // Require the module : line 1
var app = express();
app.use(cookieParser('secret'));
app.use(session({
secret: 'keyboard cat',
resave: false,
saveUninitialized: true
}));
app.use(easySession.main(session));
app.get('/login', function (req, res, next) {
req.session.login('user', {email: 'email'}, function (err) {
if(err)
|
res.send(200);
});
});
app.post('/logout', function (req, res, next) {
req.session.logout(function (err) {
if(err) {
res.send(500);
return;
}
res.send(200);
});
});
app.get('/isloggedin', function (req, res, next) {
res.send(req.session.isLoggedIn('user'));
});
app.get('/isfresh', function (req, res, next) {
res.send(req.session.isFresh());
});
app.get('/hasrole', function (req, res, next) {
res.send(req.session.hasRole('user'));
});
app.post('/setrole', function (req, res, next) {
req.session.setRole(req.query.role);
res.send(200);
});
app.get('/getrole', function (req, res, next) {
res.send(req.session.getRole());
});
app.use(easySession.isLoggedIn());
app.use(easySession.isFresh());
app.use(easySession.checkRole('user'));
|
{
res.send(500);
return;
}
|
conditional_block
|
animation_queue_spec.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {AnimationQueue} from '@angular/core/src/animation/animation_queue';
import {NgZone} from '../../src/zone/ng_zone';
import {TestBed, fakeAsync, flushMicrotasks} from '../../testing';
import {MockAnimationPlayer} from '../../testing/mock_animation_player';
import {beforeEach, describe, expect, it} from '../../testing/testing_internal';
export function main() {
describe('AnimationQueue', function() {
beforeEach(() => { TestBed.configureTestingModule({declarations: [], imports: []}); });
it('should queue animation players and run when flushed, but only as the next scheduled microtask',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new MockAnimationPlayer();
const p2 = new MockAnimationPlayer();
const p3 = new MockAnimationPlayer();
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
expect(log).toEqual([]);
queue.flush();
expect(log).toEqual([]);
flushMicrotasks();
expect(log).toEqual(['1', '2', '3']);
}));
it('should always run each of the animation players outside of the angular zone on start',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onStart(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(true);
}));
it('should always run each of the animation players outside of the angular zone on done',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onDone(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(false);
player.finish();
expect(eventHasRun).toBe(true);
}));
    it('should not run animations again in case an animation fails midway', fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new PlayerThatFails(false);
const p2 = new PlayerThatFails(true);
const p3 = new PlayerThatFails(false);
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
queue.flush();
expect(() => flushMicrotasks()).toThrowError();
expect(log).toEqual(['1', '2']);
// let's reset this so that it gets triggered again
p2.reset();
|
expect(() => flushMicrotasks()).not.toThrowError();
expect(log).toEqual(['1', '2', '3']);
}));
});
}
class PlayerThatFails extends MockAnimationPlayer {
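  // Test helper: play() throws when doFail is true, simulating an animation that fails midway.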
private _animationStarted = false;
constructor(public doFail: boolean) { super(); }
play() {
super.play();
this._animationStarted = true;
if (this.doFail) {
throw new Error('Oh nooooo');
}
}
reset() { this._animationStarted = false; }
hasStarted() { return this._animationStarted; }
}
|
p2.onStart(() => log.push('2'));
queue.flush();
|
random_line_split
|
animation_queue_spec.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {AnimationQueue} from '@angular/core/src/animation/animation_queue';
import {NgZone} from '../../src/zone/ng_zone';
import {TestBed, fakeAsync, flushMicrotasks} from '../../testing';
import {MockAnimationPlayer} from '../../testing/mock_animation_player';
import {beforeEach, describe, expect, it} from '../../testing/testing_internal';
export function main()
|
class PlayerThatFails extends MockAnimationPlayer {
private _animationStarted = false;
constructor(public doFail: boolean) { super(); }
play() {
super.play();
this._animationStarted = true;
if (this.doFail) {
throw new Error('Oh nooooo');
}
}
reset() { this._animationStarted = false; }
hasStarted() { return this._animationStarted; }
}
|
{
describe('AnimationQueue', function() {
beforeEach(() => { TestBed.configureTestingModule({declarations: [], imports: []}); });
it('should queue animation players and run when flushed, but only as the next scheduled microtask',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new MockAnimationPlayer();
const p2 = new MockAnimationPlayer();
const p3 = new MockAnimationPlayer();
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
expect(log).toEqual([]);
queue.flush();
expect(log).toEqual([]);
flushMicrotasks();
expect(log).toEqual(['1', '2', '3']);
}));
it('should always run each of the animation players outside of the angular zone on start',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onStart(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(true);
}));
it('should always run each of the animation players outside of the angular zone on done',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onDone(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(false);
player.finish();
expect(eventHasRun).toBe(true);
}));
    it('should not run animations again in case an animation fails midway', fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new PlayerThatFails(false);
const p2 = new PlayerThatFails(true);
const p3 = new PlayerThatFails(false);
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
queue.flush();
expect(() => flushMicrotasks()).toThrowError();
expect(log).toEqual(['1', '2']);
// let's reset this so that it gets triggered again
p2.reset();
p2.onStart(() => log.push('2'));
queue.flush();
expect(() => flushMicrotasks()).not.toThrowError();
expect(log).toEqual(['1', '2', '3']);
}));
});
}
|
identifier_body
|
animation_queue_spec.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {AnimationQueue} from '@angular/core/src/animation/animation_queue';
import {NgZone} from '../../src/zone/ng_zone';
import {TestBed, fakeAsync, flushMicrotasks} from '../../testing';
import {MockAnimationPlayer} from '../../testing/mock_animation_player';
import {beforeEach, describe, expect, it} from '../../testing/testing_internal';
export function main() {
describe('AnimationQueue', function() {
beforeEach(() => { TestBed.configureTestingModule({declarations: [], imports: []}); });
it('should queue animation players and run when flushed, but only as the next scheduled microtask',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new MockAnimationPlayer();
const p2 = new MockAnimationPlayer();
const p3 = new MockAnimationPlayer();
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
expect(log).toEqual([]);
queue.flush();
expect(log).toEqual([]);
flushMicrotasks();
expect(log).toEqual(['1', '2', '3']);
}));
it('should always run each of the animation players outside of the angular zone on start',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onStart(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(true);
}));
it('should always run each of the animation players outside of the angular zone on done',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onDone(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(false);
player.finish();
expect(eventHasRun).toBe(true);
}));
    it('should not run animations again in case an animation fails midway', fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new PlayerThatFails(false);
const p2 = new PlayerThatFails(true);
const p3 = new PlayerThatFails(false);
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
queue.flush();
expect(() => flushMicrotasks()).toThrowError();
expect(log).toEqual(['1', '2']);
// let's reset this so that it gets triggered again
p2.reset();
p2.onStart(() => log.push('2'));
queue.flush();
expect(() => flushMicrotasks()).not.toThrowError();
expect(log).toEqual(['1', '2', '3']);
}));
});
}
class PlayerThatFails extends MockAnimationPlayer {
private _animationStarted = false;
|
(public doFail: boolean) { super(); }
play() {
super.play();
this._animationStarted = true;
if (this.doFail) {
throw new Error('Oh nooooo');
}
}
reset() { this._animationStarted = false; }
hasStarted() { return this._animationStarted; }
}
|
constructor
|
identifier_name
|
animation_queue_spec.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {AnimationQueue} from '@angular/core/src/animation/animation_queue';
import {NgZone} from '../../src/zone/ng_zone';
import {TestBed, fakeAsync, flushMicrotasks} from '../../testing';
import {MockAnimationPlayer} from '../../testing/mock_animation_player';
import {beforeEach, describe, expect, it} from '../../testing/testing_internal';
export function main() {
describe('AnimationQueue', function() {
beforeEach(() => { TestBed.configureTestingModule({declarations: [], imports: []}); });
it('should queue animation players and run when flushed, but only as the next scheduled microtask',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new MockAnimationPlayer();
const p2 = new MockAnimationPlayer();
const p3 = new MockAnimationPlayer();
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
expect(log).toEqual([]);
queue.flush();
expect(log).toEqual([]);
flushMicrotasks();
expect(log).toEqual(['1', '2', '3']);
}));
it('should always run each of the animation players outside of the angular zone on start',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onStart(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(true);
}));
it('should always run each of the animation players outside of the angular zone on done',
fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const player = new MockAnimationPlayer();
let eventHasRun = false;
player.onDone(() => {
NgZone.assertNotInAngularZone();
eventHasRun = true;
});
zone.run(() => {
NgZone.assertInAngularZone();
queue.enqueue(player);
queue.flush();
flushMicrotasks();
});
expect(eventHasRun).toBe(false);
player.finish();
expect(eventHasRun).toBe(true);
}));
    it('should not run animations again in case an animation fails midway', fakeAsync(() => {
const zone = TestBed.get(NgZone);
const queue = new AnimationQueue(zone);
const log: string[] = [];
const p1 = new PlayerThatFails(false);
const p2 = new PlayerThatFails(true);
const p3 = new PlayerThatFails(false);
p1.onStart(() => log.push('1'));
p2.onStart(() => log.push('2'));
p3.onStart(() => log.push('3'));
queue.enqueue(p1);
queue.enqueue(p2);
queue.enqueue(p3);
queue.flush();
expect(() => flushMicrotasks()).toThrowError();
expect(log).toEqual(['1', '2']);
// let's reset this so that it gets triggered again
p2.reset();
p2.onStart(() => log.push('2'));
queue.flush();
expect(() => flushMicrotasks()).not.toThrowError();
expect(log).toEqual(['1', '2', '3']);
}));
});
}
class PlayerThatFails extends MockAnimationPlayer {
private _animationStarted = false;
constructor(public doFail: boolean) { super(); }
play() {
super.play();
this._animationStarted = true;
if (this.doFail)
|
}
reset() { this._animationStarted = false; }
hasStarted() { return this._animationStarted; }
}
|
{
throw new Error('Oh nooooo');
}
|
conditional_block
|
configuration.py
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
DEFAULT_AS = 64512
OPTIONS = [
cfg.StrOpt('provider_rules_path'),
cfg.IntOpt('asn', default=DEFAULT_AS),
cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
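    # Assemble the full router configuration from provider rules, per-port network configs, and the default gateway.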
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
gateway = get_default_v4_gateway(client, router, networks)
return {
'asn': cfg.CONF.asn,
'neighbor_asn': cfg.CONF.neighbor_asn,
'default_v4_gateway': gateway,
'networks': networks,
'labels': provider_rules.get('labels', {}),
'floating_ips': generate_floating_config(router),
'tenant_id': router.tenant_id,
'hostname': router.name
}
def get_default_v4_gateway(client, router, networks):
"""Find the IPv4 default gateway for the router.
"""
LOG.debug('networks = %r', networks)
LOG.debug('external interface = %s', router.external_port.mac_address)
# Now find the subnet that our external IP is on, and return its
# gateway.
for n in networks:
if n['network_type'] == EXTERNAL_NET:
v4_addresses = [
addr
for addr in (netaddr.IPAddress(ip.partition('/')[0])
for ip in n['interface']['addresses'])
if addr.version == 4
]
for s in n['subnets']:
subnet = netaddr.IPNetwork(s['cidr'])
if subnet.version != 4:
continue
LOG.debug(
'%s: checking if subnet %s should have the default route',
router.id, s['cidr'])
for addr in v4_addresses:
if addr in subnet:
LOG.debug(
'%s: found gateway %s for subnet %s on network %s',
router.id,
s['gateway_ip'],
s['cidr'],
n['network_id'],
)
return s['gateway_ip']
# Sometimes we are asked to build a configuration for the server
# when the external interface is still marked as "down". We can
# report that case, but we don't treat it as an error here because
# we'll be asked to do it again when the interface comes up.
LOG.info('%s: no default gateway was found', router.id)
return ''
def load_provider_rules(path):
try:
return jsonutils.load(open(path))
except: # pragma nocover
LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
retval = [
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
EXTERNAL_NET),
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
return retval
def _management_network_config(port, ifname, interfaces):
for iface in interfaces:
if iface['ifname'] == ifname:
return _make_network_config_dict(
iface, MANAGEMENT_NET, port.network_id)
def _network_config(client, port, ifname, network_type, network_ports=[]):
subnets = client.get_network_subnets(port.network_id)
subnets_dict = dict((s.id, s) for s in subnets)
return _make_network_config_dict(
_interface_config(ifname, port, subnets_dict),
network_type,
port.network_id,
subnets_dict=subnets_dict,
network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
subnets_dict={}, network_ports=[]):
return {'interface': interface,
'network_id': network_id,
'v4_conf_service': v4_conf,
'v6_conf_service': v6_conf,
'network_type': network_type,
'subnets': [_subnet_config(s) for s in subnets_dict.values()],
'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def _subnet_config(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
|
def generate_floating_config(router):
return [
{'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
for fip in router.floating_ips
]
|
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
|
identifier_body
|
configuration.py
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
DEFAULT_AS = 64512
OPTIONS = [
cfg.StrOpt('provider_rules_path'),
cfg.IntOpt('asn', default=DEFAULT_AS),
cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
gateway = get_default_v4_gateway(client, router, networks)
return {
'asn': cfg.CONF.asn,
'neighbor_asn': cfg.CONF.neighbor_asn,
'default_v4_gateway': gateway,
'networks': networks,
'labels': provider_rules.get('labels', {}),
'floating_ips': generate_floating_config(router),
'tenant_id': router.tenant_id,
'hostname': router.name
}
def get_default_v4_gateway(client, router, networks):
"""Find the IPv4 default gateway for the router.
"""
LOG.debug('networks = %r', networks)
LOG.debug('external interface = %s', router.external_port.mac_address)
# Now find the subnet that our external IP is on, and return its
# gateway.
for n in networks:
if n['network_type'] == EXTERNAL_NET:
v4_addresses = [
addr
for addr in (netaddr.IPAddress(ip.partition('/')[0])
for ip in n['interface']['addresses'])
if addr.version == 4
]
for s in n['subnets']:
subnet = netaddr.IPNetwork(s['cidr'])
if subnet.version != 4:
continue
LOG.debug(
'%s: checking if subnet %s should have the default route',
router.id, s['cidr'])
for addr in v4_addresses:
if addr in subnet:
LOG.debug(
'%s: found gateway %s for subnet %s on network %s',
router.id,
s['gateway_ip'],
s['cidr'],
n['network_id'],
)
return s['gateway_ip']
# Sometimes we are asked to build a configuration for the server
# when the external interface is still marked as "down". We can
# report that case, but we don't treat it as an error here because
# we'll be asked to do it again when the interface comes up.
LOG.info('%s: no default gateway was found', router.id)
return ''
def load_provider_rules(path):
try:
return jsonutils.load(open(path))
except: # pragma nocover
LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
retval = [
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
EXTERNAL_NET),
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
return retval
def _management_network_config(port, ifname, interfaces):
for iface in interfaces:
if iface['ifname'] == ifname:
return _make_network_config_dict(
iface, MANAGEMENT_NET, port.network_id)
def _network_config(client, port, ifname, network_type, network_ports=[]):
subnets = client.get_network_subnets(port.network_id)
subnets_dict = dict((s.id, s) for s in subnets)
return _make_network_config_dict(
_interface_config(ifname, port, subnets_dict),
network_type,
port.network_id,
subnets_dict=subnets_dict,
network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
subnets_dict={}, network_ports=[]):
return {'interface': interface,
'network_id': network_id,
'v4_conf_service': v4_conf,
'v6_conf_service': v6_conf,
'network_type': network_type,
'subnets': [_subnet_config(s) for s in subnets_dict.values()],
'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def
|
(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
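    # the regex below turns an IP address into a hostname-safe label (':' and '.' become '-')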
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
def generate_floating_config(router):
return [
{'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
for fip in router.floating_ips
]
|
_subnet_config
|
identifier_name
|
configuration.py
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
DEFAULT_AS = 64512
OPTIONS = [
cfg.StrOpt('provider_rules_path'),
cfg.IntOpt('asn', default=DEFAULT_AS),
cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
gateway = get_default_v4_gateway(client, router, networks)
return {
'asn': cfg.CONF.asn,
'neighbor_asn': cfg.CONF.neighbor_asn,
'default_v4_gateway': gateway,
'networks': networks,
'labels': provider_rules.get('labels', {}),
'floating_ips': generate_floating_config(router),
'tenant_id': router.tenant_id,
'hostname': router.name
}
def get_default_v4_gateway(client, router, networks):
"""Find the IPv4 default gateway for the router.
"""
LOG.debug('networks = %r', networks)
LOG.debug('external interface = %s', router.external_port.mac_address)
# Now find the subnet that our external IP is on, and return its
# gateway.
for n in networks:
if n['network_type'] == EXTERNAL_NET:
v4_addresses = [
addr
for addr in (netaddr.IPAddress(ip.partition('/')[0])
for ip in n['interface']['addresses'])
if addr.version == 4
]
for s in n['subnets']:
subnet = netaddr.IPNetwork(s['cidr'])
if subnet.version != 4:
continue
LOG.debug(
'%s: checking if subnet %s should have the default route',
router.id, s['cidr'])
for addr in v4_addresses:
if addr in subnet:
LOG.debug(
'%s: found gateway %s for subnet %s on network %s',
router.id,
s['gateway_ip'],
s['cidr'],
n['network_id'],
)
return s['gateway_ip']
# Sometimes we are asked to build a configuration for the server
# when the external interface is still marked as "down". We can
# report that case, but we don't treat it as an error here because
# we'll be asked to do it again when the interface comes up.
LOG.info('%s: no default gateway was found', router.id)
return ''
def load_provider_rules(path):
try:
return jsonutils.load(open(path))
except: # pragma nocover
LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
retval = [
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
EXTERNAL_NET),
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
return retval
def _management_network_config(port, ifname, interfaces):
for iface in interfaces:
if iface['ifname'] == ifname:
|
def _network_config(client, port, ifname, network_type, network_ports=[]):
subnets = client.get_network_subnets(port.network_id)
subnets_dict = dict((s.id, s) for s in subnets)
return _make_network_config_dict(
_interface_config(ifname, port, subnets_dict),
network_type,
port.network_id,
subnets_dict=subnets_dict,
network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
subnets_dict={}, network_ports=[]):
return {'interface': interface,
'network_id': network_id,
'v4_conf_service': v4_conf,
'v6_conf_service': v6_conf,
'network_type': network_type,
'subnets': [_subnet_config(s) for s in subnets_dict.values()],
'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def _subnet_config(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
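    # the regex below turns an IP address into a hostname-safe label (':' and '.' become '-')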
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
def generate_floating_config(router):
return [
{'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
for fip in router.floating_ips
]
|
return _make_network_config_dict(
iface, MANAGEMENT_NET, port.network_id)
|
conditional_block
|
configuration.py
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
DEFAULT_AS = 64512
OPTIONS = [
cfg.StrOpt('provider_rules_path'),
cfg.IntOpt('asn', default=DEFAULT_AS),
cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
gateway = get_default_v4_gateway(client, router, networks)
return {
'asn': cfg.CONF.asn,
'neighbor_asn': cfg.CONF.neighbor_asn,
'default_v4_gateway': gateway,
'networks': networks,
'labels': provider_rules.get('labels', {}),
'floating_ips': generate_floating_config(router),
'tenant_id': router.tenant_id,
'hostname': router.name
}
def get_default_v4_gateway(client, router, networks):
"""Find the IPv4 default gateway for the router.
"""
LOG.debug('networks = %r', networks)
LOG.debug('external interface = %s', router.external_port.mac_address)
# Now find the subnet that our external IP is on, and return its
# gateway.
for n in networks:
if n['network_type'] == EXTERNAL_NET:
v4_addresses = [
addr
for addr in (netaddr.IPAddress(ip.partition('/')[0])
for ip in n['interface']['addresses'])
if addr.version == 4
]
for s in n['subnets']:
subnet = netaddr.IPNetwork(s['cidr'])
if subnet.version != 4:
continue
LOG.debug(
'%s: checking if subnet %s should have the default route',
router.id, s['cidr'])
for addr in v4_addresses:
if addr in subnet:
LOG.debug(
'%s: found gateway %s for subnet %s on network %s',
router.id,
s['gateway_ip'],
s['cidr'],
n['network_id'],
)
return s['gateway_ip']
# Sometimes we are asked to build a configuration for the server
# when the external interface is still marked as "down". We can
# report that case, but we don't treat it as an error here because
# we'll be asked to do it again when the interface comes up.
LOG.info('%s: no default gateway was found', router.id)
return ''
def load_provider_rules(path):
try:
return jsonutils.load(open(path))
except: # pragma nocover
LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
retval = [
|
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
return retval
def _management_network_config(port, ifname, interfaces):
for iface in interfaces:
if iface['ifname'] == ifname:
return _make_network_config_dict(
iface, MANAGEMENT_NET, port.network_id)
def _network_config(client, port, ifname, network_type, network_ports=[]):
subnets = client.get_network_subnets(port.network_id)
subnets_dict = dict((s.id, s) for s in subnets)
return _make_network_config_dict(
_interface_config(ifname, port, subnets_dict),
network_type,
port.network_id,
subnets_dict=subnets_dict,
network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
subnets_dict={}, network_ports=[]):
return {'interface': interface,
'network_id': network_id,
'v4_conf_service': v4_conf,
'v6_conf_service': v6_conf,
'network_type': network_type,
'subnets': [_subnet_config(s) for s in subnets_dict.values()],
'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def _subnet_config(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
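    # the regex below turns an IP address into a hostname-safe label (':' and '.' become '-')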
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
def generate_floating_config(router):
return [
{'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
for fip in router.floating_ips
]
|
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
EXTERNAL_NET),
|
random_line_split
|
yahoo.py
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
clean_html,
unescapeHTML,
ExtractorError,
int_or_none,
mimetype2ext,
)
from .nbc import NBCSportsVPlayerIE
class
|
(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+)?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'info_dict': {
'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
'duration': 6863,
},
},
{
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
'duration': 151,
},
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
'title': "Yahoo Saves 'Community'",
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
'duration': 170,
}
},
{
'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/敢問市長-黃秀霜批賴清德-非常高傲-033009720.html',
'md5': '3a09cf59349cfaddae1797acc3c087fc',
'info_dict': {
'id': 'cac903b3-fcf4-3c14-b632-643ab541712f',
'ext': 'mp4',
'title': '敢問市長/黃秀霜批賴清德「非常高傲」',
'description': '直言台南沒捷運 交通居五都之末',
'duration': 396,
}
},
{
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
'md5': '0b51660361f0e27c9789e7037ef76f4b',
'info_dict': {
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
'ext': 'mp4',
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
'description': 'md5:f66c890e1490f4910a9953c941dee944',
'duration': 97,
}
},
{
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
'md5': '57e06440778b1828a6079d2f744212c4',
'info_dict': {
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
'ext': 'mp4',
'title': 'Program that makes hockey more affordable not offered in Manitoba',
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
'duration': 121,
}
}, {
'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html',
'md5': '226a895aae7e21b0129e2a2006fe9690',
'info_dict': {
'id': 'e624c4bc-3389-34de-9dfc-025f74943409',
'ext': 'mp4',
'title': '\'The Interview\' TV Spot: War',
'description': 'The Interview',
'duration': 30,
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
'duration': 128,
}
}, {
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
'md5': 'd9a083ccf1379127bf25699d67e4791b',
'info_dict': {
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
'ext': 'mp4',
'title': 'Connect the Dots: Dark Side of Virgo',
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
'duration': 201,
}
}, {
'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
'md5': '989396ae73d20c6f057746fb226aa215',
'info_dict': {
'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
'ext': 'mp4',
'title': '\'True Story\' Trailer',
'description': 'True Story',
'duration': 150,
},
}, {
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
'only_matching': True,
}, {
'note': 'NBC Sports embeds',
'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
'info_dict': {
'id': '9CsDKds0kvHI',
'ext': 'flv',
'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
}
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
}, {
# Query result is embedded in webpage, but explicit request to video API fails with geo restriction
'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
'info_dict': {
'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
'ext': 'mp4',
'title': 'Communitary - Community Episode 1: Ladders',
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
}, {
# it uses an alias to get the video_id
'url': 'https://www.yahoo.com/movies/the-stars-of-daddys-home-have-very-different-212843197.html',
'info_dict': {
'id': '40eda9c8-8e5f-3552-8745-830f67d0c737',
'ext': 'mp4',
'title': 'Will Ferrell & Mark Wahlberg Are Pro-Spanking',
'description': 'While they play feuding fathers in \'Daddy\'s Home,\' star Will Ferrell & Mark Wahlberg share their true feelings on parenthood.',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id') or self._match_id(url)
page_id = mobj.group('id')
url = mobj.group('url')
host = mobj.group('host')
webpage = self._download_webpage(url, display_id)
# Look for iframed media first
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
if iframe_m:
iframepage = self._download_webpage(
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
items_json = self._search_regex(
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
if items_json:
items = json.loads(items_json)
video_id = items[0]['id']
return self._get_info(video_id, display_id, webpage)
# Look for NBCSports iframes
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
        # The query result is often embedded in the webpage as JSON. Sometimes an explicit
        # request to the video API fails with a geo restriction error, so using the
        # embedded query result when present is preferable.
config_json = self._search_regex(
r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
webpage, 'videoplayer applet', default=None)
if config_json:
config = self._parse_json(config_json, display_id, fatal=False)
if config:
sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
if sapi:
return self._extract_info(display_id, sapi, webpage)
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
alias = self._search_regex(
r'"aliases":{"video":"(.*?)"', webpage, 'alias', default=None)
if alias is not None:
alias_info = self._download_json(
'https://www.yahoo.com/_td/api/resource/VideoService.videos;video_aliases=["%s"]' % alias,
display_id, 'Downloading alias info')
video_id = alias_info[0]['id']
else:
CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
r'"first_videoid"\s*:\s*"([^"]+)"',
r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
r'<article[^>]data-uuid=["\']([^"\']+)',
r'yahoo://article/view\?.*\buuid=([^&"\']+)',
]
video_id = self._search_regex(
CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
            # The 'meta' field is not always in the video webpage, so we request it
            # from another page
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
def _extract_info(self, display_id, query, webpage):
info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
if msg:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, msg), expected=True)
raise ExtractorError('Unable to extract media object meta')
formats = []
for s in info['streams']:
format_info = {
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'tbr': int_or_none(s.get('bitrate')),
}
host = s['host']
path = s['path']
if host.startswith('rtmp'):
format_info.update({
'url': host,
'play_path': path,
'ext': 'flv',
})
else:
if s.get('format') == 'm3u8_playlist':
format_info['protocol'] = 'm3u8_native'
format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
self._sort_formats(formats)
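        # subtitles are exposed in the page markup as a JSON array under "closedcaptions"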
closed_captions = self._html_search_regex(
r'"closedcaptions":(\[[^\]]+\])', webpage, 'closed captions',
default='[]')
cc_json = self._parse_json(closed_captions, video_id, fatal=False)
subtitles = {}
if cc_json:
for closed_caption in cc_json:
lang = closed_caption['lang']
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append({
'url': closed_caption['url'],
'ext': mimetype2ext(closed_caption['content_type']),
})
return {
'id': video_id,
'display_id': display_id,
'title': unescapeHTML(meta['title']),
'formats': formats,
'description': clean_html(meta['description']),
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
'duration': int_or_none(meta.get('duration')),
'subtitles': subtitles,
}
def _get_info(self, video_id, display_id, webpage):
region = self._search_regex(
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
webpage, 'region', fatal=False, default='US')
data = compat_urllib_parse.urlencode({
'protocol': 'http',
'region': region,
})
query_url = (
'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
'{id}?{data}'.format(id=video_id, data=data))
query_result = self._download_json(
query_url, display_id, 'Downloading video info')
return self._extract_info(display_id, query_result, webpage)
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
_MAX_RESULTS = 1000
IE_NAME = 'screen.yahoo:search'
_SEARCH_KEY = 'yvsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
entries = []
for pagenum in itertools.count(0):
result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
info = self._download_json(result_url, query,
note='Downloading results page ' + str(pagenum + 1))
m = info['m']
results = info['results']
for (i, r) in enumerate(results):
if (pagenum * 30) + i >= n:
break
mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
entries.append(e)
if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
break
return {
'_type': 'playlist',
'id': query,
'entries': entries,
}
|
YahooIE
|
identifier_name
|
yahoo.py
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
clean_html,
unescapeHTML,
ExtractorError,
int_or_none,
mimetype2ext,
)
from .nbc import NBCSportsVPlayerIE
class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+)?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'info_dict': {
'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
'duration': 6863,
},
},
{
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
'duration': 151,
},
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
'title': "Yahoo Saves 'Community'",
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
'duration': 170,
}
},
{
'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/敢問市長-黃秀霜批賴清德-非常高傲-033009720.html',
'md5': '3a09cf59349cfaddae1797acc3c087fc',
'info_dict': {
'id': 'cac903b3-fcf4-3c14-b632-643ab541712f',
'ext': 'mp4',
'title': '敢問市長/黃秀霜批賴清德「非常高傲」',
'description': '直言台南沒捷運 交通居五都之末',
'duration': 396,
}
},
{
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
'md5': '0b51660361f0e27c9789e7037ef76f4b',
'info_dict': {
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
'ext': 'mp4',
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
'description': 'md5:f66c890e1490f4910a9953c941dee944',
'duration': 97,
}
},
{
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
'md5': '57e06440778b1828a6079d2f744212c4',
'info_dict': {
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
'ext': 'mp4',
'title': 'Program that makes hockey more affordable not offered in Manitoba',
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
'duration': 121,
}
}, {
'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html',
'md5': '226a895aae7e21b0129e2a2006fe9690',
'info_dict': {
'id': 'e624c4bc-3389-34de-9dfc-025f74943409',
'ext': 'mp4',
'title': '\'The Interview\' TV Spot: War',
'description': 'The Interview',
'duration': 30,
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
'duration': 128,
}
}, {
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
'md5': 'd9a083ccf1379127bf25699d67e4791b',
'info_dict': {
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
'ext': 'mp4',
'title': 'Connect the Dots: Dark Side of Virgo',
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
'duration': 201,
}
}, {
'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
'md5': '989396ae73d20c6f057746fb226aa215',
'info_dict': {
'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
'ext': 'mp4',
'title': '\'True Story\' Trailer',
'description': 'True Story',
'duration': 150,
},
}, {
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
'only_matching': True,
}, {
'note': 'NBC Sports embeds',
'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
'info_dict': {
'id': '9CsDKds0kvHI',
'ext': 'flv',
'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
}
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
}, {
# Query result is embedded in webpage, but explicit request to video API fails with geo restriction
'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
'info_dict': {
'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
'ext': 'mp4',
'title': 'Communitary - Community Episode 1: Ladders',
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
}, {
# it uses an alias to get the video_id
'url': 'https://www.yahoo.com/movies/the-stars-of-daddys-home-have-very-different-212843197.html',
'info_dict': {
'id': '40eda9c8-8e5f-3552-8745-830f67d0c737',
'ext': 'mp4',
'title': 'Will Ferrell & Mark Wahlberg Are Pro-Spanking',
'description': 'While they play feuding fathers in \'Daddy\'s Home,\' star Will Ferrell & Mark Wahlberg share their true feelings on parenthood.',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id') or self._match_id(url)
page_id = mobj.group('id')
url = mobj.group('url')
host = mobj.group('host')
webpage = self._download_webpage(url, display_id)
# Look for iframed media first
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
if iframe_m:
iframepage = self._download_webpage(
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
items_json = self._search_regex(
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
if items_json:
items = json.loads(items_json)
video_id = items[0]['id']
return self._get_info(video_id, display_id, webpage)
# Look for NBCSports iframes
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
        # The query result is often embedded in the webpage as JSON. Sometimes an explicit
        # request to the video API fails with a geo restriction error, so using the
        # embedded query result when present is preferable.
config_json = self._search_regex(
r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
webpage, 'videoplayer applet', default=None)
if config_json:
config = self._parse_json(config_json, display_id, fatal=False)
if config:
sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
if sapi:
return self._extract_info(display_id, sapi, webpage)
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
alias = self._search_regex(
r'"aliases":{"video":"(.*?)"', webpage, 'alias', default=None)
if alias is not None:
alias_info = self._download_json(
'https://www.yahoo.com/_td/api/resource/VideoService.videos;video_aliases=["%s"]' % alias,
display_id, 'Downloading alias info')
video_id = alias_info[0]['id']
else:
CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
r'"first_videoid"\s*:\s*"([^"]+)"',
r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
r'<article[^>]data-uuid=["\']([^"\']+)',
r'yahoo://article/view\?.*\buuid=([^&"\']+)',
]
video_id = self._search_regex(
CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
            # The 'meta' field is not always in the video webpage, so we request it
            # from another page
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
def _extract_info(self, display_id, query, webpage):
info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
if msg:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, msg), expected=True)
raise ExtractorError('Unable to extract media object meta')
formats = []
for s in info['streams']:
format_info = {
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'tbr': int_or_none(s.get('bitrate')),
}
host = s['host']
path = s['path']
if host.startswith('rtmp'):
format_info.update({
'url': host,
'play_path': path,
'ext': 'flv',
})
else:
if s.get('format') == 'm3u8_playlist':
format_info['protocol'] = 'm3u8_native'
format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
self._sort_formats(formats)
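        # subtitles are exposed in the page markup as a JSON array under "closedcaptions"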
closed_captions = self._html_search_regex(
r'"closedcaptions":(\[[^\]]+\])', webpage, 'closed captions',
default='[]')
cc_json = self._parse_json(closed_captions, video_id, fatal=False)
subtitles = {}
if cc_json:
for closed_caption in cc_json:
lang = closed_caption['lang']
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append({
'url': closed_caption['url'],
'ext': mimetype2ext(closed_caption['content_type']),
})
return {
'id': video_id,
'display_id': display_id,
'title': unescapeHTML(meta['title']),
'formats': formats,
'description': clean_html(meta['description']),
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
'duration': int_or_none(meta.get('duration')),
'subtitles': subtitles,
}
def _get_info(self, video_id, display_id, webpage):
region = self._search_regex(
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
webpage, 'region', fatal=False, default='US')
data = compat_urllib_parse.urlencode({
'protocol': 'http',
'region': region,
})
query_url = (
'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
'{id}?{data}'.format(id=video_id, data=data))
query_result = self._download_json(
query_url, display_id, 'Downloading video info')
return self._extract_info(display_id, query_result, webpage)
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
_MAX_RESULTS = 1000
IE_NAME = 'screen.yahoo:search'
_SEARCH_KEY = 'yvsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
entries = []
for pagenum in itertools.count(0):
result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_u
|
ies': entries,
}
|
rllib_parse.quote_plus(query), pagenum * 30)
info = self._download_json(result_url, query,
note='Downloading results page ' + str(pagenum + 1))
m = info['m']
results = info['results']
for (i, r) in enumerate(results):
if (pagenum * 30) + i >= n:
break
mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
entries.append(e)
if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
break
return {
'_type': 'playlist',
'id': query,
'entr
|
conditional_block
|
yahoo.py
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
clean_html,
unescapeHTML,
ExtractorError,
int_or_none,
mimetype2ext,
)
from .nbc import NBCSportsVPlayerIE
class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+)?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'info_dict': {
'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
'duration': 6863,
},
},
{
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
'duration': 151,
},
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
'title': "Yahoo Saves 'Community'",
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
'duration': 170,
}
},
{
'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/敢問市長-黃秀霜批賴清德-非常高傲-033009720.html',
'md5': '3a09cf59349cfaddae1797acc3c087fc',
'info_dict': {
'id': 'cac903b3-fcf4-3c14-b632-643ab541712f',
'ext': 'mp4',
'title': '敢問市長/黃秀霜批賴清德「非常高傲」',
'description': '直言台南沒捷運 交通居五都之末',
'duration': 396,
}
},
{
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
'md5': '0b51660361f0e27c9789e7037ef76f4b',
'info_dict': {
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
'ext': 'mp4',
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
'description': 'md5:f66c890e1490f4910a9953c941dee944',
'duration': 97,
}
},
{
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
'md5': '57e06440778b1828a6079d2f744212c4',
'info_dict': {
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
'ext': 'mp4',
'title': 'Program that makes hockey more affordable not offered in Manitoba',
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
'duration': 121,
}
}, {
'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html',
'md5': '226a895aae7e21b0129e2a2006fe9690',
'info_dict': {
'id': 'e624c4bc-3389-34de-9dfc-025f74943409',
'ext': 'mp4',
'title': '\'The Interview\' TV Spot: War',
'description': 'The Interview',
'duration': 30,
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
'duration': 128,
}
}, {
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
'md5': 'd9a083ccf1379127bf25699d67e4791b',
'info_dict': {
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
'ext': 'mp4',
'title': 'Connect the Dots: Dark Side of Virgo',
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
'duration': 201,
}
}, {
'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
'md5': '989396ae73d20c6f057746fb226aa215',
'info_dict': {
'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
'ext': 'mp4',
'title': '\'True Story\' Trailer',
'description': 'True Story',
'duration': 150,
},
}, {
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
'only_matching': True,
}, {
'note': 'NBC Sports embeds',
'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
'info_dict': {
'id': '9CsDKds0kvHI',
'ext': 'flv',
'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
}
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
}, {
# Query result is embedded in webpage, but explicit request to video API fails with geo restriction
'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
'info_dict': {
'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
'ext': 'mp4',
'title': 'Communitary - Community Episode 1: Ladders',
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
}, {
# it uses an alias to get the video_id
'url': 'https://www.yahoo.com/movies/the-stars-of-daddys-home-have-very-different-212843197.html',
'info_dict': {
'id': '40eda9c8-8e5f-3552-8745-830f67d0c737',
'ext': 'mp4',
'title': 'Will Ferrell & Mark Wahlberg Are Pro-Spanking',
'description': 'While they play feuding fathers in \'Daddy\'s Home,\' star Will Ferrell & Mark Wahlberg share their true feelings on parenthood.',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id') or self._match_id(url)
page_id = mobj.group('id')
url = mobj.group('url')
host = mobj.group('host')
webpage = self._download_webpage(url, display_id)
# Look for iframed media first
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
if iframe_m:
iframepage = self._download_webpage(
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
items_json = self._search_regex(
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
if items_json:
items = json.loads(items_json)
video_id = items[0]['id']
return self._get_info(video_id, display_id, webpage)
# Look for NBCSports iframes
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
        # The query result is often embedded in the webpage as JSON. Sometimes an explicit
        # request to the video API fails with a geo restriction error, so using the
        # embedded query result when present is preferable.
config_json = self._search_regex(
r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
webpage, 'videoplayer applet', default=None)
if config_json:
config = self._parse_json(config_json, display_id, fatal=False)
if config:
sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
if sapi:
return self._extract_info(display_id, sapi, webpage)
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
alias = self._search_regex(
r'"aliases":{"video":"(.*?)"', webpage, 'alias', default=None)
if alias is not None:
alias_info = self._download_json(
'https://www.yahoo.com/_td/api/resource/VideoService.videos;video_aliases=["%s"]' % alias,
display_id, 'Downloading alias info')
video_id = alias_info[0]['id']
else:
CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
r'"first_videoid"\s*:\s*"([^"]+)"',
r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
r'<article[^>]data-uuid=["\']([^"\']+)',
r'yahoo://article/view\?.*\buuid=([^&"\']+)',
]
video_id = self._search_regex(
CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
            # The 'meta' field is not always in the video webpage, so we request it
            # from another page
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
def _extract_info(self, display_id, query, webpage):
info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
if msg:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, msg), expected=True)
raise ExtractorError('Unable to extract media object meta')
formats = []
for s in info['streams']:
format_info = {
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'tbr': int_or_none(s.get('bitrate')),
}
host = s['host']
path = s['path']
if host.startswith('rtmp'):
format_info.update({
'url': host,
'play_path': path,
'ext': 'flv',
})
else:
if s.get('format') == 'm3u8_playlist':
format_info['protocol'] = 'm3u8_native'
format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
self._sort_formats(formats)
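        # subtitles are exposed in the page markup as a JSON array under "closedcaptions"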
closed_captions = self._html_search_regex(
r'"closedcaptions":(\[[^\]]+\])', webpage, 'closed captions',
default='[]')
cc_json = self._parse_json(closed_captions, video_id, fatal=False)
subtitles = {}
if cc_json:
for closed_caption in cc_json:
lang = closed_caption['lang']
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append({
'url': closed_caption['url'],
'ext': mimetype2ext(closed_caption['content_type']),
})
return {
'id': video_id,
'display_id': display_id,
'title': unescapeHTML(meta['title']),
'formats': formats,
'description': clean_html(meta['description']),
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
'duration': int_or_none(meta.get('duration')),
'subtitles': subtitles,
}
def _get_info(self, video_id, display_id, webpage):
region = self._search_regex(
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
webpage, 'region', fatal=False, default='US')
data = compat_urllib_parse.urlencode({
'protocol': 'http',
'region': region,
})
query_url = (
'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
'{id}?{data}'.format(id=video_id, data=data))
query_result = self._download_json(
query_url, display_id, 'Downloading video info')
return self._extract_info(display_id, query_result, webpage)
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
_MAX_RESULTS = 1000
IE_NAME = 'screen.yahoo:search'
_SEARCH_KEY = 'yvsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
entries = []
for pagenum i
|
n itertools.count(0):
result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
info = self._download_json(result_url, query,
note='Downloading results page ' + str(pagenum + 1))
m = info['m']
results = info['results']
for (i, r) in enumerate(results):
if (pagenum * 30) + i >= n:
break
mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
entries.append(e)
if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
break
return {
'_type': 'playlist',
'id': query,
'entries': entries,
}
|
identifier_body
|
|
yahoo.py
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
clean_html,
unescapeHTML,
ExtractorError,
int_or_none,
mimetype2ext,
)
from .nbc import NBCSportsVPlayerIE
class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+)?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'info_dict': {
'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
'duration': 6863,
},
},
{
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
'duration': 151,
},
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
'title': "Yahoo Saves 'Community'",
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
'duration': 170,
}
},
{
'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/敢問市長-黃秀霜批賴清德-非常高傲-033009720.html',
'md5': '3a09cf59349cfaddae1797acc3c087fc',
'info_dict': {
'id': 'cac903b3-fcf4-3c14-b632-643ab541712f',
'ext': 'mp4',
'title': '敢問市長/黃秀霜批賴清德「非常高傲」',
'description': '直言台南沒捷運 交通居五都之末',
'duration': 396,
}
},
{
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
'md5': '0b51660361f0e27c9789e7037ef76f4b',
'info_dict': {
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
'ext': 'mp4',
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
'description': 'md5:f66c890e1490f4910a9953c941dee944',
|
{
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
'md5': '57e06440778b1828a6079d2f744212c4',
'info_dict': {
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
'ext': 'mp4',
'title': 'Program that makes hockey more affordable not offered in Manitoba',
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
'duration': 121,
}
}, {
'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html',
'md5': '226a895aae7e21b0129e2a2006fe9690',
'info_dict': {
'id': 'e624c4bc-3389-34de-9dfc-025f74943409',
'ext': 'mp4',
'title': '\'The Interview\' TV Spot: War',
'description': 'The Interview',
'duration': 30,
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
'duration': 128,
}
}, {
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
'md5': 'd9a083ccf1379127bf25699d67e4791b',
'info_dict': {
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
'ext': 'mp4',
'title': 'Connect the Dots: Dark Side of Virgo',
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
'duration': 201,
}
}, {
'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
'md5': '989396ae73d20c6f057746fb226aa215',
'info_dict': {
'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
'ext': 'mp4',
'title': '\'True Story\' Trailer',
'description': 'True Story',
'duration': 150,
},
}, {
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
'only_matching': True,
}, {
'note': 'NBC Sports embeds',
'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
'info_dict': {
'id': '9CsDKds0kvHI',
'ext': 'flv',
'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
}
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
}, {
# Query result is embedded in webpage, but explicit request to video API fails with geo restriction
'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
'info_dict': {
'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
'ext': 'mp4',
'title': 'Communitary - Community Episode 1: Ladders',
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
}, {
# it uses an alias to get the video_id
'url': 'https://www.yahoo.com/movies/the-stars-of-daddys-home-have-very-different-212843197.html',
'info_dict': {
'id': '40eda9c8-8e5f-3552-8745-830f67d0c737',
'ext': 'mp4',
'title': 'Will Ferrell & Mark Wahlberg Are Pro-Spanking',
'description': 'While they play feuding fathers in \'Daddy\'s Home,\' star Will Ferrell & Mark Wahlberg share their true feelings on parenthood.',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id') or self._match_id(url)
page_id = mobj.group('id')
url = mobj.group('url')
host = mobj.group('host')
webpage = self._download_webpage(url, display_id)
# Look for iframed media first
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
if iframe_m:
iframepage = self._download_webpage(
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
items_json = self._search_regex(
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
if items_json:
items = json.loads(items_json)
video_id = items[0]['id']
return self._get_info(video_id, display_id, webpage)
# Look for NBCSports iframes
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
        # The query result is often embedded in the webpage as JSON. Sometimes an explicit
        # request to the video API fails with a geo restriction error, so using the
        # embedded query result when present is preferable.
config_json = self._search_regex(
r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
webpage, 'videoplayer applet', default=None)
if config_json:
config = self._parse_json(config_json, display_id, fatal=False)
if config:
sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
if sapi:
return self._extract_info(display_id, sapi, webpage)
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
alias = self._search_regex(
r'"aliases":{"video":"(.*?)"', webpage, 'alias', default=None)
if alias is not None:
alias_info = self._download_json(
'https://www.yahoo.com/_td/api/resource/VideoService.videos;video_aliases=["%s"]' % alias,
display_id, 'Downloading alias info')
video_id = alias_info[0]['id']
else:
CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
r'"first_videoid"\s*:\s*"([^"]+)"',
r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
r'<article[^>]data-uuid=["\']([^"\']+)',
r'yahoo://article/view\?.*\buuid=([^&"\']+)',
]
video_id = self._search_regex(
CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
            # The 'meta' field is not always in the video webpage, so we request it
            # from another page
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
def _extract_info(self, display_id, query, webpage):
info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
if msg:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, msg), expected=True)
raise ExtractorError('Unable to extract media object meta')
formats = []
for s in info['streams']:
format_info = {
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'tbr': int_or_none(s.get('bitrate')),
}
host = s['host']
path = s['path']
if host.startswith('rtmp'):
format_info.update({
'url': host,
'play_path': path,
'ext': 'flv',
})
else:
if s.get('format') == 'm3u8_playlist':
format_info['protocol'] = 'm3u8_native'
format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
self._sort_formats(formats)
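        # subtitles are exposed in the page markup as a JSON array under "closedcaptions"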
closed_captions = self._html_search_regex(
r'"closedcaptions":(\[[^\]]+\])', webpage, 'closed captions',
default='[]')
cc_json = self._parse_json(closed_captions, video_id, fatal=False)
subtitles = {}
if cc_json:
for closed_caption in cc_json:
lang = closed_caption['lang']
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append({
'url': closed_caption['url'],
'ext': mimetype2ext(closed_caption['content_type']),
})
return {
'id': video_id,
'display_id': display_id,
'title': unescapeHTML(meta['title']),
'formats': formats,
'description': clean_html(meta['description']),
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
'duration': int_or_none(meta.get('duration')),
'subtitles': subtitles,
}
def _get_info(self, video_id, display_id, webpage):
region = self._search_regex(
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
webpage, 'region', fatal=False, default='US')
data = compat_urllib_parse.urlencode({
'protocol': 'http',
'region': region,
})
query_url = (
'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
'{id}?{data}'.format(id=video_id, data=data))
query_result = self._download_json(
query_url, display_id, 'Downloading video info')
return self._extract_info(display_id, query_result, webpage)
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
_MAX_RESULTS = 1000
IE_NAME = 'screen.yahoo:search'
_SEARCH_KEY = 'yvsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
entries = []
for pagenum in itertools.count(0):
result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
info = self._download_json(result_url, query,
note='Downloading results page ' + str(pagenum + 1))
m = info['m']
results = info['results']
for (i, r) in enumerate(results):
if (pagenum * 30) + i >= n:
break
mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
entries.append(e)
if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
break
return {
'_type': 'playlist',
'id': query,
'entries': entries,
}
|
'duration': 97,
}
},
|
random_line_split
|
fake_browser.rs
|
extern crate cookie;
use self::cookie::CookieJar;
extern crate regex;
use self::regex::Regex;
extern crate hyper;
use self::hyper::client::{Client, RedirectPolicy};
use self::hyper::Url;
use std::io::prelude::*;
use std::error::Error;
use std::fmt::{Display, Formatter};
use api::CallError;
/// Function that returns the authorization URI for a Standalone client
pub fn authorization_client_uri(client_id: u64, scope: String, version: String, redirect: String) -> String {
format!("https://oauth.vk.com/authorize?client_id={}&scope={}&redirect_uri={}&display=mobile&v={}&response_type=token", client_id, scope, redirect, version)
}
use std::collections::HashMap;
// Get params sent by hidden fields on the auth page form
fn hidden_params(s: &String) -> HashMap<String,String> {
let mut map = HashMap::new();
let reg = Regex::new("name=\"([a-z_]*)\".*value=\"([:A-Za-z-/0-9.]+)\"").unwrap();
for cap in reg.captures_iter(&*s) {
map.insert(cap.at(1).unwrap_or("").into(), cap.at(2).unwrap_or("").into());
}
map
}
// Build POST request body for <form>
fn build_post_for_hidden_form(mut hidden_fields: HashMap<String,String>, login: String, password: String) -> String {
let mut result = String::new();
hidden_fields.insert("email".into(), login);
hidden_fields.insert("pass".into(), password);
for (key, value) in hidden_fields.iter() {
result.extend( format!("{}={}&", key,value).chars() );
}
result
}
// Find URL to send auth form
fn get_post_uri(s: &String) -> String {
let reg = Regex::new("action=\"([a-z:/?=&.0-9]*)\"").unwrap();
match reg.captures_iter(&*s).next() {
Some(x) => x.at(1).unwrap_or(""),
None => ""
}.into()
}
// Get access token and other data from response URL
fn get_token(u: &Url) -> (String, u64, u64) {
let reg = Regex::new("access_token=([a-f0-9]+)&expires_in=([0-9]+)&user_id=([0-9]+)").unwrap();
let mut token: String = String::new();
let mut expires: u64 = 0u64;
let mut user_id: u64 = 0u64;
for cap in reg.captures_iter(&u.to_string()) {
token = cap.at(1).unwrap_or("").into();
expires = cap.at(2).unwrap_or("0").parse::<u64>().unwrap();
user_id = cap.at(3).unwrap_or("0").parse::<u64>().unwrap();
}
(token, expires, user_id)
}
// Find the URL to confirm rights after the authorization process (the form is not always shown)
fn find_confirmation_form(s: &String) -> String {
let mut result = String::new();
let reg = Regex::new("action=\"([A-Za-z0-9:/.?=&_%]+)\"").unwrap();
for cap in reg.captures_iter(&*s) {
result = cap.at(1).unwrap_or("").into();
}
result
}
// Stub
fn detect_captcha(s: &String) -> bool {
let reg = Regex::new("id=\"captcha\"").unwrap();
if reg.is_match(&*s) {
true
}
else{
false
}
}
/// Error returned if a captcha was detected during the login process.
/// _Warning:_ this is not the real 'Captcha needed' error from the VK.com API.
#[derive(Debug)]
pub struct CapthaError;
impl Display for CapthaError {
fn fmt(&self,f: &mut Formatter) -> Result<(), ::std::fmt::Error> {
"Captcha was found on authorization process.".fmt(f)
}
}
impl Error for CapthaError {
fn description(&self) -> &str {
"Captcha was found on authorization process."
}
}
|
/// The function implements the login process for a user without a browser
/// _Warning: use this carefully with respect to user privacy and the vk.com privacy policy_
pub fn fake_browser(login: String, password: String, url: String) -> Result<(String, u64, u64),CallError> {
use std::thread::sleep_ms;
use self::hyper::header::{Cookie,Location,SetCookie, ContentLength};
use self::hyper::client::response::Response;
let mut client = Client::new();
client.set_redirect_policy(RedirectPolicy::FollowNone);
let mut res: Response;
match client.get(&url).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(url, Some(Box::new(e))))
};
let mut jar = CookieJar::new(b"");
match res.headers.get::<SetCookie>(){
Some(setcookie) => setcookie.apply_to_cookie_jar(&mut jar),
None => return Err(CallError::new(
format!("Header of response doesn't set any cookies, {}", res.url), None))
};
let mut result = String::new();
match res.read_to_string(&mut result){
Ok(_) => { },
Err(e) => return Err(CallError::new(
format!("Failed read page to string by url: {}", res.url), Some(Box::new(e))))
};
let params = hidden_params(&result);
let post_req = build_post_for_hidden_form(params, login, password);
let post_uri = get_post_uri(&result);
sleep_ms(1000);
match client.post(&post_uri).header::<Cookie>(Cookie::from_cookie_jar(&jar)).body(&post_req).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(
format!("Can't send POST to {} with body {}",post_uri, post_req), Some(Box::new(e))))
};
while res.headers.has::<Location>() {
if res.headers.has::<SetCookie>() {
res.headers.get::<SetCookie>().unwrap().apply_to_cookie_jar(&mut jar);
}
let redirect = res.headers.get::<Location>().unwrap().clone();
res = client.get(&*redirect).header::<Cookie>(Cookie::from_cookie_jar(&jar)).send().unwrap();
let length = res.headers.get::<ContentLength>().unwrap().clone();
// Check whether we've got yet another confirmation form
if length != ContentLength(0u64) {
let mut answer = String::new();
if let Ok(_) = res.read_to_string(&mut answer) {
if detect_captcha(&answer) {
return Err(CallError::new(answer, Some(Box::new(CapthaError))));
}
let url = find_confirmation_form(&answer);
if !url.is_empty() {
match client.post(&url).header::<Cookie>(Cookie::from_cookie_jar(&jar)).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(
format!("Failed POST to url: {}", res.url), Some(Box::new(e))))
};
}
}
}
}
let result = get_token(&res.url);
if result == (String::new(), 0u64, 0u64) {
Err(CallError::new(
format!("Can't get token by url: {}", res.url),
None))
}
else {
Ok(result)
}
}
|
random_line_split
|
|
fake_browser.rs
|
extern crate cookie;
use self::cookie::CookieJar;
extern crate regex;
use self::regex::Regex;
extern crate hyper;
use self::hyper::client::{Client, RedirectPolicy};
use self::hyper::Url;
use std::io::prelude::*;
use std::error::Error;
use std::fmt::{Display, Formatter};
use api::CallError;
/// Function that returns the authorization URI for a Standalone client
pub fn
|
(client_id: u64, scope: String, version: String, redirect: String) -> String {
format!("https://oauth.vk.com/authorize?client_id={}&scope={}&redirect_uri={}&display=mobile&v={}&response_type=token", client_id, scope, redirect, version)
}
use std::collections::HashMap;
// Get params sent by hidden fields on the auth page form
fn hidden_params(s: &String) -> HashMap<String,String> {
let mut map = HashMap::new();
let reg = Regex::new("name=\"([a-z_]*)\".*value=\"([:A-Za-z-/0-9.]+)\"").unwrap();
for cap in reg.captures_iter(&*s) {
map.insert(cap.at(1).unwrap_or("").into(), cap.at(2).unwrap_or("").into());
}
map
}
// Build POST request body for <form>
fn build_post_for_hidden_form(mut hidden_fields: HashMap<String,String>, login: String, password: String) -> String {
let mut result = String::new();
hidden_fields.insert("email".into(), login);
hidden_fields.insert("pass".into(), password);
for (key, value) in hidden_fields.iter() {
result.extend( format!("{}={}&", key,value).chars() );
}
result
}
// Find URL to send auth form
fn get_post_uri(s: &String) -> String {
let reg = Regex::new("action=\"([a-z:/?=&.0-9]*)\"").unwrap();
match reg.captures_iter(&*s).next() {
Some(x) => x.at(1).unwrap_or(""),
None => ""
}.into()
}
// Get access token and other data from response URL
fn get_token(u: &Url) -> (String, u64, u64) {
let reg = Regex::new("access_token=([a-f0-9]+)&expires_in=([0-9]+)&user_id=([0-9]+)").unwrap();
let mut token: String = String::new();
let mut expires: u64 = 0u64;
let mut user_id: u64 = 0u64;
for cap in reg.captures_iter(&u.to_string()) {
token = cap.at(1).unwrap_or("").into();
expires = cap.at(2).unwrap_or("0").parse::<u64>().unwrap();
user_id = cap.at(3).unwrap_or("0").parse::<u64>().unwrap();
}
(token, expires, user_id)
}
// Find the URL to confirm rights after the authorization process (the form is not always shown)
fn find_confirmation_form(s: &String) -> String {
let mut result = String::new();
let reg = Regex::new("action=\"([A-Za-z0-9:/.?=&_%]+)\"").unwrap();
for cap in reg.captures_iter(&*s) {
result = cap.at(1).unwrap_or("").into();
}
result
}
// Stub
fn detect_captcha(s: &String) -> bool {
let reg = Regex::new("id=\"captcha\"").unwrap();
if reg.is_match(&*s) {
true
}
else{
false
}
}
/// Error returned if a captcha was detected during the login process.
/// _Warning:_ this is not the real 'Captcha needed' error from the VK.com API.
#[derive(Debug)]
pub struct CapthaError;
impl Display for CapthaError {
fn fmt(&self,f: &mut Formatter) -> Result<(), ::std::fmt::Error> {
"Captcha was found on authorization process.".fmt(f)
}
}
impl Error for CapthaError {
fn description(&self) -> &str {
"Captcha was found on authorization process."
}
}
/// The function implements the login process for a user without a browser
/// _Warning: use this carefully with respect to user privacy and the vk.com privacy policy_
pub fn fake_browser(login: String, password: String, url: String) -> Result<(String, u64, u64),CallError> {
use std::thread::sleep_ms;
use self::hyper::header::{Cookie,Location,SetCookie, ContentLength};
use self::hyper::client::response::Response;
let mut client = Client::new();
client.set_redirect_policy(RedirectPolicy::FollowNone);
let mut res: Response;
match client.get(&url).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(url, Some(Box::new(e))))
};
let mut jar = CookieJar::new(b"");
match res.headers.get::<SetCookie>(){
Some(setcookie) => setcookie.apply_to_cookie_jar(&mut jar),
None => return Err(CallError::new(
format!("Header of response doesn't set any cookies, {}", res.url), None))
};
let mut result = String::new();
match res.read_to_string(&mut result){
Ok(_) => { },
Err(e) => return Err(CallError::new(
format!("Failed read page to string by url: {}", res.url), Some(Box::new(e))))
};
let params = hidden_params(&result);
let post_req = build_post_for_hidden_form(params, login, password);
let post_uri = get_post_uri(&result);
sleep_ms(1000);
match client.post(&post_uri).header::<Cookie>(Cookie::from_cookie_jar(&jar)).body(&post_req).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(
format!("Can't send POST to {} with body {}",post_uri, post_req), Some(Box::new(e))))
};
while res.headers.has::<Location>() {
if res.headers.has::<SetCookie>() {
res.headers.get::<SetCookie>().unwrap().apply_to_cookie_jar(&mut jar);
}
let redirect = res.headers.get::<Location>().unwrap().clone();
res = client.get(&*redirect).header::<Cookie>(Cookie::from_cookie_jar(&jar)).send().unwrap();
let length = res.headers.get::<ContentLength>().unwrap().clone();
// Check whether we've got yet another confirmation form
if length != ContentLength(0u64) {
let mut answer = String::new();
if let Ok(_) = res.read_to_string(&mut answer) {
if detect_captcha(&answer) {
return Err(CallError::new(answer, Some(Box::new(CapthaError))));
}
let url = find_confirmation_form(&answer);
if !url.is_empty() {
match client.post(&url).header::<Cookie>(Cookie::from_cookie_jar(&jar)).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(
format!("Failed POST to url: {}", res.url), Some(Box::new(e))))
};
}
}
}
}
let result = get_token(&res.url);
if result == (String::new(), 0u64, 0u64) {
Err(CallError::new(
format!("Can't get token by url: {}", res.url),
None))
}
else {
Ok(result)
}
}
|
authorization_client_uri
|
identifier_name
|
fake_browser.rs
|
extern crate cookie;
use self::cookie::CookieJar;
extern crate regex;
use self::regex::Regex;
extern crate hyper;
use self::hyper::client::{Client, RedirectPolicy};
use self::hyper::Url;
use std::io::prelude::*;
use std::error::Error;
use std::fmt::{Display, Formatter};
use api::CallError;
/// Function that returns the authorization URI for a Standalone client
pub fn authorization_client_uri(client_id: u64, scope: String, version: String, redirect: String) -> String {
format!("https://oauth.vk.com/authorize?client_id={}&scope={}&redirect_uri={}&display=mobile&v={}&response_type=token", client_id, scope, redirect, version)
}
use std::collections::HashMap;
// Get params sent by hidden fields on the auth page form
fn hidden_params(s: &String) -> HashMap<String,String> {
let mut map = HashMap::new();
let reg = Regex::new("name=\"([a-z_]*)\".*value=\"([:A-Za-z-/0-9.]+)\"").unwrap();
for cap in reg.captures_iter(&*s) {
map.insert(cap.at(1).unwrap_or("").into(), cap.at(2).unwrap_or("").into());
}
map
}
// Build POST request body for <form>
fn build_post_for_hidden_form(mut hidden_fields: HashMap<String,String>, login: String, password: String) -> String {
let mut result = String::new();
hidden_fields.insert("email".into(), login);
hidden_fields.insert("pass".into(), password);
for (key, value) in hidden_fields.iter() {
result.extend( format!("{}={}&", key,value).chars() );
}
result
}
// Find URL to send auth form
fn get_post_uri(s: &String) -> String
|
// Get access token and other data from response URL
fn get_token(u: &Url) -> (String, u64, u64) {
let reg = Regex::new("access_token=([a-f0-9]+)&expires_in=([0-9]+)&user_id=([0-9]+)").unwrap();
let mut token: String = String::new();
let mut expires: u64 = 0u64;
let mut user_id: u64 = 0u64;
for cap in reg.captures_iter(&u.to_string()) {
token = cap.at(1).unwrap_or("").into();
expires = cap.at(2).unwrap_or("0").parse::<u64>().unwrap();
user_id = cap.at(3).unwrap_or("0").parse::<u64>().unwrap();
}
(token, expires, user_id)
}
// Find the URL to confirm rights after the authorization process (the form is not always shown)
fn find_confirmation_form(s: &String) -> String {
let mut result = String::new();
let reg = Regex::new("action=\"([A-Za-z0-9:/.?=&_%]+)\"").unwrap();
for cap in reg.captures_iter(&*s) {
result = cap.at(1).unwrap_or("").into();
}
result
}
// Stub
fn detect_captcha(s: &String) -> bool {
let reg = Regex::new("id=\"captcha\"").unwrap();
if reg.is_match(&*s) {
true
}
else{
false
}
}
/// Error returned if a captcha was detected during the login process.
/// _Warning:_ this is not the real 'Captcha needed' error from the VK.com API.
#[derive(Debug)]
pub struct CapthaError;
impl Display for CapthaError {
fn fmt(&self,f: &mut Formatter) -> Result<(), ::std::fmt::Error> {
"Captcha was found on authorization process.".fmt(f)
}
}
impl Error for CapthaError {
fn description(&self) -> &str {
"Captcha was found on authorization process."
}
}
/// The function implements the login process for a user without a browser
/// _Warning: use this carefully with respect to user privacy and the vk.com privacy policy_
pub fn fake_browser(login: String, password: String, url: String) -> Result<(String, u64, u64),CallError> {
use std::thread::sleep_ms;
use self::hyper::header::{Cookie,Location,SetCookie, ContentLength};
use self::hyper::client::response::Response;
let mut client = Client::new();
client.set_redirect_policy(RedirectPolicy::FollowNone);
let mut res: Response;
match client.get(&url).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(url, Some(Box::new(e))))
};
let mut jar = CookieJar::new(b"");
match res.headers.get::<SetCookie>(){
Some(setcookie) => setcookie.apply_to_cookie_jar(&mut jar),
None => return Err(CallError::new(
format!("Header of response doesn't set any cookies, {}", res.url), None))
};
let mut result = String::new();
match res.read_to_string(&mut result){
Ok(_) => { },
Err(e) => return Err(CallError::new(
format!("Failed read page to string by url: {}", res.url), Some(Box::new(e))))
};
let params = hidden_params(&result);
let post_req = build_post_for_hidden_form(params, login, password);
let post_uri = get_post_uri(&result);
sleep_ms(1000);
match client.post(&post_uri).header::<Cookie>(Cookie::from_cookie_jar(&jar)).body(&post_req).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(
format!("Can't send POST to {} with body {}",post_uri, post_req), Some(Box::new(e))))
};
while res.headers.has::<Location>() {
if res.headers.has::<SetCookie>() {
res.headers.get::<SetCookie>().unwrap().apply_to_cookie_jar(&mut jar);
}
let redirect = res.headers.get::<Location>().unwrap().clone();
res = client.get(&*redirect).header::<Cookie>(Cookie::from_cookie_jar(&jar)).send().unwrap();
let length = res.headers.get::<ContentLength>().unwrap().clone();
// Check whether we've got yet another confirmation form
if length != ContentLength(0u64) {
let mut answer = String::new();
if let Ok(_) = res.read_to_string(&mut answer) {
if detect_captcha(&answer) {
return Err(CallError::new(answer, Some(Box::new(CapthaError))));
}
let url = find_confirmation_form(&answer);
if !url.is_empty() {
match client.post(&url).header::<Cookie>(Cookie::from_cookie_jar(&jar)).send(){
Ok(r) => res = r,
Err(e) => return Err(CallError::new(
format!("Failed POST to url: {}", res.url), Some(Box::new(e))))
};
}
}
}
}
let result = get_token(&res.url);
if result == (String::new(), 0u64, 0u64) {
Err(CallError::new(
format!("Can't get token by url: {}", res.url),
None))
}
else {
Ok(result)
}
}
|
{
let reg = Regex::new("action=\"([a-z:/?=&.0-9]*)\"").unwrap();
match reg.captures_iter(&*s).next() {
Some(x) => x.at(1).unwrap_or(""),
None => ""
}.into()
}
|
identifier_body
|
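Note on fake_browser.rs above: the capture-group access it uses (`cap.at(i)`) comes from a pre-1.0 version of the regex crate. As a minimal, self-contained sketch, the same access_token/expires_in/user_id extraction could be written against the current regex 1.x API roughly as follows (the crate version and method names below are an assumption, not taken from the original code):

// Sketch only: parses "access_token=...&expires_in=...&user_id=..." out of a redirect URL.
// Assumes regex = "1" in Cargo.toml; returns None when the fields are absent.
use regex::Regex;

fn parse_token(url: &str) -> Option<(String, u64, u64)> {
    let re = Regex::new(r"access_token=([a-f0-9]+)&expires_in=([0-9]+)&user_id=([0-9]+)").ok()?;
    let caps = re.captures(url)?;
    let token = caps.get(1)?.as_str().to_string();
    let expires = caps.get(2)?.as_str().parse::<u64>().ok()?;
    let user_id = caps.get(3)?.as_str().parse::<u64>().ok()?;
    Some((token, expires, user_id))
}

fn main() {
    let url = "https://oauth.vk.com/blank.html#access_token=abc123&expires_in=86400&user_id=42";
    println!("{:?}", parse_token(url)); // Some(("abc123", 86400, 42))
}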
gen.rs
|
use std::rand::Rng;
use std::rand;
fn main()
|
{
// create a task-local Random Number Generator
let mut rng = rand::task_rng();
// the `gen` method generates values in the full range of each type using
// a uniform distribution
println!("randomly generate some values for different primitive types");
println!("u8: {}", rng.gen::<u8>());
println!("i8: {}", rng.gen::<i8>());
println!("u16: {}", rng.gen::<u16>());
println!("i16: {}", rng.gen::<i16>());
// except for floats, which get generated in the range [0, 1)
println!("f32: {}", rng.gen::<f32>());
println!("f64: {}", rng.gen::<f64>());
// `gen_iter` returns an iterator that yields an infinite number of randomly
// generated numbers
let mut v: Vec<u8> = rng.gen_iter::<u8>().take(10).collect();
println!("10 randomly generated u8 values");
println!("{}", v.as_slice());
// `shuffle` shuffles a mutable slice in place
rng.shuffle(v.as_mut_slice());
println!("shuffle previous slice");
println!("{}", v.as_slice());
// `choose` will sample a slice *with* replacement
// i.e. the same element can be chosen more than one time
println!("sample previous slice *with* replacement 10 times");
for _ in range(0u, 10) {
match rng.choose(v.as_slice()) {
None => fail!("slice was empty"),
Some(x) => println!("{}", x),
}
}
}
|
identifier_body
|
|
gen.rs
|
use std::rand::Rng;
use std::rand;
fn
|
() {
// create a task-local Random Number Generator
let mut rng = rand::task_rng();
// the `gen` method generates values in the full range of each type using
// a uniform distribution
println!("randomly generate some values for different primitive types");
println!("u8: {}", rng.gen::<u8>());
println!("i8: {}", rng.gen::<i8>());
println!("u16: {}", rng.gen::<u16>());
println!("i16: {}", rng.gen::<i16>());
// except for floats, which get generated in the range [0, 1)
println!("f32: {}", rng.gen::<f32>());
println!("f64: {}", rng.gen::<f64>());
// `gen_iter` returns an iterator that yields an infinite number of randomly
// generated numbers
let mut v: Vec<u8> = rng.gen_iter::<u8>().take(10).collect();
println!("10 randomly generated u8 values");
println!("{}", v.as_slice());
// `shuffle` shuffles a mutable slice in place
rng.shuffle(v.as_mut_slice());
println!("shuffle previous slice");
println!("{}", v.as_slice());
// `choose` will sample a slice *with* replacement
// i.e. the same element can be chosen more than one time
println!("sample previous slice *with* replacement 10 times");
for _ in range(0u, 10) {
match rng.choose(v.as_slice()) {
None => fail!("slice was empty"),
Some(x) => println!("{}", x),
}
}
}
|
main
|
identifier_name
|
gen.rs
|
use std::rand::Rng;
use std::rand;
fn main() {
// create a task-local Random Number Generator
let mut rng = rand::task_rng();
// the `gen` method generates values in the full range of each type using
// a uniform distribution
println!("randomly generate some values for different primitive types");
println!("u8: {}", rng.gen::<u8>());
println!("i8: {}", rng.gen::<i8>());
|
println!("f32: {}", rng.gen::<f32>());
println!("f64: {}", rng.gen::<f64>());
// `gen_iter` returns an iterator that yields an infinite number of randomly
// generated numbers
let mut v: Vec<u8> = rng.gen_iter::<u8>().take(10).collect();
println!("10 randomly generated u8 values");
println!("{}", v.as_slice());
// `shuffle` shuffles a mutable slice in place
rng.shuffle(v.as_mut_slice());
println!("shuffle previous slice");
println!("{}", v.as_slice());
// `choose` will sample a slice *with* replacement
// i.e. the same element can be chosen more than one time
println!("sample previous slice *with* replacement 10 times");
for _ in range(0u, 10) {
match rng.choose(v.as_slice()) {
None => fail!("slice was empty"),
Some(x) => println!("{}", x),
}
}
}
|
println!("u16: {}", rng.gen::<u16>());
println!("i16: {}", rng.gen::<i16>());
// except for floats, which get generated in the range [0, 1)
|
random_line_split
|
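Note on gen.rs above: it targets a pre-1.0 Rust toolchain (`rand::task_rng`, `fail!`, and `range(0u, 10)` no longer exist). A rough sketch of the same generate/shuffle/choose flow against the rand 0.8 API (an assumed crate version, not stated in the original) might look like this:

// Sketch only: generate, shuffle, and sample-with-replacement using rand 0.8
// (assumes rand = "0.8" in Cargo.toml).
use rand::seq::SliceRandom;
use rand::Rng;

fn main() {
    let mut rng = rand::thread_rng(); // thread-local RNG, the successor of task_rng()

    println!("u8: {}", rng.gen::<u8>());
    println!("f32 in [0, 1): {}", rng.gen::<f32>());

    // ten random u8 values
    let mut v: Vec<u8> = (0..10).map(|_| rng.gen()).collect();
    println!("{:?}", v);

    // shuffle in place, then sample *with* replacement
    v.shuffle(&mut rng);
    println!("{:?}", v);
    for _ in 0..10 {
        match v.choose(&mut rng) {
            Some(x) => println!("{}", x),
            None => panic!("slice was empty"),
        }
    }
}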
checkFile.ts
|
const fileTypeExts = {
pdf: ['pdf'],
image: ['jpg', 'jpeg', 'png'],
pdfImage: ['pdf', 'jpg', 'jpeg', 'png'],
};
const fileTypeAlerts = {
pdf: 'Please choose a pdf document',
image: 'Please choose an image file',
pdfImage: 'Please choose a pdf document or image file',
};
export default function checkFile(inputFiles, value, maxKb, fileType) {
if (
(window as any).FileReader &&
inputFiles &&
inputFiles[0] &&
inputFiles[0].size > (maxKb || 500) * 1000
)
|
const ext = value
.split('.')
.pop()
.toLowerCase();
if (fileTypeExts[fileType] && !fileTypeExts[fileType].includes(ext)) {
alert(fileTypeAlerts[fileType]);
return false;
}
return true;
}
|
{
alert(`The max file size is ${(maxKb || 500) / 1000}MB, please try again.`);
return false;
}
|
conditional_block
|
checkFile.ts
|
const fileTypeExts = {
pdf: ['pdf'],
image: ['jpg', 'jpeg', 'png'],
pdfImage: ['pdf', 'jpg', 'jpeg', 'png'],
};
const fileTypeAlerts = {
pdf: 'Please choose a pdf document',
image: 'Please choose an image file',
pdfImage: 'Please choose a pdf document or image file',
};
export default function
|
(inputFiles, value, maxKb, fileType) {
if (
(window as any).FileReader &&
inputFiles &&
inputFiles[0] &&
inputFiles[0].size > (maxKb || 500) * 1000
) {
alert(`The max file size is ${(maxKb || 500) / 1000}MB, please try again.`);
return false;
}
const ext = value
.split('.')
.pop()
.toLowerCase();
if (fileTypeExts[fileType] && !fileTypeExts[fileType].includes(ext)) {
alert(fileTypeAlerts[fileType]);
return false;
}
return true;
}
|
checkFile
|
identifier_name
|
checkFile.ts
|
const fileTypeExts = {
pdf: ['pdf'],
image: ['jpg', 'jpeg', 'png'],
pdfImage: ['pdf', 'jpg', 'jpeg', 'png'],
};
const fileTypeAlerts = {
pdf: 'Please choose a pdf document',
image: 'Please choose an image file',
pdfImage: 'Please choose a pdf document or image file',
};
export default function checkFile(inputFiles, value, maxKb, fileType) {
if (
(window as any).FileReader &&
|
alert(`The max file size is ${(maxKb || 500) / 1000}MB, please try again.`);
return false;
}
const ext = value
.split('.')
.pop()
.toLowerCase();
if (fileTypeExts[fileType] && !fileTypeExts[fileType].includes(ext)) {
alert(fileTypeAlerts[fileType]);
return false;
}
return true;
}
|
inputFiles &&
inputFiles[0] &&
inputFiles[0].size > (maxKb || 500) * 1000
) {
|
random_line_split
|
checkFile.ts
|
const fileTypeExts = {
pdf: ['pdf'],
image: ['jpg', 'jpeg', 'png'],
pdfImage: ['pdf', 'jpg', 'jpeg', 'png'],
};
const fileTypeAlerts = {
pdf: 'Please choose a pdf document',
image: 'Please choose an image file',
pdfImage: 'Please choose a pdf document or image file',
};
export default function checkFile(inputFiles, value, maxKb, fileType)
|
{
if (
(window as any).FileReader &&
inputFiles &&
inputFiles[0] &&
inputFiles[0].size > (maxKb || 500) * 1000
) {
alert(`The max file size is ${(maxKb || 500) / 1000}MB, please try again.`);
return false;
}
const ext = value
.split('.')
.pop()
.toLowerCase();
if (fileTypeExts[fileType] && !fileTypeExts[fileType].includes(ext)) {
alert(fileTypeAlerts[fileType]);
return false;
}
return true;
}
|
identifier_body
|
|
q_box.rs
|
use crate::{QObject, QPtr};
use cpp_core::{
CastFrom, CastInto, CppBox, CppDeletable, DynamicCast, Ptr, Ref, StaticDowncast, StaticUpcast,
};
use std::ops::Deref;
use std::{fmt, mem};
/// An owning pointer for `QObject`-based objects.
///
/// `QBox` will delete its object on drop if it has no parent. If the object has a parent,
/// it's assumed that the parent is responsible for deleting the object, as per Qt ownership system.
/// Additionally, `QBox` will be automatically set to null when the object is deleted, similar
/// to `QPtr` (or `QPointer<T>` in C++). `QBox` will not attempt to delete null pointers.
///
/// Note that dereferencing a null `QBox` will panic, so if it's known that the object may
/// already have been deleted, you should use `is_null()`, `as_ref()`,
/// or a similar method to check
/// if the object is still alive before calling its methods.
///
/// Unlike `CppBox` (which is non-nullable), `QBox` is permitted to contain a null pointer because
/// even if a non-null pointer is provided when constructing `QBox`, it will become null
/// automatically if the object is deleted.
///
/// To prevent the object from being deleted, convert `QBox` to another type of pointer using
/// `into_q_ptr()` or `into_ptr()`. Alternatively, setting a parent for the object will prevent
/// `QBox` from deleting it.
///
/// To make sure the object is deleted regardless of its parent, convert `QBox` to `CppBox` using
/// `into_box()`.
///
/// # Safety
///
/// `QBox` has the same safety issues as `QPtr`. See `QPtr` documentation.
pub struct QBox<T: StaticUpcast<QObject> + CppDeletable>(QPtr<T>);
impl<T: StaticUpcast<QObject> + CppDeletable> QBox<T> {
/// Creates a `QBox` from a `QPtr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn from_q_ptr(target: QPtr<T>) -> Self {
QBox(target)
}
/// Creates a `QBox` from a `Ptr`.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn new(target: impl CastInto<Ptr<T>>) -> Self {
QBox::from_q_ptr(QPtr::new(target))
}
/// Creates a `QBox` from a raw pointer.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn from_raw(target: *const T) -> Self {
QBox::from_q_ptr(QPtr::from_raw(target))
}
/// Creates a null pointer.
///
/// Note that you can also use `NullPtr` to specify a null pointer to a function accepting
/// `impl CastInto<Ptr<_>>`. Unlike `Ptr`, `NullPtr` is not a generic type, so it will
/// not cause type inference issues.
///
/// Note that accessing the content of a null `QBox` through `Deref` will result
/// in a panic.
///
/// ### Safety
///
/// Null pointers must not be dereferenced. See type level documentation.
pub unsafe fn null() -> Self {
QBox::from_q_ptr(QPtr::<T>::null())
}
/// Returns true if the pointer is null.
pub unsafe fn is_null(&self) -> bool
|
/// Returns the content as a const `Ptr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ptr(&self) -> Ptr<T> {
self.0.as_ptr()
}
/// Returns the content as a raw const pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_raw_ptr(&self) -> *const T {
self.0.as_raw_ptr()
}
/// Returns the content as a raw mutable pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_mut_raw_ptr(&self) -> *mut T {
self.0.as_mut_raw_ptr()
}
/// Returns the content as a const `Ref`. Returns `None` if `self` is a null pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ref(&self) -> Option<Ref<T>> {
self.0.as_ref()
}
/// Returns a reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_raw_ref<'a>(&self) -> Option<&'a T> {
self.as_ref().map(|r| r.as_raw_ref())
}
/// Returns a mutable reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_mut_raw_ref<'a>(&self) -> Option<&'a mut T> {
self.as_ref().map(|r| r.as_mut_raw_ref())
}
/// Converts the pointer to the base class type `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn static_upcast<U>(&self) -> QPtr<U>
where
T: StaticUpcast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_upcast::<U>())
}
/// Converts the pointer to the derived class type `U`.
///
/// It's recommended to use `dynamic_cast` instead because it performs a checked conversion.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid and its type is `U` or inherits from `U`,
/// or if `self` is a null pointer. See type level documentation.
pub unsafe fn static_downcast<U>(&self) -> QPtr<U>
where
T: StaticDowncast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_downcast())
}
/// Converts the pointer to the derived class type `U`. Returns `None` if the object's type
/// is not `U` and doesn't inherit `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn dynamic_cast<U>(&self) -> QPtr<U>
where
T: DynamicCast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().dynamic_cast())
}
/// Converts this pointer to a `CppBox`. Returns `None` if `self`
/// is a null pointer.
///
/// Unlike `QBox`, `CppBox` will always delete the object when dropped.
///
/// ### Safety
///
/// `CppBox` will attempt to delete the object on drop. If something else also tries to
/// delete this object before or after that, the behavior is undefined.
/// See type level documentation.
pub unsafe fn into_box(self) -> Option<CppBox<T>> {
self.into_q_ptr().to_box()
}
/// Converts this `QBox` into a `QPtr`.
///
/// Unlike `QBox`, `QPtr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_q_ptr(mut self) -> QPtr<T> {
mem::replace(&mut self.0, QPtr::null())
}
/// Converts this `QBox` into a `Ptr`.
///
/// Unlike `QBox`, `Ptr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_ptr(self) -> Ptr<T> {
self.into_q_ptr().as_ptr()
}
/// Converts this `QBox` into a raw pointer without deleting the object.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_raw_ptr(self) -> *mut T {
self.into_q_ptr().as_mut_raw_ptr()
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> fmt::Debug for QBox<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "QBox({:?})", unsafe { self.as_raw_ptr() })
}
}
/// Allows calling member functions of `T` and its base classes directly on the pointer.
///
/// Panics if the pointer is null.
impl<T: StaticUpcast<QObject> + CppDeletable> Deref for QBox<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe {
let ptr = self.as_raw_ptr();
if ptr.is_null() {
panic!("attempted to deref a null QBox<T>");
}
&*ptr
}
}
}
impl<'a, T, U> CastFrom<&'a QBox<U>> for Ptr<T>
where
U: StaticUpcast<T> + StaticUpcast<QObject> + CppDeletable,
{
unsafe fn cast_from(value: &'a QBox<U>) -> Self {
CastFrom::cast_from(value.as_ptr())
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> Drop for QBox<T> {
fn drop(&mut self) {
unsafe {
let ptr = self.as_ptr();
if !ptr.is_null() && ptr.static_upcast().parent().is_null() {
T::delete(&*ptr.as_raw_ptr());
}
}
}
}
|
{
self.0.is_null()
}
|
identifier_body
|
q_box.rs
|
use crate::{QObject, QPtr};
use cpp_core::{
CastFrom, CastInto, CppBox, CppDeletable, DynamicCast, Ptr, Ref, StaticDowncast, StaticUpcast,
};
use std::ops::Deref;
use std::{fmt, mem};
/// An owning pointer for `QObject`-based objects.
///
/// `QBox` will delete its object on drop if it has no parent. If the object has a parent,
/// it's assumed that the parent is responsible for deleting the object, as per Qt ownership system.
/// Additionally, `QBox` will be automatically set to null when the object is deleted, similar
/// to `QPtr` (or `QPointer<T>` in C++). `QBox` will not attempt to delete null pointers.
///
/// Note that dereferencing a null `QBox` will panic, so if it's known that the object may
/// already have been deleted, you should use `is_null()`, `as_ref()`,
/// or a similar method to check
/// if the object is still alive before calling its methods.
///
/// Unlike `CppBox` (which is non-nullable), `QBox` is permitted to contain a null pointer because
/// even if a non-null pointer is provided when constructing `QBox`, it will become null
/// automatically if the object is deleted.
///
/// To prevent the object from being deleted, convert `QBox` to another type of pointer using
/// `into_q_ptr()` or `into_ptr()`. Alternatively, setting a parent for the object will prevent
/// `QBox` from deleting it.
///
/// To make sure the object is deleted regardless of its parent, convert `QBox` to `CppBox` using
/// `into_box()`.
///
/// # Safety
///
/// `QBox` has the same safety issues as `QPtr`. See `QPtr` documentation.
pub struct QBox<T: StaticUpcast<QObject> + CppDeletable>(QPtr<T>);
impl<T: StaticUpcast<QObject> + CppDeletable> QBox<T> {
/// Creates a `QBox` from a `QPtr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn from_q_ptr(target: QPtr<T>) -> Self {
QBox(target)
}
/// Creates a `QBox` from a `Ptr`.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn new(target: impl CastInto<Ptr<T>>) -> Self {
QBox::from_q_ptr(QPtr::new(target))
}
/// Creates a `QBox` from a raw pointer.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn from_raw(target: *const T) -> Self {
QBox::from_q_ptr(QPtr::from_raw(target))
}
/// Creates a null pointer.
///
/// Note that you can also use `NullPtr` to specify a null pointer to a function accepting
/// `impl CastInto<Ptr<_>>`. Unlike `Ptr`, `NullPtr` is not a generic type, so it will
/// not cause type inference issues.
///
/// Note that accessing the content of a null `QBox` through `Deref` will result
/// in a panic.
///
/// ### Safety
///
/// Null pointers must not be dereferenced. See type level documentation.
pub unsafe fn null() -> Self {
QBox::from_q_ptr(QPtr::<T>::null())
}
/// Returns true if the pointer is null.
pub unsafe fn is_null(&self) -> bool {
self.0.is_null()
}
/// Returns the content as a const `Ptr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ptr(&self) -> Ptr<T> {
self.0.as_ptr()
}
/// Returns the content as a raw const pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_raw_ptr(&self) -> *const T {
self.0.as_raw_ptr()
}
/// Returns the content as a raw mutable pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_mut_raw_ptr(&self) -> *mut T {
self.0.as_mut_raw_ptr()
}
/// Returns the content as a const `Ref`. Returns `None` if `self` is a null pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ref(&self) -> Option<Ref<T>> {
self.0.as_ref()
}
/// Returns a reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_raw_ref<'a>(&self) -> Option<&'a T> {
self.as_ref().map(|r| r.as_raw_ref())
}
/// Returns a mutable reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_mut_raw_ref<'a>(&self) -> Option<&'a mut T> {
self.as_ref().map(|r| r.as_mut_raw_ref())
}
/// Converts the pointer to the base class type `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn static_upcast<U>(&self) -> QPtr<U>
where
T: StaticUpcast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_upcast::<U>())
}
/// Converts the pointer to the derived class type `U`.
///
/// It's recommended to use `dynamic_cast` instead because it performs a checked conversion.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid and its type is `U` or inherits from `U`,
/// or if `self` is a null pointer. See type level documentation.
pub unsafe fn static_downcast<U>(&self) -> QPtr<U>
where
T: StaticDowncast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_downcast())
}
/// Converts the pointer to the derived class type `U`. Returns `None` if the object's type
/// is not `U` and doesn't inherit `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn dynamic_cast<U>(&self) -> QPtr<U>
where
T: DynamicCast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().dynamic_cast())
}
/// Converts this pointer to a `CppBox`. Returns `None` if `self`
/// is a null pointer.
///
/// Unlike `QBox`, `CppBox` will always delete the object when dropped.
///
/// ### Safety
///
/// `CppBox` will attempt to delete the object on drop. If something else also tries to
/// delete this object before or after that, the behavior is undefined.
/// See type level documentation.
pub unsafe fn into_box(self) -> Option<CppBox<T>> {
self.into_q_ptr().to_box()
}
/// Converts this `QBox` into a `QPtr`.
///
/// Unlike `QBox`, `QPtr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_q_ptr(mut self) -> QPtr<T> {
mem::replace(&mut self.0, QPtr::null())
}
/// Converts this `QBox` into a `Ptr`.
///
/// Unlike `QBox`, `Ptr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_ptr(self) -> Ptr<T> {
self.into_q_ptr().as_ptr()
}
/// Converts this `QBox` into a raw pointer without deleting the object.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_raw_ptr(self) -> *mut T {
self.into_q_ptr().as_mut_raw_ptr()
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> fmt::Debug for QBox<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "QBox({:?})", unsafe { self.as_raw_ptr() })
}
}
/// Allows calling member functions of `T` and its base classes directly on the pointer.
///
/// Panics if the pointer is null.
impl<T: StaticUpcast<QObject> + CppDeletable> Deref for QBox<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe {
let ptr = self.as_raw_ptr();
if ptr.is_null() {
panic!("attempted to deref a null QBox<T>");
}
&*ptr
}
}
}
impl<'a, T, U> CastFrom<&'a QBox<U>> for Ptr<T>
where
U: StaticUpcast<T> + StaticUpcast<QObject> + CppDeletable,
{
unsafe fn cast_from(value: &'a QBox<U>) -> Self {
CastFrom::cast_from(value.as_ptr())
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> Drop for QBox<T> {
fn drop(&mut self) {
unsafe {
let ptr = self.as_ptr();
if !ptr.is_null() && ptr.static_upcast().parent().is_null()
|
}
}
}
|
{
T::delete(&*ptr.as_raw_ptr());
}
|
conditional_block
|
q_box.rs
|
use crate::{QObject, QPtr};
use cpp_core::{
CastFrom, CastInto, CppBox, CppDeletable, DynamicCast, Ptr, Ref, StaticDowncast, StaticUpcast,
};
use std::ops::Deref;
use std::{fmt, mem};
/// An owning pointer for `QObject`-based objects.
///
/// `QBox` will delete its object on drop if it has no parent. If the object has a parent,
/// it's assumed that the parent is responsible for deleting the object, as per Qt ownership system.
/// Additionally, `QBox` will be automatically set to null when the object is deleted, similar
/// to `QPtr` (or `QPointer<T>` in C++). `QBox` will not attempt to delete null pointers.
///
/// Note that dereferencing a null `QBox` will panic, so if it's known that the object may
/// already have been deleted, you should use `is_null()`, `as_ref()`,
/// or a similar method to check
/// if the object is still alive before calling its methods.
///
/// Unlike `CppBox` (which is non-nullable), `QBox` is permitted to contain a null pointer because
/// even if a non-null pointer is provided when constructing `QBox`, it will become null
/// automatically if the object is deleted.
///
/// To prevent the object from being deleted, convert `QBox` to another type of pointer using
/// `into_q_ptr()` or `into_ptr()`. Alternatively, setting a parent for the object will prevent
/// `QBox` from deleting it.
///
/// To make sure the object is deleted regardless of its parent, convert `QBox` to `CppBox` using
/// `into_box()`.
///
/// # Safety
///
/// `QBox` has the same safety issues as `QPtr`. See `QPtr` documentation.
pub struct QBox<T: StaticUpcast<QObject> + CppDeletable>(QPtr<T>);
impl<T: StaticUpcast<QObject> + CppDeletable> QBox<T> {
/// Creates a `QBox` from a `QPtr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn from_q_ptr(target: QPtr<T>) -> Self {
QBox(target)
}
/// Creates a `QBox` from a `Ptr`.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn new(target: impl CastInto<Ptr<T>>) -> Self {
QBox::from_q_ptr(QPtr::new(target))
}
/// Creates a `QBox` from a raw pointer.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn from_raw(target: *const T) -> Self {
QBox::from_q_ptr(QPtr::from_raw(target))
}
/// Creates a null pointer.
///
/// Note that you can also use `NullPtr` to specify a null pointer to a function accepting
/// `impl CastInto<Ptr<_>>`. Unlike `Ptr`, `NullPtr` is not a generic type, so it will
/// not cause type inference issues.
///
/// Note that accessing the content of a null `QBox` through `Deref` will result
/// in a panic.
///
/// ### Safety
///
/// Null pointers must not be dereferenced. See type level documentation.
pub unsafe fn null() -> Self {
QBox::from_q_ptr(QPtr::<T>::null())
}
/// Returns true if the pointer is null.
pub unsafe fn is_null(&self) -> bool {
self.0.is_null()
}
/// Returns the content as a const `Ptr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ptr(&self) -> Ptr<T> {
self.0.as_ptr()
}
/// Returns the content as a raw const pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_raw_ptr(&self) -> *const T {
self.0.as_raw_ptr()
}
/// Returns the content as a raw mutable pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_mut_raw_ptr(&self) -> *mut T {
self.0.as_mut_raw_ptr()
}
/// Returns the content as a const `Ref`. Returns `None` if `self` is a null pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ref(&self) -> Option<Ref<T>> {
self.0.as_ref()
}
/// Returns a reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_raw_ref<'a>(&self) -> Option<&'a T> {
self.as_ref().map(|r| r.as_raw_ref())
}
/// Returns a mutable reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_mut_raw_ref<'a>(&self) -> Option<&'a mut T> {
self.as_ref().map(|r| r.as_mut_raw_ref())
}
/// Converts the pointer to the base class type `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn static_upcast<U>(&self) -> QPtr<U>
where
T: StaticUpcast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_upcast::<U>())
}
/// Converts the pointer to the derived class type `U`.
///
/// It's recommended to use `dynamic_cast` instead because it performs a checked conversion.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid and its type is `U` or inherits from `U`,
/// or if `self` is a null pointer. See type level documentation.
pub unsafe fn static_downcast<U>(&self) -> QPtr<U>
where
T: StaticDowncast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_downcast())
}
/// Converts the pointer to the derived class type `U`. Returns `None` if the object's type
/// is not `U` and doesn't inherit `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn dynamic_cast<U>(&self) -> QPtr<U>
where
T: DynamicCast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().dynamic_cast())
}
/// Converts this pointer to a `CppBox`. Returns `None` if `self`
/// is a null pointer.
///
/// Unlike `QBox`, `CppBox` will always delete the object when dropped.
///
/// ### Safety
///
/// `CppBox` will attempt to delete the object on drop. If something else also tries to
/// delete this object before or after that, the behavior is undefined.
/// See type level documentation.
pub unsafe fn into_box(self) -> Option<CppBox<T>> {
self.into_q_ptr().to_box()
}
/// Converts this `QBox` into a `QPtr`.
///
/// Unlike `QBox`, `QPtr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_q_ptr(mut self) -> QPtr<T> {
mem::replace(&mut self.0, QPtr::null())
}
/// Converts this `QBox` into a `Ptr`.
///
/// Unlike `QBox`, `Ptr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_ptr(self) -> Ptr<T> {
self.into_q_ptr().as_ptr()
}
/// Converts this `QBox` into a raw pointer without deleting the object.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_raw_ptr(self) -> *mut T {
self.into_q_ptr().as_mut_raw_ptr()
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> fmt::Debug for QBox<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "QBox({:?})", unsafe { self.as_raw_ptr() })
}
}
/// Allows calling member functions of `T` and its base classes directly on the pointer.
///
/// Panics if the pointer is null.
impl<T: StaticUpcast<QObject> + CppDeletable> Deref for QBox<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe {
let ptr = self.as_raw_ptr();
if ptr.is_null() {
panic!("attempted to deref a null QBox<T>");
}
&*ptr
}
}
}
impl<'a, T, U> CastFrom<&'a QBox<U>> for Ptr<T>
where
U: StaticUpcast<T> + StaticUpcast<QObject> + CppDeletable,
{
unsafe fn cast_from(value: &'a QBox<U>) -> Self {
CastFrom::cast_from(value.as_ptr())
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> Drop for QBox<T> {
fn
|
(&mut self) {
unsafe {
let ptr = self.as_ptr();
if !ptr.is_null() && ptr.static_upcast().parent().is_null() {
T::delete(&*ptr.as_raw_ptr());
}
}
}
}
|
drop
|
identifier_name
|
q_box.rs
|
use crate::{QObject, QPtr};
use cpp_core::{
CastFrom, CastInto, CppBox, CppDeletable, DynamicCast, Ptr, Ref, StaticDowncast, StaticUpcast,
};
use std::ops::Deref;
use std::{fmt, mem};
/// An owning pointer for `QObject`-based objects.
///
/// `QBox` will delete its object on drop if it has no parent. If the object has a parent,
/// it's assumed that the parent is responsible for deleting the object, as per Qt ownership system.
/// Additionally, `QBox` will be automatically set to null when the object is deleted, similar
/// to `QPtr` (or `QPointer<T>` in C++). `QBox` will not attempt to delete null pointers.
///
/// Note that dereferencing a null `QBox` will panic, so if it's known that the object may
/// already have been deleted, you should use `is_null()`, `as_ref()`,
/// or a similar method to check
/// if the object is still alive before calling its methods.
///
/// Unlike `CppBox` (which is non-nullable), `QBox` is permitted to contain a null pointer because
/// even if a non-null pointer is provided when constructing `QBox`, it will become null
/// automatically if the object is deleted.
///
/// To prevent the object from being deleted, convert `QBox` to another type of pointer using
/// `into_q_ptr()` or `into_ptr()`. Alternatively, setting a parent for the object will prevent
/// `QBox` from deleting it.
///
/// To make sure the object is deleted regardless of its parent, convert `QBox` to `CppBox` using
/// `into_box()`.
///
/// # Safety
///
/// `QBox` has the same safety issues as `QPtr`. See `QPtr` documentation.
pub struct QBox<T: StaticUpcast<QObject> + CppDeletable>(QPtr<T>);
impl<T: StaticUpcast<QObject> + CppDeletable> QBox<T> {
/// Creates a `QBox` from a `QPtr`.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn from_q_ptr(target: QPtr<T>) -> Self {
QBox(target)
}
/// Creates a `QBox` from a `Ptr`.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn new(target: impl CastInto<Ptr<T>>) -> Self {
QBox::from_q_ptr(QPtr::new(target))
}
/// Creates a `QBox` from a raw pointer.
///
/// ### Safety
///
/// `target` must be either a valid pointer to an object or a null pointer.
/// See type level documentation.
pub unsafe fn from_raw(target: *const T) -> Self {
QBox::from_q_ptr(QPtr::from_raw(target))
}
/// Creates a null pointer.
///
/// Note that you can also use `NullPtr` to specify a null pointer to a function accepting
/// `impl CastInto<Ptr<_>>`. Unlike `Ptr`, `NullPtr` is not a generic type, so it will
/// not cause type inference issues.
///
/// Note that accessing the content of a null `QBox` through `Deref` will result
/// in a panic.
///
/// ### Safety
///
/// Null pointers must not be dereferenced. See type level documentation.
pub unsafe fn null() -> Self {
QBox::from_q_ptr(QPtr::<T>::null())
}
/// Returns true if the pointer is null.
pub unsafe fn is_null(&self) -> bool {
self.0.is_null()
}
/// Returns the content as a const `Ptr`.
///
/// ### Safety
///
/// See type level documentation.
|
/// Returns the content as a raw const pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_raw_ptr(&self) -> *const T {
self.0.as_raw_ptr()
}
/// Returns the content as a raw mutable pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_mut_raw_ptr(&self) -> *mut T {
self.0.as_mut_raw_ptr()
}
/// Returns the content as a const `Ref`. Returns `None` if `self` is a null pointer.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn as_ref(&self) -> Option<Ref<T>> {
self.0.as_ref()
}
/// Returns a reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_raw_ref<'a>(&self) -> Option<&'a T> {
self.as_ref().map(|r| r.as_raw_ref())
}
/// Returns a mutable reference to the value. Returns `None` if the pointer is null.
///
/// ### Safety
///
/// `self` must be valid.
/// The content must not be read or modified through other ways while the returned reference
/// exists. See type level documentation.
pub unsafe fn as_mut_raw_ref<'a>(&self) -> Option<&'a mut T> {
self.as_ref().map(|r| r.as_mut_raw_ref())
}
/// Converts the pointer to the base class type `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn static_upcast<U>(&self) -> QPtr<U>
where
T: StaticUpcast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_upcast::<U>())
}
/// Converts the pointer to the derived class type `U`.
///
/// It's recommended to use `dynamic_cast` instead because it performs a checked conversion.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid and its type is `U` or inherits from `U`,
/// or if `self` is a null pointer. See type level documentation.
pub unsafe fn static_downcast<U>(&self) -> QPtr<U>
where
T: StaticDowncast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().static_downcast())
}
/// Converts the pointer to the derived class type `U`. Returns `None` if the object's type
/// is not `U` and doesn't inherit `U`.
///
/// ### Safety
///
/// This operation is safe as long as `self` is valid or null. See type level documentation.
pub unsafe fn dynamic_cast<U>(&self) -> QPtr<U>
where
T: DynamicCast<U>,
U: StaticUpcast<QObject>,
{
QPtr::<U>::new(self.as_ptr().dynamic_cast())
}
/// Converts this pointer to a `CppBox`. Returns `None` if `self`
/// is a null pointer.
///
/// Unlike `QBox`, `CppBox` will always delete the object when dropped.
///
/// ### Safety
///
/// `CppBox` will attempt to delete the object on drop. If something else also tries to
/// delete this object before or after that, the behavior is undefined.
/// See type level documentation.
pub unsafe fn into_box(self) -> Option<CppBox<T>> {
self.into_q_ptr().to_box()
}
/// Converts this `QBox` into a `QPtr`.
///
/// Unlike `QBox`, `QPtr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_q_ptr(mut self) -> QPtr<T> {
mem::replace(&mut self.0, QPtr::null())
}
/// Converts this `QBox` into a `Ptr`.
///
/// Unlike `QBox`, `Ptr` will never delete the object when dropped.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_ptr(self) -> Ptr<T> {
self.into_q_ptr().as_ptr()
}
/// Converts this `QBox` into a raw pointer without deleting the object.
///
/// ### Safety
///
/// See type level documentation.
pub unsafe fn into_raw_ptr(self) -> *mut T {
self.into_q_ptr().as_mut_raw_ptr()
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> fmt::Debug for QBox<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "QBox({:?})", unsafe { self.as_raw_ptr() })
}
}
/// Allows calling member functions of `T` and its base classes directly on the pointer.
///
/// Panics if the pointer is null.
impl<T: StaticUpcast<QObject> + CppDeletable> Deref for QBox<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe {
let ptr = self.as_raw_ptr();
if ptr.is_null() {
panic!("attempted to deref a null QBox<T>");
}
&*ptr
}
}
}
impl<'a, T, U> CastFrom<&'a QBox<U>> for Ptr<T>
where
U: StaticUpcast<T> + StaticUpcast<QObject> + CppDeletable,
{
unsafe fn cast_from(value: &'a QBox<U>) -> Self {
CastFrom::cast_from(value.as_ptr())
}
}
impl<T: StaticUpcast<QObject> + CppDeletable> Drop for QBox<T> {
fn drop(&mut self) {
unsafe {
let ptr = self.as_ptr();
if !ptr.is_null() && ptr.static_upcast().parent().is_null() {
T::delete(&*ptr.as_raw_ptr());
}
}
}
}
|
pub unsafe fn as_ptr(&self) -> Ptr<T> {
self.0.as_ptr()
}
|
random_line_split
|
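A minimal usage sketch (not part of the dataset record above) may help tie the QBox conversions together: it borrows a non-owning, upcast pointer and then releases ownership so the Drop impl no longer deletes the object. The function name `hand_off` and the generic setup are illustrative assumptions, not taken from the source.

use cpp_core::{CppDeletable, StaticUpcast};
use qt_core::{QBox, QObject, QPtr};

// Sketch: hands the object off as a non-owning QPtr<QObject>. Because
// into_q_ptr replaces the QBox's inner pointer with null, its Drop impl
// will not delete the object; responsibility passes to the caller
// (typically a parent QObject).
unsafe fn hand_off<T>(owned: QBox<T>) -> QPtr<QObject>
where
    T: StaticUpcast<QObject> + CppDeletable,
{
    // Borrow an upcast, non-owning pointer while the QBox still owns the object.
    let weak: QPtr<QObject> = owned.static_upcast::<QObject>();
    // Consume the QBox without deleting the object.
    let _released: QPtr<T> = owned.into_q_ptr();
    weak
}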
platform.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use ::paint::{TextRuler, MathRuler};
use super::ruler::Ruler as SnapshotRuler;
use super::canvas::Canvas;
pub struct Platform {
ruler: SnapshotRuler,
}
impl Platform {
pub fn new(typeface: &str) -> Platform
|
pub fn new_canvas(&self, width: f32, height: f32) -> Canvas {
Canvas::new(width.max(1.), height.max(1.),
self.ruler.get_sk_typeface())
}
}
impl ::platform::Platform for Platform {
fn get_text_ruler(&self, size: f32) -> &TextRuler {
self.ruler.set_size(size);
&self.ruler
}
fn get_math_ruler(&self, size: f32) -> &MathRuler {
self.ruler.set_size(size);
&self.ruler
}
fn px_to_du(&self, px: f32) -> f32 {
px
}
fn sp_to_du(&self, sp: f32) -> f32 {
64.*sp
}
fn dp_to_du(&self, dp: f32) -> f32 {
64.*dp
}
fn as_any(&self) -> &Any {
self
}
}
|
{
Platform { ruler: SnapshotRuler::new(typeface, 0) }
}
|
identifier_body
|
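As a brief illustration of the unit conversions defined in the platform.rs record above, the following sketch assumes it lives in the same module as that code; the trait path `::platform::Platform` and the conversion factors come from the source, while the function name and call values are assumptions for illustration.

use ::platform::Platform as PlatformTrait; // brings px_to_du/sp_to_du/dp_to_du into scope

fn demo(p: &Platform) {
    let _canvas = p.new_canvas(800., 600.); // width/height are clamped to at least 1.0
    assert_eq!(p.px_to_du(10.0), 10.0);     // pixels pass through unchanged
    assert_eq!(p.sp_to_du(1.0), 64.0);      // 64 device units per sp
    assert_eq!(p.dp_to_du(2.0), 128.0);     // 64 device units per dp
}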
platform.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use ::paint::{TextRuler, MathRuler};
use super::ruler::Ruler as SnapshotRuler;
use super::canvas::Canvas;
pub struct Platform {
ruler: SnapshotRuler,
}
impl Platform {
pub fn new(typeface: &str) -> Platform {
Platform { ruler: SnapshotRuler::new(typeface, 0) }
}
pub fn new_canvas(&self, width: f32, height: f32) -> Canvas {
Canvas::new(width.max(1.), height.max(1.),
self.ruler.get_sk_typeface())
}
}
impl ::platform::Platform for Platform {
fn
|
(&self, size: f32) -> &TextRuler {
self.ruler.set_size(size);
&self.ruler
}
fn get_math_ruler(&self, size: f32) -> &MathRuler {
self.ruler.set_size(size);
&self.ruler
}
fn px_to_du(&self, px: f32) -> f32 {
px
}
fn sp_to_du(&self, sp: f32) -> f32 {
64.*sp
}
fn dp_to_du(&self, dp: f32) -> f32 {
64.*dp
}
fn as_any(&self) -> &Any {
self
}
}
|
get_text_ruler
|
identifier_name
|
platform.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
|
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use ::paint::{TextRuler, MathRuler};
use super::ruler::Ruler as SnapshotRuler;
use super::canvas::Canvas;
pub struct Platform {
ruler: SnapshotRuler,
}
impl Platform {
pub fn new(typeface: &str) -> Platform {
Platform { ruler: SnapshotRuler::new(typeface, 0) }
}
pub fn new_canvas(&self, width: f32, height: f32) -> Canvas {
Canvas::new(width.max(1.), height.max(1.),
self.ruler.get_sk_typeface())
}
}
impl ::platform::Platform for Platform {
fn get_text_ruler(&self, size: f32) -> &TextRuler {
self.ruler.set_size(size);
&self.ruler
}
fn get_math_ruler(&self, size: f32) -> &MathRuler {
self.ruler.set_size(size);
&self.ruler
}
fn px_to_du(&self, px: f32) -> f32 {
px
}
fn sp_to_du(&self, sp: f32) -> f32 {
64.*sp
}
fn dp_to_du(&self, dp: f32) -> f32 {
64.*dp
}
fn as_any(&self) -> &Any {
self
}
}
|
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
random_line_split
|