file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
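Each row stores one source file split into three pieces: concatenating `prefix + middle + suffix` reconstructs the original file, and `fim_type` records which of the four split strategies (`random_line_split`, `identifier_name`, `identifier_body`, `conditional_block`) produced the row. A minimal sketch of consuming the rows with pandas; the Parquet shard name is a placeholder, not something documented here:

```python
import pandas as pd

# Hypothetical shard name; substitute the real Parquet file for this split.
df = pd.read_parquet("train-00000-of-00001.parquet")

for _, row in df.iterrows():
    # The three text columns always reassemble into the original file.
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["file_name"], row["fim_type"], len(source))
```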
manual_flatten.rs | use super::utils::make_iterator_snippet;
use super::MANUAL_FLATTEN;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::higher;
use clippy_utils::visitors::is_local_used;
use clippy_utils::{is_lang_ctor, path_to_local_id};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::LangItem::{OptionSome, ResultOk};
use rustc_hir::{Expr, ExprKind, Pat, PatKind, StmtKind};
use rustc_lint::LateContext;
use rustc_middle::ty;
use rustc_span::source_map::Span;
/// Check for unnecessary `if let` usage in a for loop where only the `Some` or `Ok` variant of the
/// iterator element is used.
pub(super) fn check<'tcx>(
cx: &LateContext<'tcx>,
pat: &'tcx Pat<'_>,
arg: &'tcx Expr<'_>,
body: &'tcx Expr<'_>,
span: Span,
) {
if let ExprKind::Block(block, _) = body.kind {
// Ensure the `if let` statement is the only expression or statement in the for-loop
let inner_expr = if block.stmts.len() == 1 && block.expr.is_none() {
let match_stmt = &block.stmts[0];
if let StmtKind::Semi(inner_expr) = match_stmt.kind {
Some(inner_expr)
} else {
None
}
} else if block.stmts.is_empty() {
block.expr
} else {
None
};
if_chain! {
if let Some(inner_expr) = inner_expr;
if let Some(higher::IfLet { let_pat, let_expr, if_then, if_else: None })
= higher::IfLet::hir(cx, inner_expr);
// Ensure match_expr in `if let` statement is the same as the pat from the for-loop
if let PatKind::Binding(_, pat_hir_id, _, _) = pat.kind;
if path_to_local_id(let_expr, pat_hir_id);
// Ensure the `if let` statement is for the `Some` variant of `Option` or the `Ok` variant of `Result`
if let PatKind::TupleStruct(ref qpath, _, _) = let_pat.kind;
let some_ctor = is_lang_ctor(cx, qpath, OptionSome); | if !is_local_used(cx, if_then, pat_hir_id);
then {
let if_let_type = if some_ctor { "Some" } else { "Ok" };
// Prepare the error message
let msg = format!("unnecessary `if let` since only the `{}` variant of the iterator element is used", if_let_type);
// Prepare the help message
let mut applicability = Applicability::MaybeIncorrect;
let arg_snippet = make_iterator_snippet(cx, arg, &mut applicability);
let copied = match cx.typeck_results().expr_ty(let_expr).kind() {
ty::Ref(_, inner, _) => match inner.kind() {
ty::Ref(..) => ".copied()",
_ => ""
}
_ => ""
};
span_lint_and_then(
cx,
MANUAL_FLATTEN,
span,
&msg,
|diag| {
let sugg = format!("{}{}.flatten()", arg_snippet, copied);
diag.span_suggestion(
arg.span,
"try",
sugg,
Applicability::MaybeIncorrect,
);
diag.span_help(
inner_expr.span,
"...and remove the `if let` statement in the for loop",
);
}
);
}
}
}
} | let ok_ctor = is_lang_ctor(cx, qpath, ResultOk);
if some_ctor || ok_ctor;
// Ensure expr in `if let` is not used afterwards | random_line_split
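As an aside, rows like the one above are typically turned into a training string by rearranging the pieces into prefix-suffix-middle (PSM) order around sentinel tokens, so the model generates `middle` last. A sketch under the assumption of generic placeholder sentinels; real tokenizers define their own:

```python
# Placeholder sentinel strings; the actual tokens are model-specific.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def to_psm_example(row: dict) -> str:
    """Assemble one fill-in-the-middle training string in PSM order."""
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])
```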
manual_flatten.rs | use super::utils::make_iterator_snippet;
use super::MANUAL_FLATTEN;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::higher;
use clippy_utils::visitors::is_local_used;
use clippy_utils::{is_lang_ctor, path_to_local_id};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::LangItem::{OptionSome, ResultOk};
use rustc_hir::{Expr, ExprKind, Pat, PatKind, StmtKind};
use rustc_lint::LateContext;
use rustc_middle::ty;
use rustc_span::source_map::Span;
/// Check for unnecessary `if let` usage in a for loop where only the `Some` or `Ok` variant of the
/// iterator element is used.
pub(super) fn | <'tcx>(
cx: &LateContext<'tcx>,
pat: &'tcx Pat<'_>,
arg: &'tcx Expr<'_>,
body: &'tcx Expr<'_>,
span: Span,
) {
if let ExprKind::Block(block, _) = body.kind {
// Ensure the `if let` statement is the only expression or statement in the for-loop
let inner_expr = if block.stmts.len() == 1 && block.expr.is_none() {
let match_stmt = &block.stmts[0];
if let StmtKind::Semi(inner_expr) = match_stmt.kind {
Some(inner_expr)
} else {
None
}
} else if block.stmts.is_empty() {
block.expr
} else {
None
};
if_chain! {
if let Some(inner_expr) = inner_expr;
if let Some(higher::IfLet { let_pat, let_expr, if_then, if_else: None })
= higher::IfLet::hir(cx, inner_expr);
// Ensure match_expr in `if let` statement is the same as the pat from the for-loop
if let PatKind::Binding(_, pat_hir_id, _, _) = pat.kind;
if path_to_local_id(let_expr, pat_hir_id);
// Ensure the `if let` statement is for the `Some` variant of `Option` or the `Ok` variant of `Result`
if let PatKind::TupleStruct(ref qpath, _, _) = let_pat.kind;
let some_ctor = is_lang_ctor(cx, qpath, OptionSome);
let ok_ctor = is_lang_ctor(cx, qpath, ResultOk);
if some_ctor || ok_ctor;
// Ensure expr in `if let` is not used afterwards
if !is_local_used(cx, if_then, pat_hir_id);
then {
let if_let_type = if some_ctor { "Some" } else { "Ok" };
// Prepare the error message
let msg = format!("unnecessary `if let` since only the `{}` variant of the iterator element is used", if_let_type);
// Prepare the help message
let mut applicability = Applicability::MaybeIncorrect;
let arg_snippet = make_iterator_snippet(cx, arg, &mut applicability);
let copied = match cx.typeck_results().expr_ty(let_expr).kind() {
ty::Ref(_, inner, _) => match inner.kind() {
ty::Ref(..) => ".copied()",
_ => ""
}
_ => ""
};
span_lint_and_then(
cx,
MANUAL_FLATTEN,
span,
&msg,
|diag| {
let sugg = format!("{}{}.flatten()", arg_snippet, copied);
diag.span_suggestion(
arg.span,
"try",
sugg,
Applicability::MaybeIncorrect,
);
diag.span_help(
inner_expr.span,
"...and remove the `if let` statement in the for loop",
);
}
);
}
}
}
}
| check | identifier_name |
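In an `identifier_name` row such as the one above, the masked middle is a single identifier; here it is the function name `check`. A rough reconstruction of how that kind of split could be produced (illustrative only, not the dataset's actual generation code):

```python
import re
from typing import Optional, Tuple

def split_on_fn_name(source: str) -> Optional[Tuple[str, str, str]]:
    """Mask the first Rust `fn` name as the FIM middle (sketch)."""
    m = re.search(r"(?<=\bfn )\w+", source)
    if m is None:
        return None
    return source[: m.start()], m.group(0), source[m.end() :]
```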
manual_flatten.rs | use super::utils::make_iterator_snippet;
use super::MANUAL_FLATTEN;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::higher;
use clippy_utils::visitors::is_local_used;
use clippy_utils::{is_lang_ctor, path_to_local_id};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::LangItem::{OptionSome, ResultOk};
use rustc_hir::{Expr, ExprKind, Pat, PatKind, StmtKind};
use rustc_lint::LateContext;
use rustc_middle::ty;
use rustc_span::source_map::Span;
/// Check for unnecessary `if let` usage in a for loop where only the `Some` or `Ok` variant of the
/// iterator element is used.
pub(super) fn check<'tcx>(
cx: &LateContext<'tcx>,
pat: &'tcx Pat<'_>,
arg: &'tcx Expr<'_>,
body: &'tcx Expr<'_>,
span: Span,
) | {
if let ExprKind::Block(block, _) = body.kind {
// Ensure the `if let` statement is the only expression or statement in the for-loop
let inner_expr = if block.stmts.len() == 1 && block.expr.is_none() {
let match_stmt = &block.stmts[0];
if let StmtKind::Semi(inner_expr) = match_stmt.kind {
Some(inner_expr)
} else {
None
}
} else if block.stmts.is_empty() {
block.expr
} else {
None
};
if_chain! {
if let Some(inner_expr) = inner_expr;
if let Some(higher::IfLet { let_pat, let_expr, if_then, if_else: None })
= higher::IfLet::hir(cx, inner_expr);
// Ensure match_expr in `if let` statement is the same as the pat from the for-loop
if let PatKind::Binding(_, pat_hir_id, _, _) = pat.kind;
if path_to_local_id(let_expr, pat_hir_id);
// Ensure the `if let` statement is for the `Some` variant of `Option` or the `Ok` variant of `Result`
if let PatKind::TupleStruct(ref qpath, _, _) = let_pat.kind;
let some_ctor = is_lang_ctor(cx, qpath, OptionSome);
let ok_ctor = is_lang_ctor(cx, qpath, ResultOk);
if some_ctor || ok_ctor;
// Ensure expr in `if let` is not used afterwards
if !is_local_used(cx, if_then, pat_hir_id);
then {
let if_let_type = if some_ctor { "Some" } else { "Ok" };
// Prepare the error message
let msg = format!("unnecessary `if let` since only the `{}` variant of the iterator element is used", if_let_type);
// Prepare the help message
let mut applicability = Applicability::MaybeIncorrect;
let arg_snippet = make_iterator_snippet(cx, arg, &mut applicability);
let copied = match cx.typeck_results().expr_ty(let_expr).kind() {
ty::Ref(_, inner, _) => match inner.kind() {
ty::Ref(..) => ".copied()",
_ => ""
}
_ => ""
};
span_lint_and_then(
cx,
MANUAL_FLATTEN,
span,
&msg,
|diag| {
let sugg = format!("{}{}.flatten()", arg_snippet, copied);
diag.span_suggestion(
arg.span,
"try",
sugg,
Applicability::MaybeIncorrect,
);
diag.span_help(
inner_expr.span,
"...and remove the `if let` statement in the for loop",
);
}
);
}
}
}
} | identifier_body |
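Rows 1-3 above are the same `manual_flatten.rs` source under three different split strategies, so reassembling them must give identical text. A small consistency check built on that observation, assuming `file_name` identifies a single file within the slice being checked:

```python
def check_split_consistency(rows: list) -> None:
    """Every split of one file must reassemble to the same source text."""
    variants = {}
    for row in rows:
        source = row["prefix"] + row["middle"] + row["suffix"]
        variants.setdefault(row["file_name"], set()).add(source)
    for name, sources in variants.items():
        assert len(sources) == 1, f"inconsistent splits for {name}"
```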
|
repl.rs | extern crate readline;
use std::process;
use storage;
use jobs;
use cluster;
pub fn start() {
loop {
match readline::readline(">>> ") {
Ok(input) => {
let input = input.replace("\n", "");
if input.len() > 0 {
readline::add_history(input.as_ref());
println!("{:?}", input);
if "help" == input {
println!("help comes later....");
}
else if "jobs" == input {
//jobs::list();
}
else if "ping" == input {
cluster::ping(4); | storage.list();
}
else if "exit" == input || "quit" == input {
process::exit(0);
}
}
},
Err(e) => {
println!("{}", e);
//panic!("{}", e);
}
}
}
} | }
else if "storage" == input {
let storage = storage::bootstrap(); | random_line_split |
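A `random_line_split` row like this one cuts at arbitrary line boundaries rather than at a syntactic unit. One plausible way to generate such a split (again illustrative; the real tooling is not part of this preview):

```python
import random

def random_line_split(source: str, rng: random.Random):
    """Pick two random line boundaries; the span between them is the middle."""
    lines = source.splitlines(keepends=True)
    i, j = sorted(rng.sample(range(len(lines) + 1), 2))
    return "".join(lines[:i]), "".join(lines[i:j]), "".join(lines[j:])
```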
repl.rs | extern crate readline;
use std::process;
use storage;
use jobs;
use cluster;
pub fn start() | {
loop {
match readline::readline(">>> ") {
Ok(input) => {
let input = input.replace("\n", "");
if input.len() > 0 {
readline::add_history(input.as_ref());
println!("{:?}", input);
if "help" == input {
println!("help comes later....");
}
else if "jobs" == input {
//jobs::list();
}
else if "ping" == input {
cluster::ping(4);
}
else if "storage" == input {
let storage = storage::bootstrap();
storage.list();
}
else if "exit" == input || "quit" == input {
process::exit(0);
}
}
},
Err(e) => {
println!("{}", e);
//panic!("{}", e);
}
}
}
} | identifier_body |
|
repl.rs | extern crate readline;
use std::process;
use storage;
use jobs;
use cluster;
pub fn start() {
loop {
match readline::readline(">>> ") {
Ok(input) => {
let input = input.replace("\n", "");
if input.len() > 0 {
readline::add_history(input.as_ref());
println!("{:?}", input);
if "help" == input {
println!("help comes later....");
}
else if "jobs" == input {
//jobs::list();
}
else if "ping" == input {
cluster::ping(4);
}
else if "storage" == input {
let storage = storage::bootstrap();
storage.list();
}
else if "exit" == input || "quit" == input {
process::exit(0);
}
}
},
Err(e) => |
}
}
}
| {
println!("{}", e);
//panic!("{}", e);
} | conditional_block |
repl.rs | extern crate readline;
use std::process;
use storage;
use jobs;
use cluster;
pub fn | () {
loop {
match readline::readline(">>> ") {
Ok(input) => {
let input = input.replace("\n", "");
if input.len() > 0 {
readline::add_history(input.as_ref());
println!("{:?}", input);
if "help" == input {
println!("help comes later....");
}
else if "jobs" == input {
//jobs::list();
}
else if "ping" == input {
cluster::ping(4);
}
else if "storage" == input {
let storage = storage::bootstrap();
storage.list();
}
else if "exit" == input || "quit" == input {
process::exit(0);
}
}
},
Err(e) => {
println!("{}", e);
//panic!("{}", e);
}
}
}
}
| start | identifier_name |
utils.py | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. It does the same as the built-in function C{callable}.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as a string. The main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
The advantage is that you don’t have to do the mangling ‘by hand’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword
if sys.version_info[:3] < (2, 6, 0):
is_callable = callable
else:
def is_callable (object):
return hasattr (object, '__call__')
def is_valid_identifier (identifier):
"""
Determine if C{identifier} is a valid Python identifier. This function never raises
any exceptions. If C{identifier} is not a string, it simply returns C{False}.
@param identifier: identifier to determine if it is valid
@type identifier: C{basestring}
@rtype: C{bool}
"""
return (isinstance (identifier, StringType)
and re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is not None
and not iskeyword (identifier))
def mangle_identifier (class_name, identifier):
"""
Mangle C{identifier} as would be done if it appeared in a class named
C{class_name}. This function makes it possible to mimic standard Python mangling of
pseudo-private attributes, i.e. those whose names start with two underscores and don’t
end in two. If C{identifier} is not considered a private name, it is returned
unchanged.
@param class_name: name of Python class.
@type class_name: C{basestring}
| @raises ValueError: if either C{class_name} or C{identifier} is not valid from
Python’s point of view.
"""
if not (is_valid_identifier (class_name) and is_valid_identifier (identifier)):
raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")
if (identifier.startswith ('__')
and not identifier.endswith ('__')
and class_name != '_' * len (class_name)):
return '_%s%s' % (class_name.lstrip ('_'), identifier)
else:
return identifier
class _AsString (object):
"""
Internal helper class for C{L{as_string}}. Don’t use directly.
"""
__slots__ = ()
def __getattribute__(self, name):
return name
def __setattr__(self, name, value):
raise TypeError ("'as_string' attributes cannot be set")
def __delattr__(self, name):
raise TypeError ("'as_string' attributes cannot be deleted")
def __repr__(self):
return 'notify.utils.as_string'
as_string = _AsString ()
def raise_not_implemented_exception (object = None, function_name = None):
"""
Raise C{NotImplementedError} for a method invoked with C{object} as C{self}. The
function determines object class and method declaration class(es) itself and that’s
the whole point of it.
It should be called like this:
>>> raise_not_implemented_exception (self)
And output might look like this::
File ".../foo.py", line # in ?
Foo ().bar ()
File ".../foo.py", line #, in bar
raise_not_implemented_exception (self)
File ".../notify/utils.py", line #, in raise_not_implemented_exception
raise exception
NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)
Optionally, C{function_name} can be specified. This argument mainly exists for C
extensions, since the function name cannot be detected automatically in this case. In
Python code you should just leave this argument out.
@param object: the object for which a non-implemented method is called.
@type object: C{object}
@param function_name: name of the unimplemented function or method (inferred
automatically for non-extension functions).
@type function_name: C{basestring} or C{None}
@raises NotImplementedError: always.
"""
if function_name is None:
try:
raise Exception
except Exception:
try:
traceback = sys.exc_info () [2]
function_name = traceback.tb_frame.f_back.f_code.co_name
except Exception:
# We can do nothing, ignore.
pass
if function_name is not None:
function_description = '%s()' % function_name
else:
function_description = 'UNKNOWN FUNCTION'
try:
class_description = ' in class %s' % object.__class__.__name__
if function_name is not None:
declaration_classes = _find_declaration_classes (object.__class__, function_name)
if len (declaration_classes) == 1:
if declaration_classes[0] is not object.__class__:
class_description += ' (declared in %s)' % declaration_classes[0].__name__
elif len (declaration_classes) > 1:
class_description += (' (declared in %s)'
% ', '.join ([_class.__name__
for _class in declaration_classes]))
except Exception:
class_description = ''
exception = NotImplementedError ('%s not implemented%s'
% (function_description, class_description))
raise exception
def _find_declaration_classes (_class, function_name):
declaring_bases = [base for base in _class.__bases__ if hasattr (base, function_name)]
if declaring_bases:
return reduce (lambda list1, list2: list1 + list2,
[_find_declaration_classes (base, function_name)
for base in declaring_bases],
[])
else:
return [_class]
if sys.version_info[0] >= 3:
execute = eval ('exec')
else:
from notify._2_x import execute
class frozendict (dict):
__slots__ = ('__hash')
def __init__(self, *arguments, **keywords):
super (frozendict, self).__init__(*arguments, **keywords)
self.__hash = None
def clear (self):
raise TypeError ("'%s' object doesn't support clearing" % type (self).__name__)
def pop (self, key, default = None):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def popitem (self):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def setdefault (self, key, default = None):
raise TypeError ("'%s' object doesn't support setdefault operation" % type (self).__name__)
def update (self, dict):
raise TypeError ("'%s' object doesn't support updating" % type (self).__name__)
def __setitem__(self, key, value):
raise TypeError ("'%s' object doesn't support item setting" % type (self).__name__)
def __delitem__(self, key):
raise TypeError ("'%s' object doesn't support item deletion" % type (self).__name__)
def __hash__(self):
_hash = self.__hash
if _hash is None:
_hash = 0x1337
if hasattr (dict, 'iteritems'):
for key, value in self.iteritems ():
_hash ^= hash (key) ^ hash (value)
else:
for key, value in self.items ():
_hash ^= hash (key) ^ hash (value)
self.__hash = _hash
return _hash
def __repr__(self):
return '%s (%s)' % (type (self).__name__, super (frozendict, self).__repr__())
frozendict.EMPTY = frozendict ({ })
# Force hash to be precomputed.
hash (frozendict.EMPTY)
class DummyReference (object):
"""
Simple class that is interface-compatible with C{weakref.ReferenceType}. In other
words, its constructor accepts only one parameter and this value is later returned
from C{L{__call__}} method. Unlike weak references, instances of this class don’t do
anything special. They are only needed to avoid special cases for non-references,
since you can treat instances of C{weakref.ReferenceType} and this class in the same
way.
"""
__slots__ = ('__object')
def __init__(self, object):
"""
Create a new dummy reference that will return C{object} when called.
@param object: the object that will be returned by this reference.
@type object: C{object}
"""
self.__object = object
def __call__(self):
"""
Return the C{object} specified at construction time.
@rtype: C{object}
"""
return self.__object
def __repr__(self):
return ('<%s.%s at 0x%x; to %r>'
% (self.__module__, self.__class__.__name__, id (self), self.__object))
def __str__(self):
return '<%s at 0x%x; to %s>' % (self.__class__.__name__, id (self), self.__object)
if sys.version_info[0] >= 3:
ClassTypes = (type,)
StringType = str
else:
ClassTypes = (type, types.ClassType)
StringType = basestring
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End: | @param identifier: name of an attribute of that class.
@type identifier: C{basestring}
@rtype: C{str}
| random_line_split |
utils.py | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. It does the same as the built-in function C{callable}.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as a string. The main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
The advantage is that you don’t have to do the mangling ‘by hand’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword
if sys.version_info[:3] < (2, 6, 0):
is_callable = callable
else:
def is_callable (object):
return hasattr (object, '__call__')
def is_valid_identifier (identifier):
"""
Determine if C{identifier} is a valid Python identifier. This function never raises
any exceptions. If C{identifier} is not a string, it simply returns C{False}.
@param identifier: identifier to determine if it is valid
@type identifier: C{basestring}
@rtype: C{bool}
"""
return (isinstance (identifier, StringType)
and re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is not None
and not iskeyword (identifier))
def mangle_identifier (class_name, identifier):
"""
Mangle C{identifier} as would be done if it appeared in a class named
C{class_name}. This function makes it possible to mimic standard Python mangling of
pseudo-private attributes, i.e. those whose names start with two underscores and don’t
end in two. If C{identifier} is not considered a private name, it is returned
unchanged.
@param class_name: name of Python class.
@type class_name: C{basestring}
@param identifier: name of an attribute of that class.
@type identifier: C{basestring}
@rtype: C{str}
@raises ValueError: if either C{class_name} or C{identifier} is not valid from
Python’s point of view.
"""
if not (is_valid_identifier (class_name) and is_valid_identifier (identifier)):
raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")
if (identifier.startswith ('__')
and not identifier.endswith ('__')
and class_name != '_' * len (class_name)):
return '_%s%s' % (class_name.lstrip ('_'), identifier)
else:
return identifier
class _AsString (object):
"""
Internal helper class for C{L{as_string}}. Don’t use directly.
"""
__slots__ = ()
def __getattribute__(self, name):
return name
def __setattr__(self, name, value):
raise TypeError ("'as_string' attributes cannot be set")
def __delattr__(self, name):
raise TypeError ("'as_string' attributes cannot be deleted")
def __repr__(self):
return 'notify.utils.as_stri |
def raise_not_implemented_exception (object = None, function_name = None):
"""
Raise C{NotImplementedError} for a method invoked with C{object} as C{self}. The
function determines object class and method declaration class(es) itself and that’s
the whole point of it.
It should be called like this:
>>> raise_not_implemented_exception (self)
And output might look like this::
File ".../foo.py", line # in ?
Foo ().bar ()
File ".../foo.py", line #, in bar
raise_not_implemented_exception (self)
File ".../notify/utils.py", line #, in raise_not_implemented_exception
raise exception
NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)
Optionally, C{function_name} can be specified. This argument mainly exists for C
extensions, since the function name cannot be detected automatically in this case. In
Python code you should just leave this argument out.
@param object: the object for which a non-implemented method is called.
@type object: C{object}
@param function_name: name of the unimplemented function or method (inferred
automatically for non-extension functions).
@type function_name: C{basestring} or C{None}
@raises NotImplementedError: always.
"""
if function_name is None:
try:
raise Exception
except Exception:
try:
traceback = sys.exc_info () [2]
function_name = traceback.tb_frame.f_back.f_code.co_name
except Exception:
# We can do nothing, ignore.
pass
if function_name is not None:
function_description = '%s()' % function_name
else:
function_description = 'UNKNOWN FUNCTION'
try:
class_description = ' in class %s' % object.__class__.__name__
if function_name is not None:
declaration_classes = _find_declaration_classes (object.__class__, function_name)
if len (declaration_classes) == 1:
if declaration_classes[0] is not object.__class__:
class_description += ' (declared in %s)' % declaration_classes[0].__name__
elif len (declaration_classes) > 1:
class_description += (' (declared in %s)'
% ', '.join ([_class.__name__
for _class in declaration_classes]))
except Exception:
class_description = ''
exception = NotImplementedError ('%s not implemented%s'
% (function_description, class_description))
raise exception
def _find_declaration_classes (_class, function_name):
declaring_bases = [base for base in _class.__bases__ if hasattr (base, function_name)]
if declaring_bases:
return reduce (lambda list1, list2: list1 + list2,
[_find_declaration_classes (base, function_name)
for base in declaring_bases],
[])
else:
return [_class]
if sys.version_info[0] >= 3:
execute = eval ('exec')
else:
from notify._2_x import execute
class frozendict (dict):
__slots__ = ('__hash')
def __init__(self, *arguments, **keywords):
super (frozendict, self).__init__(*arguments, **keywords)
self.__hash = None
def clear (self):
raise TypeError ("'%s' object doesn't support clearing" % type (self).__name__)
def pop (self, key, default = None):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def popitem (self):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def setdefault (self, key, default = None):
raise TypeError ("'%s' object doesn't support setdefault operation" % type (self).__name__)
def update (self, dict):
raise TypeError ("'%s' object doesn't support updating" % type (self).__name__)
def __setitem__(self, key, value):
raise TypeError ("'%s' object doesn't support item setting" % type (self).__name__)
def __delitem__(self, key):
raise TypeError ("'%s' object doesn't support item deletion" % type (self).__name__)
def __hash__(self):
_hash = self.__hash
if _hash is None:
_hash = 0x1337
if hasattr (dict, 'iteritems'):
for key, value in self.iteritems ():
_hash ^= hash (key) ^ hash (value)
else:
for key, value in self.items ():
_hash ^= hash (key) ^ hash (value)
self.__hash = _hash
return _hash
def __repr__(self):
return '%s (%s)' % (type (self).__name__, super (frozendict, self).__repr__())
frozendict.EMPTY = frozendict ({ })
# Force hash to be precomputed.
hash (frozendict.EMPTY)
class DummyReference (object):
"""
Simple class that is interface-compatible with C{weakref.ReferenceType}. In other
words, its constructor accepts only one parameter and this value is later returned
from C{L{__call__}} method. Unlike weak references, instances of this class don’t do
anything special. They are only needed to avoid special cases for non-references,
since you can treat instances of C{weakref.ReferenceType} and this class in the same
way.
"""
__slots__ = ('__object')
def __init__(self, object):
"""
Create a new dummy reference that will return C{object} when called.
@param object: the object that will be returned by this reference.
@type object: C{object}
"""
self.__object = object
def __call__(self):
"""
Return the C{object} specified at construction time.
@rtype: C{object}
"""
return self.__object
def __repr__(self):
return ('<%s.%s at 0x%x; to %r>'
% (self.__module__, self.__class__.__name__, id (self), self.__object))
def __str__(self):
return '<%s at 0x%x; to %s>' % (self.__class__.__name__, id (self), self.__object)
if sys.version_info[0] >= 3:
ClassTypes = (type,)
StringType = str
else:
ClassTypes = (type, types.ClassType)
StringType = basestring
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| ng'
as_string = _AsString ()
| identifier_body |
utils.py | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. It does the same as the built-in function C{callable}.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as a string. The main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
The advantage is that you don’t have to do the mangling ‘by hand’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword
if sys.version_info[:3] < (2, 6, 0):
is_callable = callable
else:
def is_callable (object):
return hasattr (object, '__call__')
def is_valid_identifier (identifier):
"""
Determine if C{identifier} is a valid Python identifier. This function never raises
any exceptions. If C{identifier} is not a string, it simply returns C{False}.
@param identifier: identifier to determine if it is valid
@type identifier: C{basestring}
@rtype: C{bool}
"""
return (isinstance (identifier, StringType)
and re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is not None
and not iskeyword (identifier))
def mangle_identifier (class_name, identifier):
"""
Mangle C{identifier} as would be done if it appeared in a class named
C{class_name}. This function makes it possible to mimic standard Python mangling of
pseudo-private attributes, i.e. those whose names start with two underscores and don’t
end in two. If C{identifier} is not considered a private name, it is returned
unchanged.
@param class_name: name of Python class.
@type class_name: C{basestring}
@param identifier: name of an attribute of that class.
@type identifier: C{basestring}
@rtype: C{str}
@raises ValueError: if either C{class_name} or C{identifier} is not valid from
Python’s point of view.
"""
if not (is_valid_identifier (class_name) and is_valid_identifier (identifier)):
raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")
if (identifier.startswith ('__')
and not identifier.endswith ('__')
and class_name != '_' * len (class_name)):
return '_%s%s' % (class_name.lstrip ('_'), identifier)
else:
return identifier
class _AsString (object):
"""
Internal helper class for C{L{as_string}}. Don’t use directly.
"""
__slots__ = ()
def __getattribute__(self, name) | name
def __setattr__(self, name, value):
raise TypeError ("'as_string' attributes cannot be set")
def __delattr__(self, name):
raise TypeError ("'as_string' attributes cannot be deleted")
def __repr__(self):
return 'notify.utils.as_string'
as_string = _AsString ()
def raise_not_implemented_exception (object = None, function_name = None):
"""
Raise C{NotImplementedError} for a method invoked with C{object} as C{self}. The
function determines object class and method declaration class(es) itself and that’s
the whole point of it.
It should be called like this:
>>> raise_not_implemented_exception (self)
And output might look like this::
File ".../foo.py", line # in ?
Foo ().bar ()
File ".../foo.py", line #, in bar
raise_not_implemented_exception (self)
File ".../notify/utils.py", line #, in raise_not_implemented_exception
raise exception
NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)
Optionally, C{function_name} can be specified. This argument mainly exists for C
extensions, since the function name cannot be detected automatically in this case. In
Python code you should just leave this argument out.
@param object: the object for which a non-implemented method is called.
@type object: C{object}
@param function_name: name of the unimplemented function or method (inferred
automatically for non-extension functions).
@type function_name: C{basestring} or C{None}
@raises NotImplementedError: always.
"""
if function_name is None:
try:
raise Exception
except Exception:
try:
traceback = sys.exc_info () [2]
function_name = traceback.tb_frame.f_back.f_code.co_name
except Exception:
# We can do nothing, ignore.
pass
if function_name is not None:
function_description = '%s()' % function_name
else:
function_description = 'UNKNOWN FUNCTION'
try:
class_description = ' in class %s' % object.__class__.__name__
if function_name is not None:
declaration_classes = _find_declaration_classes (object.__class__, function_name)
if len (declaration_classes) == 1:
if declaration_classes[0] is not object.__class__:
class_description += ' (declared in %s)' % declaration_classes[0].__name__
elif len (declaration_classes) > 1:
class_description += (' (declared in %s)'
% ', '.join ([_class.__name__
for _class in declaration_classes]))
except Exception:
class_description = ''
exception = NotImplementedError ('%s not implemented%s'
% (function_description, class_description))
raise exception
def _find_declaration_classes (_class, function_name):
declaring_bases = [base for base in _class.__bases__ if hasattr (base, function_name)]
if declaring_bases:
return reduce (lambda list1, list2: list1 + list2,
[_find_declaration_classes (base, function_name)
for base in declaring_bases],
[])
else:
return [_class]
if sys.version_info[0] >= 3:
execute = eval ('exec')
else:
from notify._2_x import execute
class frozendict (dict):
__slots__ = ('__hash')
def __init__(self, *arguments, **keywords):
super (frozendict, self).__init__(*arguments, **keywords)
self.__hash = None
def clear (self):
raise TypeError ("'%s' object doesn't support clearing" % type (self).__name__)
def pop (self, key, default = None):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def popitem (self):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def setdefault (self, key, default = None):
raise TypeError ("'%s' object doesn't support setdefault operation" % type (self).__name__)
def update (self, dict):
raise TypeError ("'%s' object doesn't support updating" % type (self).__name__)
def __setitem__(self, key, value):
raise TypeError ("'%s' object doesn't support item setting" % type (self).__name__)
def __delitem__(self, key):
raise TypeError ("'%s' object doesn't support item deletion" % type (self).__name__)
def __hash__(self):
_hash = self.__hash
if _hash is None:
_hash = 0x1337
if hasattr (dict, 'iteritems'):
for key, value in self.iteritems ():
_hash ^= hash (key) ^ hash (value)
else:
for key, value in self.items ():
_hash ^= hash (key) ^ hash (value)
self.__hash = _hash
return _hash
def __repr__(self):
return '%s (%s)' % (type (self).__name__, super (frozendict, self).__repr__())
frozendict.EMPTY = frozendict ({ })
# Force hash to be precomputed.
hash (frozendict.EMPTY)
class DummyReference (object):
"""
Simple class that is interface-compatible with C{weakref.ReferenceType}. In other
words, its constructor accepts only one parameter and this value is later returned
from C{L{__call__}} method. Unlike weak references, instances of this class don’t do
anything special. They are only needed to avoid special cases for non-references,
since you can treat instances of C{weakref.ReferenceType} and this class in the same
way.
"""
__slots__ = ('__object')
def __init__(self, object):
"""
Create a new dummy reference that will return C{object} when called.
@param object: the object that will be returned by this reference.
@type object: C{object}
"""
self.__object = object
def __call__(self):
"""
Return the C{object} specified at construction time.
@rtype: C{object}
"""
return self.__object
def __repr__(self):
return ('<%s.%s at 0x%x; to %r>'
% (self.__module__, self.__class__.__name__, id (self), self.__object))
def __str__(self):
return '<%s at 0x%x; to %s>' % (self.__class__.__name__, id (self), self.__object)
if sys.version_info[0] >= 3:
ClassTypes = (type,)
StringType = str
else:
ClassTypes = (type, types.ClassType)
StringType = basestring
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| :
return | identifier_name |
utils.py | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. It does the same as the built-in function C{callable}.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as a string. The main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
The advantage is that you don’t have to do the mangling ‘by hand’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword
if sys.version_info[:3] < (2, 6, 0):
is_callable = callable
else:
def is_callable (object):
return hasattr (object, '__call__')
def is_valid_identifier (identifier):
"""
Determine if C{identifier} is a valid Python identifier. This function never raises
any exceptions. If C{identifier} is not a string, it simply returns C{False}.
@param identifier: identifier to determine if it is valid
@type identifier: C{basestring}
@rtype: C{bool}
"""
return (isinstance (identifier, StringType)
and re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is not None
and not iskeyword (identifier))
def mangle_identifier (class_name, identifier):
"""
Mangle C{identifier} as would be done if it appeared in a class named
C{class_name}. This function makes it possible to mimic standard Python mangling of
pseudo-private attributes, i.e. those whose names start with two underscores and don’t
end in two. If C{identifier} is not considered a private name, it is returned
unchanged.
@param class_name: name of Python class.
@type class_name: C{basestring}
@param identifier: name of an attribute of that class.
@type identifier: C{basestring}
@rtype: C{str}
@raises ValueError: if either C{class_name} or C{identifier} is not valid from
Python’s point of view.
"""
if not (is_valid_identifier (class_name) and is_valid_identifier (identifier)):
raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")
if (identifier.startswith ('__')
and not identifier.endswith ('__')
and class_name != '_' * len (class_name)):
return '_%s%s' % (class_name.lstrip ('_'), identifier)
else:
return identifier
class _AsString (object):
"""
Internal helper class for C{L{as_string}}. Don’t use directly.
"""
__slots__ = ()
def __getattribute__(self, name):
return name
def __setattr__(self, name, value):
raise TypeError ("'as_string' attributes cannot be set")
def __delattr__(self, name):
raise TypeError ("'as_string' attributes cannot be deleted")
def __repr__(self):
return 'notify.utils.as_string'
as_string = _AsString ()
def raise_not_implemented_exception (object = None, function_name = None):
"""
Raise C{NotImplementedError} for a method invoked with C{object} as C{self}. The
function determines object class and method declaration class(es) itself and that’s
the whole point of it.
It should be called like this:
>>> raise_not_implemented_exception (self)
And output might look like this::
File ".../foo.py", line # in ?
Foo ().bar ()
File ".../foo.py", line #, in bar
raise_not_implemented_exception (self)
File ".../notify/utils.py", line #, in raise_not_implemented_exception
raise exception
NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)
Optionally, C{function_name} can be specified. This argument mainly exists for C
extensions, since the function name cannot be detected automatically in this case. In
Python code you should just leave this argument out.
@param object: the object for which a non-implemented method is called.
@type object: C{object}
@param function_name: name of the unimplemented function or method (inferred
automatically for non-extension functions).
@type function_name: C{basestring} or C{None}
@raises NotImplementedError: always.
"""
if function_name is None:
try:
raise Exception
except Exception:
try:
traceback = sys.exc_info () [2]
function_name = traceback.tb_frame.f_back.f_code.co_name
except Exception:
# We can do nothing, ignore.
pass
if function_name is not None:
function_description = '%s()' % function_name
else:
function_description = 'UNKNOWN FUNCTION'
try:
class_description = ' in class %s' % object.__class__.__name__
if function_name is not None:
declaration_classes = _find_declaration_classes (object.__class__, function_name)
if len (declaration_classes) == 1:
if declaration_classes[0] is not object.__class__:
class_description += ' (declared in %s)' % declaration_classes[0].__name__
elif len (declaration_classes) > 1:
class_description += (' (declared in %s)'
% ', '.join ([_class.__name__
for _class in declaration_classes]))
except Exception:
class_description = ''
exception = NotImplementedError ('%s not implemented%s'
% (function_description, class_description))
raise exception
def _find_declaration_classes (_class, function_name):
declaring_bases = [base for base in _class.__bases__ if hasattr (base, function_name)]
if declaring_bases:
return reduce (lambda list1, list2: list1 + list2,
[_find_declaration_classes (base, function_name)
for base in declaring_bases],
[])
else:
return [_class]
if sys.version_info[0] >= 3:
execute = eval ('exec')
else:
from notify._2_x import execute
class frozendict (dict):
__slots__ = ('__hash')
def __init__(self, *arguments, **keywords):
super (frozendict, self).__init__(*arguments, **keywords)
self.__hash = None
def clear (self):
raise TypeError ("'%s' object doesn't support clearing" % type (self).__name__)
def pop (self, key, default = None):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def popitem (self):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def setdefault (self, key, default = None):
raise TypeError ("'%s' object doesn't support setdefault operation" % type (self).__name__)
def update (self, dict):
raise TypeError ("'%s' object doesn't support updating" % type (self).__name__)
def __setitem__(self, key, value):
raise TypeError ("'%s' object doesn't support item setting" % type (self).__name__)
def __delitem__(self, key):
raise TypeError ("'%s' object doesn't support item deletion" % type (self).__name__)
def __hash__(self):
_hash = self.__hash
if _hash is None:
_hash = 0x1337
if | ef __repr__(self):
return '%s (%s)' % (type (self).__name__, super (frozendict, self).__repr__())
frozendict.EMPTY = frozendict ({ })
# Force hash to be precomputed.
hash (frozendict.EMPTY)
class DummyReference (object):
"""
Simple class that is interface-compatible with C{weakref.ReferenceType}. In other
words, its constructor accepts only one parameter and this value is later returned
from C{L{__call__}} method. Unlike weak references, instances of this class don’t do
anything special. They are only needed to avoid special cases for non-references,
since you can treat instances of C{weakref.ReferenceType} and this class in the same
way.
"""
__slots__ = ('__object')
def __init__(self, object):
"""
Create a new dummy reference that will return C{object} when called.
@param object: the object that will be returned by this reference.
@type object: C{object}
"""
self.__object = object
def __call__(self):
"""
Return the C{object} specified at construction time.
@rtype: C{object}
"""
return self.__object
def __repr__(self):
return ('<%s.%s at 0x%x; to %r>'
% (self.__module__, self.__class__.__name__, id (self), self.__object))
def __str__(self):
return '<%s at 0x%x; to %s>' % (self.__class__.__name__, id (self), self.__object)
if sys.version_info[0] >= 3:
ClassTypes = (type,)
StringType = str
else:
ClassTypes = (type, types.ClassType)
StringType = basestring
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| hasattr (dict, 'iteritems'):
for key, value in self.iteritems ():
_hash ^= hash (key) ^ hash (value)
else:
for key, value in self.items ():
_hash ^= hash (key) ^ hash (value)
self.__hash = _hash
return _hash
d | conditional_block |
testcase.py | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
def | (self, suite, path, flags=None, dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = None
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, flags):
copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
copy.outcomes = self.outcomes
return copy
def PackTask(self):
"""
Extracts those parts of this object that are required to run the test
and returns them as a JSON serializable object.
"""
assert self.id is not None
return [self.suitename(), self.path, self.flags,
self.dependency, list(self.outcomes or []), self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3])
test.outcomes = set(task[4])
test.id = task[5]
test.run = 1
return test
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
def PackResult(self):
"""Serializes the output of the TestCase after it has run."""
self.suite.StripOutputForTransmit(self)
return [self.id, self.output.Pack(), self.duration]
def MergeResult(self, result):
"""Applies the contents of a Result to this object."""
assert result[0] == self.id
self.output = output.Output.Unpack(result[1])
self.duration = result[2]
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
| __init__ | identifier_name |
testcase.py | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
def __init__(self, suite, path, flags=None, dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = None
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, flags):
copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
copy.outcomes = self.outcomes
return copy
def PackTask(self):
"""
Extracts those parts of this object that are required to run the test
and returns them as a JSON serializable object.
"""
assert self.id is not None
return [self.suitename(), self.path, self.flags,
self.dependency, list(self.outcomes or []), self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3])
test.outcomes = set(task[4])
test.id = task[5]
test.run = 1
return test
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
def PackResult(self):
"""Serializes the output of the TestCase after it has run."""
self.suite.StripOutputForTransmit(self)
return [self.id, self.output.Pack(), self.duration]
def MergeResult(self, result):
"""Applies the contents of a Result to this object."""
assert result[0] == self.id
self.output = output.Output.Unpack(result[1])
self.duration = result[2]
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self) | random_line_split |
|
testcase.py | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
def __init__(self, suite, path, flags=None, dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = None
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, flags):
copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
copy.outcomes = self.outcomes
return copy
def PackTask(self):
|
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3])
test.outcomes = set(task[4])
test.id = task[5]
test.run = 1
return test
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
def PackResult(self):
"""Serializes the output of the TestCase after it has run."""
self.suite.StripOutputForTransmit(self)
return [self.id, self.output.Pack(), self.duration]
def MergeResult(self, result):
"""Applies the contents of a Result to this object."""
assert result[0] == self.id
self.output = output.Output.Unpack(result[1])
self.duration = result[2]
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
| """
Extracts those parts of this object that are required to run the test
and returns them as a JSON serializable object.
"""
assert self.id is not None
return [self.suitename(), self.path, self.flags,
self.dependency, list(self.outcomes or []), self.id] | identifier_body |
home.service.ts | import { Injectable } from "@angular/core";
import { Http } from '@angular/http';
import 'rxjs/add/operator/toPromise';
import { HomeModel } from "./home.model";
import { LogServiceProvider } from '../../providers/log-service/log-service';
import { Constants } from "../../app/app.contants";
@Injectable()
export class | {
constructor(public http: Http, public log: LogServiceProvider) {}
// getData(): Promise<HomeModel> {
// return this.http.get(Constants.URL + '/api/homes')
// .toPromise()
// .then(response => response.json() as HomeModel)
// .catch(this.handleError);
// }
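// Fetches the category data from the backend and maps the JSON body to a HomeModel.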
getData(): Promise<HomeModel> {
return this.http.get(Constants.URL + '/api/dataofcategories')
// return this.http.get('./assets/example_data/home.json')
.toPromise()
.then(response => response.json() as HomeModel)
.catch((error) => this.handleError(error));
}
private handleError(error: any): Promise<any> {
this.log.errorService('An error occurred', error); // for demo purposes only
return Promise.reject(error.message || error);
}
}
| HomeService | identifier_name |
home.service.ts | import { Injectable } from "@angular/core";
import { Http } from '@angular/http';
import 'rxjs/add/operator/toPromise';
import { HomeModel } from "./home.model";
import { LogServiceProvider } from '../../providers/log-service/log-service';
import { Constants } from "../../app/app.contants";
@Injectable()
export class HomeService {
constructor(public http: Http, public log: LogServiceProvider) |
// getData(): Promise<HomeModel> {
// return this.http.get(Constants.URL + '/api/homes')
// .toPromise()
// .then(response => response.json() as HomeModel)
// .catch(this.handleError);
// }
getData(): Promise<HomeModel> {
return this.http.get(Constants.URL + '/api/dataofcategories')
// return this.http.get('./assets/example_data/home.json')
.toPromise()
.then(response => response.json() as HomeModel)
.catch((error) => this.handleError(error));
}
private handleError(error: any): Promise<any> {
this.log.errorService('An error occurred', error); // for demo purposes only
return Promise.reject(error.message || error);
}
}
| {} | identifier_body |
home.service.ts | import { Injectable } from "@angular/core";
import { Http } from '@angular/http';
import 'rxjs/add/operator/toPromise';
import { HomeModel } from "./home.model";
import { LogServiceProvider } from '../../providers/log-service/log-service';
import { Constants } from "../../app/app.contants";
@Injectable()
export class HomeService {
constructor(public http: Http, public log: LogServiceProvider) {}
// getData(): Promise<HomeModel> {
// return this.http.get(Constants.URL + '/api/homes')
// .toPromise()
// .then(response => response.json() as HomeModel)
// .catch(this.handleError);
// }
getData(): Promise<HomeModel> {
return this.http.get(Constants.URL + '/api/dataofcategories')
// return this.http.get('./assets/example_data/home.json')
.toPromise()
.then(response => response.json() as HomeModel)
.catch((error) => this.handleError(error));
} | this.log.errorService('An error occurred', error); // for demo purposes only
return Promise.reject(error.message || error);
}
} |
private handleError(error: any): Promise<any> { | random_line_split |
ptx.rs | #![crate_name = "ptx"]
#![feature(convert, slice_chars, vec_push_all)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate aho_corasick;
extern crate getopts;
extern crate memchr;
extern crate regex_syntax;
extern crate regex;
use std::collections::{HashMap, HashSet, BTreeSet};
use std::default::Default;
use std::fs::File;
use getopts::{Options, Matches};
use std::io::{stdin, stdout, BufReader, BufWriter, BufRead, Read, Write};
use regex::Regex;
use std::cmp;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "ptx";
static VERSION: &'static str = "1.0.0";
#[derive(Debug)]
enum OutFormat {
Dumb,
Roff,
Tex,
}
#[derive(Debug)]
struct Config {
format : OutFormat,
gnu_ext : bool,
auto_ref : bool,
input_ref : bool,
right_ref : bool,
ignore_case : bool,
macro_name : String,
trunc_str : String,
context_regex : String,
line_width : usize,
gap_size : usize,
}
impl Default for Config {
fn default() -> Config {
Config {
format : OutFormat::Dumb,
gnu_ext : true,
auto_ref : false,
input_ref : false,
right_ref : false,
ignore_case : false,
macro_name : "xx".to_string(),
trunc_str : "/".to_string(),
context_regex : "\\w+".to_string(),
line_width : 72,
gap_size : 3
}
}
}
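// Reads one word per line from the file named by `option` into a set.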
fn read_word_filter_file(matches: &Matches, option: &str) -> HashSet<String> {
let filename = matches.opt_str(option).expect("parsing options failed!");
let reader = BufReader::new(crash_if_err!(1, File::open(filename)));
let mut words: HashSet<String> = HashSet::new();
for word in reader.lines() {
words.insert(crash_if_err!(1, word));
}
words
}
#[derive(Debug)]
struct WordFilter {
only_specified: bool,
ignore_specified: bool,
only_set: HashSet<String>,
ignore_set: HashSet<String>,
word_regex: String,
}
impl WordFilter {
fn new(matches: &Matches, config: &Config) -> WordFilter {
let (o, oset): (bool, HashSet<String>) =
if matches.opt_present("o") | else {
(false, HashSet::new())
};
let (i, iset): (bool, HashSet<String>) =
if matches.opt_present("i") {
(true, read_word_filter_file(matches, "i"))
} else {
(false, HashSet::new())
};
if matches.opt_present("b") {
crash!(1, "-b not implemented yet");
}
let reg =
if matches.opt_present("W") {
matches.opt_str("W").expect("parsing options failed!")
} else if config.gnu_ext {
"\\w+".to_string()
} else {
"[^ \t\n]+".to_string()
};
WordFilter {
only_specified: o,
ignore_specified: i,
only_set: oset,
ignore_set: iset,
word_regex: reg
}
}
}
#[derive(Debug, PartialOrd, PartialEq, Eq, Ord)]
struct WordRef {
word: String,
global_line_nr: usize,
local_line_nr: usize,
position: usize,
position_end: usize,
filename: String,
}
fn print_version() {
println!("{} {}", NAME, VERSION);
}
fn print_usage(opts: &Options) {
let brief = "Usage: ptx [OPTION]... [INPUT]... (without -G) or: \
ptx -G [OPTION]... [INPUT [OUTPUT]] \n Output a permuted index, \
including context, of the words in the input files. \n\n Mandatory \
arguments to long options are mandatory for short options too.";
let explanation = "With no FILE, or when FILE is -, read standard input. \
Default is '-F /'.";
println!("{}\n{}", opts.usage(&brief), explanation);
}
fn get_config(matches: &Matches) -> Config {
let mut config: Config = Default::default();
let err_msg = "parsing options failed";
if matches.opt_present("G") {
config.gnu_ext = false;
config.format = OutFormat::Roff;
config.context_regex = "[^ \t\n]+".to_string();
} else {
crash!(1, "GNU extensions not implemented yet");
}
if matches.opt_present("S") {
crash!(1, "-S not implemented yet");
}
config.auto_ref = matches.opt_present("A");
config.input_ref = matches.opt_present("r");
config.right_ref = matches.opt_present("R");
config.ignore_case = matches.opt_present("f");
if matches.opt_present("M") {
config.macro_name =
matches.opt_str("M").expect(err_msg).to_string();
}
if matches.opt_present("F") {
config.trunc_str =
matches.opt_str("F").expect(err_msg).to_string();
}
if matches.opt_present("w") {
let width_str = matches.opt_str("w").expect(err_msg);
config.line_width = crash_if_err!(
1, usize::from_str_radix(width_str.as_str(), 10));
}
if matches.opt_present("g") {
let gap_str = matches.opt_str("g").expect(err_msg);
config.gap_size = crash_if_err!(
1, usize::from_str_radix(gap_str.as_str(), 10));
}
if matches.opt_present("O") {
config.format = OutFormat::Roff;
}
if matches.opt_present("T") {
config.format = OutFormat::Tex;
}
config
}
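// Reads every input file (or stdin for "-") and stores its lines together with
// the number of lines read before it, so global line numbers can be recovered later.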
fn read_input(input_files: Vec<&str>, config: &Config) ->
HashMap<String, (Vec<String>, usize)> {
let mut file_map : HashMap<String, (Vec<String>, usize)> =
HashMap::new();
let mut files = Vec::new();
if input_files.is_empty() {
files.push("-");
} else {
if config.gnu_ext {
files.push_all(input_files.as_slice());
} else {
files.push(input_files[0]);
}
}
let mut lines_so_far: usize = 0;
for filename in files {
let reader: BufReader<Box<Read>> = BufReader::new(
if filename == "-" {
Box::new(stdin())
} else {
let file = crash_if_err!(1, File::open(filename));
Box::new(file)
});
let lines: Vec<String> = reader.lines().map(|x| crash_if_err!(1, x))
.collect();
let size = lines.len();
file_map.insert(filename.to_string(), (lines, lines_so_far));
lines_so_far += size
}
file_map
}
fn create_word_set(config: &Config, filter: &WordFilter,
file_map: &HashMap<String, (Vec<String>, usize)>)->
BTreeSet<WordRef> {
let reg = Regex::new(filter.word_regex.as_str()).unwrap();
let ref_reg = Regex::new(config.context_regex.as_str()).unwrap();
let mut word_set: BTreeSet<WordRef> = BTreeSet::new();
for (file, lines) in file_map.iter() {
let mut count: usize = 0;
let offs = lines.1;
for line in (lines.0).iter() {
// if -r, exclude reference from word set
let (ref_beg, ref_end) = match ref_reg.find(line) {
Some(x) => x,
None => (0,0)
};
// match words with given regex
for (beg, end) in reg.find_iter(line) {
if config.input_ref && ((beg, end) == (ref_beg, ref_end)) {
continue;
}
let mut word = line.slice_chars(beg, end).to_string();
if filter.only_specified &&
!(filter.only_set.contains(&word)) {
continue;
}
if filter.ignore_specified &&
filter.ignore_set.contains(&word) {
continue;
}
if config.ignore_case {
word = word.to_lowercase();
}
word_set.insert(WordRef{
word: word,
filename: String::from(file.as_str()),
global_line_nr: offs + count,
local_line_nr: count,
position: beg,
position_end: end
});
}
count += 1;
}
}
word_set
}
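// Builds the reference printed with each output line: "file:line" under -A,
// the line's leading context field under -r, and an empty string otherwise.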
fn get_reference(config: &Config, word_ref: &WordRef, line: &String) ->
String {
if config.auto_ref {
format!("{}:{}", word_ref.filename, word_ref.local_line_nr + 1)
} else if config.input_ref {
let reg = Regex::new(config.context_regex.as_str()).unwrap();
let (beg, end) = match reg.find(line) {
Some(x) => x,
None => (0,0)
};
format!("{}", line.slice_chars(beg, end))
} else {
String::new()
}
}
fn assert_str_integrity(s: &Vec<char>, beg: usize, end: usize) {
assert!(beg <= end);
assert!(end <= s.len());
}
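// If `beg` falls inside a word, advance it past that word so the chunk
// never starts with a broken word.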
fn trim_broken_word_left(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || beg == 0 || s[beg].is_whitespace() ||
s[beg-1].is_whitespace() {
return beg;
}
let mut b = beg;
while b < end && !s[b].is_whitespace() {
b += 1;
}
b
}
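// Mirror of trim_broken_word_left: if `end` falls inside a word, pull it back
// so the chunk never ends with a broken word.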
fn trim_broken_word_right(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || end == s.len() || s[end-1].is_whitespace() ||
s[end].is_whitespace() {
return end;
}
let mut e = end;
while beg < e && !s[e-1].is_whitespace() {
e -= 1;
}
e
}
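// Shrinks the range [beg, end) from both sides until it starts and ends on
// non-whitespace (or becomes empty).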
fn trim_idx(s: &Vec<char>, beg: usize, end: usize) -> (usize, usize) {
assert_str_integrity(s, beg, end);
let mut b = beg;
let mut e = end;
while b < e && s[b].is_whitespace() {
b += 1;
}
while b < e && s[e-1].is_whitespace() {
e -= 1;
}
(b,e)
}
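// Splits the line around the keyword into the four output fields (tail, before,
// after, head), trimming on word boundaries so each half fits in half of the
// configured line width, and inserting the truncation marker where text was cut.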
fn get_output_chunks(all_before: &String, keyword: &String, all_after: &String,
config: &Config) -> (String, String, String, String) {
assert!(all_before.trim() == all_before.as_str());
assert!(keyword.trim() == keyword.as_str());
assert!(all_after.trim() == all_after.as_str());
let mut head = String::new();
let mut before = String::new();
let mut after = String::new();
let mut tail = String::new();
let half_line_size = cmp::max((config.line_width/2) as isize -
(2*config.trunc_str.len()) as isize, 0) as usize;
let max_after_size = cmp::max(half_line_size as isize -
keyword.len() as isize - 1, 0) as usize;
let max_before_size = half_line_size;
let all_before_vec: Vec<char> = all_before.chars().collect();
let all_after_vec: Vec<char> = all_after.chars().collect();
// get before
let mut bb_tmp =
cmp::max(all_before.len() as isize - max_before_size as isize, 0) as usize;
bb_tmp = trim_broken_word_left(&all_before_vec, bb_tmp, all_before.len());
let (before_beg, before_end) =
trim_idx(&all_before_vec, bb_tmp, all_before.len());
before.push_str(all_before.slice_chars(before_beg, before_end));
assert!(max_before_size >= before.len());
// get after
let mut ae_tmp = cmp::min(max_after_size, all_after.len());
ae_tmp = trim_broken_word_right(&all_after_vec, 0, ae_tmp);
let (after_beg, after_end) = trim_idx(&all_after_vec, 0, ae_tmp);
after.push_str(all_after.slice_chars(after_beg, after_end));
assert!(max_after_size >= after.len());
// get tail
let max_tail_size = max_before_size - before.len();
let (tb, _) = trim_idx(&all_after_vec, after_end, all_after.len());
let mut te_tmp = cmp::min(tb + max_tail_size, all_after.len());
te_tmp = trim_broken_word_right(&all_after_vec, tb, te_tmp);
let (tail_beg, tail_end) = trim_idx(&all_after_vec, tb, te_tmp);
tail.push_str(all_after.slice_chars(tail_beg, tail_end));
// get head
let max_head_size = max_after_size - after.len();
let (_, he) = trim_idx(&all_before_vec, 0, before_beg);
let mut hb_tmp =
cmp::max(he as isize - max_head_size as isize, 0) as usize;
hb_tmp = trim_broken_word_left(&all_before_vec, hb_tmp, he);
let (head_beg, head_end) = trim_idx(&all_before_vec, hb_tmp, he);
head.push_str(all_before.slice_chars(head_beg, head_end));
// put right context truncation string if needed
if after_end != all_after.len() && tail_beg == tail_end {
after.push_str(config.trunc_str.as_str());
} else if after_end != all_after.len() && tail_end != all_after.len() {
tail.push_str(config.trunc_str.as_str());
}
// put left context truncation string if needed
if before_beg != 0 && head_beg == head_end {
before = format!("{}{}", config.trunc_str, before);
} else if before_beg != 0 && head_beg != 0 {
head = format!("{}{}", config.trunc_str, head);
}
// add space before "after" if needed
if after.len() > 0 {
after = format!(" {}", after);
}
(tail, before, after, head)
}
fn tex_mapper(x: char) -> String {
match x {
'\\' => "\\backslash{}".to_string(),
'$' | '%' | '#' | '&' | '_' => format!("\\{}", x),
'}' | '{' => format!("$\\{}$", x),
_ => x.to_string()
}
}
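// A small illustrative check of the escaping rules above (a sketch; these
// cases are inferred from the match arms, not taken from an upstream test suite).
#[test]
fn tex_mapper_escapes_specials() {
assert_eq!(tex_mapper('\\'), "\\backslash{}".to_string());
assert_eq!(tex_mapper('$'), "\\$".to_string());
assert_eq!(tex_mapper('{'), "$\\{$".to_string());
assert_eq!(tex_mapper('a'), "a".to_string());
}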
fn adjust_tex_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r ]").unwrap();
let mut fix: String = ws_reg.replace_all(context, " ").trim().to_string();
let mapped_chunks: Vec<String> = fix.chars().map(tex_mapper).collect();
fix = mapped_chunks.connect("");
fix
}
fn format_tex_line(config: &Config, word_ref: &WordRef, line: &String,
reference: &String) -> String {
let mut output = String::new();
output.push_str(&format!("\\{} ", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_tex_str(before.trim().trim_left_matches(reference))
} else {
adjust_tex_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_tex_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_tex_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!("{5}{0}{6}{5}{1}{6}{5}{2}{6}{5}{3}{6}{5}{4}{6}",
tail, before, keyword, after, head, "{", "}").as_str());
if config.auto_ref || config.input_ref {
output.push_str(
&format!("{}{}{}", "{", adjust_tex_str(&reference), "}"));
}
output
}
fn adjust_roff_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r]").unwrap();
ws_reg.replace_all(context, " ").replace("\"", "\"\"").trim().to_string()
}
fn format_roff_line(config: &Config, word_ref: &WordRef, line: &str,
reference: &str) -> String {
let mut output = String::new();
output.push_str(&format!(".{}", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_roff_str(before.trim().trim_left_matches(reference))
} else {
adjust_roff_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_roff_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_roff_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!(" \"{}\" \"{}\" \"{}{}\" \"{}\"",
tail, before, keyword, after, head).as_str());
if config.auto_ref || config.input_ref {
output.push_str(&format!(" \"{}\"", adjust_roff_str(&reference)));
}
output
}
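// Walks the sorted word set and writes one formatted index line per keyword
// occurrence to the output file (or stdout for "-").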
fn write_traditional_output(config: &Config,
file_map: &HashMap<String, (Vec<String>,usize)>,
words: &BTreeSet<WordRef>, output_filename: &str) {
let mut writer: BufWriter<Box<Write>> = BufWriter::new(
if output_filename == "-" {
Box::new(stdout())
} else {
let file = crash_if_err!(1, File::create(output_filename));
Box::new(file)
});
for word_ref in words.iter() {
let file_map_value : &(Vec<String>, usize) =
file_map.get(&(word_ref.filename))
.expect("Missing file in file map");
let (ref lines, _) = *(file_map_value);
let reference =
get_reference(config, word_ref, &lines[word_ref.local_line_nr]);
let output_line: String = match config.format {
OutFormat::Tex => format_tex_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Roff => format_roff_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Dumb => crash!(
1, "There is no dumb format with GNU extensions disabled")
};
crash_if_err!(1, writeln!(writer, "{}", output_line));
}
}
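// Entry point: parse the command line, build the word set from the inputs,
// and emit the permuted index in the requested format.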
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("A", "auto-reference",
"output automatically generated references");
opts.optflag("G", "traditional", "behave more like System V 'ptx'");
opts.optopt("F", "flag-truncation",
"use STRING for flagging line truncations", "STRING");
opts.optopt("M", "macro-name", "macro name to use instead of 'xx'",
"STRING");
opts.optflag("O", "format=roff", "generate output as roff directives");
opts.optflag("R", "right-side-refs",
"put references at right, not counted in -w");
opts.optopt("S", "sentence-regexp", "for end of lines or end of sentences",
"REGEXP");
opts.optflag("T", "format=tex", "generate output as TeX directives");
opts.optopt("W", "word-regexp", "use REGEXP to match each keyword",
"REGEXP");
opts.optopt("b", "break-file", "word break characters in this FILE",
"FILE");
opts.optflag("f", "ignore-case",
"fold lower case to upper case for sorting");
opts.optopt("g", "gap-size", "gap size in columns between output fields",
"NUMBER");
opts.optopt("i", "ignore-file", "read ignore word list from FILE", "FILE");
opts.optopt("o", "only-file", "read only word list from this FILE",
"FILE");
opts.optflag("r", "references", "first field of each line is a reference");
opts.optopt("w", "width", "output width in columns, reference excluded",
"NUMBER");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = return_if_err!(1, opts.parse(&args[1..]));
if matches.opt_present("help") {
print_usage(&opts);
return 0;
}
if matches.opt_present("version") {
print_version();
return 0;
}
let config = get_config(&matches);
let word_filter = WordFilter::new(&matches, &config);
let file_map =
read_input(matches.free.iter().map(|x| x.as_str()).collect(), &config);
let word_set = create_word_set(&config, &word_filter, &file_map);
let output_file = if !config.gnu_ext && matches.free.len() == 2 {
matches.free[1].as_str()
} else {
"-"
};
write_traditional_output(&config, &file_map, &word_set, output_file);
0
}
| {
(true, read_word_filter_file(matches, "o"))
} | conditional_block |
ptx.rs | #![crate_name = "ptx"]
#![feature(convert, slice_chars, vec_push_all)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate aho_corasick;
extern crate getopts;
extern crate memchr;
extern crate regex_syntax;
extern crate regex;
use std::collections::{HashMap, HashSet, BTreeSet};
use std::default::Default;
use std::fs::File;
use getopts::{Options, Matches};
use std::io::{stdin, stdout, BufReader, BufWriter, BufRead, Read, Write};
use regex::Regex;
use std::cmp;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "ptx";
static VERSION: &'static str = "1.0.0";
#[derive(Debug)]
enum | {
Dumb,
Roff,
Tex,
}
#[derive(Debug)]
struct Config {
format : OutFormat,
gnu_ext : bool,
auto_ref : bool,
input_ref : bool,
right_ref : bool,
ignore_case : bool,
macro_name : String,
trunc_str : String,
context_regex : String,
line_width : usize,
gap_size : usize,
}
impl Default for Config {
fn default() -> Config {
Config {
format : OutFormat::Dumb,
gnu_ext : true,
auto_ref : false,
input_ref : false,
right_ref : false,
ignore_case : false,
macro_name : "xx".to_string(),
trunc_str : "/".to_string(),
context_regex : "\\w+".to_string(),
line_width : 72,
gap_size : 3
}
}
}
fn read_word_filter_file(matches: &Matches, option: &str) -> HashSet<String> {
let filename = matches.opt_str(option).expect("parsing options failed!");
let reader = BufReader::new(crash_if_err!(1, File::open(filename)));
let mut words: HashSet<String> = HashSet::new();
for word in reader.lines() {
words.insert(crash_if_err!(1, word));
}
words
}
#[derive(Debug)]
struct WordFilter {
only_specified: bool,
ignore_specified: bool,
only_set: HashSet<String>,
ignore_set: HashSet<String>,
word_regex: String,
}
impl WordFilter {
fn new(matches: &Matches, config: &Config) -> WordFilter {
let (o, oset): (bool, HashSet<String>) =
if matches.opt_present("o") {
(true, read_word_filter_file(matches, "o"))
} else {
(false, HashSet::new())
};
let (i, iset): (bool, HashSet<String>) =
if matches.opt_present("i") {
(true, read_word_filter_file(matches, "i"))
} else {
(false, HashSet::new())
};
if matches.opt_present("b") {
crash!(1, "-b not implemented yet");
}
let reg =
if matches.opt_present("W") {
matches.opt_str("W").expect("parsing options failed!")
} else if config.gnu_ext {
"\\w+".to_string()
} else {
"[^ \t\n]+".to_string()
};
WordFilter {
only_specified: o,
ignore_specified: i,
only_set: oset,
ignore_set: iset,
word_regex: reg
}
}
}
#[derive(Debug, PartialOrd, PartialEq, Eq, Ord)]
struct WordRef {
word: String,
global_line_nr: usize,
local_line_nr: usize,
position: usize,
position_end: usize,
filename: String,
}
fn print_version() {
println!("{} {}", NAME, VERSION);
}
fn print_usage(opts: &Options) {
let brief = "Usage: ptx [OPTION]... [INPUT]... (without -G) or: \
ptx -G [OPTION]... [INPUT [OUTPUT]] \n Output a permuted index, \
including context, of the words in the input files. \n\n Mandatory \
arguments to long options are mandatory for short options too.";
let explanation = "With no FILE, or when FILE is -, read standard input. \
Default is '-F /'.";
println!("{}\n{}", opts.usage(&brief), explanation);
}
fn get_config(matches: &Matches) -> Config {
let mut config: Config = Default::default();
let err_msg = "parsing options failed";
if matches.opt_present("G") {
config.gnu_ext = false;
config.format = OutFormat::Roff;
config.context_regex = "[^ \t\n]+".to_string();
} else {
crash!(1, "GNU extensions not implemented yet");
}
if matches.opt_present("S") {
crash!(1, "-S not implemented yet");
}
config.auto_ref = matches.opt_present("A");
config.input_ref = matches.opt_present("r");
config.right_ref = matches.opt_present("R");
config.ignore_case = matches.opt_present("f");
if matches.opt_present("M") {
config.macro_name =
matches.opt_str("M").expect(err_msg).to_string();
}
if matches.opt_present("F") {
config.trunc_str =
matches.opt_str("F").expect(err_msg).to_string();
}
if matches.opt_present("w") {
let width_str = matches.opt_str("w").expect(err_msg);
config.line_width = crash_if_err!(
1, usize::from_str_radix(width_str.as_str(), 10));
}
if matches.opt_present("g") {
let gap_str = matches.opt_str("g").expect(err_msg);
config.gap_size = crash_if_err!(
1, usize::from_str_radix(gap_str.as_str(), 10));
}
if matches.opt_present("O") {
config.format = OutFormat::Roff;
}
if matches.opt_present("T") {
config.format = OutFormat::Tex;
}
config
}
fn read_input(input_files: Vec<&str>, config: &Config) ->
HashMap<String, (Vec<String>, usize)> {
let mut file_map : HashMap<String, (Vec<String>, usize)> =
HashMap::new();
let mut files = Vec::new();
if input_files.is_empty() {
files.push("-");
} else {
if config.gnu_ext {
files.push_all(input_files.as_slice());
} else {
files.push(input_files[0]);
}
}
let mut lines_so_far: usize = 0;
for filename in files {
let reader: BufReader<Box<Read>> = BufReader::new(
if filename == "-" {
Box::new(stdin())
} else {
let file = crash_if_err!(1, File::open(filename));
Box::new(file)
});
let lines: Vec<String> = reader.lines().map(|x| crash_if_err!(1, x))
.collect();
let size = lines.len();
file_map.insert(filename.to_string(), (lines, lines_so_far));
lines_so_far += size
}
file_map
}
fn create_word_set(config: &Config, filter: &WordFilter,
file_map: &HashMap<String, (Vec<String>, usize)>)->
BTreeSet<WordRef> {
let reg = Regex::new(filter.word_regex.as_str()).unwrap();
let ref_reg = Regex::new(config.context_regex.as_str()).unwrap();
let mut word_set: BTreeSet<WordRef> = BTreeSet::new();
for (file, lines) in file_map.iter() {
let mut count: usize = 0;
let offs = lines.1;
for line in (lines.0).iter() {
// if -r, exclude reference from word set
let (ref_beg, ref_end) = match ref_reg.find(line) {
Some(x) => x,
None => (0,0)
};
// match words with given regex
for (beg, end) in reg.find_iter(line) {
if config.input_ref && ((beg, end) == (ref_beg, ref_end)) {
continue;
}
let mut word = line.slice_chars(beg, end).to_string();
if filter.only_specified &&
!(filter.only_set.contains(&word)) {
continue;
}
if filter.ignore_specified &&
filter.ignore_set.contains(&word) {
continue;
}
if config.ignore_case {
word = word.to_lowercase();
}
word_set.insert(WordRef{
word: word,
filename: String::from(file.as_str()),
global_line_nr: offs + count,
local_line_nr: count,
position: beg,
position_end: end
});
}
count += 1;
}
}
word_set
}
fn get_reference(config: &Config, word_ref: &WordRef, line: &String) ->
String {
if config.auto_ref {
format!("{}:{}", word_ref.filename, word_ref.local_line_nr + 1)
} else if config.input_ref {
let reg = Regex::new(config.context_regex.as_str()).unwrap();
let (beg, end) = match reg.find(line) {
Some(x) => x,
None => (0,0)
};
format!("{}", line.slice_chars(beg, end))
} else {
String::new()
}
}
fn assert_str_integrity(s: &Vec<char>, beg: usize, end: usize) {
assert!(beg <= end);
assert!(end <= s.len());
}
fn trim_broken_word_left(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || beg == 0 || s[beg].is_whitespace() ||
s[beg-1].is_whitespace() {
return beg;
}
let mut b = beg;
while b < end && !s[b].is_whitespace() {
b += 1;
}
b
}
fn trim_broken_word_right(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || end == s.len() || s[end-1].is_whitespace() ||
s[end].is_whitespace() {
return end;
}
let mut e = end;
while beg < e && !s[e-1].is_whitespace() {
e -= 1;
}
e
}
fn trim_idx(s: &Vec<char>, beg: usize, end: usize) -> (usize, usize) {
assert_str_integrity(s, beg, end);
let mut b = beg;
let mut e = end;
while b < e && s[b].is_whitespace() {
b += 1;
}
while b < e && s[e-1].is_whitespace() {
e -= 1;
}
(b,e)
}
fn get_output_chunks(all_before: &String, keyword: &String, all_after: &String,
config: &Config) -> (String, String, String, String) {
assert!(all_before.trim() == all_before.as_str());
assert!(keyword.trim() == keyword.as_str());
assert!(all_after.trim() == all_after.as_str());
let mut head = String::new();
let mut before = String::new();
let mut after = String::new();
let mut tail = String::new();
let half_line_size = cmp::max((config.line_width/2) as isize -
(2*config.trunc_str.len()) as isize, 0) as usize;
let max_after_size = cmp::max(half_line_size as isize -
keyword.len() as isize - 1, 0) as usize;
let max_before_size = half_line_size;
let all_before_vec: Vec<char> = all_before.chars().collect();
let all_after_vec: Vec<char> = all_after.chars().collect();
// get before
let mut bb_tmp =
cmp::max(all_before.len() as isize - max_before_size as isize, 0) as usize;
bb_tmp = trim_broken_word_left(&all_before_vec, bb_tmp, all_before.len());
let (before_beg, before_end) =
trim_idx(&all_before_vec, bb_tmp, all_before.len());
before.push_str(all_before.slice_chars(before_beg, before_end));
assert!(max_before_size >= before.len());
// get after
let mut ae_tmp = cmp::min(max_after_size, all_after.len());
ae_tmp = trim_broken_word_right(&all_after_vec, 0, ae_tmp);
let (after_beg, after_end) = trim_idx(&all_after_vec, 0, ae_tmp);
after.push_str(all_after.slice_chars(after_beg, after_end));
assert!(max_after_size >= after.len());
// get tail
let max_tail_size = max_before_size - before.len();
let (tb, _) = trim_idx(&all_after_vec, after_end, all_after.len());
let mut te_tmp = cmp::min(tb + max_tail_size, all_after.len());
te_tmp = trim_broken_word_right(&all_after_vec, tb, te_tmp);
let (tail_beg, tail_end) = trim_idx(&all_after_vec, tb, te_tmp);
tail.push_str(all_after.slice_chars(tail_beg, tail_end));
// get head
let max_head_size = max_after_size - after.len();
let (_, he) = trim_idx(&all_before_vec, 0, before_beg);
let mut hb_tmp =
cmp::max(he as isize - max_head_size as isize, 0) as usize;
hb_tmp = trim_broken_word_left(&all_before_vec, hb_tmp, he);
let (head_beg, head_end) = trim_idx(&all_before_vec, hb_tmp, he);
head.push_str(all_before.slice_chars(head_beg, head_end));
// put right context truncation string if needed
if after_end != all_after.len() && tail_beg == tail_end {
after.push_str(config.trunc_str.as_str());
} else if after_end != all_after.len() && tail_end != all_after.len() {
tail.push_str(config.trunc_str.as_str());
}
// put left context truncation string if needed
if before_beg != 0 && head_beg == head_end {
before = format!("{}{}", config.trunc_str, before);
} else if before_beg != 0 && head_beg != 0 {
head = format!("{}{}", config.trunc_str, head);
}
// add space before "after" if needed
if after.len() > 0 {
after = format!(" {}", after);
}
(tail, before, after, head)
}
fn tex_mapper(x: char) -> String {
match x {
'\\' => "\\backslash{}".to_string(),
'$' | '%' | '#' | '&' | '_' => format!("\\{}", x),
'}' | '{' => format!("$\\{}$", x),
_ => x.to_string()
}
}
fn adjust_tex_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r ]").unwrap();
let mut fix: String = ws_reg.replace_all(context, " ").trim().to_string();
let mapped_chunks: Vec<String> = fix.chars().map(tex_mapper).collect();
fix = mapped_chunks.connect("");
fix
}
fn format_tex_line(config: &Config, word_ref: &WordRef, line: &String,
reference: &String) -> String {
let mut output = String::new();
output.push_str(&format!("\\{} ", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_tex_str(before.trim().trim_left_matches(reference))
} else {
adjust_tex_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_tex_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_tex_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!("{5}{0}{6}{5}{1}{6}{5}{2}{6}{5}{3}{6}{5}{4}{6}",
tail, before, keyword, after, head, "{", "}").as_str());
if config.auto_ref || config.input_ref {
output.push_str(
&format!("{}{}{}", "{", adjust_tex_str(&reference), "}"));
}
output
}
fn adjust_roff_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r]").unwrap();
ws_reg.replace_all(context, " ").replace("\"", "\"\"").trim().to_string()
}
fn format_roff_line(config: &Config, word_ref: &WordRef, line: &str,
reference: &str) -> String {
let mut output = String::new();
output.push_str(&format!(".{}", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_roff_str(before.trim().trim_left_matches(reference))
} else {
adjust_roff_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_roff_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_roff_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!(" \"{}\" \"{}\" \"{}{}\" \"{}\"",
tail, before, keyword, after, head).as_str());
if config.auto_ref || config.input_ref {
output.push_str(&format!(" \"{}\"", adjust_roff_str(&reference)));
}
output
}
fn write_traditional_output(config: &Config,
file_map: &HashMap<String, (Vec<String>,usize)>,
words: &BTreeSet<WordRef>, output_filename: &str) {
let mut writer: BufWriter<Box<Write>> = BufWriter::new(
if output_filename == "-" {
Box::new(stdout())
} else {
let file = crash_if_err!(1, File::create(output_filename));
Box::new(file)
});
for word_ref in words.iter() {
let file_map_value : &(Vec<String>, usize) =
file_map.get(&(word_ref.filename))
.expect("Missing file in file map");
let (ref lines, _) = *(file_map_value);
let reference =
get_reference(config, word_ref, &lines[word_ref.local_line_nr]);
let output_line: String = match config.format {
OutFormat::Tex => format_tex_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Roff => format_roff_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Dumb => crash!(
1, "There is no dumb format with GNU extensions disabled")
};
crash_if_err!(1, writeln!(writer, "{}", output_line));
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("A", "auto-reference",
"output automatically generated references");
opts.optflag("G", "traditional", "behave more like System V 'ptx'");
opts.optopt("F", "flag-truncation",
"use STRING for flagging line truncations", "STRING");
opts.optopt("M", "macro-name", "macro name to use instead of 'xx'",
"STRING");
opts.optflag("O", "format=roff", "generate output as roff directives");
opts.optflag("R", "right-side-refs",
"put references at right, not counted in -w");
opts.optopt("S", "sentence-regexp", "for end of lines or end of sentences",
"REGEXP");
opts.optflag("T", "format=tex", "generate output as TeX directives");
opts.optopt("W", "word-regexp", "use REGEXP to match each keyword",
"REGEXP");
opts.optopt("b", "break-file", "word break characters in this FILE",
"FILE");
opts.optflag("f", "ignore-case",
"fold lower case to upper case for sorting");
opts.optopt("g", "gap-size", "gap size in columns between output fields",
"NUMBER");
opts.optopt("i", "ignore-file", "read ignore word list from FILE", "FILE");
opts.optopt("o", "only-file", "read only word list from this FILE",
"FILE");
opts.optflag("r", "references", "first field of each line is a reference");
opts.optopt("w", "width", "output width in columns, reference excluded",
"NUMBER");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = return_if_err!(1, opts.parse(&args[1..]));
if matches.opt_present("help") {
print_usage(&opts);
return 0;
}
if matches.opt_present("version") {
print_version();
return 0;
}
let config = get_config(&matches);
let word_filter = WordFilter::new(&matches, &config);
let file_map =
read_input(matches.free.iter().map(|x| x.as_str()).collect(), &config);
let word_set = create_word_set(&config, &word_filter, &file_map);
let output_file = if !config.gnu_ext && matches.free.len() == 2 {
matches.free[1].as_str()
} else {
"-"
};
write_traditional_output(&config, &file_map, &word_set, output_file);
0
}
| OutFormat | identifier_name |
ptx.rs | #![crate_name = "ptx"]
#![feature(convert, slice_chars, vec_push_all)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate aho_corasick;
extern crate getopts;
extern crate memchr;
extern crate regex_syntax;
extern crate regex;
use std::collections::{HashMap, HashSet, BTreeSet};
use std::default::Default;
use std::fs::File;
use getopts::{Options, Matches};
use std::io::{stdin, stdout, BufReader, BufWriter, BufRead, Read, Write};
use regex::Regex;
use std::cmp;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "ptx";
static VERSION: &'static str = "1.0.0";
#[derive(Debug)]
enum OutFormat {
Dumb,
Roff,
Tex,
}
#[derive(Debug)]
struct Config {
format : OutFormat,
gnu_ext : bool,
auto_ref : bool,
input_ref : bool,
right_ref : bool,
ignore_case : bool,
macro_name : String,
trunc_str : String,
context_regex : String,
line_width : usize,
gap_size : usize,
}
impl Default for Config {
fn default() -> Config {
Config {
format : OutFormat::Dumb,
gnu_ext : true,
auto_ref : false,
input_ref : false,
right_ref : false,
ignore_case : false,
macro_name : "xx".to_string(),
trunc_str : "/".to_string(),
context_regex : "\\w+".to_string(),
line_width : 72,
gap_size : 3
}
}
}
fn read_word_filter_file(matches: &Matches, option: &str) -> HashSet<String> |
#[derive(Debug)]
struct WordFilter {
only_specified: bool,
ignore_specified: bool,
only_set: HashSet<String>,
ignore_set: HashSet<String>,
word_regex: String,
}
impl WordFilter {
fn new(matches: &Matches, config: &Config) -> WordFilter {
let (o, oset): (bool, HashSet<String>) =
if matches.opt_present("o") {
(true, read_word_filter_file(matches, "o"))
} else {
(false, HashSet::new())
};
let (i, iset): (bool, HashSet<String>) =
if matches.opt_present("i") {
(true, read_word_filter_file(matches, "i"))
} else {
(false, HashSet::new())
};
if matches.opt_present("b") {
crash!(1, "-b not implemented yet");
}
let reg =
if matches.opt_present("W") {
matches.opt_str("W").expect("parsing options failed!")
} else if config.gnu_ext {
"\\w+".to_string()
} else {
"[^ \t\n]+".to_string()
};
WordFilter {
only_specified: o,
ignore_specified: i,
only_set: oset,
ignore_set: iset,
word_regex: reg
}
}
}
#[derive(Debug, PartialOrd, PartialEq, Eq, Ord)]
struct WordRef {
word: String,
global_line_nr: usize,
local_line_nr: usize,
position: usize,
position_end: usize,
filename: String,
}
fn print_version() {
println!("{} {}", NAME, VERSION);
}
fn print_usage(opts: &Options) {
let brief = "Usage: ptx [OPTION]... [INPUT]... (without -G) or: \
ptx -G [OPTION]... [INPUT [OUTPUT]] \n Output a permuted index, \
including context, of the words in the input files. \n\n Mandatory \
arguments to long options are mandatory for short options too.";
let explanation = "With no FILE, or when FILE is -, read standard input. \
Default is '-F /'.";
println!("{}\n{}", opts.usage(&brief), explanation);
}
fn get_config(matches: &Matches) -> Config {
let mut config: Config = Default::default();
let err_msg = "parsing options failed";
if matches.opt_present("G") {
config.gnu_ext = false;
config.format = OutFormat::Roff;
config.context_regex = "[^ \t\n]+".to_string();
} else {
crash!(1, "GNU extensions not implemented yet");
}
if matches.opt_present("S") {
crash!(1, "-S not implemented yet");
}
config.auto_ref = matches.opt_present("A");
config.input_ref = matches.opt_present("r");
config.right_ref = matches.opt_present("R");
config.ignore_case = matches.opt_present("f");
if matches.opt_present("M") {
config.macro_name =
matches.opt_str("M").expect(err_msg).to_string();
}
if matches.opt_present("F") {
config.trunc_str =
matches.opt_str("F").expect(err_msg).to_string();
}
if matches.opt_present("w") {
let width_str = matches.opt_str("w").expect(err_msg);
config.line_width = crash_if_err!(
1, usize::from_str_radix(width_str.as_str(), 10));
}
if matches.opt_present("g") {
let gap_str = matches.opt_str("g").expect(err_msg);
config.gap_size = crash_if_err!(
1, usize::from_str_radix(gap_str.as_str(), 10));
}
if matches.opt_present("O") {
config.format = OutFormat::Roff;
}
if matches.opt_present("T") {
config.format = OutFormat::Tex;
}
config
}
fn read_input(input_files: Vec<&str>, config: &Config) ->
HashMap<String, (Vec<String>, usize)> {
let mut file_map : HashMap<String, (Vec<String>, usize)> =
HashMap::new();
let mut files = Vec::new();
if input_files.is_empty() {
files.push("-");
} else {
if config.gnu_ext {
files.push_all(input_files.as_slice());
} else {
files.push(input_files[0]);
}
}
let mut lines_so_far: usize = 0;
for filename in files {
let reader: BufReader<Box<Read>> = BufReader::new(
if filename == "-" {
Box::new(stdin())
} else {
let file = crash_if_err!(1, File::open(filename));
Box::new(file)
});
let lines: Vec<String> = reader.lines().map(|x| crash_if_err!(1, x))
.collect();
let size = lines.len();
file_map.insert(filename.to_string(), (lines, lines_so_far));
lines_so_far += size
}
file_map
}
fn create_word_set(config: &Config, filter: &WordFilter,
file_map: &HashMap<String, (Vec<String>, usize)>)->
BTreeSet<WordRef> {
let reg = Regex::new(filter.word_regex.as_str()).unwrap();
let ref_reg = Regex::new(config.context_regex.as_str()).unwrap();
let mut word_set: BTreeSet<WordRef> = BTreeSet::new();
for (file, lines) in file_map.iter() {
let mut count: usize = 0;
let offs = lines.1;
for line in (lines.0).iter() {
// if -r, exclude reference from word set
let (ref_beg, ref_end) = match ref_reg.find(line) {
Some(x) => x,
None => (0,0)
};
// match words with given regex
for (beg, end) in reg.find_iter(line) {
if config.input_ref && ((beg, end) == (ref_beg, ref_end)) {
continue;
}
let mut word = line.slice_chars(beg, end).to_string();
if filter.only_specified &&
!(filter.only_set.contains(&word)) {
continue;
}
if filter.ignore_specified &&
filter.ignore_set.contains(&word) {
continue;
}
if config.ignore_case {
word = word.to_lowercase();
}
word_set.insert(WordRef{
word: word,
filename: String::from(file.as_str()),
global_line_nr: offs + count,
local_line_nr: count,
position: beg,
position_end: end
});
}
count += 1;
}
}
word_set
}
fn get_reference(config: &Config, word_ref: &WordRef, line: &String) ->
String {
if config.auto_ref {
format!("{}:{}", word_ref.filename, word_ref.local_line_nr + 1)
} else if config.input_ref {
let reg = Regex::new(config.context_regex.as_str()).unwrap();
let (beg, end) = match reg.find(line) {
Some(x) => x,
None => (0,0)
};
format!("{}", line.slice_chars(beg, end))
} else {
String::new()
}
}
fn assert_str_integrity(s: &Vec<char>, beg: usize, end: usize) {
assert!(beg <= end);
assert!(end <= s.len());
}
fn trim_broken_word_left(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || beg == 0 || s[beg].is_whitespace() ||
s[beg-1].is_whitespace() {
return beg;
}
let mut b = beg;
while b < end && !s[b].is_whitespace() {
b += 1;
}
b
}
fn trim_broken_word_right(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || end == s.len() || s[end-1].is_whitespace() ||
s[end].is_whitespace() {
return end;
}
let mut e = end;
while beg < e && !s[e-1].is_whitespace() {
e -= 1;
}
e
}
fn trim_idx(s: &Vec<char>, beg: usize, end: usize) -> (usize, usize) {
assert_str_integrity(s, beg, end);
let mut b = beg;
let mut e = end;
while b < e && s[b].is_whitespace() {
b += 1;
}
while b < e && s[e-1].is_whitespace() {
e -= 1;
}
(b,e)
}
fn get_output_chunks(all_before: &String, keyword: &String, all_after: &String,
config: &Config) -> (String, String, String, String) {
assert!(all_before.trim() == all_before.as_str());
assert!(keyword.trim() == keyword.as_str());
assert!(all_after.trim() == all_after.as_str());
let mut head = String::new();
let mut before = String::new();
let mut after = String::new();
let mut tail = String::new();
let half_line_size = cmp::max((config.line_width/2) as isize -
(2*config.trunc_str.len()) as isize, 0) as usize;
let max_after_size = cmp::max(half_line_size as isize -
keyword.len() as isize - 1, 0) as usize;
let max_before_size = half_line_size;
let all_before_vec: Vec<char> = all_before.chars().collect();
let all_after_vec: Vec<char> = all_after.chars().collect();
// get before
let mut bb_tmp =
cmp::max(all_before.len() as isize - max_before_size as isize, 0) as usize;
bb_tmp = trim_broken_word_left(&all_before_vec, bb_tmp, all_before.len());
let (before_beg, before_end) =
trim_idx(&all_before_vec, bb_tmp, all_before.len());
before.push_str(all_before.slice_chars(before_beg, before_end));
assert!(max_before_size >= before.len());
// get after
let mut ae_tmp = cmp::min(max_after_size, all_after.len());
ae_tmp = trim_broken_word_right(&all_after_vec, 0, ae_tmp);
let (after_beg, after_end) = trim_idx(&all_after_vec, 0, ae_tmp);
after.push_str(all_after.slice_chars(after_beg, after_end));
assert!(max_after_size >= after.len());
// get tail
let max_tail_size = max_before_size - before.len();
let (tb, _) = trim_idx(&all_after_vec, after_end, all_after.len());
let mut te_tmp = cmp::min(tb + max_tail_size, all_after.len());
te_tmp = trim_broken_word_right(&all_after_vec, tb, te_tmp);
let (tail_beg, tail_end) = trim_idx(&all_after_vec, tb, te_tmp);
tail.push_str(all_after.slice_chars(tail_beg, tail_end));
// get head
let max_head_size = max_after_size - after.len();
let (_, he) = trim_idx(&all_before_vec, 0, before_beg);
let mut hb_tmp =
cmp::max(he as isize - max_head_size as isize, 0) as usize;
hb_tmp = trim_broken_word_left(&all_before_vec, hb_tmp, he);
let (head_beg, head_end) = trim_idx(&all_before_vec, hb_tmp, he);
head.push_str(all_before.slice_chars(head_beg, head_end));
// put right context truncation string if needed
if after_end != all_after.len() && tail_beg == tail_end {
after.push_str(config.trunc_str.as_str());
} else if after_end != all_after.len() && tail_end != all_after.len() {
tail.push_str(config.trunc_str.as_str());
}
// put left context truncation string if needed
if before_beg != 0 && head_beg == head_end {
before = format!("{}{}", config.trunc_str, before);
} else if before_beg != 0 && head_beg != 0 {
head = format!("{}{}", config.trunc_str, head);
}
// add space before "after" if needed
if after.len() > 0 {
after = format!(" {}", after);
}
(tail, before, after, head)
}
fn tex_mapper(x: char) -> String {
match x {
'\\' => "\\backslash{}".to_string(),
'$' | '%' | '#' | '&' | '_' => format!("\\{}", x),
'}' | '{' => format!("$\\{}$", x),
_ => x.to_string()
}
}
fn adjust_tex_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r ]").unwrap();
let mut fix: String = ws_reg.replace_all(context, " ").trim().to_string();
let mapped_chunks: Vec<String> = fix.chars().map(tex_mapper).collect();
fix = mapped_chunks.connect("");
fix
}
fn format_tex_line(config: &Config, word_ref: &WordRef, line: &String,
reference: &String) -> String {
let mut output = String::new();
output.push_str(&format!("\\{} ", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_tex_str(before.trim().trim_left_matches(reference))
} else {
adjust_tex_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_tex_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_tex_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!("{5}{0}{6}{5}{1}{6}{5}{2}{6}{5}{3}{6}{5}{4}{6}",
tail, before, keyword, after, head, "{", "}").as_str());
if config.auto_ref || config.input_ref {
output.push_str(
&format!("{}{}{}", "{", adjust_tex_str(&reference), "}"));
}
output
}
fn adjust_roff_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r]").unwrap();
ws_reg.replace_all(context, " ").replace("\"", "\"\"").trim().to_string()
}
fn format_roff_line(config: &Config, word_ref: &WordRef, line: &str,
reference: &str) -> String {
let mut output = String::new();
output.push_str(&format!(".{}", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_roff_str(before.trim().trim_left_matches(reference))
} else {
adjust_roff_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_roff_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_roff_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!(" \"{}\" \"{}\" \"{}{}\" \"{}\"",
tail, before, keyword, after, head).as_str());
if config.auto_ref || config.input_ref {
output.push_str(&format!(" \"{}\"", adjust_roff_str(&reference)));
}
output
}
fn write_traditional_output(config: &Config,
file_map: &HashMap<String, (Vec<String>,usize)>,
words: &BTreeSet<WordRef>, output_filename: &str) {
let mut writer: BufWriter<Box<Write>> = BufWriter::new(
if output_filename == "-" {
Box::new(stdout())
} else {
let file = crash_if_err!(1, File::create(output_filename));
Box::new(file)
});
for word_ref in words.iter() {
let file_map_value : &(Vec<String>, usize) =
file_map.get(&(word_ref.filename))
.expect("Missing file in file map");
let (ref lines, _) = *(file_map_value);
let reference =
get_reference(config, word_ref, &lines[word_ref.local_line_nr]);
let output_line: String = match config.format {
OutFormat::Tex => format_tex_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Roff => format_roff_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Dumb => crash!(
1, "There is no dumb format with GNU extensions disabled")
};
crash_if_err!(1, writeln!(writer, "{}", output_line));
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("A", "auto-reference",
"output automatically generated references");
opts.optflag("G", "traditional", "behave more like System V 'ptx'");
opts.optopt("F", "flag-truncation",
"use STRING for flagging line truncations", "STRING");
opts.optopt("M", "macro-name", "macro name to use instead of 'xx'",
"STRING");
opts.optflag("O", "format=roff", "generate output as roff directives");
opts.optflag("R", "right-side-refs",
"put references at right, not counted in -w");
opts.optopt("S", "sentence-regexp", "for end of lines or end of sentences",
"REGEXP");
opts.optflag("T", "format=tex", "generate output as TeX directives");
opts.optopt("W", "word-regexp", "use REGEXP to match each keyword",
"REGEXP");
opts.optopt("b", "break-file", "word break characters in this FILE",
"FILE");
opts.optflag("f", "ignore-case",
"fold lower case to upper case for sorting");
opts.optopt("g", "gap-size", "gap size in columns between output fields",
"NUMBER");
opts.optopt("i", "ignore-file", "read ignore word list from FILE", "FILE");
opts.optopt("o", "only-file", "read only word list from this FILE",
"FILE");
opts.optflag("r", "references", "first field of each line is a reference");
opts.optopt("w", "width", "output width in columns, reference excluded",
"NUMBER");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = return_if_err!(1, opts.parse(&args[1..]));
if matches.opt_present("help") {
print_usage(&opts);
return 0;
}
if matches.opt_present("version") {
print_version();
return 0;
}
let config = get_config(&matches);
let word_filter = WordFilter::new(&matches, &config);
let file_map =
read_input(matches.free.iter().map(|x| x.as_str()).collect(), &config);
let word_set = create_word_set(&config, &word_filter, &file_map);
let output_file = if !config.gnu_ext && matches.free.len() == 2 {
matches.free[1].as_str()
} else {
"-"
};
write_traditional_output(&config, &file_map, &word_set, output_file);
0
}
| {
let filename = matches.opt_str(option).expect("parsing options failed!");
let reader = BufReader::new(crash_if_err!(1, File::open(filename)));
let mut words: HashSet<String> = HashSet::new();
for word in reader.lines() {
words.insert(crash_if_err!(1, word));
}
words
} | identifier_body |
ptx.rs | #![crate_name = "ptx"]
#![feature(convert, slice_chars, vec_push_all)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate aho_corasick;
extern crate getopts;
extern crate memchr;
extern crate regex_syntax;
extern crate regex;
use std::collections::{HashMap, HashSet, BTreeSet};
use std::default::Default;
use std::fs::File;
use getopts::{Options, Matches};
use std::io::{stdin, stdout, BufReader, BufWriter, BufRead, Read, Write};
use regex::Regex;
use std::cmp;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "ptx";
static VERSION: &'static str = "1.0.0";
#[derive(Debug)]
enum OutFormat {
Dumb,
Roff,
Tex,
}
#[derive(Debug)]
struct Config {
format : OutFormat,
gnu_ext : bool,
auto_ref : bool,
input_ref : bool,
right_ref : bool,
ignore_case : bool,
macro_name : String,
trunc_str : String,
context_regex : String,
line_width : usize,
gap_size : usize,
}
impl Default for Config {
fn default() -> Config {
Config {
format : OutFormat::Dumb,
gnu_ext : true,
auto_ref : false,
input_ref : false,
right_ref : false,
ignore_case : false,
macro_name : "xx".to_string(),
trunc_str : "/".to_string(),
context_regex : "\\w+".to_string(),
line_width : 72,
gap_size : 3
}
}
}
fn read_word_filter_file(matches: &Matches, option: &str) -> HashSet<String> {
let filename = matches.opt_str(option).expect("parsing options failed!");
let reader = BufReader::new(crash_if_err!(1, File::open(filename)));
let mut words: HashSet<String> = HashSet::new();
for word in reader.lines() {
words.insert(crash_if_err!(1, word));
}
words
}
#[derive(Debug)]
struct WordFilter {
only_specified: bool,
ignore_specified: bool,
only_set: HashSet<String>,
ignore_set: HashSet<String>,
word_regex: String,
}
impl WordFilter {
fn new(matches: &Matches, config: &Config) -> WordFilter {
let (o, oset): (bool, HashSet<String>) =
if matches.opt_present("o") {
(true, read_word_filter_file(matches, "o"))
} else {
(false, HashSet::new())
};
let (i, iset): (bool, HashSet<String>) =
if matches.opt_present("i") {
(true, read_word_filter_file(matches, "i"))
} else {
(false, HashSet::new())
};
if matches.opt_present("b") {
crash!(1, "-b not implemented yet");
}
let reg =
if matches.opt_present("W") {
matches.opt_str("W").expect("parsing options failed!")
} else if config.gnu_ext {
"\\w+".to_string()
} else {
"[^ \t\n]+".to_string()
};
WordFilter {
only_specified: o,
ignore_specified: i,
only_set: oset,
ignore_set: iset,
word_regex: reg
}
}
}
#[derive(Debug, PartialOrd, PartialEq, Eq, Ord)]
struct WordRef {
word: String,
global_line_nr: usize,
local_line_nr: usize,
position: usize,
position_end: usize,
filename: String,
}
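// Note: the derived `Ord` compares fields in declaration order, so `word`
// dominates; this is what makes the `BTreeSet<WordRef>` in `create_word_set`
// iterate (and thus print) in keyword order.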
fn print_version() {
println!("{} {}", NAME, VERSION);
}
fn print_usage(opts: &Options) {
let brief = "Usage: ptx [OPTION]... [INPUT]... (without -G) or: \
ptx -G [OPTION]... [INPUT [OUTPUT]] \n Output a permuted index, \
including context, of the words in the input files. \n\n Mandatory \
arguments to long options are mandatory for short options too.";
    let explanation = "With no FILE, or when FILE is -, read standard input. \
                        Default is '-F /'.";
    println!("{}\n{}", opts.usage(&brief), explanation);
}
fn get_config(matches: &Matches) -> Config {
let mut config: Config = Default::default();
let err_msg = "parsing options failed";
if matches.opt_present("G") {
config.gnu_ext = false;
config.format = OutFormat::Roff;
config.context_regex = "[^ \t\n]+".to_string();
} else { | crash!(1, "-S not implemented yet");
}
config.auto_ref = matches.opt_present("A");
config.input_ref = matches.opt_present("r");
    config.right_ref = matches.opt_present("R");
config.ignore_case = matches.opt_present("f");
if matches.opt_present("M") {
config.macro_name =
matches.opt_str("M").expect(err_msg).to_string();
}
if matches.opt_present("F") {
config.trunc_str =
matches.opt_str("F").expect(err_msg).to_string();
}
if matches.opt_present("w") {
let width_str = matches.opt_str("w").expect(err_msg);
config.line_width = crash_if_err!(
1, usize::from_str_radix(width_str.as_str(), 10));
}
if matches.opt_present("g") {
let gap_str = matches.opt_str("g").expect(err_msg);
config.gap_size = crash_if_err!(
1, usize::from_str_radix(gap_str.as_str(), 10));
}
if matches.opt_present("O") {
config.format = OutFormat::Roff;
}
if matches.opt_present("T") {
config.format = OutFormat::Tex;
}
config
}
fn read_input(input_files: Vec<&str>, config: &Config) ->
HashMap<String, (Vec<String>, usize)> {
let mut file_map : HashMap<String, (Vec<String>, usize)> =
HashMap::new();
let mut files = Vec::new();
if input_files.is_empty() {
files.push("-");
    } else if config.gnu_ext {
        files.push_all(input_files.as_slice());
    } else {
        files.push(input_files[0]);
    }
let mut lines_so_far: usize = 0;
for filename in files {
let reader: BufReader<Box<Read>> = BufReader::new(
if filename == "-" {
Box::new(stdin())
} else {
let file = crash_if_err!(1, File::open(filename));
Box::new(file)
});
let lines: Vec<String> = reader.lines().map(|x| crash_if_err!(1, x))
.collect();
let size = lines.len();
file_map.insert(filename.to_string(), (lines, lines_so_far));
lines_so_far += size
}
file_map
}
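// `lines_so_far` gives each file a running global line offset, so
// `WordRef::global_line_nr` stays unique even when several inputs are read.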
fn create_word_set(config: &Config, filter: &WordFilter,
file_map: &HashMap<String, (Vec<String>, usize)>)->
BTreeSet<WordRef> {
let reg = Regex::new(filter.word_regex.as_str()).unwrap();
let ref_reg = Regex::new(config.context_regex.as_str()).unwrap();
let mut word_set: BTreeSet<WordRef> = BTreeSet::new();
for (file, lines) in file_map.iter() {
let mut count: usize = 0;
let offs = lines.1;
for line in (lines.0).iter() {
// if -r, exclude reference from word set
let (ref_beg, ref_end) = match ref_reg.find(line) {
Some(x) => x,
None => (0,0)
};
// match words with given regex
for (beg, end) in reg.find_iter(line) {
if config.input_ref && ((beg, end) == (ref_beg, ref_end)) {
continue;
}
let mut word = line.slice_chars(beg, end).to_string();
if filter.only_specified &&
!(filter.only_set.contains(&word)) {
continue;
}
if filter.ignore_specified &&
filter.ignore_set.contains(&word) {
continue;
}
if config.ignore_case {
word = word.to_lowercase();
}
word_set.insert(WordRef{
word: word,
filename: String::from(file.as_str()),
global_line_nr: offs + count,
local_line_nr: count,
position: beg,
position_end: end
});
}
count += 1;
}
}
word_set
}
fn get_reference(config: &Config, word_ref: &WordRef, line: &String) ->
String {
if config.auto_ref {
format!("{}:{}", word_ref.filename, word_ref.local_line_nr + 1)
} else if config.input_ref {
let reg = Regex::new(config.context_regex.as_str()).unwrap();
let (beg, end) = match reg.find(line) {
Some(x) => x,
None => (0,0)
};
format!("{}", line.slice_chars(beg, end))
} else {
String::new()
}
}
fn assert_str_integrity(s: &Vec<char>, beg: usize, end: usize) {
assert!(beg <= end);
assert!(end <= s.len());
}
fn trim_broken_word_left(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || beg == 0 || s[beg].is_whitespace() ||
s[beg-1].is_whitespace() {
return beg;
}
let mut b = beg;
while b < end && !s[b].is_whitespace() {
b += 1;
}
b
}
fn trim_broken_word_right(s: &Vec<char>, beg: usize, end: usize) -> usize {
assert_str_integrity(s, beg, end);
if beg == end || end == s.len() || s[end-1].is_whitespace() ||
s[end].is_whitespace() {
return end;
}
let mut e = end;
while beg < e && !s[e-1].is_whitespace() {
e -= 1;
}
e
}
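// Example: a cut landing inside "ipsum" in "lorem ipsum" is moved to the word
// boundary, so a chunk never starts or ends with a partial word.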
fn trim_idx(s: &Vec<char>, beg: usize, end: usize) -> (usize, usize) {
assert_str_integrity(s, beg, end);
let mut b = beg;
let mut e = end;
while b < e && s[b].is_whitespace() {
b += 1;
}
while b < e && s[e-1].is_whitespace() {
e -= 1;
}
(b,e)
}
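// A permuted-index line is emitted from five pieces in this order (see the
// format_* functions below):
//
//     <tail> <before> <keyword><after> <head>
//
// `before`/`after` are the immediate context around the keyword; `head`/`tail`
// recycle leftover space from the half-line budgets, and `trunc_str` marks
// where context had to be cut.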
fn get_output_chunks(all_before: &String, keyword: &String, all_after: &String,
config: &Config) -> (String, String, String, String) {
assert!(all_before.trim() == all_before.as_str());
assert!(keyword.trim() == keyword.as_str());
assert!(all_after.trim() == all_after.as_str());
let mut head = String::new();
let mut before = String::new();
let mut after = String::new();
let mut tail = String::new();
let half_line_size = cmp::max((config.line_width/2) as isize -
(2*config.trunc_str.len()) as isize, 0) as usize;
let max_after_size = cmp::max(half_line_size as isize -
keyword.len() as isize - 1, 0) as usize;
let max_before_size = half_line_size;
let all_before_vec: Vec<char> = all_before.chars().collect();
let all_after_vec: Vec<char> = all_after.chars().collect();
// get before
let mut bb_tmp =
cmp::max(all_before.len() as isize - max_before_size as isize, 0) as usize;
bb_tmp = trim_broken_word_left(&all_before_vec, bb_tmp, all_before.len());
let (before_beg, before_end) =
trim_idx(&all_before_vec, bb_tmp, all_before.len());
before.push_str(all_before.slice_chars(before_beg, before_end));
assert!(max_before_size >= before.len());
// get after
let mut ae_tmp = cmp::min(max_after_size, all_after.len());
ae_tmp = trim_broken_word_right(&all_after_vec, 0, ae_tmp);
let (after_beg, after_end) = trim_idx(&all_after_vec, 0, ae_tmp);
after.push_str(all_after.slice_chars(after_beg, after_end));
assert!(max_after_size >= after.len());
// get tail
let max_tail_size = max_before_size - before.len();
let (tb, _) = trim_idx(&all_after_vec, after_end, all_after.len());
let mut te_tmp = cmp::min(tb + max_tail_size, all_after.len());
te_tmp = trim_broken_word_right(&all_after_vec, tb, te_tmp);
let (tail_beg, tail_end) = trim_idx(&all_after_vec, tb, te_tmp);
tail.push_str(all_after.slice_chars(tail_beg, tail_end));
// get head
let max_head_size = max_after_size - after.len();
let (_, he) = trim_idx(&all_before_vec, 0, before_beg);
let mut hb_tmp =
cmp::max(he as isize - max_head_size as isize, 0) as usize;
hb_tmp = trim_broken_word_left(&all_before_vec, hb_tmp, he);
let (head_beg, head_end) = trim_idx(&all_before_vec, hb_tmp, he);
head.push_str(all_before.slice_chars(head_beg, head_end));
// put right context truncation string if needed
if after_end != all_after.len() && tail_beg == tail_end {
after.push_str(config.trunc_str.as_str());
} else if after_end != all_after.len() && tail_end != all_after.len() {
tail.push_str(config.trunc_str.as_str());
}
// put left context truncation string if needed
if before_beg != 0 && head_beg == head_end {
before = format!("{}{}", config.trunc_str, before);
} else if before_beg != 0 && head_beg != 0 {
head = format!("{}{}", config.trunc_str, head);
}
// add space before "after" if needed
if after.len() > 0 {
after = format!(" {}", after);
}
(tail, before, after, head)
}
fn tex_mapper(x: char) -> String {
match x {
'\\' => "\\backslash{}".to_string(),
'$' | '%' | '#' | '&' | '_' => format!("\\{}", x),
'}' | '{' => format!("$\\{}$", x),
_ => x.to_string()
}
}
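// e.g. tex_mapper('%') yields "\%" and tex_mapper('{') yields "$\{$"; any
// character not listed above is passed through unchanged.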
fn adjust_tex_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r ]").unwrap();
let mut fix: String = ws_reg.replace_all(context, " ").trim().to_string();
let mapped_chunks: Vec<String> = fix.chars().map(tex_mapper).collect();
fix = mapped_chunks.connect("");
fix
}
fn format_tex_line(config: &Config, word_ref: &WordRef, line: &String,
reference: &String) -> String {
let mut output = String::new();
output.push_str(&format!("\\{} ", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_tex_str(before.trim().trim_left_matches(reference))
} else {
adjust_tex_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_tex_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_tex_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!("{5}{0}{6}{5}{1}{6}{5}{2}{6}{5}{3}{6}{5}{4}{6}",
tail, before, keyword, after, head, "{", "}").as_str());
if config.auto_ref || config.input_ref {
output.push_str(
&format!("{}{}{}", "{", adjust_tex_str(&reference), "}"));
}
output
}
fn adjust_roff_str(context: &str) -> String {
let ws_reg = Regex::new(r"[\t\n\v\f\r]").unwrap();
ws_reg.replace_all(context, " ").replace("\"", "\"\"").trim().to_string()
}
fn format_roff_line(config: &Config, word_ref: &WordRef, line: &str,
reference: &str) -> String {
let mut output = String::new();
output.push_str(&format!(".{}", config.macro_name));
let all_before = if config.input_ref {
let before = line.slice_chars(0, word_ref.position);
adjust_roff_str(before.trim().trim_left_matches(reference))
} else {
adjust_roff_str(line.slice_chars(0, word_ref.position))
};
let keyword = adjust_roff_str(
line.slice_chars(word_ref.position, word_ref.position_end));
let all_after = adjust_roff_str(
line.slice_chars(word_ref.position_end, line.len()));
let (tail, before, after, head) =
get_output_chunks(&all_before, &keyword, &all_after, &config);
output.push_str(format!(" \"{}\" \"{}\" \"{}{}\" \"{}\"",
tail, before, keyword, after, head).as_str());
if config.auto_ref || config.input_ref {
output.push_str(&format!(" \"{}\"", adjust_roff_str(&reference)));
}
output
}
fn write_traditional_output(config: &Config,
file_map: &HashMap<String, (Vec<String>,usize)>,
words: &BTreeSet<WordRef>, output_filename: &str) {
let mut writer: BufWriter<Box<Write>> = BufWriter::new(
if output_filename == "-" {
Box::new(stdout())
} else {
let file = crash_if_err!(1, File::create(output_filename));
Box::new(file)
});
for word_ref in words.iter() {
let file_map_value : &(Vec<String>, usize) =
file_map.get(&(word_ref.filename))
.expect("Missing file in file map");
let (ref lines, _) = *(file_map_value);
let reference =
get_reference(config, word_ref, &lines[word_ref.local_line_nr]);
let output_line: String = match config.format {
OutFormat::Tex => format_tex_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Roff => format_roff_line(
config, word_ref, &lines[word_ref.local_line_nr], &reference),
OutFormat::Dumb => crash!(
1, "There is no dumb format with GNU extensions disabled")
};
crash_if_err!(1, writeln!(writer, "{}", output_line));
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("A", "auto-reference",
"output automatically generated references");
opts.optflag("G", "traditional", "behave more like System V 'ptx'");
opts.optopt("F", "flag-truncation",
"use STRING for flagging line truncations", "STRING");
opts.optopt("M", "macro-name", "macro name to use instead of 'xx'",
"STRING");
opts.optflag("O", "format=roff", "generate output as roff directives");
opts.optflag("R", "right-side-refs",
"put references at right, not counted in -w");
opts.optopt("S", "sentence-regexp", "for end of lines or end of sentences",
"REGEXP");
opts.optflag("T", "format=tex", "generate output as TeX directives");
opts.optopt("W", "word-regexp", "use REGEXP to match each keyword",
"REGEXP");
opts.optopt("b", "break-file", "word break characters in this FILE",
"FILE");
opts.optflag("f", "ignore-case",
"fold lower case to upper case for sorting");
opts.optopt("g", "gap-size", "gap size in columns between output fields",
"NUMBER");
opts.optopt("i", "ignore-file", "read ignore word list from FILE", "FILE");
opts.optopt("o", "only-file", "read only word list from this FILE",
"FILE");
opts.optflag("r", "references", "first field of each line is a reference");
opts.optopt("w", "width", "output width in columns, reference excluded",
"NUMBER");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = return_if_err!(1, opts.parse(&args[1..]));
if matches.opt_present("help") {
print_usage(&opts);
return 0;
}
if matches.opt_present("version") {
print_version();
return 0;
}
let config = get_config(&matches);
let word_filter = WordFilter::new(&matches, &config);
let file_map =
read_input(matches.free.iter().map(|x| x.as_str()).collect(), &config);
let word_set = create_word_set(&config, &word_filter, &file_map);
let output_file = if !config.gnu_ext && matches.free.len() == 2 {
matches.free[1].as_str()
} else {
"-"
};
write_traditional_output(&config, &file_map, &word_set, output_file);
0
} | crash!(1, "GNU extensions not implemented yet");
}
if matches.opt_present("S") { | random_line_split |
data_to_json.py | #!/usr/bin/env python
"""
Extract minor planet orbital elements and discovery dates to json.
Orbital elements are extracted from the file MPCORB.DAT:
https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT
Discovery dates are extracted from the file NumberedMPs.txt:
https://minorplanetcenter.net/iau/lists/NumberedMPs.txt
Usage:
======
./data_to_json.py [-h] [-c] [N]
Parse orbital and discovery data to json.
positional arguments:
N maximum number of results
optional arguments:
-h, --help show this help message and exit
-c, --compact output as compact json format
TODO:
=====
- Get range between discovery dates
- Create an API (python server)
"""
OUTPUT_FILE = 'catalog.json'
MPCORB_FILE = 'MPCORB.DAT'
NUMMPS_FILE = 'NumberedMPs.txt'
import os, sys, json, argparse
from time import time
from datetime import datetime
from itertools import izip
from operator import itemgetter
# Change working directory to the module path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Datetime to Julian date
def dt2jd(dt):
dt = dt - datetime(2000, 1, 1)
    return dt.days + (dt.seconds + dt.microseconds / 1e6) / 86400.0 + 2451544.5
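# Sanity check: dt2jd(datetime(2000, 1, 1)) == 2451544.5, the Julian date of
# 2000-01-01 00:00 (the float division above avoids Python 2 truncation of
# the fractional day).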
# Packed date to Datetime
def pd2dt(pd):
y = int(str(int(pd[0], 36)) + pd[1:3])
m = int(pd[3], 36)
d = int(pd[4], 36)
return datetime(y, m, d)
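# MPC packed dates encode the century in base 36, e.g. 'K134N' -> 2013-04-23:
# 'K' = 20 -> "20" + "13", month '4', day 'N' = 23 (illustrative value only).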
# Packed to Julian date
def pd2jd(pd):
dt = pd2dt(pd)
return dt2jd(dt)
def main(argv):
    # Parse arguments
parser = argparse.ArgumentParser(description='Parse orbital and discovery data to json.')
parser.add_argument('amount', metavar='N', type=int, nargs='?', help='maximum number of results')
parser.add_argument('-c', '--compact', action='store_true', dest='compact', help='output as compact json format')
args = parser.parse_args()
print 'Extracting MPC discovery dates and orbital elements ...'
start_time = time()
# Extract the discovery dates from NumberedMPs.txt
# For a description of the format see https://minorplanetcenter.net/iau/lists/NumberedMPs000001.html
mpcs_disc = {}
for line in open(NUMMPS_FILE):
nr = int(line[1:7].strip().replace('(', ''))
# Extract the discovery date (YYYY MM DD) and convert it to Julian date
date = datetime.strptime(line[41:51], '%Y %m %d')
mpcs_disc[nr] = dt2jd(date)
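    # e.g. a row like " (1) Ceres ... 1801 01 01 ..." (spacing illustrative)
    # maps nr=1 to the Julian date of 1801-01-01, Ceres' discovery.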
"""
Extract the orbital elements from MPCORB.DAT
For a description of the format see https://minorplanetcenter.net/iau/info/MPOrbitFormat.html
The following columns are extracted:
epoch = Date for which the information is valid (packed date)
a = Semi-major axis (AU)
e = Orbital eccentricity (0..1)
i = Inclination to the ecliptic (degrees)
W = Longitude of ascending node (degrees)
w = Argument of perihelion (degrees)
M = Mean anomaly (degrees)
n = Mean daily motion (degrees per day)
"""
mpcs = []
count = 0
for line in open(MPCORB_FILE):
nr = line[167:173].strip().replace('(', '')
if not nr: |
nr = int(nr)
# Skip if discovery date is missing
if nr not in mpcs_disc:
print 'Skipping MPC #%d (no discovery date found)' % (nr)
continue
# Extract the orbital elements
_, _, _, epoch, M, w, W, i, e, n, a, _ = line.split(None, 11)
mpc = (mpcs_disc[nr], pd2jd(epoch), float(a), float(e), float(i), float(W), float(w), float(M), float(n))
mpcs.append(mpc)
# Maximum requested reached?
count += 1
if count == args.amount: break
# Sort by discovery date
mpcs.sort(key=itemgetter(0))
if args.compact:
output = mpcs
else:
keys = ['disc', 'epoch', 'a', 'e', 'i', 'W', 'w', 'M', 'n']
output = [dict(izip(keys, mpc)) for mpc in mpcs]
with open(OUTPUT_FILE, 'w') as outfile:
json.dump(output, outfile)
# json.dump(output, outfile, indent=2, separators=(',', ':'))
print 'Finished extracting %d MPCs in %s seconds.' % (len(mpcs), time()-start_time)
if __name__ == '__main__':
main(sys.argv[1:])
| continue | conditional_block |
data_to_json.py | #!/usr/bin/env python
"""
Extract minor planet orbital elements and discovery dates to json.
Orbital elements are extracted from the file MPCORB.DAT:
https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT
Discovery dates are extracted from the file NumberedMPs.txt:
https://minorplanetcenter.net/iau/lists/NumberedMPs.txt
Usage:
======
./data_to_json.py [-h] [-c] [N]
Parse orbital and discovery data to json.
positional arguments:
N maximum number of results
optional arguments:
-h, --help show this help message and exit
-c, --compact output as compact json format
TODO:
=====
- Get range between discovery dates
- Create an API (python server)
"""
OUTPUT_FILE = 'catalog.json'
MPCORB_FILE = 'MPCORB.DAT'
NUMMPS_FILE = 'NumberedMPs.txt'
import os, sys, json, argparse
from time import time
from datetime import datetime
from itertools import izip
from operator import itemgetter
# Change working directory to the module path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Datetime to Julian date
def dt2jd(dt):
dt = dt - datetime(2000, 1, 1)
    return dt.days + (dt.seconds + dt.microseconds / 1e6) / 86400.0 + 2451544.5
# Packed date to Datetime
def pd2dt(pd):
y = int(str(int(pd[0], 36)) + pd[1:3])
m = int(pd[3], 36)
d = int(pd[4], 36)
return datetime(y, m, d)
# Packed to Julian date
def pd2jd(pd):
dt = pd2dt(pd)
return dt2jd(dt)
def main(argv):
    # Parse arguments
|
if __name__ == '__main__':
main(sys.argv[1:])
| parser = argparse.ArgumentParser(description='Parse orbital and discovery data to json.')
parser.add_argument('amount', metavar='N', type=int, nargs='?', help='maximum number of results')
parser.add_argument('-c', '--compact', action='store_true', dest='compact', help='output as compact json format')
args = parser.parse_args()
print 'Extracting MPC discovery dates and orbital elements ...'
start_time = time()
# Extract the discovery dates from NumberedMPs.txt
# For a description of the format see https://minorplanetcenter.net/iau/lists/NumberedMPs000001.html
mpcs_disc = {}
for line in open(NUMMPS_FILE):
nr = int(line[1:7].strip().replace('(', ''))
# Extract the discovery date (YYYY MM DD) and convert it to Julian date
date = datetime.strptime(line[41:51], '%Y %m %d')
mpcs_disc[nr] = dt2jd(date)
"""
Extract the orbital elements from MPCORB.DAT
For a description of the format see https://minorplanetcenter.net/iau/info/MPOrbitFormat.html
The following columns are extracted:
epoch = Date for which the information is valid (packed date)
a = Semi-major axis (AU)
e = Orbital eccentricity (0..1)
i = Inclination to the ecliptic (degrees)
W = Longitude of ascending node (degrees)
w = Argument of perihelion (degrees)
M = Mean anomaly (degrees)
n = Mean daily motion (degrees per day)
"""
mpcs = []
count = 0
for line in open(MPCORB_FILE):
nr = line[167:173].strip().replace('(', '')
if not nr: continue
nr = int(nr)
# Skip if discovery date is missing
if nr not in mpcs_disc:
print 'Skipping MPC #%d (no discovery date found)' % (nr)
continue
# Extract the orbital elements
_, _, _, epoch, M, w, W, i, e, n, a, _ = line.split(None, 11)
mpc = (mpcs_disc[nr], pd2jd(epoch), float(a), float(e), float(i), float(W), float(w), float(M), float(n))
mpcs.append(mpc)
# Maximum requested reached?
count += 1
if count == args.amount: break
# Sort by discovery date
mpcs.sort(key=itemgetter(0))
if args.compact:
output = mpcs
else:
keys = ['disc', 'epoch', 'a', 'e', 'i', 'W', 'w', 'M', 'n']
output = [dict(izip(keys, mpc)) for mpc in mpcs]
with open(OUTPUT_FILE, 'w') as outfile:
json.dump(output, outfile)
# json.dump(output, outfile, indent=2, separators=(',', ':'))
print 'Finished extracting %d MPCs in %s seconds.' % (len(mpcs), time()-start_time) | identifier_body |
data_to_json.py | #!/usr/bin/env python
"""
Extract minor planet orbital elements and discovery dates to json.
Orbital elements are extracted from the file MPCORB.DAT:
https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT
Discovery dates are extracted from the file NumberedMPs.txt:
https://minorplanetcenter.net/iau/lists/NumberedMPs.txt
Usage:
======
./data_to_json.py [-h] [-c] [N]
Parse orbital and discovery data to json.
positional arguments:
N maximum number of results
optional arguments:
-h, --help show this help message and exit
-c, --compact output as compact json format
TODO:
=====
- Get range between discovery dates
- Create an API (python server)
"""
OUTPUT_FILE = 'catalog.json'
MPCORB_FILE = 'MPCORB.DAT'
NUMMPS_FILE = 'NumberedMPs.txt'
import os, sys, json, argparse
from time import time
from datetime import datetime
from itertools import izip
from operator import itemgetter
# Change working directory to the module path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Datetime to Julian date
def dt2jd(dt):
dt = dt - datetime(2000, 1, 1) | y = int(str(int(pd[0], 36)) + pd[1:3])
m = int(pd[3], 36)
d = int(pd[4], 36)
return datetime(y, m, d)
# Packed to Julian date
def pd2jd(pd):
dt = pd2dt(pd)
return dt2jd(dt)
def main(argv):
    # Parse arguments
parser = argparse.ArgumentParser(description='Parse orbital and discovery data to json.')
parser.add_argument('amount', metavar='N', type=int, nargs='?', help='maximum number of results')
parser.add_argument('-c', '--compact', action='store_true', dest='compact', help='output as compact json format')
args = parser.parse_args()
print 'Extracting MPC discovery dates and orbital elements ...'
start_time = time()
# Extract the discovery dates from NumberedMPs.txt
# For a description of the format see https://minorplanetcenter.net/iau/lists/NumberedMPs000001.html
mpcs_disc = {}
for line in open(NUMMPS_FILE):
nr = int(line[1:7].strip().replace('(', ''))
# Extract the discovery date (YYYY MM DD) and convert it to Julian date
date = datetime.strptime(line[41:51], '%Y %m %d')
mpcs_disc[nr] = dt2jd(date)
"""
Extract the orbital elements from MPCORB.DAT
For a description of the format see https://minorplanetcenter.net/iau/info/MPOrbitFormat.html
The following columns are extracted:
epoch = Date for which the information is valid (packed date)
a = Semi-major axis (AU)
e = Orbital eccentricity (0..1)
i = Inclination to the ecliptic (degrees)
W = Longitude of ascending node (degrees)
w = Argument of perihelion (degrees)
M = Mean anomaly (degrees)
n = Mean daily motion (degrees per day)
"""
mpcs = []
count = 0
for line in open(MPCORB_FILE):
nr = line[167:173].strip().replace('(', '')
if not nr: continue
nr = int(nr)
# Skip if discovery date is missing
if nr not in mpcs_disc:
print 'Skipping MPC #%d (no discovery date found)' % (nr)
continue
# Extract the orbital elements
_, _, _, epoch, M, w, W, i, e, n, a, _ = line.split(None, 11)
mpc = (mpcs_disc[nr], pd2jd(epoch), float(a), float(e), float(i), float(W), float(w), float(M), float(n))
mpcs.append(mpc)
# Maximum requested reached?
count += 1
if count == args.amount: break
# Sort by discovery date
mpcs.sort(key=itemgetter(0))
if args.compact:
output = mpcs
else:
keys = ['disc', 'epoch', 'a', 'e', 'i', 'W', 'w', 'M', 'n']
output = [dict(izip(keys, mpc)) for mpc in mpcs]
with open(OUTPUT_FILE, 'w') as outfile:
json.dump(output, outfile)
# json.dump(output, outfile, indent=2, separators=(',', ':'))
print 'Finished extracting %d MPCs in %s seconds.' % (len(mpcs), time()-start_time)
if __name__ == '__main__':
    main(sys.argv[1:]) | return dt.days + (dt.seconds + dt.microseconds / 1e6) / 86400.0 + 2451544.5
# Packed date to Datetime
def pd2dt(pd): | random_line_split |
data_to_json.py | #!/usr/bin/env python
"""
Extract minor planet orbital elements and discovery dates to json.
Orbital elements are extracted from the file MPCORB.DAT:
https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT
Discovery dates are extracted from the file NumberedMPs.txt:
https://minorplanetcenter.net/iau/lists/NumberedMPs.txt
Usage:
======
./data_to_json.py [-h] [-c] [N]
Parse orbital and discovery data to json.
positional arguments:
N maximum number of results
optional arguments:
-h, --help show this help message and exit
-c, --compact output as compact json format
TODO:
=====
- Get range between discovery dates
- Create an API (python server)
"""
OUTPUT_FILE = 'catalog.json'
MPCORB_FILE = 'MPCORB.DAT'
NUMMPS_FILE = 'NumberedMPs.txt'
import os, sys, json, argparse
from time import time
from datetime import datetime
from itertools import izip
from operator import itemgetter
# Change working directory to the module path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Datetime to Julian date
def dt2jd(dt):
dt = dt - datetime(2000, 1, 1)
    return dt.days + (dt.seconds + dt.microseconds / 1e6) / 86400.0 + 2451544.5
# Packed date to Datetime
def | (pd):
y = int(str(int(pd[0], 36)) + pd[1:3])
m = int(pd[3], 36)
d = int(pd[4], 36)
return datetime(y, m, d)
# Packed to Julian date
def pd2jd(pd):
dt = pd2dt(pd)
return dt2jd(dt)
def main(argv):
    # Parse arguments
parser = argparse.ArgumentParser(description='Parse orbital and discovery data to json.')
parser.add_argument('amount', metavar='N', type=int, nargs='?', help='maximum number of results')
parser.add_argument('-c', '--compact', action='store_true', dest='compact', help='output as compact json format')
args = parser.parse_args()
print 'Extracting MPC discovery dates and orbital elements ...'
start_time = time()
# Extract the discovery dates from NumberedMPs.txt
# For a description of the format see https://minorplanetcenter.net/iau/lists/NumberedMPs000001.html
mpcs_disc = {}
for line in open(NUMMPS_FILE):
nr = int(line[1:7].strip().replace('(', ''))
# Extract the discovery date (YYYY MM DD) and convert it to Julian date
date = datetime.strptime(line[41:51], '%Y %m %d')
mpcs_disc[nr] = dt2jd(date)
"""
Extract the orbital elements from MPCORB.DAT
For a description of the format see https://minorplanetcenter.net/iau/info/MPOrbitFormat.html
The following columns are extracted:
epoch = Date for which the information is valid (packed date)
a = Semi-major axis (AU)
e = Orbital eccentricity (0..1)
i = Inclination to the ecliptic (degrees)
W = Longitude of ascending node (degrees)
w = Argument of perihelion (degrees)
M = Mean anomaly (degrees)
n = Mean daily motion (degrees per day)
"""
mpcs = []
count = 0
for line in open(MPCORB_FILE):
nr = line[167:173].strip().replace('(', '')
if not nr: continue
nr = int(nr)
# Skip if discovery date is missing
if nr not in mpcs_disc:
print 'Skipping MPC #%d (no discovery date found)' % (nr)
continue
# Extract the orbital elements
_, _, _, epoch, M, w, W, i, e, n, a, _ = line.split(None, 11)
mpc = (mpcs_disc[nr], pd2jd(epoch), float(a), float(e), float(i), float(W), float(w), float(M), float(n))
mpcs.append(mpc)
# Maximum requested reached?
count += 1
if count == args.amount: break
# Sort by discovery date
mpcs.sort(key=itemgetter(0))
if args.compact:
output = mpcs
else:
keys = ['disc', 'epoch', 'a', 'e', 'i', 'W', 'w', 'M', 'n']
output = [dict(izip(keys, mpc)) for mpc in mpcs]
with open(OUTPUT_FILE, 'w') as outfile:
json.dump(output, outfile)
# json.dump(output, outfile, indent=2, separators=(',', ':'))
print 'Finished extracting %d MPCs in %s seconds.' % (len(mpcs), time()-start_time)
if __name__ == '__main__':
main(sys.argv[1:])
| pd2dt | identifier_name |
d3d8trace.py | ##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from specs.d3d8 import d3d8
from trace import DllTracer
class | (DllTracer):
def dump_arg_instance(self, function, arg):
# Dump shaders as strings
if function.name in ('CreateVertexShader', 'CreatePixelShader') and arg.name == 'pFunction':
print ' DumpShader(trace::localWriter, %s);' % (arg.name)
return
DllTracer.dump_arg_instance(self, function, arg)
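        # All other arguments fall through to the generic DllTracer
        # serialization; only pFunction (the shader bytecode) is special-cased
        # so traces carry readable shader text instead of a raw pointer.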
if __name__ == '__main__':
print '#include <windows.h>'
print '#include <d3d8.h>'
print '#include "d3dshader.hpp"'
print
print '#include "trace_writer.hpp"'
print '#include "os.hpp"'
print
tracer = D3D8Tracer('d3d8.dll')
tracer.trace_api(d3d8)
| D3D8Tracer | identifier_name |
d3d8trace.py | ##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from specs.d3d8 import d3d8
from trace import DllTracer
class D3D8Tracer(DllTracer):
def dump_arg_instance(self, function, arg):
# Dump shaders as strings
if function.name in ('CreateVertexShader', 'CreatePixelShader') and arg.name == 'pFunction':
|
DllTracer.dump_arg_instance(self, function, arg)
if __name__ == '__main__':
print '#include <windows.h>'
print '#include <d3d8.h>'
print '#include "d3dshader.hpp"'
print
print '#include "trace_writer.hpp"'
print '#include "os.hpp"'
print
tracer = D3D8Tracer('d3d8.dll')
tracer.trace_api(d3d8)
| print ' DumpShader(trace::localWriter, %s);' % (arg.name)
return | conditional_block |
d3d8trace.py | ##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from specs.d3d8 import d3d8
from trace import DllTracer
class D3D8Tracer(DllTracer):
def dump_arg_instance(self, function, arg):
# Dump shaders as strings
|
if __name__ == '__main__':
print '#include <windows.h>'
print '#include <d3d8.h>'
print '#include "d3dshader.hpp"'
print
print '#include "trace_writer.hpp"'
print '#include "os.hpp"'
print
tracer = D3D8Tracer('d3d8.dll')
tracer.trace_api(d3d8)
| if function.name in ('CreateVertexShader', 'CreatePixelShader') and arg.name == 'pFunction':
print ' DumpShader(trace::localWriter, %s);' % (arg.name)
return
DllTracer.dump_arg_instance(self, function, arg) | identifier_body |
d3d8trace.py | ##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is | #
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from specs.d3d8 import d3d8
from trace import DllTracer
class D3D8Tracer(DllTracer):
def dump_arg_instance(self, function, arg):
# Dump shaders as strings
if function.name in ('CreateVertexShader', 'CreatePixelShader') and arg.name == 'pFunction':
print ' DumpShader(trace::localWriter, %s);' % (arg.name)
return
DllTracer.dump_arg_instance(self, function, arg)
if __name__ == '__main__':
print '#include <windows.h>'
print '#include <d3d8.h>'
print '#include "d3dshader.hpp"'
print
print '#include "trace_writer.hpp"'
print '#include "os.hpp"'
print
tracer = D3D8Tracer('d3d8.dll')
tracer.trace_api(d3d8) | # furnished to do so, subject to the following conditions: | random_line_split |
parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The context within which CSS code is parsed.
#![deny(missing_docs)]
use cssparser::{Parser, SourcePosition, UnicodeRange};
use error_reporting::ParseErrorReporter;
#[cfg(feature = "gecko")]
use gecko_bindings::sugar::refptr::{GeckoArcPrincipal, GeckoArcURI};
use servo_url::ServoUrl;
use style_traits::OneOrMoreCommaSeparated;
use stylesheets::{MemoryHoleReporter, Origin};
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(not(feature = "gecko"))]
pub struct ParserContextExtraData;
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(feature = "gecko")]
pub struct ParserContextExtraData {
/// The base URI.
pub base: Option<GeckoArcURI>,
/// The referrer URI.
pub referrer: Option<GeckoArcURI>,
/// The principal that loaded this stylesheet.
pub principal: Option<GeckoArcPrincipal>,
}
#[cfg(not(feature = "gecko"))]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData
}
}
#[cfg(feature = "gecko")]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData { base: None, referrer: None, principal: None }
}
}
#[cfg(feature = "gecko")]
impl ParserContextExtraData {
/// Construct from a GeckoParserExtraData
///
/// GeckoParserExtraData must live longer than this call
pub unsafe fn new(data: *const ::gecko_bindings::structs::GeckoParserExtraData) -> Self {
// the to_safe calls are safe since we trust that we have references to
// real Gecko refptrs. The dereferencing of data is safe because this function
// is expected to be called with a `data` living longer than this function.
unsafe { ParserContextExtraData {
base: Some((*data).mBaseURI.to_safe()),
referrer: Some((*data).mReferrer.to_safe()),
principal: Some((*data).mPrincipal.to_safe()),
}}
}
}
/// The data that the parser needs from outside in order to parse a stylesheet.
pub struct ParserContext<'a> {
/// The `Origin` of the stylesheet, whether it's a user, author or
/// user-agent stylesheet.
pub stylesheet_origin: Origin,
/// The base url we're parsing this stylesheet as.
pub base_url: &'a ServoUrl,
/// An error reporter to report syntax errors.
pub error_reporter: Box<ParseErrorReporter + Send>,
/// Implementation-dependent extra data.
pub extra_data: ParserContextExtraData,
}
impl<'a> ParserContext<'a> {
/// Create a `ParserContext` with extra data.
pub fn new_with_extra_data(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>,
extra_data: ParserContextExtraData)
-> ParserContext<'a> {
ParserContext {
stylesheet_origin: stylesheet_origin,
base_url: base_url,
error_reporter: error_reporter,
extra_data: extra_data,
}
}
/// Create a parser context with the default extra data.
pub fn new(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>)
-> ParserContext<'a> {
let extra_data = ParserContextExtraData::default();
Self::new_with_extra_data(stylesheet_origin, base_url, error_reporter, extra_data)
}
/// Create a parser context for on-the-fly parsing in CSSOM
pub fn new_for_cssom(base_url: &'a ServoUrl) -> ParserContext<'a> {
Self::new(Origin::User, base_url, Box::new(MemoryHoleReporter))
}
}
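// Note: CSSOM contexts parse as `Origin::User` and route diagnostics to
// `MemoryHoleReporter`, i.e. parse errors from CSSOM callers are deliberately
// swallowed.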
/// Defaults to a no-op.
/// Set a `RUST_LOG=style::errors` environment variable
/// to log CSS parse errors to stderr.
pub fn log_css_error(input: &mut Parser, position: SourcePosition, message: &str, parsercontext: &ParserContext) {
let servo_url = parsercontext.base_url;
parsercontext.error_reporter.report_error(input, position, message, servo_url);
}
// XXXManishearth Replace all specified value parse impls with impls of this
// trait. This will make it easy to write more generic values in the future.
/// A trait to abstract parsing of a specified value given a `ParserContext` and
/// CSS input.
pub trait Parse : Sized {
/// Parse a value of this type.
///
/// Returns an error on failure.
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()>;
}
impl<T> Parse for Vec<T> where T: Parse + OneOrMoreCommaSeparated {
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> |
}
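// Sketch: for some `T: Parse + OneOrMoreCommaSeparated`, input such as
// "a, b, c" parses into a three-element `Vec<T>` through the blanket impl
// above (hypothetical tokens, shown only to illustrate the shape).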
/// Parse a non-empty space-separated or comma-separated list of objects parsed by parse_one
pub fn parse_space_or_comma_separated<F, T>(input: &mut Parser, mut parse_one: F)
-> Result<Vec<T>, ()>
where F: FnMut(&mut Parser) -> Result<T, ()> {
let first = parse_one(input)?;
let mut vec = vec![first];
loop {
let _ = input.try(|i| i.expect_comma());
if let Ok(val) = input.try(|i| parse_one(i)) {
vec.push(val)
} else {
break
}
}
Ok(vec)
}
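// Accepted shapes (sketch): "a b c", "a, b, c", or mixed "a, b c"; each pass
// consumes at most one optional comma, then tries to parse one more value.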
impl Parse for UnicodeRange {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
UnicodeRange::parse(input)
}
}
| {
input.parse_comma_separated(|input| T::parse(context, input))
} | identifier_body |
parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The context within which CSS code is parsed.
#![deny(missing_docs)]
use cssparser::{Parser, SourcePosition, UnicodeRange};
use error_reporting::ParseErrorReporter;
#[cfg(feature = "gecko")]
use gecko_bindings::sugar::refptr::{GeckoArcPrincipal, GeckoArcURI};
use servo_url::ServoUrl;
use style_traits::OneOrMoreCommaSeparated;
use stylesheets::{MemoryHoleReporter, Origin};
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(not(feature = "gecko"))]
pub struct ParserContextExtraData;
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(feature = "gecko")]
pub struct ParserContextExtraData {
/// The base URI.
pub base: Option<GeckoArcURI>,
/// The referrer URI.
pub referrer: Option<GeckoArcURI>,
/// The principal that loaded this stylesheet.
pub principal: Option<GeckoArcPrincipal>,
}
#[cfg(not(feature = "gecko"))]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData
}
}
#[cfg(feature = "gecko")]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData { base: None, referrer: None, principal: None }
}
}
#[cfg(feature = "gecko")]
impl ParserContextExtraData {
/// Construct from a GeckoParserExtraData
///
/// GeckoParserExtraData must live longer than this call
pub unsafe fn new(data: *const ::gecko_bindings::structs::GeckoParserExtraData) -> Self {
// the to_safe calls are safe since we trust that we have references to
// real Gecko refptrs. The dereferencing of data is safe because this function
// is expected to be called with a `data` living longer than this function.
unsafe { ParserContextExtraData {
base: Some((*data).mBaseURI.to_safe()),
referrer: Some((*data).mReferrer.to_safe()),
principal: Some((*data).mPrincipal.to_safe()),
}}
}
}
/// The data that the parser needs from outside in order to parse a stylesheet.
pub struct ParserContext<'a> {
/// The `Origin` of the stylesheet, whether it's a user, author or
/// user-agent stylesheet.
pub stylesheet_origin: Origin,
/// The base url we're parsing this stylesheet as.
pub base_url: &'a ServoUrl,
/// An error reporter to report syntax errors.
pub error_reporter: Box<ParseErrorReporter + Send>,
/// Implementation-dependent extra data.
pub extra_data: ParserContextExtraData,
}
impl<'a> ParserContext<'a> {
/// Create a `ParserContext` with extra data.
pub fn new_with_extra_data(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>,
extra_data: ParserContextExtraData)
-> ParserContext<'a> {
ParserContext {
stylesheet_origin: stylesheet_origin,
base_url: base_url,
error_reporter: error_reporter,
extra_data: extra_data,
}
}
/// Create a parser context with the default extra data.
pub fn new(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>)
-> ParserContext<'a> {
let extra_data = ParserContextExtraData::default();
Self::new_with_extra_data(stylesheet_origin, base_url, error_reporter, extra_data)
}
/// Create a parser context for on-the-fly parsing in CSSOM
pub fn new_for_cssom(base_url: &'a ServoUrl) -> ParserContext<'a> {
Self::new(Origin::User, base_url, Box::new(MemoryHoleReporter))
}
}
/// Defaults to a no-op.
/// Set a `RUST_LOG=style::errors` environment variable
/// to log CSS parse errors to stderr.
pub fn log_css_error(input: &mut Parser, position: SourcePosition, message: &str, parsercontext: &ParserContext) {
let servo_url = parsercontext.base_url;
parsercontext.error_reporter.report_error(input, position, message, servo_url);
}
// XXXManishearth Replace all specified value parse impls with impls of this
// trait. This will make it easy to write more generic values in the future.
/// A trait to abstract parsing of a specified value given a `ParserContext` and
/// CSS input.
pub trait Parse : Sized {
/// Parse a value of this type.
///
/// Returns an error on failure.
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()>;
}
impl<T> Parse for Vec<T> where T: Parse + OneOrMoreCommaSeparated {
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
input.parse_comma_separated(|input| T::parse(context, input))
}
}
/// Parse a non-empty space-separated or comma-separated list of objects parsed by parse_one
pub fn parse_space_or_comma_separated<F, T>(input: &mut Parser, mut parse_one: F)
-> Result<Vec<T>, ()>
where F: FnMut(&mut Parser) -> Result<T, ()> {
let first = parse_one(input)?;
let mut vec = vec![first];
loop {
let _ = input.try(|i| i.expect_comma());
if let Ok(val) = input.try(|i| parse_one(i)) | else {
break
}
}
Ok(vec)
}
impl Parse for UnicodeRange {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
UnicodeRange::parse(input)
}
}
| {
vec.push(val)
} | conditional_block |
parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The context within which CSS code is parsed.
#![deny(missing_docs)]
use cssparser::{Parser, SourcePosition, UnicodeRange};
use error_reporting::ParseErrorReporter;
#[cfg(feature = "gecko")]
use gecko_bindings::sugar::refptr::{GeckoArcPrincipal, GeckoArcURI};
use servo_url::ServoUrl;
use style_traits::OneOrMoreCommaSeparated;
use stylesheets::{MemoryHoleReporter, Origin};
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(not(feature = "gecko"))]
pub struct ParserContextExtraData;
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(feature = "gecko")]
pub struct ParserContextExtraData {
/// The base URI.
pub base: Option<GeckoArcURI>,
/// The referrer URI.
pub referrer: Option<GeckoArcURI>,
/// The principal that loaded this stylesheet.
pub principal: Option<GeckoArcPrincipal>,
}
#[cfg(not(feature = "gecko"))]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData
}
}
#[cfg(feature = "gecko")]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData { base: None, referrer: None, principal: None }
} | impl ParserContextExtraData {
/// Construct from a GeckoParserExtraData
///
/// GeckoParserExtraData must live longer than this call
pub unsafe fn new(data: *const ::gecko_bindings::structs::GeckoParserExtraData) -> Self {
// the to_safe calls are safe since we trust that we have references to
// real Gecko refptrs. The dereferencing of data is safe because this function
// is expected to be called with a `data` living longer than this function.
unsafe { ParserContextExtraData {
base: Some((*data).mBaseURI.to_safe()),
referrer: Some((*data).mReferrer.to_safe()),
principal: Some((*data).mPrincipal.to_safe()),
}}
}
}
/// The data that the parser needs from outside in order to parse a stylesheet.
pub struct ParserContext<'a> {
/// The `Origin` of the stylesheet, whether it's a user, author or
/// user-agent stylesheet.
pub stylesheet_origin: Origin,
/// The base url we're parsing this stylesheet as.
pub base_url: &'a ServoUrl,
/// An error reporter to report syntax errors.
pub error_reporter: Box<ParseErrorReporter + Send>,
/// Implementation-dependent extra data.
pub extra_data: ParserContextExtraData,
}
impl<'a> ParserContext<'a> {
/// Create a `ParserContext` with extra data.
pub fn new_with_extra_data(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>,
extra_data: ParserContextExtraData)
-> ParserContext<'a> {
ParserContext {
stylesheet_origin: stylesheet_origin,
base_url: base_url,
error_reporter: error_reporter,
extra_data: extra_data,
}
}
/// Create a parser context with the default extra data.
pub fn new(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>)
-> ParserContext<'a> {
let extra_data = ParserContextExtraData::default();
Self::new_with_extra_data(stylesheet_origin, base_url, error_reporter, extra_data)
}
/// Create a parser context for on-the-fly parsing in CSSOM
pub fn new_for_cssom(base_url: &'a ServoUrl) -> ParserContext<'a> {
Self::new(Origin::User, base_url, Box::new(MemoryHoleReporter))
}
}
/// Defaults to a no-op.
/// Set a `RUST_LOG=style::errors` environment variable
/// to log CSS parse errors to stderr.
pub fn log_css_error(input: &mut Parser, position: SourcePosition, message: &str, parsercontext: &ParserContext) {
let servo_url = parsercontext.base_url;
parsercontext.error_reporter.report_error(input, position, message, servo_url);
}
// XXXManishearth Replace all specified value parse impls with impls of this
// trait. This will make it easy to write more generic values in the future.
/// A trait to abstract parsing of a specified value given a `ParserContext` and
/// CSS input.
pub trait Parse : Sized {
/// Parse a value of this type.
///
/// Returns an error on failure.
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()>;
}
impl<T> Parse for Vec<T> where T: Parse + OneOrMoreCommaSeparated {
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
input.parse_comma_separated(|input| T::parse(context, input))
}
}
/// Parse a non-empty space-separated or comma-separated list of objects parsed by parse_one
pub fn parse_space_or_comma_separated<F, T>(input: &mut Parser, mut parse_one: F)
-> Result<Vec<T>, ()>
where F: FnMut(&mut Parser) -> Result<T, ()> {
let first = parse_one(input)?;
let mut vec = vec![first];
loop {
let _ = input.try(|i| i.expect_comma());
if let Ok(val) = input.try(|i| parse_one(i)) {
vec.push(val)
} else {
break
}
}
Ok(vec)
}
impl Parse for UnicodeRange {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
UnicodeRange::parse(input)
}
} | }
#[cfg(feature = "gecko")] | random_line_split |
parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The context within which CSS code is parsed.
#![deny(missing_docs)]
use cssparser::{Parser, SourcePosition, UnicodeRange};
use error_reporting::ParseErrorReporter;
#[cfg(feature = "gecko")]
use gecko_bindings::sugar::refptr::{GeckoArcPrincipal, GeckoArcURI};
use servo_url::ServoUrl;
use style_traits::OneOrMoreCommaSeparated;
use stylesheets::{MemoryHoleReporter, Origin};
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(not(feature = "gecko"))]
pub struct ParserContextExtraData;
/// Extra data that the style backend may need to parse stylesheets.
#[cfg(feature = "gecko")]
pub struct | {
/// The base URI.
pub base: Option<GeckoArcURI>,
/// The referrer URI.
pub referrer: Option<GeckoArcURI>,
/// The principal that loaded this stylesheet.
pub principal: Option<GeckoArcPrincipal>,
}
#[cfg(not(feature = "gecko"))]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData
}
}
#[cfg(feature = "gecko")]
impl Default for ParserContextExtraData {
fn default() -> Self {
ParserContextExtraData { base: None, referrer: None, principal: None }
}
}
#[cfg(feature = "gecko")]
impl ParserContextExtraData {
/// Construct from a GeckoParserExtraData
///
/// GeckoParserExtraData must live longer than this call
pub unsafe fn new(data: *const ::gecko_bindings::structs::GeckoParserExtraData) -> Self {
// the to_safe calls are safe since we trust that we have references to
// real Gecko refptrs. The dereferencing of data is safe because this function
// is expected to be called with a `data` living longer than this function.
unsafe { ParserContextExtraData {
base: Some((*data).mBaseURI.to_safe()),
referrer: Some((*data).mReferrer.to_safe()),
principal: Some((*data).mPrincipal.to_safe()),
}}
}
}
/// The data that the parser needs from outside in order to parse a stylesheet.
pub struct ParserContext<'a> {
/// The `Origin` of the stylesheet, whether it's a user, author or
/// user-agent stylesheet.
pub stylesheet_origin: Origin,
/// The base url we're parsing this stylesheet as.
pub base_url: &'a ServoUrl,
/// An error reporter to report syntax errors.
pub error_reporter: Box<ParseErrorReporter + Send>,
/// Implementation-dependent extra data.
pub extra_data: ParserContextExtraData,
}
impl<'a> ParserContext<'a> {
/// Create a `ParserContext` with extra data.
pub fn new_with_extra_data(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>,
extra_data: ParserContextExtraData)
-> ParserContext<'a> {
ParserContext {
stylesheet_origin: stylesheet_origin,
base_url: base_url,
error_reporter: error_reporter,
extra_data: extra_data,
}
}
/// Create a parser context with the default extra data.
pub fn new(stylesheet_origin: Origin,
base_url: &'a ServoUrl,
error_reporter: Box<ParseErrorReporter + Send>)
-> ParserContext<'a> {
let extra_data = ParserContextExtraData::default();
Self::new_with_extra_data(stylesheet_origin, base_url, error_reporter, extra_data)
}
/// Create a parser context for on-the-fly parsing in CSSOM
pub fn new_for_cssom(base_url: &'a ServoUrl) -> ParserContext<'a> {
Self::new(Origin::User, base_url, Box::new(MemoryHoleReporter))
}
}
/// Defaults to a no-op.
/// Set a `RUST_LOG=style::errors` environment variable
/// to log CSS parse errors to stderr.
pub fn log_css_error(input: &mut Parser, position: SourcePosition, message: &str, parsercontext: &ParserContext) {
let servo_url = parsercontext.base_url;
parsercontext.error_reporter.report_error(input, position, message, servo_url);
}
// XXXManishearth Replace all specified value parse impls with impls of this
// trait. This will make it easy to write more generic values in the future.
/// A trait to abstract parsing of a specified value given a `ParserContext` and
/// CSS input.
pub trait Parse : Sized {
/// Parse a value of this type.
///
/// Returns an error on failure.
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()>;
}
impl<T> Parse for Vec<T> where T: Parse + OneOrMoreCommaSeparated {
fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
input.parse_comma_separated(|input| T::parse(context, input))
}
}
/// Parse a non-empty space-separated or comma-separated list of objects parsed by parse_one
pub fn parse_space_or_comma_separated<F, T>(input: &mut Parser, mut parse_one: F)
-> Result<Vec<T>, ()>
where F: FnMut(&mut Parser) -> Result<T, ()> {
let first = parse_one(input)?;
let mut vec = vec![first];
loop {
let _ = input.try(|i| i.expect_comma());
if let Ok(val) = input.try(|i| parse_one(i)) {
vec.push(val)
} else {
break
}
}
Ok(vec)
}
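// Usage sketch (illustrative; `MySpecifiedValue` is a hypothetical type standing in for
// any single-item parser): both "10px 20px" and "10px, 20px" yield two items here,
// because the comma between items is optional -- `expect_comma` is wrapped in `try`
// and simply ignored when absent.
// let pair = parse_space_or_comma_separated(input, |i| MySpecifiedValue::parse(context, i));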
impl Parse for UnicodeRange {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
UnicodeRange::parse(input)
}
}
| ParserContextExtraData | identifier_name |
helper_arabic_ligature_exceptions.py | from __future__ import print_function
from glyphNameFormatter.tools import camelCase
doNotProcessAsLigatureRanges = [
(0xfc5e, 0xfc63),
(0xfe70, 0xfe74),
#(0xfc5e, 0xfc61),
(0xfcf2, 0xfcf4),
(0xfe76, 0xfe80),
]
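# For reference (added note, based on the Unicode Arabic Presentation Forms-B block):
# 0xfe70-0xfe74 and 0xfe76-0xfe80 are mostly presentation forms of the harakat
# (fathatan, fatha, damma, kasra, shadda, sukun, ...), which is why these ranges are
# treated as marks rather than ligatures below.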
def process(self):
# Specifically: do not add suffixes to these ligatures,
# they're really Arabic marks
for a, b in doNotProcessAsLigatureRanges:
|
return False
if __name__ == "__main__":
from glyphNameFormatter import GlyphName
print("\ndoNotProcessAsLigatureRanges", doNotProcessAsLigatureRanges)
odd = 0xfe76
for a, b in doNotProcessAsLigatureRanges:
for u in range(a,b+1):
try:
g = GlyphName(uniNumber=u)
n = g.getName()
print(hex(u), n, g.uniName)
except:
import traceback
traceback.print_exc()
| if a <= self.uniNumber <= b:
self.replace('TAIL FRAGMENT', "kashida Fina")
self.replace('INITIAL FORM', "init")
self.replace('MEDIAL FORM', "medi")
self.replace('FINAL FORM', "fina")
self.replace('ISOLATED FORM', "isol")
self.replace('WITH SUPERSCRIPT', "")
self.replace('WITH', "")
self.replace("LIGATURE", "")
self.replace("ARABIC", "")
self.replace("SYMBOL", "")
self.replace("LETTER", "")
self.lower()
self.camelCase()
return True | conditional_block |
helper_arabic_ligature_exceptions.py | from __future__ import print_function
from glyphNameFormatter.tools import camelCase
doNotProcessAsLigatureRanges = [
(0xfc5e, 0xfc63),
(0xfe70, 0xfe74),
#(0xfc5e, 0xfc61),
(0xfcf2, 0xfcf4),
(0xfe76, 0xfe80),
]
def | (self):
# Specifically: do not add suffixes to these ligatures,
# they're really Arabic marks
for a, b in doNotProcessAsLigatureRanges:
if a <= self.uniNumber <= b:
self.replace('TAIL FRAGMENT', "kashida Fina")
self.replace('INITIAL FORM', "init")
self.replace('MEDIAL FORM', "medi")
self.replace('FINAL FORM', "fina")
self.replace('ISOLATED FORM', "isol")
self.replace('WITH SUPERSCRIPT', "")
self.replace('WITH', "")
self.replace("LIGATURE", "")
self.replace("ARABIC", "")
self.replace("SYMBOL", "")
self.replace("LETTER", "")
self.lower()
self.camelCase()
return True
return False
if __name__ == "__main__":
from glyphNameFormatter import GlyphName
print("\ndoNotProcessAsLigatureRanges", doNotProcessAsLigatureRanges)
odd = 0xfe76
for a, b in doNotProcessAsLigatureRanges:
for u in range(a,b+1):
try:
g = GlyphName(uniNumber=u)
n = g.getName()
print(hex(u), n, g.uniName)
except:
import traceback
traceback.print_exc()
| process | identifier_name |
helper_arabic_ligature_exceptions.py | from __future__ import print_function
from glyphNameFormatter.tools import camelCase
doNotProcessAsLigatureRanges = [
(0xfc5e, 0xfc63),
(0xfe70, 0xfe74),
#(0xfc5e, 0xfc61),
(0xfcf2, 0xfcf4),
(0xfe76, 0xfe80),
]
def process(self):
# Specifically: do not add suffixes to these ligatures,
# they're really Arabic marks
for a, b in doNotProcessAsLigatureRanges:
if a <= self.uniNumber <= b:
self.replace('TAIL FRAGMENT', "kashida Fina")
self.replace('INITIAL FORM', "init")
self.replace('MEDIAL FORM', "medi")
self.replace('FINAL FORM', "fina")
self.replace('ISOLATED FORM', "isol")
self.replace('WITH SUPERSCRIPT', "")
self.replace('WITH', "")
self.replace("LIGATURE", "")
self.replace("ARABIC", "")
self.replace("SYMBOL", "")
self.replace("LETTER", "") | return False
if __name__ == "__main__":
from glyphNameFormatter import GlyphName
print("\ndoNotProcessAsLigatureRanges", doNotProcessAsLigatureRanges)
odd = 0xfe76
for a, b in doNotProcessAsLigatureRanges:
for u in range(a,b+1):
try:
g = GlyphName(uniNumber=u)
n = g.getName()
print(hex(u), n, g.uniName)
except:
import traceback
traceback.print_exc() | self.lower()
self.camelCase()
return True
| random_line_split |
helper_arabic_ligature_exceptions.py | from __future__ import print_function
from glyphNameFormatter.tools import camelCase
doNotProcessAsLigatureRanges = [
(0xfc5e, 0xfc63),
(0xfe70, 0xfe74),
#(0xfc5e, 0xfc61),
(0xfcf2, 0xfcf4),
(0xfe76, 0xfe80),
]
def process(self):
# Specifically: do not add suffixes to these ligatures,
# they're really Arabic marks
|
if __name__ == "__main__":
from glyphNameFormatter import GlyphName
print("\ndoNotProcessAsLigatureRanges", doNotProcessAsLigatureRanges)
odd = 0xfe76
for a, b in doNotProcessAsLigatureRanges:
for u in range(a,b+1):
try:
g = GlyphName(uniNumber=u)
n = g.getName()
print(hex(u), n, g.uniName)
except:
import traceback
traceback.print_exc()
| for a, b in doNotProcessAsLigatureRanges:
if a <= self.uniNumber <= b:
self.replace('TAIL FRAGMENT', "kashida Fina")
self.replace('INITIAL FORM', "init")
self.replace('MEDIAL FORM', "medi")
self.replace('FINAL FORM', "fina")
self.replace('ISOLATED FORM', "isol")
self.replace('WITH SUPERSCRIPT', "")
self.replace('WITH', "")
self.replace("LIGATURE", "")
self.replace("ARABIC", "")
self.replace("SYMBOL", "")
self.replace("LETTER", "")
self.lower()
self.camelCase()
return True
return False | identifier_body |
compress.rs | //! An example of offloading work to a thread pool instead of doing work on the
//! main event loop.
//!
//! In this example the server will act as a form of echo server except that
//! it'll echo back gzip-compressed data. Each connected client will have the
//! data written streamed back as the compressed version is available, and all
//! compressing will occur on a thread pool rather than the main event loop.
//!
//! You can preview this example in one terminal:
//!
//! cargo run --example compress
//!
//! and in another terminal:
//!
//! echo test | cargo run --example connect 127.0.0.1:8080 | gunzip
//!
//! The latter command will need to be tweaked for non-unix-like shells, but
//! you can also redirect the stdout of the `connect` program to a file
//! and then decompress that.
extern crate futures;
extern crate futures_cpupool;
extern crate flate2;
extern crate tokio;
extern crate tokio_io;
use std::io;
use std::env;
use std::net::SocketAddr;
use futures::{Future, Stream, Poll};
use futures::future::Executor;
use futures_cpupool::CpuPool;
use tokio::net::{TcpListener, TcpStream};
use tokio::reactor::Core;
use tokio_io::{AsyncRead, AsyncWrite};
use flate2::write::GzEncoder;
fn main() {
// As with many other examples, parse our CLI arguments and prepare the
// reactor.
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let mut core = Core::new().unwrap();
let handle = core.handle();
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
// This is where we're going to offload our computationally heavy work
// (compressing) to. Here we just use a convenience constructor to create a
// pool of threads equal to the number of CPUs we have.
let pool = CpuPool::new_num_cpus();
// The compress logic will happen in the function below, but everything's
// still a future! Each client is spawned so it gets processed concurrently.
let server = socket.incoming().for_each(move |(socket, addr)| {
pool.execute(compress(socket, &pool).then(move |result| {
match result {
Ok((r, w)) => println!("{}: compressed {} bytes to {}", addr, r, w),
Err(e) => println!("{}: failed when compressing: {}", addr, e),
}
Ok(())
})).unwrap(); |
/// The main workhorse of this example. This'll compress all data read from
/// `socket` on the `pool` provided, writing it back out to `socket` as it's
/// available.
fn compress(socket: TcpStream, pool: &CpuPool)
-> Box<Future<Item = (u64, u64), Error = io::Error> + Send>
{
use tokio_io::io;
// The general interface that `CpuPool` provides is that we'll *spawn a
// future* onto it. All execution of the future will occur on the `CpuPool`
// and we'll get back a handle representing the completed value of the
// future. In essence it's our job here to create a future that represents
// compressing `socket`, and then we'll simply spawn it at the very end.
//
// Here we exploit the fact that `TcpStream` itself is `Send` in this
// function as well. That is, we can read/write the TCP stream on any
// thread, and we'll get notifications about it being ready from the reactor
// thread.
//
// Otherwise this is the same as the echo server except that after splitting
// we apply some encoding to one side, followed by a `shutdown` when we're
// done to ensure that all gz footers are written.
let (read, write) = socket.split();
let write = Count { io: write, amt: 0 };
let write = GzEncoder::new(write, flate2::Compression::Best);
let process = io::copy(read, write).and_then(|(amt, _read, write)| {
io::shutdown(write).map(move |io| (amt, io.get_ref().amt))
});
// Spawn the future so it executes entirely on the thread pool here
Box::new(pool.spawn(process))
}
struct Count<T> {
io: T,
amt: u64,
}
impl<T: io::Write> io::Write for Count<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.io.write(buf)?;
self.amt += n as u64;
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.io.flush()
}
}
impl<T: AsyncWrite> AsyncWrite for Count<T> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.shutdown()
}
} | Ok(())
});
core.run(server).unwrap();
} | random_line_split |
compress.rs | //! An example of offloading work to a thread pool instead of doing work on the
//! main event loop.
//!
//! In this example the server will act as a form of echo server except that
//! it'll echo back gzip-compressed data. Each connected client will have the
//! data written streamed back as the compressed version is available, and all
//! compressing will occur on a thread pool rather than the main event loop.
//!
//! You can preview this example in one terminal:
//!
//! cargo run --example compress
//!
//! and in another terminal:
//!
//! echo test | cargo run --example connect 127.0.0.1:8080 | gunzip
//!
//! The latter command will need to be tweaked for non-unix-like shells, but
//! you can also redirect the stdout of the `connect` program to a file
//! and then decompress that.
extern crate futures;
extern crate futures_cpupool;
extern crate flate2;
extern crate tokio;
extern crate tokio_io;
use std::io;
use std::env;
use std::net::SocketAddr;
use futures::{Future, Stream, Poll};
use futures::future::Executor;
use futures_cpupool::CpuPool;
use tokio::net::{TcpListener, TcpStream};
use tokio::reactor::Core;
use tokio_io::{AsyncRead, AsyncWrite};
use flate2::write::GzEncoder;
fn main() {
// As with many other examples, parse our CLI arguments and prepare the
// reactor.
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let mut core = Core::new().unwrap();
let handle = core.handle();
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
// This is where we're going to offload our computationally heavy work
// (compressing) to. Here we just use a convenience constructor to create a
// pool of threads equal to the number of CPUs we have.
let pool = CpuPool::new_num_cpus();
// The compress logic will happen in the function below, but everything's
// still a future! Each client is spawned so it gets processed concurrently.
let server = socket.incoming().for_each(move |(socket, addr)| {
pool.execute(compress(socket, &pool).then(move |result| {
match result {
Ok((r, w)) => println!("{}: compressed {} bytes to {}", addr, r, w),
Err(e) => println!("{}: failed when compressing: {}", addr, e),
}
Ok(())
})).unwrap();
Ok(())
});
core.run(server).unwrap();
}
/// The main workhorse of this example. This'll compress all data read from
/// `socket` on the `pool` provided, writing it back out to `socket` as it's
/// available.
fn | (socket: TcpStream, pool: &CpuPool)
-> Box<Future<Item = (u64, u64), Error = io::Error> + Send>
{
use tokio_io::io;
// The general interface that `CpuPool` provides is that we'll *spawn a
// future* onto it. All execution of the future will occur on the `CpuPool`
// and we'll get back a handle representing the completed value of the
// future. In essence it's our job here to create a future that represents
// compressing `socket`, and then we'll simply spawn it at the very end.
//
// Here we exploit the fact that `TcpStream` itself is `Send` in this
// function as well. That is, we can read/write the TCP stream on any
// thread, and we'll get notifications about it being ready from the reactor
// thread.
//
// Otherwise this is the same as the echo server except that after splitting
// we apply some encoding to one side, followed by a `shutdown` when we're
// done to ensure that all gz footers are written.
let (read, write) = socket.split();
let write = Count { io: write, amt: 0 };
let write = GzEncoder::new(write, flate2::Compression::Best);
let process = io::copy(read, write).and_then(|(amt, _read, write)| {
io::shutdown(write).map(move |io| (amt, io.get_ref().amt))
});
// Spawn the future so it executes entirely on the thread pool here
Box::new(pool.spawn(process))
}
struct Count<T> {
io: T,
amt: u64,
}
impl<T: io::Write> io::Write for Count<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.io.write(buf)?;
self.amt += n as u64;
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.io.flush()
}
}
impl<T: AsyncWrite> AsyncWrite for Count<T> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.shutdown()
}
}
| compress | identifier_name |
compress.rs | //! An example of offloading work to a thread pool instead of doing work on the
//! main event loop.
//!
//! In this example the server will act as a form of echo server except that
//! it'll echo back gzip-compressed data. Each connected client will have the
//! data written streamed back as the compressed version is available, and all
//! compressing will occur on a thread pool rather than the main event loop.
//!
//! You can preview this example in one terminal:
//!
//! cargo run --example compress
//!
//! and in another terminal:
//!
//! echo test | cargo run --example connect 127.0.0.1:8080 | gunzip
//!
//! The latter command will need to be tweaked for non-unix-like shells, but
//! you can also redirect the stdout of the `connect` program to a file
//! and then decompress that.
extern crate futures;
extern crate futures_cpupool;
extern crate flate2;
extern crate tokio;
extern crate tokio_io;
use std::io;
use std::env;
use std::net::SocketAddr;
use futures::{Future, Stream, Poll};
use futures::future::Executor;
use futures_cpupool::CpuPool;
use tokio::net::{TcpListener, TcpStream};
use tokio::reactor::Core;
use tokio_io::{AsyncRead, AsyncWrite};
use flate2::write::GzEncoder;
fn main() {
// As with many other examples, parse our CLI arguments and prepare the
// reactor.
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let mut core = Core::new().unwrap();
let handle = core.handle();
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
// This is where we're going to offload our computationally heavy work
// (compressing) to. Here we just use a convenience constructor to create a
// pool of threads equal to the number of CPUs we have.
let pool = CpuPool::new_num_cpus();
// The compress logic will happen in the function below, but everything's
// still a future! Each client is spawned so it gets processed concurrently.
let server = socket.incoming().for_each(move |(socket, addr)| {
pool.execute(compress(socket, &pool).then(move |result| {
match result {
Ok((r, w)) => println!("{}: compressed {} bytes to {}", addr, r, w),
Err(e) => println!("{}: failed when compressing: {}", addr, e),
}
Ok(())
})).unwrap();
Ok(())
});
core.run(server).unwrap();
}
/// The main workhorse of this example. This'll compress all data read from
/// `socket` on the `pool` provided, writing it back out to `socket` as it's
/// available.
fn compress(socket: TcpStream, pool: &CpuPool)
-> Box<Future<Item = (u64, u64), Error = io::Error> + Send>
{
use tokio_io::io;
// The general interface that `CpuPool` provides is that we'll *spawn a
// future* onto it. All execution of the future will occur on the `CpuPool`
// and we'll get back a handle representing the completed value of the
// future. In essence it's our job here to create a future that represents
// compressing `socket`, and then we'll simply spawn it at the very end.
//
// Here we exploit the fact that `TcpStream` itself is `Send` in this
// function as well. That is, we can read/write the TCP stream on any
// thread, and we'll get notifications about it being ready from the reactor
// thread.
//
// Otherwise this is the same as the echo server except that after splitting
// we apply some encoding to one side, followed by a `shutdown` when we're
// done to ensure that all gz footers are written.
let (read, write) = socket.split();
let write = Count { io: write, amt: 0 };
let write = GzEncoder::new(write, flate2::Compression::Best);
let process = io::copy(read, write).and_then(|(amt, _read, write)| {
io::shutdown(write).map(move |io| (amt, io.get_ref().amt))
});
// Spawn the future so it executes entirely on the thread pool here
Box::new(pool.spawn(process))
}
struct Count<T> {
io: T,
amt: u64,
}
impl<T: io::Write> io::Write for Count<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.io.write(buf)?;
self.amt += n as u64;
Ok(n)
}
fn flush(&mut self) -> io::Result<()> |
}
impl<T: AsyncWrite> AsyncWrite for Count<T> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.shutdown()
}
}
| {
self.io.flush()
} | identifier_body |
reducer.js | /**
* External dependencies
*/
import { expect } from 'chai';
/**
* Internal dependencies
*/
import {
SELECTED_SITE_SET,
SERIALIZE,
DESERIALIZE
} from 'state/action-types';
import reducer, { selectedSiteId } from '../reducer';
describe( 'reducer', () => {
it( 'should include expected keys in return value', () => {
expect( reducer( undefined, {} ) ).to.have.keys( [
'section',
'isLoading',
'layoutFocus',
'hasSidebar',
'isPreviewShowing',
'queryArguments',
'selectedSiteId',
'guidedTour',
'editor',
'reader',
'olark',
'preview',
'actionLog'
] );
} );
it( 'should refuse to persist any state', () => {
const state = reducer( {
selectedSiteId: 2916284
}, { type: SERIALIZE } );
| it( 'should refuse to restore any persisted state', () => {
const state = reducer( {
selectedSiteId: 2916284
}, { type: DESERIALIZE } );
expect( state ).to.eql( {} );
} );
describe( '#selectedSiteId()', () => {
it( 'should default to null', () => {
const state = selectedSiteId( undefined, {} );
expect( state ).to.be.null;
} );
it( 'should set the selected site ID', () => {
const state = selectedSiteId( null, {
type: SELECTED_SITE_SET,
siteId: 2916284
} );
expect( state ).to.equal( 2916284 );
} );
it( 'should set to null if siteId is undefined', () => {
const state = selectedSiteId( null, {
type: SELECTED_SITE_SET,
siteId: undefined
} );
expect( state ).to.be.null;
} );
} );
} ); | expect( state ).to.eql( {} );
} );
| random_line_split |
RewardsProvider.ts | /* | * The MIT License (MIT)
* Copyright (c) 2017 Heat Ledger Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* */
@Service('rewardsProviderFactory')
@Inject('heat','$q')
class RewardsProviderFactory {
constructor(private heat: HeatService, private $q: angular.IQService) {}
public createProvider(): IPaginatedDataProvider {
return new RewardsProvider(this.heat, this.$q);
}
}
class RewardsProvider implements IPaginatedDataProvider {
constructor(private heat: HeatService,
private $q: angular.IQService) {}
/* The number of items available */
public getPaginatedLength(): angular.IPromise<number> {
return this.heat.api.rewardsListCount()
}
/* Returns results starting at firstIndex and up to and including lastIndex */
public getPaginatedResults(firstIndex: number, lastIndex: number): angular.IPromise<Array<IHeatRewardsInfo>> {
return this.heat.api.rewardsList(firstIndex, lastIndex);
}
} | random_line_split |
|
RewardsProvider.ts | /*
* The MIT License (MIT)
* Copyright (c) 2017 Heat Ledger Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* */
@Service('rewardsProviderFactory')
@Inject('heat','$q')
class RewardsProviderFactory {
constructor(private heat: HeatService, private $q: angular.IQService) |
public createProvider(): IPaginatedDataProvider {
return new RewardsProvider(this.heat, this.$q);
}
}
class RewardsProvider implements IPaginatedDataProvider {
constructor(private heat: HeatService,
private $q: angular.IQService) {}
/* The number of items available */
public getPaginatedLength(): angular.IPromise<number> {
return this.heat.api.rewardsListCount()
}
/* Returns results starting at firstIndex and up to and including lastIndex */
public getPaginatedResults(firstIndex: number, lastIndex: number): angular.IPromise<Array<IHeatRewardsInfo>> {
return this.heat.api.rewardsList(firstIndex, lastIndex);
}
} | {} | identifier_body |
RewardsProvider.ts | /*
* The MIT License (MIT)
* Copyright (c) 2017 Heat Ledger Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* */
@Service('rewardsProviderFactory')
@Inject('heat','$q')
class RewardsProviderFactory {
| (private heat: HeatService, private $q: angular.IQService) {}
public createProvider(): IPaginatedDataProvider {
return new RewardsProvider(this.heat, this.$q);
}
}
class RewardsProvider implements IPaginatedDataProvider {
constructor(private heat: HeatService,
private $q: angular.IQService) {}
/* The number of items available */
public getPaginatedLength(): angular.IPromise<number> {
return this.heat.api.rewardsListCount()
}
/* Returns results starting at firstIndex and up to and including lastIndex */
public getPaginatedResults(firstIndex: number, lastIndex: number): angular.IPromise<Array<IHeatRewardsInfo>> {
return this.heat.api.rewardsList(firstIndex, lastIndex);
}
} | constructor | identifier_name |
make_bb_spectrum_plot.py | import ROOT
from math import pi, sqrt, pow, exp
import scipy.integrate
import numpy
from array import array
alpha = 7.2973e-3 # fine-structure constant (kept for reference; unused below)
m_e = 0.51099892 # electron mass [MeV]
Z_Xe = 54 # atomic number of xenon
Q = 2.4578 # Q-value of the 136Xe double-beta decay [MeV]
def F(Z, KE):
E = KE + m_e
W = E/m_e
Z0 = Z + 2
if W <= 1:
W = 1 + 1e-4
if W > 2.2:
a = -8.46e-2 + 2.48e-2*Z0 + 2.37e-4*Z0**2
b = 1.15e-2 + 3.58e-4*Z0 - 6.17e-5*Z0**2
else:
a = -0.811 + 4.46e-2*Z0 + 1.08e-4*Z0**2
b = 0.673 - 1.82e-2*Z0 + 6.38e-5*Z0**2
x = sqrt(W-1)
p = sqrt(W**2 - 1)
if (p <= 0):
result = 1
else:
result = W/p*exp(a + b*x)
return result
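# Sanity check (illustrative, not part of the original script): the Fermi factor
# is largest for slow electrons leaving the positively charged daughter nucleus,
# so for xenon one expects F(Z_Xe, 0.1) > F(Z_Xe, 2.0) > 1.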
def D(D, K, i):
Z = Z_Xe
T0 = Q/m_e
E1 = 0.5*(K+D) + 1
E2 = 0.5*(K-D) + 1
p1 = sqrt(E1**2 - 1)
p2 = sqrt(E2**2 - 1)
T1 = E1 - 1
T2 = E2 - 1
return p1*E1*F(Z, T1*m_e)*p2*E2*F(Z, T2*m_e)*pow(T0 - K, i)
def SumSpectrum(K, i):
if K < 0:
return 0
elif K > Q:
return 0
a = -K/m_e
b = K/m_e
x = scipy.integrate.quad(D, a, b, (K/m_e, i))[0]
if x < 0:
return 0
else:
return x
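# In equation form (up to normalization), with k = K/m_e and T0 = Q/m_e:
# dN/dK ~ integral_{-k}^{+k} p1*E1*F(Z,T1) * p2*E2*F(Z,T2) * (T0 - k)^i dD
# where E1 = (k+D)/2 + 1, E2 = (k-D)/2 + 1, and the power i selects the spectrum
# shape (i=5 for ordinary 2vbb; other i values give the Majoron modes plotted below).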
def gauss_conv(x, y, res):
|
def normalize(y, eps, f):
return [a*f for a in y]
N = 1000
min_E = 0.0
max_E = 1.2
E_scaled = array('d', numpy.linspace(min_E, max_E, N, False))
Es = array('d', (E*Q for E in E_scaled))
eps = (max_E - min_E)/N
bb0n = [0.5/eps if abs(E-Q)<eps else 0 for E in Es]
bb2n = [SumSpectrum(E, 5) for E in Es]
bb0n_smeared = gauss_conv(Es, bb0n, 0.02)
bb2n_smeared = gauss_conv(Es, bb2n, 0.02)
bb0n_int = scipy.integrate.simps(bb0n_smeared, None, eps)
bb0n_norm = array('d', normalize(bb0n_smeared, eps, 1e-2/bb0n_int))
bb2n_int = scipy.integrate.simps(bb2n_smeared, None, eps)
bb2n_norm = array('d', normalize(bb2n_smeared, eps, 1/bb2n_int))
g_bb0n = ROOT.TGraph(N, E_scaled, bb0n_norm)
g_bb0n.SetTitle("")
g_bb0n.SetLineStyle(ROOT.kDashed)
g_bb2n = ROOT.TGraph(N, E_scaled, bb2n_norm)
g_bb2n.SetTitle("")
bb0nX = []
bb0nX.append([0.5/eps if abs(E-Q)<eps else 0 for E in Es])
for i in [1, 2, 3, 5, 7]:
bb0nX.append([SumSpectrum(E, i) for E in Es])
bb0nX_graphs = []
for bb0nXn in bb0nX:
bb0nX_int = scipy.integrate.simps(bb0nXn, None, eps)
bb0nX_norm = array('d', normalize(bb0nXn, eps, 1/bb0nX_int))
g_bb0nX = ROOT.TGraph(N, E_scaled, bb0nX_norm)
bb0nX_graphs.append(g_bb0nX)
min_E = 0.9
max_E = 1.1
E_scaled_z = array('d', numpy.linspace(min_E, max_E, N, False))
Es_z = array('d', (E*Q for E in E_scaled_z))
eps_z = (max_E - min_E)/N
bb0n_z = [0.5/eps_z if abs(E-Q)<eps_z else 0 for E in Es_z]
bb2n_z = [SumSpectrum(E, 5) for E in Es_z]
bb0n_smeared_z = gauss_conv(Es_z, bb0n_z, 0.02)
bb2n_smeared_z = gauss_conv(Es_z, bb2n_z, 0.02)
bb0n_norm_z = array('d', normalize(bb0n_smeared_z, eps, 1e-6/bb0n_int))
bb2n_norm_z = array('d', normalize(bb2n_smeared_z, eps, 1.0/bb2n_int))
g_bb0n_z = ROOT.TGraph(N, E_scaled_z, bb0n_norm_z)
g_bb0n_z.SetTitle("")
g_bb0n_z.SetLineStyle(ROOT.kDashed)
g_bb2n_z = ROOT.TGraph(N, E_scaled_z, bb2n_norm_z)
g_bb2n_z.SetTitle("")
#print("bb0n %f"%(sum((y*eps for y in bb0n_norm))))
#print("bb2n %f"%(sum((y*eps for y in bb2n_norm))))
c_both = ROOT.TCanvas("c_both","c_both")
p = ROOT.TPad("p", "p", 0, 0, 1, 1)
p.SetRightMargin(0.02)
p.SetTopMargin(0.02)
p.Draw()
p.cd()
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
g_bb2n.GetYaxis().SetTitle("dN/dE")
g_bb2n.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
c_both.cd()
p_inset = ROOT.TPad("p_inset","p_inset",0.5, 0.5, 0.995, 0.995)
p_inset.SetRightMargin(0.05)
p_inset.SetTopMargin(0.05)
p_inset.Draw()
p_inset.cd()
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
g_bb2n_z.GetYaxis().SetTitle("dN/dE")
g_bb2n_z.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
g_bb2n_z.GetYaxis().SetNoExponent(False)
# Zoom in so we can't see edge effects of the convolution
g_bb2n_z.GetXaxis().SetRangeUser(1-0.25*(1-min_E), 1+0.25*(max_E-1))
g_bb2n_z.GetYaxis().SetRangeUser(0, 0.0004)
c_z = ROOT.TCanvas("c_z","c_z")
c_z.SetRightMargin(0.05)
c_z.SetTopMargin(0.05)
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
c = ROOT.TCanvas("c","c")
c.SetRightMargin(0.05)
c.SetTopMargin(0.05)
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
c_majoron = ROOT.TCanvas("c_majoron")
c_majoron.SetRightMargin(0.05)
c_majoron.SetTopMargin(0.05)
colors = [ROOT.kBlack, ROOT.kRed, ROOT.kGreen, ROOT.kBlue,
ROOT.kMagenta, ROOT.kCyan]
draw_opt = "AL"
for i in xrange(len(bb0nX_graphs)):
bb0nX_graphs[-(i+1)].SetLineColor(colors[-(i+1)])
bb0nX_graphs[-(i+1)].Draw(draw_opt)
draw_opt = "L"
# Draw bb0n last so it doesn't scale others to 0
bb0nX_graphs[-1].SetTitle("")
bb0nX_graphs[-1].GetXaxis().SetRangeUser(0, 1.1)
bb0nX_graphs[-1].GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
bb0nX_graphs[-1].GetYaxis().SetTitle("dN/dE")
l_majoron = ROOT.TLegend(0.45, 0.77, 0.85, 0.94)
l_majoron.SetFillColor(ROOT.kWhite)
l_majoron.SetNColumns(2)
l_majoron.AddEntry(bb0nX_graphs[0], "0#nu#beta#beta", "l")
l_majoron.AddEntry(bb0nX_graphs[1], "0#nu#beta#beta#chi^{0} (n=1)", "l")
l_majoron.AddEntry(bb0nX_graphs[4], "2#nu#beta#beta (n=5)", "l")
l_majoron.AddEntry(bb0nX_graphs[2], "0#nu#beta#beta#chi^{0} (n=2)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[3], "0#nu#beta#beta#chi^{0}(#chi^{0}) (n=3)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[5], "0#nu#beta#beta#chi^{0}#chi^{0} (n=7)", "l")
l_majoron.Draw()
dummy = raw_input("Press Enter...")
| N = len(x)
mu = numpy.mean(x)
s = res*mu
gauss = [1.0/(s*sqrt(2*pi))*exp(-0.5*((a-mu)/s)**2) for a in x]
convolution = numpy.convolve(y, gauss,'same')
return convolution | identifier_body |
make_bb_spectrum_plot.py | import ROOT
from math import pi, sqrt, pow, exp
import scipy.integrate
import numpy
from array import array
alpha = 7.2973e-3
m_e = 0.51099892
Z_Xe = 54
Q = 2.4578
def F(Z, KE):
E = KE + m_e
W = E/m_e
Z0 = Z + 2
if W <= 1:
W = 1 + 1e-4
if W > 2.2:
a = -8.46e-2 + 2.48e-2*Z0 + 2.37e-4*Z0**2
b = 1.15e-2 + 3.58e-4*Z0 - 6.17e-5*Z0**2
else:
a = -0.811 + 4.46e-2*Z0 + 1.08e-4*Z0**2
b = 0.673 - 1.82e-2*Z0 + 6.38e-5*Z0**2
x = sqrt(W-1)
p = sqrt(W**2 - 1)
if (p <= 0):
result = 1
else:
result = W/p*exp(a + b*x)
return result
def D(D, K, i):
Z = Z_Xe
T0 = Q/m_e
E1 = 0.5*(K+D) + 1
E2 = 0.5*(K-D) + 1
p1 = sqrt(E1**2 - 1)
p2 = sqrt(E2**2 - 1)
T1 = E1 - 1
T2 = E2 - 1
return p1*E1*F(Z, T1*m_e)*p2*E2*F(Z, T2*m_e)*pow(T0 - K, i)
def SumSpectrum(K, i):
if K < 0:
return 0
elif K > Q:
return 0
a = -K/m_e
b = K/m_e
x = scipy.integrate.quad(D, a, b, (K/m_e, i))[0]
if x < 0:
return 0
else:
return x
def gauss_conv(x, y, res):
N = len(x)
mu = numpy.mean(x)
s = res*mu
gauss = [1.0/(s*sqrt(2*pi))*exp(-0.5*((a-mu)/s)**2) for a in x]
convolution = numpy.convolve(y, gauss,'same')
return convolution
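# Example (as used below): bb0n_smeared = gauss_conv(Es, bb0n, 0.02) applies a 2%
# Gaussian energy resolution. Note the kernel width is fixed at res*mean(x), so the
# smearing is not energy-dependent across the window -- adequate for this plot.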
def normalize(y, eps, f):
return [a*f for a in y]
N = 1000
min_E = 0.0
max_E = 1.2
E_scaled = array('d', numpy.linspace(min_E, max_E, N, False))
Es = array('d', (E*Q for E in E_scaled))
eps = (max_E - min_E)/N
bb0n = [0.5/eps if abs(E-Q)<eps else 0 for E in Es]
bb2n = [SumSpectrum(E, 5) for E in Es]
bb0n_smeared = gauss_conv(Es, bb0n, 0.02)
bb2n_smeared = gauss_conv(Es, bb2n, 0.02)
bb0n_int = scipy.integrate.simps(bb0n_smeared, None, eps)
bb0n_norm = array('d', normalize(bb0n_smeared, eps, 1e-2/bb0n_int))
bb2n_int = scipy.integrate.simps(bb2n_smeared, None, eps)
bb2n_norm = array('d', normalize(bb2n_smeared, eps, 1/bb2n_int))
g_bb0n = ROOT.TGraph(N, E_scaled, bb0n_norm)
g_bb0n.SetTitle("")
g_bb0n.SetLineStyle(ROOT.kDashed)
g_bb2n = ROOT.TGraph(N, E_scaled, bb2n_norm)
g_bb2n.SetTitle("")
bb0nX = []
bb0nX.append([0.5/eps if abs(E-Q)<eps else 0 for E in Es])
for i in [1, 2, 3, 5, 7]:
|
bb0nX_graphs = []
for bb0nXn in bb0nX:
bb0nX_int = scipy.integrate.simps(bb0nXn, None, eps)
bb0nX_norm = array('d', normalize(bb0nXn, eps, 1/bb0nX_int))
g_bb0nX = ROOT.TGraph(N, E_scaled, bb0nX_norm)
bb0nX_graphs.append(g_bb0nX)
min_E = 0.9
max_E = 1.1
E_scaled_z = array('d', numpy.linspace(min_E, max_E, N, False))
Es_z = array('d', (E*Q for E in E_scaled_z))
eps_z = (max_E - min_E)/N
bb0n_z = [0.5/eps_z if abs(E-Q)<eps_z else 0 for E in Es_z]
bb2n_z = [SumSpectrum(E, 5) for E in Es_z]
bb0n_smeared_z = gauss_conv(Es_z, bb0n_z, 0.02)
bb2n_smeared_z = gauss_conv(Es_z, bb2n_z, 0.02)
bb0n_norm_z = array('d', normalize(bb0n_smeared_z, eps, 1e-6/bb0n_int))
bb2n_norm_z = array('d', normalize(bb2n_smeared_z, eps, 1.0/bb2n_int))
g_bb0n_z = ROOT.TGraph(N, E_scaled_z, bb0n_norm_z)
g_bb0n_z.SetTitle("")
g_bb0n_z.SetLineStyle(ROOT.kDashed)
g_bb2n_z = ROOT.TGraph(N, E_scaled_z, bb2n_norm_z)
g_bb2n_z.SetTitle("")
#print("bb0n %f"%(sum((y*eps for y in bb0n_norm))))
#print("bb2n %f"%(sum((y*eps for y in bb2n_norm))))
c_both = ROOT.TCanvas("c_both","c_both")
p = ROOT.TPad("p", "p", 0, 0, 1, 1)
p.SetRightMargin(0.02)
p.SetTopMargin(0.02)
p.Draw()
p.cd()
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
g_bb2n.GetYaxis().SetTitle("dN/dE")
g_bb2n.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
c_both.cd()
p_inset = ROOT.TPad("p_inset","p_inset",0.5, 0.5, 0.995, 0.995)
p_inset.SetRightMargin(0.05)
p_inset.SetTopMargin(0.05)
p_inset.Draw()
p_inset.cd()
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
g_bb2n_z.GetYaxis().SetTitle("dN/dE")
g_bb2n_z.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
g_bb2n_z.GetYaxis().SetNoExponent(False)
# Zoom in so we can't see edge effects of the convolution
g_bb2n_z.GetXaxis().SetRangeUser(1-0.25*(1-min_E), 1+0.25*(max_E-1))
g_bb2n_z.GetYaxis().SetRangeUser(0, 0.0004)
c_z = ROOT.TCanvas("c_z","c_z")
c_z.SetRightMargin(0.05)
c_z.SetTopMargin(0.05)
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
c = ROOT.TCanvas("c","c")
c.SetRightMargin(0.05)
c.SetTopMargin(0.05)
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
c_majoron = ROOT.TCanvas("c_majoron")
c_majoron.SetRightMargin(0.05)
c_majoron.SetTopMargin(0.05)
colors = [ROOT.kBlack, ROOT.kRed, ROOT.kGreen, ROOT.kBlue,
ROOT.kMagenta, ROOT.kCyan]
draw_opt = "AL"
for i in xrange(len(bb0nX_graphs)):
bb0nX_graphs[-(i+1)].SetLineColor(colors[-(i+1)])
bb0nX_graphs[-(i+1)].Draw(draw_opt)
draw_opt = "L"
# Draw bb0n last so it doesn't scale others to 0
bb0nX_graphs[-1].SetTitle("")
bb0nX_graphs[-1].GetXaxis().SetRangeUser(0, 1.1)
bb0nX_graphs[-1].GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
bb0nX_graphs[-1].GetYaxis().SetTitle("dN/dE")
l_majoron = ROOT.TLegend(0.45, 0.77, 0.85, 0.94)
l_majoron.SetFillColor(ROOT.kWhite)
l_majoron.SetNColumns(2)
l_majoron.AddEntry(bb0nX_graphs[0], "0#nu#beta#beta", "l")
l_majoron.AddEntry(bb0nX_graphs[1], "0#nu#beta#beta#chi^{0} (n=1)", "l")
l_majoron.AddEntry(bb0nX_graphs[4], "2#nu#beta#beta (n=5)", "l")
l_majoron.AddEntry(bb0nX_graphs[2], "0#nu#beta#beta#chi^{0} (n=2)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[3], "0#nu#beta#beta#chi^{0}(#chi^{0}) (n=3)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[5], "0#nu#beta#beta#chi^{0}#chi^{0} (n=7)", "l")
l_majoron.Draw()
dummy = raw_input("Press Enter...")
| bb0nX.append([SumSpectrum(E, i) for E in Es]) | conditional_block |
make_bb_spectrum_plot.py | import ROOT
from math import pi, sqrt, pow, exp
import scipy.integrate
import numpy
from array import array
alpha = 7.2973e-3
m_e = 0.51099892
Z_Xe = 54
Q = 2.4578
def F(Z, KE):
E = KE + m_e
W = E/m_e
Z0 = Z + 2
if W <= 1:
W = 1 + 1e-4
if W > 2.2:
a = -8.46e-2 + 2.48e-2*Z0 + 2.37e-4*Z0**2
b = 1.15e-2 + 3.58e-4*Z0 - 6.17e-5*Z0**2
else:
a = -0.811 + 4.46e-2*Z0 + 1.08e-4*Z0**2
b = 0.673 - 1.82e-2*Z0 + 6.38e-5*Z0**2
x = sqrt(W-1)
p = sqrt(W**2 - 1)
if (p <= 0):
result = 1
else:
result = W/p*exp(a + b*x)
return result
def D(D, K, i):
Z = Z_Xe
T0 = Q/m_e
E1 = 0.5*(K+D) + 1
E2 = 0.5*(K-D) + 1
p1 = sqrt(E1**2 - 1)
p2 = sqrt(E2**2 - 1)
T1 = E1 - 1
T2 = E2 - 1
| if K < 0:
return 0
elif K > Q:
return 0
a = -K/m_e
b = K/m_e
x = scipy.integrate.quad(D, a, b, (K/m_e, i))[0]
if x < 0:
return 0
else:
return x
def gauss_conv(x, y, res):
N = len(x)
mu = numpy.mean(x)
s = res*mu
gauss = [1.0/(s*sqrt(2*pi))*exp(-0.5*((a-mu)/s)**2) for a in x]
convolution = numpy.convolve(y, gauss,'same')
return convolution
def normalize(y, eps, f):
return [a*f for a in y]
N = 1000
min_E = 0.0
max_E = 1.2
E_scaled = array('d', numpy.linspace(min_E, max_E, N, False))
Es = array('d', (E*Q for E in E_scaled))
eps = (max_E - min_E)/N
bb0n = [0.5/eps if abs(E-Q)<eps else 0 for E in Es]
bb2n = [SumSpectrum(E, 5) for E in Es]
bb0n_smeared = gauss_conv(Es, bb0n, 0.02)
bb2n_smeared = gauss_conv(Es, bb2n, 0.02)
bb0n_int = scipy.integrate.simps(bb0n_smeared, None, eps)
bb0n_norm = array('d', normalize(bb0n_smeared, eps, 1e-2/bb0n_int))
bb2n_int = scipy.integrate.simps(bb2n_smeared, None, eps)
bb2n_norm = array('d', normalize(bb2n_smeared, eps, 1/bb2n_int))
g_bb0n = ROOT.TGraph(N, E_scaled, bb0n_norm)
g_bb0n.SetTitle("")
g_bb0n.SetLineStyle(ROOT.kDashed)
g_bb2n = ROOT.TGraph(N, E_scaled, bb2n_norm)
g_bb2n.SetTitle("")
bb0nX = []
bb0nX.append([0.5/eps if abs(E-Q)<eps else 0 for E in Es])
for i in [1, 2, 3, 5, 7]:
bb0nX.append([SumSpectrum(E, i) for E in Es])
bb0nX_graphs = []
for bb0nXn in bb0nX:
bb0nX_int = scipy.integrate.simps(bb0nXn, None, eps)
bb0nX_norm = array('d', normalize(bb0nXn, eps, 1/bb0nX_int))
g_bb0nX = ROOT.TGraph(N, E_scaled, bb0nX_norm)
bb0nX_graphs.append(g_bb0nX)
min_E = 0.9
max_E = 1.1
E_scaled_z = array('d', numpy.linspace(min_E, max_E, N, False))
Es_z = array('d', (E*Q for E in E_scaled_z))
eps_z = (max_E - min_E)/N
bb0n_z = [0.5/eps_z if abs(E-Q)<eps_z else 0 for E in Es_z]
bb2n_z = [SumSpectrum(E, 5) for E in Es_z]
bb0n_smeared_z = gauss_conv(Es_z, bb0n_z, 0.02)
bb2n_smeared_z = gauss_conv(Es_z, bb2n_z, 0.02)
bb0n_norm_z = array('d', normalize(bb0n_smeared_z, eps, 1e-6/bb0n_int))
bb2n_norm_z = array('d', normalize(bb2n_smeared_z, eps, 1.0/bb2n_int))
g_bb0n_z = ROOT.TGraph(N, E_scaled_z, bb0n_norm_z)
g_bb0n_z.SetTitle("")
g_bb0n_z.SetLineStyle(ROOT.kDashed)
g_bb2n_z = ROOT.TGraph(N, E_scaled_z, bb2n_norm_z)
g_bb2n_z.SetTitle("")
#print("bb0n %f"%(sum((y*eps for y in bb0n_norm))))
#print("bb2n %f"%(sum((y*eps for y in bb2n_norm))))
c_both = ROOT.TCanvas("c_both","c_both")
p = ROOT.TPad("p", "p", 0, 0, 1, 1)
p.SetRightMargin(0.02)
p.SetTopMargin(0.02)
p.Draw()
p.cd()
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
g_bb2n.GetYaxis().SetTitle("dN/dE")
g_bb2n.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
c_both.cd()
p_inset = ROOT.TPad("p_inset","p_inset",0.5, 0.5, 0.995, 0.995)
p_inset.SetRightMargin(0.05)
p_inset.SetTopMargin(0.05)
p_inset.Draw()
p_inset.cd()
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
g_bb2n_z.GetYaxis().SetTitle("dN/dE")
g_bb2n_z.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
g_bb2n_z.GetYaxis().SetNoExponent(False)
# Zoom in so we can't see edge effects of the convolution
g_bb2n_z.GetXaxis().SetRangeUser(1-0.25*(1-min_E), 1+0.25*(max_E-1))
g_bb2n_z.GetYaxis().SetRangeUser(0, 0.0004)
c_z = ROOT.TCanvas("c_z","c_z")
c_z.SetRightMargin(0.05)
c_z.SetTopMargin(0.05)
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
c = ROOT.TCanvas("c","c")
c.SetRightMargin(0.05)
c.SetTopMargin(0.05)
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
c_majoron = ROOT.TCanvas("c_majoron")
c_majoron.SetRightMargin(0.05)
c_majoron.SetTopMargin(0.05)
colors = [ROOT.kBlack, ROOT.kRed, ROOT.kGreen, ROOT.kBlue,
ROOT.kMagenta, ROOT.kCyan]
draw_opt = "AL"
for i in xrange(len(bb0nX_graphs)):
bb0nX_graphs[-(i+1)].SetLineColor(colors[-(i+1)])
bb0nX_graphs[-(i+1)].Draw(draw_opt)
draw_opt = "L"
# Draw bb0n last so it doesn't scale others to 0
bb0nX_graphs[-1].SetTitle("")
bb0nX_graphs[-1].GetXaxis().SetRangeUser(0, 1.1)
bb0nX_graphs[-1].GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
bb0nX_graphs[-1].GetYaxis().SetTitle("dN/dE")
l_majoron = ROOT.TLegend(0.45, 0.77, 0.85, 0.94)
l_majoron.SetFillColor(ROOT.kWhite)
l_majoron.SetNColumns(2)
l_majoron.AddEntry(bb0nX_graphs[0], "0#nu#beta#beta", "l")
l_majoron.AddEntry(bb0nX_graphs[1], "0#nu#beta#beta#chi^{0} (n=1)", "l")
l_majoron.AddEntry(bb0nX_graphs[4], "2#nu#beta#beta (n=5)", "l")
l_majoron.AddEntry(bb0nX_graphs[2], "0#nu#beta#beta#chi^{0} (n=2)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[3], "0#nu#beta#beta#chi^{0}(#chi^{0}) (n=3)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[5], "0#nu#beta#beta#chi^{0}#chi^{0} (n=7)", "l")
l_majoron.Draw()
dummy = raw_input("Press Enter...") | return p1*E1*F(Z, T1*m_e)*p2*E2*F(Z, T2*m_e)*pow(T0 - K, i)
def SumSpectrum(K, i): | random_line_split |
make_bb_spectrum_plot.py | import ROOT
from math import pi, sqrt, pow, exp
import scipy.integrate
import numpy
from array import array
alpha = 7.2973e-3
m_e = 0.51099892
Z_Xe = 54
Q = 2.4578
def F(Z, KE):
E = KE + m_e
W = E/m_e
Z0 = Z + 2
if W <= 1:
W = 1 + 1e-4
if W > 2.2:
a = -8.46e-2 + 2.48e-2*Z0 + 2.37e-4*Z0**2
b = 1.15e-2 + 3.58e-4*Z0 - 6.17e-5*Z0**2
else:
a = -0.811 + 4.46e-2*Z0 + 1.08e-4*Z0**2
b = 0.673 - 1.82e-2*Z0 + 6.38e-5*Z0**2
x = sqrt(W-1)
p = sqrt(W**2 - 1)
if (p <= 0):
result = 1
else:
result = W/p*exp(a + b*x)
return result
def D(D, K, i):
Z = Z_Xe
T0 = Q/m_e
E1 = 0.5*(K+D) + 1
E2 = 0.5*(K-D) + 1
p1 = sqrt(E1**2 - 1)
p2 = sqrt(E2**2 - 1)
T1 = E1 - 1
T2 = E2 - 1
return p1*E1*F(Z, T1*m_e)*p2*E2*F(Z, T2*m_e)*pow(T0 - K, i)
def SumSpectrum(K, i):
if K < 0:
return 0
elif K > Q:
return 0
a = -K/m_e
b = K/m_e
x = scipy.integrate.quad(D, a, b, (K/m_e, i))[0]
if x < 0:
return 0
else:
return x
def | (x, y, res):
N = len(x)
mu = numpy.mean(x)
s = res*mu
gauss = [1.0/(s*sqrt(2*pi))*exp(-0.5*((a-mu)/s)**2) for a in x]
convolution = numpy.convolve(y, gauss,'same')
return convolution
def normalize(y, eps, f):
return [a*f for a in y]
N = 1000
min_E = 0.0
max_E = 1.2
E_scaled = array('d', numpy.linspace(min_E, max_E, N, False))
Es = array('d', (E*Q for E in E_scaled))
eps = (max_E - min_E)/N
bb0n = [0.5/eps if abs(E-Q)<eps else 0 for E in Es]
bb2n = [SumSpectrum(E, 5) for E in Es]
bb0n_smeared = gauss_conv(Es, bb0n, 0.02)
bb2n_smeared = gauss_conv(Es, bb2n, 0.02)
bb0n_int = scipy.integrate.simps(bb0n_smeared, None, eps)
bb0n_norm = array('d', normalize(bb0n_smeared, eps, 1e-2/bb0n_int))
bb2n_int = scipy.integrate.simps(bb2n_smeared, None, eps)
bb2n_norm = array('d', normalize(bb2n_smeared, eps, 1/bb2n_int))
g_bb0n = ROOT.TGraph(N, E_scaled, bb0n_norm)
g_bb0n.SetTitle("")
g_bb0n.SetLineStyle(ROOT.kDashed)
g_bb2n = ROOT.TGraph(N, E_scaled, bb2n_norm)
g_bb2n.SetTitle("")
bb0nX = []
bb0nX.append([0.5/eps if abs(E-Q)<eps else 0 for E in Es])
for i in [1, 2, 3, 5, 7]:
bb0nX.append([SumSpectrum(E, i) for E in Es])
bb0nX_graphs = []
for bb0nXn in bb0nX:
bb0nX_int = scipy.integrate.simps(bb0nXn, None, eps)
bb0nX_norm = array('d', normalize(bb0nXn, eps, 1/bb0nX_int))
g_bb0nX = ROOT.TGraph(N, E_scaled, bb0nX_norm)
bb0nX_graphs.append(g_bb0nX)
min_E = 0.9
max_E = 1.1
E_scaled_z = array('d', numpy.linspace(min_E, max_E, N, False))
Es_z = array('d', (E*Q for E in E_scaled_z))
eps_z = (max_E - min_E)/N
bb0n_z = [0.5/eps_z if abs(E-Q)<eps_z else 0 for E in Es_z]
bb2n_z = [SumSpectrum(E, 5) for E in Es_z]
bb0n_smeared_z = gauss_conv(Es_z, bb0n_z, 0.02)
bb2n_smeared_z = gauss_conv(Es_z, bb2n_z, 0.02)
bb0n_norm_z = array('d', normalize(bb0n_smeared_z, eps, 1e-6/bb0n_int))
bb2n_norm_z = array('d', normalize(bb2n_smeared_z, eps, 1.0/bb2n_int))
g_bb0n_z = ROOT.TGraph(N, E_scaled_z, bb0n_norm_z)
g_bb0n_z.SetTitle("")
g_bb0n_z.SetLineStyle(ROOT.kDashed)
g_bb2n_z = ROOT.TGraph(N, E_scaled_z, bb2n_norm_z)
g_bb2n_z.SetTitle("")
#print("bb0n %f"%(sum((y*eps for y in bb0n_norm))))
#print("bb2n %f"%(sum((y*eps for y in bb2n_norm))))
c_both = ROOT.TCanvas("c_both","c_both")
p = ROOT.TPad("p", "p", 0, 0, 1, 1)
p.SetRightMargin(0.02)
p.SetTopMargin(0.02)
p.Draw()
p.cd()
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
g_bb2n.GetYaxis().SetTitle("dN/dE")
g_bb2n.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
c_both.cd()
p_inset = ROOT.TPad("p_inset","p_inset",0.5, 0.5, 0.995, 0.995)
p_inset.SetRightMargin(0.05)
p_inset.SetTopMargin(0.05)
p_inset.Draw()
p_inset.cd()
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
g_bb2n_z.GetYaxis().SetTitle("dN/dE")
g_bb2n_z.GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
g_bb2n_z.GetYaxis().SetNoExponent(False)
# Zoom in so we can't see edge effects of the convolution
g_bb2n_z.GetXaxis().SetRangeUser(1-0.25*(1-min_E), 1+0.25*(max_E-1))
g_bb2n_z.GetYaxis().SetRangeUser(0, 0.0004)
c_z = ROOT.TCanvas("c_z","c_z")
c_z.SetRightMargin(0.05)
c_z.SetTopMargin(0.05)
g_bb2n_z.Draw("AL")
g_bb0n_z.Draw("L")
c = ROOT.TCanvas("c","c")
c.SetRightMargin(0.05)
c.SetTopMargin(0.05)
g_bb2n.Draw("AL")
g_bb0n.Draw("L")
c_majoron = ROOT.TCanvas("c_majoron")
c_majoron.SetRightMargin(0.05)
c_majoron.SetTopMargin(0.05)
colors = [ROOT.kBlack, ROOT.kRed, ROOT.kGreen, ROOT.kBlue,
ROOT.kMagenta, ROOT.kCyan]
draw_opt = "AL"
for i in xrange(len(bb0nX_graphs)):
bb0nX_graphs[-(i+1)].SetLineColor(colors[-(i+1)])
bb0nX_graphs[-(i+1)].Draw(draw_opt)
draw_opt = "L"
# Draw bb0n last so it doesn't scale others to 0
bb0nX_graphs[-1].SetTitle("")
bb0nX_graphs[-1].GetXaxis().SetRangeUser(0, 1.1)
bb0nX_graphs[-1].GetXaxis().SetTitle("Sum e^{-} Energy (E/Q)")
bb0nX_graphs[-1].GetYaxis().SetTitle("dN/dE")
l_majoron = ROOT.TLegend(0.45, 0.77, 0.85, 0.94)
l_majoron.SetFillColor(ROOT.kWhite)
l_majoron.SetNColumns(2)
l_majoron.AddEntry(bb0nX_graphs[0], "0#nu#beta#beta", "l")
l_majoron.AddEntry(bb0nX_graphs[1], "0#nu#beta#beta#chi^{0} (n=1)", "l")
l_majoron.AddEntry(bb0nX_graphs[4], "2#nu#beta#beta (n=5)", "l")
l_majoron.AddEntry(bb0nX_graphs[2], "0#nu#beta#beta#chi^{0} (n=2)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[3], "0#nu#beta#beta#chi^{0}(#chi^{0}) (n=3)", "l")
l_majoron.AddEntry(None, "", "")
l_majoron.AddEntry(bb0nX_graphs[5], "0#nu#beta#beta#chi^{0}#chi^{0} (n=7)", "l")
l_majoron.Draw()
dummy = raw_input("Press Enter...")
| gauss_conv | identifier_name |
hero.js | /**
* Created by kaicui on 17/8/22.
* Represents a hero on the battlefield.
*
* Note:
* 1. After a hero is added to a team, some properties (e.g. a team property) may be injected into it.
*/
const oop = require("local-libs").oop;
const event = require("local-libs").event;
const Levelable = require("../../level/levelable");
const {SkillType} = require("../../skill/skill");
const {getRaceCamp} = require("./camp");
const {getJob} = require("./job");
const {Star} = require("./star");
const {injectHeroAttributes,HeroBaseAttributes,HeroDeriveAttributes,HeroOtherAttributes} = require("./attributeRule");
const statusEnum = require("../../effect/implement/statusEnum");
const logger = require('../../../log/logger');
const HeroEvents=require("../lifeCycle").HeroEvents;
const SP_ADD_PER_EFFECT =20 ;//SP gained after each normal attack
let Hero = oop.defineClass({
super:Levelable,
constructor:function({
levelCur, //number or Integer object: the current level
levelMax, //number or Integer object: the maximum level
exp, // number: the experience gained so far
expTableName="small_03", // String: name of the experience growth curve
},{
id,//hero id
name,//hero name
raceCampCode, //race & camp code
jobCode, //job code
starLevel,//number: star count
skills,//array: the hero's skills; index 0 is the normal attack, index 1 is the active skill
context,//the context the hero lives in; effects on the hero rely on it to receive events from the outside world
rawAttributes, //attribute object holding every persisted character attribute, e.g. str,agi,vit,int,dex,luk,hp,sp
}){
var self = this;
self.id = id;
self.name = name;
self.context = context;//the context the hero lives in; effects on the hero rely on it to receive events from the outside world
//initialize the 6 base attributes and the derived attributes built on them
injectHeroAttributes(self,rawAttributes);
self.camp = getRaceCamp(raceCampCode);//race & camp
self.job = getJob(jobCode);//job
self.star = new Star(starLevel);//star level
self.skills = skills;
self.isDead = self.getAttr(HeroOtherAttributes.HP).getVal()===0;//whether the hero is already dead
if(self.isDead){
//on death, all energy is lost (SP set to 0)
self.getAttr(HeroOtherAttributes.SP).updateAddPercent(-1);
}
//watch HP changes: when HP drops to 0 (and no reborn effect is present) the death events are fired
self.on("attrChange",(attr,total,raw,modify,val,oldTotal)=>{
if(attr.name===HeroOtherAttributes.HP){
if(total<=0){
self.emit(HeroEvents.BEFORE_HERO_DIE);
logger.debug(`Hero died: [${self}]`);
//set the death flag
self.isDead = true;
//on death, all energy is lost (SP set to 0)
//dispatch the mutation to the target
self.takeMutation({
from:self,
mutation:{
[HeroOtherAttributes.SP]:0-self.getAttr(HeroOtherAttributes.SP).getVal()
},
});
// self.getAttr(HeroOtherAttributes.SP).updateAddPercent(-1);
self.emit(HeroEvents.AFTER_HERO_DIE);
}else{
if(oldTotal<=0){
logger.debug(`Hero revived: [${self}]`);
self.emit(HeroEvents.BEFORE_HERO_REBORN);
//clear the death flag
self.isDead = false;
self.emit(HeroEvents.AFTER_HERO_REBORN);
}
}
}
});
// self.vital = vital;//vitality points
},
prototype:{
/**
* Convert this object into a plain object with no circular references.
* @param serializeLevel: controls how much hero information gets serialized
*/
toJSONObject:function ({serializeLevel}) {
var self = this;
if(serializeLevel === 1){
return {
id:self.id,
name:self.name,
level:self.levelCur.getVal(),
camp:self.camp,
job:self.job,
star:self.star.level,
attrMap:{
[HeroOtherAttributes.HP]:self.getAttr(HeroOtherAttributes.HP).getVal(),
[HeroDeriveAttributes.HP_MAX]:self.getAttr(HeroDeriveAttributes.HP_MAX).getVal(),
[HeroOtherAttributes.SP]:self.getAttr(HeroOtherAttributes.SP).getVal(),
[HeroOtherAttributes.SP_MAX]:self.getAttr(HeroOtherAttributes.SP_MAX).getVal()
}
}
}
},
/**
* Render detailed hero information.
* @param detail
* @returns {string}
*/
toString:function (detail) {
if(!detail){
return `${this.name}:[${this.id}],hp:[${this.getAttr(HeroOtherAttributes.HP).getVal()}/${this.getAttr(HeroDeriveAttributes.HP_MAX).getVal()}],sp:[${this.getAttr(HeroOtherAttributes.SP).getVal()}/${this.getAttr(HeroOtherAttributes.SP_MAX).getVal()}],SPD:[${this.getAttr(HeroDeriveAttributes.SPD).getVal()}]`
}else{
return `
${this.name}:[${this.id}]:\r\n
hp:[${this.getAttr(HeroOtherAttributes.HP).getVal()}/${this.getAttr(HeroDeriveAttributes.HP_MAX).getVal()}],
sp:[${this.getAttr(HeroOtherAttributes.SP).getVal()}/${this.getAttr(HeroOtherAttributes.SP_MAX).getVal()}],
SPD:[${this.getAttr(HeroDeriveAttributes.SPD).getVal()}],
| s.DEF).getVal()}],
M_ATK:[${this.getAttr(HeroDeriveAttributes.M_ATK).getVal()}],
M_DEF:[${this.getAttr(HeroDeriveAttributes.M_DEF).getVal()}],
`
}
},
/**
* Bind the hero to a player object and start the hero's life cycle.
* @param player
*/
initOnPlayer:function (player) {
var self = this;
self.context = player;
self.skills&&self.skills.forEach((sk)=>{
sk.holder = self;
if(sk.type === SkillType.PASSIVE){
logger.debug(`About to release the passive skill of [${self.toString()}], context=${self.context}`);
sk.release(self.context); //release passive skills immediately
logger.debug(`Finished releasing the passive skill of [${self.toString()}], context=${self.context}`);
}
});
return self;
},
/**
* Check whether the hero is completely dead (hp<=0 and not marked with a reborn effect).
* @returns {boolean}
*/
isCompleteDead:function () {
var self = this;
if(self.getAttr(HeroOtherAttributes.HP).getVal()>0 || self.hasEffect("reborn")){
return false;
}else{
return true;
}
},
/** todo: clone a copy of this hero. The copy can live in a brand-new context.
* The copy's base attributes and equipment stay linked to the original, plus some per-copy state, including:
* hp value, rage (SP), effect list, etc.
*/
copy:function (newContext) {
var self = this;
},
/**
* Whether the ultimate skill can be released right now.
* @returns {boolean}
*/
canBigSkill:function () {
var self = this;
let can = self.getAttr(HeroOtherAttributes.SP).getVal()>=self.getAttr(HeroOtherAttributes.SP_MAX).getVal();
//check the hero's effect list for a seal effect that blocks skills
let statusEffects = self.getEffect(function (ef) {
return ef.name ==='Status' && ef.params.stopSkill===true
});
if(statusEffects && statusEffects.length > 0){
can = false;
}
return can;
},
/**
* Whether the character can act at all.
* @returns {boolean}
*/
canAction:function () {
var self = this;
//by default a living hero can act
let can = !self.isDead;
//check the hero's effect list for stun/freeze effects
let statusEffects = self.getEffect(function (ef) {
return ef.name ==='Status' && ef.params.stopAction===true
});
if(statusEffects && statusEffects.length > 0){
can = false;
}
return can;
},
//the hero starts an active move, e.g. a normal attack or a skill attack
startAction:function () {
var self = this;
let skillIndex = 0; //defaults to the normal attack (skill 0)
/*
If the ultimate requirements are met (enough rage, no seal), trigger skill 1.
*/
// if(self.getAttr(HeroOtherAttributes.SP).getVal()>=self.getAttr(HeroOtherAttributes.SP_MAX).getVal()){
if(self.canBigSkill()){
logger.debug(`[${self}] is about to unleash its active skill!`)
skillIndex = 1;
}
//release the skill (note the normal attack is treated as a skill too, fixed at index 0)
self.releaseSkill(skillIndex);
//note: after releasing skill 1 the rage is emptied; otherwise rage is gained
if(skillIndex===0){
//gain rage
self.takeMutation({
from:self,
mutation:{
[HeroOtherAttributes.SP]:SP_ADD_PER_EFFECT
}
});
}else{
//empty the rage
self.takeMutation({
from:self,
mutation:{
[HeroOtherAttributes.SP]: 0-self.getAttr(HeroOtherAttributes.SP).getVal()
}
});
}
},
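// Flow sketch (illustrative numbers; SP_MAX actually comes from the hero's attributes):
// with SP_MAX = 100 and SP_ADD_PER_EFFECT = 20, five normal attacks fill the rage
// bar (20, 40, 60, 80, 100); the next startAction() then releases skill 1 and the
// follow-up mutation drains SP back to 0.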
/**
* Make the hero release a skill immediately.
*
* Note: this method produces no rage/SP changes by itself (startAction handles those).
* @param index
*/
releaseSkill:function (index) {
var self = this;
let skillToRelease = self.skills[index];
self.emit(HeroEvents.BEFORE_ACTION,skillToRelease);
skillToRelease.release(self.context); //release the skill
self.emit(HeroEvents.AFTER_ACTION,skillToRelease);
},
/**
* Receive a batched attribute-change request (note: this is a one-off change, not a modify).
* @param from: the effect object the change originates from
* @param remark: a note attached to this change, used when presenting the data
* @param mutation: key: attrName, value: changeNum (+ means increase, - means decrease)
*/
takeMutation:function ({from,mutation,remark}) {
var self = this;
logger.debug(`[${self.toString()}] is about to receive mutation: ${JSON.stringify(mutation)}`);
self.emit(HeroEvents.BEFORE_MUTATION,from,mutation,remark);
let mutationResult = {};//key:attrName value:value after mutation
//apply the change to every attribute listed in the mutation
for(var attName in mutation){
self.getAttr(attName).updateAdd(mutation[attName]);
mutationResult[attName]=self.getAttr(attName).getVal();
}
self.emit(HeroEvents.AFTER_MUTATION,from,mutation,remark,mutationResult);
},
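// Usage sketch (hypothetical values): deal 50 damage and drain 10 SP in one call:
// hero.takeMutation({
// from: attackerEffect,
// mutation: { [HeroOtherAttributes.HP]: -50, [HeroOtherAttributes.SP]: -10 },
// remark: "fire strike"
// });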
/**
* Join a team.
* @param team
* @returns {Hero}
*/
joinTeam:function (team) {
this.team = team;
return this;
}
}
});
module.exports = {Hero,HeroEvents};
| CRI:[${this.getAttr(HeroDeriveAttributes.CRI).getVal()}],
HIT:[${this.getAttr(HeroDeriveAttributes.HIT).getVal()}],
FLEE:[${this.getAttr(HeroDeriveAttributes.FLEE).getVal()}],
ATK:[${this.getAttr(HeroDeriveAttributes.ATK).getVal()}],
DEF:[${this.getAttr(HeroDeriveAttribute | conditional_block |
hero.js | /**
* Created by kaicui on 17/8/22.
* Represents a hero on the battlefield.
*
* Note:
* 1. After a hero is added to a team, some properties (e.g. a team property) may be injected into it.
*/
const oop = require("local-libs").oop;
const event = require("local-libs").event;
const Levelable = require("../../level/levelable");
const {SkillType} = require("../../skill/skill");
const {getRaceCamp} = require("./camp");
const {getJob} = require("./job");
const {Star} = require("./star");
const {injectHeroAttributes,HeroBaseAttributes,HeroDeriveAttributes,HeroOtherAttributes} = require("./attributeRule");
const statusEnum = require("../../effect/implement/statusEnum");
const logger = require('../../../log/logger');
const HeroEvents=require("../lifeCycle").HeroEvents;
const SP_ADD_PER_EFFECT =20 ;//SP gained after each normal attack
let Hero = oop.defineClass({
super:Levelable,
constructor:function({
levelCur, //number or Integer object: the current level
levelMax, //number or Integer object: the maximum level
exp, // number: the experience gained so far
expTableName="small_03", // String: name of the experience growth curve
},{
id,//hero id
name,//hero name
raceCampCode, //race & camp code
jobCode, //job code
starLevel,//number: star count
skills,//array: the hero's skills; index 0 is the normal attack, index 1 is the active skill
context,//the context the hero lives in; effects on the hero rely on it to receive events from the outside world
rawAttributes, //attribute object holding every persisted character attribute, e.g. str,agi,vit,int,dex,luk,hp,sp
}){
var self = this;
self.id = id;
self.name = name;
self.context = context;//the context the hero lives in; effects on the hero rely on it to receive events from the outside world
//initialize the 6 base attributes and the derived attributes built on them
injectHeroAttributes(self,rawAttributes);
self.camp = getRaceCamp(raceCampCode);//race & camp
self.job = getJob(jobCode);//job
self.star = new Star(starLevel);//star level
self.skills = skills;
self.isDead = self.getAttr(HeroOtherAttributes.HP).getVal()===0;//whether the hero is already dead
if(self.isDead){
//on death, all energy is lost (SP set to 0)
self.getAttr(HeroOtherAttributes.SP).updateAddPercent(-1);
}
//watch HP changes: when HP drops to 0 (and no reborn effect is present) the death events are fired
self.on("attrChange",(attr,total,raw,modify,val,oldTotal)=>{
if(attr.name===HeroOtherAttributes.HP){
if(total<=0){
self.emit(HeroEvents.BEFORE_HERO_DIE);
logger.debug(`Hero died: [${self}]`);
//set the death flag
self.isDead = true;
//on death, all energy is lost (SP set to 0)
//dispatch the mutation to the target
self.takeMutation({
from:self,
mutation:{
[HeroOtherAttributes.SP]:0-self.getAttr(HeroOtherAttributes.SP).getVal()
},
});
// self.getAttr(HeroOtherAttributes.SP).updateAddPercent(-1);
self.emit(HeroEvents.AFTER_HERO_DIE);
}else{
if(oldTotal<=0){
logger.debug(`Hero revived: [${self}]`);
self.emit(HeroEvents.BEFORE_HERO_REBORN);
//clear the death flag
self.isDead = false;
self.emit(HeroEvents.AFTER_HERO_REBORN);
}
}
}
});
// self.vital = vital;//vitality points
},
prototype:{
/**
* Convert this object into a plain object with no circular references.
* @param serializeLevel: controls how much hero information gets serialized
*/
toJSONObject:function ({serializeLevel}) {
var self = this;
if(serializeLevel === 1){
return {
id:self.id,
name:self.name,
level:self.levelCur.getVal(),
camp:self.camp,
job:self.job,
star:self.star.level,
attrMap:{
[HeroOtherAttributes.HP]:self.getAttr(HeroOtherAttributes.HP).getVal(),
[HeroDeriveAttributes.HP_MAX]:self.getAttr(HeroDeriveAttributes.HP_MAX).getVal(),
[HeroOtherAttributes.SP]:self.getAttr(HeroOtherAttributes.SP).getVal(),
[HeroOtherAttributes.SP_MAX]:self.getAttr(HeroOtherAttributes.SP_MAX).getVal()
}
}
}
},
        /**
         * Render the hero's detailed information.
         * @param detail
         * @returns {string}
         */
toString:function (detail) {
if(!detail){
return `${this.name}:[${this.id}],hp:[${this.getAttr(HeroOtherAttributes.HP).getVal()}/${this.getAttr(HeroDeriveAttributes.HP_MAX).getVal()}],sp:[${this.getAttr(HeroOtherAttributes.SP).getVal()}/${this.getAttr(HeroOtherAttributes.SP_MAX).getVal()}],SPD:[${this.getAttr(HeroDeriveAttributes.SPD).getVal()}]`
}else{
return `
${this.name}:[${this.id}]:\r\n
hp:[${this.getAttr(HeroOtherAttributes.HP).getVal()}/${this.getAttr(HeroDeriveAttributes.HP_MAX).getVal()}],
sp:[${this.getAttr(HeroOtherAttributes.SP).getVal()}/${this.getAttr(HeroOtherAttributes.SP_MAX).getVal()}],
SPD:[${this.getAttr(HeroDeriveAttributes.SPD).getVal()}],
CRI:[${this.getAttr(HeroDeriveAttributes.CRI).getVal()}],
HIT:[${this.getAttr(HeroDeriveAttributes.HIT).getVal()}],
FLEE:[${this.getAttr(HeroDeriveAttributes.FLEE).getVal()}],
ATK:[${this.getAttr(HeroDeriveAttributes.ATK).getVal()}],
DEF:[${this.getAttr(HeroDeriveAttributes.DEF).getVal()}],
M_ATK:[${this.getAttr(HeroDeriveAttributes.M_ATK).getVal()}],
M_DEF:[${this.getAttr(HeroDeriveAttributes.M_DEF).getVal()}],
`
}
},
        /**
         * Bind the hero to a player object and start the hero's life cycle.
         * @param player
         */
initOnPlayer:function (player) {
var self = this;
self.context = player;
self.skills&&self.skills.forEach((sk)=>{
sk.holder = self;
if(sk.type === SkillType.PASSIVE){
                    logger.debug(`about to release the passive skill of [${self.toString()}], context=${self.context}`);
                    sk.release(self.context); // release the passive skill immediately
                    logger.debug(`finished releasing the passive skill of [${self.toString()}], context=${self.context}`);
}
});
return self;
},
        /**
         * Whether the hero is completely dead (hp <= 0 and no reborn effect flagged).
         * @returns {boolean}
         */
isCompleteDead:function () {
var self = this;
if(self.getAttr(HeroOtherAttributes.HP).getVal()>0 || self.hasEffect("reborn")){
return false;
}else{
return true;
}
},
        /** todo: create a copy of this hero; the copy's context may be a brand-new one.
         * The copy keeps its base attributes and equipment linked to the original, plus some
         * customized state, including: hp, rage (SP), the effect list, etc.
         */
copy:function (newContext) {
var self = this;
},
        /**
         * Whether the ultimate (active) skill can be used right now.
         * @returns {boolean}
         */
canBigSkill:function () {
var self = this;
let can = self.getAttr(HeroOtherAttributes.SP).getVal()>=self.getAttr(HeroOtherAttributes.SP_MAX).getVal();
            // check whether a sealing effect exists in the hero's effect list
let statusEffects = self.getEffect(function (ef) {
return ef.name ==='Status' && ef.params.stopSkill===true
});
if(statusEffects && statusEffects.length > 0){
can = false;
}
return can;
},
        /**
         * Whether the character can take an action.
         * @returns {boolean}
         */
canAction:function () {
var self = this;
            // by default, a living hero can act
            let can = !self.isDead;
            // check whether stun or freeze effects remain in the hero's effect list
let statusEffects = self.getEffect(function (ef) {
return ef.name ==='Status' && ef.params.stopAction===true
});
if(statusEffects && statusEffects.length > 0){
can = false;
}
return can;
},
        // the hero starts an active action, e.g. a normal attack or a skill attack
startAction:function () {
var self = this;
            let skillIndex = 0; // defaults to the normal attack (skill 0)
            /*
             If the ultimate requirements are met (enough rage, not sealed), trigger skill 1.
             */
            // if(self.getAttr(HeroOtherAttributes.SP).getVal()>=self.getAttr(HeroOtherAttributes.SP_MAX).getVal()){
if(self.canBigSkill()){
                logger.debug(`[${self}] is about to use its active skill!`);
skillIndex = 1;
}
            // release the skill (note: a normal attack is treated as a skill too, fixed as skill 0)
            self.releaseSkill(skillIndex);
            // note: after releasing skill 1 the rage is cleared; otherwise rage is gained
if(skillIndex===0){
                // gain rage (SP)
self.takeMutation({
from:self,
mutation:{
[HeroOtherAttributes.SP]:SP_ADD_PER_EFFECT
}
});
}else{
                // clear the rage (SP)
self.takeMutation({
from:self,
mutation:{
[HeroOtherAttributes.SP]: 0-self.getAttr(HeroOtherAttributes.SP).getVal()
}
});
}
},
        /**
         * Make the hero release a given skill immediately.
         *
         * Note: this method itself does not generate rage.
         * @param index
         */
releaseSkill:function (index) {
var self = this;
let skillToRelease = self.skills[index];
self.emit(HeroEvents.BEFORE_ACTION,skillToRelease);
            skillToRelease.release(self.context); // release the skill
            self.emit(HeroEvents.AFTER_ACTION,skillToRelease);
        },
        /**
         * Receive a batch attribute-change request (note: this is a one-shot change, not a modify).
         * @param from: the effect object this change comes from
         * @param remark: a note attached to this change, used to display extra information with the data
         * @param mutation: key: attrName, value: changeNum (+ means increase, - means decrease)
*/
takeMutation:function ({from,mutation,remark}) {
var self = this;
            logger.debug(`[${self.toString()}] is about to receive mutation: ${JSON.stringify(mutation)}`);
self.emit(HeroEvents.BEFORE_MUTATION,from,mutation,remark);
            let mutationResult = {}; // key: attrName, value: the value after the mutation
            // apply the change to every attribute listed in the mutation
for(var attName in mutation){
self.getAttr(attName).updateAdd(mutation[attName]);
mutationResult[attName]=self.getAttr(attName).getVal();
}
self.emit(HeroEvents.AFTER_MUTATION,from,mutation,remark,mutationResult);
},
        /**
         * Join a team.
         * @param team
         * @returns {Hero}
         */
joinTeam:function (team) {
this.team = team;
return this;
}
}
});
module.exports = {Hero, HeroEvents};
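// Usage sketch (illustrative, not part of the original file): the option values and
// attribute names below are assumptions, and the exact constructor semantics depend
// on the `local-libs` oop helper.
//
// const {Hero, HeroEvents} = require('./hero');
// const hero = new Hero(
//     {levelCur: 1, levelMax: 60, exp: 0, expTableName: "small_03"},
//     {id: "h-001", name: "Knight", raceCampCode: 1, jobCode: 1, starLevel: 3,
//      skills: [], context: null,
//      rawAttributes: {str: 10, agi: 8, vit: 9, int: 5, dex: 7, luk: 6, hp: 100, sp: 0}}
// );
// hero.on(HeroEvents.AFTER_MUTATION,
//     (from, mutation, remark, result) => console.log(result));
// hero.takeMutation({from: hero, mutation: {hp: -30}, remark: "sample hit"});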
test_autocomplete_widget.py
from django import forms
from django.contrib.admin.widgets import AutocompleteSelect
from django.forms import ModelChoiceField
from django.test import TestCase, override_settings
from django.utils import translation
from .models import Album, Band
class AlbumForm(forms.ModelForm):
class Meta:
model = Album
fields = ['band', 'featuring']
widgets = {
'band': AutocompleteSelect(
Album._meta.get_field('band').remote_field,
attrs={'class': 'my-class'},
),
'featuring': AutocompleteSelect(
Album._meta.get_field('featuring').remote_field,
)
}
class NotRequiredBandForm(forms.Form):
band = ModelChoiceField(
queryset=Album.objects.all(),
widget=AutocompleteSelect(Album._meta.get_field('band').remote_field),
required=False,
)
class RequiredBandForm(forms.Form):
band = ModelChoiceField(
queryset=Album.objects.all(),
widget=AutocompleteSelect(Album._meta.get_field('band').remote_field),
required=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AutocompleteMixinTests(TestCase):
empty_option = '<option value=""></option>'
maxDiff = 1000
def test_build_attrs(self):
form = AlbumForm()
attrs = form['band'].field.widget.get_context(name='my_field', value=None, attrs={})['widget']['attrs']
self.assertEqual(attrs, {
            'class': 'my-class admin-autocomplete',
'data-ajax--cache': 'true',
'data-ajax--type': 'GET',
'data-ajax--url': '/admin_widgets/band/autocomplete/',
'data-theme': 'admin-autocomplete',
'data-allow-clear': 'false',
'data-placeholder': ''
})
def test_build_attrs_not_required_field(self):
form = NotRequiredBandForm()
attrs = form['band'].field.widget.build_attrs({})
self.assertJSONEqual(attrs['data-allow-clear'], True)
def test_build_attrs_required_field(self):
form = RequiredBandForm()
attrs = form['band'].field.widget.build_attrs({})
self.assertJSONEqual(attrs['data-allow-clear'], False)
def test_get_url(self):
rel = Album._meta.get_field('band').remote_field
w = AutocompleteSelect(rel)
url = w.get_url()
self.assertEqual(url, '/admin_widgets/band/autocomplete/')
def test_render_options(self):
beatles = Band.objects.create(name='The Beatles', style='rock')
who = Band.objects.create(name='The Who', style='rock')
# With 'band', a ForeignKey.
form = AlbumForm(initial={'band': beatles.pk})
output = form.as_table()
selected_option = '<option value="%s" selected>The Beatles</option>' % beatles.pk
option = '<option value="%s">The Who</option>' % who.pk
self.assertIn(selected_option, output)
self.assertNotIn(option, output)
# With 'featuring', a ManyToManyField.
form = AlbumForm(initial={'featuring': [beatles.pk, who.pk]})
output = form.as_table()
selected_option = '<option value="%s" selected>The Beatles</option>' % beatles.pk
option = '<option value="%s" selected>The Who</option>' % who.pk
self.assertIn(selected_option, output)
self.assertIn(option, output)
def test_render_options_required_field(self):
        """Empty option is present if the field isn't required."""
        form = NotRequiredBandForm()
        output = form.as_table()
        self.assertIn(self.empty_option, output)
def test_render_options_not_required_field(self):
"""Empty option isn't present if the field isn't required."""
form = RequiredBandForm()
output = form.as_table()
self.assertNotIn(self.empty_option, output)
def test_media(self):
rel = Album._meta.get_field('band').remote_field
base_files = (
'admin/js/vendor/jquery/jquery.min.js',
'admin/js/vendor/select2/select2.full.min.js',
# Language file is inserted here.
'admin/js/jquery.init.js',
'admin/js/autocomplete.js',
)
languages = (
('de', 'de'),
# Language with code 00 does not exist.
('00', None),
# Language files are case sensitive.
('sr-cyrl', 'sr-Cyrl'),
('zh-cn', 'zh-CN'),
)
for lang, select_lang in languages:
with self.subTest(lang=lang):
if select_lang:
expected_files = (
base_files[:2] +
(('admin/js/vendor/select2/i18n/%s.js' % select_lang),) +
base_files[2:]
)
else:
expected_files = base_files
with translation.override(lang):
                    self.assertEqual(AutocompleteSelect(rel).media._js, expected_files)
testimonial-list.component.ts
import {Component, OnInit} from '@angular/core';
import {TestimonialService} from "./testimonial.service";
import{TestimonialModel, TestimonialResponse} from "./testimonial.model";
@Component({
selector: 'testimonial-list',
templateUrl: './testimonial-list.html'
})
export class TestimonialComponent implements OnInit {
objListResponse:TestimonialResponse;
error:any;
showForm:boolean = false;
testimonialId:string;
/* Pagination */
perPage:number = 10;
currentPage:number = 1;
totalPage:number = 1;
first:number = 0;
bindSort:boolean = false;
preIndex:number = 0;
/* End Pagination */
ngOnInit() {
this.getTestimonialList();
}
constructor(private _objService:TestimonialService) {
}
getTestimonialList() {
this._objService.getTestimonialList(this.perPage, this.currentPage)
.subscribe(objRes =>this.bindList(objRes),
error => this.errorMessage(error));
}
errorMessage(objResponse:any) {
swal("Alert !", objResponse.message, "info");
}
bindList(objRes:TestimonialResponse) {
this.objListResponse = objRes;
this.preIndex = (this.perPage * (this.currentPage - 1));
if (objRes.dataList.length > 0) {
let totalPage = objRes.totalItems / this.perPage;
this.totalPage = totalPage > 1 ? Math.ceil(totalPage) : 1;
if (!this.bindSort) {
this.bindSort = true;
this.sortTable();
}
else
jQuery("table").trigger("update", [true]);
}
}
sortTable() {
setTimeout(()=> {
jQuery('.tablesorter').tablesorter({
headers: {
3: {sorter: false},
4: {sorter: false}
}
});
}, 50);
}
edit(id:string) {
this.showForm = true;
this.testimonialId = id;
}
addTestimonial() {
this.showForm = true;
this.testimonialId = null;
}
delete(id:string) {
swal({
title: "Are you sure?",
text: "You will not be able to recover this Testimonial !",
type: "warning",
showCancelButton: true,
confirmButtonColor: "#DD6B55",
confirmButtonText: "Yes, delete it!",
closeOnConfirm: false
},
()=> {
let objSlider:TestimonialModel = new TestimonialModel();
objSlider._id = id;
objSlider.deleted = true;
this._objService.deleteTestimonial(objSlider)
.subscribe(res=> {
this.getTestimonialList();
swal("Deleted!", res.message, "success");
},
error=> {
swal("Alert!", error.message, "info");
});
});
}
showList(arg) {
if (!arg) // is not Canceled
{
this.getTestimonialList();
}
this.showForm = false;
this.sortTable();
}
pageChanged(event) {
this.perPage = event.rows;
this.currentPage = (Math.floor(event.first / event.rows)) + 1;
this.first = event.first;
if (event.first == 0)
this.first = 1;
this.getTestimonialList();
}
}
|
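// Service contract assumed by the component above (inferred from its call sites;
// the real definitions live in ./testimonial.service and ./testimonial.model):
//
//   getTestimonialList(perPage: number, currentPage: number): Observable<TestimonialResponse>
//   deleteTestimonial(testimonial: TestimonialModel): Observable<{message: string}>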
dir_info.rs
use std::env;
use std::path::Path;
struct DirInfo {
size: u64,
depth: u32
}
const EMPTY: DirInfo = DirInfo {size: 0, depth: 0};
fn main() {
    let arg = env::args_os().nth(1).expect("Please, provide a directory as argument");
let path = Path::new(&arg);
if path.is_dir() {
let info = dir_info(path);
println!("size:\t{} bytes\ndepth:\t{} ", info.size, info.depth - 1);
} else {
println!("{} is not a directory", arg.to_str().expect("Fatal"));
}
}
fn update_info(info: DirInfo, path: &Path) -> DirInfo {
let child_info = dir_entry_info(path);
let new_size = info.size + child_info.size;
let new_depth = std::cmp::max(info.depth, 1 + child_info.depth);
DirInfo {size: new_size, depth: new_depth}
}
fn dir_entry_info(path: &Path) -> DirInfo {
if path.is_file() {
file_info(path)
} else if path.is_dir() {
dir_info(path)
} else {
EMPTY
}
}
fn file_info(path: &Path) -> DirInfo {
    let metadata = path.metadata().expect("Cannot get file metadata");
DirInfo {size: metadata.len(), depth: 0}
}
fn dir_info(path: &Path) -> DirInfo {
    match std::fs::read_dir(path) {
        Err(_) => EMPTY,
        Ok(entries) =>
            entries.fold(EMPTY,
                         |info, entry| update_info(info, &entry.unwrap().path())),
    }
}
|
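// A minimal test sketch (not part of the original file); it mirrors the arithmetic
// used by `update_info` with made-up size/depth values.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn combining_infos_sums_sizes_and_tracks_depth() {
        let parent = DirInfo { size: 3, depth: 1 };
        let child = DirInfo { size: 5, depth: 4 };
        // update_info sums sizes and keeps max(parent.depth, 1 + child.depth)
        let size = parent.size + child.size;
        let depth = std::cmp::max(parent.depth, 1 + child.depth);
        assert_eq!(size, 8);
        assert_eq!(depth, 5);
    }
}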
settings.py
"""
Django settings for sample_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hj6+-%d0cv@&x%bbb1_t%^+#lkuk2+-5@uci#zrt&xdw2ki&y*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'easy',
'test_app',
)
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR, '/static')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
find_class_methods.py
#!/usr/bin/python
import sys
import logging
#logging.basicConfig(level = logging.DEBUG)
from gi.repository import Vips, GObject
# Search for all VipsOperation which don't have an input image object ... these
# should be class methods and need to have their names pasted into Vips.py
# This is slow :-( so we don't do this dynamically
vips_type_image = GObject.GType.from_name("VipsImage")
vips_type_operation = GObject.GType.from_name("VipsOperation")
def find_class_methods(cls):
if not cls.is_abstract():
op = Vips.Operation.new(cls.name)
found = False
for prop in op.props:
flags = op.get_argument_flags(prop.name)
if not flags & Vips.ArgumentFlags.INPUT:
continue
if not flags & Vips.ArgumentFlags.REQUIRED:
continue
if GObject.type_is_a(vips_type_image, prop.value_type):
found = True
break
if not found:
gtype = Vips.type_find("VipsOperation", cls.name)
nickname = Vips.nickname_find(gtype)
print ' "%s",' % nickname
if len(cls.children) > 0:
for child in cls.children:
            # not easy to get at the deprecated flag in an abstract type?
if cls.name != 'VipsWrap7':
                find_class_methods(child)
print 'found class methods:'
find_class_methods(vips_type_operation)
|
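# Illustrative output shape (the actual nicknames depend on the libvips build
# that is installed):
#
#   found class methods:
#    "black",
#    "gaussnoise",
#    ...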
test-concurrent-load.js
/**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as lolex from 'lolex';
import {createElementWithAttributes} from '../../../../src/dom';
import {
getAmpAdRenderOutsideViewport,
incrementLoadingAds,
is3pThrottled,
waitFor3pThrottle,
} from '../concurrent-load';
import {installTimerService} from '../../../../src/service/timer-impl';
import {macroTask} from '../../../../testing/yield';
describes.realWin('concurrent-load', {}, env => {
describe('getAmpAdRenderOutsideViewport', () => {
it('should return null if ' +
'data-loading-strategy attribute does not exist', () => {
const element = env.win.document.createElement('amp-ad');
expect(getAmpAdRenderOutsideViewport(element)).to.be.null;
});
it('should respect data-loading-strategy attribute', () => {
// data-loading-strategy=prefer-viewability-over-views is 1.25
verifyGetAmpAdRenderOutsideViewport(
'prefer-viewability-over-views', 1.25);
// data-loading-strategy attribute with no value is 1.25
verifyGetAmpAdRenderOutsideViewport('', 1.25);
verifyGetAmpAdRenderOutsideViewport('0', 0);
verifyGetAmpAdRenderOutsideViewport('0.256', 0.256);
verifyGetAmpAdRenderOutsideViewport('1.25', 1.25);
verifyGetAmpAdRenderOutsideViewport('3.0', 3);
expectGetAmpAdRenderOutsideViewportThrow('3.1');
expectGetAmpAdRenderOutsideViewportThrow('-0.1');
expectGetAmpAdRenderOutsideViewportThrow('invalid-value');
});
function verifyGetAmpAdRenderOutsideViewport(loadingStrategy, viewportNum) {
const element = createElementWithAttributes(env.win.document, 'amp-ad', {
'data-loading-strategy': loadingStrategy,
});
expect(getAmpAdRenderOutsideViewport(element)).to.equal(viewportNum);
}
    function expectGetAmpAdRenderOutsideViewportThrow(loadingStrategy) {
const element = createElementWithAttributes(env.win.document, 'amp-ad', {
'data-loading-strategy': loadingStrategy,
});
allowConsoleError(() => {
expect(() => getAmpAdRenderOutsideViewport(element)).to.throw();
});
}
});
describe('incrementLoadingAds', () => {
let win;
let clock;
beforeEach(() => {
win = env.win;
clock = lolex.install({
target: win, toFake: ['Date', 'setTimeout', 'clearTimeout']});
installTimerService(win);
});
afterEach(() => {
clock.uninstall();
});
it('should throttle ad loading one per second', function* () {
expect(is3pThrottled(win)).to.be.false;
incrementLoadingAds(win);
expect(is3pThrottled(win)).to.be.true;
clock.tick(999);
yield macroTask();
expect(is3pThrottled(win)).to.be.true;
clock.tick(1);
yield macroTask();
expect(is3pThrottled(win)).to.be.false;
});
it('should throttle ad one a time', function* () {
expect(is3pThrottled(win)).to.be.false;
let resolver;
incrementLoadingAds(win, new Promise(res => {
resolver = res;
}));
expect(is3pThrottled(win)).to.be.true;
resolver();
yield macroTask();
expect(is3pThrottled(win)).to.be.false;
});
});
describe('waitFor3pThrottle', () => {
beforeEach(() => {
installTimerService(env.win);
});
// TODO(jeffkaufman, #13422): this test was silently failing
it.skip('should block if incremented', () => {
incrementLoadingAds(env.win);
const start = Date.now();
return waitFor3pThrottle(env.win).then(
() => expect(Date.now() - start).to.be.at.least(1000));
});
it('should not block if never incremented', () => {
const start = Date.now();
return waitFor3pThrottle(env.win).then(
() => expect(Date.now() - start).to.be.at.most(50));
});
  });
});
snmp.py
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from glances.logger import logger
# Import mandatory PySNMP lib
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
except ImportError:
logger.critical("PySNMP library not found. To install it: pip install pysnmp")
sys.exit(2)
class GlancesSNMPClient(object):
"""SNMP client class (based on pysnmp library)."""
def __init__(self, host='localhost', port=161, version='2c',
community='public', user='private', auth=''):
super(GlancesSNMPClient, self).__init__()
self.cmdGen = cmdgen.CommandGenerator()
self.version = version
self.host = host
self.port = port
self.community = community
self.user = user
self.auth = auth
def __buid_result(self, varBinds):
"""Build the results."""
ret = {}
for name, val in varBinds:
if str(val) == '':
ret[name.prettyPrint()] = ''
else:
ret[name.prettyPrint()] = val.prettyPrint()
# In Python 3, prettyPrint() return 'b'linux'' instead of 'linux'
if ret[name.prettyPrint()].startswith('b\''):
ret[name.prettyPrint()] = ret[name.prettyPrint()][2:-1]
return ret
def __get_result__(self, errorIndication, errorStatus, errorIndex, varBinds):
"""Put results in table."""
ret = {}
if not errorIndication or not errorStatus:
ret = self.__buid_result(varBinds)
return ret
    def get_by_oid(self, *oid):
        """SNMP simple request (list of OID).
        One request per OID list.
        * oid: oid list
        > Return a dict
        """
        if self.version == '3':
            errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
                cmdgen.UsmUserData(self.user, self.auth),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                *oid
            )
        else:
            errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
                cmdgen.CommunityData(self.community),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                *oid
            )
        return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds)
def __bulk_result__(self, errorIndication, errorStatus, errorIndex, varBindTable):
ret = []
if not errorIndication or not errorStatus:
for varBindTableRow in varBindTable:
ret.append(self.__buid_result(varBindTableRow))
return ret
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
"""SNMP getbulk request.
In contrast to snmpwalk, this information will typically be gathered in
a single transaction with the agent, rather than one transaction per
variable found.
* non_repeaters: This specifies the number of supplied variables that
should not be iterated over.
* max_repetitions: This specifies the maximum number of iterations over
the repeating variables.
* oid: oid list
> Return a list of dicts
"""
        if self.version.startswith('3'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
                cmdgen.UsmUserData(self.user, self.auth),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                non_repeaters,
                max_repetitions,
                *oid
            )
        elif self.version.startswith('2'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
                cmdgen.CommunityData(self.community),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                non_repeaters,
                max_repetitions,
                *oid
            )
        else:
            # Bulk requests are not available with SNMP version 1
            return []
        return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable)
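# Usage sketch (not part of the original module); the host and community are
# placeholder values, and '1.3.6.1.2.1.1.1.0' is the standard sysDescr.0 OID.
if __name__ == '__main__':
    client = GlancesSNMPClient(host='192.168.1.1', port=161, version='2c',
                               community='public')
    for oid, value in client.get_by_oid('1.3.6.1.2.1.1.1.0').items():
        print('{0} = {1}'.format(oid, value))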
snmp.py | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from glances.logger import logger
# Import mandatory PySNMP lib
try: | except ImportError:
logger.critical("PySNMP library not found. To install it: pip install pysnmp")
sys.exit(2)
class GlancesSNMPClient(object):
"""SNMP client class (based on pysnmp library)."""
def __init__(self, host='localhost', port=161, version='2c',
community='public', user='private', auth=''):
super(GlancesSNMPClient, self).__init__()
self.cmdGen = cmdgen.CommandGenerator()
self.version = version
self.host = host
self.port = port
self.community = community
self.user = user
self.auth = auth
def __buid_result(self, varBinds):
"""Build the results."""
ret = {}
for name, val in varBinds:
if str(val) == '':
ret[name.prettyPrint()] = ''
else:
ret[name.prettyPrint()] = val.prettyPrint()
                # In Python 3, prettyPrint() returns "b'linux'" instead of "linux"
if ret[name.prettyPrint()].startswith('b\''):
ret[name.prettyPrint()] = ret[name.prettyPrint()][2:-1]
return ret
def __get_result__(self, errorIndication, errorStatus, errorIndex, varBinds):
"""Put results in table."""
ret = {}
if not errorIndication or not errorStatus:
ret = self.__buid_result(varBinds)
return ret
def get_by_oid(self, *oid):
"""SNMP simple request (list of OID).
One request per OID list.
* oid: oid list
> Return a dict
"""
if self.version == '3':
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
else:
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds)
def __bulk_result__(self, errorIndication, errorStatus, errorIndex, varBindTable):
ret = []
if not errorIndication or not errorStatus:
for varBindTableRow in varBindTable:
ret.append(self.__buid_result(varBindTableRow))
return ret
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
"""SNMP getbulk request.
In contrast to snmpwalk, this information will typically be gathered in
a single transaction with the agent, rather than one transaction per
variable found.
* non_repeaters: This specifies the number of supplied variables that
should not be iterated over.
* max_repetitions: This specifies the maximum number of iterations over
the repeating variables.
* oid: oid list
> Return a list of dicts
"""
if self.version.startswith('3'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
        elif self.version.startswith('2'):
errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
else:
            # Bulk requests are not available with SNMP version 1
return []
return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable) | from pysnmp.entity.rfc3413.oneliner import cmdgen | random_line_split |
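A minimal usage sketch for the client defined above; the host, community string, and OIDs are illustrative values, not taken from the source:

# Query sysDescr.0 and sysName.0 over SNMPv2c (hypothetical agent).
client = GlancesSNMPClient(host="192.168.0.1", port=161,
                           version="2c", community="public")
result = client.get_by_oid("1.3.6.1.2.1.1.1.0",   # SNMPv2-MIB::sysDescr.0
                           "1.3.6.1.2.1.1.5.0")   # SNMPv2-MIB::sysName.0
for oid, value in result.items():
    print(oid, "=", value)   # an empty dict means the request failed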
snmp.py | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from glances.logger import logger
# Import mandatory PySNMP lib
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
except ImportError:
logger.critical("PySNMP library not found. To install it: pip install pysnmp")
sys.exit(2)
class GlancesSNMPClient(object):
"""SNMP client class (based on pysnmp library)."""
def __init__(self, host='localhost', port=161, version='2c',
community='public', user='private', auth=''):
super(GlancesSNMPClient, self).__init__()
self.cmdGen = cmdgen.CommandGenerator()
self.version = version
self.host = host
self.port = port
self.community = community
self.user = user
self.auth = auth
def __buid_result(self, varBinds):
"""Build the results."""
ret = {}
for name, val in varBinds:
|
return ret
def __get_result__(self, errorIndication, errorStatus, errorIndex, varBinds):
"""Put results in table."""
ret = {}
if not errorIndication or not errorStatus:
ret = self.__buid_result(varBinds)
return ret
def get_by_oid(self, *oid):
"""SNMP simple request (list of OID).
One request per OID list.
* oid: oid list
> Return a dict
"""
if self.version == '3':
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
else:
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds)
def __bulk_result__(self, errorIndication, errorStatus, errorIndex, varBindTable):
ret = []
if not errorIndication or not errorStatus:
for varBindTableRow in varBindTable:
ret.append(self.__buid_result(varBindTableRow))
return ret
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
"""SNMP getbulk request.
In contrast to snmpwalk, this information will typically be gathered in
a single transaction with the agent, rather than one transaction per
variable found.
* non_repeaters: This specifies the number of supplied variables that
should not be iterated over.
* max_repetitions: This specifies the maximum number of iterations over
the repeating variables.
* oid: oid list
> Return a list of dicts
"""
if self.version.startswith('3'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
        elif self.version.startswith('2'):
errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
else:
            # Bulk requests are not available with SNMP version 1
return []
return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable)
| if str(val) == '':
ret[name.prettyPrint()] = ''
else:
ret[name.prettyPrint()] = val.prettyPrint()
                # In Python 3, prettyPrint() returns "b'linux'" instead of "linux"
if ret[name.prettyPrint()].startswith('b\''):
ret[name.prettyPrint()] = ret[name.prettyPrint()][2:-1] | conditional_block |
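To make the non_repeaters and max_repetitions parameters concrete, a hedged sketch of a one-shot table walk; the OID and sizes are chosen for illustration:

# Fetch up to 25 rows of the interface description column in a single
# getbulk transaction (hypothetical agent).
client = GlancesSNMPClient(host="192.168.0.1", version="2c",
                           community="public")
rows = client.getbulk_by_oid(0,    # non_repeaters: no scalar OIDs supplied
                             25,   # max_repetitions: rows fetched per OID
                             "1.3.6.1.2.1.2.2.1.2")  # IF-MIB::ifDescr
for row in rows:
    print(row)  # one dict per varbind row; [] under SNMP version 1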
snmp.py | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from glances.logger import logger
# Import mandatory PySNMP lib
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
except ImportError:
logger.critical("PySNMP library not found. To install it: pip install pysnmp")
sys.exit(2)
class GlancesSNMPClient(object):
"""SNMP client class (based on pysnmp library)."""
def __init__(self, host='localhost', port=161, version='2c',
community='public', user='private', auth=''):
super(GlancesSNMPClient, self).__init__()
self.cmdGen = cmdgen.CommandGenerator()
self.version = version
self.host = host
self.port = port
self.community = community
self.user = user
self.auth = auth
def __buid_result(self, varBinds):
"""Build the results."""
ret = {}
for name, val in varBinds:
if str(val) == '':
ret[name.prettyPrint()] = ''
else:
ret[name.prettyPrint()] = val.prettyPrint()
                # In Python 3, prettyPrint() returns "b'linux'" instead of "linux"
if ret[name.prettyPrint()].startswith('b\''):
ret[name.prettyPrint()] = ret[name.prettyPrint()][2:-1]
return ret
def __get_result__(self, errorIndication, errorStatus, errorIndex, varBinds):
"""Put results in table."""
ret = {}
if not errorIndication or not errorStatus:
ret = self.__buid_result(varBinds)
return ret
def | (self, *oid):
"""SNMP simple request (list of OID).
One request per OID list.
* oid: oid list
> Return a dict
"""
if self.version == '3':
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
else:
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds)
def __bulk_result__(self, errorIndication, errorStatus, errorIndex, varBindTable):
ret = []
if not errorIndication or not errorStatus:
for varBindTableRow in varBindTable:
ret.append(self.__buid_result(varBindTableRow))
return ret
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
"""SNMP getbulk request.
In contrast to snmpwalk, this information will typically be gathered in
a single transaction with the agent, rather than one transaction per
variable found.
* non_repeaters: This specifies the number of supplied variables that
should not be iterated over.
* max_repetitions: This specifies the maximum number of iterations over
the repeating variables.
* oid: oid list
> Return a list of dicts
"""
if self.version.startswith('3'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
        elif self.version.startswith('2'):
errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
else:
            # Bulk requests are not available with SNMP version 1
return []
return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable)
| get_by_oid | identifier_name |
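The startswith("b'") branch above undoes the Python 3 repr that prettyPrint() can leak; the same normalization in isolation, with an extra endswith guard of my own:

def strip_bytes_repr(text):
    # "b'linux'" -> "linux"; any other value passes through untouched.
    if text.startswith("b'") and text.endswith("'"):
        return text[2:-1]
    return text

assert strip_bytes_repr("b'linux'") == "linux"
assert strip_bytes_repr("linux") == "linux"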
shootout-fasta.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* -*- mode: rust; indent-tabs-mode: nil -*-
* Implementation of 'fasta' benchmark from
* Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
*/
use std::io;
use std::io::{BufferedWriter, File};
use std::num::min;
use std::os;
static LINE_LENGTH: uint = 60;
static IM: u32 = 139968;
struct MyRandom {
last: u32
}
impl MyRandom {
fn new() -> MyRandom { MyRandom { last: 42 } }
fn normalize(p: f32) -> u32 {(p * IM as f32).floor() as u32}
fn gen(&mut self) -> u32 {
self.last = (self.last * 3877 + 29573) % IM;
self.last
}
}
struct AAGen<'a> {
rng: &'a mut MyRandom,
data: ~[(u32, u8)]
}
impl<'a> AAGen<'a> {
fn new<'b>(rng: &'b mut MyRandom, aa: &[(char, f32)]) -> AAGen<'b> {
let mut cum = 0.;
let data = aa.iter()
.map(|&(ch, p)| { cum += p; (MyRandom::normalize(cum), ch as u8) })
.collect();
AAGen { rng: rng, data: data }
}
}
impl<'a> Iterator<u8> for AAGen<'a> {
fn next(&mut self) -> Option<u8> {
let r = self.rng.gen();
self.data.iter()
.skip_while(|pc| pc.n0() < r)
.map(|&(_, c)| c)
.next()
}
}
fn make_fasta<W: Writer, I: Iterator<u8>>(
wr: &mut W, header: &str, mut it: I, mut n: uint)
{
wr.write(header.as_bytes());
let mut line = [0u8, .. LINE_LENGTH + 1];
while n > 0 {
let nb = min(LINE_LENGTH, n);
for i in range(0, nb) {
line[i] = it.next().unwrap();
}
n -= nb;
line[nb] = '\n' as u8;
wr.write(line.slice_to(nb + 1));
}
}
fn run<W: Writer>(writer: &mut W) |
fn main() {
if os::getenv("RUST_BENCH").is_some() {
let mut file = BufferedWriter::new(File::create(&Path::new("./shootout-fasta.data")));
run(&mut file);
} else {
run(&mut BufferedWriter::new(io::stdout()));
}
}
| {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() {
25000000
} else if args.len() <= 1u {
1000
} else {
from_str(args[1]).unwrap()
};
let rng = &mut MyRandom::new();
let alu =
"GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG\
GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA\
CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT\
ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA\
GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG\
AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC\
AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
let iub = &[('a', 0.27), ('c', 0.12), ('g', 0.12),
('t', 0.27), ('B', 0.02), ('D', 0.02),
('H', 0.02), ('K', 0.02), ('M', 0.02),
('N', 0.02), ('R', 0.02), ('S', 0.02),
('V', 0.02), ('W', 0.02), ('Y', 0.02)];
let homosapiens = &[('a', 0.3029549426680),
('c', 0.1979883004921),
('g', 0.1975473066391),
('t', 0.3015094502008)];
make_fasta(writer, ">ONE Homo sapiens alu\n",
alu.as_bytes().iter().cycle().map(|c| *c), n * 2);
make_fasta(writer, ">TWO IUB ambiguity codes\n",
AAGen::new(rng, iub), n * 3);
make_fasta(writer, ">THREE Homo sapiens frequency\n",
AAGen::new(rng, homosapiens), n * 5);
writer.flush();
} | identifier_body |
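The Rust source above pairs the benchmark's linear congruential generator with cumulative-threshold sampling; a Python sketch of the same scheme (class and helper names are mine):

IM = 139968  # modulus shared by the generator and the thresholds

class BenchRandom:
    def __init__(self, seed=42):
        self.last = seed

    def gen(self):
        # Same recurrence as the Rust MyRandom: (last*3877 + 29573) % IM
        self.last = (self.last * 3877 + 29573) % IM
        return self.last

def make_sampler(weights):
    # weights: [(symbol, probability)] with probabilities summing to 1.
    thresholds, cum = [], 0.0
    for symbol, p in weights:
        cum += p
        thresholds.append((int(cum * IM), symbol))
    def draw(rng):
        r = rng.gen()
        # First symbol whose cumulative threshold reaches the draw.
        return next(sym for limit, sym in thresholds if limit >= r)
    return draw

rng = BenchRandom()
draw = make_sampler([("a", 0.27), ("c", 0.12), ("g", 0.12), ("t", 0.49)])
print("".join(draw(rng) for _ in range(60)))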
shootout-fasta.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* -*- mode: rust; indent-tabs-mode: nil -*-
* Implementation of 'fasta' benchmark from
* Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
*/
use std::io;
use std::io::{BufferedWriter, File};
use std::num::min;
use std::os;
static LINE_LENGTH: uint = 60;
static IM: u32 = 139968;
struct MyRandom {
last: u32
}
impl MyRandom {
fn new() -> MyRandom { MyRandom { last: 42 } }
fn normalize(p: f32) -> u32 {(p * IM as f32).floor() as u32}
fn gen(&mut self) -> u32 {
self.last = (self.last * 3877 + 29573) % IM;
self.last
}
}
struct AAGen<'a> {
rng: &'a mut MyRandom,
data: ~[(u32, u8)]
}
impl<'a> AAGen<'a> {
fn new<'b>(rng: &'b mut MyRandom, aa: &[(char, f32)]) -> AAGen<'b> {
let mut cum = 0.;
let data = aa.iter()
.map(|&(ch, p)| { cum += p; (MyRandom::normalize(cum), ch as u8) })
.collect();
AAGen { rng: rng, data: data }
}
}
impl<'a> Iterator<u8> for AAGen<'a> {
fn next(&mut self) -> Option<u8> {
let r = self.rng.gen();
self.data.iter()
.skip_while(|pc| pc.n0() < r)
.map(|&(_, c)| c)
.next()
}
}
fn make_fasta<W: Writer, I: Iterator<u8>>(
wr: &mut W, header: &str, mut it: I, mut n: uint)
{
wr.write(header.as_bytes());
let mut line = [0u8, .. LINE_LENGTH + 1];
while n > 0 {
let nb = min(LINE_LENGTH, n);
for i in range(0, nb) {
line[i] = it.next().unwrap();
}
n -= nb;
line[nb] = '\n' as u8;
wr.write(line.slice_to(nb + 1));
}
}
fn run<W: Writer>(writer: &mut W) {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() {
25000000
} else if args.len() <= 1u {
1000
} else {
from_str(args[1]).unwrap()
};
let rng = &mut MyRandom::new();
let alu =
"GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG\
GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA\
CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT\
ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA\
GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG\
AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC\
AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
let iub = &[('a', 0.27), ('c', 0.12), ('g', 0.12),
('t', 0.27), ('B', 0.02), ('D', 0.02),
('H', 0.02), ('K', 0.02), ('M', 0.02),
('N', 0.02), ('R', 0.02), ('S', 0.02),
('V', 0.02), ('W', 0.02), ('Y', 0.02)];
let homosapiens = &[('a', 0.3029549426680),
('c', 0.1979883004921),
('g', 0.1975473066391),
('t', 0.3015094502008)];
make_fasta(writer, ">ONE Homo sapiens alu\n",
alu.as_bytes().iter().cycle().map(|c| *c), n * 2);
make_fasta(writer, ">TWO IUB ambiguity codes\n",
AAGen::new(rng, iub), n * 3);
make_fasta(writer, ">THREE Homo sapiens frequency\n", |
writer.flush();
}
fn main() {
if os::getenv("RUST_BENCH").is_some() {
let mut file = BufferedWriter::new(File::create(&Path::new("./shootout-fasta.data")));
run(&mut file);
} else {
run(&mut BufferedWriter::new(io::stdout()));
}
} | AAGen::new(rng, homosapiens), n * 5); | random_line_split |
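make_fasta above drains an endless symbol stream into 60-column lines; the same buffering loop in Python, as a sketch:

import io
import itertools

LINE_LENGTH = 60

def write_fasta(out, header, symbols, n):
    # Emit n symbols from the (possibly infinite) iterator in fixed-width
    # lines, mirroring the Rust make_fasta loop.
    out.write(header)
    while n > 0:
        nb = min(LINE_LENGTH, n)
        out.write("".join(itertools.islice(symbols, nb)) + "\n")
        n -= nb

buf = io.StringIO()
write_fasta(buf, ">ONE Homo sapiens alu\n",
            itertools.cycle("GGCCGGGCGCGGTGGCTCACGCC"), 150)
print(buf.getvalue(), end="")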
shootout-fasta.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* -*- mode: rust; indent-tabs-mode: nil -*-
* Implementation of 'fasta' benchmark from
* Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
*/
use std::io;
use std::io::{BufferedWriter, File};
use std::num::min;
use std::os;
static LINE_LENGTH: uint = 60;
static IM: u32 = 139968;
struct MyRandom {
last: u32
}
impl MyRandom {
fn new() -> MyRandom { MyRandom { last: 42 } }
fn normalize(p: f32) -> u32 {(p * IM as f32).floor() as u32}
fn gen(&mut self) -> u32 {
self.last = (self.last * 3877 + 29573) % IM;
self.last
}
}
struct AAGen<'a> {
rng: &'a mut MyRandom,
data: ~[(u32, u8)]
}
impl<'a> AAGen<'a> {
fn new<'b>(rng: &'b mut MyRandom, aa: &[(char, f32)]) -> AAGen<'b> {
let mut cum = 0.;
let data = aa.iter()
.map(|&(ch, p)| { cum += p; (MyRandom::normalize(cum), ch as u8) })
.collect();
AAGen { rng: rng, data: data }
}
}
impl<'a> Iterator<u8> for AAGen<'a> {
fn next(&mut self) -> Option<u8> {
let r = self.rng.gen();
self.data.iter()
.skip_while(|pc| pc.n0() < r)
.map(|&(_, c)| c)
.next()
}
}
fn make_fasta<W: Writer, I: Iterator<u8>>(
wr: &mut W, header: &str, mut it: I, mut n: uint)
{
wr.write(header.as_bytes());
let mut line = [0u8, .. LINE_LENGTH + 1];
while n > 0 {
let nb = min(LINE_LENGTH, n);
for i in range(0, nb) {
line[i] = it.next().unwrap();
}
n -= nb;
line[nb] = '\n' as u8;
wr.write(line.slice_to(nb + 1));
}
}
fn run<W: Writer>(writer: &mut W) {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() | else if args.len() <= 1u {
1000
} else {
from_str(args[1]).unwrap()
};
let rng = &mut MyRandom::new();
let alu =
"GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG\
GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA\
CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT\
ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA\
GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG\
AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC\
AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
let iub = &[('a', 0.27), ('c', 0.12), ('g', 0.12),
('t', 0.27), ('B', 0.02), ('D', 0.02),
('H', 0.02), ('K', 0.02), ('M', 0.02),
('N', 0.02), ('R', 0.02), ('S', 0.02),
('V', 0.02), ('W', 0.02), ('Y', 0.02)];
let homosapiens = &[('a', 0.3029549426680),
('c', 0.1979883004921),
('g', 0.1975473066391),
('t', 0.3015094502008)];
make_fasta(writer, ">ONE Homo sapiens alu\n",
alu.as_bytes().iter().cycle().map(|c| *c), n * 2);
make_fasta(writer, ">TWO IUB ambiguity codes\n",
AAGen::new(rng, iub), n * 3);
make_fasta(writer, ">THREE Homo sapiens frequency\n",
AAGen::new(rng, homosapiens), n * 5);
writer.flush();
}
fn main() {
if os::getenv("RUST_BENCH").is_some() {
let mut file = BufferedWriter::new(File::create(&Path::new("./shootout-fasta.data")));
run(&mut file);
} else {
run(&mut BufferedWriter::new(io::stdout()));
}
}
| {
25000000
} | conditional_block |
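The iub and homosapiens tables only form a valid sampler if their probabilities sum to 1, so every generator draw lands below the final cumulative threshold; a quick check in Python (values copied from the tables above):

iub = [0.27, 0.12, 0.12, 0.27] + [0.02] * 11
homosapiens = [0.3029549426680, 0.1979883004921,
               0.1975473066391, 0.3015094502008]

for name, probs in (("iub", iub), ("homosapiens", homosapiens)):
    total = sum(probs)
    assert abs(total - 1.0) < 1e-9, (name, total)
    print(name, "sums to", total)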
shootout-fasta.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* -*- mode: rust; indent-tabs-mode: nil -*-
* Implementation of 'fasta' benchmark from
* Computer Language Benchmarks Game
* http://shootout.alioth.debian.org/
*/
use std::io;
use std::io::{BufferedWriter, File};
use std::num::min;
use std::os;
static LINE_LENGTH: uint = 60;
static IM: u32 = 139968;
struct MyRandom {
last: u32
}
impl MyRandom {
fn new() -> MyRandom { MyRandom { last: 42 } }
fn normalize(p: f32) -> u32 {(p * IM as f32).floor() as u32}
fn | (&mut self) -> u32 {
self.last = (self.last * 3877 + 29573) % IM;
self.last
}
}
struct AAGen<'a> {
rng: &'a mut MyRandom,
data: ~[(u32, u8)]
}
impl<'a> AAGen<'a> {
fn new<'b>(rng: &'b mut MyRandom, aa: &[(char, f32)]) -> AAGen<'b> {
let mut cum = 0.;
let data = aa.iter()
.map(|&(ch, p)| { cum += p; (MyRandom::normalize(cum), ch as u8) })
.collect();
AAGen { rng: rng, data: data }
}
}
impl<'a> Iterator<u8> for AAGen<'a> {
fn next(&mut self) -> Option<u8> {
let r = self.rng.gen();
self.data.iter()
.skip_while(|pc| pc.n0() < r)
.map(|&(_, c)| c)
.next()
}
}
fn make_fasta<W: Writer, I: Iterator<u8>>(
wr: &mut W, header: &str, mut it: I, mut n: uint)
{
wr.write(header.as_bytes());
let mut line = [0u8, .. LINE_LENGTH + 1];
while n > 0 {
let nb = min(LINE_LENGTH, n);
for i in range(0, nb) {
line[i] = it.next().unwrap();
}
n -= nb;
line[nb] = '\n' as u8;
wr.write(line.slice_to(nb + 1));
}
}
fn run<W: Writer>(writer: &mut W) {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() {
25000000
} else if args.len() <= 1u {
1000
} else {
from_str(args[1]).unwrap()
};
let rng = &mut MyRandom::new();
let alu =
"GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG\
GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA\
CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT\
ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA\
GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG\
AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC\
AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
let iub = &[('a', 0.27), ('c', 0.12), ('g', 0.12),
('t', 0.27), ('B', 0.02), ('D', 0.02),
('H', 0.02), ('K', 0.02), ('M', 0.02),
('N', 0.02), ('R', 0.02), ('S', 0.02),
('V', 0.02), ('W', 0.02), ('Y', 0.02)];
let homosapiens = &[('a', 0.3029549426680),
('c', 0.1979883004921),
('g', 0.1975473066391),
('t', 0.3015094502008)];
make_fasta(writer, ">ONE Homo sapiens alu\n",
alu.as_bytes().iter().cycle().map(|c| *c), n * 2);
make_fasta(writer, ">TWO IUB ambiguity codes\n",
AAGen::new(rng, iub), n * 3);
make_fasta(writer, ">THREE Homo sapiens frequency\n",
AAGen::new(rng, homosapiens), n * 5);
writer.flush();
}
fn main() {
if os::getenv("RUST_BENCH").is_some() {
let mut file = BufferedWriter::new(File::create(&Path::new("./shootout-fasta.data")));
run(&mut file);
} else {
run(&mut BufferedWriter::new(io::stdout()));
}
}
| gen | identifier_name |
__init__.py | """
ICS Ops Common Library
"""
import os
from os.path import dirname | import boto
__version__ = "0.0.3.3"
__release__ = "alpha"
CONFIG = "opslib.ini"
LOG_NAME = "opslib"
AWS_ACCESS_KEY_NAME = "aws_access_key_id"
AWS_SECRET_KEY_NAME = "aws_secret_access_key"
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
    # Default credential file will be located in the current folder
if filepath is None or not os.path.exists(filepath):
pwdpath = dirname(realpath(__file__))
filepath = pathjoin(pwdpath, CONFIG)
    access_key = secret_key = None
    if enable_boto:
# Initialize credentials for boto
from boto.pyami.config import Config
boto.config = Config(filepath)
access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
# FIXME: a trick when the value is empty
if not access_key or not secret_key:
boto.config.remove_section('Credentials')
if enable_botocore:
# Initialize credentials for botocore
import botocore.credentials
if access_key and secret_key:
def get_credentials(session, metadata=None):
return botocore.credentials.Credentials(access_key, secret_key)
botocore.credentials.get_credentials = get_credentials
if access_key and secret_key:
return access_key, secret_key
def init_logging(name=LOG_NAME, logfile=None,
console=False, loglevel="INFO",
enable_boto_log=False):
global logger
from opslib.icslog import IcsLog
logger = IcsLog(name, level=loglevel, console=console, logfile=logfile)
if enable_boto_log:
boto.log = logger
return logger
init_config()
init_logging()
# vim: tabstop=4 shiftwidth=4 softtabstop=4 | from os.path import realpath
from os.path import join as pathjoin
| random_line_split |
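A bootstrap sketch for the module above; the config path is illustrative, and the logging call assumes IcsLog exposes the stdlib-style info():

# Hypothetical setup: explicit credentials file, botocore shim enabled,
# console logging at DEBUG.
creds = init_config(filepath="/etc/opslib/opslib.ini",
                    enable_boto=True, enable_botocore=True)
log = init_logging(name="opslib", console=True, loglevel="DEBUG")
if creds:
    access_key, _secret_key = creds
    log.info("loaded AWS credentials for access key %s..." % access_key[:4])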
__init__.py | """
ICS Ops Common Library
"""
import os
from os.path import dirname
from os.path import realpath
from os.path import join as pathjoin
import boto
__version__ = "0.0.3.3"
__release__ = "alpha"
CONFIG = "opslib.ini"
LOG_NAME = "opslib"
AWS_ACCESS_KEY_NAME = "aws_access_key_id"
AWS_SECRET_KEY_NAME = "aws_secret_access_key"
def | (filepath=None, enable_boto=True, enable_botocore=False):
    # Default credential file will be located in the current folder
if filepath is None or not os.path.exists(filepath):
pwdpath = dirname(realpath(__file__))
filepath = pathjoin(pwdpath, CONFIG)
    access_key = secret_key = None
    if enable_boto:
# Initialize credentials for boto
from boto.pyami.config import Config
boto.config = Config(filepath)
access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
# FIXME: a trick when the value is empty
if not access_key or not secret_key:
boto.config.remove_section('Credentials')
if enable_botocore:
# Initialize credentials for botocore
import botocore.credentials
if access_key and secret_key:
def get_credentials(session, metadata=None):
return botocore.credentials.Credentials(access_key, secret_key)
botocore.credentials.get_credentials = get_credentials
if access_key and secret_key:
return access_key, secret_key
def init_logging(name=LOG_NAME, logfile=None,
console=False, loglevel="INFO",
enable_boto_log=False):
global logger
from opslib.icslog import IcsLog
logger = IcsLog(name, level=loglevel, console=console, logfile=logfile)
if enable_boto_log:
boto.log = logger
return logger
init_config()
init_logging()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| init_config | identifier_name |
__init__.py | """
ICS Ops Common Library
"""
import os
from os.path import dirname
from os.path import realpath
from os.path import join as pathjoin
import boto
__version__ = "0.0.3.3"
__release__ = "alpha"
CONFIG = "opslib.ini"
LOG_NAME = "opslib"
AWS_ACCESS_KEY_NAME = "aws_access_key_id"
AWS_SECRET_KEY_NAME = "aws_secret_access_key"
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
    # Default credential file will be located in the current folder
if filepath is None or not os.path.exists(filepath):
pwdpath = dirname(realpath(__file__))
filepath = pathjoin(pwdpath, CONFIG)
    access_key = secret_key = None
    if enable_boto:
# Initialize credentials for boto
from boto.pyami.config import Config
boto.config = Config(filepath)
access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
# FIXME: a trick when the value is empty
if not access_key or not secret_key:
boto.config.remove_section('Credentials')
if enable_botocore:
# Initialize credentials for botocore
import botocore.credentials
if access_key and secret_key:
def get_credentials(session, metadata=None):
|
botocore.credentials.get_credentials = get_credentials
if access_key and secret_key:
return access_key, secret_key
def init_logging(name=LOG_NAME, logfile=None,
console=False, loglevel="INFO",
enable_boto_log=False):
global logger
from opslib.icslog import IcsLog
logger = IcsLog(name, level=loglevel, console=console, logfile=logfile)
if enable_boto_log:
boto.log = logger
return logger
init_config()
init_logging()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| return botocore.credentials.Credentials(access_key, secret_key) | identifier_body |
__init__.py | """
ICS Ops Common Library
"""
import os
from os.path import dirname
from os.path import realpath
from os.path import join as pathjoin
import boto
__version__ = "0.0.3.3"
__release__ = "alpha"
CONFIG = "opslib.ini"
LOG_NAME = "opslib"
AWS_ACCESS_KEY_NAME = "aws_access_key_id"
AWS_SECRET_KEY_NAME = "aws_secret_access_key"
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
    # Default credential file will be located in the current folder
if filepath is None or not os.path.exists(filepath):
pwdpath = dirname(realpath(__file__))
filepath = pathjoin(pwdpath, CONFIG)
    access_key = secret_key = None
    if enable_boto:
# Initialize credentials for boto
from boto.pyami.config import Config
boto.config = Config(filepath)
access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
# FIXME: a trick when the value is empty
if not access_key or not secret_key:
|
if enable_botocore:
# Initialize credentials for botocore
import botocore.credentials
if access_key and secret_key:
def get_credentials(session, metadata=None):
return botocore.credentials.Credentials(access_key, secret_key)
botocore.credentials.get_credentials = get_credentials
if access_key and secret_key:
return access_key, secret_key
def init_logging(name=LOG_NAME, logfile=None,
console=False, loglevel="INFO",
enable_boto_log=False):
global logger
from opslib.icslog import IcsLog
logger = IcsLog(name, level=loglevel, console=console, logfile=logfile)
if enable_boto_log:
boto.log = logger
return logger
init_config()
init_logging()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| boto.config.remove_section('Credentials') | conditional_block |
app.ts | import "reflect-metadata";
import {createConnection, ConnectionOptions} from "../../src/index";
import {Post} from "./entity/Post";
import {Author} from "./entity/Author";
import {Category} from "./entity/Category";
const options: ConnectionOptions = {
driver: {
type: "mysql",
host: "localhost",
port: 3306,
username: "root",
password: "admin",
database: "test"
},
logging: {
logOnlyFailedQueries: true,
logFailedQueryError: true
},
autoSchemaSync: true, | entities: [Post, Author, Category]
};
createConnection(options).then(connection => {
let postRepository = connection.getRepository(Post);
let author = new Author();
author.name = "Umed";
let category1 = new Category();
category1.name = "Category #1";
let category2 = new Category();
category2.name = "Category #2";
let post = new Post();
post.text = "Hello how are you?";
post.title = "hello";
post.author = author;
post.categories = [category1, category2];
postRepository
.persist(post)
.then(post => {
console.log("Post has been saved. Lets load it now.");
return postRepository.find({
join: {
alias: "post",
leftJoinAndSelect: {
categories: "post.categories",
author: "post.user" // note that table column is used, not object property
}
}
});
})
.then(loadedPosts => {
console.log("loadedPosts: ", loadedPosts);
})
.catch(error => console.log(error.stack));
}, error => console.log("Cannot connect: ", error)); | random_line_split |
|
models.py | from __future__ import unicode_literals
from functools import partial
from future.utils import with_metaclass
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
from django.db.models.signals import class_prepared
from django.utils import six
from mezzanine.utils.importing import import_dotted_path
# Backward compatibility with Django 1.5's "get_user_model".
if VERSION >= (1, 5):
from django.contrib.auth import get_user_model
else:
def | ():
from django.contrib.auth.models import User
return User
# Emulate Django 1.7's exception-raising get_registered_model
# when running under earlier versions
if VERSION >= (1, 7):
from django.apps import apps
get_model = apps.get_model
get_registered_model = apps.get_registered_model
else:
from django.db.models import get_model as django_get_model
def get_model(app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
model = django_get_model(app_label, model_name)
if not model:
raise LookupError
return model
def get_registered_model(app_label, model_name):
model = django_get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if not model:
raise LookupError
return model
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
def base_concrete_model(abstract, instance):
"""
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__
def upload_to(field_path, default):
"""
Used as the ``upload_to`` arg for file fields - allows for custom
handlers to be implemented on a per field basis defined by the
``UPLOAD_TO_HANDLERS`` setting.
"""
from mezzanine.conf import settings
for k, v in settings.UPLOAD_TO_HANDLERS.items():
if k.lower() == field_path.lower():
return import_dotted_path(v)
return default
class AdminThumbMixin(object):
"""
Provides a thumbnail method on models for admin classes to
reference in the ``list_display`` definition.
"""
admin_thumb_field = None
def admin_thumb(self):
thumb = ""
if self.admin_thumb_field:
thumb = getattr(self, self.admin_thumb_field, "")
if not thumb:
return ""
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
x, y = settings.ADMIN_THUMB_SIZE.split('x')
thumb_url = thumbnail(thumb, x, y)
return "<img src='%s%s'>" % (settings.MEDIA_URL, thumb_url)
admin_thumb.allow_tags = True
admin_thumb.short_description = ""
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is used for injecting model
fields and methods into models defined outside of a project.
This currently isn't used anywhere.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model):
raise TypeError
except (TypeError, KeyError, AttributeError):
raise ImproperlyConfigured("The ModelMixin class '%s' requires "
"an inner Meta class with the "
"``mixin_for`` attribute defined, "
"with a value that is a valid model.")
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(with_metaclass(ModelMixinBase, object)):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that will be mixed into.
"""
class LazyModelOperations(object):
"""
This class connects itself to Django's class_prepared signal.
Pass a function and a model or model name to its ``add()`` method,
and the function will be called with the model as its only
parameter once the model has been loaded. If the model is already
loaded, the function is called immediately.
Adapted from ``django.db.models.fields.related`` and used in
``mezzanine.generic.fields``.
"""
def __init__(self):
self.pending_operations = {}
class_prepared.connect(self.signal_receiver)
@staticmethod
def model_key(model_or_name):
"""
Returns an (app_label, model_name) tuple from a model or string.
"""
if isinstance(model_or_name, six.string_types):
app_label, model_name = model_or_name.split(".")
else:
# It's actually a model class.
app_label = model_or_name._meta.app_label
model_name = model_or_name._meta.object_name
return app_label, model_name
def add(self, function, *models_or_names):
"""
The function passed to this method should accept n arguments,
where n=len(models_or_names). When all the models are ready,
the function will be called with the models as arguments, in
the order they appear in this argument list.
"""
# Eagerly parse all model strings so we can fail immediately
# if any are plainly invalid.
model_keys = [self.model_key(m) if not isinstance(m, tuple) else m
for m in models_or_names]
# If this function depends on more than one model, recursively call add
# for each, partially applying the given function on each iteration.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
inner_function = function
function = lambda model: self.add(partial(inner_function, model),
*more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = get_registered_model(*model_key)
except LookupError:
self.pending_operations.setdefault(model_key, []).append(function)
else:
function(model_class)
def signal_receiver(self, sender, **kwargs):
"""
Receive ``class_prepared``, and pass the freshly prepared
model to each function waiting for it.
"""
key = (sender._meta.app_label, sender.__name__)
for function in self.pending_operations.pop(key, []):
function(sender)
lazy_model_ops = LazyModelOperations()
| get_user_model | identifier_name |
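A sketch of the LazyModelOperations contract described in the docstring above; the model labels and callback are hypothetical:

def configure(comment_model, page_model):
    # Invoked once with the prepared model classes, in the order they
    # were passed to add(), no matter when each app finishes loading.
    print("ready:", comment_model.__name__, page_model.__name__)

lazy_model_ops.add(configure, "generic.ThreadedComment", "pages.Page")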
models.py | from __future__ import unicode_literals
from functools import partial
from future.utils import with_metaclass
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
from django.db.models.signals import class_prepared
from django.utils import six
from mezzanine.utils.importing import import_dotted_path
# Backward compatibility with Django 1.5's "get_user_model".
if VERSION >= (1, 5):
from django.contrib.auth import get_user_model
else:
def get_user_model():
from django.contrib.auth.models import User
return User
# Emulate Django 1.7's exception-raising get_registered_model
# when running under earlier versions
if VERSION >= (1, 7):
from django.apps import apps
get_model = apps.get_model
get_registered_model = apps.get_registered_model
else:
from django.db.models import get_model as django_get_model
def get_model(app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
model = django_get_model(app_label, model_name)
if not model:
raise LookupError
return model
def get_registered_model(app_label, model_name):
model = django_get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if not model:
raise LookupError
return model
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
def base_concrete_model(abstract, instance):
|
def upload_to(field_path, default):
"""
Used as the ``upload_to`` arg for file fields - allows for custom
handlers to be implemented on a per field basis defined by the
``UPLOAD_TO_HANDLERS`` setting.
"""
from mezzanine.conf import settings
for k, v in settings.UPLOAD_TO_HANDLERS.items():
if k.lower() == field_path.lower():
return import_dotted_path(v)
return default
class AdminThumbMixin(object):
"""
Provides a thumbnail method on models for admin classes to
reference in the ``list_display`` definition.
"""
admin_thumb_field = None
def admin_thumb(self):
thumb = ""
if self.admin_thumb_field:
thumb = getattr(self, self.admin_thumb_field, "")
if not thumb:
return ""
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
x, y = settings.ADMIN_THUMB_SIZE.split('x')
thumb_url = thumbnail(thumb, x, y)
return "<img src='%s%s'>" % (settings.MEDIA_URL, thumb_url)
admin_thumb.allow_tags = True
admin_thumb.short_description = ""
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is used for injecting model
fields and methods into models defined outside of a project.
This currently isn't used anywhere.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model):
raise TypeError
except (TypeError, KeyError, AttributeError):
raise ImproperlyConfigured("The ModelMixin class '%s' requires "
"an inner Meta class with the "
"``mixin_for`` attribute defined, "
"with a value that is a valid model.")
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(with_metaclass(ModelMixinBase, object)):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that will be mixed into.
"""
class LazyModelOperations(object):
"""
This class connects itself to Django's class_prepared signal.
Pass a function and a model or model name to its ``add()`` method,
and the function will be called with the model as its only
parameter once the model has been loaded. If the model is already
loaded, the function is called immediately.
Adapted from ``django.db.models.fields.related`` and used in
``mezzanine.generic.fields``.
"""
def __init__(self):
self.pending_operations = {}
class_prepared.connect(self.signal_receiver)
@staticmethod
def model_key(model_or_name):
"""
Returns an (app_label, model_name) tuple from a model or string.
"""
if isinstance(model_or_name, six.string_types):
app_label, model_name = model_or_name.split(".")
else:
# It's actually a model class.
app_label = model_or_name._meta.app_label
model_name = model_or_name._meta.object_name
return app_label, model_name
def add(self, function, *models_or_names):
"""
The function passed to this method should accept n arguments,
where n=len(models_or_names). When all the models are ready,
the function will be called with the models as arguments, in
the order they appear in this argument list.
"""
# Eagerly parse all model strings so we can fail immediately
# if any are plainly invalid.
model_keys = [self.model_key(m) if not isinstance(m, tuple) else m
for m in models_or_names]
# If this function depends on more than one model, recursively call add
# for each, partially applying the given function on each iteration.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
inner_function = function
function = lambda model: self.add(partial(inner_function, model),
*more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = get_registered_model(*model_key)
except LookupError:
self.pending_operations.setdefault(model_key, []).append(function)
else:
function(model_class)
def signal_receiver(self, sender, **kwargs):
"""
Receive ``class_prepared``, and pass the freshly prepared
model to each function waiting for it.
"""
key = (sender._meta.app_label, sender.__name__)
for function in self.pending_operations.pop(key, []):
function(sender)
lazy_model_ops = LazyModelOperations()
| """
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__ | identifier_body |
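upload_to() above resolves a per-field handler from UPLOAD_TO_HANDLERS by dotted path; a sketch of wiring one up (the setting value and handler module are mine):

# settings.py (hypothetical): route one file field through a custom
# handler; all other fields keep the default upload_to behaviour.
UPLOAD_TO_HANDLERS = {
    "galleries.galleryimage.file": "myproject.utils.gallery_upload_to",
}

# myproject/utils.py (hypothetical handler imported via the dotted path)
def gallery_upload_to(instance, filename):
    return "galleries/%s" % filename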
models.py | from __future__ import unicode_literals
from functools import partial
from future.utils import with_metaclass
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
from django.db.models.signals import class_prepared
from django.utils import six
from mezzanine.utils.importing import import_dotted_path
# Backward compatibility with Django 1.5's "get_user_model".
if VERSION >= (1, 5):
|
else:
def get_user_model():
from django.contrib.auth.models import User
return User
# Emulate Django 1.7's exception-raising get_registered_model
# when running under earlier versions
if VERSION >= (1, 7):
from django.apps import apps
get_model = apps.get_model
get_registered_model = apps.get_registered_model
else:
from django.db.models import get_model as django_get_model
def get_model(app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
model = django_get_model(app_label, model_name)
if not model:
raise LookupError
return model
def get_registered_model(app_label, model_name):
model = django_get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if not model:
raise LookupError
return model
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
def base_concrete_model(abstract, instance):
"""
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__
def upload_to(field_path, default):
"""
Used as the ``upload_to`` arg for file fields - allows for custom
handlers to be implemented on a per field basis defined by the
``UPLOAD_TO_HANDLERS`` setting.
"""
from mezzanine.conf import settings
for k, v in settings.UPLOAD_TO_HANDLERS.items():
if k.lower() == field_path.lower():
return import_dotted_path(v)
return default
class AdminThumbMixin(object):
"""
Provides a thumbnail method on models for admin classes to
reference in the ``list_display`` definition.
"""
admin_thumb_field = None
def admin_thumb(self):
thumb = ""
if self.admin_thumb_field:
thumb = getattr(self, self.admin_thumb_field, "")
if not thumb:
return ""
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
x, y = settings.ADMIN_THUMB_SIZE.split('x')
thumb_url = thumbnail(thumb, x, y)
return "<img src='%s%s'>" % (settings.MEDIA_URL, thumb_url)
admin_thumb.allow_tags = True
admin_thumb.short_description = ""
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is used for injecting model
fields and methods into models defined outside of a project.
This currently isn't used anywhere.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model):
raise TypeError
except (TypeError, KeyError, AttributeError):
raise ImproperlyConfigured("The ModelMixin class '%s' requires "
"an inner Meta class with the "
"``mixin_for`` attribute defined, "
"with a value that is a valid model.")
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(with_metaclass(ModelMixinBase, object)):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that will be mixed into.
"""
class LazyModelOperations(object):
"""
This class connects itself to Django's class_prepared signal.
Pass a function and a model or model name to its ``add()`` method,
and the function will be called with the model as its only
parameter once the model has been loaded. If the model is already
loaded, the function is called immediately.
Adapted from ``django.db.models.fields.related`` and used in
``mezzanine.generic.fields``.
"""
def __init__(self):
self.pending_operations = {}
class_prepared.connect(self.signal_receiver)
@staticmethod
def model_key(model_or_name):
"""
Returns an (app_label, model_name) tuple from a model or string.
"""
if isinstance(model_or_name, six.string_types):
app_label, model_name = model_or_name.split(".")
else:
# It's actually a model class.
app_label = model_or_name._meta.app_label
model_name = model_or_name._meta.object_name
return app_label, model_name
def add(self, function, *models_or_names):
"""
The function passed to this method should accept n arguments,
where n=len(models_or_names). When all the models are ready,
the function will be called with the models as arguments, in
the order they appear in this argument list.
"""
# Eagerly parse all model strings so we can fail immediately
# if any are plainly invalid.
model_keys = [self.model_key(m) if not isinstance(m, tuple) else m
for m in models_or_names]
# If this function depends on more than one model, recursively call add
# for each, partially applying the given function on each iteration.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
inner_function = function
function = lambda model: self.add(partial(inner_function, model),
*more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = get_registered_model(*model_key)
except LookupError:
self.pending_operations.setdefault(model_key, []).append(function)
else:
function(model_class)
def signal_receiver(self, sender, **kwargs):
"""
Receive ``class_prepared``, and pass the freshly prepared
model to each function waiting for it.
"""
key = (sender._meta.app_label, sender.__name__)
for function in self.pending_operations.pop(key, []):
function(sender)
lazy_model_ops = LazyModelOperations()
| from django.contrib.auth import get_user_model | conditional_block |
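Finally, a sketch of the ModelMixin contract enforced by the metaclass above; the injected field and target model are illustrative:

# Hypothetical mixin: graft a field and a method onto a model defined
# outside the current project.
from django.db import models
from mezzanine.pages.models import Page  # assumed import path for the target model

class PageBadgeMixin(ModelMixin):
    class Meta:
        mixin_for = Page

    badge = models.CharField(max_length=30, blank=True)

    def has_badge(self):
        return bool(self.badge)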
models.py | from __future__ import unicode_literals
from functools import partial
from future.utils import with_metaclass
from django import VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
from django.db.models.signals import class_prepared
from django.utils import six
from mezzanine.utils.importing import import_dotted_path
# Backward compatibility with Django 1.5's "get_user_model".
if VERSION >= (1, 5):
from django.contrib.auth import get_user_model
else:
def get_user_model():
from django.contrib.auth.models import User
return User
# Emulate Django 1.7's exception-raising get_registered_model
# when running under earlier versions
if VERSION >= (1, 7):
from django.apps import apps
get_model = apps.get_model
get_registered_model = apps.get_registered_model
else:
from django.db.models import get_model as django_get_model
def get_model(app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
model = django_get_model(app_label, model_name)
if not model:
raise LookupError
return model
def get_registered_model(app_label, model_name):
model = django_get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if not model:
raise LookupError
return model
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
def base_concrete_model(abstract, instance):
"""
Used in methods of abstract models to find the super-most concrete
(non abstract) model in the inheritance chain that inherits from the
given abstract model. This is so the methods in the abstract model can
query data consistently across the correct concrete model.
Consider the following::
class Abstract(models.Model)
class Meta:
abstract = True
def concrete(self):
return base_concrete_model(Abstract, self)
class Super(Abstract):
pass
class Sub(Super):
pass
sub = Sub.objects.create()
sub.concrete() # returns Super
In actual Mezzanine usage, this allows methods in the ``Displayable`` and
``Orderable`` abstract models to access the ``Page`` instance when
instances of custom content types, (eg: models that inherit from ``Page``)
need to query the ``Page`` model to determine correct values for ``slug``
and ``_order`` which are only relevant in the context of the ``Page``
model and not the model of the custom content type.
"""
for cls in reversed(instance.__class__.__mro__):
if issubclass(cls, abstract) and not cls._meta.abstract:
return cls
return instance.__class__
def upload_to(field_path, default):
"""
Used as the ``upload_to`` arg for file fields - allows for custom
handlers to be implemented on a per field basis defined by the
``UPLOAD_TO_HANDLERS`` setting.
"""
from mezzanine.conf import settings
for k, v in settings.UPLOAD_TO_HANDLERS.items():
if k.lower() == field_path.lower():
return import_dotted_path(v)
return default
class AdminThumbMixin(object):
"""
Provides a thumbnail method on models for admin classes to
reference in the ``list_display`` definition.
"""
admin_thumb_field = None
def admin_thumb(self):
thumb = ""
if self.admin_thumb_field:
thumb = getattr(self, self.admin_thumb_field, "")
if not thumb:
return ""
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
x, y = settings.ADMIN_THUMB_SIZE.split('x')
thumb_url = thumbnail(thumb, x, y)
return "<img src='%s%s'>" % (settings.MEDIA_URL, thumb_url)
admin_thumb.allow_tags = True
admin_thumb.short_description = ""
class ModelMixinBase(type):
"""
Metaclass for ``ModelMixin`` which is used for injecting model
fields and methods into models defined outside of a project.
This currently isn't used anywhere.
"""
def __new__(cls, name, bases, attrs):
"""
Checks for an inner ``Meta`` class with a ``mixin_for``
attribute containing the model that this model will be mixed
into. Once found, copy over any model fields and methods onto
the model being mixed into, and return it as the actual class
definition for the mixin.
"""
if name == "ModelMixin":
# Actual ModelMixin class definition.
return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
try:
mixin_for = attrs.pop("Meta").mixin_for
if not issubclass(mixin_for, Model): | "an inner Meta class with the "
"``mixin_for`` attribute defined, "
"with a value that is a valid model.")
# Copy fields and methods onto the model being mixed into, and
# return it as the definition for the mixin class itself.
for k, v in attrs.items():
if isinstance(v, Field):
v.contribute_to_class(mixin_for, k)
elif k != "__module__":
setattr(mixin_for, k, v)
return mixin_for
class ModelMixin(with_metaclass(ModelMixinBase, object)):
"""
Used as a subclass for mixin models that inject their behaviour onto
models defined outside of a project. The subclass should define an
inner ``Meta`` class with a ``mixin_for`` attribute containing the
model that this mixin will be mixed into.
"""
class LazyModelOperations(object):
"""
This class connects itself to Django's class_prepared signal.
Pass a function and a model or model name to its ``add()`` method,
and the function will be called with the model as its only
parameter once the model has been loaded. If the model is already
loaded, the function is called immediately.
Adapted from ``django.db.models.fields.related`` and used in
``mezzanine.generic.fields``.
"""
def __init__(self):
self.pending_operations = {}
class_prepared.connect(self.signal_receiver)
@staticmethod
def model_key(model_or_name):
"""
Returns an (app_label, model_name) tuple from a model or string.
"""
if isinstance(model_or_name, six.string_types):
app_label, model_name = model_or_name.split(".")
else:
# It's actually a model class.
app_label = model_or_name._meta.app_label
model_name = model_or_name._meta.object_name
return app_label, model_name
def add(self, function, *models_or_names):
"""
The function passed to this method should accept n arguments,
where n=len(models_or_names). When all the models are ready,
the function will be called with the models as arguments, in
the order they appear in this argument list.
"""
# Eagerly parse all model strings so we can fail immediately
# if any are plainly invalid.
model_keys = [self.model_key(m) if not isinstance(m, tuple) else m
for m in models_or_names]
# If this function depends on more than one model, recursively call add
# for each, partially applying the given function on each iteration.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
inner_function = function
function = lambda model: self.add(partial(inner_function, model),
*more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = get_registered_model(*model_key)
except LookupError:
self.pending_operations.setdefault(model_key, []).append(function)
else:
function(model_class)
def signal_receiver(self, sender, **kwargs):
"""
Receive ``class_prepared``, and pass the freshly prepared
model to each function waiting for it.
"""
key = (sender._meta.app_label, sender.__name__)
for function in self.pending_operations.pop(key, []):
function(sender)
lazy_model_ops = LazyModelOperations() | raise TypeError
except (TypeError, KeyError, AttributeError):
raise ImproperlyConfigured("The ModelMixin class '%s' requires " | random_line_split |
test_optimization.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def f(x):
y = g(x)
z = h(y)
return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
# In Python 2 integer division could be on, in which case...
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
| assert not pytest.main([__file__]) | conditional_block |
|
test_optimization.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def f(x):
y = g(x)
z = h(y)
return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
|
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
# In Python 2 integer division could be on, in which case...
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
assert not pytest.main([__file__])
| x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3 | identifier_body |
test_optimization.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def f(x):
y = g(x) | return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
# In Python 2 integer division could be on, in which case...
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
assert not pytest.main([__file__]) | z = h(y) | random_line_split |
test_optimization.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def | (x):
y = g(x)
z = h(y)
return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
# In Python 2 integer division could be on, in which case...
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
assert not pytest.main([__file__])
| f | identifier_name |
accelerator.py |
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
default_cavity_on = False
default_radiation_on = False
default_vchamber_on = False
def create_accelerator(optics_mode=_lattice.default_optics_mode, energy=_lattice.energy):
|
accelerator_data = dict()
accelerator_data['lattice_version'] = 'BO_V06_01'
accelerator_data['global_coupling'] = 0.0002 # expected corrected value
accelerator_data['pressure_profile'] = _np.array([[0, 496.8], [1.5e-8]*2]) # [s [m], p [mbar]]
496.78745
| lattice = _lattice.create_lattice(optics_mode=optics_mode, energy=energy)
accelerator = _pyaccel.accelerator.Accelerator(
lattice=lattice,
energy=energy,
harmonic_number=_lattice.harmonic_number,
cavity_on=default_cavity_on,
radiation_on=default_radiation_on,
vchamber_on=default_vchamber_on
)
return accelerator | identifier_body |
accelerator.py |
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
default_cavity_on = False
default_radiation_on = False
default_vchamber_on = False
def | (optics_mode=_lattice.default_optics_mode, energy=_lattice.energy):
lattice = _lattice.create_lattice(optics_mode=optics_mode, energy=energy)
accelerator = _pyaccel.accelerator.Accelerator(
lattice=lattice,
energy=energy,
harmonic_number=_lattice.harmonic_number,
cavity_on=default_cavity_on,
radiation_on=default_radiation_on,
vchamber_on=default_vchamber_on
)
return accelerator
accelerator_data = dict()
accelerator_data['lattice_version'] = 'BO_V06_01'
accelerator_data['global_coupling'] = 0.0002 # expected corrected value
accelerator_data['pressure_profile'] = _np.array([[0, 496.8], [1.5e-8]*2]) # [s [m], p [mbar]]
496.78745
| create_accelerator | identifier_name |
accelerator.py | import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice |
default_cavity_on = False
default_radiation_on = False
default_vchamber_on = False
def create_accelerator(optics_mode=_lattice.default_optics_mode, energy=_lattice.energy):
lattice = _lattice.create_lattice(optics_mode=optics_mode, energy=energy)
accelerator = _pyaccel.accelerator.Accelerator(
lattice=lattice,
energy=energy,
harmonic_number=_lattice.harmonic_number,
cavity_on=default_cavity_on,
radiation_on=default_radiation_on,
vchamber_on=default_vchamber_on
)
return accelerator
accelerator_data = dict()
accelerator_data['lattice_version'] = 'BO_V06_01'
accelerator_data['global_coupling'] = 0.0002 # expected corrected value
accelerator_data['pressure_profile'] = _np.array([[0, 496.8], [1.5e-8]*2]) # [s [m], p [mbar]]
496.78745 | random_line_split |
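A short usage sketch for the ``create_accelerator`` factory in the accelerator.py rows above -- the flag assignments assume ``pyaccel``'s ``Accelerator`` exposes settable ``cavity_on``/``radiation_on`` attributes, and the call relies on the lattice defaults shown in the rows:

```python
# Minimal sketch: build the booster model with its defaults, then turn on
# the cavity and radiation flags the factory initialises to False above.
bo = create_accelerator()  # optics mode and energy default from _lattice
bo.cavity_on = True
bo.radiation_on = True
print(bo.harmonic_number, bo.energy)
```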
|
player.rs | extern crate serde_redis;
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Player {
#[serde(skip_serializing)]
pub addr: String,
pub name: Option<String>,
pub state: PlayerState
}
impl Player {
pub fn new(addr: String, name: Option<String>) -> Player {
Player {
addr: addr,
name: name,
state: PlayerState::Watching
}
}
|
pub fn format_hand_key(addr: &str, game_id: &str) -> String {
format!("HAND:{}:{}", addr, game_id)
}
pub fn state_key(&self) -> String {
Player::format_state_key(&self.addr)
}
pub fn hand_key(&self, game_id: &str) -> String {
Player::format_hand_key(&self.addr, game_id)
}
}
// States
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(tag = "type")]
pub enum PlayerState {
Watching,
Playing,
Judging,
TimeOut,
Banned,
}
// Transitions | pub fn format_state_key(addr: &str) -> String {
format!("STATE:{}", addr)
} | random_line_split |
player.rs | extern crate serde_redis;
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Player {
#[serde(skip_serializing)]
pub addr: String,
pub name: Option<String>,
pub state: PlayerState
}
impl Player {
pub fn new(addr: String, name: Option<String>) -> Player {
Player {
addr: addr,
name: name,
state: PlayerState::Watching
}
}
pub fn format_state_key(addr: &str) -> String {
format!("STATE:{}", addr)
}
pub fn format_hand_key(addr: &str, game_id: &str) -> String {
format!("HAND:{}:{}", addr, game_id)
}
pub fn state_key(&self) -> String {
Player::format_state_key(&self.addr)
}
pub fn | (&self, game_id: &str) -> String {
Player::format_hand_key(&self.addr, game_id)
}
}
// States
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(tag = "type")]
pub enum PlayerState {
Watching,
Playing,
Judging,
TimeOut,
Banned,
}
// Transitions | hand_key | identifier_name |
index.d.ts | // Type definitions for react-hammerjs 1.0
// Project: https://github.com/JedWatson/react-hammerjs#readme
// Definitions by: Jason Unger <https://github.com/jsonunger>
// Cecchi MacNaughton <https://github.com/cecchi>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.8
import * as Hammer from "hammerjs";
import * as React from "react";
type Omit<T, K> = Pick<T, Exclude<keyof T, K>>;
type HammerOptionsWithRecognizers = Omit<HammerOptions, "recognizers"> & {
recognizers?: { [gesture: string]: RecognizerOptions };
};
declare namespace ReactHammer {
interface ReactHammerProps {
direction?:
| "DIRECTION_NONE"
| "DIRECTION_LEFT"
| "DIRECTION_RIGHT"
| "DIRECTION_UP"
| "DIRECTION_DOWN"
| "DIRECTION_HORIZONTAL"
| "DIRECTION_VERTICAL"
| "DIRECTION_ALL";
options?: HammerOptionsWithRecognizers;
recognizeWith?: { [gesture: string]: Recognizer | string };
vertical?: boolean; | action?: HammerListener;
onDoubleTap?: HammerListener;
onPan?: HammerListener;
onPanCancel?: HammerListener;
onPanEnd?: HammerListener;
onPanStart?: HammerListener;
onPinch?: HammerListener;
onPinchCancel?: HammerListener;
onPinchEnd?: HammerListener;
onPinchIn?: HammerListener;
onPinchStart?: HammerListener;
onPress?: HammerListener;
onPressUp?: HammerListener;
onRotate?: HammerListener;
onRotateCancel?: HammerListener;
onRotateEnd?: HammerListener;
onRotateMove?: HammerListener;
onRotateStart?: HammerListener;
onSwipe?: HammerListener;
onTap?: HammerListener;
}
}
declare const ReactHammer: React.ComponentClass<ReactHammer.ReactHammerProps>;
export = ReactHammer; | random_line_split |
|
BuyTshirt.spec.ts | import { browser } from 'protractor';
import { MenuContentPage } from '../src/page';
import { OrderResumePage } from '../src/page';
import { ProductDetailPage } from '../src/page';
import { ProductAddedModalPage } from '../src/page'; | import { SummaryStepPage } from '../src/page';
import { SignInStepPage } from '../src/page';
import { AddressStepPage } from '../src/page';
import { ShippingStepPage } from '../src/page';
import { PaymentStepPage } from '../src/page';
import { BankPaymentPage } from '../src/page';
import { ProductListPage } from '../src/page';
describe('Open page', () => {
beforeAll(async () => {
await browser.get('http://automationpractice.com/');
});
describe('buy an item', () => {
beforeAll(async () => {
const menuContentPage: MenuContentPage = new MenuContentPage();
const orderResumePage: OrderResumePage = new OrderResumePage();
const productDetailPage: ProductDetailPage = new ProductDetailPage();
const productAddedModalPage: ProductAddedModalPage = new ProductAddedModalPage();
const summaryStepPage: SummaryStepPage = new SummaryStepPage();
await menuContentPage.goToTShirtMenu();
await orderResumePage.selectProduct('Faded Short Sleeve T-shirts');
await productDetailPage.goToAddToCar();
await productAddedModalPage.goToCheckout();
await summaryStepPage.goToProceedToCheckout();
});
describe('login to the app', () => {
beforeAll(async () => {
const signInStepPage: SignInStepPage = new SignInStepPage();
await signInStepPage.login('[email protected]', 'WorkshopProtractor');
});
describe('select address', () => {
beforeAll(async () => {
const addressStepPage: AddressStepPage = new AddressStepPage();
await addressStepPage.goToReviewAddress();
});
describe('pay', () => {
beforeAll(async () => {
const shippingStepPage: ShippingStepPage = new ShippingStepPage();
const paymentStepPage: PaymentStepPage = new PaymentStepPage();
const bankPaymentPage: BankPaymentPage = new BankPaymentPage();
await shippingStepPage.acceptShipping();
await paymentStepPage.goTopaymentOption();
await bankPaymentPage.goToconfirm();
});
it('perfect!!! it is mine', async () => {
const productListPage: ProductListPage = new ProductListPage();
await expect(productListPage.getOrderText()).toBe('Your order on My Store is complete.');
});
});
});
});
});
}); | random_line_split |
|
product.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
class pr | odels.Model):
_inherit = 'product.product'
is_outlet = fields.Boolean('Is outlet', compute='_is_outlet')
normal_product_id = fields.Many2one('product.product', 'normal product')
outlet_product_ids = fields.One2many('product.product',
'normal_product_id',
'Outlet products')
@api.one
def _is_outlet(self):
outlet_cat = self.env.ref('product_outlet.product_category_outlet')
if self.categ_id == outlet_cat or \
self.categ_id.parent_id == outlet_cat:
self.is_outlet = True
else:
self.is_outlet = False
@api.model
def cron_update_outlet_price(self):
outlet_categ_ids = []
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o1').id)
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o2').id)
outlet_products = self.env['product.product'].search([('categ_id', 'in', outlet_categ_ids),
('normal_product_id.list_price', '!=', 0)],
order="id desc")
for product_o in outlet_products:
origin_product = product_o.normal_product_id
price_outlet = origin_product.list_price * (1 - product_o.categ_id.percent / 100)
price_outlet2 = origin_product.list_price2 * (1 - product_o.categ_id.percent / 100)
price_outlet3 = origin_product.list_price3 * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd = origin_product.pvd1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd2 = origin_product.pvd2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd3 = origin_product.pvd3_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi = origin_product.pvi1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi2 = origin_product.pvi2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi3 = origin_product.pvi3_price * (1 - product_o.categ_id.percent / 100)
if round(product_o.list_price, 2) != round(price_outlet, 2) or \
round(product_o.list_price2, 2) != round(price_outlet2, 2) or \
round(product_o.list_price3, 2) != round(price_outlet3, 2) or \
round(product_o.pvd1_price, 2) != round(price_outlet_pvd, 2) or \
round(product_o.pvd2_price, 2) != round(price_outlet_pvd2, 2) or \
round(product_o.pvd3_price, 2) != round(price_outlet_pvd3, 2) or \
round(product_o.pvi1_price, 2) != round(price_outlet_pvi, 2) or \
round(product_o.pvi2_price, 2) != round(price_outlet_pvi2, 2) or \
round(product_o.pvi3_price, 2) != round(price_outlet_pvi3, 2) or \
round(product_o.commercial_cost, 2) != round(origin_product.commercial_cost, 2):
# update all prices
values = {
'standard_price': price_outlet,
'list_price': price_outlet,
'list_price2': price_outlet2,
'list_price3': price_outlet3,
'pvd1_price': price_outlet_pvd,
'pvd2_price': price_outlet_pvd2,
'pvd3_price': price_outlet_pvd3,
'pvi1_price': price_outlet_pvi,
'pvi2_price': price_outlet_pvi2,
'pvi3_price': price_outlet_pvi3,
'commercial_cost': origin_product.commercial_cost,
}
product_o.write(values)
| oduct_product(m | identifier_name |
product.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
class product_product(models.Model):
_inherit = 'product.product'
is_outlet = fields.Boolean('Is outlet', compute='_is_outlet')
normal_product_id = fields.Many2one('product.product', 'normal product')
outlet_product_ids = fields.One2many('product.product',
'normal_product_id',
'Outlet products')
@api.one
def _is_outlet(self):
outlet_cat = self.env.ref('product_outlet.product_category_outlet')
if self.categ_id == outlet_cat or \
self.categ_id.parent_id == outlet_cat:
se | else:
self.is_outlet = False
@api.model
def cron_update_outlet_price(self):
outlet_categ_ids = []
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o1').id)
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o2').id)
outlet_products = self.env['product.product'].search([('categ_id', 'in', outlet_categ_ids),
('normal_product_id.list_price', '!=', 0)],
order="id desc")
for product_o in outlet_products:
origin_product = product_o.normal_product_id
price_outlet = origin_product.list_price * (1 - product_o.categ_id.percent / 100)
price_outlet2 = origin_product.list_price2 * (1 - product_o.categ_id.percent / 100)
price_outlet3 = origin_product.list_price3 * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd = origin_product.pvd1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd2 = origin_product.pvd2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd3 = origin_product.pvd3_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi = origin_product.pvi1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi2 = origin_product.pvi2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi3 = origin_product.pvi3_price * (1 - product_o.categ_id.percent / 100)
if round(product_o.list_price, 2) != round(price_outlet, 2) or \
round(product_o.list_price2, 2) != round(price_outlet2, 2) or \
round(product_o.list_price3, 2) != round(price_outlet3, 2) or \
round(product_o.pvd1_price, 2) != round(price_outlet_pvd, 2) or \
round(product_o.pvd2_price, 2) != round(price_outlet_pvd2, 2) or \
round(product_o.pvd3_price, 2) != round(price_outlet_pvd3, 2) or \
round(product_o.pvi1_price, 2) != round(price_outlet_pvi, 2) or \
round(product_o.pvi2_price, 2) != round(price_outlet_pvi2, 2) or \
round(product_o.pvi3_price, 2) != round(price_outlet_pvi3, 2) or \
round(product_o.commercial_cost, 2) != round(origin_product.commercial_cost, 2):
# update all prices
values = {
'standard_price': price_outlet,
'list_price': price_outlet,
'list_price2': price_outlet2,
'list_price3': price_outlet3,
'pvd1_price': price_outlet_pvd,
'pvd2_price': price_outlet_pvd2,
'pvd3_price': price_outlet_pvd3,
'pvi1_price': price_outlet_pvi,
'pvi2_price': price_outlet_pvi2,
'pvi3_price': price_outlet_pvi3,
'commercial_cost': origin_product.commercial_cost,
}
product_o.write(values)
| lf.is_outlet = True
| conditional_block |
product.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved | # by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
class product_product(models.Model):
_inherit = 'product.product'
is_outlet = fields.Boolean('Is outlet', compute='_is_outlet')
normal_product_id = fields.Many2one('product.product', 'normal product')
outlet_product_ids = fields.One2many('product.product',
'normal_product_id',
'Outlet products')
@api.one
def _is_outlet(self):
outlet_cat = self.env.ref('product_outlet.product_category_outlet')
if self.categ_id == outlet_cat or \
self.categ_id.parent_id == outlet_cat:
self.is_outlet = True
else:
self.is_outlet = False
@api.model
def cron_update_outlet_price(self):
outlet_categ_ids = []
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o1').id)
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o2').id)
outlet_products = self.env['product.product'].search([('categ_id', 'in', outlet_categ_ids),
('normal_product_id.list_price', '!=', 0)],
order="id desc")
for product_o in outlet_products:
origin_product = product_o.normal_product_id
price_outlet = origin_product.list_price * (1 - product_o.categ_id.percent / 100)
price_outlet2 = origin_product.list_price2 * (1 - product_o.categ_id.percent / 100)
price_outlet3 = origin_product.list_price3 * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd = origin_product.pvd1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd2 = origin_product.pvd2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd3 = origin_product.pvd3_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi = origin_product.pvi1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi2 = origin_product.pvi2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi3 = origin_product.pvi3_price * (1 - product_o.categ_id.percent / 100)
if round(product_o.list_price, 2) != round(price_outlet, 2) or \
round(product_o.list_price2, 2) != round(price_outlet2, 2) or \
round(product_o.list_price3, 2) != round(price_outlet3, 2) or \
round(product_o.pvd1_price, 2) != round(price_outlet_pvd, 2) or \
round(product_o.pvd2_price, 2) != round(price_outlet_pvd2, 2) or \
round(product_o.pvd3_price, 2) != round(price_outlet_pvd3, 2) or \
round(product_o.pvi1_price, 2) != round(price_outlet_pvi, 2) or \
round(product_o.pvi2_price, 2) != round(price_outlet_pvi2, 2) or \
round(product_o.pvi3_price, 2) != round(price_outlet_pvi3, 2) or \
round(product_o.commercial_cost, 2) != round(origin_product.commercial_cost, 2):
# update all prices
values = {
'standard_price': price_outlet,
'list_price': price_outlet,
'list_price2': price_outlet2,
'list_price3': price_outlet3,
'pvd1_price': price_outlet_pvd,
'pvd2_price': price_outlet_pvd2,
'pvd3_price': price_outlet_pvd3,
'pvi1_price': price_outlet_pvi,
'pvi2_price': price_outlet_pvi2,
'pvi3_price': price_outlet_pvi3,
'commercial_cost': origin_product.commercial_cost,
}
product_o.write(values) | # $Jesús Ventosinos Mayor <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published | random_line_split |
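The expression repeated throughout ``cron_update_outlet_price`` in the product.py rows above is a flat percentage discount applied to every price field; a worked sketch with made-up numbers:

```python
# Illustrative values only -- not taken from the rows above.
list_price = 80.0
percent = 25.0  # stands in for categ_id.percent on the outlet category
price_outlet = list_price * (1 - percent / 100)
assert round(price_outlet, 2) == 60.0
```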