file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
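Each row below is one fill-in-the-middle (FIM) example: concatenating `prefix`, `middle`, and `suffix` reproduces the original file, and `fim_type` records which of the 4 split strategies carved out the middle (the rows in this excerpt use `random_line_split`, `identifier_name`, and `conditional_block`). A minimal sketch of that invariant, assuming a hypothetical row type whose fields simply mirror the columns above:

```rust
// Sketch only: a hypothetical row type mirroring the dataset columns.
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String, // e.g. "random_line_split"
}

impl FimRow {
    /// Reassembling a row must reproduce the original source file verbatim.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```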
calendar-state.ts
|
import ModulDate from './../../../../utils/modul-date/modul-date';
import { RangeDate, SingleDate } from './abstract-calendar-state';
export enum CalendarEvent {
DAY_SELECT = 'day-select',
DAY_MOUSE_ENTER = 'day-mouse-enter',
DAY_MOUSE_LEAVE = 'day-mouse-leave',
DAY_KEYBOARD_TAB = 'day-keyboard-tab',
MONTH_SELECT = 'month-select',
MONTH_NEXT = 'month-next',
MONTH_PREVIOUS = 'month-previous',
YEAR_SELECT = 'year-select',
YEAR_NEXT = 'year-next',
YEAR_PREVIOUS = 'year-previous',
YEAR_MONTH_SELECT = 'year-month-select'
}
export enum CalendarType {
SINGLE_DATE = 'single-date',
DATE_RANGE = 'date-range'
}
export type DaySelectCallBack = (date: SingleDate | RangeDate) => void;
export default interface CalendarState {
buildCurrentCalendar(): CalendarCurrentState;
onDateSelect(callback: DaySelectCallBack): void;
updateState(value: SingleDate | RangeDate, minDate?: string, maxDate?: string): void;
}
export interface CalendarCurrentState {
calendar: Calendar;
calendarEvents: CalendarEvents;
}
export interface Calendar {
value?: SingleDate | RangeDate;
dates: { min: ModulDate, current: ModulDate, max: ModulDate };
years: YearState[];
months: MonthState[];
yearsMonths: YearMonthState[];
days: DayState[];
type?: CalendarType;
}
export interface CalendarEvents {
[CalendarEvent.DAY_SELECT]: (event: DayState) => void;
[CalendarEvent.DAY_MOUSE_ENTER]: (event: DayState) => void;
[CalendarEvent.DAY_MOUSE_LEAVE]: (event: DayState) => void;
[CalendarEvent.DAY_KEYBOARD_TAB]: (event: DayState) => void;
[CalendarEvent.MONTH_SELECT]: (event: MonthState) => void;
[CalendarEvent.MONTH_PREVIOUS]: (event: Event) => void;
[CalendarEvent.MONTH_NEXT]: (event: Event) => void;
[CalendarEvent.YEAR_SELECT]: (event: YearState) => void;
[CalendarEvent.YEAR_PREVIOUS]: (event: Event) => void;
[CalendarEvent.YEAR_NEXT]: (event: Event) => void;
[CalendarEvent.YEAR_MONTH_SELECT]: (event: Event) => void;
}
export interface YearState {
year: number;
isCurrent: boolean;
}
export interface MonthState {
month: number;
|
isCurrent: boolean;
isDisabled: boolean;
}
export interface YearMonthState {
year: YearState;
months: MonthState[];
}
export interface DayState {
date: ModulDate;
day: number;
month: number;
year: number;
isDisabled: boolean;
isToday: boolean;
isSelected: boolean;
isSelectionStart: boolean;
isSelectionEnd: boolean;
isInPreviousMonth: boolean;
isInNextMonth: boolean;
isHighlighted: boolean;
hasFocus: boolean;
}
|
random_line_split
|
|
Redeem.js
|
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
|
var _jsxRuntime = require("react/jsx-runtime");
var _default = (0, _createSvgIcon.default)( /*#__PURE__*/(0, _jsxRuntime.jsx)("path", {
d: "M20 6h-2.18c.11-.31.18-.65.18-1 0-1.66-1.34-3-3-3-1.05 0-1.96.54-2.5 1.35l-.5.67-.5-.68C10.96 2.54 10.05 2 9 2 7.34 2 6 3.34 6 5c0 .35.07.69.18 1H4c-1.11 0-1.99.89-1.99 2L2 19c0 1.11.89 2 2 2h16c1.11 0 2-.89 2-2V8c0-1.11-.89-2-2-2zm-5-2c.55 0 1 .45 1 1s-.45 1-1 1-1-.45-1-1 .45-1 1-1zM9 4c.55 0 1 .45 1 1s-.45 1-1 1-1-.45-1-1 .45-1 1-1zm11 15H4v-2h16v2zm0-5H4V8h5.08L7 10.83 8.62 12 11 8.76l1-1.36 1 1.36L15.38 12 17 10.83 14.92 8H20v6z"
}), 'Redeem');
exports.default = _default;
|
var _createSvgIcon = _interopRequireDefault(require("./utils/createSvgIcon"));
|
random_line_split
|
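In the Redeem.js row above (`fim_type` is `random_line_split`) the held-out middle is the single `var _createSvgIcon = ...` line, which slots between the prefix's final `exports.default = void 0;` and the suffix's opening `var _jsxRuntime = ...`; once reassembled, `_createSvgIcon` is defined before the suffix's code uses it. A small sketch of that ordering check, using the boundary lines quoted from the row:

```rust
// Sketch: stitch the boundary lines of the Redeem.js row back together.
fn main() {
    let prefix_tail = "exports.default = void 0;\n";
    let middle = r#"var _createSvgIcon = _interopRequireDefault(require("./utils/createSvgIcon"));"#;
    let suffix_head = "\nvar _jsxRuntime = require(\"react/jsx-runtime\");\n";
    let joined = format!("{prefix_tail}{middle}{suffix_head}");
    // The middle defines _createSvgIcon before the suffix refers to _jsxRuntime.
    assert!(joined.find("_createSvgIcon").unwrap() < joined.find("_jsxRuntime").unwrap());
}
```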
lib.rs
|
//! Low-level Rust lexer.
//!
//! The idea with `rustc_lexer` is to make a reusable library,
//! by separating out pure lexing and rustc-specific concerns, like spans,
//! error reporting, and interning. So, rustc_lexer operates directly on `&str`,
//! produces simple tokens which are a pair of type-tag and a bit of original text,
//! and does not report errors, instead storing them as flags on the token.
//!
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax.
//! For that see [`rustc_parse::lexer`], which converts this basic token stream
//! into wide tokens used by the actual parser.
//!
//! The purpose of this crate is to convert raw sources into a labeled sequence
//! of well-known token types, so building an actual Rust token stream will
//! be easier.
//!
//! The main entity of this crate is the [`TokenKind`] enum which represents common
//! lexeme types.
//!
//! [`rustc_parse::lexer`]: ../rustc_parse/lexer/index.html
// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.
mod cursor;
pub mod unescape;
#[cfg(test)]
mod tests;
use self::LiteralKind::*;
use self::TokenKind::*;
use crate::cursor::{Cursor, EOF_CHAR};
use std::convert::TryFrom;
/// Parsed token.
/// It doesn't contain information about data that has been parsed,
/// only the type of the token and its size.
#[derive(Debug)]
pub struct Token {
pub kind: TokenKind,
pub len: usize,
}
impl Token {
fn new(kind: TokenKind, len: usize) -> Token {
Token { kind, len }
}
}
/// Enum representing common lexeme types.
// perf note: Changing all `usize` to `u32` doesn't change performance. See #77629
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Multi-char tokens:
/// "// comment"
LineComment { doc_style: Option<DocStyle> },
/// `/* block comment */`
///
/// Block comments can be recursive, so a sequence like `/* /* */`
/// will not be considered terminated and will result in a parsing error.
BlockComment { doc_style: Option<DocStyle>, terminated: bool },
/// Any whitespace characters sequence.
Whitespace,
/// "ident" or "continue"
/// At this step keywords are also considered identifiers.
Ident,
/// "r#ident"
RawIdent,
/// An unknown prefix like `foo#`, `foo'`, `foo"`. Note that only the
/// prefix (`foo`) is included in the token, not the separator (which is
/// lexed as its own distinct token). In Rust 2021 and later, reserved
/// prefixes are reported as errors; in earlier editions, they result in a
/// (allowed by default) lint, and are treated as regular identifier
/// tokens.
UnknownPrefix,
/// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
Literal { kind: LiteralKind, suffix_start: usize },
/// "'a"
Lifetime { starts_with_number: bool },
// One-char tokens:
/// ";"
Semi,
/// ","
Comma,
/// "."
Dot,
/// "("
OpenParen,
/// ")"
CloseParen,
/// "{"
OpenBrace,
/// "}"
CloseBrace,
/// "["
OpenBracket,
/// "]"
CloseBracket,
/// "@"
At,
/// "#"
Pound,
/// "~"
Tilde,
/// "?"
Question,
/// ":"
Colon,
/// "$"
Dollar,
/// "="
Eq,
/// "!"
Bang,
/// "<"
Lt,
/// ">"
Gt,
/// "-"
Minus,
/// "&"
And,
/// "|"
Or,
/// "+"
Plus,
/// "*"
Star,
/// "/"
Slash,
/// "^"
Caret,
/// "%"
Percent,
/// Unknown token, not expected by the lexer, e.g. "№"
Unknown,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum DocStyle {
Outer,
Inner,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum LiteralKind {
/// "12_u8", "0o100", "0b120i99"
Int { base: Base, empty_int: bool },
/// "12.34f32", "0b100.100"
Float { base: Base, empty_exponent: bool },
/// "'a'", "'\\'", "'''", "';"
Char { terminated: bool },
/// "b'a'", "b'\\'", "b'''", "b';"
Byte { terminated: bool },
/// ""abc"", ""abc"
Str { terminated: bool },
/// "b"abc"", "b"abc"
ByteStr { terminated: bool },
/// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
RawStr { n_hashes: u16, err: Option<RawStrError> },
/// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
RawByteStr { n_hashes: u16, err: Option<RawStrError> },
}
/// Error produced validating a raw string. Represents cases like:
/// - `r##~"abcde"##`: `InvalidStarter`
/// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11) }`
/// - Too many `#`s (>65535): `TooManyDelimiters`
// perf note: It doesn't matter that this makes `Token` 36 bytes bigger. See #77629
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RawStrError {
/// Non `#` characters exist between `r` and `"` eg. `r#~"..`
InvalidStarter { bad_char: char },
/// The string was never terminated. `possible_terminator_offset` is the number of characters after `r` or `br` where they
/// may have intended to terminate it.
NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
/// More than 65535 `#`s exist.
TooManyDelimiters { found: usize },
}
/// Base of numeric literal encoding according to its prefix.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Base {
/// Literal starts with "0b".
Binary,
/// Literal starts with "0o".
Octal,
/// Literal starts with "0x".
Hexadecimal,
/// Literal doesn't contain a prefix.
Decimal,
}
/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
/// but the shebang isn't part of Rust syntax.
pub fn strip_shebang(input: &str) -> Option<usize> {
// Shebang must start with `#!` literally, without any preceding whitespace.
// For simplicity we consider any line starting with `#!` a shebang,
// regardless of restrictions put on shebangs by specific platforms.
if let Some(input_tail) = input.strip_prefix("#!") {
// Ok, this is a shebang but if the next non-whitespace token is `[`,
// then it may be valid Rust code, so consider it Rust code.
let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok| {
!matches!(
tok,
TokenKind::Whitespace
| TokenKind::LineComment { doc_style: None }
| TokenKind::BlockComment { doc_style: None, .. }
)
});
if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
// No other choice than to consider this a shebang.
return Some(2 + input_tail.lines().next().unwrap_or_default().len());
}
}
None
}
/// Parses the first token from the provided input string.
pub fn first_token(input: &str) -> Token {
debug_assert!(!input.is_empty());
Cursor::new(input).advance_token()
}
/// Creates an iterator that produces tokens from the input string.
pub fn tokenize(mut input: &str) -> impl Iterator<Item = Token> + '_ {
std::iter::from_fn(move || {
if input.is_empty() {
return None;
}
let token = first_token(input);
input = &input[token.len..];
Some(token)
})
}
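// Editor's sketch (not part of the upstream crate): a hedged usage example for
// `tokenize`; each yielded `Token` covers the next `len` bytes of the input.
#[cfg(test)]
mod tokenize_usage_sketch {
use super::*;
#[test]
fn splits_source_into_consecutive_tokens() {
let src = "let x = 42;";
let mut rest = src;
for token in tokenize(src) {
// `len` slices this token's text off the front of the remaining input.
let (text, tail) = rest.split_at(token.len);
rest = tail;
println!("{:?}: {:?}", token.kind, text);
}
assert!(rest.is_empty());
}
}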
/// True if `c` is considered whitespace according to the Rust language definition.
/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
/// for definitions of these classes.
pub fn is_whitespace(c: char) -> bool {
// This is Pattern_White_Space.
//
// Note that this set is stable (ie, it doesn't change with different
// Unicode versions), so it's ok to just hard-code the values.
matches!(
c,
// Usual ASCII suspects
'\u{0009}' // \t
| '\u{000A}' // \n
| '\u{000B}' // vertical tab
| '\u{000C}' // form feed
| '\u{000D}' // \r
| '\u{0020}' // space
// NEXT LINE from latin1
| '\u{0085}'
// Bidi markers
| '\u{200E}' // LEFT-TO-RIGHT MARK
| '\u{200F}' // RIGHT-TO-LEFT MARK
// Dedicated whitespace characters from Unicode
| '\u{2028}' // LINE SEPARATOR
| '\u{2029}' // PARAGRAPH SEPARATOR
)
}
/// True if `c` is valid as a first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier name.
pub fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
c == '_' || unicode_xid::UnicodeXID::is_xid_start(c)
}
/// True if `c` is valid as a non-first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier name.
pub fn is_id_continue(c: char) -> bool {
unicode_xid::UnicodeXID::is_xid_continue(c)
}
/// True if the passed string is lexically an identifier.
pub fn is_ident(string: &str) -> bool {
let mut chars = string.chars();
if let Some(start) = chars.next() {
is_id_start(start) && chars.all(is_id_continue)
} else {
false
}
}
impl Cursor<'_> {
/// Parses a token from the input string.
fn advance_token(&mut self) -> Token {
let first_char = self.bump().unwrap();
let token_kind = match first_char {
// Slash, comment or block comment.
'/' => match self.first() {
'/' => self.line_comment(),
'*' => self.block_comment(),
_ => Slash,
},
// Whitespace sequence.
c if is_whitespace(c) => self.whitespace(),
// Raw identifier, raw string literal or identifier.
'r' => match (self.first(), self.second()) {
('#', c1) if is_id_start(c1) => self.raw_ident(),
('#', _) | ('"', _) => {
let (n_hashes, err) = self.raw_double_quoted_string(1);
let suffix_start = self.len_consumed();
if err.is_none() {
self.eat_literal_suffix();
}
let kind = RawStr { n_hashes, err };
Literal { kind, suffix_start }
}
_ => self.ident_or_unknown_prefix(),
},
// Byte literal, byte string literal, raw byte string literal or identifier.
'b' => match (self.first(), self.second()) {
('\'', _) => {
self.bump();
let terminated = self.single_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Byte { terminated };
Literal { kind, suffix_start }
}
('"', _) => {
self.bump();
let terminated = self.double_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = ByteStr { terminated };
Literal { kind, suffix_start }
}
('r', '"') | ('r', '#') => {
self.bump();
let (n_hashes, err) = self.raw_double_quoted_string(2);
let suffix_start = self.len_consumed();
if err.is_none() {
self.eat_literal_suffix();
}
let kind = RawByteStr { n_hashes, err };
Literal { kind, suffix_start }
}
_ => self.ident_or_unknown_prefix(),
},
// Identifier (this should be checked after other variant that can
// start as identifier).
c if is_id_start(c) => self.ident_or_unknown_prefix(),
// Numeric literal.
c @ '0'..='9' => {
let literal_kind = self.number(c);
let suffix_start = self.len_consumed();
self.eat_literal_suffix();
TokenKind::Literal { kind: literal_kind, suffix_start }
}
// One-symbol tokens.
';' => Semi,
',' => Comma,
'.' => Dot,
'(' => OpenParen,
')' => CloseParen,
'{' => OpenBrace,
'}' => CloseBrace,
'[' => OpenBracket,
']' => CloseBracket,
'@' => At,
'#' => Pound,
'~' => Tilde,
'?' => Question,
':' => Colon,
'$' => Dollar,
'=' => Eq,
'!' => Bang,
'<' => Lt,
'>' => Gt,
'-' => Minus,
'&' => And,
'|' => Or,
'+' => Plus,
'*' => Star,
'^' => Caret,
'%' => Percent,
// Lifetime or character literal.
'\'' => self.lifetime_or_char(),
// String literal.
'"' => {
let terminated = self.double_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Str { terminated };
Literal { kind, suffix_start }
}
_ => Unknown,
};
Token::new(token_kind, self.len_consumed())
}
fn line_comment(&mut self) -> TokenKind {
debug_assert!(self.prev() == '/' && self.first() == '/');
self.bump();
let doc_style = match self.first() {
// `//!` is an inner line doc comment.
'!' => Some(DocStyle::Inner),
// `////` (more than 3 slashes) is not considered a doc comment.
'/' if self.second() != '/' => Some(DocStyle::Outer),
_ => None,
};
self.eat_while(|c| c != '\n');
LineComment { doc_style }
}
fn block_comment(&mut self) -> TokenKind {
debug_assert!(self.prev() == '/' && self.first() == '*');
self.bump();
let doc_style = match self.first() {
// `/*!` is an inner block doc comment.
'!' => Some(DocStyle::Inner),
// `/***` (more than 2 stars) is not considered a doc comment.
// `/**/` is not considered a doc comment.
'*' if !matches!(self.second(), '*' | '/') => Some(DocStyle::Outer),
_ => None,
};
let mut depth = 1usize;
while let Some(c) = self.bump() {
match c {
'/' if self.first() == '*' => {
self.bump();
depth += 1;
}
'*' if self.first() == '/' => {
self.bump();
depth -= 1;
if depth == 0 {
// This block comment is closed, so for a construction like "/* */ */"
// there will be a successfully parsed block comment "/* */"
// and " */" will be processed separately.
break;
}
}
_ => (),
}
}
BlockComment { doc_style, terminated: depth == 0 }
}
fn whitespace(&mut self) -> TokenKind {
debug_assert!(is_whitespace(self.prev()));
self.eat_while(is_whitespace);
Whitespace
}
fn raw_ident(&mut self) -> TokenKind {
debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second()));
// Eat "#" symbol.
self.bump();
// Eat the identifier part of RawIdent.
self.eat_identifier();
RawIdent
}
fn ident_or_unknown_prefix(&mut self) -> TokenKind {
debug_assert!(is_id_start(self.prev()));
// Start is already eaten, eat the rest of identifier.
self.eat_while(is_id_continue);
// Known prefixes must have been handled earlier. So if
// we see a prefix here, it is definitely an unknown prefix.
match self.first() {
'#' | '"' | '\'' => UnknownPrefix,
_ => Ident,
}
}
fn nu
|
mut self, first_digit: char) -> LiteralKind {
debug_assert!('0' <= self.prev() && self.prev() <= '9');
let mut base = Base::Decimal;
if first_digit == '0' {
// Attempt to parse encoding base.
let has_digits = match self.first() {
'b' => {
base = Base::Binary;
self.bump();
self.eat_decimal_digits()
}
'o' => {
base = Base::Octal;
self.bump();
self.eat_decimal_digits()
}
'x' => {
base = Base::Hexadecimal;
self.bump();
self.eat_hexadecimal_digits()
}
// Not a base prefix.
'0'..='9' | '_' | '.' | 'e' | 'E' => {
self.eat_decimal_digits();
true
}
// Just a 0.
_ => return Int { base, empty_int: false },
};
// Base prefix was provided, but there were no digits
// after it, e.g. "0x".
if !has_digits {
return Int { base, empty_int: true };
}
} else {
// No base prefix, parse number in the usual way.
self.eat_decimal_digits();
};
match self.first() {
// Don't be greedy if this is actually an
// integer literal followed by field/method access or a range pattern
// (`0..2` and `12.foo()`)
'.' if self.second() != '.' && !is_id_start(self.second()) => {
// might have stuff after the ., and if it does, it needs to start
// with a number
self.bump();
let mut empty_exponent = false;
if self.first().is_digit(10) {
self.eat_decimal_digits();
match self.first() {
'e' | 'E' => {
self.bump();
empty_exponent = !self.eat_float_exponent();
}
_ => (),
}
}
Float { base, empty_exponent }
}
'e' | 'E' => {
self.bump();
let empty_exponent = !self.eat_float_exponent();
Float { base, empty_exponent }
}
_ => Int { base, empty_int: false },
}
}
fn lifetime_or_char(&mut self) -> TokenKind {
debug_assert!(self.prev() == '\'');
let can_be_a_lifetime = if self.second() == '\'' {
// It's surely not a lifetime.
false
} else {
// If the first symbol is valid for identifier, it can be a lifetime.
// Also check if it's a number for a better error reporting (so '0 will
// be reported as invalid lifetime and not as unterminated char literal).
is_id_start(self.first()) || self.first().is_digit(10)
};
if !can_be_a_lifetime {
let terminated = self.single_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Char { terminated };
return Literal { kind, suffix_start };
}
// Either a lifetime or a character literal with
// length greater than 1.
let starts_with_number = self.first().is_digit(10);
// Skip the literal contents.
// First symbol can be a number (which isn't a valid identifier start),
// so skip it without any checks.
self.bump();
self.eat_while(is_id_continue);
// Check if after skipping literal contents we've met a closing
// single quote (which means that user attempted to create a
// string with single quotes).
if self.first() == '\'' {
self.bump();
let kind = Char { terminated: true };
Literal { kind, suffix_start: self.len_consumed() }
} else {
Lifetime { starts_with_number }
}
}
fn single_quoted_string(&mut self) -> bool {
debug_assert!(self.prev() == '\'');
// Check if it's a one-symbol literal.
if self.second() == '\'' && self.first() != '\\' {
self.bump();
self.bump();
return true;
}
// Literal has more than one symbol.
// Parse until either quotes are terminated or error is detected.
loop {
match self.first() {
// Quotes are terminated, finish parsing.
'\'' => {
self.bump();
return true;
}
// Probably beginning of the comment, which we don't want to include
// to the error report.
'/' => break,
// Newline without following '\'' means unclosed quote, stop parsing.
'\n' if self.second() != '\'' => break,
// End of file, stop parsing.
EOF_CHAR if self.is_eof() => break,
// Escaped slash is considered one character, so bump twice.
'\\' => {
self.bump();
self.bump();
}
// Skip the character.
_ => {
self.bump();
}
}
}
// String was not terminated.
false
}
/// Eats double-quoted string and returns true
/// if string is terminated.
fn double_quoted_string(&mut self) -> bool {
debug_assert!(self.prev() == '"');
while let Some(c) = self.bump() {
match c {
'"' => {
return true;
}
'\\' if self.first() == '\\' || self.first() == '"' => {
// Bump again to skip escaped character.
self.bump();
}
_ => (),
}
}
// End of file reached.
false
}
/// Eats the double-quoted string and returns `n_hashes` and an error if encountered.
fn raw_double_quoted_string(&mut self, prefix_len: usize) -> (u16, Option<RawStrError>) {
// Wrap the actual function to handle the error with too many hashes.
// This way, it eats the whole raw string.
let (n_hashes, err) = self.raw_string_unvalidated(prefix_len);
// Only up to 65535 `#`s are allowed in raw strings
match u16::try_from(n_hashes) {
Ok(num) => (num, err),
// We lie about the number of hashes here :P
Err(_) => (0, Some(RawStrError::TooManyDelimiters { found: n_hashes })),
}
}
fn raw_string_unvalidated(&mut self, prefix_len: usize) -> (usize, Option<RawStrError>) {
debug_assert!(self.prev() == 'r');
let start_pos = self.len_consumed();
let mut possible_terminator_offset = None;
let mut max_hashes = 0;
// Count opening '#' symbols.
let mut eaten = 0;
while self.first() == '#' {
eaten += 1;
self.bump();
}
let n_start_hashes = eaten;
// Check that string is started.
match self.bump() {
Some('"') => (),
c => {
let c = c.unwrap_or(EOF_CHAR);
return (n_start_hashes, Some(RawStrError::InvalidStarter { bad_char: c }));
}
}
// Skip the string contents and on each '#' character met, check if this is
// a raw string termination.
loop {
self.eat_while(|c| c != '"');
if self.is_eof() {
return (
n_start_hashes,
Some(RawStrError::NoTerminator {
expected: n_start_hashes,
found: max_hashes,
possible_terminator_offset,
}),
);
}
// Eat closing double quote.
self.bump();
// Check that amount of closing '#' symbols
// is equal to the amount of opening ones.
// Note that this will not consume extra trailing `#` characters:
// `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
// followed by a `#` token.
let mut n_end_hashes = 0;
while self.first() == '#' && n_end_hashes < n_start_hashes {
n_end_hashes += 1;
self.bump();
}
if n_end_hashes == n_start_hashes {
return (n_start_hashes, None);
} else if n_end_hashes > max_hashes {
// Keep track of possible terminators to give a hint about
// where there might be a missing terminator
possible_terminator_offset =
Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
max_hashes = n_end_hashes;
}
}
}
fn eat_decimal_digits(&mut self) -> bool {
let mut has_digits = false;
loop {
match self.first() {
'_' => {
self.bump();
}
'0'..='9' => {
has_digits = true;
self.bump();
}
_ => break,
}
}
has_digits
}
fn eat_hexadecimal_digits(&mut self) -> bool {
let mut has_digits = false;
loop {
match self.first() {
'_' => {
self.bump();
}
'0'..='9' | 'a'..='f' | 'A'..='F' => {
has_digits = true;
self.bump();
}
_ => break,
}
}
has_digits
}
/// Eats the float exponent. Returns true if at least one digit was met,
/// and returns false otherwise.
fn eat_float_exponent(&mut self) -> bool {
debug_assert!(self.prev() == 'e' || self.prev() == 'E');
if self.first() == '-' || self.first() == '+' {
self.bump();
}
self.eat_decimal_digits()
}
// Eats the suffix of the literal, e.g. "_u8".
fn eat_literal_suffix(&mut self) {
self.eat_identifier();
}
// Eats the identifier.
fn eat_identifier(&mut self) {
if !is_id_start(self.first()) {
return;
}
self.bump();
self.eat_while(is_id_continue);
}
/// Eats symbols while predicate returns true or until the end of file is reached.
fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) {
while predicate(self.first()) && !self.is_eof() {
self.bump();
}
}
}
|
mber(&
|
identifier_name
|
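The lib.rs row above has `fim_type` `identifier_name`: the split lands inside an identifier, so the prefix ends with `fn nu`, the held-out middle is `mber(&`, and the suffix resumes with `mut self, first_digit: char)`. A hedged check that the three pieces reassemble into the original signature:

```rust
// Sketch: the identifier_name pieces from the row above, stitched back together.
fn main() {
    let prefix_tail = "fn nu";
    let middle = "mber(&";
    let suffix_head = "mut self, first_digit: char) -> LiteralKind {";
    let signature = format!("{prefix_tail}{middle}{suffix_head}");
    assert_eq!(signature, "fn number(&mut self, first_digit: char) -> LiteralKind {");
}
```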
lib.rs
|
//! Low-level Rust lexer.
//!
//! The idea with `rustc_lexer` is to make a reusable library,
//! by separating out pure lexing and rustc-specific concerns, like spans,
//! error reporting, and interning. So, rustc_lexer operates directly on `&str`,
//! produces simple tokens which are a pair of type-tag and a bit of original text,
//! and does not report errors, instead storing them as flags on the token.
//!
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax.
//! For that see [`rustc_parse::lexer`], which converts this basic token stream
//! into wide tokens used by the actual parser.
//!
//! The purpose of this crate is to convert raw sources into a labeled sequence
//! of well-known token types, so building an actual Rust token stream will
//! be easier.
//!
//! The main entity of this crate is the [`TokenKind`] enum which represents common
//! lexeme types.
//!
//! [`rustc_parse::lexer`]: ../rustc_parse/lexer/index.html
// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.
mod cursor;
pub mod unescape;
#[cfg(test)]
mod tests;
use self::LiteralKind::*;
use self::TokenKind::*;
use crate::cursor::{Cursor, EOF_CHAR};
use std::convert::TryFrom;
/// Parsed token.
/// It doesn't contain information about data that has been parsed,
/// only the type of the token and its size.
#[derive(Debug)]
pub struct Token {
pub kind: TokenKind,
pub len: usize,
}
impl Token {
fn new(kind: TokenKind, len: usize) -> Token {
Token { kind, len }
}
}
/// Enum representing common lexeme types.
// perf note: Changing all `usize` to `u32` doesn't change performance. See #77629
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Multi-char tokens:
/// "// comment"
LineComment { doc_style: Option<DocStyle> },
/// `/* block comment */`
///
/// Block comments can be recursive, so a sequence like `/* /* */`
/// will not be considered terminated and will result in a parsing error.
BlockComment { doc_style: Option<DocStyle>, terminated: bool },
/// Any whitespace characters sequence.
Whitespace,
/// "ident" or "continue"
/// At this step keywords are also considered identifiers.
Ident,
/// "r#ident"
RawIdent,
/// An unknown prefix like `foo#`, `foo'`, `foo"`. Note that only the
/// prefix (`foo`) is included in the token, not the separator (which is
/// lexed as its own distinct token). In Rust 2021 and later, reserved
/// prefixes are reported as errors; in earlier editions, they result in a
/// (allowed by default) lint, and are treated as regular identifier
/// tokens.
UnknownPrefix,
/// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
Literal { kind: LiteralKind, suffix_start: usize },
/// "'a"
Lifetime { starts_with_number: bool },
// One-char tokens:
/// ";"
Semi,
/// ","
Comma,
/// "."
Dot,
/// "("
OpenParen,
/// ")"
CloseParen,
/// "{"
OpenBrace,
/// "}"
CloseBrace,
/// "["
OpenBracket,
/// "]"
CloseBracket,
/// "@"
At,
/// "#"
Pound,
/// "~"
Tilde,
/// "?"
Question,
/// ":"
Colon,
/// "$"
Dollar,
/// "="
Eq,
/// "!"
Bang,
/// "<"
Lt,
/// ">"
Gt,
/// "-"
Minus,
/// "&"
And,
/// "|"
Or,
/// "+"
Plus,
/// "*"
Star,
/// "/"
Slash,
/// "^"
Caret,
/// "%"
Percent,
/// Unknown token, not expected by the lexer, e.g. "№"
Unknown,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum DocStyle {
Outer,
Inner,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum LiteralKind {
/// "12_u8", "0o100", "0b120i99"
Int { base: Base, empty_int: bool },
/// "12.34f32", "0b100.100"
Float { base: Base, empty_exponent: bool },
/// "'a'", "'\\'", "'''", "';"
Char { terminated: bool },
/// "b'a'", "b'\\'", "b'''", "b';"
Byte { terminated: bool },
/// ""abc"", ""abc"
Str { terminated: bool },
/// "b"abc"", "b"abc"
ByteStr { terminated: bool },
/// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
RawStr { n_hashes: u16, err: Option<RawStrError> },
/// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
RawByteStr { n_hashes: u16, err: Option<RawStrError> },
}
/// Error produced validating a raw string. Represents cases like:
/// - `r##~"abcde"##`: `InvalidStarter`
/// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11) }`
/// - Too many `#`s (>65535): `TooManyDelimiters`
// perf note: It doesn't matter that this makes `Token` 36 bytes bigger. See #77629
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RawStrError {
/// Non `#` characters exist between `r` and `"` eg. `r#~"..`
InvalidStarter { bad_char: char },
/// The string was never terminated. `possible_terminator_offset` is the number of characters after `r` or `br` where they
/// may have intended to terminate it.
NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
/// More than 65535 `#`s exist.
TooManyDelimiters { found: usize },
}
/// Base of numeric literal encoding according to its prefix.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Base {
/// Literal starts with "0b".
Binary,
/// Literal starts with "0o".
Octal,
/// Literal starts with "0x".
Hexadecimal,
/// Literal doesn't contain a prefix.
Decimal,
}
/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
/// but the shebang isn't part of Rust syntax.
pub fn strip_shebang(input: &str) -> Option<usize> {
// Shebang must start with `#!` literally, without any preceding whitespace.
// For simplicity we consider any line starting with `#!` a shebang,
// regardless of restrictions put on shebangs by specific platforms.
if let Some(input_tail) = input.strip_prefix("#!") {
// Ok, this is a shebang but if the next non-whitespace token is `[`,
// then it may be valid Rust code, so consider it Rust code.
let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok| {
!matches!(
tok,
TokenKind::Whitespace
| TokenKind::LineComment { doc_style: None }
| TokenKind::BlockComment { doc_style: None, .. }
)
});
if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
// No other choice than to consider this a shebang.
return Some(2 + input_tail.lines().next().unwrap_or_default().len());
}
}
None
}
/// Parses the first token from the provided input string.
pub fn first_token(input: &str) -> Token {
debug_assert!(!input.is_empty());
Cursor::new(input).advance_token()
}
/// Creates an iterator that produces tokens from the input string.
pub fn tokenize(mut input: &str) -> impl Iterator<Item = Token> + '_ {
std::iter::from_fn(move || {
if input.is_empty() {
return None;
}
let token = first_token(input);
input = &input[token.len..];
Some(token)
})
}
/// True if `c` is considered whitespace according to the Rust language definition.
/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
/// for definitions of these classes.
pub fn is_whitespace(c: char) -> bool {
// This is Pattern_White_Space.
//
// Note that this set is stable (ie, it doesn't change with different
// Unicode versions), so it's ok to just hard-code the values.
matches!(
c,
// Usual ASCII suspects
'\u{0009}' // \t
| '\u{000A}' // \n
| '\u{000B}' // vertical tab
| '\u{000C}' // form feed
| '\u{000D}' // \r
| '\u{0020}' // space
// NEXT LINE from latin1
| '\u{0085}'
// Bidi markers
| '\u{200E}' // LEFT-TO-RIGHT MARK
| '\u{200F}' // RIGHT-TO-LEFT MARK
// Dedicated whitespace characters from Unicode
| '\u{2028}' // LINE SEPARATOR
| '\u{2029}' // PARAGRAPH SEPARATOR
)
}
/// True if `c` is valid as a first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier name.
pub fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
c == '_' || unicode_xid::UnicodeXID::is_xid_start(c)
}
/// True if `c` is valid as a non-first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier name.
pub fn is_id_continue(c: char) -> bool {
unicode_xid::UnicodeXID::is_xid_continue(c)
}
/// True if the passed string is lexically an identifier.
pub fn is_ident(string: &str) -> bool {
let mut chars = string.chars();
if let Some(start) = chars.next() {
is_id_start(start) && chars.all(is_id_continue)
} else {
false
}
}
impl Cursor<'_> {
/// Parses a token from the input string.
fn advance_token(&mut self) -> Token {
let first_char = self.bump().unwrap();
let token_kind = match first_char {
// Slash, comment or block comment.
'/' => match self.first() {
'/' => self.line_comment(),
'*' => self.block_comment(),
_ => Slash,
},
// Whitespace sequence.
c if is_whitespace(c) => self.whitespace(),
// Raw identifier, raw string literal or identifier.
'r' => match (self.first(), self.second()) {
('#', c1) if is_id_start(c1) => self.raw_ident(),
('#', _) | ('"', _) => {
let (n_hashes, err) = self.raw_double_quoted_string(1);
let suffix_start = self.len_consumed();
if err.is_none() {
self.eat_literal_suffix();
}
let kind = RawStr { n_hashes, err };
Literal { kind, suffix_start }
}
_ => self.ident_or_unknown_prefix(),
},
// Byte literal, byte string literal, raw byte string literal or identifier.
'b' => match (self.first(), self.second()) {
('\'', _) => {
self.bump();
let terminated = self.single_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Byte { terminated };
Literal { kind, suffix_start }
}
('"', _) => {
self.bump();
let terminated = self.double_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = ByteStr { terminated };
Literal { kind, suffix_start }
}
('r', '"') | ('r', '#') => {
self.bump();
let (n_hashes, err) = self.raw_double_quoted_string(2);
let suffix_start = self.len_consumed();
if err.is_none() {
self.eat_literal_suffix();
}
let kind = RawByteStr { n_hashes, err };
Literal { kind, suffix_start }
}
_ => self.ident_or_unknown_prefix(),
},
// Identifier (this should be checked after other variant that can
// start as identifier).
c if is_id_start(c) => self.ident_or_unknown_prefix(),
// Numeric literal.
c @ '0'..='9' => {
let literal_kind = self.number(c);
let suffix_start = self.len_consumed();
self.eat_literal_suffix();
TokenKind::Literal { kind: literal_kind, suffix_start }
}
// One-symbol tokens.
';' => Semi,
',' => Comma,
'.' => Dot,
'(' => OpenParen,
')' => CloseParen,
'{' => OpenBrace,
'}' => CloseBrace,
'[' => OpenBracket,
']' => CloseBracket,
'@' => At,
'#' => Pound,
'~' => Tilde,
'?' => Question,
':' => Colon,
'$' => Dollar,
'=' => Eq,
'!' => Bang,
'<' => Lt,
'>' => Gt,
'-' => Minus,
'&' => And,
'|' => Or,
'+' => Plus,
'*' => Star,
'^' => Caret,
'%' => Percent,
// Lifetime or character literal.
'\'' => self.lifetime_or_char(),
// String literal.
'"' => {
let terminated = self.double_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Str { terminated };
Literal { kind, suffix_start }
}
_ => Unknown,
};
Token::new(token_kind, self.len_consumed())
}
fn line_comment(&mut self) -> TokenKind {
debug_assert!(self.prev() == '/' && self.first() == '/');
self.bump();
let doc_style = match self.first() {
// `//!` is an inner line doc comment.
'!' => Some(DocStyle::Inner),
// `////` (more than 3 slashes) is not considered a doc comment.
'/' if self.second() != '/' => Some(DocStyle::Outer),
_ => None,
};
self.eat_while(|c| c != '\n');
LineComment { doc_style }
}
fn block_comment(&mut self) -> TokenKind {
debug_assert!(self.prev() == '/' && self.first() == '*');
self.bump();
let doc_style = match self.first() {
// `/*!` is an inner block doc comment.
'!' => Some(DocStyle::Inner),
// `/***` (more than 2 stars) is not considered a doc comment.
// `/**/` is not considered a doc comment.
'*' if !matches!(self.second(), '*' | '/') => Some(DocStyle::Outer),
_ => None,
};
let mut depth = 1usize;
while let Some(c) = self.bump() {
match c {
'/' if self.first() == '*' => {
self.bump();
depth += 1;
}
'*' if self.first() == '/' => {
self.bump();
depth -= 1;
if depth == 0 {
// This block comment is closed, so for a construction like "/* */ */"
// there will be a successfully parsed block comment "/* */"
// and " */" will be processed separately.
break;
}
}
_ => (),
}
}
BlockComment { doc_style, terminated: depth == 0 }
}
fn whitespace(&mut self) -> TokenKind {
debug_assert!(is_whitespace(self.prev()));
self.eat_while(is_whitespace);
Whitespace
}
fn raw_ident(&mut self) -> TokenKind {
debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second()));
// Eat "#" symbol.
self.bump();
// Eat the identifier part of RawIdent.
self.eat_identifier();
RawIdent
}
fn ident_or_unknown_prefix(&mut self) -> TokenKind {
debug_assert!(is_id_start(self.prev()));
// Start is already eaten, eat the rest of identifier.
self.eat_while(is_id_continue);
// Known prefixes must have been handled earlier. So if
// we see a prefix here, it is definitely an unknown prefix.
match self.first() {
'#' | '"' | '\'' => UnknownPrefix,
_ => Ident,
}
}
fn number(&mut self, first_digit: char) -> LiteralKind {
debug_assert!('0' <= self.prev() && self.prev() <= '9');
let mut base = Base::Decimal;
if first_digit == '0' {
// Attempt to parse encoding base.
let has_digits = match self.first() {
'b' => {
base = Base::Binary;
self.bump();
self.eat_decimal_digits()
}
'o' => {
base = Base::Octal;
self.bump();
self.eat_decimal_digits()
}
'x' => {
base = Base::Hexadecimal;
self.bump();
self.eat_hexadecimal_digits()
}
// Not a base prefix.
'0'..='9' | '_' | '.' | 'e' | 'E' => {
self.eat_decimal_digits();
true
}
// Just a 0.
_ => return Int { base, empty_int: false },
};
// Base prefix was provided, but there were no digits
// after it, e.g. "0x".
if !has_digits {
return Int { base, empty_int: true };
}
} else {
// No base prefix, parse number in the usual way.
self.eat_decimal_digits();
};
match self.first() {
// Don't be greedy if this is actually an
// integer literal followed by field/method access or a range pattern
// (`0..2` and `12.foo()`)
'.' if self.second() != '.' && !is_id_start(self.second()) => {
// might have stuff after the ., and if it does, it needs to start
// with a number
self.bump();
let mut empty_exponent = false;
if self.first().is_digit(10) {
self.eat_decimal_digits();
match self.first() {
'e' | 'E' => {
self.bump();
empty_exponent = !self.eat_float_exponent();
}
_ => (),
}
}
Float { base, empty_exponent }
}
'e' | 'E' => {
self.bump();
let empty_exponent = !self.eat_float_exponent();
Float { base, empty_exponent }
}
_ => Int { base, empty_int: false },
}
}
fn lifetime_or_char(&mut self) -> TokenKind {
debug_assert!(self.prev() == '\'');
let can_be_a_lifetime = if self.second() == '\'' {
// It's surely not a lifetime.
false
} else {
// If the first symbol is valid for identifier, it can be a lifetime.
// Also check if it's a number for a better error reporting (so '0 will
// be reported as invalid lifetime and not as unterminated char literal).
is_id_start(self.first()) || self.first().is_digit(10)
};
if !can_be_a_lifetime {
let terminated = self.single_quoted_string();
let suffix_start = self.len_consumed();
if terminated {
self.eat_literal_suffix();
}
let kind = Char { terminated };
return Literal { kind, suffix_start };
}
// Either a lifetime or a character literal with
// length greater than 1.
let starts_with_number = self.first().is_digit(10);
// Skip the literal contents.
// First symbol can be a number (which isn't a valid identifier start),
// so skip it without any checks.
self.bump();
self.eat_while(is_id_continue);
// Check if after skipping literal contents we've met a closing
// single quote (which means that user attempted to create a
// string with single quotes).
if self.first() == '\'' {
self.bump();
let kind = Char { terminated: true };
Literal { kind, suffix_start: self.len_consumed() }
} else {
Lifetime { starts_with_number }
}
}
fn single_quoted_string(&mut self) -> bool {
debug_assert!(self.prev() == '\'');
// Check if it's a one-symbol literal.
if self.second() == '\'' && self.first() != '\\' {
self.bump();
self.bump();
return true;
}
// Literal has more than one symbol.
// Parse until either quotes are terminated or error is detected.
loop {
match self.first() {
// Quotes are terminated, finish parsing.
'\'' => {
self.bump();
return true;
}
// Probably beginning of the comment, which we don't want to include
// to the error report.
'/' => break,
// Newline without following '\'' means unclosed quote, stop parsing.
'\n' if self.second() != '\'' => break,
// End of file, stop parsing.
EOF_CHAR if self.is_eof() => break,
// Escaped slash is considered one character, so bump twice.
'\\' => {
self.bump();
self.bump();
}
// Skip the character.
_ => {
self.bump();
}
}
}
// String was not terminated.
false
}
/// Eats double-quoted string and returns true
/// if string is terminated.
fn double_quoted_string(&mut self) -> bool {
debug_assert!(self.prev() == '"');
while let Some(c) = self.bump() {
match c {
'"' => {
return true;
}
'\\' if self.first() == '\\' || self.first() == '"' => {
|
self.bump();
}
_ => (),
}
}
// End of file reached.
false
}
/// Eats the double-quoted string and returns `n_hashes` and an error if encountered.
fn raw_double_quoted_string(&mut self, prefix_len: usize) -> (u16, Option<RawStrError>) {
// Wrap the actual function to handle the error with too many hashes.
// This way, it eats the whole raw string.
let (n_hashes, err) = self.raw_string_unvalidated(prefix_len);
// Only up to 65535 `#`s are allowed in raw strings
match u16::try_from(n_hashes) {
Ok(num) => (num, err),
// We lie about the number of hashes here :P
Err(_) => (0, Some(RawStrError::TooManyDelimiters { found: n_hashes })),
}
}
fn raw_string_unvalidated(&mut self, prefix_len: usize) -> (usize, Option<RawStrError>) {
debug_assert!(self.prev() == 'r');
let start_pos = self.len_consumed();
let mut possible_terminator_offset = None;
let mut max_hashes = 0;
// Count opening '#' symbols.
let mut eaten = 0;
while self.first() == '#' {
eaten += 1;
self.bump();
}
let n_start_hashes = eaten;
// Check that string is started.
match self.bump() {
Some('"') => (),
c => {
let c = c.unwrap_or(EOF_CHAR);
return (n_start_hashes, Some(RawStrError::InvalidStarter { bad_char: c }));
}
}
// Skip the string contents and on each '#' character met, check if this is
// a raw string termination.
loop {
self.eat_while(|c| c != '"');
if self.is_eof() {
return (
n_start_hashes,
Some(RawStrError::NoTerminator {
expected: n_start_hashes,
found: max_hashes,
possible_terminator_offset,
}),
);
}
// Eat closing double quote.
self.bump();
// Check that amount of closing '#' symbols
// is equal to the amount of opening ones.
// Note that this will not consume extra trailing `#` characters:
// `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
// followed by a `#` token.
let mut n_end_hashes = 0;
while self.first() == '#' && n_end_hashes < n_start_hashes {
n_end_hashes += 1;
self.bump();
}
if n_end_hashes == n_start_hashes {
return (n_start_hashes, None);
} else if n_end_hashes > max_hashes {
// Keep track of possible terminators to give a hint about
// where there might be a missing terminator
possible_terminator_offset =
Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
max_hashes = n_end_hashes;
}
}
}
fn eat_decimal_digits(&mut self) -> bool {
let mut has_digits = false;
loop {
match self.first() {
'_' => {
self.bump();
}
'0'..='9' => {
has_digits = true;
self.bump();
}
_ => break,
}
}
has_digits
}
fn eat_hexadecimal_digits(&mut self) -> bool {
let mut has_digits = false;
loop {
match self.first() {
'_' => {
self.bump();
}
'0'..='9' | 'a'..='f' | 'A'..='F' => {
has_digits = true;
self.bump();
}
_ => break,
}
}
has_digits
}
/// Eats the float exponent. Returns true if at least one digit was met,
/// and returns false otherwise.
fn eat_float_exponent(&mut self) -> bool {
debug_assert!(self.prev() == 'e' || self.prev() == 'E');
if self.first() == '-' || self.first() == '+' {
self.bump();
}
self.eat_decimal_digits()
}
// Eats the suffix of the literal, e.g. "_u8".
fn eat_literal_suffix(&mut self) {
self.eat_identifier();
}
// Eats the identifier.
fn eat_identifier(&mut self) {
if !is_id_start(self.first()) {
return;
}
self.bump();
self.eat_while(is_id_continue);
}
/// Eats symbols while predicate returns true or until the end of file is reached.
fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) {
while predicate(self.first()) && !self.is_eof() {
self.bump();
}
}
}
|
// Bump again to skip escaped character.
|
random_line_split
|
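This lib.rs row splits at a random line inside `double_quoted_string`: the held-out middle is the comment `// Bump again to skip escaped character.` that belongs just before the extra `self.bump()` in the `'\\'` arm. A simplified, self-contained sketch of that escape-skipping loop (it skips after every backslash, whereas the real lexer only does so before `\\` or `"`):

```rust
// Sketch: terminination check for a double-quoted string body, escapes skipped.
fn is_terminated(body_after_open_quote: &str) -> bool {
    let mut chars = body_after_open_quote.chars();
    while let Some(c) = chars.next() {
        match c {
            '"' => return true,
            // Bump again to skip the escaped character (the row's middle line).
            '\\' => { chars.next(); }
            _ => {}
        }
    }
    false // end of input reached without a closing quote
}

fn main() {
    assert!(is_terminated(r#"abc\"def""#));
    assert!(!is_terminated(r#"abc\""#));
}
```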
BigHistory.tsx
|
import Avatar from '@material-ui/core/Avatar';
import List from '@material-ui/core/List';
import ListItem from '@material-ui/core/ListItem';
import ListItemText from '@material-ui/core/ListItemText';
import { withStyles } from '@material-ui/core/styles';
import NotificationIcon from '@material-ui/icons/Notifications';
import React from 'react';
import { connect } from 'react-redux';
import ScrollArea from 'react-scrollbar';
import ZephyrNotification from '../../models/ZephyrNotification';
import IStoreState from '../../store/IStoreState';
import { Container, VRTitle } from '../primitives';
const styles = _ => ({
avatar: {
width: 100,
height: 200
},
listItemTextPrimary: {
fontSize: '3em'
},
listItemTextSecondary: {
fontSize: '2.5em'
}
});
|
class BigHistory extends React.Component<any, any> {
state = {
notifications: new Array<ZephyrNotification>()
};
listItems() {
return (
<div>
{this.props.notifications.slice().reverse().map((notification) => {
return (
<ListItem key={notification.id}>
<Avatar style={{ width: 75, height: 75, marginRight: '20px' }} src={notification.icon ? 'data:image/png;base64, ' + notification.icon : ''}>
<NotificationIcon style={{ fontSize: '40px' }} />
</Avatar>
<ListItemText
classes={{ primary: this.props.classes.listItemTextPrimary, secondary: this.props.classes.listItemTextSecondary }}
primary={notification.title}
secondary={notification.body} />
</ListItem>
);
})}
</div>);
}
emptyList() {
return (
<ListItem key='notifications-none'>
<ListItemText
classes={{ primary: this.props.classes.listItemTextPrimary, secondary: this.props.classes.listItemTextSecondary }}
primary='No notifications'
secondary='Notifications from your current session will be shown here.' />
</ListItem>
);
}
render() {
return (
<Container>
<VRTitle>Zephyr β</VRTitle>
<ScrollArea
speed={0.8}
className='area'
contentClassName='content'
style={{ width: '100%', height: '775px' }}
horizontal={false}>
<List>
{this.props.notifications.length > 0 ? this.listItems() : this.emptyList()}
</List>
</ScrollArea>
</Container>
);
}
}
function mapStatesToProps (state: IStoreState) {
return {
notifications: Array.from(state.notifications.values()).sort((a, b) => a.timestamp < b.timestamp ? -1 : a.timestamp > b.timestamp ? 1 : 0)
};
}
export default connect(mapStatesToProps)(withStyles(styles)(BigHistory));
|
random_line_split
|
|
portal.rs
|
use crate::compat::Mutex;
use crate::screen::{Screen, ScreenBuffer};
use alloc::sync::Arc;
use core::mem;
use graphics_base::frame_buffer::{AsSurfaceMut, FrameBuffer};
use graphics_base::system::{DeletedIndex, System};
use graphics_base::types::Rect;
use graphics_base::Result;
use hecs::World;
#[cfg(target_os = "rust_os")]
mod rust_os {
use alloc::sync::Arc;
use graphics_base::ipc;
use graphics_base::types::{Event, EventInput};
use graphics_base::Result;
use os::{File, Mutex};
#[derive(Clone)]
pub struct PortalRef {
pub server2client: Arc<Mutex<File>>,
pub portal_id: usize,
}
impl PartialEq for PortalRef {
fn eq(&self, other: &Self) -> bool {
self.portal_id == other.portal_id && Arc::ptr_eq(&self.server2client, &other.server2client)
}
}
impl PortalRef {
pub fn send_input(&self, input: EventInput) -> Result<()> {
let mut server2client = self.server2client.lock();
ipc::send_message(
&mut *server2client,
&Event::Input {
portal_id: self.portal_id,
input,
},
)
}
}
}
#[cfg(not(target_os = "rust_os"))]
mod posix {
use graphics_base::types::{Event, EventInput};
use graphics_base::Result;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;
#[derive(Clone)]
pub struct PortalRef {
pub portal_id: usize,
pub events: Rc<RefCell<VecDeque<Event>>>,
}
impl PartialEq for PortalRef {
fn eq(&self, other: &Self) -> bool {
self.portal_id == other.portal_id && Rc::ptr_eq(&self.events, &other.events)
}
}
impl PortalRef {
pub fn send_input(&self, input: EventInput) -> Result<()> {
let event = Event::Input {
portal_id: self.portal_id,
input,
};
self.events.borrow_mut().push_back(event);
Ok(())
}
}
}
#[cfg(target_os = "rust_os")]
pub use rust_os::PortalRef;
#[cfg(not(target_os = "rust_os"))]
pub use posix::PortalRef;
pub struct ServerPortal {
portal_ref: PortalRef,
pos: Rect,
prev_pos: Rect,
z_index: usize,
frame_buffer_id: usize,
frame_buffer_size: (u16, u16),
frame_buffer: Arc<FrameBuffer>,
needs_paint: bool,
}
impl ServerPortal {
pub fn new(
world: &World,
portal_ref: PortalRef,
pos: Rect,
frame_buffer_id: usize,
frame_buffer_size: (u16, u16),
frame_buffer: FrameBuffer,
) -> Self {
let z_index = world
.query::<&Self>()
.iter()
.map(|(_, portal)| &portal.z_index)
.max()
.cloned()
.unwrap_or(0);
Self {
portal_ref,
pos,
prev_pos: pos,
z_index,
frame_buffer_id,
frame_buffer_size,
frame_buffer: Arc::new(frame_buffer),
needs_paint: true,
}
}
}
impl ServerPortal {
pub fn move_to(&mut self, pos: Rect) {
self.pos = pos;
}
pub fn draw(&mut self, frame_buffer_id: usize, frame_buffer_size: (u16, u16), frame_buffer: FrameBuffer) -> usize {
self.frame_buffer_size = frame_buffer_size;
self.frame_buffer = Arc::new(frame_buffer);
self.needs_paint = true;
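// `mem::replace` swaps in the new id and returns the previous frame_buffer_id to the caller.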
mem::replace(&mut self.frame_buffer_id, frame_buffer_id)
}
}
impl ServerPortal {
fn as_screen_buffer(&self) -> ScreenBuffer {
ScreenBuffer {
pos: self.pos,
frame_buffer_size: self.frame_buffer_size,
frame_buffer: Arc::downgrade(&self.frame_buffer),
portal_ref: self.portal_ref.clone(),
}
}
}
pub struct
|
<S> {
screen: Arc<Mutex<Screen<S>>>,
input_state: Arc<Mutex<Option<PortalRef>>>,
deleted_index: DeletedIndex<()>,
}
impl<S> ServerPortalSystem<S> {
pub fn new(screen: Arc<Mutex<Screen<S>>>, input_state: Arc<Mutex<Option<PortalRef>>>) -> Self {
ServerPortalSystem {
screen,
input_state,
deleted_index: DeletedIndex::new(),
}
}
}
impl<S> System for ServerPortalSystem<S>
where
S: AsSurfaceMut,
{
fn run(&mut self, world: &mut World) -> Result<()> {
let mut portals_borrow = world.query::<&mut ServerPortal>();
let mut portals = portals_borrow.iter().map(|(_, portal)| portal).collect::<Vec<_>>();
portals.sort_by(|a, b| a.z_index.cmp(&b.z_index));
for portal in portals.iter_mut() {
if portal.prev_pos != portal.pos {
portal.prev_pos = portal.pos;
portal.needs_paint = true;
}
}
*self.input_state.lock() = portals.last().map(|p| p.portal_ref.clone());
let deleted_entities = self
.deleted_index
.update(world.query::<()>().with::<ServerPortal>().iter());
if !deleted_entities.is_empty() || portals.iter().any(|p| p.needs_paint) {
self.screen
.lock()
.update_buffers(portals.iter_mut().rev().map(|portal| {
portal.needs_paint = false;
portal.as_screen_buffer()
}));
}
Ok(())
}
}
|
ServerPortalSystem
|
identifier_name
|
portal.rs
|
use crate::compat::Mutex;
use crate::screen::{Screen, ScreenBuffer};
use alloc::sync::Arc;
use core::mem;
use graphics_base::frame_buffer::{AsSurfaceMut, FrameBuffer};
use graphics_base::system::{DeletedIndex, System};
use graphics_base::types::Rect;
use graphics_base::Result;
use hecs::World;
#[cfg(target_os = "rust_os")]
mod rust_os {
use alloc::sync::Arc;
use graphics_base::ipc;
use graphics_base::types::{Event, EventInput};
use graphics_base::Result;
use os::{File, Mutex};
#[derive(Clone)]
pub struct PortalRef {
pub server2client: Arc<Mutex<File>>,
pub portal_id: usize,
}
impl PartialEq for PortalRef {
fn eq(&self, other: &Self) -> bool {
self.portal_id == other.portal_id && Arc::ptr_eq(&self.server2client, &other.server2client)
}
}
impl PortalRef {
pub fn send_input(&self, input: EventInput) -> Result<()> {
let mut server2client = self.server2client.lock();
ipc::send_message(
&mut *server2client,
&Event::Input {
portal_id: self.portal_id,
input,
},
)
}
}
}
#[cfg(not(target_os = "rust_os"))]
mod posix {
use graphics_base::types::{Event, EventInput};
use graphics_base::Result;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;
#[derive(Clone)]
pub struct PortalRef {
pub portal_id: usize,
pub events: Rc<RefCell<VecDeque<Event>>>,
}
impl PartialEq for PortalRef {
fn eq(&self, other: &Self) -> bool {
self.portal_id == other.portal_id && Rc::ptr_eq(&self.events, &other.events)
}
}
impl PortalRef {
pub fn send_input(&self, input: EventInput) -> Result<()> {
let event = Event::Input {
portal_id: self.portal_id,
input,
};
self.events.borrow_mut().push_back(event);
Ok(())
}
}
}
#[cfg(target_os = "rust_os")]
pub use rust_os::PortalRef;
#[cfg(not(target_os = "rust_os"))]
pub use posix::PortalRef;
pub struct ServerPortal {
portal_ref: PortalRef,
pos: Rect,
prev_pos: Rect,
z_index: usize,
frame_buffer_id: usize,
frame_buffer_size: (u16, u16),
frame_buffer: Arc<FrameBuffer>,
needs_paint: bool,
}
impl ServerPortal {
pub fn new(
world: &World,
portal_ref: PortalRef,
pos: Rect,
frame_buffer_id: usize,
frame_buffer_size: (u16, u16),
frame_buffer: FrameBuffer,
) -> Self {
let z_index = world
.query::<&Self>()
.iter()
.map(|(_, portal)| &portal.z_index)
.max()
.cloned()
.unwrap_or(0);
Self {
portal_ref,
pos,
prev_pos: pos,
z_index,
frame_buffer_id,
frame_buffer_size,
frame_buffer: Arc::new(frame_buffer),
needs_paint: true,
}
}
}
impl ServerPortal {
pub fn move_to(&mut self, pos: Rect) {
self.pos = pos;
}
pub fn draw(&mut self, frame_buffer_id: usize, frame_buffer_size: (u16, u16), frame_buffer: FrameBuffer) -> usize {
self.frame_buffer_size = frame_buffer_size;
self.frame_buffer = Arc::new(frame_buffer);
self.needs_paint = true;
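// `mem::replace` swaps in the new id and returns the previous frame_buffer_id to the caller.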
mem::replace(&mut self.frame_buffer_id, frame_buffer_id)
}
}
impl ServerPortal {
fn as_screen_buffer(&self) -> ScreenBuffer {
ScreenBuffer {
pos: self.pos,
frame_buffer_size: self.frame_buffer_size,
frame_buffer: Arc::downgrade(&self.frame_buffer),
portal_ref: self.portal_ref.clone(),
}
}
}
pub struct ServerPortalSystem<S> {
screen: Arc<Mutex<Screen<S>>>,
input_state: Arc<Mutex<Option<PortalRef>>>,
deleted_index: DeletedIndex<()>,
}
impl<S> ServerPortalSystem<S> {
pub fn new(screen: Arc<Mutex<Screen<S>>>, input_state: Arc<Mutex<Option<PortalRef>>>) -> Self {
ServerPortalSystem {
screen,
input_state,
deleted_index: DeletedIndex::new(),
}
}
}
impl<S> System for ServerPortalSystem<S>
where
S: AsSurfaceMut,
{
fn run(&mut self, world: &mut World) -> Result<()> {
let mut portals_borrow = world.query::<&mut ServerPortal>();
let mut portals = portals_borrow.iter().map(|(_, portal)| portal).collect::<Vec<_>>();
portals.sort_by(|a, b| a.z_index.cmp(&b.z_index));
for portal in portals.iter_mut() {
if portal.prev_pos != portal.pos
|
}
*self.input_state.lock() = portals.last().map(|p| p.portal_ref.clone());
let deleted_entities = self
.deleted_index
.update(world.query::<()>().with::<ServerPortal>().iter());
if !deleted_entities.is_empty() || portals.iter().any(|p| p.needs_paint) {
self.screen
.lock()
.update_buffers(portals.iter_mut().rev().map(|portal| {
portal.needs_paint = false;
portal.as_screen_buffer()
}));
}
Ok(())
}
}
|
{
portal.prev_pos = portal.pos;
portal.needs_paint = true;
}
|
conditional_block
|
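The portal.rs row above has `fim_type` `conditional_block`: the prefix stops right after the condition `if portal.prev_pos != portal.pos`, and the held-out middle (shown after the suffix) is the braced body that records the move and marks the portal dirty. A standalone sketch of that statement with the fields inlined as locals:

```rust
// Sketch: the conditional_block middle stitched back into its `if`.
fn main() {
    let mut prev_pos = (0, 0);
    let pos = (3, 4);
    let mut needs_paint = false;
    if prev_pos != pos {
        prev_pos = pos;      // middle, line 1
        needs_paint = true;  // middle, line 2
    }
    assert_eq!(prev_pos, pos);
    assert!(needs_paint);
}
```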
portal.rs
|
use crate::compat::Mutex;
use crate::screen::{Screen, ScreenBuffer};
use alloc::sync::Arc;
use core::mem;
use graphics_base::frame_buffer::{AsSurfaceMut, FrameBuffer};
use graphics_base::system::{DeletedIndex, System};
use graphics_base::types::Rect;
use graphics_base::Result;
use hecs::World;
#[cfg(target_os = "rust_os")]
mod rust_os {
use alloc::sync::Arc;
use graphics_base::ipc;
use graphics_base::types::{Event, EventInput};
use graphics_base::Result;
use os::{File, Mutex};
#[derive(Clone)]
pub struct PortalRef {
pub server2client: Arc<Mutex<File>>,
pub portal_id: usize,
}
impl PartialEq for PortalRef {
fn eq(&self, other: &Self) -> bool {
self.portal_id == other.portal_id && Arc::ptr_eq(&self.server2client, &other.server2client)
}
}
impl PortalRef {
pub fn send_input(&self, input: EventInput) -> Result<()> {
let mut server2client = self.server2client.lock();
ipc::send_message(
&mut *server2client,
&Event::Input {
portal_id: self.portal_id,
input,
},
)
}
}
}
#[cfg(not(target_os = "rust_os"))]
mod posix {
use graphics_base::types::{Event, EventInput};
use graphics_base::Result;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;
|
pub events: Rc<RefCell<VecDeque<Event>>>,
}
impl PartialEq for PortalRef {
fn eq(&self, other: &Self) -> bool {
self.portal_id == other.portal_id && Rc::ptr_eq(&self.events, &other.events)
}
}
impl PortalRef {
pub fn send_input(&self, input: EventInput) -> Result<()> {
let event = Event::Input {
portal_id: self.portal_id,
input,
};
self.events.borrow_mut().push_back(event);
Ok(())
}
}
}
#[cfg(target_os = "rust_os")]
pub use rust_os::PortalRef;
#[cfg(not(target_os = "rust_os"))]
pub use posix::PortalRef;
pub struct ServerPortal {
portal_ref: PortalRef,
pos: Rect,
prev_pos: Rect,
z_index: usize,
frame_buffer_id: usize,
frame_buffer_size: (u16, u16),
frame_buffer: Arc<FrameBuffer>,
needs_paint: bool,
}
impl ServerPortal {
pub fn new(
world: &World,
portal_ref: PortalRef,
pos: Rect,
frame_buffer_id: usize,
frame_buffer_size: (u16, u16),
frame_buffer: FrameBuffer,
) -> Self {
let z_index = world
.query::<&Self>()
.iter()
.map(|(_, portal)| &portal.z_index)
.max()
.cloned()
.unwrap_or(0);
Self {
portal_ref,
pos,
prev_pos: pos,
z_index,
frame_buffer_id,
frame_buffer_size,
frame_buffer: Arc::new(frame_buffer),
needs_paint: true,
}
}
}
impl ServerPortal {
pub fn move_to(&mut self, pos: Rect) {
self.pos = pos;
}
pub fn draw(&mut self, frame_buffer_id: usize, frame_buffer_size: (u16, u16), frame_buffer: FrameBuffer) -> usize {
self.frame_buffer_size = frame_buffer_size;
self.frame_buffer = Arc::new(frame_buffer);
self.needs_paint = true;
mem::replace(&mut self.frame_buffer_id, frame_buffer_id)
}
}
impl ServerPortal {
fn as_screen_buffer(&self) -> ScreenBuffer {
ScreenBuffer {
pos: self.pos,
frame_buffer_size: self.frame_buffer_size,
frame_buffer: Arc::downgrade(&self.frame_buffer),
portal_ref: self.portal_ref.clone(),
}
}
}
pub struct ServerPortalSystem<S> {
screen: Arc<Mutex<Screen<S>>>,
input_state: Arc<Mutex<Option<PortalRef>>>,
deleted_index: DeletedIndex<()>,
}
impl<S> ServerPortalSystem<S> {
pub fn new(screen: Arc<Mutex<Screen<S>>>, input_state: Arc<Mutex<Option<PortalRef>>>) -> Self {
ServerPortalSystem {
screen,
input_state,
deleted_index: DeletedIndex::new(),
}
}
}
impl<S> System for ServerPortalSystem<S>
where
S: AsSurfaceMut,
{
fn run(&mut self, world: &mut World) -> Result<()> {
let mut portals_borrow = world.query::<&mut ServerPortal>();
let mut portals = portals_borrow.iter().map(|(_, portal)| portal).collect::<Vec<_>>();
portals.sort_by(|a, b| a.z_index.cmp(&b.z_index));
for portal in portals.iter_mut() {
if portal.prev_pos != portal.pos {
portal.prev_pos = portal.pos;
portal.needs_paint = true;
}
}
*self.input_state.lock() = portals.last().map(|p| p.portal_ref.clone());
let deleted_entities = self
.deleted_index
.update(world.query::<()>().with::<ServerPortal>().iter());
if !deleted_entities.is_empty() || portals.iter().any(|p| p.needs_paint) {
self.screen
.lock()
.update_buffers(portals.iter_mut().rev().map(|portal| {
portal.needs_paint = false;
portal.as_screen_buffer()
}));
}
Ok(())
}
}
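// A small sketch (not part of portal.rs) of the shared-queue pattern the
// posix PortalRef uses above: one Rc clone pushes events, the host loop
// drains the same queue in FIFO order. &'static str stands in for the real
// Event type.
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

fn queue_demo() {
    let events: Rc<RefCell<VecDeque<&'static str>>> = Rc::new(RefCell::new(VecDeque::new()));
    let sender = Rc::clone(&events);
    sender.borrow_mut().push_back("keyboard input");
    sender.borrow_mut().push_back("mouse input");
    // The host loop drains the queue; the body must not borrow `events` again.
    while let Some(event) = events.borrow_mut().pop_front() {
        println!("dispatch: {}", event);
    }
}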
|
#[derive(Clone)]
pub struct PortalRef {
pub portal_id: usize,
|
random_line_split
|
browserify.d.ts
|
// Type definitions for Browserify v12.0.1
// Project: http://browserify.org/
// Definitions by: Andrew Gaspar <https://github.com/AndrewGaspar/>, John Vilk <https://github.com/jvilk>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/// <reference path="../node/node.d.ts" />
declare namespace Browserify {
/**
* Options pertaining to an individual file.
*/
interface FileOptions {
// If true, this is considered an entry point to your app.
entry?: boolean;
// Expose this file under a custom dependency name.
// require('./vendor/angular/angular.js', {expose: 'angular'}) enables require('angular')
expose?: string;
// Basedir to use to resolve this file's path.
basedir?: string;
// The name/path to the file.
file?: string;
// Forward file to external() to be externalized.
external?: boolean;
// Disable transforms on file if set to false.
transform?: boolean;
// The ID to use for require() statements.
id?: string;
}
// Browserify accepts a filename, an input stream for file inputs, or a FileOptions configuration
// for each file in a bundle.
type InputFile = string | NodeJS.ReadableStream | FileOptions;
/**
* Options pertaining to a Browserify instance.
*/
interface Options {
// Custom properties can be defined on Options.
// These options are forwarded along to module-deps and browser-pack directly.
[propName: string]: any;
// String, file object, or array of those types (they may be mixed) specifying entry file(s).
entries?: InputFile | InputFile[];
// an array which will skip all require() and global parsing for each file in the array.
// Use this for giant libs like jquery or threejs that don't have any requires or node-style globals but take forever to parse.
noParse?: string[];
// an array of optional extra extensions for the module lookup machinery to use when the extension has not been specified.
// By default Browserify considers only .js and .json files in such cases.
extensions?: string[];
// the directory that Browserify starts bundling from for filenames that start with ..
basedir?: string;
// an array of directories that Browserify searches when looking for modules which are not referenced using relative path.
// Can be absolute or relative to basedir. Equivalent of setting NODE_PATH environmental variable when calling Browserify command.
paths?: string[];
// sets the algorithm used to parse out the common paths. Use false to turn this off, otherwise it uses the commondir module.
commondir?: boolean;
// disables converting module ids into numerical indexes. This is useful for preserving the original paths that a bundle was generated with.
fullPaths?: boolean;
// sets the list of built-ins to use, which by default is set in lib/builtins.js in this distribution.
builtins?: string[] | {[builtinName: string]: string} | boolean;
// set if external modules should be bundled. Defaults to true.
bundleExternal?: boolean;
// When true, always insert process, global, __filename, and __dirname without analyzing the AST for faster builds but larger output bundles. Default false.
insertGlobals?: boolean;
// When true, scan all files for process, global, __filename, and __dirname, defining as necessary.
// With this option npm modules are more likely to work but bundling takes longer. Default true.
detectGlobals?: boolean;
// When true, add a source map inline to the end of the bundle. This makes debugging easier because you can see all the original files if you are in a modern enough browser.
debug?: boolean;
// When a non-empty string, a standalone module is created with that name and a umd wrapper.
// You can use namespaces in the standalone global export using a . in the string name as a separator, for example 'A.B.C'.
// The global export will be sanitized and camel cased.
standalone?: string;
// will be passed to insert-module-globals as the opts.vars parameter.
insertGlobalVars?: {[globalName: string]: (file: string, basedir: string) => any};
// defaults to 'require' in expose mode but you can use another name.
externalRequireName?: string;
}
interface BrowserifyConstructor {
(files: InputFile[], opts?: Options): BrowserifyObject;
(file: InputFile, opts?: Options): BrowserifyObject;
(opts: Options): BrowserifyObject;
(): BrowserifyObject;
new(files: InputFile[], opts?: Options): BrowserifyObject;
new(file: InputFile, opts?: Options): BrowserifyObject;
new(opts: Options): BrowserifyObject;
new(): BrowserifyObject;
}
interface BrowserifyObject extends NodeJS.EventEmitter {
/**
|
/**
* Make file available from outside the bundle with require(file).
* The file param is anything that can be resolved by require.resolve().
* file can also be a stream, but you should also use opts.basedir so that relative requires will be resolvable.
* If file is an array, each item in file will be required. In file array form, you can use a string or object for each item. Object items should have a file property and the rest of the parameters will be used for the opts.
* Use the expose property of opts to specify a custom dependency name. require('./vendor/angular/angular.js', {expose: 'angular'}) enables require('angular')
*/
require(file: InputFile, opts?: FileOptions): BrowserifyObject;
/**
* Bundle the files and their dependencies into a single javascript file.
* Return a readable stream with the javascript file contents or optionally specify a cb(err, buf) to get the buffered results.
*/
bundle(cb?: (err: any, src: Buffer) => any): NodeJS.ReadableStream;
/**
* Prevent file from being loaded into the current bundle, instead referencing from another bundle.
* If file is an array, each item in file will be externalized.
* If file is another bundle, that bundle's contents will be read and excluded from the current bundle as the bundle in file gets bundled.
*/
external(file: string[], opts?: { basedir?: string }): BrowserifyObject;
external(file: string, opts?: { basedir?: string }): BrowserifyObject;
external(file: BrowserifyObject): BrowserifyObject;
/**
* Prevent the module name or file at file from showing up in the output bundle.
* Instead you will get a file with module.exports = {}.
*/
ignore(file: string, opts?: { basedir?: string }): BrowserifyObject;
/**
* Prevent the module name or file at file from showing up in the output bundle.
* If your code tries to require() that file it will throw unless you've provided another mechanism for loading it.
*/
exclude(file: string, opts?: { basedir?: string }): BrowserifyObject;
/**
* Transform source code before parsing it for require() calls with the transform function or module name tr.
* If tr is a function, it will be called with tr(file) and it should return a through-stream that takes the raw file contents and produces the transformed source.
* If tr is a string, it should be a module name or file path of a transform module
*/
transform<T extends { basedir?: string }>(tr: string, opts?: T): BrowserifyObject;
transform<T extends { basedir?: string }>(tr: (file: string, opts: T) => NodeJS.ReadWriteStream, opts?: T): BrowserifyObject;
/**
* Register a plugin with opts. Plugins can be a string module name or a function the same as transforms.
* plugin(b, opts) is called with the Browserify instance b.
*/
plugin<T extends { basedir?: string }>(plugin: string, opts?: T): BrowserifyObject;
plugin<T extends { basedir?: string }>(plugin: (b: BrowserifyObject, opts: T) => any, opts?: T): BrowserifyObject;
/**
* Reset the pipeline back to a normal state. This function is called automatically when bundle() is called multiple times.
* This function triggers a 'reset' event.
*/
reset(opts?: Options): void;
/**
* When a file is resolved for the bundle, the bundle emits a 'file' event with the full file path, the id string passed to require(), and the parent object used by browser-resolve.
* You could use the file event to implement a file watcher to regenerate bundles when files change.
*/
on(event: 'file', listener: (file: string, id: string, parent: any) => any): this;
/**
* When a package.json file is read, this event fires with the contents.
* The package directory is available at pkg.__dirname.
*/
on(event: 'package', listener: (pkg: any) => any): this;
/**
* When .bundle() is called, this event fires with the bundle output stream.
*/
on(event: 'bundle', listener: (bundle: NodeJS.ReadableStream) => any): this;
/**
* When the .reset() method is called or implicitly called by another call to .bundle(), this event fires.
*/
on(event: 'reset', listener: () => any): this;
/**
* When a transform is applied to a file, the 'transform' event fires on the bundle stream with the transform stream tr and the file that the transform is being applied to.
*/
on(event: 'transform', listener: (tr: NodeJS.ReadWriteStream, file: string) => any): this;
on(event: string, listener: Function): this;
/**
* Set to any until substack/labeled-stream-splicer is defined
*/
pipeline: any;
}
}
declare module "browserify" {
var browserify: Browserify.BrowserifyConstructor;
export = browserify;
}
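// A minimal usage sketch of the typings declared above (not part of the
// definitions). "./app.js" and "babelify" are illustrative values.
import browserify = require("browserify");
import * as fs from "fs";

const b = browserify("./app.js", { debug: true, standalone: "MyApp" });
b.external("jquery");                 // resolve jquery from another bundle
b.transform("babelify");              // transform sources before parsing
b.bundle((err: any, src: Buffer) => { // buffered output via callback
    if (err) { throw err; }
    fs.writeFileSync("bundle.js", src);
});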
|
* Add an entry file from file that will be executed when the bundle loads.
* If file is an array, each item in file will be added as an entry file.
*/
add(file: InputFile[], opts?: FileOptions): BrowserifyObject;
add(file: InputFile, opts?: FileOptions): BrowserifyObject;
|
random_line_split
|
country-telephone-info.js
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Country->phone number info.
*/
/**
* Each country entry should have the fields listed below.
*/
/**
* Format `serverPhoneNumber` for display.
*
* @method format
* @param {String} serverPhoneNumber phone number returned by the server
* @return {String} phone number formatted for country
*/
/**
* Normalize a string accepted by `pattern` to the full phone number,
* including country code prefix.
*
* @method normalize
* @param {String} num phone number to convert
* @return {String} full phone number with country code prefix
*/
/**
* Pattern used for input validation
*
* @property pattern
* @type {RegExp}
*/
/**
* Country code prefix
* @property prefix
* @type {String}
*/
/**
* Rollout rate. Should be in the range of [0, 1]. Used
* for gradual rollouts to a country. If rolloutRate is
* not defined, 1 is assumed.
*
* @property rolloutRate
* @type {Number}
*/
/**
* Create a `format` function. `${serverPhoneNumber}` in `format`
* will be replaced with `serverPhoneNumber`
*
* @param {String} format
* @returns {Function}
*/
function formatter (format) {
return (serverPhoneNumber) => format.replace(/\$\{serverPhoneNumber\}/, serverPhoneNumber);
}
function hasPrefix (num, prefix) {
return num.indexOf(prefix) === 0;
}
function ensurePrefix (prefix)
|
module.exports = {
// Austria
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Austria
AT: {
format: formatter('+43 ${serverPhoneNumber}'),
normalize: ensurePrefix('+43'),
pattern: /^(?:\+43)?\d{6,}$/,
prefix: '+43',
rolloutRate: 1
},
// Australia
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Australia
AU: {
format: formatter('+61 ${serverPhoneNumber}'),
normalize: ensurePrefix('+61'),
pattern: /^(?:\+61\d{9}|\d{10})$/,
prefix: '+61',
rolloutRate: 1
},
// Belgium
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Belgium
BE: {
format: formatter('+32 ${serverPhoneNumber}'),
normalize: ensurePrefix('+32'),
pattern: /^(?:\+32\d{9}|\d{10})$/,
prefix: '+32',
rolloutRate: 1
},
// Germany
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Germany
DE: {
format: formatter('+49 ${serverPhoneNumber}'),
normalize: ensurePrefix('+49'),
pattern: /^(?:\+49)?\d{6,13}$/,
prefix: '+49',
rolloutRate: 1
},
// Denmark
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Denmark
DK: {
format: formatter('+45 ${serverPhoneNumber}'),
normalize: ensurePrefix('+45'),
pattern: /^(?:\+45)?\d{8}$/,
prefix: '+45',
rolloutRate: 1
},
// Spain
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Spain
ES: {
format: formatter('+34 ${serverPhoneNumber}'),
normalize: ensurePrefix('+34'),
pattern: /^(?:\+34)?\d{9}$/,
prefix: '+34',
rolloutRate: 1
},
// France
// https://en.wikipedia.org/wiki/Telephone_numbers_in_France
FR: {
format: formatter('+33 ${serverPhoneNumber}'),
normalize: ensurePrefix('+33'),
pattern: /^(?:\+33\d{9}|\d{10})$/,
prefix: '+33',
rolloutRate: 1
},
GB: {
format: formatter('+44 ${serverPhoneNumber}'),
normalize: ensurePrefix('+44'),
pattern: /^(?:\+44\d{10}|\d{11})$/,
prefix: '+44',
rolloutRate: 1
},
// Italy
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Italy
IT: {
format: formatter('+39 ${serverPhoneNumber}'),
normalize: ensurePrefix('+39'),
// Italy can have either 9 or 10 digit numbers. 9 digits
// are the old style and are still used.
pattern: /^(?:\+39)?\d{9,10}$/,
prefix: '+39',
rolloutRate: 1
},
// Luxembourg
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Luxembourg
LU: {
format: formatter('+352 ${serverPhoneNumber}'),
normalize: ensurePrefix('+352'),
pattern: /^(?:\+352)?\d{9}$/,
prefix: '+352',
rolloutRate: 0 // being soft launched. Testers will need to open `/sms?service=sync&country=LU`
},
// Netherlands
// https://en.wikipedia.org/wiki/Telephone_numbers_in_the_Netherlands
NL: {
format: formatter('+31 ${serverPhoneNumber}'),
normalize: ensurePrefix('+31'),
pattern: /^(?:\+31)?\d{4,}$/, // Non-geographical numbers have no fixed length. 3 access digits + at least one other digit.
prefix: '+31',
rolloutRate: 1
},
// Portugal
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Portugal
PT: {
format: formatter('+351 ${serverPhoneNumber}'),
normalize: ensurePrefix('+351'),
pattern: /^(?:\+351)?\d{9}$/,
prefix: '+351',
rolloutRate: 1
},
RO: {
format: formatter('+40 ${serverPhoneNumber}'),
normalize(num) {
// allow +40 country code prefix
// as well as an extra 0 before the 7 prefix.
const prefix = /^(\+40)?0?/;
if (prefix.test(num)) {
num = num.replace(prefix, '');
}
return `+40${num}`;
},
// +407xxxxxxxx, allow leading 0 for sloppiness.
pattern: /^(?:\+40)?0?7\d{8,8}$/,
prefix: '+40',
rolloutRate: 1
},
US: {
// Americans don't use country codes, just return the number
// as formatted by the backend.
format: (formattedNumber) => formattedNumber,
normalize (num) {
if (/^\+1/.test(num)) {
return num;
} else if (/^1/.test(num) && num.length === 11) {
return `+${num}`;
}
return `+1${num}`;
},
pattern: /^(\+?1)?[2-9]\d{9,9}$/, // allow for a +1 or 1 prefix before the area code; area codes all start with 2-9
prefix: '+1',
rolloutRate: 1
}
};
// alias CA (Canada) to use the same info as the US.
module.exports.CA = module.exports.US;
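// A brief usage sketch of the exported entries (illustrative only; the
// require path assumes this file's location, and the number is the UK's
// fictional 07700 900xxx range).
const info = require('./country-telephone-info');

info.GB.pattern.test('07700900123');   // true  (11 digits)
info.GB.normalize('07700900123');      // '+4407700900123' (prefix is simply prepended)
info.GB.normalize('+447700900123');    // unchanged, prefix already present
info.GB.format('7700 900123');         // '+44 7700 900123'
info.RO.normalize('0712345678');       // '+40712345678' (optional +40/0 stripped first)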
|
{
return function (num) {
if (hasPrefix(num, prefix)) {
return num;
}
return `${prefix}${num}`;
};
}
|
identifier_body
|
country-telephone-info.js
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Country->phone number info.
*/
/**
* Each country entry should have the fields listed below.
*/
/**
* Format `serverPhoneNumber` for display.
*
* @method format
* @param {String} serverPhoneNumber phone number returned by the server
* @return {String} phone number formatted for country
*/
/**
* Normalize a string accepted by `pattern` to the full phone number,
* including country code prefix.
*
* @method normalize
* @param {String} num phone number to convert
* @return {String} full phone number with country code prefix
*/
/**
* Pattern used for input validation
*
* @property pattern
* @type {RegExp}
*/
/**
* Country code prefix
* @property prefix
* @type {String}
*/
/**
* Rollout rate. Should be in the range of [0, 1]. Used
* for gradual rollouts to a country. If rolloutRate is
* not defined, 1 is assumed.
*
* @property rolloutRate
* @type {Number}
*/
/**
* Create a `format` function. `${serverPhoneNumber}` in `format`
* will be replaced with `serverPhoneNumber`
*
* @param {String} format
* @returns {Function}
*/
function formatter (format) {
return (serverPhoneNumber) => format.replace(/\$\{serverPhoneNumber\}/, serverPhoneNumber);
}
function
|
(num, prefix) {
return num.indexOf(prefix) === 0;
}
function ensurePrefix (prefix) {
return function (num) {
if (hasPrefix(num, prefix)) {
return num;
}
return `${prefix}${num}`;
};
}
module.exports = {
// Austria
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Austria
AT: {
format: formatter('+43 ${serverPhoneNumber}'),
normalize: ensurePrefix('+43'),
pattern: /^(?:\+43)?\d{6,}$/,
prefix: '+43',
rolloutRate: 1
},
// Australia
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Australia
AU: {
format: formatter('+61 ${serverPhoneNumber}'),
normalize: ensurePrefix('+61'),
pattern: /^(?:\+61\d{9}|\d{10})$/,
prefix: '+61',
rolloutRate: 1
},
// Belgium
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Belgium
BE: {
format: formatter('+32 ${serverPhoneNumber}'),
normalize: ensurePrefix('+32'),
pattern: /^(?:\+32\d{9}|\d{10})$/,
prefix: '+32',
rolloutRate: 1
},
// Germany
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Germany
DE: {
format: formatter('+49 ${serverPhoneNumber}'),
normalize: ensurePrefix('+49'),
pattern: /^(?:\+49)?\d{6,13}$/,
prefix: '+49',
rolloutRate: 1
},
// Denmark
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Denmark
DK: {
format: formatter('+45 ${serverPhoneNumber}'),
normalize: ensurePrefix('+45'),
pattern: /^(?:\+45)?\d{8}$/,
prefix: '+45',
rolloutRate: 1
},
// Spain
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Spain
ES: {
format: formatter('+34 ${serverPhoneNumber}'),
normalize: ensurePrefix('+34'),
pattern: /^(?:\+34)?\d{9}$/,
prefix: '+34',
rolloutRate: 1
},
// France
// https://en.wikipedia.org/wiki/Telephone_numbers_in_France
FR: {
format: formatter('+33 ${serverPhoneNumber}'),
normalize: ensurePrefix('+33'),
pattern: /^(?:\+33\d{9}|\d{10})$/,
prefix: '+33',
rolloutRate: 1
},
GB: {
format: formatter('+44 ${serverPhoneNumber}'),
normalize: ensurePrefix('+44'),
pattern: /^(?:\+44\d{10}|\d{11})$/,
prefix: '+44',
rolloutRate: 1
},
// Italy
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Italy
IT: {
format: formatter('+39 ${serverPhoneNumber}'),
normalize: ensurePrefix('+39'),
// Italy can have either 9 or 10 digit numbers. 9 digits
// are the old style and are still used.
pattern: /^(?:\+39)?\d{9,10}$/,
prefix: '+39',
rolloutRate: 1
},
// Luxembourg
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Luxembourg
LU: {
format: formatter('+352 ${serverPhoneNumber}'),
normalize: ensurePrefix('+352'),
pattern: /^(?:\+352)?\d{9}$/,
prefix: '+352',
rolloutRate: 0 // being soft launched. Testers will need to open `/sms?service=sync&country=LU`
},
// Netherlands
// https://en.wikipedia.org/wiki/Telephone_numbers_in_the_Netherlands
NL: {
format: formatter('+31 ${serverPhoneNumber}'),
normalize: ensurePrefix('+31'),
pattern: /^(?:\+31)?\d{4,}$/, // Non-geographical numbers have no fixed length. 3 access digits + at least one other digit.
prefix: '+31',
rolloutRate: 1
},
// Portugal
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Portugal
PT: {
format: formatter('+351 ${serverPhoneNumber}'),
normalize: ensurePrefix('+351'),
pattern: /^(?:\+351)?\d{9}$/,
prefix: '+351',
rolloutRate: 1
},
RO: {
format: formatter('+40 ${serverPhoneNumber}'),
normalize(num) {
// allow +40 country code prefix
// as well as an extra 0 before the 7 prefix.
const prefix = /^(\+40)?0?/;
if (prefix.test(num)) {
num = num.replace(prefix, '');
}
return `+40${num}`;
},
// +407xxxxxxxx, allow leading 0 for sloppiness.
pattern: /^(?:\+40)?0?7\d{8,8}$/,
prefix: '+40',
rolloutRate: 1
},
US: {
// Americans don't use country codes, just return the number
// as formatted by the backend.
format: (formattedNumber) => formattedNumber,
normalize (num) {
if (/^\+1/.test(num)) {
return num;
} else if (/^1/.test(num) && num.length === 11) {
return `+${num}`;
}
return `+1${num}`;
},
pattern: /^(\+?1)?[2-9]\d{9,9}$/, // allow for a +1 or 1 prefix before the area code; area codes all start with 2-9
prefix: '+1',
rolloutRate: 1
}
};
// alias CA (Canada) to use the same info as the US.
module.exports.CA = module.exports.US;
|
hasPrefix
|
identifier_name
|
country-telephone-info.js
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
* Country->phone number info.
*/
/**
* Each country entry should have the fields listed below.
*/
/**
* Format `serverPhoneNumber` for display.
*
* @method format
* @param {String} serverPhoneNumber phone number returned by the server
* @return {String} phone number formatted for country
*/
/**
* Normalize a string accepted by `pattern` to the full phone number,
* including country code prefix.
*
* @method normalize
* @param {String} num phone number to convert
* @return {String} full phone number with country code prefix
*/
/**
* Pattern used for input validation
*
* @property pattern
* @type {RegExp}
*/
/**
* Country code prefix
* @property prefix
* @type {String}
*/
/**
* Rollout rate. Should be in the range of [0, 1]. Used
* for gradual rollouts to a country. If rolloutRate is
* not defined, 1 is assumed.
*
* @property rolloutRate
* @type {Number}
*/
/**
* Create a `format` function. `${serverPhoneNumber}` in `format`
* will be replaced with `serverPhoneNumber`
*
* @param {String} format
* @returns {Function}
*/
function formatter (format) {
return (serverPhoneNumber) => format.replace(/\$\{serverPhoneNumber\}/, serverPhoneNumber);
|
function hasPrefix (num, prefix) {
return num.indexOf(prefix) === 0;
}
function ensurePrefix (prefix) {
return function (num) {
if (hasPrefix(num, prefix)) {
return num;
}
return `${prefix}${num}`;
};
}
module.exports = {
// Austria
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Austria
AT: {
format: formatter('+43 ${serverPhoneNumber}'),
normalize: ensurePrefix('+43'),
pattern: /^(?:\+43)?\d{6,}$/,
prefix: '+43',
rolloutRate: 1
},
// Australia
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Australia
AU: {
format: formatter('+61 ${serverPhoneNumber}'),
normalize: ensurePrefix('+61'),
pattern: /^(?:\+61\d{9}|\d{10})$/,
prefix: '+61',
rolloutRate: 1
},
// Belgium
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Belgium
BE: {
format: formatter('+32 ${serverPhoneNumber}'),
normalize: ensurePrefix('+32'),
pattern: /^(?:\+32\d{9}|\d{10})$/,
prefix: '+32',
rolloutRate: 1
},
// Germany
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Germany
DE: {
format: formatter('+49 ${serverPhoneNumber}'),
normalize: ensurePrefix('+49'),
pattern: /^(?:\+49)?\d{6,13}$/,
prefix: '+49',
rolloutRate: 1
},
// Denmark
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Denmark
DK: {
format: formatter('+45 ${serverPhoneNumber}'),
normalize: ensurePrefix('+45'),
pattern: /^(?:\+45)?\d{8}$/,
prefix: '+45',
rolloutRate: 1
},
// Spain
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Spain
ES: {
format: formatter('+34 ${serverPhoneNumber}'),
normalize: ensurePrefix('+34'),
pattern: /^(?:\+34)?\d{9}$/,
prefix: '+34',
rolloutRate: 1
},
// France
// https://en.wikipedia.org/wiki/Telephone_numbers_in_France
FR: {
format: formatter('+33 ${serverPhoneNumber}'),
normalize: ensurePrefix('+33'),
pattern: /^(?:\+33\d{9}|\d{10})$/,
prefix: '+33',
rolloutRate: 1
},
GB: {
format: formatter('+44 ${serverPhoneNumber}'),
normalize: ensurePrefix('+44'),
pattern: /^(?:\+44\d{10}|\d{11})$/,
prefix: '+44',
rolloutRate: 1
},
// Italy
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Italy
IT: {
format: formatter('+39 ${serverPhoneNumber}'),
normalize: ensurePrefix('+39'),
// Italy can have either 9 or 10 digit numbers. 9 digits
// are the old style and are still used.
pattern: /^(?:\+39)?\d{9,10}$/,
prefix: '+39',
rolloutRate: 1
},
// Luxembourg
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Luxembourg
LU: {
format: formatter('+352 ${serverPhoneNumber}'),
normalize: ensurePrefix('+352'),
pattern: /^(?:\+352)?\d{9}$/,
prefix: '+352',
rolloutRate: 0 // being soft launched. Testers will need to open `/sms?service=sync&country=LU`
},
// Netherlands
// https://en.wikipedia.org/wiki/Telephone_numbers_in_the_Netherlands
NL: {
format: formatter('+31 ${serverPhoneNumber}'),
normalize: ensurePrefix('+31'),
pattern: /^(?:\+31)?\d{4,}$/, // Non-geographical numbers have no fixed length. 3 access digits + at least one other digit.
prefix: '+31',
rolloutRate: 1
},
// Portugal
// https://en.wikipedia.org/wiki/Telephone_numbers_in_Portugal
PT: {
format: formatter('+351 ${serverPhoneNumber}'),
normalize: ensurePrefix('+351'),
pattern: /^(?:\+351)?\d{9}$/,
prefix: '+351',
rolloutRate: 1
},
RO: {
format: formatter('+40 ${serverPhoneNumber}'),
normalize(num) {
// allow +40 country code prefix
// as well as an extra 0 before the 7 prefix.
const prefix = /^(\+40)?0?/;
if (prefix.test(num)) {
num = num.replace(prefix, '');
}
return `+40${num}`;
},
// +407xxxxxxxx, allow leading 0 for sloppiness.
pattern: /^(?:\+40)?0?7\d{8,8}$/,
prefix: '+40',
rolloutRate: 1
},
US: {
// Americans don't use country codes, just return the number
// as formatted by the backend.
format: (formattedNumber) => formattedNumber,
normalize (num) {
if (/^\+1/.test(num)) {
return num;
} else if (/^1/.test(num) && num.length === 11) {
return `+${num}`;
}
return `+1${num}`;
},
pattern: /^(\+?1)?[2-9]\d{9,9}$/, // allow for a +1 or 1 prefix before the area code; area codes all start with 2-9
prefix: '+1',
rolloutRate: 1
}
};
// alias CA (Canada) to use the same info as the US.
module.exports.CA = module.exports.US;
|
}
|
random_line_split
|
ServerPreferencesFetcher.tsx
|
import { Component, InfernoNode } from "inferno";
import { resolveAsset } from "../../assets";
import { fetchRetry } from "../../http";
import { ServerData } from "./data";
// Cache the response promise so the request is only sent once
let fetchServerData: Promise<ServerData> | undefined;
export class ServerPreferencesFetcher extends Component<{
render: (serverData: ServerData | undefined) => InfernoNode,
}, {
serverData?: ServerData;
}> {
state = {
serverData: undefined,
};
componentDidMount() {
this.populateServerData();
}
async populateServerData() {
if (!fetchServerData) {
fetchServerData = fetchRetry(resolveAsset("preferences.json"))
.then(response => response.json());
}
const preferencesData: ServerData = await fetchServerData;
this.setState({
serverData: preferencesData,
});
}
|
() {
return this.props.render(this.state.serverData);
}
}
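// A sketch (not part of this component) of the module-level promise cache
// used above, in isolation: every caller awaits the same in-flight fetch,
// so the request is issued exactly once. "/config.json" is illustrative.
let cachedConfig: Promise<string> | undefined;

function fetchConfigOnce(): Promise<string> {
    if (!cachedConfig) {
        cachedConfig = fetch("/config.json").then(response => response.text());
    }
    return cachedConfig;
}

// Both calls share one network request:
fetchConfigOnce().then(first => console.log(first.length));
fetchConfigOnce().then(second => console.log(second.length));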
|
render
|
identifier_name
|
ServerPreferencesFetcher.tsx
|
import { Component, InfernoNode } from "inferno";
import { resolveAsset } from "../../assets";
import { fetchRetry } from "../../http";
import { ServerData } from "./data";
// Cache the response promise so the request is only sent once
let fetchServerData: Promise<ServerData> | undefined;
export class ServerPreferencesFetcher extends Component<{
render: (serverData: ServerData | undefined) => InfernoNode,
}, {
serverData?: ServerData;
}> {
state = {
serverData: undefined,
};
componentDidMount() {
this.populateServerData();
}
async populateServerData() {
if (!fetchServerData)
|
const preferencesData: ServerData = await fetchServerData;
this.setState({
serverData: preferencesData,
});
}
render() {
return this.props.render(this.state.serverData);
}
}
|
{
fetchServerData = fetchRetry(resolveAsset("preferences.json"))
.then(response => response.json());
}
|
conditional_block
|
ServerPreferencesFetcher.tsx
|
import { Component, InfernoNode } from "inferno";
import { resolveAsset } from "../../assets";
import { fetchRetry } from "../../http";
import { ServerData } from "./data";
// Cache the response promise so the request is only sent once
let fetchServerData: Promise<ServerData> | undefined;
export class ServerPreferencesFetcher extends Component<{
render: (serverData: ServerData | undefined) => InfernoNode,
}, {
serverData?: ServerData;
}> {
state = {
serverData: undefined,
};
componentDidMount() {
this.populateServerData();
}
async populateServerData() {
if (!fetchServerData) {
fetchServerData = fetchRetry(resolveAsset("preferences.json"))
.then(response => response.json());
}
const preferencesData: ServerData = await fetchServerData;
this.setState({
serverData: preferencesData,
});
}
|
render() {
return this.props.render(this.state.serverData);
}
}
|
random_line_split
|
|
jquery.effects.scale.js
|
/*!
* jQuery UI Effects Scale 1.8.23
*
* Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* http://docs.jquery.com/UI/Effects/Scale
*
* Depends:
* jquery.effects.core.js
*/
(function( $, undefined ) {
$.effects.puff = function(o) {
return this.queue(function() {
var elem = $(this),
mode = $.effects.setMode(elem, o.options.mode || 'hide'),
percent = parseInt(o.options.percent, 10) || 150,
factor = percent / 100,
original = { height: elem.height(), width: elem.width() };
$.extend(o.options, {
fade: true,
mode: mode,
percent: mode == 'hide' ? percent : 100,
from: mode == 'hide'
? original
: {
height: original.height * factor,
width: original.width * factor
}
});
elem.effect('scale', o.options, o.duration, o.callback);
elem.dequeue();
});
};
$.effects.scale = function(o) {
return this.queue(function() {
// Create element
var el = $(this);
// Set options
var options = $.extend(true, {}, o.options);
var mode = $.effects.setMode(el, o.options.mode || 'effect'); // Set Mode
var percent = parseInt(o.options.percent,10) || (parseInt(o.options.percent,10) == 0 ? 0 : (mode == 'hide' ? 0 : 100)); // Set default scaling percent
var direction = o.options.direction || 'both'; // Set default axis
var origin = o.options.origin; // The origin of the scaling
if (mode != 'effect') { // Set default origin and restore for show/hide
options.origin = origin || ['middle','center'];
options.restore = true;
}
var original = {height: el.height(), width: el.width()}; // Save original
el.from = o.options.from || (mode == 'show' ? {height: 0, width: 0} : original); // Default from state
|
var factor = { // Set scaling factor
y: direction != 'horizontal' ? (percent / 100) : 1,
x: direction != 'vertical' ? (percent / 100) : 1
};
el.to = {height: original.height * factor.y, width: original.width * factor.x}; // Set to state
if (o.options.fade) { // Fade option to support puff
if (mode == 'show') {el.from.opacity = 0; el.to.opacity = 1;};
if (mode == 'hide') {el.from.opacity = 1; el.to.opacity = 0;};
};
// Animation
options.from = el.from; options.to = el.to; options.mode = mode;
// Animate
el.effect('size', options, o.duration, o.callback);
el.dequeue();
});
};
$.effects.size = function(o) {
return this.queue(function() {
// Create element
var el = $(this), props = ['position','top','bottom','left','right','width','height','overflow','opacity'];
var props1 = ['position','top','bottom','left','right','overflow','opacity']; // Always restore
var props2 = ['width','height','overflow']; // Copy for children
var cProps = ['fontSize'];
var vProps = ['borderTopWidth', 'borderBottomWidth', 'paddingTop', 'paddingBottom'];
var hProps = ['borderLeftWidth', 'borderRightWidth', 'paddingLeft', 'paddingRight'];
// Set options
var mode = $.effects.setMode(el, o.options.mode || 'effect'); // Set Mode
var restore = o.options.restore || false; // Default restore
var scale = o.options.scale || 'both'; // Default scale mode
var origin = o.options.origin; // The origin of the sizing
var original = {height: el.height(), width: el.width()}; // Save original
el.from = o.options.from || original; // Default from state
el.to = o.options.to || original; // Default to state
// Adjust
if (origin) { // Calculate baseline shifts
var baseline = $.effects.getBaseline(origin, original);
el.from.top = (original.height - el.from.height) * baseline.y;
el.from.left = (original.width - el.from.width) * baseline.x;
el.to.top = (original.height - el.to.height) * baseline.y;
el.to.left = (original.width - el.to.width) * baseline.x;
};
var factor = { // Set scaling factor
from: {y: el.from.height / original.height, x: el.from.width / original.width},
to: {y: el.to.height / original.height, x: el.to.width / original.width}
};
if (scale == 'box' || scale == 'both') { // Scale the css box
if (factor.from.y != factor.to.y) { // Vertical props scaling
props = props.concat(vProps);
el.from = $.effects.setTransition(el, vProps, factor.from.y, el.from);
el.to = $.effects.setTransition(el, vProps, factor.to.y, el.to);
};
if (factor.from.x != factor.to.x) { // Horizontal props scaling
props = props.concat(hProps);
el.from = $.effects.setTransition(el, hProps, factor.from.x, el.from);
el.to = $.effects.setTransition(el, hProps, factor.to.x, el.to);
};
};
if (scale == 'content' || scale == 'both') { // Scale the content
if (factor.from.y != factor.to.y) { // Vertical props scaling
props = props.concat(cProps);
el.from = $.effects.setTransition(el, cProps, factor.from.y, el.from);
el.to = $.effects.setTransition(el, cProps, factor.to.y, el.to);
};
};
$.effects.save(el, restore ? props : props1); el.show(); // Save & Show
$.effects.createWrapper(el); // Create Wrapper
el.css('overflow','hidden').css(el.from); // Shift
// Animate
if (scale == 'content' || scale == 'both') { // Scale the children
vProps = vProps.concat(['marginTop','marginBottom']).concat(cProps); // Add margins/font-size
hProps = hProps.concat(['marginLeft','marginRight']); // Add margins
props2 = props.concat(vProps).concat(hProps); // Concat
el.find("*[width]").each(function(){
var child = $(this);
if (restore) $.effects.save(child, props2);
var c_original = {height: child.height(), width: child.width()}; // Save original
child.from = {height: c_original.height * factor.from.y, width: c_original.width * factor.from.x};
child.to = {height: c_original.height * factor.to.y, width: c_original.width * factor.to.x};
if (factor.from.y != factor.to.y) { // Vertical props scaling
child.from = $.effects.setTransition(child, vProps, factor.from.y, child.from);
child.to = $.effects.setTransition(child, vProps, factor.to.y, child.to);
};
if (factor.from.x != factor.to.x) { // Horizontal props scaling
child.from = $.effects.setTransition(child, hProps, factor.from.x, child.from);
child.to = $.effects.setTransition(child, hProps, factor.to.x, child.to);
};
child.css(child.from); // Shift children
child.animate(child.to, o.duration, o.options.easing, function(){
if (restore) $.effects.restore(child, props2); // Restore children
}); // Animate children
});
};
// Animate
el.animate(el.to, { queue: false, duration: o.duration, easing: o.options.easing, complete: function() {
if (el.to.opacity === 0) {
el.css('opacity', el.from.opacity);
}
if(mode == 'hide') el.hide(); // Hide
$.effects.restore(el, restore ? props : props1); $.effects.removeWrapper(el); // Restore
if(o.callback) o.callback.apply(this, arguments); // Callback
el.dequeue();
}});
});
};
})(jQuery);
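// Typical calls into the effects defined above (a sketch, not part of the
// plugin; '#panel' is an illustrative selector):
$(function() {
    // Shrink an element to half its size around its center over 400ms.
    $('#panel').effect('scale', { percent: 50, direction: 'both' }, 400);
    // Hide it with the puff effect (scale up to 150% while fading out).
    $('#panel').hide('puff', { percent: 150 }, 300);
});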
|
// Adjust
|
random_line_split
|
memory.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::U256;
pub trait Memory {
/// Retrieve current size of the memory
fn size(&self) -> usize;
/// Resize (shrink or expand) the memory to the specified size (fills with 0)
fn resize(&mut self, new_size: usize);
/// Resize the memory only if it's smaller
fn expand(&mut self, new_size: usize);
/// Write single byte to memory
fn write_byte(&mut self, offset: U256, value: U256);
/// Write a word to memory. Does not resize memory!
fn write(&mut self, offset: U256, value: U256);
/// Read a word from memory
fn read(&self, offset: U256) -> U256;
/// Write slice of bytes to memory. Does not resize memory!
fn write_slice(&mut self, offset: U256, slice: &[u8]);
/// Retrieve part of the memory between offset and offset + size
fn read_slice(&self, offset: U256, size: U256) -> &[u8];
/// Retrieve writeable part of memory
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8];
fn dump(&self);
}
/// Checks whether offset and size form a valid memory range
fn is_valid_range(off: usize, size: usize) -> bool {
// When size is zero we haven't actually expanded the memory
let overflow = off.overflowing_add(size).1;
size > 0 && !overflow
}
impl Memory for Vec<u8> {
fn dump(&self) {
println!("MemoryDump:");
for i in self.iter() {
println!("{:02x} ", i);
}
println!("");
}
fn size(&self) -> usize {
self.len()
}
fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] {
let off = init_off_u.low_u64() as usize;
let size = init_size_u.low_u64() as usize;
if !is_valid_range(off, size) { &self[0..0] } else { &self[off..off + size] }
}
fn read(&self, offset: U256) -> U256 {
let off = offset.low_u64() as usize;
U256::from(&self[off..off + 32])
}
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] {
let off = offset.low_u64() as usize;
let s = size.low_u64() as usize;
if !is_valid_range(off, s) { &mut self[0..0] } else { &mut self[off..off + s] }
}
fn
|
(&mut self, offset: U256, slice: &[u8]) {
let off = offset.low_u64() as usize;
// TODO [todr] Optimize?
for pos in off..off + slice.len() {
self[pos] = slice[pos - off];
}
}
fn write(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
value.to_big_endian(&mut self[off..off + 32]);
}
fn write_byte(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
let val = value.low_u64() as u64;
self[off] = val as u8;
}
fn resize(&mut self, new_size: usize) {
self.resize(new_size, 0);
}
fn expand(&mut self, size: usize) {
if size > self.len() {
Memory::resize(self, size)
}
}
}
#[test]
fn test_memory_read_and_write() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(0x80 + 32);
// when
mem.write(U256::from(0x80), U256::from(0xabcdef));
// then
assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef));
}
#[test]
fn test_memory_read_and_write_byte() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(32);
// when
mem.write_byte(U256::from(0x1d), U256::from(0xab));
mem.write_byte(U256::from(0x1e), U256::from(0xcd));
mem.write_byte(U256::from(0x1f), U256::from(0xef));
// then
assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef));
}
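// An extra test sketch, mirroring the style above, showing how
// is_valid_range shields the slice accessors from bad ranges.
#[test]
fn test_memory_read_slice_bounds() {
    // given
    let mem: &mut Memory = &mut vec![];
    mem.resize(64);
    // then: zero-sized reads map to the empty slice instead of panicking
    assert!(mem.read_slice(U256::from(0), U256::from(0)).is_empty());
    // and in-range reads return exactly `size` bytes
    assert_eq!(mem.read_slice(U256::from(8), U256::from(4)).len(), 4);
}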
|
write_slice
|
identifier_name
|
memory.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::U256;
pub trait Memory {
/// Retrieve current size of the memory
fn size(&self) -> usize;
/// Resize (shrink or expand) the memory to the specified size (fills with 0)
fn resize(&mut self, new_size: usize);
/// Resize the memory only if it's smaller
fn expand(&mut self, new_size: usize);
/// Write single byte to memory
fn write_byte(&mut self, offset: U256, value: U256);
/// Write a word to memory. Does not resize memory!
fn write(&mut self, offset: U256, value: U256);
/// Read a word from memory
fn read(&self, offset: U256) -> U256;
/// Write slice of bytes to memory. Does not resize memory!
fn write_slice(&mut self, offset: U256, slice: &[u8]);
/// Retrieve part of the memory between offset and offset + size
fn read_slice(&self, offset: U256, size: U256) -> &[u8];
/// Retrieve writeable part of memory
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8];
fn dump(&self);
}
/// Checks whether offset and size form a valid memory range
fn is_valid_range(off: usize, size: usize) -> bool {
// When size is zero we haven't actually expanded the memory
let overflow = off.overflowing_add(size).1;
size > 0 && !overflow
}
impl Memory for Vec<u8> {
fn dump(&self) {
println!("MemoryDump:");
for i in self.iter() {
println!("{:02x} ", i);
}
println!("");
}
fn size(&self) -> usize
|
fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] {
let off = init_off_u.low_u64() as usize;
let size = init_size_u.low_u64() as usize;
if !is_valid_range(off, size) { &self[0..0] } else { &self[off..off + size] }
}
fn read(&self, offset: U256) -> U256 {
let off = offset.low_u64() as usize;
U256::from(&self[off..off + 32])
}
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] {
let off = offset.low_u64() as usize;
let s = size.low_u64() as usize;
if !is_valid_range(off, s) { &mut self[0..0] } else { &mut self[off..off + s] }
}
fn write_slice(&mut self, offset: U256, slice: &[u8]) {
let off = offset.low_u64() as usize;
// TODO [todr] Optimize?
for pos in off..off + slice.len() {
self[pos] = slice[pos - off];
}
}
fn write(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
value.to_big_endian(&mut self[off..off + 32]);
}
fn write_byte(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
let val = value.low_u64() as u64;
self[off] = val as u8;
}
fn resize(&mut self, new_size: usize) {
self.resize(new_size, 0);
}
fn expand(&mut self, size: usize) {
if size > self.len() {
Memory::resize(self, size)
}
}
}
#[test]
fn test_memory_read_and_write() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(0x80 + 32);
// when
mem.write(U256::from(0x80), U256::from(0xabcdef));
// then
assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef));
}
#[test]
fn test_memory_read_and_write_byte() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(32);
// when
mem.write_byte(U256::from(0x1d), U256::from(0xab));
mem.write_byte(U256::from(0x1e), U256::from(0xcd));
mem.write_byte(U256::from(0x1f), U256::from(0xef));
// then
assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef));
}
|
{
self.len()
}
|
identifier_body
|
memory.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::U256;
pub trait Memory {
/// Retrieve current size of the memory
fn size(&self) -> usize;
/// Resize (shrink or expand) the memory to the specified size (fills with 0)
fn resize(&mut self, new_size: usize);
/// Resize the memory only if it's smaller
fn expand(&mut self, new_size: usize);
/// Write single byte to memory
fn write_byte(&mut self, offset: U256, value: U256);
/// Write a word to memory. Does not resize memory!
fn write(&mut self, offset: U256, value: U256);
/// Read a word from memory
fn read(&self, offset: U256) -> U256;
/// Write slice of bytes to memory. Does not resize memory!
fn write_slice(&mut self, offset: U256, slice: &[u8]);
/// Retrieve part of the memory between offset and offset + size
fn read_slice(&self, offset: U256, size: U256) -> &[u8];
/// Retrieve writeable part of memory
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8];
fn dump(&self);
}
/// Checks whether offset and size form a valid memory range
fn is_valid_range(off: usize, size: usize) -> bool {
// When size is zero we haven't actually expanded the memory
let overflow = off.overflowing_add(size).1;
size > 0 && !overflow
}
impl Memory for Vec<u8> {
fn dump(&self) {
println!("MemoryDump:");
for i in self.iter() {
println!("{:02x} ", i);
}
println!("");
}
fn size(&self) -> usize {
self.len()
}
fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] {
let off = init_off_u.low_u64() as usize;
let size = init_size_u.low_u64() as usize;
if !is_valid_range(off, size) { &self[0..0] } else { &self[off..off + size] }
}
fn read(&self, offset: U256) -> U256 {
let off = offset.low_u64() as usize;
U256::from(&self[off..off + 32])
}
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] {
let off = offset.low_u64() as usize;
let s = size.low_u64() as usize;
if !is_valid_range(off, s) { &mut self[0..0] } else
|
}
fn write_slice(&mut self, offset: U256, slice: &[u8]) {
let off = offset.low_u64() as usize;
// TODO [todr] Optimize?
for pos in off..off + slice.len() {
self[pos] = slice[pos - off];
}
}
fn write(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
value.to_big_endian(&mut self[off..off + 32]);
}
fn write_byte(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
let val = value.low_u64() as u64;
self[off] = val as u8;
}
fn resize(&mut self, new_size: usize) {
self.resize(new_size, 0);
}
fn expand(&mut self, size: usize) {
if size > self.len() {
Memory::resize(self, size)
}
}
}
#[test]
fn test_memory_read_and_write() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(0x80 + 32);
// when
mem.write(U256::from(0x80), U256::from(0xabcdef));
// then
assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef));
}
#[test]
fn test_memory_read_and_write_byte() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(32);
// when
mem.write_byte(U256::from(0x1d), U256::from(0xab));
mem.write_byte(U256::from(0x1e), U256::from(0xcd));
mem.write_byte(U256::from(0x1f), U256::from(0xef));
// then
assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef));
}
|
{ &mut self[off..off + s] }
|
conditional_block
|
memory.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
|
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::U256;
pub trait Memory {
/// Retrieve current size of the memory
fn size(&self) -> usize;
/// Resize (shrink or expand) the memory to the specified size (fills with 0)
fn resize(&mut self, new_size: usize);
/// Resize the memory only if it's smaller
fn expand(&mut self, new_size: usize);
/// Write single byte to memory
fn write_byte(&mut self, offset: U256, value: U256);
/// Write a word to memory. Does not resize memory!
fn write(&mut self, offset: U256, value: U256);
/// Read a word from memory
fn read(&self, offset: U256) -> U256;
/// Write slice of bytes to memory. Does not resize memory!
fn write_slice(&mut self, offset: U256, slice: &[u8]);
/// Retrieve part of the memory between offset and offset + size
fn read_slice(&self, offset: U256, size: U256) -> &[u8];
/// Retrieve writeable part of memory
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8];
fn dump(&self);
}
/// Checks whether offset and size form a valid memory range
fn is_valid_range(off: usize, size: usize) -> bool {
// When size is zero we haven't actually expanded the memory
let overflow = off.overflowing_add(size).1;
size > 0 && !overflow
}
impl Memory for Vec<u8> {
fn dump(&self) {
println!("MemoryDump:");
for i in self.iter() {
println!("{:02x} ", i);
}
println!("");
}
fn size(&self) -> usize {
self.len()
}
fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] {
let off = init_off_u.low_u64() as usize;
let size = init_size_u.low_u64() as usize;
if !is_valid_range(off, size) { &self[0..0] } else { &self[off..off + size] }
}
fn read(&self, offset: U256) -> U256 {
let off = offset.low_u64() as usize;
U256::from(&self[off..off + 32])
}
fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] {
let off = offset.low_u64() as usize;
let s = size.low_u64() as usize;
if !is_valid_range(off, s) { &mut self[0..0] } else { &mut self[off..off + s] }
}
fn write_slice(&mut self, offset: U256, slice: &[u8]) {
let off = offset.low_u64() as usize;
// TODO [todr] Optimize?
for pos in off..off + slice.len() {
self[pos] = slice[pos - off];
}
}
fn write(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
value.to_big_endian(&mut self[off..off + 32]);
}
fn write_byte(&mut self, offset: U256, value: U256) {
let off = offset.low_u64() as usize;
let val = value.low_u64() as u64;
self[off] = val as u8;
}
fn resize(&mut self, new_size: usize) {
self.resize(new_size, 0);
}
fn expand(&mut self, size: usize) {
if size > self.len() {
Memory::resize(self, size)
}
}
}
#[test]
fn test_memory_read_and_write() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(0x80 + 32);
// when
mem.write(U256::from(0x80), U256::from(0xabcdef));
// then
assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef));
}
#[test]
fn test_memory_read_and_write_byte() {
// given
let mem: &mut Memory = &mut vec![];
mem.resize(32);
// when
mem.write_byte(U256::from(0x1d), U256::from(0xab));
mem.write_byte(U256::from(0x1e), U256::from(0xcd));
mem.write_byte(U256::from(0x1f), U256::from(0xef));
// then
assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef));
}
|
random_line_split
|
|
day11.js
|
/*
--- Day 11: Corporate Policy ---
Santa's previous password expired, and he needs help choosing a new one.
To help him remember his new password after the old one expires,
Santa has devised a method of coming up with a password based on the previous one.
Corporate policy dictates that passwords must be exactly eight lowercase letters (for security reasons),
so he finds his new password by incrementing his old password string repeatedly until it is valid.
|
Increase the rightmost letter one step; if it was z, it wraps around to a,
and repeat with the next letter to the left until one doesn't wrap around.
Unfortunately for Santa, a new Security-Elf recently started, and he has imposed some additional password requirements:
Passwords must include one increasing straight of at least three letters, like abc, bcd, cde, and so on, up to xyz.
They cannot skip letters; abd doesn't count.
Passwords may not contain the letters i, o, or l, as these letters can be mistaken for other characters and are therefore confusing.
Passwords must contain at least two different, non-overlapping pairs of letters, like aa, bb, or zz.
For example:
hijklmmn meets the first requirement
(because it contains the straight hij) but fails the second requirement (because it contains i and l).
abbceffg meets the third requirement
(because it repeats bb and ff) but fails the first requirement.
abbcegjk fails the third requirement,
because it only has one double letter (bb).
The next password after abcdefgh is abcdffaa.
The next password after ghijklmn is ghjaabcc,
because you eventually skip all the passwords that start with ghi..., since i is not allowed.
Given Santa's current password (your puzzle input), what should his next password be?
--- Part Two ---
Santa's password expired again. What's the next one?
*/
var corporatePolicy = require('./app/corporate_policy');
console.log(corporatePolicy.day11());
console.log(corporatePolicy.part2());
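// A minimal sketch of the wrap-and-carry increment the puzzle text describes
// (corporate_policy's real implementation may differ):
function increment(password) {
  var chars = password.split('');
  for (var i = chars.length - 1; i >= 0; i--) {
    if (chars[i] === 'z') {
      chars[i] = 'a'; // wrap around and carry into the next letter left
    } else {
      chars[i] = String.fromCharCode(chars[i].charCodeAt(0) + 1);
      break; // no carry needed
    }
  }
  return chars.join('');
}
console.log(increment('xx')); // 'xy'
console.log(increment('xz')); // 'ya'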
|
Incrementing is just like counting with numbers: xx, xy, xz, ya, yb, and so on.
|
random_line_split
|
hello-world-leds.js
|
/*
This is the example config file used to demonstrate
turning LEDs of different types and protocols on and off.
*/
// converts a number to a two-digit hex string (e.g. 255 -> "0xff")
function toHex(c) {
var hex = c.toString(16);
hex = hex.length == 1 ? "0" + hex : hex;
return "0x" + hex;
}
// formats an input to be received by a tri-color gpio led
function triGpioColor(c) {
if(c === 1) return 1;
if(c === 0) return 0;
var isArray = Array.isArray(c);
if(isArray && this._component.name === 'red') return c[0] === 255 ? 1 : 0;
if(isArray && this._component.name === 'blue') return c[1] === 255 ? 1: 0;
if(isArray && this._component.name === 'green') return c[2] === 255 ? 1: 0;
return c;
}
// formats the input to be received by the blinkM led
function blinkMInput(c) {
if(c === 1) return [255, 255, 255];
if(c === 0) return [0, 0, 0];
return [c[0] ,c[1],c[2]];
}
// formats the input to be received by a 5050 LED (APA102)
var apaIdx = 0;
function
|
(c, cmd) {
if(c === 1) return [0xff, 0xff, 0xff, 0xff];
else if(c === 0) return [0xff, 0x00, 0x00, 0x00];
return [0xff, toHex(c[2]), toHex(c[1]), toHex(c[0])];
}
module.exports = {
"name":"hello-world-leds",
"i2c-path": '/dev/i2c-1',
"components" : [{"type":"led", "name":"blue", "direction": "out",
"address":17, "interface": "gpio", "formatInput" : triGpioColor},
{"type":"led","name":"green", "address":27, "direction": "out",
"interface": "gpio", formatInput: triGpioColor},
{"type":"led","name":"red","address":22, "direction": "out",
"interface": "gpio", formatInput: triGpioColor },
{type:"led",path: 1, address: 0x09, "name":"blinkm",
interface: "i2c",
init: {type: 'write', cmd: 0x6f},
set:{type: 'write', cmd:0x6e , formatInput: blinkMInput}},
{type: "led", interface: "spi", name: "apa102",
address: "/dev/spidev0.0", set: [
{val: new Buffer(32).fill(0)},{formatInput: apa102Input}]}
]
};
|
apa102Input
|
identifier_name
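The config above normalizes one logical value (1, 0, or an [r, g, b] triple) into whatever each LED protocol expects, via the formatInput hooks. A small Python sketch of that normalization, with names of my own choosing, makes the mapping explicit; note it copies the JS file's blue-at-index-1, green-at-index-2 ordering rather than assuming plain RGB:

def tri_gpio_color(value, channel):
    # A GPIO pin is binary: pass 1/0 through, otherwise threshold
    # the matching channel of an [r, g, b] triple.
    if value in (0, 1):
        return value
    index = {'red': 0, 'blue': 1, 'green': 2}[channel]  # order copied from the JS config
    return 1 if value[index] == 255 else 0

def apa102_frame(value):
    # One APA102 LED frame: a brightness byte, then blue, green, red.
    if value == 1:
        return [0xFF, 0xFF, 0xFF, 0xFF]
    if value == 0:
        return [0xFF, 0x00, 0x00, 0x00]
    r, g, b = value
    return [0xFF, b, g, r]

print(tri_gpio_color([255, 0, 0], 'red'))   # 1
print(apa102_frame([255, 0, 0]))            # [255, 0, 0, 255]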
|
hello-world-leds.js
|
/*
This is the example config file used to demonstrate
turning leds of different types and protocols on and off.
*/
// converts a number to a hex
function toHex(c) {
var hex = c.toString(16);
hex = hex.length == 1 ? "0" + hex : hex;
return "0x" + hex;
}
// formats an input to be received by a tri-color gpio led
function triGpioColor(c) {
if(c === 1) return 1;
if(c === 0) return 0;
var isArray = Array.isArray(c);
if(isArray && this._component.name === 'red') return c[0] === 255 ? 1 : 0;
if(isArray && this._component.name === 'blue') return c[1] === 255 ? 1: 0;
if(isArray && this._component.name === 'green') return c[2] === 255 ? 1: 0;
return c;
}
// formats the input to be received by the blinkM led
function blinkMInput(c) {
if(c === 1) return [255, 255, 255];
if(c === 0) return [0, 0, 0];
return [c[0] ,c[1],c[2]];
}
|
if(c === 1) return [0xff, 0xff, 0xff, 0xff];
else if(c === 0) return [0xff, 0x00, 0x00, 0x00];
return [0xff, toHex(c[2]), toHex(c[1]), toHex(c[0])];
}
module.exports = {
"name":"hello-world-leds",
"i2c-path": '/dev/i2c-1',
"components" : [{"type":"led", "name":"blue", "direction": "out",
"address":17, "interface": "gpio", "formatInput" : triGpioColor},
{"type":"led","name":"green", "address":27, "direction": "out",
"interface": "gpio", formatInput: triGpioColor},
{"type":"led","name":"red","address":22, "direction": "out",
"interface": "gpio", formatInput: triGpioColor },
{type:"led",path: 1, address: 0x09, "name":"blinkm",
interface: "i2c",
init: {type: 'write', cmd: 0x6f},
set:{type: 'write', cmd:0x6e , formatInput: blinkMInput}},
{type: "led", interface: "spi", name: "apa102",
address: "/dev/spidev0.0", set: [
{val: new Buffer(32).fill(0)},{formatInput: apa102Input}]}
]
};
|
// formats the input to be received by a 5050led (apa102)
var apaIdx = 0;
function apa102Input(c, cmd) {
|
random_line_split
|
hello-world-leds.js
|
/*
This is the example config file used to demonstrate
turning leds of different types and protocols on and off.
*/
// converts a number to a hex
function toHex(c) {
var hex = c.toString(16);
hex = hex.length == 1 ? "0" + hex : hex;
return "0x" + hex;
}
// formats an input to be received by a tri-color gpio led
function triGpioColor(c) {
if(c === 1) return 1;
if(c === 0) return 0;
var isArray = Array.isArray(c);
if(isArray && this._component.name === 'red') return c[0] === 255 ? 1 : 0;
if(isArray && this._component.name === 'blue') return c[1] === 255 ? 1: 0;
if(isArray && this._component.name === 'green') return c[2] === 255 ? 1: 0;
return c;
}
// formats the input to be received by the blinkM led
function blinkMInput(c) {
if(c === 1) return [255, 255, 255];
if(c === 0) return [0, 0, 0];
return [c[0] ,c[1],c[2]];
}
// formats the input to be received by a 5050led (apa102)
var apaIdx = 0;
function apa102Input(c, cmd)
|
module.exports = {
"name":"hello-world-leds",
"i2c-path": '/dev/i2c-1',
"components" : [{"type":"led", "name":"blue", "direction": "out",
"address":17, "interface": "gpio", "formatInput" : triGpioColor},
{"type":"led","name":"green", "address":27, "direction": "out",
"interface": "gpio", formatInput: triGpioColor},
{"type":"led","name":"red","address":22, "direction": "out",
"interface": "gpio", formatInput: triGpioColor },
{type:"led",path: 1, address: 0x09, "name":"blinkm",
interface: "i2c",
init: {type: 'write', cmd: 0x6f},
set:{type: 'write', cmd:0x6e , formatInput: blinkMInput}},
{type: "led", interface: "spi", name: "apa102",
address: "/dev/spidev0.0", set: [
{val: new Buffer(32).fill(0)},{formatInput: apa102Input}]}
]
};
|
{
if(c === 1) return [0xff, 0xff, 0xff, 0xff];
else if(c === 0) return [0xff, 0x00, 0x00, 0x00];
return [0xff, toHex(c[2]), toHex(c[1]), toHex(c[0])];
}
|
identifier_body
|
vitrine_quatro_horizontais.js
|
Ti.include("/api/config.js");
Ti.include("/api/category_render.js");
Ti.include("/database/produtos.js");
Ti.include("/database/imagens_produtos.js");
var args = arguments[0] || {};
var marca = args.marca || 0;
var categoria = args.cat_id || 0;
var template = args.template || 0;
var current_page = 1;
var itemsperpage = 4;
var empresa = Ti.App.Properties.getString(CURRENT_EMPRESA);
var produtos = selectProductsCount(categoria, marca, empresa);
var paginas = Math.ceil(produtos / itemsperpage);
redimencionaVitrine($.vitrine);
renderProducts();
function renderProducts() {
$.paginacao.title = current_page + "/" + paginas;
var i = 0;
var preco;
var seleciona;
var imagem;
var imagens;
var tempo;
var referencia = "null";
var start = (current_page - 1) * itemsperpage;
var produtos = selectProductsByPage(empresa, marca, categoria, start, itemsperpage);
$.gradeA.hide();
$.gradeB.hide();
$.gradeC.hide();
$.gradeD.hide();
while (produtos.isValidRow()) {
switch(i) {
case 0:
$.gradeA.show();
preco = $.precoA;
tempo = $.tempoA;
seleciona = $.selecionaA;
imagem = $.imagemA;
imagens = $.imagesA;
break;
case 1:
$.gradeB.show();
preco = $.precoB;
tempo = $.tempoB;
function redimencionaVitrine(vitrine) {
var ALTURA_PADRAO = 710;
var LARGURA_PADRAO = 1260;
/*
var alturaTela = dipUnitsToPixels(Ti.Platform.displayCaps.ydpi);
var larguraTela = dipUnitsToPixels(Ti.Platform.displayCaps.xdpi);
*/
var alturaTela = 730;
var larguraTela = 1280;
if (Ti.Platform.osname != "android") {
alturaTela = alturaTela - 200;
larguraTela = larguraTela - 100;
} else {
alturaTela -= 250;
larguraTela -= 250;
}
var alturaView = Math.round(alturaTela * 0.9);
var larguraView = Math.round(LARGURA_PADRAO * alturaView / ALTURA_PADRAO);
if (larguraView < larguraTela) {
vitrine.width = larguraView;
vitrine.height = alturaView;
//alert('largura' + larguraView + ", width: " + v.size.width + ", height: " + v.size.height);
} else {
alturaView = Math.round(ALTURA_PADRAO * larguraTela / LARGURA_PADRAO);
vitrine.width = larguraTela;
vitrine.height = alturaView;
//alert('largura' + alturaView + ", width: " + larguraTela + ", height: " + alturaTela);
}
}
seleciona = $.selecionaB;
imagem = $.imagemB;
imagens = $.imagesB;
break;
case 2:
$.gradeC.show();
preco = $.precoC;
tempo = $.tempoC;
seleciona = $.selecionaC;
imagem = $.imagemC;
imagens = $.imagesC;
break;
case 3:
$.gradeD.show();
preco = $.precoD;
tempo = $.tempoD;
seleciona = $.selecionaD;
imagem = $.imagemD;
imagens = $.imagesD;
break;
}
loadItems(template, produtos, referencia, preco, tempo, seleciona, imagem, imagens, $.quantidade);
i++;
produtos.next();
}
produtos.close();
}
function limpar() {
var valores = ["sim","nao"];
// if(Ti.Platform.osname == "android"){
var exclui = Ti.UI.createAlertDialog({
//options: valores,
buttonNames: ['Confirmar','Cancelar'],
destructive: 2,
title: "Desmarcar itens",
message: "Essa opcao ira desmarcar todos os itens selecionados em todas as paginas!"
});
exclui.show();
exclui.addEventListener("click", function(e){
if(e.index == 0){
categoryClear($.quantidade);
} else {
alert("Continue comprando");
}
});
}
function voltar()
|
function anterior() {
current_page--;
if (current_page <= 0)
current_page = paginas;
cleanImages();
renderProducts();
}
function proximo() {
current_page++;
if (current_page > paginas)
current_page = 1;
cleanImages();
renderProducts();
}
function primeiro() {
current_page = 1;
cleanImages();
renderProducts();
}
function ultimo() {
current_page = paginas;
cleanImages();
renderProducts();
}
function cesta() {
categoryCesta();
}
var eventListener = function() {
Ti.App.removeEventListener('removeBitmap', eventListener);
Ti.API.info('Quatro horizontais');
cleanImages();
};
Ti.App.addEventListener('removeBitmap', eventListener);
if(Ti.Platform.osname == "ipad"){
$.botaoQuatroVerticais.font = {fontSize: 13};
$.botaoQuatroVerticais.height = "63%";
$.botaoQuatroVerticais.title = "Limpar marcações";
$.botaoQuatroVerticais.textAlign = "center";
}
|
{
categoryVoltar();
}
|
identifier_body
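The controller above computes its page count as the ceiling of the product count over the page size, and wraps the current page at both ends. That arithmetic is easy to isolate; here is a minimal Python sketch (the class and method names are mine, not Titanium's):

import math

class Pager:
    def __init__(self, total_items, per_page=4):
        self.pages = max(1, math.ceil(total_items / per_page))
        self.current = 1

    def next(self):
        # Past the last page, wrap back to the first (like proximo()).
        self.current = 1 if self.current >= self.pages else self.current + 1
        return self.current

    def prev(self):
        # Before the first page, wrap to the last (like anterior()).
        self.current = self.pages if self.current <= 1 else self.current - 1
        return self.current

p = Pager(total_items=9, per_page=4)            # ceil(9 / 4) = 3 pages
assert [p.next(), p.next(), p.next()] == [2, 3, 1]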
|
vitrine_quatro_horizontais.js
|
Ti.include("/api/config.js");
Ti.include("/api/category_render.js");
Ti.include("/database/produtos.js");
Ti.include("/database/imagens_produtos.js");
var args = arguments[0] || {};
var marca = args.marca || 0;
var categoria = args.cat_id || 0;
var template = args.template || 0;
var current_page = 1;
var itemsperpage = 4;
var empresa = Ti.App.Properties.getString(CURRENT_EMPRESA);
var produtos = selectProductsCount(categoria, marca, empresa);
var paginas = Math.ceil(produtos / itemsperpage);
redimencionaVitrine($.vitrine);
renderProducts();
function renderProducts() {
$.paginacao.title = current_page + "/" + paginas;
var i = 0;
var preco;
var seleciona;
var imagem;
var imagens;
var tempo;
var referencia = "null";
var start = (current_page - 1) * itemsperpage;
var produtos = selectProductsByPage(empresa, marca, categoria, start, itemsperpage);
$.gradeA.hide();
$.gradeB.hide();
$.gradeC.hide();
$.gradeD.hide();
while (produtos.isValidRow()) {
switch(i) {
case 0:
$.gradeA.show();
preco = $.precoA;
tempo = $.tempoA;
seleciona = $.selecionaA;
imagem = $.imagemA;
imagens = $.imagesA;
break;
case 1:
$.gradeB.show();
preco = $.precoB;
tempo = $.tempoB;
function redimencionaVitrine(vitrine) {
var ALTURA_PADRAO = 710;
var LARGURA_PADRAO = 1260;
/*
var alturaTela = dipUnitsToPixels(Ti.Platform.displayCaps.ydpi);
var larguraTela = dipUnitsToPixels(Ti.Platform.displayCaps.xdpi);
*/
var alturaTela = 730;
var larguraTela = 1280;
if (Ti.Platform.osname != "android") {
alturaTela = alturaTela - 200;
larguraTela = larguraTela - 100;
} else
|
var alturaView = Math.round(alturaTela * 0.9);
var larguraView = Math.round(LARGURA_PADRAO * alturaView / ALTURA_PADRAO);
if (larguraView < larguraTela) {
vitrine.width = larguraView;
vitrine.height = alturaView;
//alert('largura' + larguraView + ", width: " + v.size.width + ", height: " + v.size.height);
} else {
alturaView = Math.round(ALTURA_PADRAO * larguraTela / LARGURA_PADRAO);
vitrine.width = larguraTela;
vitrine.height = alturaView;
//alert('largura' + alturaView + ", width: " + larguraTela + ", height: " + alturaTela);
}
}
seleciona = $.selecionaB;
imagem = $.imagemB;
imagens = $.imagesB;
break;
case 2:
$.gradeC.show();
preco = $.precoC;
tempo = $.tempoC;
seleciona = $.selecionaC;
imagem = $.imagemC;
imagens = $.imagesC;
break;
case 3:
$.gradeD.show();
preco = $.precoD;
tempo = $.tempoD;
seleciona = $.selecionaD;
imagem = $.imagemD;
imagens = $.imagesD;
break;
}
loadItems(template, produtos, referencia, preco, tempo, seleciona, imagem, imagens, $.quantidade);
i++;
produtos.next();
}
produtos.close();
}
function limpar() {
var valores = ["sim","nao"];
// if(Ti.Platform.osname == "android"){
var exclui = Ti.UI.createAlertDialog({
//options: valores,
buttonNames: ['Confirmar','Cancelar'],
destructive: 2,
title: "Desmarcar itens",
message: "Essa opcao ira desmarcar todos os itens selecionados em todas as paginas!"
});
exclui.show();
exclui.addEventListener("click", function(e){
if(e.index == 0){
categoryClear($.quantidade);
} else {
alert("Continue comprando");
}
});
}
function voltar() {
categoryVoltar();
}
function anterior() {
current_page--;
if (current_page <= 0)
current_page = paginas;
cleanImages();
renderProducts();
}
function proximo() {
current_page++;
if (current_page > paginas)
current_page = 1;
cleanImages();
renderProducts();
}
function primeiro() {
current_page = 1;
cleanImages();
renderProducts();
}
function ultimo() {
current_page = paginas;
cleanImages();
renderProducts();
}
function cesta() {
categoryCesta();
}
var eventListener = function() {
Ti.App.removeEventListener('removeBitmap', eventListener);
Ti.API.info('Quatro horizontais');
cleanImages();
};
Ti.App.addEventListener('removeBitmap', eventListener);
if(Ti.Platform.osname == "ipad"){
$.botaoQuatroVerticais.font = {fontSize: 13};
$.botaoQuatroVerticais.height = "63%";
$.botaoQuatroVerticais.title = "Limpar marcações";
$.botaoQuatroVerticais.textAlign = "center";
}
|
{
alturaTela -= 250;
larguraTela -= 250;
}
|
conditional_block
|
vitrine_quatro_horizontais.js
|
Ti.include("/api/config.js");
Ti.include("/api/category_render.js");
Ti.include("/database/produtos.js");
Ti.include("/database/imagens_produtos.js");
var args = arguments[0] || {};
var marca = args.marca || 0;
var categoria = args.cat_id || 0;
var template = args.template || 0;
var current_page = 1;
var itemsperpage = 4;
var empresa = Ti.App.Properties.getString(CURRENT_EMPRESA);
var produtos = selectProductsCount(categoria, marca, empresa);
var paginas = Math.ceil(produtos / itemsperpage);
redimencionaVitrine($.vitrine);
renderProducts();
function renderProducts() {
$.paginacao.title = current_page + "/" + paginas;
var i = 0;
var preco;
var seleciona;
var imagem;
var imagens;
var tempo;
var referencia = "null";
var start = (current_page - 1) * itemsperpage;
var produtos = selectProductsByPage(empresa, marca, categoria, start, itemsperpage);
$.gradeA.hide();
$.gradeB.hide();
$.gradeC.hide();
$.gradeD.hide();
while (produtos.isValidRow()) {
switch(i) {
case 0:
$.gradeA.show();
preco = $.precoA;
tempo = $.tempoA;
seleciona = $.selecionaA;
imagem = $.imagemA;
imagens = $.imagesA;
break;
case 1:
$.gradeB.show();
preco = $.precoB;
tempo = $.tempoB;
function redimencionaVitrine(vitrine) {
var ALTURA_PADRAO = 710;
var LARGURA_PADRAO = 1260;
/*
var alturaTela = dipUnitsToPixels(Ti.Platform.displayCaps.ydpi);
var larguraTela = dipUnitsToPixels(Ti.Platform.displayCaps.xdpi);
*/
var alturaTela = 730;
var larguraTela = 1280;
if (Ti.Platform.osname != "android") {
alturaTela = alturaTela - 200;
larguraTela = larguraTela - 100;
} else {
alturaTela -= 250;
larguraTela -= 250;
}
var alturaView = Math.round(alturaTela * 0.9);
var larguraView = Math.round(LARGURA_PADRAO * alturaView / ALTURA_PADRAO);
if (larguraView < larguraTela) {
vitrine.width = larguraView;
vitrine.height = alturaView;
//alert('largura' + larguraView + ", width: " + v.size.width + ", height: " + v.size.height);
} else {
alturaView = Math.round(ALTURA_PADRAO * larguraTela / LARGURA_PADRAO);
vitrine.width = larguraTela;
vitrine.height = alturaView;
//alert('largura' + alturaView + ", width: " + larguraTela + ", height: " + alturaTela);
}
}
seleciona = $.selecionaB;
imagem = $.imagemB;
imagens = $.imagesB;
break;
case 2:
$.gradeC.show();
preco = $.precoC;
tempo = $.tempoC;
seleciona = $.selecionaC;
imagem = $.imagemC;
imagens = $.imagesC;
break;
case 3:
$.gradeD.show();
preco = $.precoD;
tempo = $.tempoD;
seleciona = $.selecionaD;
imagem = $.imagemD;
imagens = $.imagesD;
break;
}
loadItems(template, produtos, referencia, preco, tempo, seleciona, imagem, imagens, $.quantidade);
i++;
produtos.next();
}
produtos.close();
}
function limpar() {
var valores = ["sim","nao"];
// if(Ti.Platform.osname == "android"){
var exclui = Ti.UI.createAlertDialog({
//options: valores,
buttonNames: ['Confirmar','Cancelar'],
destructive: 2,
|
exclui.show();
exclui.addEventListener("click", function(e){
if(e.index == 0){
categoryClear($.quantidade);
} else {
alert("Continue comprando");
}
});
}
function voltar() {
categoryVoltar();
}
function anterior() {
current_page--;
if (current_page <= 0)
current_page = paginas;
cleanImages();
renderProducts();
}
function proximo() {
current_page++;
if (current_page > paginas)
current_page = 1;
cleanImages();
renderProducts();
}
function primeiro() {
current_page = 1;
cleanImages();
renderProducts();
}
function ultimo() {
current_page = paginas;
cleanImages();
renderProducts();
}
function cesta() {
categoryCesta();
}
var eventListener = function() {
Ti.App.removeEventListener('removeBitmap', eventListener);
Ti.API.info('Quatro horizontais');
cleanImages();
};
Ti.App.addEventListener('removeBitmap', eventListener);
if(Ti.Platform.osname == "ipad"){
$.botaoQuatroVerticais.font = {fontSize: 13};
$.botaoQuatroVerticais.height = "63%";
$.botaoQuatroVerticais.title = "Limpar marcações";
$.botaoQuatroVerticais.textAlign = "center";
}
|
title: "Desmarcar itens",
message: "Essa opcao ira desmarcar todos os itens selecionados em todas as paginas!"
});
|
random_line_split
|
vitrine_quatro_horizontais.js
|
Ti.include("/api/config.js");
Ti.include("/api/category_render.js");
Ti.include("/database/produtos.js");
Ti.include("/database/imagens_produtos.js");
var args = arguments[0] || {};
var marca = args.marca || 0;
var categoria = args.cat_id || 0;
var template = args.template || 0;
var current_page = 1;
var itemsperpage = 4;
var empresa = Ti.App.Properties.getString(CURRENT_EMPRESA);
var produtos = selectProductsCount(categoria, marca, empresa);
var paginas = Math.ceil(produtos / itemsperpage);
redimencionaVitrine($.vitrine);
renderProducts();
function renderProducts() {
$.paginacao.title = current_page + "/" + paginas;
var i = 0;
var preco;
var seleciona;
var imagem;
var imagens;
var tempo;
var referencia = "null";
var start = (current_page - 1) * itemsperpage;
var produtos = selectProductsByPage(empresa, marca, categoria, start, itemsperpage);
$.gradeA.hide();
$.gradeB.hide();
$.gradeC.hide();
$.gradeD.hide();
while (produtos.isValidRow()) {
switch(i) {
case 0:
$.gradeA.show();
preco = $.precoA;
tempo = $.tempoA;
seleciona = $.selecionaA;
imagem = $.imagemA;
imagens = $.imagesA;
break;
case 1:
$.gradeB.show();
preco = $.precoB;
tempo = $.tempoB;
function redimencionaVitrine(vitrine) {
var ALTURA_PADRAO = 710;
var LARGURA_PADRAO = 1260;
/*
var alturaTela = dipUnitsToPixels(Ti.Platform.displayCaps.ydpi);
var larguraTela = dipUnitsToPixels(Ti.Platform.displayCaps.xdpi);
*/
var alturaTela = 730;
var larguraTela = 1280;
if (Ti.Platform.osname != "android") {
alturaTela = alturaTela - 200;
larguraTela = larguraTela - 100;
} else {
alturaTela -= 250;
larguraTela -= 250;
}
var alturaView = Math.round(alturaTela * 0.9);
var larguraView = Math.round(LARGURA_PADRAO * alturaView / ALTURA_PADRAO);
if (larguraView < larguraTela) {
vitrine.width = larguraView;
vitrine.height = alturaView;
//alert('largura' + larguraView + ", width: " + v.size.width + ", height: " + v.size.height);
} else {
alturaView = Math.round(ALTURA_PADRAO * larguraTela / LARGURA_PADRAO);
vitrine.width = larguraTela;
vitrine.height = alturaView;
//alert('largura' + alturaView + ", width: " + larguraTela + ", height: " + alturaTela);
}
}
seleciona = $.selecionaB;
imagem = $.imagemB;
imagens = $.imagesB;
break;
case 2:
$.gradeC.show();
preco = $.precoC;
tempo = $.tempoC;
seleciona = $.selecionaC;
imagem = $.imagemC;
imagens = $.imagesC;
break;
case 3:
$.gradeD.show();
preco = $.precoD;
tempo = $.tempoD;
seleciona = $.selecionaD;
imagem = $.imagemD;
imagens = $.imagesD;
break;
}
loadItems(template, produtos, referencia, preco, tempo, seleciona, imagem, imagens, $.quantidade);
i++;
produtos.next();
}
produtos.close();
}
function limpar() {
var valores = ["sim","nao"];
// if(Ti.Platform.osname == "android"){
var exclui = Ti.UI.createAlertDialog({
//options: valores,
buttonNames: ['Confirmar','Cancelar'],
destructive: 2,
title: "Desmarcar itens",
message: "Essa opcao ira desmarcar todos os itens selecionados em todas as paginas!"
});
exclui.show();
exclui.addEventListener("click", function(e){
if(e.index == 0){
categoryClear($.quantidade);
} else {
alert("Continue comprando");
}
});
}
function voltar() {
categoryVoltar();
}
function anterior() {
current_page--;
if (current_page <= 0)
current_page = paginas;
cleanImages();
renderProducts();
}
function proximo() {
current_page++;
if (current_page > paginas)
current_page = 1;
cleanImages();
renderProducts();
}
function
|
() {
current_page = 1;
cleanImages();
renderProducts();
}
function ultimo() {
current_page = paginas;
cleanImages();
renderProducts();
}
function cesta() {
categoryCesta();
}
var eventListener = function() {
Ti.App.removeEventListener('removeBitmap', eventListener);
Ti.API.info('Quatro horizontais');
cleanImages();
};
Ti.App.addEventListener('removeBitmap', eventListener);
if(Ti.Platform.osname == "ipad"){
$.botaoQuatroVerticais.font = {fontSize: 13};
$.botaoQuatroVerticais.height = "63%";
$.botaoQuatroVerticais.title = "Limpar marcações";
$.botaoQuatroVerticais.textAlign = "center";
}
|
primeiro
|
identifier_name
|
view_resolver_mock.d.ts
|
import { Type } from 'angular2/src/facade/lang';
import { ViewMetadata } from '../core/metadata';
import { ViewResolver } from 'angular2/src/core/linker/view_resolver';
export declare class
|
extends ViewResolver {
constructor();
/**
* Overrides the {@link ViewMetadata} for a component.
*
* @param {Type} component
* @param {ViewDefinition} view
*/
setView(component: Type, view: ViewMetadata): void;
/**
* Overrides the inline template for a component - other configuration remains unchanged.
*
* @param {Type} component
* @param {string} template
*/
setInlineTemplate(component: Type, template: string): void;
/**
* Overrides a directive from the component {@link ViewMetadata}.
*
* @param {Type} component
* @param {Type} from
* @param {Type} to
*/
overrideViewDirective(component: Type, from: Type, to: Type): void;
/**
* Returns the {@link ViewMetadata} for a component:
* - Set the {@link ViewMetadata} to the overridden view when it exists, or fall back to the default
* `ViewResolver`,
* see `setView`.
* - Override the directives, see `overrideViewDirective`.
* - Override the @View definition, see `setInlineTemplate`.
*
* @param component
* @returns {ViewDefinition}
*/
resolve(component: Type): ViewMetadata;
}
|
MockViewResolver
|
identifier_name
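The declaration above is an override-then-fallback resolver: consult a per-component override table first, otherwise defer to the parent class. A Python sketch of that shape, for illustration only (this is not Angular's actual implementation):

class ViewResolver:
    def resolve(self, component):
        # Stand-in fallback; the real resolver reads Angular metadata.
        return getattr(component, 'view', None)

class MockViewResolver(ViewResolver):
    def __init__(self):
        self._overrides = {}

    def set_view(self, component, view):
        # Register an override for one component.
        self._overrides[component] = view

    def resolve(self, component):
        # Prefer the override when it exists, otherwise fall back
        # to the default resolver, as the docs above describe.
        if component in self._overrides:
            return self._overrides[component]
        return super().resolve(component)

class Cmp: view = 'default view'
resolver = MockViewResolver()
resolver.set_view(Cmp, 'overridden view')
print(resolver.resolve(Cmp))  # overridden view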
|
view_resolver_mock.d.ts
|
import { Type } from 'angular2/src/facade/lang';
import { ViewMetadata } from '../core/metadata';
import { ViewResolver } from 'angular2/src/core/linker/view_resolver';
export declare class MockViewResolver extends ViewResolver {
constructor();
/**
* Overrides the {@link ViewMetadata} for a component.
*
* @param {Type} component
* @param {ViewDefinition} view
*/
setView(component: Type, view: ViewMetadata): void;
/**
* Overrides the inline template for a component - other configuration remains unchanged.
*
* @param {Type} component
* @param {string} template
*/
setInlineTemplate(component: Type, template: string): void;
/**
|
* @param {Type} from
* @param {Type} to
*/
overrideViewDirective(component: Type, from: Type, to: Type): void;
/**
* Returns the {@link ViewMetadata} for a component:
* - Set the {@link ViewMetadata} to the overridden view when it exists, or fall back to the default
* `ViewResolver`,
* see `setView`.
* - Override the directives, see `overrideViewDirective`.
* - Override the @View definition, see `setInlineTemplate`.
*
* @param component
* @returns {ViewDefinition}
*/
resolve(component: Type): ViewMetadata;
}
|
* Overrides a directive from the component {@link ViewMetadata}.
*
* @param {Type} component
|
random_line_split
|
err_2021_001_logging_perm.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
The service account used by GKE nodes should have the logging.logWriter
role; otherwise, ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
|
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
|
random_line_split
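The rule module above follows a fixed shape: a prefetch step to warm caches, then a run step that reports each resource as skipped, failed, or ok. A stripped-down Python sketch of the run step, using stand-in dictionaries and a toy report class instead of the real gcpdiag types:

class Report:
    def add_skipped(self, res, reason): print('SKIP', res, reason)
    def add_failed(self, res, reason): print('FAIL', res, reason)
    def add_ok(self, res): print('OK  ', res)

def run_rule(clusters, has_role, report):
    # Mirrors the structure above: skip when logging is off,
    # fail when a nodepool service account lacks the role.
    if not clusters:
        report.add_skipped(None, 'no clusters found')
    for name, cluster in sorted(clusters.items()):
        if not cluster['logging']:
            report.add_skipped(name, 'logging disabled')
            continue
        for sa in cluster['nodepool_sas']:
            if has_role(sa, 'roles/logging.logWriter'):
                report.add_ok(name + '/' + sa)
            else:
                report.add_failed(name + '/' + sa, 'missing role: roles/logging.logWriter')

run_rule({'c1': {'logging': True, 'nodepool_sas': ['sa-1']}},
         lambda sa, role: False, Report())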
|
err_2021_001_logging_perm.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role; otherwise, ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
|
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
|
identifier_body
|
|
err_2021_001_logging_perm.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role; otherwise, ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
|
report.add_ok(np)
|
conditional_block
|
|
err_2021_001_logging_perm.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role; otherwise, ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def
|
(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
|
prefetch_rule
|
identifier_name
|
11.8.3-4.js
|
// Copyright (c) 2012 Ecma International. All rights reserved.
// Ecma International makes this code available under the terms and conditions set
// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
// "Use Terms"). Any redistribution of this code must retain the above
// copyright and this notice and otherwise comply with the Use Terms.
/*---
es5id: 11.8.3-4
description: >
11.8.3 Less-than-or-equal Operator - Partial left to right order
enforced when using Less-than-or-equal operator: toString <=
toString
includes: [runTestCase.js]
---*/
function
|
() {
var accessed = false;
var obj1 = {
toString: function () {
accessed = true;
return 3;
}
};
var obj2 = {
toString: function () {
if (accessed === true) {
return 4;
} else {
return 2;
}
}
};
return (obj1 <= obj2);
}
runTestCase(testcase);
|
testcase
|
identifier_name
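The test above proves left-to-right operand conversion by letting the left toString flip a flag that the right toString then reads. Python has no implicit toString coercion for <=, so this sketch reproduces the trick by evaluating the two sides explicitly in order; it is an analogy, not a port of the test262 harness:

accessed = False

class Left:
    def value(self):
        global accessed
        accessed = True   # the left operand is converted first
        return 3

class Right:
    def value(self):
        # Returns 4 only if the left side was already converted,
        # the same trick obj2.toString plays above.
        return 4 if accessed else 2

# Emulate the JS coercion order explicitly: left first, then right.
left, right = Left().value(), Right().value()
assert left <= right  # 3 <= 4, proving left-to-right evaluation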
|
11.8.3-4.js
|
// Copyright (c) 2012 Ecma International. All rights reserved.
// Ecma International makes this code available under the terms and conditions set
// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
// "Use Terms"). Any redistribution of this code must retain the above
// copyright and this notice and otherwise comply with the Use Terms.
/*---
|
11.8.3 Less-than-or-equal Operator - Partial left to right order
enforced when using Less-than-or-equal operator: toString <=
toString
includes: [runTestCase.js]
---*/
function testcase() {
var accessed = false;
var obj1 = {
toString: function () {
accessed = true;
return 3;
}
};
var obj2 = {
toString: function () {
if (accessed === true) {
return 4;
} else {
return 2;
}
}
};
return (obj1 <= obj2);
}
runTestCase(testcase);
|
es5id: 11.8.3-4
description: >
|
random_line_split
|
11.8.3-4.js
|
// Copyright (c) 2012 Ecma International. All rights reserved.
// Ecma International makes this code available under the terms and conditions set
// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
// "Use Terms"). Any redistribution of this code must retain the above
// copyright and this notice and otherwise comply with the Use Terms.
/*---
es5id: 11.8.3-4
description: >
11.8.3 Less-than-or-equal Operator - Partial left to right order
enforced when using Less-than-or-equal operator: toString <=
toString
includes: [runTestCase.js]
---*/
function testcase()
|
runTestCase(testcase);
|
{
var accessed = false;
var obj1 = {
toString: function () {
accessed = true;
return 3;
}
};
var obj2 = {
toString: function () {
if (accessed === true) {
return 4;
} else {
return 2;
}
}
};
return (obj1 <= obj2);
}
|
identifier_body
|
11.8.3-4.js
|
// Copyright (c) 2012 Ecma International. All rights reserved.
// Ecma International makes this code available under the terms and conditions set
// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
// "Use Terms"). Any redistribution of this code must retain the above
// copyright and this notice and otherwise comply with the Use Terms.
/*---
es5id: 11.8.3-4
description: >
11.8.3 Less-than-or-equal Operator - Partial left to right order
enforced when using Less-than-or-equal operator: toString <=
toString
includes: [runTestCase.js]
---*/
function testcase() {
var accessed = false;
var obj1 = {
toString: function () {
accessed = true;
return 3;
}
};
var obj2 = {
toString: function () {
if (accessed === true) {
return 4;
} else
|
}
};
return (obj1 <= obj2);
}
runTestCase(testcase);
|
{
return 2;
}
|
conditional_block
|
views.py
|
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.shortcuts import render
from django.http import Http404
from styleguide.utils import (Styleguide, STYLEGUIDE_DIR_NAME,
STYLEGUIDE_DEBUG, STYLEGUIDE_CACHE_NAME,
STYLEGUIDE_ACCESS)
def index(request, module_name=None, component_name=None):
|
if not STYLEGUIDE_ACCESS(request.user):
raise Http404()
styleguide = None
if not STYLEGUIDE_DEBUG:
styleguide = cache.get(STYLEGUIDE_CACHE_NAME)
if styleguide is None:
styleguide = Styleguide()
cache.set(STYLEGUIDE_CACHE_NAME, styleguide, None)
if module_name is not None:
styleguide.set_current_module(module_name)
context = {'styleguide': styleguide}
index_path = "%s/index.html" % STYLEGUIDE_DIR_NAME
return render(request, index_path, context)
|
identifier_body
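The view above is a cache-or-build guard: skip the cache in debug mode, build on a miss, and store the result with no timeout. The same control flow, sketched without Django (the names here are placeholders):

_cache = {}

def get_styleguide(debug, build=lambda: 'fresh styleguide'):
    styleguide = None
    if not debug:                            # STYLEGUIDE_DEBUG bypasses the cache
        styleguide = _cache.get('styleguide')
    if styleguide is None:                   # miss (or debug): build and store
        styleguide = build()
        _cache['styleguide'] = styleguide    # no timeout, like cache.set(..., None)
    return styleguide

print(get_styleguide(debug=False))  # builds once, cached afterwards
print(get_styleguide(debug=True))   # debug mode always rebuilds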
|
|
views.py
|
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.shortcuts import render
from django.http import Http404
from styleguide.utils import (Styleguide, STYLEGUIDE_DIR_NAME,
STYLEGUIDE_DEBUG, STYLEGUIDE_CACHE_NAME,
STYLEGUIDE_ACCESS)
def
|
(request, module_name=None, component_name=None):
if not STYLEGUIDE_ACCESS(request.user):
raise Http404()
styleguide = None
if not STYLEGUIDE_DEBUG:
styleguide = cache.get(STYLEGUIDE_CACHE_NAME)
if styleguide is None:
styleguide = Styleguide()
cache.set(STYLEGUIDE_CACHE_NAME, styleguide, None)
if module_name is not None:
styleguide.set_current_module(module_name)
context = {'styleguide': styleguide}
index_path = "%s/index.html" % STYLEGUIDE_DIR_NAME
return render(request, index_path, context)
|
index
|
identifier_name
|
views.py
|
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.shortcuts import render
from django.http import Http404
from styleguide.utils import (Styleguide, STYLEGUIDE_DIR_NAME,
STYLEGUIDE_DEBUG, STYLEGUIDE_CACHE_NAME,
STYLEGUIDE_ACCESS)
def index(request, module_name=None, component_name=None):
if not STYLEGUIDE_ACCESS(request.user):
|
raise Http404()
styleguide = None
if not STYLEGUIDE_DEBUG:
styleguide = cache.get(STYLEGUIDE_CACHE_NAME)
if styleguide is None:
styleguide = Styleguide()
cache.set(STYLEGUIDE_CACHE_NAME, styleguide, None)
if module_name is not None:
styleguide.set_current_module(module_name)
context = {'styleguide': styleguide}
index_path = "%s/index.html" % STYLEGUIDE_DIR_NAME
return render(request, index_path, context)
|
random_line_split
|
|
views.py
|
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.shortcuts import render
from django.http import Http404
from styleguide.utils import (Styleguide, STYLEGUIDE_DIR_NAME,
STYLEGUIDE_DEBUG, STYLEGUIDE_CACHE_NAME,
STYLEGUIDE_ACCESS)
def index(request, module_name=None, component_name=None):
if not STYLEGUIDE_ACCESS(request.user):
raise Http404()
styleguide = None
if not STYLEGUIDE_DEBUG:
styleguide = cache.get(STYLEGUIDE_CACHE_NAME)
if styleguide is None:
styleguide = Styleguide()
cache.set(STYLEGUIDE_CACHE_NAME, styleguide, None)
if module_name is not None:
|
context = {'styleguide': styleguide}
index_path = "%s/index.html" % STYLEGUIDE_DIR_NAME
return render(request, index_path, context)
|
styleguide.set_current_module(module_name)
|
conditional_block
|
users.py
|
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import exceptions
class User(base.Resource):
"""Represents an Identity user.
Attributes:
* id: a uuid that identifies the user
"""
pass
class UserManager(base.CrudManager):
"""Manager class for manipulating Identity users."""
resource_class = User
collection_key = 'users'
key = 'user'
def _require_user_and_group(self, user, group):
if not (user and group):
msg = 'Specify both a user and a group'
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, project=None, password=None,
email=None, description=None, enabled=True):
return super(UserManager, self).create(
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def list(self, project=None, domain=None, group=None, **kwargs):
"""List users.
If project, domain or group are provided, then filter
users with those attributes.
If ``**kwargs`` are provided, then filter users with
attributes matching ``**kwargs``.
"""
if group:
base_url = '/groups/%s' % base.getid(group)
else:
|
return super(UserManager, self).list(
base_url=base_url,
domain_id=base.getid(domain),
project_id=base.getid(project),
**kwargs)
def get(self, user):
return super(UserManager, self).get(
user_id=base.getid(user))
def update(self, user, name=None, domain=None, project=None, password=None,
email=None, description=None, enabled=None):
return super(UserManager, self).update(
user_id=base.getid(user),
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def add_to_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).put(
base_url=base_url,
user_id=base.getid(user))
def check_in_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).head(
base_url=base_url,
user_id=base.getid(user))
def remove_from_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).delete(
base_url=base_url,
user_id=base.getid(user))
def delete(self, user):
return super(UserManager, self).delete(
user_id=base.getid(user))
|
base_url = None
|
conditional_block
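One detail worth isolating from the manager above: list() swaps its base URL depending on whether a group filter is present, so the same CRUD plumbing serves both /users and /groups/<id>/users. A keystoneclient-free Python sketch of that URL construction:

def getid(obj):
    # Accept a raw id or an object with an .id attribute,
    # like keystoneclient's base.getid helper.
    return getattr(obj, 'id', obj)

def users_url(group=None):
    # With a group, list membership under /groups/<id>/users;
    # without one, fall back to the top-level /users collection.
    base_url = '/groups/%s' % getid(group) if group else ''
    return base_url + '/users'

assert users_url() == '/users'
assert users_url(group='abc123') == '/groups/abc123/users'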
|
users.py
|
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import exceptions
class User(base.Resource):
"""Represents an Identity user.
Attributes:
* id: a uuid that identifies the user
"""
pass
class UserManager(base.CrudManager):
"""Manager class for manipulating Identity users."""
resource_class = User
collection_key = 'users'
key = 'user'
def _require_user_and_group(self, user, group):
if not (user and group):
msg = 'Specify both a user and a group'
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, project=None, password=None,
email=None, description=None, enabled=True):
return super(UserManager, self).create(
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def list(self, project=None, domain=None, group=None, **kwargs):
"""List users.
If project, domain or group are provided, then filter
users with those attributes.
If ``**kwargs`` are provided, then filter users with
attributes matching ``**kwargs``.
"""
if group:
base_url = '/groups/%s' % base.getid(group)
else:
base_url = None
return super(UserManager, self).list(
base_url=base_url,
domain_id=base.getid(domain),
project_id=base.getid(project),
**kwargs)
def get(self, user):
return super(UserManager, self).get(
user_id=base.getid(user))
def update(self, user, name=None, domain=None, project=None, password=None,
email=None, description=None, enabled=None):
return super(UserManager, self).update(
user_id=base.getid(user),
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def add_to_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).put(
base_url=base_url,
user_id=base.getid(user))
def
|
(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).head(
base_url=base_url,
user_id=base.getid(user))
def remove_from_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).delete(
base_url=base_url,
user_id=base.getid(user))
def delete(self, user):
return super(UserManager, self).delete(
user_id=base.getid(user))
|
check_in_group
|
identifier_name
|
users.py
|
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import exceptions
class User(base.Resource):
"""Represents an Identity user.
Attributes:
* id: a uuid that identifies the user
"""
pass
class UserManager(base.CrudManager):
"""Manager class for manipulating Identity users."""
resource_class = User
collection_key = 'users'
key = 'user'
def _require_user_and_group(self, user, group):
if not (user and group):
msg = 'Specify both a user and a group'
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, project=None, password=None,
email=None, description=None, enabled=True):
return super(UserManager, self).create(
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
|
enabled=enabled)
def list(self, project=None, domain=None, group=None, **kwargs):
"""List users.
If project, domain or group are provided, then filter
users with those attributes.
If ``**kwargs`` are provided, then filter users with
attributes matching ``**kwargs``.
"""
if group:
base_url = '/groups/%s' % base.getid(group)
else:
base_url = None
return super(UserManager, self).list(
base_url=base_url,
domain_id=base.getid(domain),
project_id=base.getid(project),
**kwargs)
def get(self, user):
return super(UserManager, self).get(
user_id=base.getid(user))
def update(self, user, name=None, domain=None, project=None, password=None,
email=None, description=None, enabled=None):
return super(UserManager, self).update(
user_id=base.getid(user),
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def add_to_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).put(
base_url=base_url,
user_id=base.getid(user))
def check_in_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).head(
base_url=base_url,
user_id=base.getid(user))
def remove_from_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).delete(
base_url=base_url,
user_id=base.getid(user))
def delete(self, user):
return super(UserManager, self).delete(
user_id=base.getid(user))
|
random_line_split
|
|
users.py
|
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import exceptions
class User(base.Resource):
"""Represents an Identity user.
Attributes:
* id: a uuid that identifies the user
"""
pass
class UserManager(base.CrudManager):
"""Manager class for manipulating Identity users."""
resource_class = User
collection_key = 'users'
key = 'user'
def _require_user_and_group(self, user, group):
if not (user and group):
msg = 'Specify both a user and a group'
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, project=None, password=None,
email=None, description=None, enabled=True):
return super(UserManager, self).create(
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def list(self, project=None, domain=None, group=None, **kwargs):
"""List users.
If project, domain or group are provided, then filter
users with those attributes.
If ``**kwargs`` are provided, then filter users with
attributes matching ``**kwargs``.
"""
if group:
base_url = '/groups/%s' % base.getid(group)
else:
base_url = None
return super(UserManager, self).list(
base_url=base_url,
domain_id=base.getid(domain),
project_id=base.getid(project),
**kwargs)
def get(self, user):
return super(UserManager, self).get(
user_id=base.getid(user))
def update(self, user, name=None, domain=None, project=None, password=None,
email=None, description=None, enabled=None):
|
def add_to_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).put(
base_url=base_url,
user_id=base.getid(user))
def check_in_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).head(
base_url=base_url,
user_id=base.getid(user))
def remove_from_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).delete(
base_url=base_url,
user_id=base.getid(user))
def delete(self, user):
return super(UserManager, self).delete(
user_id=base.getid(user))
|
return super(UserManager, self).update(
user_id=base.getid(user),
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
|
identifier_body
|
hibernate_state.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::metapb::Region;
use pd_client::{Feature, FeatureGate};
use serde_derive::{Deserialize, Serialize};
/// Because the negotiation protocol can't be recognized by old versions of the binaries,
/// enabling it directly can cause a lot of connection resets.
const NEGOTIATION_HIBERNATE: Feature = Feature::require(5, 0, 0);
/// Represents state of the group.
#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize)]
pub enum GroupState {
/// The group is working normally; the leader keeps
/// replicating data to followers.
Ordered,
/// The group is out of order. Leadership may not be held.
Chaos,
/// The group is about to be out of order. It leaves some
/// safe space to avoid stepping into chaos too often.
PreChaos,
/// The group is hibernated.
Idle,
}
#[derive(PartialEq, Debug)]
pub enum LeaderState {
Awaken,
Poll(Vec<u64>),
Hibernated,
}
#[derive(Debug)]
pub struct HibernateState {
group: GroupState,
leader: LeaderState,
}
impl HibernateState {
pub fn ordered() -> HibernateState {
HibernateState {
group: GroupState::Ordered,
leader: LeaderState::Awaken,
}
}
pub fn group_state(&self) -> GroupState {
self.group
}
pub fn reset(&mut self, group_state: GroupState) {
self.group = group_state;
if group_state != GroupState::Idle {
self.leader = LeaderState::Awaken;
}
}
pub fn count_vote(&mut self, from: u64)
|
pub fn should_bcast(&self, gate: &FeatureGate) -> bool {
gate.can_enable(NEGOTIATION_HIBERNATE)
}
pub fn maybe_hibernate(&mut self, my_id: u64, region: &Region) -> bool {
let peers = region.get_peers();
let v = match &mut self.leader {
LeaderState::Awaken => {
self.leader = LeaderState::Poll(Vec::with_capacity(peers.len()));
return false;
}
LeaderState::Poll(v) => v,
LeaderState::Hibernated => return true,
};
// 1 is for leader itself, which is not counted into votes.
if v.len() + 1 < peers.len() {
return false;
}
if peers
.iter()
.all(|p| p.get_id() == my_id || v.contains(&p.get_id()))
{
self.leader = LeaderState::Hibernated;
true
} else {
false
}
}
}
|
{
if let LeaderState::Poll(v) = &mut self.leader {
if !v.contains(&from) {
v.push(from);
}
}
}
|
identifier_body
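maybe_hibernate above only lets the leader sleep once every other peer has voted; the `v.len() + 1` accounts for the leader itself, which never votes. The counting reduces to a set-coverage check, sketched here in Python without the Awaken/Poll/Hibernated state machine:

def maybe_hibernate(my_id, peer_ids, votes):
    # The leader never appears in `votes`, hence the +1 below.
    if len(votes) + 1 < len(peer_ids):
        return False
    # Every peer must either be the leader or have voted.
    return all(p == my_id or p in votes for p in peer_ids)

assert maybe_hibernate(1, {1, 2, 3}, {2}) is False
assert maybe_hibernate(1, {1, 2, 3}, {2, 3}) is True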
|
hibernate_state.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::metapb::Region;
use pd_client::{Feature, FeatureGate};
use serde_derive::{Deserialize, Serialize};
/// Because the negotiation protocol can't be recognized by old versions of the binaries,
/// enabling it directly can cause a lot of connection resets.
const NEGOTIATION_HIBERNATE: Feature = Feature::require(5, 0, 0);
/// Represents state of the group.
#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize)]
pub enum GroupState {
/// The group is working normally; the leader keeps
/// replicating data to followers.
Ordered,
/// The group is out of order. Leadership may not be held.
Chaos,
/// The group is about to be out of order. It leaves some
/// safe space to avoid stepping into chaos too often.
PreChaos,
/// The group is hibernated.
Idle,
}
#[derive(PartialEq, Debug)]
pub enum LeaderState {
Awaken,
Poll(Vec<u64>),
Hibernated,
}
#[derive(Debug)]
pub struct HibernateState {
group: GroupState,
leader: LeaderState,
}
impl HibernateState {
pub fn ordered() -> HibernateState {
HibernateState {
group: GroupState::Ordered,
leader: LeaderState::Awaken,
}
}
pub fn group_state(&self) -> GroupState {
self.group
}
pub fn reset(&mut self, group_state: GroupState) {
self.group = group_state;
if group_state != GroupState::Idle {
self.leader = LeaderState::Awaken;
}
}
pub fn count_vote(&mut self, from: u64) {
if let LeaderState::Poll(v) = &mut self.leader {
if !v.contains(&from) {
v.push(from);
}
}
}
pub fn should_bcast(&self, gate: &FeatureGate) -> bool {
gate.can_enable(NEGOTIATION_HIBERNATE)
}
pub fn maybe_hibernate(&mut self, my_id: u64, region: &Region) -> bool {
let peers = region.get_peers();
let v = match &mut self.leader {
LeaderState::Awaken => {
self.leader = LeaderState::Poll(Vec::with_capacity(peers.len()));
return false;
}
LeaderState::Poll(v) => v,
LeaderState::Hibernated => return true,
};
// 1 is for leader itself, which is not counted into votes.
if v.len() + 1 < peers.len() {
return false;
}
if peers
.iter()
.all(|p| p.get_id() == my_id || v.contains(&p.get_id()))
{
self.leader = LeaderState::Hibernated;
true
|
}
}
}
|
} else {
false
|
random_line_split
|
hibernate_state.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::metapb::Region;
use pd_client::{Feature, FeatureGate};
use serde_derive::{Deserialize, Serialize};
/// Because the negotiation protocol can't be recognized by old versions of the binaries,
/// enabling it directly can cause a lot of connection resets.
const NEGOTIATION_HIBERNATE: Feature = Feature::require(5, 0, 0);
/// Represents state of the group.
#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize)]
pub enum GroupState {
/// The group is working normally; the leader keeps
/// replicating data to followers.
Ordered,
/// The group is out of order. Leadership may not be held.
Chaos,
/// The group is about to be out of order. It leaves some
/// safe space to avoid stepping into chaos too often.
PreChaos,
/// The group is hibernated.
Idle,
}
#[derive(PartialEq, Debug)]
pub enum LeaderState {
Awaken,
Poll(Vec<u64>),
Hibernated,
}
#[derive(Debug)]
pub struct
|
{
group: GroupState,
leader: LeaderState,
}
impl HibernateState {
pub fn ordered() -> HibernateState {
HibernateState {
group: GroupState::Ordered,
leader: LeaderState::Awaken,
}
}
pub fn group_state(&self) -> GroupState {
self.group
}
pub fn reset(&mut self, group_state: GroupState) {
self.group = group_state;
if group_state != GroupState::Idle {
self.leader = LeaderState::Awaken;
}
}
pub fn count_vote(&mut self, from: u64) {
if let LeaderState::Poll(v) = &mut self.leader {
if !v.contains(&from) {
v.push(from);
}
}
}
pub fn should_bcast(&self, gate: &FeatureGate) -> bool {
gate.can_enable(NEGOTIATION_HIBERNATE)
}
pub fn maybe_hibernate(&mut self, my_id: u64, region: &Region) -> bool {
let peers = region.get_peers();
let v = match &mut self.leader {
LeaderState::Awaken => {
self.leader = LeaderState::Poll(Vec::with_capacity(peers.len()));
return false;
}
LeaderState::Poll(v) => v,
LeaderState::Hibernated => return true,
};
// 1 is for leader itself, which is not counted into votes.
if v.len() + 1 < peers.len() {
return false;
}
if peers
.iter()
.all(|p| p.get_id() == my_id || v.contains(&p.get_id()))
{
self.leader = LeaderState::Hibernated;
true
} else {
false
}
}
}
|
HibernateState
|
identifier_name
|
hibernate_state.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::metapb::Region;
use pd_client::{Feature, FeatureGate};
use serde_derive::{Deserialize, Serialize};
/// Because the negotiation protocol can't be recognized by old versions of the binaries,
/// enabling it directly can cause a lot of connection resets.
const NEGOTIATION_HIBERNATE: Feature = Feature::require(5, 0, 0);
/// Represents state of the group.
#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize)]
pub enum GroupState {
/// The group is working normally; the leader keeps
/// replicating data to followers.
Ordered,
/// The group is out of order. Leadership may not be held.
Chaos,
/// The group is about to be out of order. It leaves some
/// safe space to avoid stepping into chaos too often.
PreChaos,
/// The group is hibernated.
Idle,
}
#[derive(PartialEq, Debug)]
pub enum LeaderState {
Awaken,
Poll(Vec<u64>),
Hibernated,
}
#[derive(Debug)]
pub struct HibernateState {
group: GroupState,
leader: LeaderState,
}
impl HibernateState {
pub fn ordered() -> HibernateState {
HibernateState {
group: GroupState::Ordered,
leader: LeaderState::Awaken,
}
}
pub fn group_state(&self) -> GroupState {
self.group
}
pub fn reset(&mut self, group_state: GroupState) {
self.group = group_state;
if group_state != GroupState::Idle {
self.leader = LeaderState::Awaken;
}
}
pub fn count_vote(&mut self, from: u64) {
if let LeaderState::Poll(v) = &mut self.leader {
if !v.contains(&from) {
v.push(from);
}
}
}
pub fn should_bcast(&self, gate: &FeatureGate) -> bool {
gate.can_enable(NEGOTIATION_HIBERNATE)
}
pub fn maybe_hibernate(&mut self, my_id: u64, region: &Region) -> bool {
let peers = region.get_peers();
let v = match &mut self.leader {
LeaderState::Awaken => {
self.leader = LeaderState::Poll(Vec::with_capacity(peers.len()));
return false;
}
LeaderState::Poll(v) => v,
LeaderState::Hibernated => return true,
};
        // 1 is for the leader itself, which is not counted among the votes.
if v.len() + 1 < peers.len() {
return false;
}
if peers
.iter()
.all(|p| p.get_id() == my_id || v.contains(&p.get_id()))
|
else {
false
}
}
}
|
{
self.leader = LeaderState::Hibernated;
true
}
|
conditional_block
|
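The two hibernate_state.rs samples above both center on the same voting rule: the leader only hibernates once every other peer has confirmed. Below is a minimal, dependency-free sketch of that rule, assuming peers are plain u64 ids (the real code reads them from kvproto::metapb::Region), not the project's actual implementation.

// Minimal sketch of the hibernate voting rule, with plain u64 peer ids
// standing in for kvproto::metapb::Region (a simplifying assumption).
#[derive(Debug, PartialEq)]
enum LeaderState {
    Awaken,
    Poll(Vec<u64>),
    Hibernated,
}

struct Poller {
    leader: LeaderState,
}

impl Poller {
    // Record a vote from a follower; duplicates are ignored.
    fn count_vote(&mut self, from: u64) {
        if let LeaderState::Poll(v) = &mut self.leader {
            if !v.contains(&from) {
                v.push(from);
            }
        }
    }

    // Returns true once every peer other than the leader has voted.
    fn maybe_hibernate(&mut self, my_id: u64, peers: &[u64]) -> bool {
        let v = match &mut self.leader {
            LeaderState::Awaken => {
                // The first call only starts the poll.
                self.leader = LeaderState::Poll(Vec::with_capacity(peers.len()));
                return false;
            }
            LeaderState::Poll(v) => v,
            LeaderState::Hibernated => return true,
        };
        // 1 accounts for the leader itself, which never votes.
        if v.len() + 1 < peers.len() {
            return false;
        }
        if peers.iter().all(|p| *p == my_id || v.contains(p)) {
            self.leader = LeaderState::Hibernated;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut s = Poller { leader: LeaderState::Awaken };
    assert!(!s.maybe_hibernate(1, &[1, 2, 3])); // starts polling
    s.count_vote(2);
    s.count_vote(3);
    assert!(s.maybe_hibernate(1, &[1, 2, 3])); // every follower confirmed
}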
movie.js
|
// pages/movies/movie.js
Page({
|
data: {
},
  /**
   * Lifecycle function -- triggered when the page loads
   */
  onLoad: function (options) {
  },
  /**
   * Lifecycle function -- triggered when the page's initial render has finished
   */
  onReady: function () {
  },
  /**
   * Lifecycle function -- triggered when the page is shown
   */
  onShow: function () {
  },
  /**
   * Lifecycle function -- triggered when the page is hidden
   */
  onHide: function () {
  },
  /**
   * Lifecycle function -- triggered when the page is unloaded
   */
  onUnload: function () {
  },
  /**
   * Page event handler -- triggered by the user's pull-down gesture
   */
  onPullDownRefresh: function () {
  },
  /**
   * Handler for the scroll-to-bottom event
   */
  onReachBottom: function () {
  },
  /**
   * Called when the user taps the top-right share button
   */
  onShareAppMessage: function () {
  }
})
|
  /**
   * Initial data for the page
   */
|
random_line_split
|
matlab.py
|
r"""
Functions that make it easier to deal with Matlab data.
Notes
-----
#. Written by David C. Stauffer in December 2018.
"""
#%% Imports
from __future__ import annotations
import doctest
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import unittest
from dstauffman.constants import HAVE_H5PY, HAVE_NUMPY
if HAVE_H5PY:
import h5py
if HAVE_NUMPY:
import numpy as np
#%% load_matlab
def load_matlab(
filename: Union[str, Path],
varlist: Union[List[str], Set[str], Tuple[str]] = None,
*,
squeeze: bool = True,
enums: Dict[str, Any] = None,
) -> Dict[str, Any]:
r"""
Load simple arrays from a MATLAB v7.3 HDF5 based *.mat file.
Parameters
----------
filename : class pathlib.Path
Name of the file to load
varlist : list of str, optional
Name of the variables to load
squeeze : bool, optional, default is True
Whether to squeeze any singleton vectors down a dimension
Returns
-------
out : dict
Equivalent structure as python dictionary
Examples
--------
>>> from dstauffman import load_matlab, get_tests_dir
>>> filename = get_tests_dir() / 'test_numbers.mat'
>>> out = load_matlab(filename)
>>> print(out['row_nums'][1])
2.2
"""
def _load(
file: h5py.Group,
varlist: Optional[Union[List[str], Set[str], Tuple[str]]],
squeeze: bool,
enums: Optional[Dict[str, Any]],
) -> Dict[str, Any]:
|
if not isinstance(filename, h5py.Group):
with h5py.File(filename, 'r') as file:
# normal method
out = _load(file=file, varlist=varlist, squeeze=squeeze, enums=enums)
else:
# recursive call method where the file is already opened to a given group
out = _load(file=filename, varlist=varlist, squeeze=squeeze, enums=enums)
return out
#%% Unit test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_matlab', exit=False)
doctest.testmod(verbose=False)
|
r"""Wrapped subfunction so it can be called recursively."""
# initialize output
out: Dict[str, Any] = {}
# loop through keys, keys are the MATLAB variable names, like TELM
for key in file:
# skip keys that are not in the given varlist
if varlist is not None and key not in varlist:
continue
# if no varlist (thus loading every key), still skip those that start with #
if varlist is None and key in {'#refs#', '#subsystem#'}:
continue
# alias this group
grp = file[key]
            # check if this is a dataset, meaning it's just an array and not a structure
if isinstance(grp, h5py.Dataset):
# Note: data is transposed due to how Matlab stores columnwise
values = grp[()].T
# check for cell array references
if isinstance(values.flat[0], h5py.Reference):
# TODO: for now, always collapse to 1D cell array as a list
temp = [file[item] for item in values.flat]
temp2 = []
for x in temp:
if isinstance(x, h5py.Group):
temp2.append(load_matlab(x, varlist=None, squeeze=squeeze, enums=enums))
else:
data = x[()].T
temp2.append(np.squeeze(data) if squeeze else data)
out[key] = temp2
else:
out[key] = np.squeeze(values) if squeeze else values
elif 'EnumerationInstanceTag' in grp:
# likely a MATLAB enumerator???
class_name = grp.attrs['MATLAB_class'].decode()
if enums is None or class_name not in enums:
raise ValueError(
f'Tried to load a MATLAB enumeration class called "{class_name}" without a decoder ring, pass in via `enums`.'
)
ix = grp['ValueIndices'][()].T
values = np.array([enums[class_name][x] for x in ix.flatten()]).reshape(ix.shape)
out[key] = np.squeeze(values) if squeeze else values
else:
# call recursively
out[key] = load_matlab(grp, varlist=None, squeeze=squeeze, enums=enums)
return out
|
identifier_body
|
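The load_matlab samples hinge on one fact: a MATLAB v7.3 *.mat file is ordinary HDF5 with arrays stored column-major, hence the repeated .T transposes. A small round-trip sketch of that idea follows; the file name is hypothetical and it requires h5py and numpy.

# Round-trip sketch: write an HDF5 file shaped the way MATLAB v7.3 stores
# arrays (column-major), then read it back with the transpose-and-squeeze
# pattern used by load_matlab. 'demo_v73.mat' is a made-up file name.
import h5py
import numpy as np

row_nums = np.array([1.1, 2.2, 3.3])
with h5py.File('demo_v73.mat', 'w') as f:
    # MATLAB would store a 1x3 row vector as a 3x1 dataset.
    f.create_dataset('row_nums', data=row_nums.reshape(1, -1).T)

with h5py.File('demo_v73.mat', 'r') as f:
    out = {key: np.squeeze(f[key][()].T) for key in f}

print(out['row_nums'][1])  # -> 2.2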
matlab.py
|
r"""
Functions that make it easier to deal with Matlab data.
Notes
-----
#. Written by David C. Stauffer in December 2018.
"""
#%% Imports
from __future__ import annotations
import doctest
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import unittest
from dstauffman.constants import HAVE_H5PY, HAVE_NUMPY
if HAVE_H5PY:
import h5py
if HAVE_NUMPY:
import numpy as np
#%% load_matlab
def load_matlab(
filename: Union[str, Path],
varlist: Union[List[str], Set[str], Tuple[str]] = None,
*,
squeeze: bool = True,
enums: Dict[str, Any] = None,
) -> Dict[str, Any]:
r"""
Load simple arrays from a MATLAB v7.3 HDF5 based *.mat file.
Parameters
----------
filename : class pathlib.Path
Name of the file to load
varlist : list of str, optional
Name of the variables to load
squeeze : bool, optional, default is True
Whether to squeeze any singleton vectors down a dimension
Returns
-------
out : dict
Equivalent structure as python dictionary
Examples
--------
>>> from dstauffman import load_matlab, get_tests_dir
>>> filename = get_tests_dir() / 'test_numbers.mat'
>>> out = load_matlab(filename)
>>> print(out['row_nums'][1])
2.2
"""
def
|
(
file: h5py.Group,
varlist: Optional[Union[List[str], Set[str], Tuple[str]]],
squeeze: bool,
enums: Optional[Dict[str, Any]],
) -> Dict[str, Any]:
r"""Wrapped subfunction so it can be called recursively."""
# initialize output
out: Dict[str, Any] = {}
# loop through keys, keys are the MATLAB variable names, like TELM
for key in file:
# skip keys that are not in the given varlist
if varlist is not None and key not in varlist:
continue
# if no varlist (thus loading every key), still skip those that start with #
if varlist is None and key in {'#refs#', '#subsystem#'}:
continue
# alias this group
grp = file[key]
            # check if this is a dataset, meaning it's just an array and not a structure
if isinstance(grp, h5py.Dataset):
# Note: data is transposed due to how Matlab stores columnwise
values = grp[()].T
# check for cell array references
if isinstance(values.flat[0], h5py.Reference):
# TODO: for now, always collapse to 1D cell array as a list
temp = [file[item] for item in values.flat]
temp2 = []
for x in temp:
if isinstance(x, h5py.Group):
temp2.append(load_matlab(x, varlist=None, squeeze=squeeze, enums=enums))
else:
data = x[()].T
temp2.append(np.squeeze(data) if squeeze else data)
out[key] = temp2
else:
out[key] = np.squeeze(values) if squeeze else values
elif 'EnumerationInstanceTag' in grp:
# likely a MATLAB enumerator???
class_name = grp.attrs['MATLAB_class'].decode()
if enums is None or class_name not in enums:
raise ValueError(
f'Tried to load a MATLAB enumeration class called "{class_name}" without a decoder ring, pass in via `enums`.'
)
ix = grp['ValueIndices'][()].T
values = np.array([enums[class_name][x] for x in ix.flatten()]).reshape(ix.shape)
out[key] = np.squeeze(values) if squeeze else values
else:
# call recursively
out[key] = load_matlab(grp, varlist=None, squeeze=squeeze, enums=enums)
return out
if not isinstance(filename, h5py.Group):
with h5py.File(filename, 'r') as file:
# normal method
out = _load(file=file, varlist=varlist, squeeze=squeeze, enums=enums)
else:
# recursive call method where the file is already opened to a given group
out = _load(file=filename, varlist=varlist, squeeze=squeeze, enums=enums)
return out
#%% Unit test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_matlab', exit=False)
doctest.testmod(verbose=False)
|
_load
|
identifier_name
|
matlab.py
|
r"""
Functions that make it easier to deal with Matlab data.
Notes
-----
#. Written by David C. Stauffer in December 2018.
"""
#%% Imports
from __future__ import annotations
import doctest
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import unittest
from dstauffman.constants import HAVE_H5PY, HAVE_NUMPY
if HAVE_H5PY:
import h5py
if HAVE_NUMPY:
import numpy as np
#%% load_matlab
def load_matlab(
filename: Union[str, Path],
varlist: Union[List[str], Set[str], Tuple[str]] = None,
*,
squeeze: bool = True,
enums: Dict[str, Any] = None,
) -> Dict[str, Any]:
r"""
Load simple arrays from a MATLAB v7.3 HDF5 based *.mat file.
Parameters
----------
filename : class pathlib.Path
Name of the file to load
varlist : list of str, optional
Name of the variables to load
squeeze : bool, optional, default is True
Whether to squeeze any singleton vectors down a dimension
Returns
-------
out : dict
Equivalent structure as python dictionary
Examples
--------
>>> from dstauffman import load_matlab, get_tests_dir
>>> filename = get_tests_dir() / 'test_numbers.mat'
>>> out = load_matlab(filename)
>>> print(out['row_nums'][1])
2.2
"""
def _load(
file: h5py.Group,
varlist: Optional[Union[List[str], Set[str], Tuple[str]]],
squeeze: bool,
enums: Optional[Dict[str, Any]],
) -> Dict[str, Any]:
r"""Wrapped subfunction so it can be called recursively."""
# initialize output
out: Dict[str, Any] = {}
# loop through keys, keys are the MATLAB variable names, like TELM
for key in file:
# skip keys that are not in the given varlist
if varlist is not None and key not in varlist:
continue
# if no varlist (thus loading every key), still skip those that start with #
if varlist is None and key in {'#refs#', '#subsystem#'}:
continue
# alias this group
grp = file[key]
            # check if this is a dataset, meaning it's just an array and not a structure
|
if isinstance(values.flat[0], h5py.Reference):
# TODO: for now, always collapse to 1D cell array as a list
temp = [file[item] for item in values.flat]
temp2 = []
for x in temp:
if isinstance(x, h5py.Group):
temp2.append(load_matlab(x, varlist=None, squeeze=squeeze, enums=enums))
else:
data = x[()].T
temp2.append(np.squeeze(data) if squeeze else data)
out[key] = temp2
else:
out[key] = np.squeeze(values) if squeeze else values
elif 'EnumerationInstanceTag' in grp:
# likely a MATLAB enumerator???
class_name = grp.attrs['MATLAB_class'].decode()
if enums is None or class_name not in enums:
raise ValueError(
f'Tried to load a MATLAB enumeration class called "{class_name}" without a decoder ring, pass in via `enums`.'
)
ix = grp['ValueIndices'][()].T
values = np.array([enums[class_name][x] for x in ix.flatten()]).reshape(ix.shape)
out[key] = np.squeeze(values) if squeeze else values
else:
# call recursively
out[key] = load_matlab(grp, varlist=None, squeeze=squeeze, enums=enums)
return out
if not isinstance(filename, h5py.Group):
with h5py.File(filename, 'r') as file:
# normal method
out = _load(file=file, varlist=varlist, squeeze=squeeze, enums=enums)
else:
# recursive call method where the file is already opened to a given group
out = _load(file=filename, varlist=varlist, squeeze=squeeze, enums=enums)
return out
#%% Unit test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_matlab', exit=False)
doctest.testmod(verbose=False)
|
if isinstance(grp, h5py.Dataset):
# Note: data is transposed due to how Matlab stores columnwise
values = grp[()].T
# check for cell array references
|
random_line_split
|
matlab.py
|
r"""
Functions that make it easier to deal with Matlab data.
Notes
-----
#. Written by David C. Stauffer in December 2018.
"""
#%% Imports
from __future__ import annotations
import doctest
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import unittest
from dstauffman.constants import HAVE_H5PY, HAVE_NUMPY
if HAVE_H5PY:
import h5py
if HAVE_NUMPY:
import numpy as np
#%% load_matlab
def load_matlab(
filename: Union[str, Path],
varlist: Union[List[str], Set[str], Tuple[str]] = None,
*,
squeeze: bool = True,
enums: Dict[str, Any] = None,
) -> Dict[str, Any]:
r"""
Load simple arrays from a MATLAB v7.3 HDF5 based *.mat file.
Parameters
----------
filename : class pathlib.Path
Name of the file to load
varlist : list of str, optional
Name of the variables to load
squeeze : bool, optional, default is True
Whether to squeeze any singleton vectors down a dimension
Returns
-------
out : dict
Equivalent structure as python dictionary
Examples
--------
>>> from dstauffman import load_matlab, get_tests_dir
>>> filename = get_tests_dir() / 'test_numbers.mat'
>>> out = load_matlab(filename)
>>> print(out['row_nums'][1])
2.2
"""
def _load(
file: h5py.Group,
varlist: Optional[Union[List[str], Set[str], Tuple[str]]],
squeeze: bool,
enums: Optional[Dict[str, Any]],
) -> Dict[str, Any]:
r"""Wrapped subfunction so it can be called recursively."""
# initialize output
out: Dict[str, Any] = {}
# loop through keys, keys are the MATLAB variable names, like TELM
for key in file:
# skip keys that are not in the given varlist
if varlist is not None and key not in varlist:
continue
# if no varlist (thus loading every key), still skip those that start with #
if varlist is None and key in {'#refs#', '#subsystem#'}:
continue
# alias this group
grp = file[key]
            # check if this is a dataset, meaning it's just an array and not a structure
if isinstance(grp, h5py.Dataset):
# Note: data is transposed due to how Matlab stores columnwise
values = grp[()].T
# check for cell array references
if isinstance(values.flat[0], h5py.Reference):
# TODO: for now, always collapse to 1D cell array as a list
temp = [file[item] for item in values.flat]
temp2 = []
for x in temp:
if isinstance(x, h5py.Group):
temp2.append(load_matlab(x, varlist=None, squeeze=squeeze, enums=enums))
else:
data = x[()].T
temp2.append(np.squeeze(data) if squeeze else data)
out[key] = temp2
else:
out[key] = np.squeeze(values) if squeeze else values
elif 'EnumerationInstanceTag' in grp:
# likely a MATLAB enumerator???
class_name = grp.attrs['MATLAB_class'].decode()
if enums is None or class_name not in enums:
raise ValueError(
f'Tried to load a MATLAB enumeration class called "{class_name}" without a decoder ring, pass in via `enums`.'
)
ix = grp['ValueIndices'][()].T
values = np.array([enums[class_name][x] for x in ix.flatten()]).reshape(ix.shape)
out[key] = np.squeeze(values) if squeeze else values
else:
# call recursively
out[key] = load_matlab(grp, varlist=None, squeeze=squeeze, enums=enums)
return out
if not isinstance(filename, h5py.Group):
|
else:
# recursive call method where the file is already opened to a given group
out = _load(file=filename, varlist=varlist, squeeze=squeeze, enums=enums)
return out
#%% Unit test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_matlab', exit=False)
doctest.testmod(verbose=False)
|
with h5py.File(filename, 'r') as file:
# normal method
out = _load(file=file, varlist=varlist, squeeze=squeeze, enums=enums)
|
conditional_block
|
shootout-chameneos-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2012-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// no-pretty-expanded
use self::Color::{Red, Yellow, Blue};
use std::sync::mpsc::{channel, Sender, Receiver};
use std::fmt;
use std::thread::Thread;
fn print_complements() {
let all = [Blue, Red, Yellow];
for aa in all.iter() {
for bb in all.iter() {
println!("{:?} + {:?} -> {:?}", *aa, *bb, transform(*aa, *bb));
}
}
}
enum Color {
Red,
Yellow,
Blue,
}
impl Copy for Color {}
impl fmt::Show for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
Red => "red",
Yellow => "yellow",
Blue => "blue",
};
write!(f, "{}", str)
}
}
struct CreatureInfo {
name: uint,
color: Color
}
impl Copy for CreatureInfo {}
fn show_color_list(set: Vec<Color>) -> String {
let mut out = String::new();
for col in set.iter() {
out.push(' ');
out.push_str(format!("{:?}", col).as_slice());
}
out
}
fn show_digit(nn: uint) -> &'static str {
match nn {
0 => {" zero"}
1 => {" one"}
2 => {" two"}
3 => {" three"}
4 => {" four"}
5 => {" five"}
6 => {" six"}
7 => {" seven"}
8 => {" eight"}
9 => {" nine"}
_ => {panic!("expected digits from 0 to 9...")}
}
}
struct Number(uint);
impl fmt::Show for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut out = vec![];
let Number(mut num) = *self;
if num == 0 { out.push(show_digit(0)) };
while num != 0 {
let dig = num % 10;
num = num / 10;
let s = show_digit(dig);
out.push(s);
}
for s in out.iter().rev() {
try!(write!(f, "{}", s))
}
Ok(())
}
}
fn transform(aa: Color, bb: Color) -> Color {
match (aa, bb) {
(Red, Red ) => { Red }
(Red, Yellow) => { Blue }
(Red, Blue ) => { Yellow }
(Yellow, Red ) => { Blue }
(Yellow, Yellow) => { Yellow }
(Yellow, Blue ) => { Red }
(Blue, Red ) => { Yellow }
(Blue, Yellow) => { Red }
(Blue, Blue ) => { Blue }
}
}
fn
|
(
name: uint,
mut color: Color,
from_rendezvous: Receiver<CreatureInfo>,
to_rendezvous: Sender<CreatureInfo>,
to_rendezvous_log: Sender<String>
) {
let mut creatures_met = 0i32;
let mut evil_clones_met = 0;
let mut rendezvous = from_rendezvous.iter();
loop {
// ask for a pairing
to_rendezvous.send(CreatureInfo {name: name, color: color}).unwrap();
// log and change, or quit
match rendezvous.next() {
Some(other_creature) => {
color = transform(color, other_creature.color);
// track some statistics
creatures_met += 1;
if other_creature.name == name {
evil_clones_met += 1;
}
}
None => break
}
}
// log creatures met and evil clones of self
let report = format!("{}{:?}", creatures_met, Number(evil_clones_met));
to_rendezvous_log.send(report).unwrap();
}
fn rendezvous(nn: uint, set: Vec<Color>) {
// these ports will allow us to hear from the creatures
let (to_rendezvous, from_creatures) = channel::<CreatureInfo>();
// these channels will be passed to the creatures so they can talk to us
let (to_rendezvous_log, from_creatures_log) = channel::<String>();
// these channels will allow us to talk to each creature by 'name'/index
let mut to_creature: Vec<Sender<CreatureInfo>> =
set.iter().enumerate().map(|(ii, &col)| {
// create each creature as a listener with a port, and
// give us a channel to talk to each
let to_rendezvous = to_rendezvous.clone();
let to_rendezvous_log = to_rendezvous_log.clone();
let (to_creature, from_rendezvous) = channel();
Thread::spawn(move|| {
creature(ii,
col,
from_rendezvous,
to_rendezvous,
to_rendezvous_log);
});
to_creature
}).collect();
let mut creatures_met = 0;
// set up meetings...
for _ in range(0, nn) {
let fst_creature = from_creatures.recv().unwrap();
let snd_creature = from_creatures.recv().unwrap();
creatures_met += 2;
to_creature[fst_creature.name].send(snd_creature).unwrap();
to_creature[snd_creature.name].send(fst_creature).unwrap();
}
// tell each creature to stop
drop(to_creature);
// print each color in the set
println!("{}", show_color_list(set));
// print each creature's stats
drop(to_rendezvous_log);
for rep in from_creatures_log.iter() {
println!("{}", rep);
}
// print the total number of creatures met
println!("{:?}\n", Number(creatures_met));
}
fn main() {
let nn = if std::os::getenv("RUST_BENCH").is_some() {
200000
} else {
std::os::args().as_slice()
.get(1)
.and_then(|arg| arg.parse())
.unwrap_or(600u)
};
print_complements();
println!("");
rendezvous(nn, vec!(Blue, Red, Yellow));
rendezvous(nn,
vec!(Blue, Red, Yellow, Red, Yellow, Blue, Red, Yellow, Red, Blue));
}
|
creature
|
identifier_name
|
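The shootout-chameneos-redux.rs samples predate Rust 1.0 (uint, fmt::Show, std::os, Thread::spawn), so they no longer compile as written. The color-complement rule at their heart survives unchanged; here is a sketch of it in current Rust, not part of the original benchmark.

// The complement rule from `transform`, restated in modern Rust.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Color {
    Red,
    Yellow,
    Blue,
}

fn transform(aa: Color, bb: Color) -> Color {
    use Color::*;
    match (aa, bb) {
        (a, b) if a == b => a, // meeting your own color changes nothing
        (Red, Yellow) | (Yellow, Red) => Blue,
        (Red, Blue) | (Blue, Red) => Yellow,
        (Yellow, Blue) | (Blue, Yellow) => Red,
        _ => unreachable!(), // already covered by the guard arm above
    }
}

fn main() {
    assert_eq!(transform(Color::Red, Color::Yellow), Color::Blue);
    assert_eq!(transform(Color::Blue, Color::Blue), Color::Blue);
}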
shootout-chameneos-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2012-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// no-pretty-expanded
use self::Color::{Red, Yellow, Blue};
use std::sync::mpsc::{channel, Sender, Receiver};
use std::fmt;
use std::thread::Thread;
fn print_complements() {
let all = [Blue, Red, Yellow];
for aa in all.iter() {
for bb in all.iter() {
println!("{:?} + {:?} -> {:?}", *aa, *bb, transform(*aa, *bb));
}
}
}
enum Color {
Red,
Yellow,
Blue,
}
impl Copy for Color {}
impl fmt::Show for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
Red => "red",
Yellow => "yellow",
Blue => "blue",
};
write!(f, "{}", str)
}
}
struct CreatureInfo {
name: uint,
color: Color
}
impl Copy for CreatureInfo {}
fn show_color_list(set: Vec<Color>) -> String {
let mut out = String::new();
for col in set.iter() {
out.push(' ');
out.push_str(format!("{:?}", col).as_slice());
}
out
}
fn show_digit(nn: uint) -> &'static str {
match nn {
0 => {" zero"}
1 => {" one"}
2 => {" two"}
3 => {" three"}
4 => {" four"}
5 => {" five"}
6 =>
|
7 => {" seven"}
8 => {" eight"}
9 => {" nine"}
_ => {panic!("expected digits from 0 to 9...")}
}
}
struct Number(uint);
impl fmt::Show for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut out = vec![];
let Number(mut num) = *self;
if num == 0 { out.push(show_digit(0)) };
while num != 0 {
let dig = num % 10;
num = num / 10;
let s = show_digit(dig);
out.push(s);
}
for s in out.iter().rev() {
try!(write!(f, "{}", s))
}
Ok(())
}
}
fn transform(aa: Color, bb: Color) -> Color {
match (aa, bb) {
(Red, Red ) => { Red }
(Red, Yellow) => { Blue }
(Red, Blue ) => { Yellow }
(Yellow, Red ) => { Blue }
(Yellow, Yellow) => { Yellow }
(Yellow, Blue ) => { Red }
(Blue, Red ) => { Yellow }
(Blue, Yellow) => { Red }
(Blue, Blue ) => { Blue }
}
}
fn creature(
name: uint,
mut color: Color,
from_rendezvous: Receiver<CreatureInfo>,
to_rendezvous: Sender<CreatureInfo>,
to_rendezvous_log: Sender<String>
) {
let mut creatures_met = 0i32;
let mut evil_clones_met = 0;
let mut rendezvous = from_rendezvous.iter();
loop {
// ask for a pairing
to_rendezvous.send(CreatureInfo {name: name, color: color}).unwrap();
// log and change, or quit
match rendezvous.next() {
Some(other_creature) => {
color = transform(color, other_creature.color);
// track some statistics
creatures_met += 1;
if other_creature.name == name {
evil_clones_met += 1;
}
}
None => break
}
}
// log creatures met and evil clones of self
let report = format!("{}{:?}", creatures_met, Number(evil_clones_met));
to_rendezvous_log.send(report).unwrap();
}
fn rendezvous(nn: uint, set: Vec<Color>) {
// these ports will allow us to hear from the creatures
let (to_rendezvous, from_creatures) = channel::<CreatureInfo>();
// these channels will be passed to the creatures so they can talk to us
let (to_rendezvous_log, from_creatures_log) = channel::<String>();
// these channels will allow us to talk to each creature by 'name'/index
let mut to_creature: Vec<Sender<CreatureInfo>> =
set.iter().enumerate().map(|(ii, &col)| {
// create each creature as a listener with a port, and
// give us a channel to talk to each
let to_rendezvous = to_rendezvous.clone();
let to_rendezvous_log = to_rendezvous_log.clone();
let (to_creature, from_rendezvous) = channel();
Thread::spawn(move|| {
creature(ii,
col,
from_rendezvous,
to_rendezvous,
to_rendezvous_log);
});
to_creature
}).collect();
let mut creatures_met = 0;
// set up meetings...
for _ in range(0, nn) {
let fst_creature = from_creatures.recv().unwrap();
let snd_creature = from_creatures.recv().unwrap();
creatures_met += 2;
to_creature[fst_creature.name].send(snd_creature).unwrap();
to_creature[snd_creature.name].send(fst_creature).unwrap();
}
// tell each creature to stop
drop(to_creature);
// print each color in the set
println!("{}", show_color_list(set));
// print each creature's stats
drop(to_rendezvous_log);
for rep in from_creatures_log.iter() {
println!("{}", rep);
}
// print the total number of creatures met
println!("{:?}\n", Number(creatures_met));
}
fn main() {
let nn = if std::os::getenv("RUST_BENCH").is_some() {
200000
} else {
std::os::args().as_slice()
.get(1)
.and_then(|arg| arg.parse())
.unwrap_or(600u)
};
print_complements();
println!("");
rendezvous(nn, vec!(Blue, Red, Yellow));
rendezvous(nn,
vec!(Blue, Red, Yellow, Red, Yellow, Blue, Red, Yellow, Red, Blue));
}
|
{" six"}
|
conditional_block
|
shootout-chameneos-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2012-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// no-pretty-expanded
use self::Color::{Red, Yellow, Blue};
use std::sync::mpsc::{channel, Sender, Receiver};
use std::fmt;
use std::thread::Thread;
fn print_complements() {
let all = [Blue, Red, Yellow];
for aa in all.iter() {
for bb in all.iter() {
println!("{:?} + {:?} -> {:?}", *aa, *bb, transform(*aa, *bb));
}
}
}
enum Color {
Red,
Yellow,
Blue,
}
impl Copy for Color {}
impl fmt::Show for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
Red => "red",
Yellow => "yellow",
Blue => "blue",
};
write!(f, "{}", str)
}
}
struct CreatureInfo {
name: uint,
color: Color
}
impl Copy for CreatureInfo {}
fn show_color_list(set: Vec<Color>) -> String {
let mut out = String::new();
for col in set.iter() {
out.push(' ');
out.push_str(format!("{:?}", col).as_slice());
}
out
}
fn show_digit(nn: uint) -> &'static str {
match nn {
0 => {" zero"}
1 => {" one"}
2 => {" two"}
3 => {" three"}
4 => {" four"}
5 => {" five"}
6 => {" six"}
7 => {" seven"}
8 => {" eight"}
9 => {" nine"}
_ => {panic!("expected digits from 0 to 9...")}
}
}
struct Number(uint);
impl fmt::Show for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut out = vec![];
let Number(mut num) = *self;
if num == 0 { out.push(show_digit(0)) };
while num != 0 {
let dig = num % 10;
num = num / 10;
let s = show_digit(dig);
out.push(s);
}
for s in out.iter().rev() {
try!(write!(f, "{}", s))
}
Ok(())
}
}
fn transform(aa: Color, bb: Color) -> Color {
match (aa, bb) {
(Red, Red ) => { Red }
(Red, Yellow) => { Blue }
(Red, Blue ) => { Yellow }
(Yellow, Red ) => { Blue }
(Yellow, Yellow) => { Yellow }
(Yellow, Blue ) => { Red }
(Blue, Red ) => { Yellow }
(Blue, Yellow) => { Red }
(Blue, Blue ) => { Blue }
}
}
fn creature(
name: uint,
mut color: Color,
from_rendezvous: Receiver<CreatureInfo>,
to_rendezvous: Sender<CreatureInfo>,
to_rendezvous_log: Sender<String>
) {
let mut creatures_met = 0i32;
let mut evil_clones_met = 0;
let mut rendezvous = from_rendezvous.iter();
loop {
// ask for a pairing
to_rendezvous.send(CreatureInfo {name: name, color: color}).unwrap();
// log and change, or quit
match rendezvous.next() {
Some(other_creature) => {
color = transform(color, other_creature.color);
// track some statistics
creatures_met += 1;
if other_creature.name == name {
evil_clones_met += 1;
}
}
None => break
}
}
// log creatures met and evil clones of self
let report = format!("{}{:?}", creatures_met, Number(evil_clones_met));
to_rendezvous_log.send(report).unwrap();
}
fn rendezvous(nn: uint, set: Vec<Color>) {
// these ports will allow us to hear from the creatures
let (to_rendezvous, from_creatures) = channel::<CreatureInfo>();
// these channels will be passed to the creatures so they can talk to us
let (to_rendezvous_log, from_creatures_log) = channel::<String>();
// these channels will allow us to talk to each creature by 'name'/index
let mut to_creature: Vec<Sender<CreatureInfo>> =
set.iter().enumerate().map(|(ii, &col)| {
// create each creature as a listener with a port, and
// give us a channel to talk to each
let to_rendezvous = to_rendezvous.clone();
let to_rendezvous_log = to_rendezvous_log.clone();
let (to_creature, from_rendezvous) = channel();
Thread::spawn(move|| {
creature(ii,
col,
from_rendezvous,
to_rendezvous,
to_rendezvous_log);
});
to_creature
}).collect();
let mut creatures_met = 0;
// set up meetings...
for _ in range(0, nn) {
let fst_creature = from_creatures.recv().unwrap();
let snd_creature = from_creatures.recv().unwrap();
creatures_met += 2;
to_creature[fst_creature.name].send(snd_creature).unwrap();
to_creature[snd_creature.name].send(fst_creature).unwrap();
}
// tell each creature to stop
drop(to_creature);
// print each color in the set
println!("{}", show_color_list(set));
// print each creature's stats
drop(to_rendezvous_log);
for rep in from_creatures_log.iter() {
println!("{}", rep);
}
// print the total number of creatures met
println!("{:?}\n", Number(creatures_met));
}
fn main() {
let nn = if std::os::getenv("RUST_BENCH").is_some() {
200000
} else {
std::os::args().as_slice()
.get(1)
.and_then(|arg| arg.parse())
.unwrap_or(600u)
};
print_complements();
println!("");
rendezvous(nn, vec!(Blue, Red, Yellow));
rendezvous(nn,
vec!(Blue, Red, Yellow, Red, Yellow, Blue, Red, Yellow, Red, Blue));
}
|
random_line_split
|
|
shootout-chameneos-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2012-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// no-pretty-expanded
use self::Color::{Red, Yellow, Blue};
use std::sync::mpsc::{channel, Sender, Receiver};
use std::fmt;
use std::thread::Thread;
fn print_complements() {
let all = [Blue, Red, Yellow];
for aa in all.iter() {
for bb in all.iter() {
println!("{:?} + {:?} -> {:?}", *aa, *bb, transform(*aa, *bb));
}
}
}
enum Color {
Red,
Yellow,
Blue,
}
impl Copy for Color {}
impl fmt::Show for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
Red => "red",
Yellow => "yellow",
Blue => "blue",
};
write!(f, "{}", str)
}
}
struct CreatureInfo {
name: uint,
color: Color
}
impl Copy for CreatureInfo {}
fn show_color_list(set: Vec<Color>) -> String {
let mut out = String::new();
for col in set.iter() {
out.push(' ');
out.push_str(format!("{:?}", col).as_slice());
}
out
}
fn show_digit(nn: uint) -> &'static str {
match nn {
0 => {" zero"}
1 => {" one"}
2 => {" two"}
3 => {" three"}
4 => {" four"}
5 => {" five"}
6 => {" six"}
7 => {" seven"}
8 => {" eight"}
9 => {" nine"}
_ => {panic!("expected digits from 0 to 9...")}
}
}
struct Number(uint);
impl fmt::Show for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut out = vec![];
let Number(mut num) = *self;
if num == 0 { out.push(show_digit(0)) };
while num != 0 {
let dig = num % 10;
num = num / 10;
let s = show_digit(dig);
out.push(s);
}
for s in out.iter().rev() {
try!(write!(f, "{}", s))
}
Ok(())
}
}
fn transform(aa: Color, bb: Color) -> Color {
match (aa, bb) {
(Red, Red ) => { Red }
(Red, Yellow) => { Blue }
(Red, Blue ) => { Yellow }
(Yellow, Red ) => { Blue }
(Yellow, Yellow) => { Yellow }
(Yellow, Blue ) => { Red }
(Blue, Red ) => { Yellow }
(Blue, Yellow) => { Red }
(Blue, Blue ) => { Blue }
}
}
fn creature(
name: uint,
mut color: Color,
from_rendezvous: Receiver<CreatureInfo>,
to_rendezvous: Sender<CreatureInfo>,
to_rendezvous_log: Sender<String>
) {
let mut creatures_met = 0i32;
let mut evil_clones_met = 0;
let mut rendezvous = from_rendezvous.iter();
loop {
// ask for a pairing
to_rendezvous.send(CreatureInfo {name: name, color: color}).unwrap();
// log and change, or quit
match rendezvous.next() {
Some(other_creature) => {
color = transform(color, other_creature.color);
// track some statistics
creatures_met += 1;
if other_creature.name == name {
evil_clones_met += 1;
}
}
None => break
}
}
// log creatures met and evil clones of self
let report = format!("{}{:?}", creatures_met, Number(evil_clones_met));
to_rendezvous_log.send(report).unwrap();
}
fn rendezvous(nn: uint, set: Vec<Color>) {
// these ports will allow us to hear from the creatures
let (to_rendezvous, from_creatures) = channel::<CreatureInfo>();
// these channels will be passed to the creatures so they can talk to us
let (to_rendezvous_log, from_creatures_log) = channel::<String>();
// these channels will allow us to talk to each creature by 'name'/index
let mut to_creature: Vec<Sender<CreatureInfo>> =
set.iter().enumerate().map(|(ii, &col)| {
// create each creature as a listener with a port, and
// give us a channel to talk to each
let to_rendezvous = to_rendezvous.clone();
let to_rendezvous_log = to_rendezvous_log.clone();
let (to_creature, from_rendezvous) = channel();
Thread::spawn(move|| {
creature(ii,
col,
from_rendezvous,
to_rendezvous,
to_rendezvous_log);
});
to_creature
}).collect();
let mut creatures_met = 0;
// set up meetings...
for _ in range(0, nn) {
let fst_creature = from_creatures.recv().unwrap();
let snd_creature = from_creatures.recv().unwrap();
creatures_met += 2;
to_creature[fst_creature.name].send(snd_creature).unwrap();
to_creature[snd_creature.name].send(fst_creature).unwrap();
}
// tell each creature to stop
drop(to_creature);
// print each color in the set
println!("{}", show_color_list(set));
// print each creature's stats
drop(to_rendezvous_log);
for rep in from_creatures_log.iter() {
println!("{}", rep);
}
// print the total number of creatures met
println!("{:?}\n", Number(creatures_met));
}
fn main()
|
{
let nn = if std::os::getenv("RUST_BENCH").is_some() {
200000
} else {
std::os::args().as_slice()
.get(1)
.and_then(|arg| arg.parse())
.unwrap_or(600u)
};
print_complements();
println!("");
rendezvous(nn, vec!(Blue, Red, Yellow));
rendezvous(nn,
vec!(Blue, Red, Yellow, Red, Yellow, Blue, Red, Yellow, Red, Blue));
}
|
identifier_body
|
|
ws_impl.rs
|
use flate2::read::ZlibDecoder;
use serde_json;
use websocket::message::OwnedMessage;
use websocket::sync::stream::{TcpStream, TlsStream};
use websocket::sync::Client as WsClient;
use gateway::GatewayError;
use internal::prelude::*;
pub trait ReceiverExt {
fn recv_json<F, T>(&mut self, decode: F) -> Result<T> where F: Fn(Value) -> Result<T>;
}
pub trait SenderExt {
fn send_json(&mut self, value: &Value) -> Result<()>;
}
impl ReceiverExt for WsClient<TlsStream<TcpStream>> {
fn recv_json<F, T>(&mut self, decode: F) -> Result<T>
where F: Fn(Value) -> Result<T> {
let message = self.recv_message()?;
let res = match message {
OwnedMessage::Binary(bytes) => {
let value = serde_json::from_reader(ZlibDecoder::new(&bytes[..]))?;
Some(decode(value).map_err(|why| {
let s = String::from_utf8_lossy(&bytes);
warn!("(╯°□°)╯︵ ┻━┻ Error decoding: {}", s);
|
OwnedMessage::Text(payload) => {
let value = serde_json::from_str(&payload)?;
Some(decode(value).map_err(|why| {
warn!("(╯°□°)╯︵ ┻━┻ Error decoding: {}", payload);
why
}))
},
OwnedMessage::Ping(x) => {
self.send_message(&OwnedMessage::Pong(x)).map_err(
Error::from,
)?;
None
},
OwnedMessage::Pong(_) => None,
};
        // Ignore the `None`s returned from `Ping` and `Pong`,
        // since they're essentially useless to us anyway.
match res {
Some(data) => data,
None => self.recv_json(decode),
}
}
}
impl SenderExt for WsClient<TlsStream<TcpStream>> {
fn send_json(&mut self, value: &Value) -> Result<()> {
serde_json::to_string(value)
.map(OwnedMessage::Text)
.map_err(Error::from)
.and_then(|m| self.send_message(&m).map_err(Error::from))
}
}
|
why
}))
},
OwnedMessage::Close(data) => Some(Err(Error::Gateway(GatewayError::Closed(data)))),
|
random_line_split
|
ws_impl.rs
|
use flate2::read::ZlibDecoder;
use serde_json;
use websocket::message::OwnedMessage;
use websocket::sync::stream::{TcpStream, TlsStream};
use websocket::sync::Client as WsClient;
use gateway::GatewayError;
use internal::prelude::*;
pub trait ReceiverExt {
fn recv_json<F, T>(&mut self, decode: F) -> Result<T> where F: Fn(Value) -> Result<T>;
}
pub trait SenderExt {
fn send_json(&mut self, value: &Value) -> Result<()>;
}
impl ReceiverExt for WsClient<TlsStream<TcpStream>> {
fn recv_json<F, T>(&mut self, decode: F) -> Result<T>
where F: Fn(Value) -> Result<T>
|
tream<TcpStream>> {
fn send_json(&mut self, value: &Value) -> Result<()> {
serde_json::to_string(value)
.map(OwnedMessage::Text)
.map_err(Error::from)
.and_then(|m| self.send_message(&m).map_err(Error::from))
}
}
|
{
let message = self.recv_message()?;
let res = match message {
OwnedMessage::Binary(bytes) => {
let value = serde_json::from_reader(ZlibDecoder::new(&bytes[..]))?;
Some(decode(value).map_err(|why| {
let s = String::from_utf8_lossy(&bytes);
warn!("(╯°□°)╯︵ ┻━┻ Error decoding: {}", s);
why
}))
},
OwnedMessage::Close(data) => Some(Err(Error::Gateway(GatewayError::Closed(data)))),
OwnedMessage::Text(payload) => {
let value = serde_json::from_str(&payload)?;
Some(decode(value).map_err(|why| {
warn!("(╯°□°)╯︵ ┻━┻ Error decoding: {}", payload);
why
}))
},
OwnedMessage::Ping(x) => {
self.send_message(&OwnedMessage::Pong(x)).map_err(
Error::from,
)?;
None
},
OwnedMessage::Pong(_) => None,
};
        // Ignore the `None`s returned from `Ping` and `Pong`,
        // since they're essentially useless to us anyway.
match res {
Some(data) => data,
None => self.recv_json(decode),
}
}
}
impl SenderExt for WsClient<TlsS
|
identifier_body
|
ws_impl.rs
|
use flate2::read::ZlibDecoder;
use serde_json;
use websocket::message::OwnedMessage;
use websocket::sync::stream::{TcpStream, TlsStream};
use websocket::sync::Client as WsClient;
use gateway::GatewayError;
use internal::prelude::*;
pub trait ReceiverExt {
fn recv_json<F, T>(&mut self, decode: F) -> Result<T> where F: Fn(Value) -> Result<T>;
}
pub trait SenderExt {
fn send_json(&mut self, value: &Value) -> Result<()>;
}
impl ReceiverExt for WsClient<TlsStream<TcpStream>> {
fn recv_json<F, T>(&mut self, decode: F) -> Result<T>
where F: Fn(Value) -> Result<T> {
let message = self.recv_message()?;
let res = match message {
OwnedMessage::Binary(bytes) => {
let value = serde_json::from_reader(ZlibDecoder::new(&bytes[..]))?;
Some(decode(value).map_err(|why| {
let s = String::from_utf8_lossy(&bytes);
warn!("(╯°□°)╯︵ ┻━┻ Error decoding: {}", s);
why
}))
},
OwnedMessage::Close(data) => Some(Err(Error::Gateway(GatewayError::Closed(data)))),
OwnedMessage::Text(payload) => {
let value = serde_json::from_str(&payload)?;
Some(decode(value).map_err(|why| {
warn!("(╯°□°)╯︵ ┻━┻ Error decoding: {}", payload);
why
}))
},
OwnedMessage::Ping(x) => {
self.send_message(&OwnedMessage::Pong(x)).map_err(
Error::from,
)?;
None
},
OwnedMessage::Pong(_) => None,
};
        // Ignore the `None`s returned from `Ping` and `Pong`,
        // since they're essentially useless to us anyway.
match res {
Some(data) => data,
None => self.recv_json(decode),
}
}
}
impl SenderExt for WsClient<TlsStream<TcpStream>> {
fn send_json(&mut self, value: &Value)
|
<()> {
serde_json::to_string(value)
.map(OwnedMessage::Text)
.map_err(Error::from)
.and_then(|m| self.send_message(&m).map_err(Error::from))
}
}
|
-> Result
|
identifier_name
|
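The recv_json in the ws_impl.rs samples answers Ping, maps Close to an error, and recurses past control frames whose decode result is None. Below is that retry pattern in isolation, with the websocket types replaced by a toy Frame enum (an assumption for illustration only).

// Dependency-free sketch of the skip-and-recurse pattern in recv_json:
// control frames produce None, which triggers another receive.
enum Frame {
    Text(String),
    Ping,
    Pong,
}

fn recv(frames: &mut Vec<Frame>) -> Option<String> {
    let res = match frames.pop()? {
        Frame::Text(payload) => Some(payload),
        // The real code answers Ping with Pong before yielding None.
        Frame::Ping | Frame::Pong => None,
    };
    match res {
        Some(data) => Some(data),
        None => recv(frames), // skip control frames and try again
    }
}

fn main() {
    let mut queue = vec![Frame::Text("payload".into()), Frame::Pong, Frame::Ping];
    assert_eq!(recv(&mut queue).as_deref(), Some("payload"));
}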
tomcat.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
from datetime import datetime
class Tomcat(Plugin, RedHatPlugin):
"""Apache Tomcat server
"""
plugin_name = 'tomcat'
profiles = ('webserver', 'java', 'services', 'sysmgmt')
packages = ('tomcat', 'tomcat6', 'tomcat7', 'tomcat8')
def setup(self):
self.add_copy_spec([
"/etc/tomcat",
"/etc/tomcat6",
"/etc/tomcat7",
"/etc/tomcat8"
])
limit = self.get_option("log_size")
if not self.get_option("all_logs"):
|
else:
self.add_copy_spec("/var/log/tomcat*/*")
def postproc(self):
serverXmlPasswordAttributes = ['keyPass', 'keystorePass',
'truststorePass', 'SSLPassword']
for attr in serverXmlPasswordAttributes:
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/server.xml",
r"%s=(\S*)" % attr,
r'%s="********"' % attr
)
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/tomcat-users.xml",
r"password=(\S*)",
r'password="********"'
)
# vim: set et ts=4 sw=4 :
|
log_glob = "/var/log/tomcat*/catalina.out"
self.add_copy_spec(log_glob, sizelimit=limit)
            # get today's date in ISO format so that days/months
            # below 10 are zero-padded
today = datetime.date(datetime.now()).isoformat()
log_glob = "/var/log/tomcat*/catalina.%s.log" % today
self.add_copy_spec(log_glob, sizelimit=limit)
|
conditional_block
|
tomcat.py
|
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
from datetime import datetime
class Tomcat(Plugin, RedHatPlugin):
"""Apache Tomcat server
"""
plugin_name = 'tomcat'
profiles = ('webserver', 'java', 'services', 'sysmgmt')
packages = ('tomcat', 'tomcat6', 'tomcat7', 'tomcat8')
def setup(self):
self.add_copy_spec([
"/etc/tomcat",
"/etc/tomcat6",
"/etc/tomcat7",
"/etc/tomcat8"
])
limit = self.get_option("log_size")
if not self.get_option("all_logs"):
log_glob = "/var/log/tomcat*/catalina.out"
self.add_copy_spec(log_glob, sizelimit=limit)
            # get today's date in ISO format so that days/months
            # below 10 are zero-padded
today = datetime.date(datetime.now()).isoformat()
log_glob = "/var/log/tomcat*/catalina.%s.log" % today
self.add_copy_spec(log_glob, sizelimit=limit)
else:
self.add_copy_spec("/var/log/tomcat*/*")
def postproc(self):
serverXmlPasswordAttributes = ['keyPass', 'keystorePass',
'truststorePass', 'SSLPassword']
for attr in serverXmlPasswordAttributes:
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/server.xml",
r"%s=(\S*)" % attr,
r'%s="********"' % attr
)
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/tomcat-users.xml",
r"password=(\S*)",
r'password="********"'
)
# vim: set et ts=4 sw=4 :
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
|
random_line_split
|
|
tomcat.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
from datetime import datetime
class Tomcat(Plugin, RedHatPlugin):
|
# vim: set et ts=4 sw=4 :
|
"""Apache Tomcat server
"""
plugin_name = 'tomcat'
profiles = ('webserver', 'java', 'services', 'sysmgmt')
packages = ('tomcat', 'tomcat6', 'tomcat7', 'tomcat8')
def setup(self):
self.add_copy_spec([
"/etc/tomcat",
"/etc/tomcat6",
"/etc/tomcat7",
"/etc/tomcat8"
])
limit = self.get_option("log_size")
if not self.get_option("all_logs"):
log_glob = "/var/log/tomcat*/catalina.out"
self.add_copy_spec(log_glob, sizelimit=limit)
            # get today's date in ISO format so that days/months
            # below 10 are zero-padded
today = datetime.date(datetime.now()).isoformat()
log_glob = "/var/log/tomcat*/catalina.%s.log" % today
self.add_copy_spec(log_glob, sizelimit=limit)
else:
self.add_copy_spec("/var/log/tomcat*/*")
def postproc(self):
serverXmlPasswordAttributes = ['keyPass', 'keystorePass',
'truststorePass', 'SSLPassword']
for attr in serverXmlPasswordAttributes:
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/server.xml",
r"%s=(\S*)" % attr,
r'%s="********"' % attr
)
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/tomcat-users.xml",
r"password=(\S*)",
r'password="********"'
)
|
identifier_body
|
tomcat.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
from datetime import datetime
class
|
(Plugin, RedHatPlugin):
"""Apache Tomcat server
"""
plugin_name = 'tomcat'
profiles = ('webserver', 'java', 'services', 'sysmgmt')
packages = ('tomcat', 'tomcat6', 'tomcat7', 'tomcat8')
def setup(self):
self.add_copy_spec([
"/etc/tomcat",
"/etc/tomcat6",
"/etc/tomcat7",
"/etc/tomcat8"
])
limit = self.get_option("log_size")
if not self.get_option("all_logs"):
log_glob = "/var/log/tomcat*/catalina.out"
self.add_copy_spec(log_glob, sizelimit=limit)
            # get today's date in ISO format so that days/months
            # below 10 are zero-padded
today = datetime.date(datetime.now()).isoformat()
log_glob = "/var/log/tomcat*/catalina.%s.log" % today
self.add_copy_spec(log_glob, sizelimit=limit)
else:
self.add_copy_spec("/var/log/tomcat*/*")
def postproc(self):
serverXmlPasswordAttributes = ['keyPass', 'keystorePass',
'truststorePass', 'SSLPassword']
for attr in serverXmlPasswordAttributes:
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/server.xml",
r"%s=(\S*)" % attr,
r'%s="********"' % attr
)
self.do_path_regex_sub(
r"\/etc\/tomcat.*\/tomcat-users.xml",
r"password=(\S*)",
r'password="********"'
)
# vim: set et ts=4 sw=4 :
|
Tomcat
|
identifier_name
|
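The tomcat.py samples all end in the same postproc scrubbing step. The sketch below shows what it does, using plain re instead of sos's do_path_regex_sub helper; the sample XML string is made up.

# Mask password-like attributes the way Tomcat.postproc does, applying the
# same regex substitution to an in-memory string instead of files on disk.
import re

xml = '<Connector keystorePass=changeit SSLPassword=hunter2 />'
for attr in ('keyPass', 'keystorePass', 'truststorePass', 'SSLPassword'):
    xml = re.sub(r"%s=(\S*)" % attr, r'%s="********"' % attr, xml)

print(xml)  # -> <Connector keystorePass="********" SSLPassword="********" />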
diagrammeJoseph.js
|
"use strict";
// based on: http://bl.ocks.org/brattonc/5e5ce9beee483220e2f6#index.html
// the data
var donneesJardinJoseph = {
"printemps": [
{nomProduit: "Racines", poids: 12850},
{nomProduit: "Poids et Haricots", poids: 10934},
{nomProduit: "Choux", poids: 8760},
{nomProduit: "Epinards et Salades", poids: 19851},
{nomProduit: "Courges", poids: 77400},
{nomProduit: "Tomates", poids: 35314},
{nomProduit: "Aromatiques", poids: 1260},
{nomProduit: "Fruits", poids: 11600},
{nomProduit: "Autres", poids: 2400}
],
"ete": [
{nomProduit: "Racines", poids: 7200},
{nomProduit: "Poids et Haricots", poids: 5466},
{nomProduit: "Choux", poids: 0},
{nomProduit: "Epinards et Salades", poids: 6671},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 88286},
{nomProduit: "Aromatiques", poids: 1267},
{nomProduit: "Fruits", poids: 2695},
{nomProduit: "Autres", poids: 7265}
],
"automne": [
{nomProduit: "Racines", poids: 0},
{nomProduit: "Poids et Haricots", poids: 0},
{nomProduit: "Choux", poids: 2920},
{nomProduit: "Epinards et Salades", poids: 0},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 0},
{nomProduit: "Aromatiques", poids: 1273},
{nomProduit: "Fruits", poids: 15715},
{nomProduit: "Autres", poids: 2300}
],
"hiver": [
{nomProduit: "Racines", poids: 0},
{nomProduit: "Poids et Haricots", poids: 0},
{nomProduit: "Choux", poids: 2920},
{nomProduit: "Epinards et Salades", poids: 13233},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 0},
{nomProduit: "Aromatiques", poids: 0},
{nomProduit: "Fruits", poids: 0},
{nomProduit: "Autres", poids: 0}
],
"total": [
{nomProduit: "Racines", poids: 20050},
{nomProduit: "Poids et Haricots", poids: 16400},
{nomProduit: "Choux", poids: 14600},
{nomProduit: "Epinards et Salades", poids: 39755},
{nomProduit: "Courges", poids: 77400},
{nomProduit: "Tomates", poids: 123600},
{nomProduit: "Aromatiques", poids: 3800},
{nomProduit: "Fruits", poids: 30010},
{nomProduit: "Autres", poids: 11965}
]
};
var infosJardinJoseph = {
"Racines": {
idJauge: "jauge_racine",
couleur: "#f7bd48",
maximum: 20050,
cheminImage: "./img/joseph/racines.svg"
},
"Poids et Haricots": {
idJauge: "jauge_haricots",
couleur: "#c96d63",
maximum: 16400,
cheminImage: "./img/joseph/haricots.svg"
},
"Choux": {
idJauge: "jauge_choux",
couleur: "#b5ff9c",
maximum: 14600,
cheminImage: "./img/joseph/choux.svg"
},
"Epinards et Salades": {
idJauge: "jauge_salades",
couleur: "#c9ff73",
maximum: 39755,
cheminImage: "./img/joseph/salades.svg"
},
"Courges": {
idJauge: "jauge_courges",
couleur: "#d6ff38",
maximum: 77400,
cheminImage: "./img/joseph/courges.svg"
},
"Tomates": {
idJauge: "jauge_tomates",
couleur: "#ee7268",
maximum: 123600,
cheminImage: "./img/joseph/tomates.svg"
},
"Aromatiques": {
idJauge: "jauge_aromatiques",
couleur: "#aef86e",
maximum: 3800,
cheminImage: "./img/joseph/aromatiques.svg"
},
"Fruits": {
idJauge: "jauge_fruits",
couleur: "#ffa87b",
maximum: 30010,
cheminImage: "./img/joseph/fruits.svg"
},
"Autres": {
idJauge: "jauge_autres",
couleur: "#f7ff64",
maximum: 11965,
cheminImage: "./img/joseph/autres.svg"
}
};
// used to store the gauge updaters (GaugeUpdater)
var jauges = {};
// fetch the data for the current season
var saisonActuelle = getSaisonActuelle();
var donneesActuelles = donneesJardinJoseph[saisonActuelle];
// create the element containing the total
var total = [ calculerPoidsTotal(donneesActuelles) ];
d3.select("#totalJoseph").selectAll("p").data(total).enter().append("p").text(function(d) {
return d;
});
// for each product, create an element
donneesActuelles.forEach(function(element, index, tableau) {
var infoElement = infosJardinJoseph[element.nomProduit];
var idJauge = infoElement.idJauge;
// create the element that holds the gauge
var baliseJauge = d3.select("#diagrammeJoseph").append("div").attr("class", "baliseJauge");
var svgJauge = baliseJauge.append("svg").attr({
"id": idJauge,
"viewBox": "0 0 100 150",
//"height": 150,
//"width": 100
});
// gauge configuration
var config = liquidFillGaugeDefaultSettings();
config.minValue = 0;
config.maxValue = (infoElement.maximum * 0.001).toFixed(1);
config.circleThickness = 0.02; // outer circle thickness
config.circleFillGap = 0; // gap between the outer and inner circles
config.textVertPosition = 1.6; // position the text above the gauge
config.circleColor = "#706f6f";
config.textColor = "#999999";
config.waveColor = infoElement.couleur;
//config.waveTextColor = "#6DA398";
config.waveAnimateTime = 5000;
config.waveHeight = 0;
config.waveAnimate = false;
config.waveCount = 2;
config.waveOffset = 0.25;
config.textSize = 1.2;
config.displayPercent = false;
var poids = (element.poids * 0.001).toFixed(1);
var jauge = loadLiquidFillGauge(idJauge, poids, config);
jauges[idJauge] = jauge;
// add the "kg" label above the gauge
svgJauge.select("g").append("text").attr("x", 50).attr("y", -6).attr("class", "liquidFillGaugeText").attr("text-anchor", "middle").style("fill", "#fdbe63").text("kg");
// move the group back to position (0, 0) (needed because the gauge text was overflowing the area)
svgJauge.select("g").attr("transform", "translate(0, 50)");
// add the image inside the gauge
svgJauge.select("g").append("image").attr({
"xlink:href": infoElement.cheminImage,
"x": 0,
"y": 0,
"height": 50,
"width": 50,
"transform": "translate(25, 25)",
});
});
// update the chart data with the current season's data
updateDiagrammeJoseph(saisonActuelle);
// update the selected radio button
$("#radioJoseph_" + saisonActuelle).attr("checked", "");
/**
 * updates the data of Joseph's chart (the gauges) according to the season
 * @param {string} saison can take 5 values: "printemps", "ete", "automne", "hiver", "total"
*/
function updateDiagrammeJoseph(saison) {
// retrieve the data for the given season
var donneesActuelles = donneesJardinJoseph[saison] || [];
// update each product's gauge
donneesActuelles.forEach(function(element, index, tableau) {
var infosElement = infosJardinJoseph[element.nomProduit];
var jauge = jauges[infosElement.idJauge];
var poids = (element.poids * 0.001).toFixed(1);
jauge.update(poids);
});
// update the total
var total = [ calculerPoidsTotal(donneesActuelles) ];
var balisesTotal = d3.select("#totalJoseph").selectAll("p").data(total).transition().duration(1000).tween("text", function() {
var i = d3.interpolateRound(Number(this.textContent), total[0]);
return function(t) {
this.textContent = i(t);
};
});
// tween animates the update of a number
// update the season image
$("#imageSaison").attr("src", "./img/joseph/background_" + saison + ".png");
}
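// Illustrative usage (not in the original source): updateDiagrammeJoseph("hiver") animates every
// gauge to the winter figures, e.g. the "Epinards et Salades" gauge settles at 13.2 (kg).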
/**
 * returns the total weight of vegetables harvested for the data passed in
 * @param {Array} donnees the list of products, each product stored as an object with the attributes "nomProduit" and "poids"
 * @returns {string} the total weight in kg with one decimal place (toFixed returns a string)
*/
function calculerPoidsTotal(donnees) {
var total = donnees.reduce(function(prec, elem, indice, tab) {
return prec + elem.poids;
}, 0);
return (total * 0.001).toFixed(1);
}
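// Illustrative example (not in the original source): with donnees = [{nomProduit: "Racines", poids: 500},
// {nomProduit: "Choux", poids: 700}], calculerPoidsTotal(donnees) returns the string "1.2".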
/**
 * returns the current season (the season matching today's date)
 * @returns {string} the current season, one of "printemps", "ete", "automne", "hiver"
*/
function getSaisonActuelle() {
var today = new Date();
va
|
r mois = today.getMonth();
var jour = today.getDate(); // day of the month (getDay() returns the weekday, which would be wrong here)
// getMonth(): january = 0, february = 1, ...
if(mois >= 2 && mois <= 5) { // between March and June
if(mois == 2 && jour < 20) return "hiver";
else if(mois == 5 && jour > 20) return "ete";
else return "printemps";
}
else if(mois >= 5 && mois <= 8) { // between June and September
if(mois == 5 && jour < 21) return "printemps";
else if(mois == 8 && jour > 22) return "automne";
else return "ete";
}
else if(mois >= 8 && mois <= 11) { // between September and December
if(mois == 8 && jour < 23) return "ete";
else if(mois == 11 && jour > 20) return "hiver";
else return "automne";
}
else return "hiver";
}
|
identifier_body
|
|
diagrammeJoseph.js
|
"use strict";
// based on: http://bl.ocks.org/brattonc/5e5ce9beee483220e2f6#index.html
// the data
var donneesJardinJoseph = {
"printemps": [
{nomProduit: "Racines", poids: 12850},
{nomProduit: "Poids et Haricots", poids: 10934},
{nomProduit: "Choux", poids: 8760},
{nomProduit: "Epinards et Salades", poids: 19851},
{nomProduit: "Courges", poids: 77400},
{nomProduit: "Tomates", poids: 35314},
{nomProduit: "Aromatiques", poids: 1260},
{nomProduit: "Fruits", poids: 11600},
{nomProduit: "Autres", poids: 2400}
],
"ete": [
{nomProduit: "Racines", poids: 7200},
{nomProduit: "Poids et Haricots", poids: 5466},
{nomProduit: "Choux", poids: 0},
{nomProduit: "Epinards et Salades", poids: 6671},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 88286},
{nomProduit: "Aromatiques", poids: 1267},
{nomProduit: "Fruits", poids: 2695},
{nomProduit: "Autres", poids: 7265}
],
"automne": [
{nomProduit: "Racines", poids: 0},
{nomProduit: "Poids et Haricots", poids: 0},
{nomProduit: "Choux", poids: 2920},
{nomProduit: "Epinards et Salades", poids: 0},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 0},
{nomProduit: "Aromatiques", poids: 1273},
{nomProduit: "Fruits", poids: 15715},
{nomProduit: "Autres", poids: 2300}
],
"hiver": [
{nomProduit: "Racines", poids: 0},
{nomProduit: "Poids et Haricots", poids: 0},
{nomProduit: "Choux", poids: 2920},
{nomProduit: "Epinards et Salades", poids: 13233},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 0},
{nomProduit: "Aromatiques", poids: 0},
{nomProduit: "Fruits", poids: 0},
{nomProduit: "Autres", poids: 0}
],
"total": [
{nomProduit: "Racines", poids: 20050},
{nomProduit: "Poids et Haricots", poids: 16400},
{nomProduit: "Choux", poids: 14600},
{nomProduit: "Epinards et Salades", poids: 39755},
{nomProduit: "Courges", poids: 77400},
{nomProduit: "Tomates", poids: 123600},
|
]
};
var infosJardinJoseph = {
"Racines": {
idJauge: "jauge_racine",
couleur: "#f7bd48",
maximum: 20050,
cheminImage: "./img/joseph/racines.svg"
},
"Poids et Haricots": {
idJauge: "jauge_haricots",
couleur: "#c96d63",
maximum: 16400,
cheminImage: "./img/joseph/haricots.svg"
},
"Choux": {
idJauge: "jauge_choux",
couleur: "#b5ff9c",
maximum: 14600,
cheminImage: "./img/joseph/choux.svg"
},
"Epinards et Salades": {
idJauge: "jauge_salades",
couleur: "#c9ff73",
maximum: 39755,
cheminImage: "./img/joseph/salades.svg"
},
"Courges": {
idJauge: "jauge_courges",
couleur: "#d6ff38",
maximum: 77400,
cheminImage: "./img/joseph/courges.svg"
},
"Tomates": {
idJauge: "jauge_tomates",
couleur: "#ee7268",
maximum: 123600,
cheminImage: "./img/joseph/tomates.svg"
},
"Aromatiques": {
idJauge: "jauge_aromatiques",
couleur: "#aef86e",
maximum: 3800,
cheminImage: "./img/joseph/aromatiques.svg"
},
"Fruits": {
idJauge: "jauge_fruits",
couleur: "#ffa87b",
maximum: 30010,
cheminImage: "./img/joseph/fruits.svg"
},
"Autres": {
idJauge: "jauge_autres",
couleur: "#f7ff64",
maximum: 11965,
cheminImage: "./img/joseph/autres.svg"
}
};
// used to store the gauge updaters (GaugeUpdater)
var jauges = {};
// retrieve the data for the current season
var saisonActuelle = getSaisonActuelle();
var donneesActuelles = donneesJardinJoseph[saisonActuelle];
// create the element that holds the total
var total = [ calculerPoidsTotal(donneesActuelles) ];
d3.select("#totalJoseph").selectAll("p").data(total).enter().append("p").text(function(d) {
return d;
});
// create one element per product
donneesActuelles.forEach(function(element, index, tableau) {
var infoElement = infosJardinJoseph[element.nomProduit];
var idJauge = infoElement.idJauge;
// create the element that holds the gauge
var baliseJauge = d3.select("#diagrammeJoseph").append("div").attr("class", "baliseJauge");
var svgJauge = baliseJauge.append("svg").attr({
"id": idJauge,
"viewBox": "0 0 100 150",
//"height": 150,
//"width": 100
});
// gauge configuration
var config = liquidFillGaugeDefaultSettings();
config.minValue = 0;
config.maxValue = (infoElement.maximum * 0.001).toFixed(1);
config.circleThickness = 0.02; // outer circle thickness
config.circleFillGap = 0; // gap between the outer and inner circles
config.textVertPosition = 1.6; // position the text above the gauge
config.circleColor = "#706f6f";
config.textColor = "#999999";
config.waveColor = infoElement.couleur;
//config.waveTextColor = "#6DA398";
config.waveAnimateTime = 5000;
config.waveHeight = 0;
config.waveAnimate = false;
config.waveCount = 2;
config.waveOffset = 0.25;
config.textSize = 1.2;
config.displayPercent = false;
var poids = (element.poids * 0.001).toFixed(1);
var jauge = loadLiquidFillGauge(idJauge, poids, config);
jauges[idJauge] = jauge;
// add the "kg" label above the gauge
svgJauge.select("g").append("text").attr("x", 50).attr("y", -6).attr("class", "liquidFillGaugeText").attr("text-anchor", "middle").style("fill", "#fdbe63").text("kg");
// move the group back to position (0, 0) (needed because the gauge text was overflowing the area)
svgJauge.select("g").attr("transform", "translate(0, 50)");
// add the image inside the gauge
svgJauge.select("g").append("image").attr({
"xlink:href": infoElement.cheminImage,
"x": 0,
"y": 0,
"height": 50,
"width": 50,
"transform": "translate(25, 25)",
});
});
// update the chart data with the current season's data
updateDiagrammeJoseph(saisonActuelle);
// update the selected radio button
$("#radioJoseph_" + saisonActuelle).attr("checked", "");
/**
 * updates the data of Joseph's chart (the gauges) according to the season
 * @param {string} saison can take 5 values: "printemps", "ete", "automne", "hiver", "total"
*/
function updateDiagrammeJoseph(saison) {
// retrieve the data for the given season
var donneesActuelles = donneesJardinJoseph[saison] || [];
// update each product's gauge
donneesActuelles.forEach(function(element, index, tableau) {
var infosElement = infosJardinJoseph[element.nomProduit];
var jauge = jauges[infosElement.idJauge];
var poids = (element.poids * 0.001).toFixed(1);
jauge.update(poids);
});
// update the total
var total = [ calculerPoidsTotal(donneesActuelles) ];
var balisesTotal = d3.select("#totalJoseph").selectAll("p").data(total).transition().duration(1000).tween("text", function() {
var i = d3.interpolateRound(Number(this.textContent), total[0]);
return function(t) {
this.textContent = i(t);
};
});
// tween animates the update of a number
// update the season image
$("#imageSaison").attr("src", "./img/joseph/background_" + saison + ".png");
}
/**
 * returns the total weight of vegetables harvested for the data passed in
 * @param {Array} donnees the list of products, each product stored as an object with the attributes "nomProduit" and "poids"
 * @returns {string} the total weight in kg with one decimal place (toFixed returns a string)
*/
function calculerPoidsTotal(donnees) {
var total = donnees.reduce(function(prec, elem, indice, tab) {
return prec + elem.poids;
}, 0);
return (total * 0.001).toFixed(1);
}
/**
 * returns the current season (the season matching today's date)
 * @returns {string} the current season, one of "printemps", "ete", "automne", "hiver"
*/
function getSaisonActuelle() {
var today = new Date();
var mois = today.getMonth();
var jour = today.getDate(); // day of the month (getDay() returns the weekday, which would be wrong here)
// getMonth(): january = 0, february = 1, ...
if(mois >= 2 && mois <= 5) { // between March and June
if(mois == 2 && jour < 20) return "hiver";
else if(mois == 5 && jour > 20) return "ete";
else return "printemps";
}
else if(mois >= 5 && mois <= 8) { // between June and September
if(mois == 5 && jour < 21) return "printemps";
else if(mois == 8 && jour > 22) return "automne";
else return "ete";
}
else if(mois >= 8 && mois <= 11) { // between September and December
if(mois == 8 && jour < 23) return "ete";
else if(mois == 11 && jour > 20) return "hiver";
else return "automne";
}
else return "hiver";
}
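// Illustrative check (not in the original source, assuming the getDate() fix above): for
// new Date(2020, 2, 25), i.e. March 25, mois is 2 and jour is 25, so the function returns "printemps".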
|
{nomProduit: "Aromatiques", poids: 3800},
{nomProduit: "Fruits", poids: 30010},
{nomProduit: "Autres", poids: 11965}
|
random_line_split
|
diagrammeJoseph.js
|
"use strict";
// based on: http://bl.ocks.org/brattonc/5e5ce9beee483220e2f6#index.html
// the data
var donneesJardinJoseph = {
"printemps": [
{nomProduit: "Racines", poids: 12850},
{nomProduit: "Poids et Haricots", poids: 10934},
{nomProduit: "Choux", poids: 8760},
{nomProduit: "Epinards et Salades", poids: 19851},
{nomProduit: "Courges", poids: 77400},
{nomProduit: "Tomates", poids: 35314},
{nomProduit: "Aromatiques", poids: 1260},
{nomProduit: "Fruits", poids: 11600},
{nomProduit: "Autres", poids: 2400}
],
"ete": [
{nomProduit: "Racines", poids: 7200},
{nomProduit: "Poids et Haricots", poids: 5466},
{nomProduit: "Choux", poids: 0},
{nomProduit: "Epinards et Salades", poids: 6671},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 88286},
{nomProduit: "Aromatiques", poids: 1267},
{nomProduit: "Fruits", poids: 2695},
{nomProduit: "Autres", poids: 7265}
],
"automne": [
{nomProduit: "Racines", poids: 0},
{nomProduit: "Poids et Haricots", poids: 0},
{nomProduit: "Choux", poids: 2920},
{nomProduit: "Epinards et Salades", poids: 0},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 0},
{nomProduit: "Aromatiques", poids: 1273},
{nomProduit: "Fruits", poids: 15715},
{nomProduit: "Autres", poids: 2300}
],
"hiver": [
{nomProduit: "Racines", poids: 0},
{nomProduit: "Poids et Haricots", poids: 0},
{nomProduit: "Choux", poids: 2920},
{nomProduit: "Epinards et Salades", poids: 13233},
{nomProduit: "Courges", poids: 0},
{nomProduit: "Tomates", poids: 0},
{nomProduit: "Aromatiques", poids: 0},
{nomProduit: "Fruits", poids: 0},
{nomProduit: "Autres", poids: 0}
],
"total": [
{nomProduit: "Racines", poids: 20050},
{nomProduit: "Poids et Haricots", poids: 16400},
{nomProduit: "Choux", poids: 14600},
{nomProduit: "Epinards et Salades", poids: 39755},
{nomProduit: "Courges", poids: 77400},
{nomProduit: "Tomates", poids: 123600},
{nomProduit: "Aromatiques", poids: 3800},
{nomProduit: "Fruits", poids: 30010},
{nomProduit: "Autres", poids: 11965}
]
};
var infosJardinJoseph = {
"Racines": {
idJauge: "jauge_racine",
couleur: "#f7bd48",
maximum: 20050,
cheminImage: "./img/joseph/racines.svg"
},
"Poids et Haricots": {
idJauge: "jauge_haricots",
couleur: "#c96d63",
maximum: 16400,
cheminImage: "./img/joseph/haricots.svg"
},
"Choux": {
idJauge: "jauge_choux",
couleur: "#b5ff9c",
maximum: 14600,
cheminImage: "./img/joseph/choux.svg"
},
"Epinards et Salades": {
idJauge: "jauge_salades",
couleur: "#c9ff73",
maximum: 39755,
cheminImage: "./img/joseph/salades.svg"
},
"Courges": {
idJauge: "jauge_courges",
couleur: "#d6ff38",
maximum: 77400,
cheminImage: "./img/joseph/courges.svg"
},
"Tomates": {
idJauge: "jauge_tomates",
couleur: "#ee7268",
maximum: 123600,
cheminImage: "./img/joseph/tomates.svg"
},
"Aromatiques": {
idJauge: "jauge_aromatiques",
couleur: "#aef86e",
maximum: 3800,
cheminImage: "./img/joseph/aromatiques.svg"
},
"Fruits": {
idJauge: "jauge_fruits",
couleur: "#ffa87b",
maximum: 30010,
cheminImage: "./img/joseph/fruits.svg"
},
"Autres": {
idJauge: "jauge_autres",
couleur: "#f7ff64",
maximum: 11965,
cheminImage: "./img/joseph/autres.svg"
}
};
// used to store the gauge updaters (GaugeUpdater)
var jauges = {};
// retrieve the data for the current season
var saisonActuelle = getSaisonActuelle();
var donneesActuelles = donneesJardinJoseph[saisonActuelle];
// create the element that holds the total
var total = [ calculerPoidsTotal(donneesActuelles) ];
d3.select("#totalJoseph").selectAll("p").data(total).enter().append("p").text(function(d) {
return d;
});
// create one element per product
donneesActuelles.forEach(function(element, index, tableau) {
var infoElement = infosJardinJoseph[element.nomProduit];
var idJauge = infoElement.idJauge;
// create the element that holds the gauge
var baliseJauge = d3.select("#diagrammeJoseph").append("div").attr("class", "baliseJauge");
var svgJauge = baliseJauge.append("svg").attr({
"id": idJauge,
"viewBox": "0 0 100 150",
//"height": 150,
//"width": 100
});
// gauge configuration
var config = liquidFillGaugeDefaultSettings();
config.minValue = 0;
config.maxValue = (infoElement.maximum * 0.001).toFixed(1);
config.circleThickness = 0.02; // outer circle thickness
config.circleFillGap = 0; // gap between the outer and inner circles
config.textVertPosition = 1.6; // position the text above the gauge
config.circleColor = "#706f6f";
config.textColor = "#999999";
config.waveColor = infoElement.couleur;
//config.waveTextColor = "#6DA398";
config.waveAnimateTime = 5000;
config.waveHeight = 0;
config.waveAnimate = false;
config.waveCount = 2;
config.waveOffset = 0.25;
config.textSize = 1.2;
config.displayPercent = false;
var poids = (element.poids * 0.001).toFixed(1);
var jauge = loadLiquidFillGauge(idJauge, poids, config);
jauges[idJauge] = jauge;
// add the "kg" label above the gauge
svgJauge.select("g").append("text").attr("x", 50).attr("y", -6).attr("class", "liquidFillGaugeText").attr("text-anchor", "middle").style("fill", "#fdbe63").text("kg");
// move the group back to position (0, 0) (needed because the gauge text was overflowing the area)
svgJauge.select("g").attr("transform", "translate(0, 50)");
// add the image inside the gauge
svgJauge.select("g").append("image").attr({
"xlink:href": infoElement.cheminImage,
"x": 0,
"y": 0,
"height": 50,
"width": 50,
"transform": "translate(25, 25)",
});
});
// update the chart data with the current season's data
updateDiagrammeJoseph(saisonActuelle);
// update the selected radio button
$("#radioJoseph_" + saisonActuelle).attr("checked", "");
/**
 * updates the data of Joseph's chart (the gauges) according to the season
 * @param {string} saison can take 5 values: "printemps", "ete", "automne", "hiver", "total"
*/
function updateDiagrammeJoseph(saison) {
// retrieve the data for the given season
var donneesActuelles = donneesJardinJoseph[saison] || [];
// update each product's gauge
donneesActuelles.forEach(function(element, index, tableau) {
var infosElement = infosJardinJoseph[element.nomProduit];
var jauge = jauges[infosElement.idJauge];
var poids = (element.poids * 0.001).toFixed(1);
jauge.update(poids);
});
// update the total
var total = [ calculerPoidsTotal(donneesActuelles) ];
var balisesTotal = d3.select("#totalJoseph").selectAll("p").data(total).transition().duration(1000).tween("text", function() {
var i = d3.interpolateRound(Number(this.textContent), total[0]);
return function(t) {
this.textContent = i(t);
};
});
// tween animates the update of a number
// update the season image
$("#imageSaison").attr("src", "./img/joseph/background_" + saison + ".png");
}
/**
 * returns the total weight of vegetables harvested for the data passed in
 * @param {Array} donnees the list of products, each product stored as an object with the attributes "nomProduit" and "poids"
 * @returns {string} the total weight in kg with one decimal place (toFixed returns a string)
*/
function calculerPoidsTotal(donnees) {
v
|
.reduce(function(prec, elem, indice, tab) {
return prec + elem.poids;
}, 0);
return (total * 0.001).toFixed(1);
}
/**
 * returns the current season (the season matching today's date)
 * @returns {string} the current season, one of "printemps", "ete", "automne", "hiver"
*/
function getSaisonActuelle() {
var today = new Date();
var mois = today.getMonth();
var jour = today.getDate(); // day of the month (getDay() returns the weekday, which would be wrong here)
// getMonth(): january = 0, february = 1, ...
if(mois >= 2 && mois <= 5) { // between March and June
if(mois == 2 && jour < 20) return "hiver";
else if(mois == 5 && jour > 20) return "ete";
else return "printemps";
}
else if(mois >= 5 && mois <= 8) { // between June and September
if(mois == 5 && jour < 21) return "printemps";
else if(mois == 8 && jour > 22) return "automne";
else return "ete";
}
else if(mois >= 8 && mois <= 11) { // between September and December
if(mois == 8 && jour < 23) return "ete";
else if(mois == 11 && jour > 20) return "hiver";
else return "automne";
}
else return "hiver";
}
|
ar total = donnees
|
identifier_name
|
about.ts
|
/**
* @module Core
*/
/// <reference path="corePlugin.ts"/>
module Core {
_module.controller("Core.AboutController", ["$scope", "$location", "jolokia", "branding", "localStorage", ($scope, $location, jolokia, branding, localStorage) => {
var log:Logging.Logger = Logger.get("About");
// load the about.md file
$.ajax({
url: "app/core/doc/about.md",
dataType: 'html',
cache: false,
success: function (data, textStatus, jqXHR) {
$scope.html = "Unable to download about.md";
if (angular.isDefined(data))
|
Core.$apply($scope);
},
error: function (jqXHR, textStatus, errorThrown) {
$scope.html = "Unable to download about.md";
Core.$apply($scope);
}
})
}]);
}
|
{
$scope.html = marked(data);
$scope.branding = branding;
$scope.customBranding = branding.enabled;
try {
$scope.hawtioVersion = jolokia.request({
type: "read",
mbean: "hawtio:type=About",
attribute: "HawtioVersion"
}).value;
} catch (Error) {
// ignore
$scope.hawtioVersion = "N/A";
}
$scope.jolokiaVersion = jolokia.version().agent;
$scope.serverProduct = jolokia.version().info.product;
$scope.serverVendor = jolokia.version().info.vendor;
$scope.serverVersion = jolokia.version().info.version;
}
|
conditional_block
|
about.ts
|
/**
* @module Core
*/
/// <reference path="corePlugin.ts"/>
module Core {
_module.controller("Core.AboutController", ["$scope", "$location", "jolokia", "branding", "localStorage", ($scope, $location, jolokia, branding, localStorage) => {
var log:Logging.Logger = Logger.get("About");
// load the about.md file
$.ajax({
url: "app/core/doc/about.md",
dataType: 'html',
|
$scope.branding = branding;
$scope.customBranding = branding.enabled;
try {
$scope.hawtioVersion = jolokia.request({
type: "read",
mbean: "hawtio:type=About",
attribute: "HawtioVersion"
}).value;
} catch (Error) {
// ignore
$scope.hawtioVersion = "N/A";
}
$scope.jolokiaVersion = jolokia.version().agent;
$scope.serverProduct = jolokia.version().info.product;
$scope.serverVendor = jolokia.version().info.vendor;
$scope.serverVersion = jolokia.version().info.version;
}
Core.$apply($scope);
},
error: function (jqXHR, textStatus, errorThrown) {
$scope.html = "Unable to download about.md";
Core.$apply($scope);
}
})
}]);
}
|
cache: false,
success: function (data, textStatus, jqXHR) {
$scope.html = "Unable to download about.md";
if (angular.isDefined(data)) {
$scope.html = marked(data);
|
random_line_split
|
service.rs
|
use crate::protocol;
#[cfg(windows)]
use core::os::process::windows_child::{ChildStderr,
ChildStdout,
ExitStatus};
use core::util::BufReadLossy;
use habitat_common::output::{self,
StructuredOutput};
#[cfg(unix)]
use std::process::{ChildStderr,
ChildStdout,
ExitStatus};
use std::{fmt,
io::{self,
BufReader,
Read},
thread};
pub use crate::sys::service::*;
pub struct Service {
args: protocol::Spawn,
process: Process,
}
impl Service {
pub fn new(spawn: protocol::Spawn,
process: Process,
stdout: Option<ChildStdout>,
stderr: Option<ChildStderr>)
-> Self {
if let Some(stdout) = stdout {
let id = spawn.id.to_string();
thread::Builder::new().name(format!("{}-out", spawn.id))
.spawn(move || pipe_stdout(stdout, &id))
.ok();
}
if let Some(stderr) = stderr {
let id = spawn.id.to_string();
thread::Builder::new().name(format!("{}-err", spawn.id))
.spawn(move || pipe_stderr(stderr, &id))
.ok();
}
Service { args: spawn,
process }
}
pub fn args(&self) -> &protocol::Spawn { &self.args }
pub fn
|
(&self) -> u32 { self.process.id() }
    /// Attempt to gracefully terminate a process and then forcefully kill it after
/// 8 seconds if it has not terminated.
pub fn kill(&mut self) -> protocol::ShutdownMethod { self.process.kill() }
pub fn name(&self) -> &str { &self.args.id }
pub fn take_args(self) -> protocol::Spawn { self.args }
pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { self.process.try_wait() }
pub fn wait(&mut self) -> io::Result<ExitStatus> { self.process.wait() }
}
impl fmt::Debug for Service {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Service {{ pid: {:?} }}", self.process.id())
}
}
/// Consume output from a child process until EOF, then finish
fn pipe_stdout<T>(out: T, id: &str)
where T: Read
{
for line in BufReader::new(out).lines_lossy() {
match line {
Ok(line) => {
let so = StructuredOutput::succinct(id, "O", output::get_format(), &line);
if let Err(e) = so.println() {
println!("printing output: '{}' to stdout resulted in error: {}",
&line, e);
}
}
Err(e) => {
println!("reading output from to stdout resulted in error: {}", e);
break;
}
}
}
}
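// Illustrative note (not in the original source): Service::new spawns pipe_stdout on a thread
// named "<id>-out", so a child with id "web" gets each stdout line logged as a succinct "O"
// record tagged "web" via StructuredOutput::succinct.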
/// Consume standard error from a child process until EOF, then finish
fn pipe_stderr<T>(err: T, id: &str)
where T: Read
{
for line in BufReader::new(err).lines_lossy() {
match line {
Ok(line) => {
let so = StructuredOutput::succinct(id, "E", output::get_format(), &line);
if let Err(e) = so.eprintln() {
println!("printing output: '{}' to stderr resulted in error: {}",
&line, e);
}
}
Err(e) => {
println!("reading output from to stderr resulted in error: {}", e);
break;
}
}
}
}
|
id
|
identifier_name
|
service.rs
|
use crate::protocol;
#[cfg(windows)]
use core::os::process::windows_child::{ChildStderr,
ChildStdout,
ExitStatus};
use core::util::BufReadLossy;
use habitat_common::output::{self,
StructuredOutput};
#[cfg(unix)]
use std::process::{ChildStderr,
ChildStdout,
ExitStatus};
use std::{fmt,
io::{self,
BufReader,
Read},
thread};
pub use crate::sys::service::*;
pub struct Service {
args: protocol::Spawn,
process: Process,
}
impl Service {
pub fn new(spawn: protocol::Spawn,
process: Process,
stdout: Option<ChildStdout>,
stderr: Option<ChildStderr>)
-> Self {
if let Some(stdout) = stdout {
let id = spawn.id.to_string();
thread::Builder::new().name(format!("{}-out", spawn.id))
.spawn(move || pipe_stdout(stdout, &id))
.ok();
}
if let Some(stderr) = stderr {
let id = spawn.id.to_string();
thread::Builder::new().name(format!("{}-err", spawn.id))
.spawn(move || pipe_stderr(stderr, &id))
.ok();
}
Service { args: spawn,
process }
}
pub fn args(&self) -> &protocol::Spawn { &self.args }
pub fn id(&self) -> u32 { self.process.id() }
    /// Attempt to gracefully terminate a process and then forcefully kill it after
/// 8 seconds if it has not terminated.
pub fn kill(&mut self) -> protocol::ShutdownMethod { self.process.kill() }
pub fn name(&self) -> &str { &self.args.id }
pub fn take_args(self) -> protocol::Spawn { self.args }
pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { self.process.try_wait() }
pub fn wait(&mut self) -> io::Result<ExitStatus> { self.process.wait() }
}
impl fmt::Debug for Service {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Service {{ pid: {:?} }}", self.process.id())
}
}
/// Consume output from a child process until EOF, then finish
fn pipe_stdout<T>(out: T, id: &str)
where T: Read
|
/// Consume standard error from a child process until EOF, then finish
fn pipe_stderr<T>(err: T, id: &str)
where T: Read
{
for line in BufReader::new(err).lines_lossy() {
match line {
Ok(line) => {
let so = StructuredOutput::succinct(id, "E", output::get_format(), &line);
if let Err(e) = so.eprintln() {
println!("printing output: '{}' to stderr resulted in error: {}",
&line, e);
}
}
Err(e) => {
println!("reading output from to stderr resulted in error: {}", e);
break;
}
}
}
}
|
{
for line in BufReader::new(out).lines_lossy() {
match line {
Ok(line) => {
let so = StructuredOutput::succinct(id, "O", output::get_format(), &line);
if let Err(e) = so.println() {
println!("printing output: '{}' to stdout resulted in error: {}",
&line, e);
}
}
Err(e) => {
println!("reading output from to stdout resulted in error: {}", e);
break;
}
}
}
}
|
identifier_body
|
service.rs
|
use crate::protocol;
#[cfg(windows)]
use core::os::process::windows_child::{ChildStderr,
ChildStdout,
ExitStatus};
use core::util::BufReadLossy;
use habitat_common::output::{self,
StructuredOutput};
#[cfg(unix)]
use std::process::{ChildStderr,
ChildStdout,
ExitStatus};
use std::{fmt,
io::{self,
BufReader,
Read},
thread};
pub use crate::sys::service::*;
pub struct Service {
args: protocol::Spawn,
process: Process,
}
|
impl Service {
pub fn new(spawn: protocol::Spawn,
process: Process,
stdout: Option<ChildStdout>,
stderr: Option<ChildStderr>)
-> Self {
if let Some(stdout) = stdout {
let id = spawn.id.to_string();
thread::Builder::new().name(format!("{}-out", spawn.id))
.spawn(move || pipe_stdout(stdout, &id))
.ok();
}
if let Some(stderr) = stderr {
let id = spawn.id.to_string();
thread::Builder::new().name(format!("{}-err", spawn.id))
.spawn(move || pipe_stderr(stderr, &id))
.ok();
}
Service { args: spawn,
process }
}
pub fn args(&self) -> &protocol::Spawn { &self.args }
pub fn id(&self) -> u32 { self.process.id() }
    /// Attempt to gracefully terminate a process and then forcefully kill it after
/// 8 seconds if it has not terminated.
pub fn kill(&mut self) -> protocol::ShutdownMethod { self.process.kill() }
pub fn name(&self) -> &str { &self.args.id }
pub fn take_args(self) -> protocol::Spawn { self.args }
pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { self.process.try_wait() }
pub fn wait(&mut self) -> io::Result<ExitStatus> { self.process.wait() }
}
impl fmt::Debug for Service {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Service {{ pid: {:?} }}", self.process.id())
}
}
/// Consume output from a child process until EOF, then finish
fn pipe_stdout<T>(out: T, id: &str)
where T: Read
{
for line in BufReader::new(out).lines_lossy() {
match line {
Ok(line) => {
let so = StructuredOutput::succinct(id, "O", output::get_format(), &line);
if let Err(e) = so.println() {
println!("printing output: '{}' to stdout resulted in error: {}",
&line, e);
}
}
Err(e) => {
println!("reading output from to stdout resulted in error: {}", e);
break;
}
}
}
}
/// Consume standard error from a child process until EOF, then finish
fn pipe_stderr<T>(err: T, id: &str)
where T: Read
{
for line in BufReader::new(err).lines_lossy() {
match line {
Ok(line) => {
let so = StructuredOutput::succinct(id, "E", output::get_format(), &line);
if let Err(e) = so.eprintln() {
println!("printing output: '{}' to stderr resulted in error: {}",
&line, e);
}
}
Err(e) => {
println!("reading output from to stderr resulted in error: {}", e);
break;
}
}
}
}
|
random_line_split
|
|
bind-by-move-neither-can-live-while-the-other-survives-4.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct
|
{ x: (), }
impl Drop for X {
fn drop(&mut self) {
println!("destructor runs");
}
}
fn main() {
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
}
|
X
|
identifier_name
|
bind-by-move-neither-can-live-while-the-other-survives-4.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
impl Drop for X {
fn drop(&mut self) {
println!("destructor runs");
}
}
fn main()
|
{
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
}
|
identifier_body
|
|
bind-by-move-neither-can-live-while-the-other-survives-4.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
impl Drop for X {
fn drop(&mut self) {
println!("destructor runs");
}
}
fn main() {
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
}
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
|
random_line_split
|
app.js
|
import React from 'react'
import {render} from 'react-dom'
import MuiThemeProvider from 'material-ui/styles/MuiThemeProvider'
import themeDefault from './themeDefault'
import injectTapEventPlugin from 'react-tap-event-plugin'
import router from './router.js'
import IPFSDAO from './dao/IPFSDAO'
import OrbitDAO from './dao/OrbitDAO'
import './styles.scss'
import 'font-awesome/css/font-awesome.css'
import 'flexboxgrid/css/flexboxgrid.css'
import ErrorPage from './pages/ErrorPage'
class App {
|
() {
IPFSDAO.init().then(ipfsNode => {
OrbitDAO.init(ipfsNode)
/** Needed for onTouchTap @link http://stackoverflow.com/a/34015469/988941 */
injectTapEventPlugin()
render(
<MuiThemeProvider muiTheme={themeDefault}>{router}</MuiThemeProvider>,
document.getElementById('react-root')
)
}).catch(e => {
render(
<MuiThemeProvider muiTheme={themeDefault}>
<ErrorPage error={e} />
</MuiThemeProvider>,
document.getElementById('react-root')
)
})
}
}
export default new App()
|
start
|
identifier_name
|
app.js
|
import React from 'react'
import {render} from 'react-dom'
import MuiThemeProvider from 'material-ui/styles/MuiThemeProvider'
import themeDefault from './themeDefault'
import injectTapEventPlugin from 'react-tap-event-plugin'
import router from './router.js'
import IPFSDAO from './dao/IPFSDAO'
import OrbitDAO from './dao/OrbitDAO'
import './styles.scss'
import 'font-awesome/css/font-awesome.css'
import 'flexboxgrid/css/flexboxgrid.css'
|
import ErrorPage from './pages/ErrorPage'
class App {
start () {
IPFSDAO.init().then(ipfsNode => {
OrbitDAO.init(ipfsNode)
/** Needed for onTouchTap @link http://stackoverflow.com/a/34015469/988941 */
injectTapEventPlugin()
render(
<MuiThemeProvider muiTheme={themeDefault}>{router}</MuiThemeProvider>,
document.getElementById('react-root')
)
}).catch(e => {
render(
<MuiThemeProvider muiTheme={themeDefault}>
<ErrorPage error={e} />
</MuiThemeProvider>,
document.getElementById('react-root')
)
})
}
}
export default new App()
|
random_line_split
|
|
main.py
|
#! /usr/bin/python
import re
import os
import sys
import time
import pydas.communicator as apiMidas
import pydas.exceptions as pydasException
import uuid
import json
import shutil
from zipfile import ZipFile, ZIP_DEFLATED
from subprocess import Popen, PIPE, STDOUT
from contextlib import closing
# Load configuration file
def loadConfig(filename):
try: configfile = open(filename, "r")
except Exception, e: raise
try: configtext = configfile.read()
except Exception, e: raise
pattern = re.compile("\\n([\w_]+)[\t ]*([\w: \\\/~.-]+)")
# Find all matches to this pattern in the text of the config file
tuples = re.findall(pattern, configtext)
# Create a new dictionary and fill it: for every tuple (key, value) in
# the 'tuples' list, set ret[key] to value
ret = dict()
for x in tuples: ret[x[0]] = x[1]
# Return the fully-loaded dictionary object
return ret
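# Illustrative config.cfg content matching the pattern above (key names taken from the lookups
# below; the values themselves are assumptions):
#   url http://midas.example.org
#   securityKey abc123
#   os linux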
# Set internal configuration
def setInternalConfig(email, apikey, token):
try: configfile = open('config.internal.cfg', "w")
except Exception, e: raise
configfile.write("\nemail "+email)
configfile.write("\napikey "+apikey)
configfile.write("\ntoken "+token)
configfile.close()
return
# Register a server to Midas
def registerServer():
"""
Register Server
"""
cfg = loadConfig('config.cfg')
if os.path.exists('config.internal.cfg') == False:
setInternalConfig('undefined', 'undefined', 'undefined')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['email'] = cfginternal['email']+'@example.org'
parameters['securitykey'] = cfg['securityKey']
parameters['apikey'] = cfginternal['apikey']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
parameters = dict()
parameters['securitykey'] = cfg['securityKey']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
print "Unable to Register. Please check the configuration."
return False
setInternalConfig(response['email'], response['apikey'], response['token'])
print "Registered"
return True
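# Illustrative flow (summarising the code above): the first request reuses the stored email/apikey;
# if Midas rejects them, the fallback request registers a fresh server using only the security key
# and the OS, and the new credentials are then written back to config.internal.cfg.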
# Keep a server alive on Midas
def keepAliveServer():
"""
Keep Alive
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.keepaliveserver', parameters)
except pydasException.PydasException, e:
print "Keep aline failed"
print e
return False
return response
# Send results to Midas
def sendResults(file):
"""
Send Results
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.resultsserver', parameters, file)
except pydasException.PydasException, e:
print "Unable to send results"
print e
return False
return response
# Handle Midas command
def handleMidasResponse(response):
"""
Handle response
"""
if response['action'] == 'wait':
print "Wait"
time.sleep(120)
elif response['action'] == 'process':
params = json.loads(response['params'])
script = response['script']
#Create processing folder
unique_name = str(uuid.uuid4())
pathProcessingFolder = sys.path[0]+'/tmp/'+unique_name
os.mkdir(pathProcessingFolder)
os.mkdir(pathProcessingFolder+'/script')
os.mkdir(pathProcessingFolder+'/results')
#Create Script file
try: scriptFile = open(pathProcessingFolder+'/script/script.py', "w")
except Exception, e: raise
scriptFile.write(script)
scriptFile.close()
#Create Params file
try: scriptFile = open(pathProcessingFolder+'/results/parameters.txt', "w")
except Exception, e: raise
scriptFile.write(response['params'])
scriptFile.close()
inputFiles = params['input']
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
if inputFiles:
print "Download Data"
for file in inputFiles:
interfaceMidas.downloadItem(file, pathProcessingFolder+'/script', cfginternal['token'])
print "Run script"
os.chdir(pathProcessingFolder+'/script/')
cmd = sys.executable+" "+pathProcessingFolder+'/script/script.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False)
p.wait()
stdout = p.stdout.read()
os.chdir(sys.path[0])
#Create Log files
try: scriptFile = open(pathProcessingFolder+'/results/log.txt', "w")
except Exception, e: raise
scriptFile.write(stdout)
scriptFile.close()
outputFiles = params['output']
if outputFiles:
for file in outputFiles:
if os.path.exists(pathProcessingFolder+'/script/'+file):
os.rename(pathProcessingFolder+'/script/'+file, pathProcessingFolder+'/results/'+file)
zipdir(pathProcessingFolder+'/results', pathProcessingFolder+'/results.zip')
print "Sending results"
sendResults(pathProcessingFolder+'/results.zip')
shutil.rmtree(pathProcessingFolder)
else:
print "Error, Unable to find command"
return False
return True
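# Illustrative response shapes handled above (field names taken from the code, values are assumptions):
#   {'action': 'wait'}
#   {'action': 'process', 'script': '<python source>',
#    'params': '{"input": [123], "output": ["out.mha"]}'}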
def zipdir(basedir, archivename):
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir)+len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
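# Illustrative usage (mirrors the call in handleMidasResponse above):
#   zipdir(pathProcessingFolder + '/results', pathProcessingFolder + '/results.zip')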
# ------ Main --------
if __name__ == "__main__":
#Set directory location
while True:
os.chdir(sys.path[0])
registered = registerServer()
# Create tmp directory
if os.path.exists(sys.path[0]+'/tmp') == False:
os.mkdir('tmp')
if registered == True:
response = keepAliveServer()
if response != False:
|
else:
time.sleep(120)
else:
time.sleep(120)
|
handleMidasResponse(response)
|
conditional_block
|
main.py
|
#! /usr/bin/python
import re
import os
import sys
import time
import pydas.communicator as apiMidas
import pydas.exceptions as pydasException
import uuid
import json
import shutil
from zipfile import ZipFile, ZIP_DEFLATED
from subprocess import Popen, PIPE, STDOUT
from contextlib import closing
# Load configuration file
def loadConfig(filename):
try: configfile = open(filename, "r")
except Exception, e: raise
try: configtext = configfile.read()
except Exception, e: raise
pattern = re.compile("\\n([\w_]+)[\t ]*([\w: \\\/~.-]+)")
# Find all matches to this pattern in the text of the config file
tuples = re.findall(pattern, configtext)
# Create a new dictionary and fill it: for every tuple (key, value) in
# the 'tuples' list, set ret[key] to value
ret = dict()
for x in tuples: ret[x[0]] = x[1]
# Return the fully-loaded dictionary object
return ret
# Set internal configuration
def setInternalConfig(email, apikey, token):
try: configfile = open('config.internal.cfg', "w")
except Exception, e: raise
configfile.write("\nemail "+email)
configfile.write("\napikey "+apikey)
configfile.write("\ntoken "+token)
configfile.close()
return
# Register a server to Midas
def registerServer():
"""
Register Server
"""
cfg = loadConfig('config.cfg')
if os.path.exists('config.internal.cfg') == False:
setInternalConfig('undefined', 'undefined', 'undefined')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['email'] = cfginternal['email']+'@example.org'
parameters['securitykey'] = cfg['securityKey']
parameters['apikey'] = cfginternal['apikey']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
parameters = dict()
parameters['securitykey'] = cfg['securityKey']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
print "Unable to Register. Please check the configuration."
return False
setInternalConfig(response['email'], response['apikey'], response['token'])
print "Registered"
return True
# Keep a server alive on Midas
def keepAliveServer():
"""
Keep Alive
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.keepaliveserver', parameters)
except pydasException.PydasException, e:
print "Keep aline failed"
print e
return False
return response
# Send results to Midas
def sendResults(file):
"""
Send Results
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.resultsserver', parameters, file)
except pydasException.PydasException, e:
print "Unable to send results"
print e
return False
return response
# Handle Midas command
def handleMidasResponse(response):
"""
Handle response
"""
if response['action'] == 'wait':
print "Wait"
time.sleep(120)
elif response['action'] == 'process':
params = json.loads(response['params'])
script = response['script']
#Create processing folder
unique_name = str(uuid.uuid4())
|
pathProcessingFolder = sys.path[0]+'/tmp/'+unique_name
os.mkdir(pathProcessingFolder)
os.mkdir(pathProcessingFolder+'/script')
os.mkdir(pathProcessingFolder+'/results')
#Create Script file
try: scriptFile = open(pathProcessingFolder+'/script/script.py', "w")
except Exception, e: raise
scriptFile.write(script)
scriptFile.close()
#Create Params file
try: scriptFile = open(pathProcessingFolder+'/results/parameters.txt', "w")
except Exception, e: raise
scriptFile.write(response['params'])
scriptFile.close()
inputFiles = params['input']
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
if inputFiles:
print "Download Data"
for file in inputFiles:
interfaceMidas.downloadItem(file, pathProcessingFolder+'/script', cfginternal['token'])
print "Run script"
os.chdir(pathProcessingFolder+'/script/')
cmd = sys.executable+" "+pathProcessingFolder+'/script/script.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False)
p.wait()
stdout = p.stdout.read()
os.chdir(sys.path[0])
#Create Log files
try: scriptFile = open(pathProcessingFolder+'/results/log.txt', "w")
except Exception, e: raise
scriptFile.write(stdout)
scriptFile.close()
outputFiles = params['output']
if outputFiles:
for file in outputFiles:
if os.path.exists(pathProcessingFolder+'/script/'+file):
os.rename(pathProcessingFolder+'/script/'+file, pathProcessingFolder+'/results/'+file)
zipdir(pathProcessingFolder+'/results', pathProcessingFolder+'/results.zip')
print "Sending results"
sendResults(pathProcessingFolder+'/results.zip')
shutil.rmtree(pathProcessingFolder)
else:
print "Error, Unable to find command"
return False
return True
def zipdir(basedir, archivename):
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir)+len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
# ------ Main --------
if __name__ == "__main__":
#Set directory location
while True:
os.chdir(sys.path[0])
registered = registerServer()
# Create tmp directory
if os.path.exists(sys.path[0]+'/tmp') == False:
os.mkdir('tmp')
if registered == True:
response = keepAliveServer()
if response != False:
handleMidasResponse(response)
else:
time.sleep(120)
else:
time.sleep(120)
|
random_line_split
|
|
main.py
|
#! /usr/bin/python
import re
import os
import sys
import time
import pydas.communicator as apiMidas
import pydas.exceptions as pydasException
import uuid
import json
import shutil
from zipfile import ZipFile, ZIP_DEFLATED
from subprocess import Popen, PIPE, STDOUT
from contextlib import closing
# Load configuration file
def loadConfig(filename):
try: configfile = open(filename, "r")
except Exception, e: raise
try: configtext = configfile.read()
except Exception, e: raise
pattern = re.compile("\\n([\w_]+)[\t ]*([\w: \\\/~.-]+)")
# Find all matches to this pattern in the text of the config file
tuples = re.findall(pattern, configtext)
# Create a new dictionary and fill it: for every tuple (key, value) in
# the 'tuples' list, set ret[key] to value
ret = dict()
for x in tuples: ret[x[0]] = x[1]
# Return the fully-loaded dictionary object
return ret
# Set internal configuration
def setInternalConfig(email, apikey, token):
try: configfile = open('config.internal.cfg', "w")
except Exception, e: raise
configfile.write("\nemail "+email)
configfile.write("\napikey "+apikey)
configfile.write("\ntoken "+token)
configfile.close()
return
# Register a server to Midas
def registerServer():
"""
Register Server
"""
cfg = loadConfig('config.cfg')
if os.path.exists('config.internal.cfg') == False:
setInternalConfig('undefined', 'undefined', 'undefined')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['email'] = cfginternal['email']+'@example.org'
parameters['securitykey'] = cfg['securityKey']
parameters['apikey'] = cfginternal['apikey']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
parameters = dict()
parameters['securitykey'] = cfg['securityKey']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
print "Unable to Register. Please check the configuration."
return False
setInternalConfig(response['email'], response['apikey'], response['token'])
print "Registered"
return True
# Keep a server alive on Midas
def keepAliveServer():
"""
Keep Alive
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.keepaliveserver', parameters)
except pydasException.PydasException, e:
print "Keep aline failed"
print e
return False
return response
# Send results to Midas
def sendResults(file):
"""
Send Results
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.resultsserver', parameters, file)
except pydasException.PydasException, e:
print "Unable to send results"
print e
return False
return response
# Handle Midas command
def handleMidasResponse(response):
"""
Handle response
"""
if response['action'] == 'wait':
print "Wait"
time.sleep(120)
elif response['action'] == 'process':
params = json.loads(response['params'])
script = response['script']
#Create processing folder
unique_name = str(uuid.uuid4())
pathProcessingFolder = sys.path[0]+'/tmp/'+unique_name
os.mkdir(pathProcessingFolder)
os.mkdir(pathProcessingFolder+'/script')
os.mkdir(pathProcessingFolder+'/results')
#Create Script file
try: scriptFile = open(pathProcessingFolder+'/script/script.py', "w")
except Exception, e: raise
scriptFile.write(script)
scriptFile.close()
#Create Params file
try: scriptFile = open(pathProcessingFolder+'/results/parameters.txt', "w")
except Exception, e: raise
scriptFile.write(response['params'])
scriptFile.close()
inputFiles = params['input']
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
if inputFiles:
print "Download Data"
for file in inputFiles:
interfaceMidas.downloadItem(file, pathProcessingFolder+'/script', cfginternal['token'])
print "Run script"
os.chdir(pathProcessingFolder+'/script/')
cmd = sys.executable+" "+pathProcessingFolder+'/script/script.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False)
p.wait()
stdout = p.stdout.read()
os.chdir(sys.path[0])
#Create Log files
try: scriptFile = open(pathProcessingFolder+'/results/log.txt', "w")
except Exception, e: raise
scriptFile.write(stdout)
scriptFile.close()
outputFiles = params['output']
if outputFiles:
for file in outputFiles:
if os.path.exists(pathProcessingFolder+'/script/'+file):
os.rename(pathProcessingFolder+'/script/'+file, pathProcessingFolder+'/results/'+file)
zipdir(pathProcessingFolder+'/results', pathProcessingFolder+'/results.zip')
print "Sending results"
sendResults(pathProcessingFolder+'/results.zip')
shutil.rmtree(pathProcessingFolder)
else:
print "Error, Unable to find command"
return False
return True
def
|
(basedir, archivename):
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir)+len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
# ------ Main --------
if __name__ == "__main__":
#Set directory location
while True:
os.chdir(sys.path[0])
registered = registerServer()
# Create tmp directory
if os.path.exists(sys.path[0]+'/tmp') == False:
os.mkdir('tmp')
if registered == True:
response = keepAliveServer()
if response != False:
handleMidasResponse(response)
else:
time.sleep(120)
else:
time.sleep(120)
|
zipdir
|
identifier_name
|
main.py
|
#! /usr/bin/python
import re
import os
import sys
import time
import pydas.communicator as apiMidas
import pydas.exceptions as pydasException
import uuid
import json
import shutil
from zipfile import ZipFile, ZIP_DEFLATED
from subprocess import Popen, PIPE, STDOUT
from contextlib import closing
# Load configuration file
def loadConfig(filename):
try: configfile = open(filename, "r")
except Exception, e: raise
try: configtext = configfile.read()
except Exception, e: raise
pattern = re.compile("\\n([\w_]+)[\t ]*([\w: \\\/~.-]+)")
# Find all matches to this pattern in the text of the config file
tuples = re.findall(pattern, configtext)
# Create a new dictionary and fill it: for every tuple (key, value) in
# the 'tuples' list, set ret[key] to value
ret = dict()
for x in tuples: ret[x[0]] = x[1]
# Return the fully-loaded dictionary object
return ret
# Set internal configuration
def setInternalConfig(email, apikey, token):
try: configfile = open('config.internal.cfg', "w")
except Exception, e: raise
configfile.write("\nemail "+email)
configfile.write("\napikey "+apikey)
configfile.write("\ntoken "+token)
configfile.close()
return
# Register a server to Midas
def registerServer():
"""
Register Server
"""
cfg = loadConfig('config.cfg')
if os.path.exists('config.internal.cfg') == False:
setInternalConfig('undefined', 'undefined', 'undefined')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['email'] = cfginternal['email']+'@example.org'
parameters['securitykey'] = cfg['securityKey']
parameters['apikey'] = cfginternal['apikey']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
parameters = dict()
parameters['securitykey'] = cfg['securityKey']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.registerserver', parameters)
except pydasException.PydasException, e:
print "Unable to Register. Please check the configuration."
return False
setInternalConfig(response['email'], response['apikey'], response['token'])
print "Registered"
return True
# Register a server to Midas
def keepAliveServer():
"""
Keep Alive
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
parameters['os'] = cfg['os']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.keepaliveserver', parameters)
except pydasException.PydasException, e:
print "Keep aline failed"
print e
return False
return response
# Send results to Midas
def sendResults(file):
|
# Handle Midas command
def handleMidasResponse(response):
"""
Handle response
"""
if response['action'] == 'wait':
print "Wait"
time.sleep(120)
elif response['action'] == 'process':
params = json.loads(response['params'])
script = response['script']
#Create processing folder
unique_name = str(uuid.uuid4())
pathProcessingFolder = sys.path[0]+'/tmp/'+unique_name
os.mkdir(pathProcessingFolder)
os.mkdir(pathProcessingFolder+'/script')
os.mkdir(pathProcessingFolder+'/results')
#Create Script file
try: scriptFile = open(pathProcessingFolder+'/script/script.py', "w")
except Exception, e: raise
scriptFile.write(script)
scriptFile.close()
#Create Params file
try: scriptFile = open(pathProcessingFolder+'/results/parameters.txt', "w")
except Exception, e: raise
scriptFile.write(response['params'])
scriptFile.close()
inputFiles = params['input']
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
if inputFiles:
print "Download Data"
for file in inputFiles:
interfaceMidas.downloadItem(file, pathProcessingFolder+'/script', cfginternal['token'])
print "Run script"
os.chdir(pathProcessingFolder+'/script/')
cmd = sys.executable+" "+pathProcessingFolder+'/script/script.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False)
p.wait()
stdout = p.stdout.read()
os.chdir(sys.path[0])
#Create Log files
try: scriptFile = open(pathProcessingFolder+'/results/log.txt', "w")
except Exception, e: raise
scriptFile.write(stdout)
scriptFile.close()
outputFiles = params['output']
if outputFiles:
for file in outputFiles:
if os.path.exists(pathProcessingFolder+'/script/'+file):
os.rename(pathProcessingFolder+'/script/'+file, pathProcessingFolder+'/results/'+file)
zipdir(pathProcessingFolder+'/results', pathProcessingFolder+'/results.zip')
print "Sending results"
sendResults(pathProcessingFolder+'/results.zip')
shutil.rmtree(pathProcessingFolder)
else:
print "Error, Unable to find command"
return False
return True
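# For illustration, the shape of a 'process' response as inferred from the
# handler above (field values hypothetical, not taken from API docs):
#   {'action': 'process', 'script': '<python source to run>',
#    'params': '{"input": ["<item id>"], "output": ["<output filename>"]}'}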
def zipdir(basedir, archivename):
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir)+len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
# ------ Main --------
if __name__ == "__main__":
#Set directory location
while True:
os.chdir(sys.path[0])
registered = registerServer()
# Create tmp directory
if os.path.exists(sys.path[0]+'/tmp') == False:
os.mkdir('tmp')
if registered == True:
response = keepAliveServer()
if response != False:
handleMidasResponse(response)
else:
time.sleep(120)
else:
time.sleep(120)
|
"""
Send Results
"""
cfg = loadConfig('config.cfg')
cfginternal = loadConfig('config.internal.cfg')
url = cfg['url']
interfaceMidas = apiMidas.Communicator (url)
parameters = dict()
parameters['token'] = cfginternal['token']
try: response = interfaceMidas.makeRequest('midas.remoteprocessing.resultsserver', parameters, file)
except pydasException.PydasException, e:
print "Unable to send results"
print e
return False
return response
|
identifier_body
|
res_partner.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Partner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one('crm.team', string='Sales Team', oldname='section_id')
opportunity_ids = fields.One2many('crm.lead', 'partner_id', string='Opportunities', domain=[('type', '=', 'opportunity')])
meeting_ids = fields.Many2many('calendar.event', 'calendar_event_res_partner_rel', 'res_partner_id', 'calendar_event_id', string='Meetings', copy=False)
opportunity_count = fields.Integer("Opportunity", compute='_compute_opportunity_count')
meeting_count = fields.Integer("# Meetings", compute='_compute_meeting_count')
@api.model
def default_get(self, fields):
|
rec = super(Partner, self).default_get(fields)
active_model = self.env.context.get('active_model')
if active_model == 'crm.lead':
lead = self.env[active_model].browse(self.env.context.get('active_id')).exists()
if lead:
rec.update(
phone=lead.phone,
mobile=lead.mobile,
function=lead.function,
title=lead.title.id,
website=lead.website,
street=lead.street,
street2=lead.street2,
city=lead.city,
state_id=lead.state_id.id,
country_id=lead.country_id.id,
zip=lead.zip,
)
return rec
@api.multi
def _compute_opportunity_count(self):
for partner in self:
operator = 'child_of' if partner.is_company else '=' # the opportunity count should count the opportunities of this company and all its contacts
partner.opportunity_count = self.env['crm.lead'].search_count([('partner_id', operator, partner.id), ('type', '=', 'opportunity')])
@api.multi
def _compute_meeting_count(self):
for partner in self:
partner.meeting_count = len(partner.meeting_ids)
@api.multi
def schedule_meeting(self):
partner_ids = self.ids
partner_ids.append(self.env.user.partner_id.id)
action = self.env.ref('calendar.action_calendar_event').read()[0]
action['context'] = {
'search_default_partner_ids': self._context['partner_name'],
'default_partner_ids': partner_ids,
}
return action
|
random_line_split
|
|
res_partner.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Partner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one('crm.team', string='Sales Team', oldname='section_id')
opportunity_ids = fields.One2many('crm.lead', 'partner_id', string='Opportunities', domain=[('type', '=', 'opportunity')])
meeting_ids = fields.Many2many('calendar.event', 'calendar_event_res_partner_rel', 'res_partner_id', 'calendar_event_id', string='Meetings', copy=False)
opportunity_count = fields.Integer("Opportunity", compute='_compute_opportunity_count')
meeting_count = fields.Integer("# Meetings", compute='_compute_meeting_count')
@api.model
def default_get(self, fields):
rec = super(Partner, self).default_get(fields)
active_model = self.env.context.get('active_model')
if active_model == 'crm.lead':
lead = self.env[active_model].browse(self.env.context.get('active_id')).exists()
if lead:
|
return rec
@api.multi
def _compute_opportunity_count(self):
for partner in self:
operator = 'child_of' if partner.is_company else '=' # the opportunity count should count the opportunities of this company and all its contacts
partner.opportunity_count = self.env['crm.lead'].search_count([('partner_id', operator, partner.id), ('type', '=', 'opportunity')])
@api.multi
def _compute_meeting_count(self):
for partner in self:
partner.meeting_count = len(partner.meeting_ids)
@api.multi
def schedule_meeting(self):
partner_ids = self.ids
partner_ids.append(self.env.user.partner_id.id)
action = self.env.ref('calendar.action_calendar_event').read()[0]
action['context'] = {
'search_default_partner_ids': self._context['partner_name'],
'default_partner_ids': partner_ids,
}
return action
|
rec.update(
phone=lead.phone,
mobile=lead.mobile,
function=lead.function,
title=lead.title.id,
website=lead.website,
street=lead.street,
street2=lead.street2,
city=lead.city,
state_id=lead.state_id.id,
country_id=lead.country_id.id,
zip=lead.zip,
)
|
conditional_block
|
res_partner.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Partner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one('crm.team', string='Sales Team', oldname='section_id')
opportunity_ids = fields.One2many('crm.lead', 'partner_id', string='Opportunities', domain=[('type', '=', 'opportunity')])
meeting_ids = fields.Many2many('calendar.event', 'calendar_event_res_partner_rel', 'res_partner_id', 'calendar_event_id', string='Meetings', copy=False)
opportunity_count = fields.Integer("Opportunity", compute='_compute_opportunity_count')
meeting_count = fields.Integer("# Meetings", compute='_compute_meeting_count')
@api.model
def default_get(self, fields):
rec = super(Partner, self).default_get(fields)
active_model = self.env.context.get('active_model')
if active_model == 'crm.lead':
lead = self.env[active_model].browse(self.env.context.get('active_id')).exists()
if lead:
rec.update(
phone=lead.phone,
mobile=lead.mobile,
function=lead.function,
title=lead.title.id,
website=lead.website,
street=lead.street,
street2=lead.street2,
city=lead.city,
state_id=lead.state_id.id,
country_id=lead.country_id.id,
zip=lead.zip,
)
return rec
@api.multi
def _compute_opportunity_count(self):
for partner in self:
operator = 'child_of' if partner.is_company else '=' # the opportunity count should count the opportunities of this company and all its contacts
partner.opportunity_count = self.env['crm.lead'].search_count([('partner_id', operator, partner.id), ('type', '=', 'opportunity')])
@api.multi
def _compute_meeting_count(self):
for partner in self:
partner.meeting_count = len(partner.meeting_ids)
@api.multi
def schedule_meeting(self):
|
partner_ids = self.ids
partner_ids.append(self.env.user.partner_id.id)
action = self.env.ref('calendar.action_calendar_event').read()[0]
action['context'] = {
'search_default_partner_ids': self._context['partner_name'],
'default_partner_ids': partner_ids,
}
return action
|
identifier_body
|
|
res_partner.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class
|
(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one('crm.team', string='Sales Team', oldname='section_id')
opportunity_ids = fields.One2many('crm.lead', 'partner_id', string='Opportunities', domain=[('type', '=', 'opportunity')])
meeting_ids = fields.Many2many('calendar.event', 'calendar_event_res_partner_rel', 'res_partner_id', 'calendar_event_id', string='Meetings', copy=False)
opportunity_count = fields.Integer("Opportunity", compute='_compute_opportunity_count')
meeting_count = fields.Integer("# Meetings", compute='_compute_meeting_count')
@api.model
def default_get(self, fields):
rec = super(Partner, self).default_get(fields)
active_model = self.env.context.get('active_model')
if active_model == 'crm.lead':
lead = self.env[active_model].browse(self.env.context.get('active_id')).exists()
if lead:
rec.update(
phone=lead.phone,
mobile=lead.mobile,
function=lead.function,
title=lead.title.id,
website=lead.website,
street=lead.street,
street2=lead.street2,
city=lead.city,
state_id=lead.state_id.id,
country_id=lead.country_id.id,
zip=lead.zip,
)
return rec
@api.multi
def _compute_opportunity_count(self):
for partner in self:
operator = 'child_of' if partner.is_company else '=' # the opportunity count should count the opportunities of this company and all its contacts
partner.opportunity_count = self.env['crm.lead'].search_count([('partner_id', operator, partner.id), ('type', '=', 'opportunity')])
@api.multi
def _compute_meeting_count(self):
for partner in self:
partner.meeting_count = len(partner.meeting_ids)
@api.multi
def schedule_meeting(self):
partner_ids = self.ids
partner_ids.append(self.env.user.partner_id.id)
action = self.env.ref('calendar.action_calendar_event').read()[0]
action['context'] = {
'search_default_partner_ids': self._context['partner_name'],
'default_partner_ids': partner_ids,
}
return action
|
Partner
|
identifier_name
|
p011.rs
|
//! [Problem 11](https://projecteuler.net/problem=11) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(iter_arith)]
#[macro_use(problem)] extern crate common;
const INPUT: &'static str = "
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
";
fn compute(prod_len: usize) -> u32 {
let grid: Vec<Vec<u32>> = INPUT
.trim()
.lines()
.map(|line| {
line.split_whitespace().filter_map(|s| s.parse().ok()).collect()
})
.collect();
|
let h = grid.len();
let mut lines: Vec<Vec<_>> = vec![];
// rows
lines.extend((0 .. h).map(|y| (0 .. w).map(|x| (x, y)).collect()));
// cols
lines.extend((0 .. w).map(|x| (0 .. h).map(|y| (x, y)).collect()));
// top 2 right diagonal
lines.extend((0 .. w).map(|i| {
let (x0, y0) = (i, 0);
(0 .. w - x0).map(|j| (x0 + j, y0 + j)).collect()
}));
// left 2 bottom diagonal
lines.extend((0 .. h - 1).map(|i| {
let (x0, y0) = (0, i + 1);
(0 .. h - y0).map(|j| (x0 + j, y0 + j)).collect()
}));
// top 2 left diagonal
lines.extend((0 .. w).map(|i| {
let (x0, y0) = (i, 0);
(0 .. x0 + 1).map(|j| (x0 - j, y0 + j)).collect()
}));
// right 2 bottom diagonal
lines.extend((0 .. h - 1).map(|i| {
let (x0, y0) = (w - 1, i + 1);
(0 .. h - y0).map(|j| (x0 - j, y0 + j)).collect()
}));
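// `lines` now enumerates every straight run of the grid as coordinate
// sequences: h rows, w columns, and w + h - 1 diagonals in each of the two
// diagonal directions, so every window of `prod_len` cells is scanned once.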
lines.iter()
.map(|cells| {
cells.windows(prod_len)
.map(|ns| ns.iter().map(|&(x, y)| grid[y][x]).product())
.max()
.unwrap_or(0)
}).max()
.unwrap()
}
fn solve() -> String { compute(4).to_string() }
problem!("70600674", solve);
|
let w = grid[0].len();
|
random_line_split
|
p011.rs
|
//! [Problem 11](https://projecteuler.net/problem=11) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(iter_arith)]
#[macro_use(problem)] extern crate common;
const INPUT: &'static str = "
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
";
fn compute(prod_len: usize) -> u32 {
let grid: Vec<Vec<u32>> = INPUT
.trim()
.lines()
.map(|line| {
line.split_whitespace().filter_map(|s| s.parse().ok()).collect()
})
.collect();
let w = grid[0].len();
let h = grid.len();
let mut lines: Vec<Vec<_>> = vec![];
// rows
lines.extend((0 .. h).map(|y| (0 .. w).map(|x| (x, y)).collect()));
// cols
lines.extend((0 .. w).map(|x| (0 .. h).map(|y| (x, y)).collect()));
// top 2 right diagonal
lines.extend((0 .. w).map(|i| {
let (x0, y0) = (i, 0);
(0 .. w - x0).map(|j| (x0 + j, y0 + j)).collect()
}));
// left 2 bottom diagonal
lines.extend((0 .. h - 1).map(|i| {
let (x0, y0) = (0, i + 1);
(0 .. h - y0).map(|j| (x0 + j, y0 + j)).collect()
}));
// top 2 left diagonal
lines.extend((0 .. w).map(|i| {
let (x0, y0) = (i, 0);
(0 .. x0 + 1).map(|j| (x0 - j, y0 + j)).collect()
}));
// right 2 bottom diagonal
lines.extend((0 .. h - 1).map(|i| {
let (x0, y0) = (w - 1, i + 1);
(0 .. h - y0).map(|j| (x0 - j, y0 + j)).collect()
}));
lines.iter()
.map(|cells| {
cells.windows(prod_len)
.map(|ns| ns.iter().map(|&(x, y)| grid[y][x]).product())
.max()
.unwrap_or(0)
}).max()
.unwrap()
}
fn
|
() -> String { compute(4).to_string() }
problem!("70600674", solve);
|
solve
|
identifier_name
|
p011.rs
|
//! [Problem 11](https://projecteuler.net/problem=11) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(iter_arith)]
#[macro_use(problem)] extern crate common;
const INPUT: &'static str = "
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
";
fn compute(prod_len: usize) -> u32
|
fn solve() -> String { compute(4).to_string() }
problem!("70600674", solve);
|
{
let grid: Vec<Vec<u32>> = INPUT
.trim()
.lines()
.map(|line| {
line.split_whitespace().filter_map(|s| s.parse().ok()).collect()
})
.collect();
let w = grid[0].len();
let h = grid.len();
let mut lines: Vec<Vec<_>> = vec![];
// rows
lines.extend((0 .. h).map(|y| (0 .. w).map(|x| (x, y)).collect()));
// cols
lines.extend((0 .. w).map(|x| (0 .. h).map(|y| (x, y)).collect()));
// top 2 right diagonal
lines.extend((0 .. w).map(|i| {
let (x0, y0) = (i, 0);
(0 .. w - x0).map(|j| (x0 + j, y0 + j)).collect()
}));
// left 2 bottom diagonal
lines.extend((0 .. h - 1).map(|i| {
let (x0, y0) = (0, i + 1);
(0 .. h - y0).map(|j| (x0 + j, y0 + j)).collect()
}));
// top 2 left diagonal
lines.extend((0 .. w).map(|i| {
let (x0, y0) = (i, 0);
(0 .. x0 + 1).map(|j| (x0 - j, y0 + j)).collect()
}));
// right 2 bottom diagonal
lines.extend((0 .. h - 1).map(|i| {
let (x0, y0) = (w - 1, i + 1);
(0 .. h - y0).map(|j| (x0 - j, y0 + j)).collect()
}));
lines.iter()
.map(|cells| {
cells.windows(prod_len)
.map(|ns| ns.iter().map(|&(x, y)| grid[y][x]).product())
.max()
.unwrap_or(0)
}).max()
.unwrap()
}
|
identifier_body
|
api.js
|
var meetcute_api = require("ti.cloud.meetcute.api"),
MC_API_TOKEN = '23#AFE92',
Cloud = require('ti.cloud');
Cloud.debug = true;
/**
* 1. Filter by: age & _gender & !viewed
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
*/
exports.loadFeeds = function(data, success, error) {
var customFields = Ti.App.currentUser.custom_fields,
excludedUserIDS = data.excludedUserIDS;
// exclude current user and FB Friends + viewed photo
excludedUserIDS.push( Ti.App.currentUser.id );
// TODO: Comment out filter for now
/*
if ( customFields.viewed ) {
var viewed = customFields.viewed.split(':');
if ( viewed.length > 0 ) {
excludedUserIDS = excludedUserIDS.concat( viewed );
}
}
*/
var filter = {
"id": { "$nin": excludedUserIDS },
"has_photo": true, // to make sure matchers have photo
"$and": [ { "age": {"$gte": customFields.like_age_from} }, { "age": {"$lte": customFields.like_age_to} } ],
"status": 'approved'
};
if ( customFields.like_gender != 'Anyone' ) {
filter['_gender'] = customFields.like_gender;
}
// order by coordinates
if ( customFields.coordinates && customFields.coordinates.length ) {
filter["coordinates"] = { "$nearSphere": customFields.coordinates[0] };
}
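// Illustration (values hypothetical): for a 25-34 'Female' preference with a
// stored location, the assembled filter resembles:
// { "id": { "$nin": [...] }, "has_photo": true, "status": 'approved',
//   "$and": [ { "age": { "$gte": 25 } }, { "age": { "$lte": 34 } } ],
//   "_gender": 'Female', "coordinates": { "$nearSphere": <first stored point> } }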
Cloud.Users.query({
page: data.page,
per_page: data.per_page,
sel: { "all": ["id", "_gender", "liked", "device_token", "photo", "urls"] }, // Selects the object fields to display
where: filter
}, function (e) {
if (e.success) {
success && success( e.users, e.meta );
} else {
error && error();
alert('Error:\n' +
((e.error && e.message) || JSON.stringify(e)));
}
});
};
exports.searchFacebookFriends = function(success, error) {
Cloud.SocialIntegrations.searchFacebookFriends(function (e){
if (e.success) {
success && success(e.users);
} else {
error && error();
// alert('Error:\n' +
// ((e.error && e.message) || JSON.stringify(e)));
}
});
};
/**
* Mark photo as viewed
*/
function onViewPhoto( userId, isLiked ) {
if ( !userId ) {
return;
}
var customFields = Ti.App.currentUser.custom_fields;
// viewed
var viewed = customFields.viewed;
if ( !viewed ) {
viewed = userId;
} else if ( viewed.indexOf(userId) == -1 )
|
// liked
var liked;
if ( isLiked ) {
liked = customFields.liked;
if ( !liked ) {
liked = userId;
} else if ( liked.indexOf(userId) == -1 ) {
liked += ':' + userId;
}
}
Cloud.Users.update(
{
custom_fields: isLiked ? { viewed: viewed, liked: liked } : { viewed: viewed }
},
function () {
customFields.viewed = viewed;
if ( isLiked ) {
customFields.liked = liked;
}
var u = Ti.App.currentUser;
u.custom_fields = customFields;
Ti.App.currentUser = u;
}
);
}
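// Illustration: after viewing users 'u1' and 'u2' (IDs hypothetical),
// customFields.viewed holds the colon-delimited string 'u1:u2'.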
exports.onViewPhoto = onViewPhoto;
/**
* Mark photo as viewed & liked
*/
exports.onLikePhoto = function(userId) {
onViewPhoto( userId, true );
};
exports.crossPath = function( data, callback ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_cross_path(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.push = function( data ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_push(
data,
function( res ) {
Ti.API.info( res );
}
);
};
/**
* 1. Filter by: age & _gender
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
3. Interested in
4. Time Availability
5. Match Delay (has the user received a notification in the last 24 hours)
6. Feedback
*/
exports.filterMatchers = function(start_time, excludedUserIDS, success, error) {
meetcute_api.user_filter_matchers({
api_token: MC_API_TOKEN,
user_id: Ti.App.currentUser.id,
user: JSON.stringify(Ti.App.currentUser.custom_fields),
start_time: start_time,
excluded: JSON.stringify(excludedUserIDS)
}, function(res) {
if (res.success) {
success && success(res.users);
} else {
Ti.API.error( 'API::filterMatchers error: ' + res);
error && error(res);
}
});
};
exports.updateEvent = function( data, callback ) {
data.api_token = MC_API_TOKEN;
meetcute_api.places_update_event(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.getEventById = function ( data, success, error ) {
Cloud.Events.query({
where: {
id: data.event_id
}
}, function (e) {
if (e.success) {
success && success( e.events );
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.checkCrossPath = function ( userId, success, error ) {
Cloud.Events.query({
// sel: { "all": ["user", "matched_users", "disagree_users"] }, TODO - which field do you want to select?
where: {
status: 'new'
}
}, function (e) {
if (e.success) {
var events = e.events,
custom_fields,
found = false;
for (var i=0, len=events.length; i < len; i++) {
var event = events[i],
crossPath = {
eventId: event.id,
place : {
name: event.place['name'],
address: [event.place['address']],
latitude: event.place['latitude'],
longitude: event.place['longitude'],
status: event.place['custom_fields']['status']
},
event: {
event_id: event.id,
start_time: event.start_time
}
};
if ( event.user.id == userId ) {
success({
has_active_cross_path: true,
type: 'initor',
crossPath: crossPath
});
found = true;
break;
}
custom_fields = event.custom_fields;
if ( custom_fields.matched_users.indexOf(userId) != -1 ) { // User in matched list
if ( custom_fields.disagree_users.indexOf(userId) != -1 ) { // User Denied a CrossPath
success({
has_active_cross_path: false
});
} else if ( custom_fields.agree_users && custom_fields.agree_users.indexOf(userId) != -1 ) { //user has accepted this event
success({
has_active_cross_path: true,
type: 'accepted',
crossPath: crossPath
});
} else { // User has not made a decision yet
success({
has_active_cross_path: true,
type: 'matcher',
crossPath: crossPath
});
}
found = true;
break;
}
}
if ( !found ) {
success({
has_active_cross_path: false
});
}
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.updateUser = function (data, success, error) {
Cloud.Users.update( data, function (e) {
if (e.success) {
success && success(e);
} else {
Alloy.Globals.Common.showDialog({
title: 'Error',
message: e.error && e.message,
});
}
});
};
exports.emailPhoto = function ( data, success, error ) {
data.api_token = MC_API_TOKEN;
meetcute_api.user_email_photo( data );
};
|
{
viewed += ':' + userId;
}
|
conditional_block
|
api.js
|
var meetcute_api = require("ti.cloud.meetcute.api"),
MC_API_TOKEN = '23#AFE92',
Cloud = require('ti.cloud');
Cloud.debug = true;
/**
* 1. Filter by: age & _gender & !viewed
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
*/
exports.loadFeeds = function(data, success, error) {
var customFields = Ti.App.currentUser.custom_fields,
excludedUserIDS = data.excludedUserIDS;
// exclude current user and FB Friends + viewed photo
excludedUserIDS.push( Ti.App.currentUser.id );
// TODO: Comment out filter for now
/*
if ( customFields.viewed ) {
var viewed = customFields.viewed.split(':');
if ( viewed.length > 0 ) {
excludedUserIDS = excludedUserIDS.concat( viewed );
}
}
*/
var filter = {
"id": { "$nin": excludedUserIDS },
"has_photo": true, // to make sure matchers have photo
"$and": [ { "age": {"$gte": customFields.like_age_from} }, { "age": {"$lte": customFields.like_age_to} } ],
"status": 'approved'
};
if ( customFields.like_gender != 'Anyone' ) {
filter['_gender'] = customFields.like_gender;
}
// order by coordinates
if ( customFields.coordinates && customFields.coordinates.length ) {
filter["coordinates"] = { "$nearSphere": customFields.coordinates[0] };
}
Cloud.Users.query({
page: data.page,
per_page: data.per_page,
sel: { "all": ["id", "_gender", "liked", "device_token", "photo", "urls"] }, // Selects the object fields to display
where: filter
}, function (e) {
if (e.success) {
success && success( e.users, e.meta );
} else {
error && error();
alert('Error:\n' +
((e.error && e.message) || JSON.stringify(e)));
}
});
};
exports.searchFacebookFriends = function(success, error) {
Cloud.SocialIntegrations.searchFacebookFriends(function (e){
if (e.success) {
success && success(e.users);
} else {
error && error();
// alert('Error:\n' +
// ((e.error && e.message) || JSON.stringify(e)));
}
});
};
/**
* Mark photo as viewed
*/
function onViewPhoto( userId, isLiked )
|
exports.onViewPhoto = onViewPhoto;
/**
* Mark photo as viewed & liked
*/
exports.onLikePhoto = function(userId) {
onViewPhoto( userId, true );
};
exports.crossPath = function( data, callback ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_cross_path(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.push = function( data ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_push(
data,
function( res ) {
Ti.API.info( res );
}
);
};
/**
* 1. Filter by: age & _gender
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
3. Interested in
4. Time Availability
5. Match Delay (has the user received a notification in the last 24 hours)
6. Feedback
*/
exports.filterMatchers = function(start_time, excludedUserIDS, success, error) {
meetcute_api.user_filter_matchers({
api_token: MC_API_TOKEN,
user_id: Ti.App.currentUser.id,
user: JSON.stringify(Ti.App.currentUser.custom_fields),
start_time: start_time,
excluded: JSON.stringify(excludedUserIDS)
}, function(res) {
if (res.success) {
success && success(res.users);
} else {
Ti.API.error( 'API::filterMatchers error: ' + res);
error && error(res);
}
});
};
exports.updateEvent = function( data, callback ) {
data.api_token = MC_API_TOKEN;
meetcute_api.places_update_event(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.getEventById = function ( data, success, error ) {
Cloud.Events.query({
where: {
id: data.event_id
}
}, function (e) {
if (e.success) {
success && success( e.events );
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.checkCrossPath = function ( userId, success, error ) {
Cloud.Events.query({
// sel: { "all": ["user", "matched_users", "disagree_users"] }, TODO - which field do you want to select?
where: {
status: 'new'
}
}, function (e) {
if (e.success) {
var events = e.events,
custom_fields,
found = false;
for (var i=0, len=events.length; i < len; i++) {
var event = events[i],
crossPath = {
eventId: event.id,
place : {
name: event.place['name'],
address: [event.place['address']],
latitude: event.place['latitude'],
longitude: event.place['longitude'],
status: event.place['custom_fields']['status']
},
event: {
event_id: event.id,
start_time: event.start_time
}
};
if ( event.user.id == userId ) {
success({
has_active_cross_path: true,
type: 'initor',
crossPath: crossPath
});
found = true;
break;
}
custom_fields = event.custom_fields;
if ( custom_fields.matched_users.indexOf(userId) != -1 ) { // User in matched list
if ( custom_fields.disagree_users.indexOf(userId) != -1 ) { // User Denied a CrossPath
success({
has_active_cross_path: false
});
} else if ( custom_fields.agree_users && custom_fields.agree_users.indexOf(userId) != -1 ) { //user has accepted this event
success({
has_active_cross_path: true,
type: 'accepted',
crossPath: crossPath
});
} else { // User has not made a decision yet
success({
has_active_cross_path: true,
type: 'matcher',
crossPath: crossPath
});
}
found = true;
break;
}
}
if ( !found ) {
success({
has_active_cross_path: false
});
}
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.updateUser = function (data, success, error) {
Cloud.Users.update( data, function (e) {
if (e.success) {
success && success(e);
} else {
Alloy.Globals.Common.showDialog({
title: 'Error',
message: e.error && e.message,
});
}
});
};
exports.emailPhoto = function ( data, success, error ) {
data.api_token = MC_API_TOKEN;
meetcute_api.user_email_photo( data );
};
|
{
if ( !userId ) {
return;
}
var customFields = Ti.App.currentUser.custom_fields;
// viewed
var viewed = customFields.viewed;
if ( !viewed ) {
viewed = userId;
} else if ( viewed.indexOf(userId) == -1 ) {
viewed += ':' + userId;
}
// liked
var liked;
if ( isLiked ) {
liked = customFields.liked;
if ( !liked ) {
liked = userId;
} else if ( liked.indexOf(userId) == -1 ) {
liked += ':' + userId;
}
}
Cloud.Users.update(
{
custom_fields: isLiked ? { viewed: viewed, liked: liked } : { viewed: viewed }
},
function () {
customFields.viewed = viewed;
if ( isLiked ) {
customFields.liked = liked;
}
var u = Ti.App.currentUser;
u.custom_fields = customFields;
Ti.App.currentUser = u;
}
);
}
|
identifier_body
|
api.js
|
var meetcute_api = require("ti.cloud.meetcute.api"),
MC_API_TOKEN = '23#AFE92',
Cloud = require('ti.cloud');
Cloud.debug = true;
/**
* 1. Filter by: age & _gender & !viewed
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
*/
exports.loadFeeds = function(data, success, error) {
var customFields = Ti.App.currentUser.custom_fields,
excludedUserIDS = data.excludedUserIDS;
// exclude current user and FB Friends + viewed photo
excludedUserIDS.push( Ti.App.currentUser.id );
// TODO: Comment out filter for now
/*
if ( customFields.viewed ) {
var viewed = customFields.viewed.split(':');
if ( viewed.length > 0 ) {
excludedUserIDS = excludedUserIDS.concat( viewed );
}
}
*/
var filter = {
"id": { "$nin": excludedUserIDS },
"has_photo": true, // to make sure matchers have photo
"$and": [ { "age": {"$gte": customFields.like_age_from} }, { "age": {"$lte": customFields.like_age_to} } ],
"status": 'approved'
};
if ( customFields.like_gender != 'Anyone' ) {
filter['_gender'] = customFields.like_gender;
}
// order by coordinates
if ( customFields.coordinates && customFields.coordinates.length ) {
filter["coordinates"] = { "$nearSphere": customFields.coordinates[0] };
}
Cloud.Users.query({
page: data.page,
per_page: data.per_page,
sel: { "all": ["id", "_gender", "liked", "device_token", "photo", "urls"] }, // Selects the object fields to display
where: filter
}, function (e) {
if (e.success) {
success && success( e.users, e.meta );
} else {
error && error();
alert('Error:\n' +
((e.error && e.message) || JSON.stringify(e)));
}
});
};
exports.searchFacebookFriends = function(success, error) {
Cloud.SocialIntegrations.searchFacebookFriends(function (e){
if (e.success) {
success && success(e.users);
} else {
error && error();
// alert('Error:\n' +
// ((e.error && e.message) || JSON.stringify(e)));
}
});
};
/**
* Mark photo as viewed
*/
function onViewPhoto( userId, isLiked ) {
if ( !userId ) {
return;
}
var customFields = Ti.App.currentUser.custom_fields;
// viewed
var viewed = customFields.viewed;
if ( !viewed ) {
viewed = userId;
} else if ( viewed.indexOf(userId) == -1 ) {
viewed += ':' + userId;
}
// liked
var liked;
if ( isLiked ) {
liked = customFields.liked;
if ( !liked ) {
liked = userId;
} else if ( liked.indexOf(userId) == -1 ) {
liked += ':' + userId;
}
}
Cloud.Users.update(
{
custom_fields: isLiked ? { viewed: viewed, liked: liked } : { viewed: viewed }
},
function () {
customFields.viewed = viewed;
if ( isLiked ) {
customFields.liked = liked;
}
var u = Ti.App.currentUser;
u.custom_fields = customFields;
Ti.App.currentUser = u;
}
);
}
exports.onViewPhoto = onViewPhoto;
/**
* Mark photo as viewed & liked
*/
exports.onLikePhoto = function(userId) {
onViewPhoto( userId, true );
};
exports.crossPath = function( data, callback ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_cross_path(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.push = function( data ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_push(
data,
function( res ) {
Ti.API.info( res );
}
);
};
/**
* 1. Filter by: age & _gender
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
3. Interested in
4. Time Availability
5. Match Delay (has the user received a notification in the last 24 hours)
6. Feedback
*/
exports.filterMatchers = function(start_time, excludedUserIDS, success, error) {
meetcute_api.user_filter_matchers({
api_token: MC_API_TOKEN,
user_id: Ti.App.currentUser.id,
user: JSON.stringify(Ti.App.currentUser.custom_fields),
start_time: start_time,
excluded: JSON.stringify(excludedUserIDS)
}, function(res) {
if (res.success) {
success && success(res.users);
} else {
Ti.API.error( 'API::filterMatchers error: ' + res);
error && error(res);
}
});
};
exports.updateEvent = function( data, callback ) {
data.api_token = MC_API_TOKEN;
meetcute_api.places_update_event(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.getEventById = function ( data, success, error ) {
Cloud.Events.query({
where: {
id: data.event_id
}
}, function (e) {
if (e.success) {
success && success( e.events );
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.checkCrossPath = function ( userId, success, error ) {
Cloud.Events.query({
// sel: { "all": ["user", "matched_users", "disagree_users"] }, TODO - which field do you want to select?
where: {
status: 'new'
}
}, function (e) {
if (e.success) {
var events = e.events,
custom_fields,
found = false;
for (var i=0, len=events.length; i < len; i++) {
var event = events[i],
crossPath = {
eventId: event.id,
place : {
name: event.place['name'],
address: [event.place['address']],
latitude: event.place['latitude'],
longitude: event.place['longitude'],
status: event.place['custom_fields']['status']
},
event: {
event_id: event.id,
start_time: event.start_time
}
};
if ( event.user.id == userId ) {
success({
has_active_cross_path: true,
type: 'initor',
crossPath: crossPath
});
found = true;
break;
}
custom_fields = event.custom_fields;
if ( custom_fields.matched_users.indexOf(userId) != -1 ) { // User in matched list
if ( custom_fields.disagree_users.indexOf(userId) != -1 ) { // User Denied a CrossPath
success({
has_active_cross_path: false
|
type: 'accepted',
crossPath: crossPath
});
} else { // User has not made a decision yet
success({
has_active_cross_path: true,
type: 'matcher',
crossPath: crossPath
});
}
found = true;
break;
}
}
if ( !found ) {
success({
has_active_cross_path: false
});
}
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.updateUser = function (data, success, error) {
Cloud.Users.update( data, function (e) {
if (e.success) {
success && success(e);
} else {
Alloy.Globals.Common.showDialog({
title: 'Error',
message: e.error && e.message,
});
}
});
};
exports.emailPhoto = function ( data, success, error ) {
data.api_token = MC_API_TOKEN;
meetcute_api.user_email_photo( data );
};
|
});
} else if ( custom_fields.agree_users && custom_fields.agree_users.indexOf(userId) != -1 ) { //user has accepted this event
success({
has_active_cross_path: true,
|
random_line_split
|
api.js
|
var meetcute_api = require("ti.cloud.meetcute.api"),
MC_API_TOKEN = '23#AFE92',
Cloud = require('ti.cloud');
Cloud.debug = true;
/**
* 1. Filter by: age & _gender & !viewed
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
*/
exports.loadFeeds = function(data, success, error) {
var customFields = Ti.App.currentUser.custom_fields,
excludedUserIDS = data.excludedUserIDS;
// exclude current user and FB Friends + viewed photo
excludedUserIDS.push( Ti.App.currentUser.id );
// TODO: Comment out filter for now
/*
if ( customFields.viewed ) {
var viewed = customFields.viewed.split(':');
if ( viewed.length > 0 ) {
excludedUserIDS = excludedUserIDS.concat( viewed );
}
}
*/
var filter = {
"id": { "$nin": excludedUserIDS },
"has_photo": true, // to make sure matchers have photo
"$and": [ { "age": {"$gte": customFields.like_age_from} }, { "age": {"$lte": customFields.like_age_to} } ],
"status": 'approved'
};
if ( customFields.like_gender != 'Anyone' ) {
filter['_gender'] = customFields.like_gender;
}
// order by coordinates
if ( customFields.coordinates && customFields.coordinates.length ) {
filter["coordinates"] = { "$nearSphere": customFields.coordinates[0] };
}
Cloud.Users.query({
page: data.page,
per_page: data.per_page,
sel: { "all": ["id", "_gender", "liked", "device_token", "photo", "urls"] }, // Selects the object fields to display
where: filter
}, function (e) {
if (e.success) {
success && success( e.users, e.meta );
} else {
error && error();
alert('Error:\n' +
((e.error && e.message) || JSON.stringify(e)));
}
});
};
exports.searchFacebookFriends = function(success, error) {
Cloud.SocialIntegrations.searchFacebookFriends(function (e){
if (e.success) {
success && success(e.users);
} else {
error && error();
// alert('Error:\n' +
// ((e.error && e.message) || JSON.stringify(e)));
}
});
};
/**
* Mark photo as viewed
*/
function
|
( userId, isLiked ) {
if ( !userId ) {
return;
}
var customFields = Ti.App.currentUser.custom_fields;
// viewed
var viewed = customFields.viewed;
if ( !viewed ) {
viewed = userId;
} else if ( viewed.indexOf(userId) == -1 ) {
viewed += ':' + userId;
}
// liked
var liked;
if ( isLiked ) {
liked = customFields.liked;
if ( !liked ) {
liked = userId;
} else if ( liked.indexOf(userId) == -1 ) {
liked += ':' + userId;
}
}
Cloud.Users.update(
{
custom_fields: isLiked ? { viewed: viewed, liked: liked } : { viewed: viewed }
},
function () {
customFields.viewed = viewed;
if ( isLiked ) {
customFields.liked = liked;
}
var u = Ti.App.currentUser;
u.custom_fields = customFields;
Ti.App.currentUser = u;
}
);
}
exports.onViewPhoto = onViewPhoto;
/**
* Mark photo as viewed & liked
*/
exports.onLikePhoto = function(userId) {
onViewPhoto( userId, true );
};
exports.crossPath = function( data, callback ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_cross_path(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.push = function( data ) {
data.api_token = MC_API_TOKEN;
// The default generated bindings file allows you to send payload data and a success callback.
meetcute_api.places_push(
data,
function( res ) {
Ti.API.info( res );
}
);
};
/**
* 1. Filter by: age & _gender
Order by geo location. Limit 20
2. Find the current user's Facebook Friends who also registered in the same App.
Then exclude them :)
http://docs.appcelerator.com/cloud/latest/#!/api/SocialIntegrations-method-facebook_search_friends
3. Interested in
4. Time Availability
5. Match Delay (has the user received a notification in the last 24 hours)
6. Feedback
*/
exports.filterMatchers = function(start_time, excludedUserIDS, success, error) {
meetcute_api.user_filter_matchers({
api_token: MC_API_TOKEN,
user_id: Ti.App.currentUser.id,
user: JSON.stringify(Ti.App.currentUser.custom_fields),
start_time: start_time,
excluded: JSON.stringify(excludedUserIDS)
}, function(res) {
if (res.success) {
success && success(res.users);
} else {
Ti.API.error( 'API::filterMatchers error: ' + res);
error && error(res);
}
});
};
exports.updateEvent = function( data, callback ) {
data.api_token = MC_API_TOKEN;
meetcute_api.places_update_event(
data,
function( res ) {
var res = JSON.parse ( res );
if ( res ) {
callback && callback ( res );
}
}
);
};
exports.getEventById = function ( data, success, error ) {
Cloud.Events.query({
where: {
id: data.event_id
}
}, function (e) {
if (e.success) {
success && success( e.events );
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.checkCrossPath = function ( userId, success, error ) {
Cloud.Events.query({
// sel: { "all": ["user", "matched_users", "disagree_users"] }, TODO - which field do you want to select?
where: {
status: 'new'
}
}, function (e) {
if (e.success) {
var events = e.events,
custom_fields,
found = false;
for (var i=0, len=events.length; i < len; i++) {
var event = events[i],
crossPath = {
eventId: event.id,
place : {
name: event.place['name'],
address: [event.place['address']],
latitude: event.place['latitude'],
longitude: event.place['longitude'],
status: event.place['custom_fields']['status']
},
event: {
event_id: event.id,
start_time: event.start_time
}
};
if ( event.user.id == userId ) {
success({
has_active_cross_path: true,
type: 'initor',
crossPath: crossPath
});
found = true;
break;
}
custom_fields = event.custom_fields;
if ( custom_fields.matched_users.indexOf(userId) != -1 ) { // User in matched list
if ( custom_fields.disagree_users.indexOf(userId) != -1 ) { // User Denied a CrossPath
success({
has_active_cross_path: false
});
} else if ( custom_fields.agree_users && custom_fields.agree_users.indexOf(userId) != -1 ) { //user has accepted this event
success({
has_active_cross_path: true,
type: 'accepted',
crossPath: crossPath
});
} else { // User has not made a decision yet
success({
has_active_cross_path: true,
type: 'matcher',
crossPath: crossPath
});
}
found = true;
break;
}
}
if ( !found ) {
success({
has_active_cross_path: false
});
}
} else {
error && error( (e.error && e.message) || JSON.stringify(e));
}
});
};
exports.updateUser = function (data, success, error) {
Cloud.Users.update( data, function (e) {
if (e.success) {
success && success(e);
} else {
Alloy.Globals.Common.showDialog({
title: 'Error',
message: e.error && e.message,
});
}
});
};
exports.emailPhoto = function ( data, success, error ) {
data.api_token = MC_API_TOKEN;
meetcute_api.user_email_photo( data );
};
|
onViewPhoto
|
identifier_name
|
XMPCommonFwdDeclarations_8h.js
|
var XMPCommonFwdDeclarations_8h =
[
[ "cIUTF8Strings", "XMPCommonFwdDeclarations_8h.html#aae5dbe164f71188aa24c87fa6306539a", null ],
[ "IConfigurationManager_base", "XMPCommonFwdDeclarations_8h.html#ab6a71f81b4e3c8e5d2d0c90f82fbee08", null ],
|
[ "IErrorNotifier_base", "XMPCommonFwdDeclarations_8h.html#a692c91c0b558cbc476e43e7f0a9112e0", null ],
[ "IMemoryAllocator_base", "XMPCommonFwdDeclarations_8h.html#ae79591aae25236208281cd3e48a2483d", null ],
[ "IObjectFactory_base", "XMPCommonFwdDeclarations_8h.html#a25f44f5d5e5c651d20124037a7c3b5c3", null ],
[ "IUTF8String_base", "XMPCommonFwdDeclarations_8h.html#a169532f774f2b1f3d8501e896d6f1957", null ],
[ "IUTF8Strings", "XMPCommonFwdDeclarations_8h.html#ac82ab59bdaecb219834ac2ff81573414", null ],
[ "MemAllocateProc", "XMPCommonFwdDeclarations_8h.html#a6ddf5efa8f9b686e5d7fef41d6b10096", null ],
[ "MemReleaseProc", "XMPCommonFwdDeclarations_8h.html#a79edf1f9139609bf6ed70b24cc99a2e0", null ],
[ "pcIConfigurable", "XMPCommonFwdDeclarations_8h.html#a394d772cfe78f1dbeba6059e7a3c65d6", null ],
[ "pcIConfigurationManager", "XMPCommonFwdDeclarations_8h.html#a2a6831c5d17f5c9990c01268f67b7bb2", null ],
[ "pcIConfigurationManager_base", "XMPCommonFwdDeclarations_8h.html#a39556e80a6bda05757b7f2766ddc2ba1", null ],
[ "pcIError", "XMPCommonFwdDeclarations_8h.html#a0e0c89c232b777fb332e0ddc10b65fc2", null ],
[ "pcIError_base", "XMPCommonFwdDeclarations_8h.html#ae3cc983230819089dc742273902228c5", null ],
[ "pcIErrorNotifier", "XMPCommonFwdDeclarations_8h.html#aa4baade1a8eb0dbb544860e0c89362f8", null ],
[ "pcIErrorNotifier_base", "XMPCommonFwdDeclarations_8h.html#a15919ce22d7dc4def5a590f0dbb882ca", null ],
[ "pcIMemoryAllocator", "XMPCommonFwdDeclarations_8h.html#af5a7d4d78400043e3e49dfa6a2725abb", null ],
[ "pcIMemoryAllocator_base", "XMPCommonFwdDeclarations_8h.html#a42a89fb94134c8eaed66765f2975b3f9", null ],
[ "pcIObjectFactory", "XMPCommonFwdDeclarations_8h.html#a272e32106a7573db514736bfb68a1673", null ],
[ "pcIObjectFactory_base", "XMPCommonFwdDeclarations_8h.html#a8d7232279921e3ccaa69fe314052415b", null ],
[ "pcISharedObject_I", "XMPCommonFwdDeclarations_8h.html#affd72b6c7abbc84fdef10b5a607cd432", null ],
[ "pcIThreadSafe_I", "XMPCommonFwdDeclarations_8h.html#a2becb4c0d21c71e1dcecdcaae0e92bd7", null ],
[ "pcIUTF8String", "XMPCommonFwdDeclarations_8h.html#aa9b2c45fd472c6e0e322a94c87e36e23", null ],
[ "pcIUTF8String_base", "XMPCommonFwdDeclarations_8h.html#a34ccf48e077c7308540d98149a4a8b2b", null ],
[ "pcvoid", "XMPCommonFwdDeclarations_8h.html#a19692335f9547f4bc07d0ff727f14605", null ],
[ "pIConfigurable", "XMPCommonFwdDeclarations_8h.html#a9befc7ddfe5977c8176eed763d9ce69e", null ],
[ "pIConfigurationManager", "XMPCommonFwdDeclarations_8h.html#a66f57b67d8be6cbf836ad084a3d27d76", null ],
[ "pIConfigurationManager_base", "XMPCommonFwdDeclarations_8h.html#a20fcfcaa614eba9a9fcdf2353b82fdb7", null ],
[ "pIError", "XMPCommonFwdDeclarations_8h.html#ac0db32de8e6a3688ad0dfc36c2757fb4", null ],
[ "pIError_base", "XMPCommonFwdDeclarations_8h.html#a4e907810e5d5937b974343d279e0181d", null ],
[ "pIErrorNotifier", "XMPCommonFwdDeclarations_8h.html#a9981f3ca088023224e80dbad05090f3c", null ],
[ "pIErrorNotifier_base", "XMPCommonFwdDeclarations_8h.html#a4ad7bda9e1289b0bb93510402b6e86f9", null ],
[ "pIMemoryAllocator", "XMPCommonFwdDeclarations_8h.html#a605dcb5281a71a01673c81e6508f932f", null ],
[ "pIMemoryAllocator_base", "XMPCommonFwdDeclarations_8h.html#aff75b5c835a8eb072072be6b404aa0d5", null ],
[ "pIObjectFactory", "XMPCommonFwdDeclarations_8h.html#a0863cf1c24764306839671985373b93f", null ],
[ "pIObjectFactory_base", "XMPCommonFwdDeclarations_8h.html#adb1552a01da782d3f1c5ff00e49f48f8", null ],
[ "pISharedObject_I", "XMPCommonFwdDeclarations_8h.html#ada4b051c58f05ef398453930451bc13d", null ],
[ "pIThreadSafe_I", "XMPCommonFwdDeclarations_8h.html#ac9c07c42e647a17838c9314b8d2823d9", null ],
[ "pIUTF8String", "XMPCommonFwdDeclarations_8h.html#a27ba1868ebc552bef10fe156bd4e340b", null ],
[ "pIUTF8String_base", "XMPCommonFwdDeclarations_8h.html#a0430b250dd00621dcb9f32a3acb87b7f", null ],
[ "pvoid", "XMPCommonFwdDeclarations_8h.html#ab59e650373a5efadfbfd2486313258a6", null ],
[ "spcIConfigurationManager", "XMPCommonFwdDeclarations_8h.html#abe53a5a98ec0d2d6ce76d1d820794177", null ],
[ "spcIError", "XMPCommonFwdDeclarations_8h.html#a3f0de5ba5753a56896f2941927b23560", null ],
[ "spcIErrorNotifier", "XMPCommonFwdDeclarations_8h.html#a4eeee081576f932942082744eb05456c", null ],
[ "spcIMemoryAllocator", "XMPCommonFwdDeclarations_8h.html#a183fc5e814bdad0ddf9946ddc35c307a", null ],
[ "spcIUTF8String", "XMPCommonFwdDeclarations_8h.html#a91aaab1f1d7030b26df65258bd131a63", null ],
[ "spcIUTF8Strings", "XMPCommonFwdDeclarations_8h.html#a181114a44f34efda2d8b4cb98446de86", null ],
[ "spcIUTF8Strings_const", "XMPCommonFwdDeclarations_8h.html#aeea9f5dbdf44fd5382b0ca50dda8f2cf", null ],
[ "spcvoid", "XMPCommonFwdDeclarations_8h.html#a93bf79ca806273dc229d940504654caa", null ],
[ "spIConfigurationManager", "XMPCommonFwdDeclarations_8h.html#a4ebcc3c8cb04d7593dc3250aa720f0d4", null ],
[ "spIError", "XMPCommonFwdDeclarations_8h.html#adb05c7ceec43360b84aee536612151f9", null ],
[ "spIErrorNotifier", "XMPCommonFwdDeclarations_8h.html#a4d5b76921b44568978986a81d2c19336", null ],
[ "spIMemoryAllocator", "XMPCommonFwdDeclarations_8h.html#a9423cafa21356df5d02adf7a94f4e4b0", null ],
[ "spIUTF8String", "XMPCommonFwdDeclarations_8h.html#ac9d7760a1805b1e28828511a2c8c5588", null ],
[ "spIUTF8Strings", "XMPCommonFwdDeclarations_8h.html#a5f4ff8126658f07efbc881778bfea430", null ],
[ "spIUTF8Strings_const", "XMPCommonFwdDeclarations_8h.html#ab4410cb115c4e0fa0800ab6ebf21abf7", null ],
[ "spvoid", "XMPCommonFwdDeclarations_8h.html#a28d3e703084f75a411ece4bb7b22fdd4", null ],
[ "BASE_CLASS", "XMPCommonFwdDeclarations_8h.html#a376c8e3ade71b2836888a07746dd3882", null ],
[ "BASE_CLASS", "XMPCommonFwdDeclarations_8h.html#ae2d21f9711d5fdc36354b14a401ec53b", null ],
[ "BASE_CLASS", "XMPCommonFwdDeclarations_8h.html#a55f2498472648e9b5647f039202820b2", null ],
[ "BASE_CLASS", "XMPCommonFwdDeclarations_8h.html#afee5efdc17ad732ac218b39bb305ed63", null ],
[ "BASE_CLASS", "XMPCommonFwdDeclarations_8h.html#a5d655944f5ebfaff4c6f61d227257035", null ],
[ "BASE_CLASS", "XMPCommonFwdDeclarations_8h.html#a41216c3793d99220ce00076ad18f9b44", null ],
[ "MemAllocate", "XMPCommonFwdDeclarations_8h.html#a3349137d458c48d779670626d7b6ba1a", null ],
[ "MemRelease", "XMPCommonFwdDeclarations_8h.html#ad4c125f465214507005d84a360fa83e5", null ]
];
|
[ "IError_base", "XMPCommonFwdDeclarations_8h.html#a5f4d698bf8beb5f6604b61aa1362d2c6", null ],
|
random_line_split
|
lib.rs
|
// Copyright 2017 Kam Y. Tse
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(warnings)]
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
extern crate uuid;
extern crate serde;
extern crate chrono;
extern crate reqwest;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate serde_derive;
mod account;
mod pomo;
mod todo;
mod client;
pub use self::account::Account;
|
pub use self::client::Client;
/// The Errors that may occur when communicating with Pomotodo server.
pub mod errors {
error_chain! {
types {
Error, ErrorKind, ResultExt;
}
foreign_links {
ReqError(::reqwest::Error);
}
}
}
|
pub use self::pomo::{Pomo, PomoBuilder, PomoParameter};
pub use self::todo::{Todo, SubTodo, TodoBuilder, SubTodoBuilder, TodoParameter};
|
random_line_split
|
GitLabApiRepository.ts
|
import { injectable } from 'inversify';
import * as request from 'superagent';
import { ISourceConfig } from '../../../IConfig';
@injectable()
export class GitLabApiRepository {
public async getUploadedFile(
sourceConfig: ISourceConfig,
projectPath: string,
fileId: string,
filename: string
): Promise<any[]> {
const path = `${projectPath}/uploads/${fileId}/${filename}`;
return await this.performRequest(
this.getUrl(sourceConfig.url, path),
sourceConfig.apitoken,
url => request.get(url)
).then(response => response.body);
}
public async get(
sourceConfig: ISourceConfig,
path: string,
query?: {}
): Promise<request.Response> {
return await this.performRequest(
this.getUrl(sourceConfig.url, `api/v4/${path}`),
sourceConfig.apitoken,
url => request.get(url),
query
);
}
private performRequest(
url: string,
apiToken: string,
requestFactory: (url: string) => request.SuperAgentRequest,
query?: {}
): Promise<request.Response> {
const req = requestFactory(url)
.set('PRIVATE-TOKEN', apiToken)
.accept('application/json');
if (query)
|
return req;
}
private getUrl(baseUrl: string, path: string) {
return (
`${baseUrl}/${path}`
// Remove any duplicate slashes due to user config input
.replace(/\/\//g, '/')
// Make sure that the protocol at the start of the url is correctly
// terminated with a double slash
.replace('/', '//')
);
}
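// Illustration (hypothetical host): getUrl('https://gitlab.example.com/', 'api/v4/projects')
// collapses the accidental '//' and then restores the protocol's double slash,
// yielding 'https://gitlab.example.com/api/v4/projects'.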
}
|
{
req.query(query);
}
|
conditional_block
|
GitLabApiRepository.ts
|
import { injectable } from 'inversify';
import * as request from 'superagent';
import { ISourceConfig } from '../../../IConfig';
@injectable()
export class GitLabApiRepository {
public async getUploadedFile(
sourceConfig: ISourceConfig,
projectPath: string,
fileId: string,
filename: string
): Promise<any[]> {
const path = `${projectPath}/uploads/${fileId}/${filename}`;
return await this.performRequest(
this.getUrl(sourceConfig.url, path),
sourceConfig.apitoken,
url => request.get(url)
).then(response => response.body);
}
public async get(
sourceConfig: ISourceConfig,
path: string,
query?: {}
): Promise<request.Response> {
return await this.performRequest(
this.getUrl(sourceConfig.url, `api/v4/${path}`),
sourceConfig.apitoken,
url => request.get(url),
query
);
}
private performRequest(
url: string,
apiToken: string,
requestFactory: (url: string) => request.SuperAgentRequest,
query?: {}
): Promise<request.Response> {
const req = requestFactory(url)
.set('PRIVATE-TOKEN', apiToken)
.accept('application/json');
if (query) {
req.query(query);
}
return req;
}
|
// Remove any duplicate slashes due to user config input
.replace(/\/\//g, '/')
// Make sure that the protocol at the start of the url is correctly
// terminated with a double slash
.replace('/', '//')
);
}
}
|
private getUrl(baseUrl: string, path: string) {
return (
`${baseUrl}/${path}`
|
random_line_split
|
GitLabApiRepository.ts
|
import { injectable } from 'inversify';
import * as request from 'superagent';
import { ISourceConfig } from '../../../IConfig';
@injectable()
export class
|
{
public async getUploadedFile(
sourceConfig: ISourceConfig,
projectPath: string,
fileId: string,
filename: string
): Promise<any[]> {
const path = `${projectPath}/uploads/${fileId}/${filename}`;
return await this.performRequest(
this.getUrl(sourceConfig.url, path),
sourceConfig.apitoken,
url => request.get(url)
).then(response => response.body);
}
public async get(
sourceConfig: ISourceConfig,
path: string,
query?: {}
): Promise<request.Response> {
return await this.performRequest(
this.getUrl(sourceConfig.url, `api/v4/${path}`),
sourceConfig.apitoken,
url => request.get(url),
query
);
}
private performRequest(
url: string,
apiToken: string,
requestFactory: (url: string) => request.SuperAgentRequest,
query?: {}
): Promise<request.Response> {
const req = requestFactory(url)
.set('PRIVATE-TOKEN', apiToken)
.accept('application/json');
if (query) {
req.query(query);
}
return req;
}
private getUrl(baseUrl: string, path: string) {
return (
`${baseUrl}/${path}`
// Remove any duplicate slashes due to user config input
.replace(/\/\//g, '/')
// Make sure that the protocol at the start of the url is correctly
// terminated with a double slash
.replace('/', '//')
);
}
}
|
GitLabApiRepository
|
identifier_name
|
cisco_nexus_configuration.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Edgar Magana, Cisco Systems, Inc.
#
"""
Configuration consolidation for the Nexus Driver
This module will export the configuration parameters
from the nexus.ini file
"""
from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp
CP = confp.CiscoConfigParser(find_config_file({'plugin': 'cisco'},
"nexus.ini"))
NEXUS_DETAILS = CP['SWITCH']
|
SECTION = CP['DRIVER']
NEXUS_DRIVER = SECTION['name']
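# Expected nexus.ini layout, inferred from the lookups above (section and key
# names come from this module; values are illustrative):
#   [SWITCH]
#   <connection details for the Nexus switch, exposed as NEXUS_DETAILS>
#   [DRIVER]
#   name = <fully qualified Nexus driver class>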
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.