file_name (string, 4–140 chars) | prefix (string, 0–39k chars) | suffix (string, 0–36.1k chars) | middle (string, 0–29.4k chars) | fim_type (4 classes)
---|---|---|---|---|
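Each row below is one fill-in-the-middle (FIM) sample: a source file split into a `prefix`, a held-out `middle`, and a `suffix`, labelled with one of four `fim_type` classes (`identifier_body`, `identifier_name`, `conditional_block`, `random_line_split`). As a minimal sketch of how a consumer might use a row: the field names follow the header above, while the `<|fim_*|>` sentinel tokens are illustrative assumptions, not part of this dataset.

```python
# Reassemble one FIM row into the original file, and render it in a
# prefix-suffix-middle (PSM) prompt layout. The sentinel tokens are
# placeholders -- real FIM models each define their own special tokens.
def reassemble(row):
    return row["prefix"] + row["middle"] + row["suffix"]

def to_psm_prompt(row):
    return ("<|fim_prefix|>" + row["prefix"]
            + "<|fim_suffix|>" + row["suffix"]
            + "<|fim_middle|>")  # the model is trained to emit row["middle"]

row = {
    "file_name": "example.rs",
    "prefix": "fn main() {\n    println!(",
    "middle": "\"hello\"",
    "suffix": ");\n}\n",
    "fim_type": "random_line_split",
}
assert reassemble(row) == 'fn main() {\n    println!("hello");\n}\n'
```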
line.rs | /**
* Flow - Realtime log analyzer
* Copyright (C) 2016 Daniel Mircea
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::cmp::max;
use std::collections::VecDeque;
use std::iter::{Rev, DoubleEndedIterator};
use unicode_width::UnicodeWidthStr;
use core::filter::{Filter, Parser as FilterParser, Constraint, ParserResult as FilterParserResult};
use utils::ansi_decoder::{ComponentCollection, AnsiStr};
#[derive(Clone)]
pub struct Line {
pub content_without_ansi: String,
pub components: Option<ComponentCollection>,
pub width: usize,
}
impl Line {
pub fn new(content: String) -> Line {
let has_ansi = content.has_ansi_escape_sequence();
let (content_without_ansi, components) = if has_ansi {
(content.strip_ansi(), Some(content.to_components()))
} else {
(content, None)
};
Line {
width: content_without_ansi.width(),
content_without_ansi: content_without_ansi,
components: components,
}
}
pub fn guess_height(&self, container_width: usize) -> usize {
max(1,
(self.width as f32 / container_width as f32).ceil() as usize)
}
pub fn matches_for(&self, text: &str) -> Vec<(usize, &str)> {
self.content_without_ansi.match_indices(text).collect()
}
pub fn contains(&self, text: &str) -> bool {
self.content_without_ansi.contains(text)
}
}
pub struct LineCollection {
pub entries: VecDeque<Line>,
capacity: usize,
}
impl LineCollection {
pub fn new(capacity: usize) -> LineCollection |
fn clear_excess(&mut self) {
while self.entries.len() > self.capacity {
self.entries.pop_front();
}
}
pub fn len(&self) -> usize {
self.entries.len()
}
fn add(&mut self, item: String) {
self.entries.push_back(Line::new(item));
}
}
impl Extend<String> for LineCollection {
fn extend<T: IntoIterator<Item = String>>(&mut self, iter: T) {
for item in iter {
self.add(item);
}
self.clear_excess();
}
}
pub struct ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
iterator: I,
parser: FilterParser,
pending: Vec<&'a Line>,
}
impl<'a, I> ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
fn handle_empty(&mut self) -> Option<I::Item> {
self.iterator.next()
}
fn handle_content(&mut self) -> Option<I::Item> {
let matcher = self.parser.filter.content.as_ref().unwrap();
(&mut self.iterator).filter(|line| matcher.is_match(&line.content_without_ansi)).next()
}
fn handle_boundaries(&mut self) -> Option<I::Item> {
if self.pending.is_empty() {
let mut match_found = false;
for line in &mut self.iterator {
match self.parser.matches(&line.content_without_ansi) {
FilterParserResult::Match => self.pending.push(line),
FilterParserResult::LastMatch(append) => {
match_found = true;
if append {
self.pending.push(line);
}
break;
}
FilterParserResult::Invalid(append) => {
self.pending.clear();
if append {
self.pending.push(line);
}
}
FilterParserResult::NoMatch => {}
}
}
if !(match_found || self.parser.assume_found_matches()) {
return None;
}
self.pending.reverse();
}
self.pending.pop()
}
}
pub trait Parser<'a>: Iterator<Item = &'a Line> {
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized;
}
impl<'a, I> Parser<'a> for I
where I: Iterator<Item = &'a Line>
{
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized
{
ParserState {
iterator: self.rev(),
pending: vec![],
parser: FilterParser::new(filter),
}
}
}
impl<'a, I> Iterator for ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.parser.constraints.is_empty() {
self.handle_empty()
} else if self.parser.constraints == vec![Constraint::Content] {
self.handle_content()
} else {
self.handle_boundaries()
}
}
}
| {
LineCollection {
entries: VecDeque::new(),
capacity: capacity,
}
} | identifier_body |
line.rs | /**
* Flow - Realtime log analyzer
* Copyright (C) 2016 Daniel Mircea
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::cmp::max;
use std::collections::VecDeque;
use std::iter::{Rev, DoubleEndedIterator};
use unicode_width::UnicodeWidthStr;
use core::filter::{Filter, Parser as FilterParser, Constraint, ParserResult as FilterParserResult};
use utils::ansi_decoder::{ComponentCollection, AnsiStr};
#[derive(Clone)]
pub struct Line {
pub content_without_ansi: String,
pub components: Option<ComponentCollection>,
pub width: usize,
}
impl Line {
pub fn new(content: String) -> Line {
let has_ansi = content.has_ansi_escape_sequence();
let (content_without_ansi, components) = if has_ansi {
(content.strip_ansi(), Some(content.to_components()))
} else {
(content, None)
};
Line {
width: content_without_ansi.width(),
content_without_ansi: content_without_ansi,
components: components,
}
}
pub fn guess_height(&self, container_width: usize) -> usize {
max(1,
(self.width as f32 / container_width as f32).ceil() as usize)
}
pub fn matches_for(&self, text: &str) -> Vec<(usize, &str)> {
self.content_without_ansi.match_indices(text).collect()
}
pub fn contains(&self, text: &str) -> bool {
self.content_without_ansi.contains(text)
}
}
pub struct LineCollection {
pub entries: VecDeque<Line>,
capacity: usize,
}
impl LineCollection {
pub fn new(capacity: usize) -> LineCollection {
LineCollection {
entries: VecDeque::new(),
capacity: capacity,
}
}
fn clear_excess(&mut self) {
while self.entries.len() > self.capacity {
self.entries.pop_front();
}
}
pub fn len(&self) -> usize {
self.entries.len()
}
fn add(&mut self, item: String) {
self.entries.push_back(Line::new(item));
}
}
impl Extend<String> for LineCollection {
fn extend<T: IntoIterator<Item = String>>(&mut self, iter: T) {
for item in iter {
self.add(item);
}
self.clear_excess();
}
}
pub struct ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
iterator: I,
parser: FilterParser,
pending: Vec<&'a Line>,
}
impl<'a, I> ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
fn handle_empty(&mut self) -> Option<I::Item> {
self.iterator.next()
}
fn handle_content(&mut self) -> Option<I::Item> {
let matcher = self.parser.filter.content.as_ref().unwrap();
(&mut self.iterator).filter(|line| matcher.is_match(&line.content_without_ansi)).next()
}
fn handle_boundaries(&mut self) -> Option<I::Item> {
if self.pending.is_empty() {
let mut match_found = false;
for line in &mut self.iterator {
match self.parser.matches(&line.content_without_ansi) {
FilterParserResult::Match => self.pending.push(line),
FilterParserResult::LastMatch(append) => |
FilterParserResult::Invalid(append) => {
self.pending.clear();
if append {
self.pending.push(line);
}
}
FilterParserResult::NoMatch => {}
}
}
if !(match_found || self.parser.assume_found_matches()) {
return None;
}
self.pending.reverse();
}
self.pending.pop()
}
}
pub trait Parser<'a>: Iterator<Item = &'a Line> {
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized;
}
impl<'a, I> Parser<'a> for I
where I: Iterator<Item = &'a Line>
{
fn parse(self, filter: Filter) -> ParserState<'a, Rev<Self>>
where Self: DoubleEndedIterator + Sized
{
ParserState {
iterator: self.rev(),
pending: vec![],
parser: FilterParser::new(filter),
}
}
}
impl<'a, I> Iterator for ParserState<'a, I>
where I: DoubleEndedIterator<Item = &'a Line>
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.parser.constraints.is_empty() {
self.handle_empty()
} else if self.parser.constraints == vec![Constraint::Content] {
self.handle_content()
} else {
self.handle_boundaries()
}
}
}
| {
match_found = true;
if append {
self.pending.push(line);
}
break;
} | conditional_block |
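The `guess_height` method in the rows above estimates how many terminal rows a line occupies once wrapped: its ANSI-stripped display width divided by the container width, rounded up, floored at one row. The same arithmetic in a short Python sketch (the function name mirrors the Rust method; the numbers are illustrative):

```python
import math

def guess_height(line_width, container_width):
    # A line always occupies at least one terminal row, even when empty.
    return max(1, math.ceil(line_width / container_width))

assert guess_height(0, 80) == 1    # empty line still takes a row
assert guess_height(80, 80) == 1   # exact fit
assert guess_height(81, 80) == 2   # one cell spills onto a second row
```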
deriving-cmp-generic-struct.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq, TotalEq, Ord, TotalOrd)]
struct S<T> {
x: T,
y: T
}
pub fn main() | {
let s1 = S {x: 1, y: 1};
let s2 = S {x: 1, y: 2};
// in order for both Ord and TotalOrd
let ss = [s1, s2];
for (i, s1) in ss.iter().enumerate() {
for (j, s2) in ss.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// Eq
assert_eq!(*s1 == *s2, eq);
assert_eq!(*s1 != *s2, !eq);
// TotalEq
assert_eq!(s1.equals(s2), eq);
// Ord
assert_eq!(*s1 < *s2, lt);
assert_eq!(*s1 > *s2, gt);
assert_eq!(*s1 <= *s2, le);
assert_eq!(*s1 >= *s2, ge);
// TotalOrd
assert_eq!(s1.cmp(s2), ord);
}
}
} | identifier_body |
|
deriving-cmp-generic-struct.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| y: T
}
pub fn main() {
let s1 = S {x: 1, y: 1};
let s2 = S {x: 1, y: 2};
// in order for both Ord and TotalOrd
let ss = [s1, s2];
for (i, s1) in ss.iter().enumerate() {
for (j, s2) in ss.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// Eq
assert_eq!(*s1 == *s2, eq);
assert_eq!(*s1 != *s2, !eq);
// TotalEq
assert_eq!(s1.equals(s2), eq);
// Ord
assert_eq!(*s1 < *s2, lt);
assert_eq!(*s1 > *s2, gt);
assert_eq!(*s1 <= *s2, le);
assert_eq!(*s1 >= *s2, ge);
// TotalOrd
assert_eq!(s1.cmp(s2), ord);
}
}
} | #[deriving(Eq, TotalEq, Ord, TotalOrd)]
struct S<T> {
x: T, | random_line_split |
deriving-cmp-generic-struct.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq, TotalEq, Ord, TotalOrd)]
struct S<T> {
x: T,
y: T
}
pub fn | () {
let s1 = S {x: 1, y: 1};
let s2 = S {x: 1, y: 2};
// in order for both Ord and TotalOrd
let ss = [s1, s2];
for (i, s1) in ss.iter().enumerate() {
for (j, s2) in ss.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// Eq
assert_eq!(*s1 == *s2, eq);
assert_eq!(*s1 != *s2, !eq);
// TotalEq
assert_eq!(s1.equals(s2), eq);
// Ord
assert_eq!(*s1 < *s2, lt);
assert_eq!(*s1 > *s2, gt);
assert_eq!(*s1 <= *s2, le);
assert_eq!(*s1 >= *s2, ge);
// TotalOrd
assert_eq!(s1.cmp(s2), ord);
}
}
}
| main | identifier_name |
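All three rows above exercise the same property: `#[deriving(...)]` produces lexicographic, field-declaration-order comparisons, so `S {x: 1, y: 1}` sorts before `S {x: 1, y: 2}` exactly as index 0 sorts before index 1. Python tuples follow the same rule, which makes for a one-screen restatement of the invariant the test checks:

```python
# Derived struct comparisons are lexicographic over fields in declaration
# order -- the rule Python applies to tuples.
s1 = (1, 1)  # S { x: 1, y: 1 }
s2 = (1, 2)  # S { x: 1, y: 2 }

assert s1 < s2                      # x ties, so y decides
assert not (s1 == s2)
assert sorted([s2, s1]) == [s1, s2]
```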
scp_utils.py | __author__ = 'cmantas'
#python ssh lib
import paramiko
import string
import sys
from socket import error as socketError
sys.path.append('lib/scp.py')
from lib.scp import SCPClient
from datetime import datetime, timedelta
from lib.persistance_module import env_vars, home
from time import time
import sys, traceback
ssh_timeout = 10
def reindent(s, numSpaces, prefix=''):
s = string.split(s, '\n')
s = [(numSpaces * ' ') +prefix+ string.lstrip(line) for line in s]
s = string.join(s, '\n')
return s
def run_ssh_command(host, user, command, indent=1, prefix="$: ", logger=None):
"""
runs a command via ssh to the specified host
:param host:
:param user:
:param command:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(home+env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not logger is None:
logger.debug("Connecting to SSH")
timer = Timer.get_timer()
try:
ssh.connect(host, username=user, timeout=ssh_timeout, pkey=private_key, allow_agent=False, look_for_keys=False)
if not logger is None:
logger.debug("connected in %d sec. now Running SSH command" % timer.stop())
timer.start()
### EXECUTE THE COMMAND ###
stdin, stdout, stderr = ssh.exec_command(command)
ret = ''
for line in stdout:
ret += line
for line in stderr:
ret += line
# close the ssh connection
ssh.close()
if not logger is None:
logger.debug("SSH command took %d sec" % timer.stop())
return reindent(ret, indent, prefix=prefix)
except:
if not logger is None:
logger.error("Could not connect to "+ str(host))
traceback.print_exc()
def | (host, user, files, remote_path='.', recursive=False):
"""
puts the specified file to the specified host
:param host:
:param user:
:param files:
:param remote_path:
:param recursive:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, timeout=ssh_giveup_timeout, pkey=private_key)
scpc=SCPClient(ssh.get_transport())
scpc.put(files, remote_path, recursive)
ssh.close()
def test_ssh(host, user, logger=None):
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
end_time = datetime.now()+timedelta(seconds=ssh_giveup_timeout)
try:
rv = run_ssh_command(host, user, 'echo success', logger=logger)
# run_ssh_command swallows its own errors and returns None on failure
return rv is not None
except:
return False
class Timer():
"""
Helper class that gives the ability to measure time between events
"""
def __init__(self):
self.started = False
self.start_time = 0
def start(self):
if self.started is True:
raise Exception("timer already started")
self.started = True
self.start_time = int(round(time() * 1000))
def stop(self):
end_time = int(round(time() * 1000))
if self.started is False:
print " Timer had not been started"
return 0.0
start_time = self.start_time
self.start_time = 0
self.started = False
return float(end_time - start_time)/1000
@staticmethod
def get_timer():
timer = Timer()
timer.start()
return timer | put_file_scp | identifier_name |
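The `Timer` helper in the row above wraps `time()` in start/stop bookkeeping and reports elapsed seconds as a float. A self-contained sketch of the same pattern, reimplemented here (rather than imported) because the module depends on the project-local `lib.*` packages:

```python
from time import sleep, time

class Timer(object):
    """Measures wall-clock seconds between start() and stop()."""
    def __init__(self):
        self.start_time = None

    def start(self):
        if self.start_time is not None:
            raise Exception("timer already started")
        self.start_time = time()

    def stop(self):
        if self.start_time is None:
            return 0.0  # mirrors the original's behaviour when not started
        elapsed = time() - self.start_time
        self.start_time = None
        return elapsed

timer = Timer()
timer.start()
sleep(0.1)
print("took %.3f sec" % timer.stop())  # roughly 0.1
```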
scp_utils.py | __author__ = 'cmantas'
#python ssh lib
import paramiko
import string
import sys
from socket import error as socketError
sys.path.append('lib/scp.py')
from lib.scp import SCPClient
from datetime import datetime, timedelta
from lib.persistance_module import env_vars, home
from time import time
import sys, traceback
ssh_timeout = 10
def reindent(s, numSpaces, prefix=''):
s = string.split(s, '\n')
s = [(numSpaces * ' ') +prefix+ string.lstrip(line) for line in s]
s = string.join(s, '\n')
return s
def run_ssh_command(host, user, command, indent=1, prefix="$: ", logger=None):
"""
runs a command via ssh to the specified host
:param host:
:param user:
:param command:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(home+env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not logger is None:
logger.debug("Connecting to SSH")
timer = Timer.get_timer()
try:
ssh.connect(host, username=user, timeout=ssh_timeout, pkey=private_key, allow_agent=False, look_for_keys=False)
if not logger is None:
logger.debug("connected in %d sec. now Running SSH command" % timer.stop())
timer.start()
### EXECUTE THE COMMAND ###
stdin, stdout, stderr = ssh.exec_command(command)
ret = ''
for line in stdout:
ret += line
for line in stderr:
ret += line
# close the ssh connection
ssh.close()
if not logger is None:
logger.debug("SSH command took %d sec" % timer.stop())
return reindent(ret, indent, prefix=prefix)
except:
if not logger is None:
logger.error("Could not connect to "+ str(host))
traceback.print_exc()
def put_file_scp (host, user, files, remote_path='.', recursive=False):
"""
puts the specified file to the specified host
:param host:
:param user:
:param files:
:param remote_path:
:param recursive:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, timeout=ssh_giveup_timeout, pkey=private_key)
scpc=SCPClient(ssh.get_transport())
scpc.put(files, remote_path, recursive)
ssh.close()
def test_ssh(host, user, logger=None):
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
end_time = datetime.now()+timedelta(seconds=ssh_giveup_timeout)
try:
rv = run_ssh_command(host, user, 'echo success', logger=logger)
# run_ssh_command swallows its own errors and returns None on failure
return rv is not None
except:
return False
class Timer():
"""
Helper class that gives the ability to measure time between events
"""
def __init__(self):
self.started = False
self.start_time = 0
def start(self):
if self.started is True:
raise Exception("timer already started")
self.started = True
self.start_time = int(round(time() * 1000))
def stop(self):
|
@staticmethod
def get_timer():
timer = Timer()
timer.start()
return timer | end_time = int(round(time() * 1000))
if self.started is False:
print " Timer had not been started"
return 0.0
start_time = self.start_time
self.start_time = 0
self.started = False
return float(end_time - start_time)/1000 | identifier_body |
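`reindent` in the rows above pads every line of captured output with spaces plus an optional prefix (the `$: ` default marks it as shell output). It leans on the `string` module functions, a Python 2 idiom; an equivalent built on the `str` methods alone would be:

```python
def reindent(s, num_spaces, prefix=''):
    # Same behaviour as the version above, without the deprecated
    # string.split/string.join module functions.
    return '\n'.join(num_spaces * ' ' + prefix + line.lstrip()
                     for line in s.split('\n'))

print(reindent("total 0\ndrwxr-xr-x 2 user", 4, prefix="$: "))
# prints:
#     $: total 0
#     $: drwxr-xr-x 2 user
```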
scp_utils.py | __author__ = 'cmantas'
#python ssh lib
import paramiko
import string
import sys
from socket import error as socketError
sys.path.append('lib/scp.py')
from lib.scp import SCPClient
from datetime import datetime, timedelta
from lib.persistance_module import env_vars, home
from time import time
import sys, traceback
ssh_timeout = 10
def reindent(s, numSpaces, prefix=''):
s = string.split(s, '\n')
s = [(numSpaces * ' ') +prefix+ string.lstrip(line) for line in s]
s = string.join(s, '\n')
return s
def run_ssh_command(host, user, command, indent=1, prefix="$: ", logger=None):
"""
runs a command via ssh to the specified host
:param host:
:param user:
:param command:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(home+env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not logger is None:
logger.debug("Connecting to SSH")
timer = Timer.get_timer()
try:
ssh.connect(host, username=user, timeout=ssh_timeout, pkey=private_key, allow_agent=False, look_for_keys=False)
if not logger is None:
logger.debug("connected in %d sec. now Running SSH command" % timer.stop())
timer.start()
### EXECUTE THE COMMAND ###
stdin, stdout, stderr = ssh.exec_command(command)
ret = ''
for line in stdout:
ret += line
for line in stderr:
ret += line
# close the ssh connection
ssh.close()
if not logger is None:
logger.debug("SSH command took %d sec" % timer.stop())
return reindent(ret, indent, prefix=prefix)
except:
if not logger is None:
logger.error("Could not connect to "+ str(host))
traceback.print_exc()
def put_file_scp (host, user, files, remote_path='.', recursive=False):
"""
puts the specified file to the specified host
:param host:
:param user:
:param files:
:param remote_path:
:param recursive: | ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, timeout=ssh_giveup_timeout, pkey=private_key)
scpc=SCPClient(ssh.get_transport())
scpc.put(files, remote_path, recursive)
ssh.close()
def test_ssh(host, user, logger=None):
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
end_time = datetime.now()+timedelta(seconds=ssh_giveup_timeout)
try:
rv = run_ssh_command(host, user, 'echo success', logger=logger)
# run_ssh_command swallows its own errors and returns None on failure
return rv is not None
except:
return False
class Timer():
"""
Helper class that gives the ability to measure time between events
"""
def __init__(self):
self.started = False
self.start_time = 0
def start(self):
if self.started is True:
raise Exception("timer already started")
self.started = True
self.start_time = int(round(time() * 1000))
def stop(self):
end_time = int(round(time() * 1000))
if self.started is False:
print " Timer had not been started"
return 0.0
start_time = self.start_time
self.start_time = 0
self.started = False
return float(end_time - start_time)/1000
@staticmethod
def get_timer():
timer = Timer()
timer.start()
return timer | :return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(env_vars["priv_key_path"])
ssh = paramiko.SSHClient() | random_line_split |
scp_utils.py | __author__ = 'cmantas'
#python ssh lib
import paramiko
import string
import sys
from socket import error as socketError
sys.path.append('lib/scp.py')
from lib.scp import SCPClient
from datetime import datetime, timedelta
from lib.persistance_module import env_vars, home
from time import time
import sys, traceback
ssh_timeout = 10
def reindent(s, numSpaces, prefix=''):
s = string.split(s, '\n')
s = [(numSpaces * ' ') +prefix+ string.lstrip(line) for line in s]
s = string.join(s, '\n')
return s
def run_ssh_command(host, user, command, indent=1, prefix="$: ", logger=None):
"""
runs a command via ssh to the specified host
:param host:
:param user:
:param command:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(home+env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not logger is None:
|
timer = Timer.get_timer()
try:
ssh.connect(host, username=user, timeout=ssh_timeout, pkey=private_key, allow_agent=False, look_for_keys=False)
if not logger is None:
logger.debug("connected in %d sec. now Running SSH command" % timer.stop())
timer.start()
### EXECUTE THE COMMAND ###
stdin, stdout, stderr = ssh.exec_command(command)
ret = ''
for line in stdout:
ret += line
for line in stderr:
ret += line
# close the ssh connection
ssh.close()
if not logger is None:
logger.debug("SSH command took %d sec" % timer.stop())
return reindent(ret, indent, prefix=prefix)
except:
if not logger is None:
logger.error("Could not connect to "+ str(host))
traceback.print_exc()
def put_file_scp (host, user, files, remote_path='.', recursive=False):
"""
puts the specified file to the specified host
:param host:
:param user:
:param files:
:param remote_path:
:param recursive:
:return:
"""
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
private_key = paramiko.RSAKey.from_private_key_file(env_vars["priv_key_path"])
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, timeout=ssh_giveup_timeout, pkey=private_key)
scpc=SCPClient(ssh.get_transport())
scpc.put(files, remote_path, recursive)
ssh.close()
def test_ssh(host, user, logger=None):
ssh_giveup_timeout = env_vars['ssh_giveup_timeout']
end_time = datetime.now()+timedelta(seconds=ssh_giveup_timeout)
try:
rv = run_ssh_command(host, user, 'echo success', logger=logger)
# run_ssh_command swallows its own errors and returns None on failure
return rv is not None
except:
return False
class Timer():
"""
Helper class that gives the ability to measure time between events
"""
def __init__(self):
self.started = False
self.start_time = 0
def start(self):
if self.started is True:
raise Exception("timer already started")
self.started = True
self.start_time = int(round(time() * 1000))
def stop(self):
end_time = int(round(time() * 1000))
if self.started is False:
print " Timer had not been started"
return 0.0
start_time = self.start_time
self.start_time = 0
self.started = False
return float(end_time - start_time)/1000
@staticmethod
def get_timer():
timer = Timer()
timer.start()
return timer | logger.debug("Connecting to SSH") | conditional_block |
BottomSection.test.tsx | import React from 'react';
import { shallow } from 'enzyme';
import BottomSection from './BottomSection';
jest.mock('../../config', () => ({
bootData: {
navTree: [
{
id: 'profile',
hideFromMenu: true,
},
{
hideFromMenu: true,
},
{
hideFromMenu: false, | },
{
hideFromMenu: true,
},
],
},
user: {
orgCount: 5,
orgName: 'Grafana',
},
}));
jest.mock('app/core/services/context_srv', () => ({
contextSrv: {
sidemenu: true,
isSignedIn: false,
isGrafanaAdmin: false,
hasEditPermissionFolders: false,
},
}));
describe('Render', () => {
it('should render component', () => {
const wrapper = shallow(<BottomSection />);
expect(wrapper).toMatchSnapshot();
});
}); | random_line_split |
|
index.d.ts | // Generated by typings
// Source: https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/80060c94ef549c077a011977c2b5461bd0fd8947/electron-devtools-installer/index.d.ts
declare module "electron-devtools-installer" {
interface ExtensionReference {
id: string,
electron: string,
}
// Default installation function
export default function (extensionReference: ExtensionReference | string, forceDownload?: boolean): Promise<string>;
// Devtools themselves
export const EMBER_INSPECTOR: ExtensionReference;
export const REACT_DEVELOPER_TOOLS: ExtensionReference; | export const BACKBONE_DEBUGGER: ExtensionReference;
export const JQUERY_DEBUGGER: ExtensionReference;
export const ANGULARJS_BATARANG: ExtensionReference;
export const VUEJS_DEVTOOLS: ExtensionReference;
export const REDUX_DEVTOOLS: ExtensionReference;
export const REACT_PERF: ExtensionReference;
} | random_line_split |
|
mod.rs | //! Thrift generated Jaeger client
//!
//! Definitions: <https://github.com/uber/jaeger-idl/blob/master/thrift/>
use std::time::{Duration, SystemTime};
use opentelemetry::trace::Event;
use opentelemetry::{Key, KeyValue, Value};
pub(crate) mod agent;
pub(crate) mod jaeger;
pub(crate) mod zipkincore;
impl From<super::Process> for jaeger::Process {
fn | (process: super::Process) -> jaeger::Process {
jaeger::Process::new(
process.service_name,
Some(process.tags.into_iter().map(Into::into).collect()),
)
}
}
impl From<Event> for jaeger::Log {
fn from(event: crate::exporter::Event) -> jaeger::Log {
let timestamp = event
.timestamp
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_micros() as i64;
let mut event_set_via_attribute = false;
let mut fields = event
.attributes
.into_iter()
.map(|attr| {
if attr.key.as_str() == "event" {
event_set_via_attribute = true;
};
attr.into()
})
.collect::<Vec<_>>();
if !event_set_via_attribute {
fields.push(Key::new("event").string(event.name).into());
}
if event.dropped_attributes_count != 0 {
fields.push(
Key::new("otel.event.dropped_attributes_count")
.i64(i64::from(event.dropped_attributes_count))
.into(),
);
}
jaeger::Log::new(timestamp, fields)
}
}
#[rustfmt::skip]
impl From<KeyValue> for jaeger::Tag {
fn from(kv: KeyValue) -> jaeger::Tag {
let KeyValue { key, value } = kv;
match value {
Value::String(s) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(s.into()), None, None, None, None),
Value::F64(f) => jaeger::Tag::new(key.into(), jaeger::TagType::Double, None, Some(f.into()), None, None, None),
Value::Bool(b) => jaeger::Tag::new(key.into(), jaeger::TagType::Bool, None, None, Some(b), None, None),
Value::I64(i) => jaeger::Tag::new(key.into(), jaeger::TagType::Long, None, None, None, Some(i), None),
// TODO: better Array handling, jaeger thrift doesn't support arrays
v @ Value::Array(_) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(v.to_string()), None, None, None, None),
}
}
}
| from | identifier_name |
mod.rs | //! Thrift generated Jaeger client
//!
//! Definitions: <https://github.com/uber/jaeger-idl/blob/master/thrift/>
use std::time::{Duration, SystemTime};
use opentelemetry::trace::Event;
use opentelemetry::{Key, KeyValue, Value};
|
impl From<super::Process> for jaeger::Process {
fn from(process: super::Process) -> jaeger::Process {
jaeger::Process::new(
process.service_name,
Some(process.tags.into_iter().map(Into::into).collect()),
)
}
}
impl From<Event> for jaeger::Log {
fn from(event: crate::exporter::Event) -> jaeger::Log {
let timestamp = event
.timestamp
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_micros() as i64;
let mut event_set_via_attribute = false;
let mut fields = event
.attributes
.into_iter()
.map(|attr| {
if attr.key.as_str() == "event" {
event_set_via_attribute = true;
};
attr.into()
})
.collect::<Vec<_>>();
if !event_set_via_attribute {
fields.push(Key::new("event").string(event.name).into());
}
if event.dropped_attributes_count != 0 {
fields.push(
Key::new("otel.event.dropped_attributes_count")
.i64(i64::from(event.dropped_attributes_count))
.into(),
);
}
jaeger::Log::new(timestamp, fields)
}
}
#[rustfmt::skip]
impl From<KeyValue> for jaeger::Tag {
fn from(kv: KeyValue) -> jaeger::Tag {
let KeyValue { key, value } = kv;
match value {
Value::String(s) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(s.into()), None, None, None, None),
Value::F64(f) => jaeger::Tag::new(key.into(), jaeger::TagType::Double, None, Some(f.into()), None, None, None),
Value::Bool(b) => jaeger::Tag::new(key.into(), jaeger::TagType::Bool, None, None, Some(b), None, None),
Value::I64(i) => jaeger::Tag::new(key.into(), jaeger::TagType::Long, None, None, None, Some(i), None),
// TODO: better Array handling, jaeger thrift doesn't support arrays
v @ Value::Array(_) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(v.to_string()), None, None, None, None),
}
}
} | pub(crate) mod agent;
pub(crate) mod jaeger;
pub(crate) mod zipkincore; | random_line_split |
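The `From<KeyValue>` impl above fans a dynamically typed value out into the single populated slot of a Jaeger thrift `Tag` (string, double, bool, or long), falling back to stringification for arrays since the thrift schema has no list slot. The same dispatch sketched in Python, with a plain dict standing in for the generated thrift struct:

```python
def to_jaeger_tag(key, value):
    # One populated slot per tag, mirroring the Rust match arms.
    if isinstance(value, bool):           # bool before int: True is an int
        return {"key": key, "vType": "BOOL", "vBool": value}
    if isinstance(value, int):
        return {"key": key, "vType": "LONG", "vLong": value}
    if isinstance(value, float):
        return {"key": key, "vType": "DOUBLE", "vDouble": value}
    # strings, plus arrays stringified as a workaround
    return {"key": key, "vType": "STRING", "vStr": str(value)}

assert to_jaeger_tag("retries", 3)["vType"] == "LONG"
assert to_jaeger_tag("ok", True)["vType"] == "BOOL"
assert to_jaeger_tag("ids", [1, 2])["vType"] == "STRING"
```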
mod.rs | //! Thrift generated Jaeger client
//!
//! Definitions: <https://github.com/uber/jaeger-idl/blob/master/thrift/>
use std::time::{Duration, SystemTime};
use opentelemetry::trace::Event;
use opentelemetry::{Key, KeyValue, Value};
pub(crate) mod agent;
pub(crate) mod jaeger;
pub(crate) mod zipkincore;
impl From<super::Process> for jaeger::Process {
fn from(process: super::Process) -> jaeger::Process {
jaeger::Process::new(
process.service_name,
Some(process.tags.into_iter().map(Into::into).collect()),
)
}
}
impl From<Event> for jaeger::Log {
fn from(event: crate::exporter::Event) -> jaeger::Log |
}
#[rustfmt::skip]
impl From<KeyValue> for jaeger::Tag {
fn from(kv: KeyValue) -> jaeger::Tag {
let KeyValue { key, value } = kv;
match value {
Value::String(s) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(s.into()), None, None, None, None),
Value::F64(f) => jaeger::Tag::new(key.into(), jaeger::TagType::Double, None, Some(f.into()), None, None, None),
Value::Bool(b) => jaeger::Tag::new(key.into(), jaeger::TagType::Bool, None, None, Some(b), None, None),
Value::I64(i) => jaeger::Tag::new(key.into(), jaeger::TagType::Long, None, None, None, Some(i), None),
// TODO: better Array handling, jaeger thrift doesn't support arrays
v @ Value::Array(_) => jaeger::Tag::new(key.into(), jaeger::TagType::String, Some(v.to_string()), None, None, None, None),
}
}
}
| {
let timestamp = event
.timestamp
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_micros() as i64;
let mut event_set_via_attribute = false;
let mut fields = event
.attributes
.into_iter()
.map(|attr| {
if attr.key.as_str() == "event" {
event_set_via_attribute = true;
};
attr.into()
})
.collect::<Vec<_>>();
if !event_set_via_attribute {
fields.push(Key::new("event").string(event.name).into());
}
if event.dropped_attributes_count != 0 {
fields.push(
Key::new("otel.event.dropped_attributes_count")
.i64(i64::from(event.dropped_attributes_count))
.into(),
);
}
jaeger::Log::new(timestamp, fields)
} | identifier_body |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReprex(RPackage):
| """Convenience wrapper that uses the 'rmarkdown' package to render small
snippets of code to target formats that include both code and output.
The goal is to encourage the sharing of small, reproducible, and
runnable examples on code-oriented websites, such as
<http://stackoverflow.com> and <https://github.com>, or in email.
'reprex' also extracts clean, runnable R code from various common
formats, such as copy/paste from an R session."""
homepage = "https://github.com/jennybc/reprex"
url = "https://cloud.r-project.org/src/contrib/reprex_0.1.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/reprex"
version('0.3.0', sha256='203c2ae6343f6ff887e7a5a3f5d20bae465f6e8d9745c982479f5385f4effb6c')
version('0.2.1', sha256='5d234ddfbcadc5a5194a58eb88973c51581e7e2e231f146974af8f42747b45f3')
version('0.1.1', sha256='919ae93039b2d8fb8eace98da9376c031d734d9e75c237efb24d047f35b5ba4b')
depends_on('[email protected]:', when='@:0.1.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.2.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-knitr', when='@:0.1.9', type=('build', 'run'))
depends_on('r-rmarkdown', type=('build', 'run'))
depends_on('r-whisker', type=('build', 'run'))
depends_on('r-rlang', when='@0.2.0:', type=('build', 'run'))
depends_on('r-withr', when='@0.2.0:', type=('build', 'run'))
depends_on('r-fs', when='@0.2.1:', type=('build', 'run'))
depends_on('[email protected]:') | identifier_body |
|
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReprex(RPackage):
"""Convenience wrapper that uses the 'rmarkdown' package to render small
snippets of code to target formats that include both code and output.
The goal is to encourage the sharing of small, reproducible, and
runnable examples on code-oriented websites, such as
<http://stackoverflow.com> and <https://github.com>, or in email.
'reprex' also extracts clean, runnable R code from various common
formats, such as copy/paste from an R session."""
homepage = "https://github.com/jennybc/reprex"
url = "https://cloud.r-project.org/src/contrib/reprex_0.1.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/reprex"
version('0.3.0', sha256='203c2ae6343f6ff887e7a5a3f5d20bae465f6e8d9745c982479f5385f4effb6c')
version('0.2.1', sha256='5d234ddfbcadc5a5194a58eb88973c51581e7e2e231f146974af8f42747b45f3')
version('0.1.1', sha256='919ae93039b2d8fb8eace98da9376c031d734d9e75c237efb24d047f35b5ba4b')
depends_on('[email protected]:', when='@:0.1.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.2.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-knitr', when='@:0.1.9', type=('build', 'run')) | depends_on('r-withr', when='@0.2.0:', type=('build', 'run'))
depends_on('r-fs', when='@0.2.1:', type=('build', 'run'))
depends_on('[email protected]:') | depends_on('r-rmarkdown', type=('build', 'run'))
depends_on('r-whisker', type=('build', 'run'))
depends_on('r-rlang', when='@0.2.0:', type=('build', 'run')) | random_line_split |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class | (RPackage):
"""Convenience wrapper that uses the 'rmarkdown' package to render small
snippets of code to target formats that include both code and output.
The goal is to encourage the sharing of small, reproducible, and
runnable examples on code-oriented websites, such as
<http://stackoverflow.com> and <https://github.com>, or in email.
'reprex' also extracts clean, runnable R code from various common
formats, such as copy/paste from an R session."""
homepage = "https://github.com/jennybc/reprex"
url = "https://cloud.r-project.org/src/contrib/reprex_0.1.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/reprex"
version('0.3.0', sha256='203c2ae6343f6ff887e7a5a3f5d20bae465f6e8d9745c982479f5385f4effb6c')
version('0.2.1', sha256='5d234ddfbcadc5a5194a58eb88973c51581e7e2e231f146974af8f42747b45f3')
version('0.1.1', sha256='919ae93039b2d8fb8eace98da9376c031d734d9e75c237efb24d047f35b5ba4b')
depends_on('[email protected]:', when='@:0.1.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.2.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-knitr', when='@:0.1.9', type=('build', 'run'))
depends_on('r-rmarkdown', type=('build', 'run'))
depends_on('r-whisker', type=('build', 'run'))
depends_on('r-rlang', when='@0.2.0:', type=('build', 'run'))
depends_on('r-withr', when='@0.2.0:', type=('build', 'run'))
depends_on('r-fs', when='@0.2.1:', type=('build', 'run'))
depends_on('[email protected]:')
| RReprex | identifier_name |
takeaway.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.event import Event
from db.player import Player
from db.team import Team
class Takeaway(Base, SpecificEvent):
__tablename__ = 'takeaways'
__autoload__ = True
HUMAN_READABLE = 'takeaway'
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "taken_from_team_id"
]
def | (self, event_id, data_dict):
self.takeaway_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
setattr(self, attr, None)
def __str__(self):
plr = Player.find_by_id(self.player_id)
event = Event.find_by_id(self.event_id)
team = Team.find_by_id(self.team_id)
return "Takeaway: %s (%s) - %s" % (
plr.name, team.abbr, event)
| __init__ | identifier_name |
takeaway.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.event import Event
from db.player import Player
from db.team import Team
class Takeaway(Base, SpecificEvent):
__tablename__ = 'takeaways'
__autoload__ = True
HUMAN_READABLE = 'takeaway'
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "taken_from_team_id"
]
def __init__(self, event_id, data_dict):
self.takeaway_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
|
def __str__(self):
plr = Player.find_by_id(self.player_id)
event = Event.find_by_id(self.event_id)
team = Team.find_by_id(self.team_id)
return "Takeaway: %s (%s) - %s" % (
plr.name, team.abbr, event)
| if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
setattr(self, attr, None) | conditional_block |
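The `__init__` shown in full in the row above walks a whitelist of expected keys, copying present values onto the instance and defaulting absent ones to `None`, so every attribute exists regardless of the input dict. The pattern in isolation:

```python
STANDARD_ATTRS = ["team_id", "player_id", "zone", "taken_from_team_id"]

class Record(object):
    def __init__(self, data_dict):
        for attr in STANDARD_ATTRS:
            # data_dict.get(attr) would collapse the if/else used above
            setattr(self, attr, data_dict[attr] if attr in data_dict else None)

r = Record({"team_id": 12, "zone": "Off."})
assert r.team_id == 12
assert r.player_id is None  # absent keys still become attributes
```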
takeaway.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.event import Event
from db.player import Player
from db.team import Team
class Takeaway(Base, SpecificEvent):
__tablename__ = 'takeaways'
__autoload__ = True
HUMAN_READABLE = 'takeaway'
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "taken_from_team_id"
]
def __init__(self, event_id, data_dict):
self.takeaway_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
setattr(self, attr, None)
def __str__(self): | plr = Player.find_by_id(self.player_id)
event = Event.find_by_id(self.event_id)
team = Team.find_by_id(self.team_id)
return "Takeaway: %s (%s) - %s" % (
plr.name, team.abbr, event) | random_line_split |
|
takeaway.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.event import Event
from db.player import Player
from db.team import Team
class Takeaway(Base, SpecificEvent):
__tablename__ = 'takeaways'
__autoload__ = True
HUMAN_READABLE = 'takeaway'
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "taken_from_team_id"
]
def __init__(self, event_id, data_dict):
self.takeaway_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
setattr(self, attr, None)
def __str__(self):
| plr = Player.find_by_id(self.player_id)
event = Event.find_by_id(self.event_id)
team = Team.find_by_id(self.team_id)
return "Takeaway: %s (%s) - %s" % (
plr.name, team.abbr, event) | identifier_body |
|
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
for e in array.iter() {
count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32 + min;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn | () {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}
| rosetta_vector | identifier_name |
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize; | count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32 + min;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
} | let mut count = vec![0; size];
for e in array.iter() { | random_line_split |
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 {
return;
}
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
for e in array.iter() {
count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32 + min;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() |
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}
| {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
} | identifier_body |
main.rs | #[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn counting_sort(array: &mut [i32], min: i32, max: i32) {
// nothing to do for arrays shorter than 2
if array.len() < 2 |
// we count occurrences of values
let size = (max - min + 1) as usize;
let mut count = vec![0; size];
for e in array.iter() {
count[(*e - min) as usize] += 1;
}
// then we write values back, sorted
let mut index = 0;
for value in 0..count.len() {
for _ in 0..count[value] {
array[index] = value as i32 + min;
index += 1;
}
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
counting_sort(&mut numbers, -31, 782);
}
#[cfg(test)]
mod tests {
extern crate meta;
fn check_sort(array: &mut [i32], min: i32, max: i32) {
super::counting_sort(array, min, max);
meta::test_utils::check_sorted(array);
}
#[test]
fn rosetta_vector() {
let numbers = &mut [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
check_sort(numbers, -31, 782);
}
#[test]
fn one_element_vector() {
let numbers = &mut [0i32];
check_sort(numbers, 0, 0);
}
#[test]
fn repeat_vector() {
let numbers = &mut [1i32, 1, 1, 1, 1];
check_sort(numbers, 1, 1);
}
#[test]
fn worst_case_vector() {
let numbers = &mut [20i32, 10, 0, -1, -5];
check_sort(numbers, -5, 20);
}
#[test]
fn already_sorted_vector() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, -1, 99);
}
#[test]
#[should_panic]
fn bad_min() {
let numbers = &mut [-1i32, 0, 3, 6, 99];
check_sort(numbers, 2, 99);
}
}
| {
return;
} | conditional_block |
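All four rows above carve different holes in the same counting sort: tally how often each value in `[min, max]` occurs, then rewrite the slice in order, giving O(n + k) time for k = max - min + 1 instead of comparison-based O(n log n). The algorithm once more as a compact Python sketch:

```python
def counting_sort(array, lo, hi):
    if len(array) < 2:
        return
    # tally occurrences of each value in [lo, hi]
    count = [0] * (hi - lo + 1)
    for e in array:
        count[e - lo] += 1
    # write values back in order, shifting indices by lo
    index = 0
    for offset, occurrences in enumerate(count):
        for _ in range(occurrences):
            array[index] = lo + offset
            index += 1

numbers = [4, 65, 2, -31, 0, 99, 2, 83, 782, 1]
counting_sort(numbers, -31, 782)
assert numbers == sorted(numbers)
```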
angular-sanitize.js | /**
* @license AngularJS v1.2.3
* (c) 2010-2014 Google, Inc. http://angularjs.org
* License: MIT
*/
(function(window, angular, undefined) {'use strict';
var $sanitizeMinErr = angular.$$minErr('$sanitize');
/**
* @ngdoc overview
* @name ngSanitize
* @description
*
* # ngSanitize
*
* The `ngSanitize` module provides functionality to sanitize HTML.
*
* {@installModule sanitize}
*
* <div doc-module-components="ngSanitize"></div>
*
* See {@link ngSanitize.$sanitize `$sanitize`} for usage.
*/
/*
* HTML Parser By Misko Hevery ([email protected])
* based on: HTML Parser By John Resig (ejohn.org)
* Original code by Erik Arvidsson, Mozilla Public License
* http://erik.eae.net/simplehtmlparser/simplehtmlparser.js
*
* // Use like so:
* htmlParser(htmlString, {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* });
*
*/
/**
* @ngdoc service
* @name ngSanitize.$sanitize
* @function
*
* @description
* The input is sanitized by parsing the html into tokens. All safe tokens (from a whitelist) are
* then serialized back to a properly escaped html string. This means that no unsafe input can make
* it into the returned string. However, since our parser is more strict than a typical browser
* parser, it's possible that some obscure input, which would be recognized as valid HTML by a
* browser, won't make it through the sanitizer.
* The whitelist is configured using the functions `aHrefSanitizationWhitelist` and
* `imgSrcSanitizationWhitelist` of {@link ng.$compileProvider `$compileProvider`}.
*
* @param {string} html Html input.
* @returns {string} Sanitized html.
*
* @example
<doc:example module="ngSanitize">
<doc:source>
<script>
function Ctrl($scope, $sce) {
$scope.snippet =
'<p style="color:blue">an html\n' +
'<em onmouseover="this.textContent=\'PWN3D!\'">click here</em>\n' +
'snippet</p>';
$scope.deliberatelyTrustDangerousSnippet = function() {
return $sce.trustAsHtml($scope.snippet);
};
}
</script>
<div ng-controller="Ctrl">
Snippet: <textarea ng-model="snippet" cols="60" rows="3"></textarea>
<table>
<tr>
<td>Directive</td>
<td>How</td>
<td>Source</td>
<td>Rendered</td>
</tr>
<tr id="bind-html-with-sanitize">
<td>ng-bind-html</td>
<td>Automatically uses $sanitize</td>
<td><pre><div ng-bind-html="snippet"><br/></div></pre></td>
<td><div ng-bind-html="snippet"></div></td>
</tr>
<tr id="bind-html-with-trust">
<td>ng-bind-html</td>
<td>Bypass $sanitize by explicitly trusting the dangerous value</td>
<td>
<pre><div ng-bind-html="deliberatelyTrustDangerousSnippet()">
</div></pre>
</td>
<td><div ng-bind-html="deliberatelyTrustDangerousSnippet()"></div></td>
</tr>
<tr id="bind-default">
<td>ng-bind</td>
<td>Automatically escapes</td>
<td><pre><div ng-bind="snippet"><br/></div></pre></td>
<td><div ng-bind="snippet"></div></td>
</tr>
</table>
</div>
</doc:source>
<doc:scenario>
it('should sanitize the html snippet by default', function() {
expect(using('#bind-html-with-sanitize').element('div').html()).
toBe('<p>an html\n<em>click here</em>\nsnippet</p>');
});
it('should inline raw snippet if bound to a trusted value', function() {
expect(using('#bind-html-with-trust').element("div").html()).
toBe("<p style=\"color:blue\">an html\n" +
"<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" +
"snippet</p>");
});
it('should escape snippet without any filter', function() {
expect(using('#bind-default').element('div').html()).
toBe("<p style=\"color:blue\">an html\n" +
"<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" +
"snippet</p>");
});
it('should update', function() {
input('snippet').enter('new <b onclick="alert(1)">text</b>');
expect(using('#bind-html-with-sanitize').element('div').html()).toBe('new <b>text</b>');
expect(using('#bind-html-with-trust').element('div').html()).toBe(
'new <b onclick="alert(1)">text</b>');
expect(using('#bind-default').element('div').html()).toBe(
"new <b onclick=\"alert(1)\">text</b>");
});
</doc:scenario>
</doc:example>
*/
function $SanitizeProvider() {
this.$get = ['$$sanitizeUri', function($$sanitizeUri) {
return function(html) {
var buf = [];
htmlParser(html, htmlSanitizeWriter(buf, function(uri, isImage) {
return !/^unsafe/.test($$sanitizeUri(uri, isImage));
}));
return buf.join('');
};
}];
}
function sanitizeText(chars) {
var buf = [];
var writer = htmlSanitizeWriter(buf, angular.noop);
writer.chars(chars);
return buf.join('');
}
// Regular Expressions for parsing tags and attributes
var START_TAG_REGEXP =
/^<\s*([\w:-]+)((?:\s+[\w:-]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)\s*>/,
END_TAG_REGEXP = /^<\s*\/\s*([\w:-]+)[^>]*>/,
ATTR_REGEXP = /([\w:-]+)(?:\s*=\s*(?:(?:"((?:[^"])*)")|(?:'((?:[^'])*)')|([^>\s]+)))?/g,
BEGIN_TAG_REGEXP = /^</,
BEGING_END_TAGE_REGEXP = /^<\s*\//,
COMMENT_REGEXP = /<!--(.*?)-->/g,
DOCTYPE_REGEXP = /<!DOCTYPE([^>]*?)>/i,
CDATA_REGEXP = /<!\[CDATA\[(.*?)]]>/g,
// Match everything outside of normal chars and " (quote character)
NON_ALPHANUMERIC_REGEXP = /([^\#-~| |!])/g;
// Good source of info about elements and attributes
// http://dev.w3.org/html5/spec/Overview.html#semantics
// http://simon.html5.org/html-elements
// Safe Void Elements - HTML5
// http://dev.w3.org/html5/spec/Overview.html#void-elements
var voidElements = makeMap("area,br,col,hr,img,wbr");
// Elements that you can, intentionally, leave open (and which close themselves)
// http://dev.w3.org/html5/spec/Overview.html#optional-tags
var optionalEndTagBlockElements = makeMap("colgroup,dd,dt,li,p,tbody,td,tfoot,th,thead,tr"),
optionalEndTagInlineElements = makeMap("rp,rt"),
optionalEndTagElements = angular.extend({},
optionalEndTagInlineElements,
optionalEndTagBlockElements);
// Safe Block Elements - HTML5
var blockElements = angular.extend({}, optionalEndTagBlockElements, makeMap("address,article," +
"aside,blockquote,caption,center,del,dir,div,dl,figure,figcaption,footer,h1,h2,h3,h4,h5," +
"h6,header,hgroup,hr,ins,map,menu,nav,ol,pre,script,section,table,ul"));
// Inline Elements - HTML5
var inlineElements = angular.extend({}, optionalEndTagInlineElements, makeMap("a,abbr,acronym,b," +
"bdi,bdo,big,br,cite,code,del,dfn,em,font,i,img,ins,kbd,label,map,mark,q,ruby,rp,rt,s," +
"samp,small,span,strike,strong,sub,sup,time,tt,u,var"));
// Special Elements (can contain anything)
var specialElements = makeMap("script,style");
var validElements = angular.extend({},
voidElements,
blockElements,
inlineElements,
optionalEndTagElements);
//Attributes that have href and hence need to be sanitized
var uriAttrs = makeMap("background,cite,href,longdesc,src,usemap");
var validAttrs = angular.extend({}, uriAttrs, makeMap(
'abbr,align,alt,axis,bgcolor,border,cellpadding,cellspacing,class,clear,'+
'color,cols,colspan,compact,coords,dir,face,headers,height,hreflang,hspace,'+
'ismap,lang,language,nohref,nowrap,rel,rev,rows,rowspan,rules,'+
'scope,scrolling,shape,span,start,summary,target,title,type,'+
'valign,value,vspace,width'));
function makeMap(str) {
var obj = {}, items = str.split(','), i;
for (i = 0; i < items.length; i++) obj[items[i]] = true;
return obj;
}
/**
* @example
* htmlParser(htmlString, {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* });
*
* @param {string} html string
* @param {object} handler
*/
function | ( html, handler ) {
var index, chars, match, stack = [], last = html;
stack.last = function() { return stack[ stack.length - 1 ]; };
while ( html ) {
chars = true;
// Make sure we're not in a script or style element
if ( !stack.last() || !specialElements[ stack.last() ] ) {
// Comment
if ( html.indexOf("<!--") === 0 ) {
// comments containing -- are not allowed unless they terminate the comment
index = html.indexOf("--", 4);
if ( index >= 0 && html.lastIndexOf("-->", index) === index) {
if (handler.comment) handler.comment( html.substring( 4, index ) );
html = html.substring( index + 3 );
chars = false;
}
// DOCTYPE
} else if ( DOCTYPE_REGEXP.test(html) ) {
match = html.match( DOCTYPE_REGEXP );
if ( match ) {
html = html.replace( match[0] , '');
chars = false;
}
// end tag
} else if ( BEGING_END_TAGE_REGEXP.test(html) ) {
match = html.match( END_TAG_REGEXP );
if ( match ) {
html = html.substring( match[0].length );
match[0].replace( END_TAG_REGEXP, parseEndTag );
chars = false;
}
// start tag
} else if ( BEGIN_TAG_REGEXP.test(html) ) {
match = html.match( START_TAG_REGEXP );
if ( match ) {
html = html.substring( match[0].length );
match[0].replace( START_TAG_REGEXP, parseStartTag );
chars = false;
}
}
if ( chars ) {
index = html.indexOf("<");
var text = index < 0 ? html : html.substring( 0, index );
html = index < 0 ? "" : html.substring( index );
if (handler.chars) handler.chars( decodeEntities(text) );
}
} else {
html = html.replace(new RegExp("(.*)<\\s*\\/\\s*" + stack.last() + "[^>]*>", 'i'),
function(all, text){
text = text.replace(COMMENT_REGEXP, "$1").replace(CDATA_REGEXP, "$1");
if (handler.chars) handler.chars( decodeEntities(text) );
return "";
});
parseEndTag( "", stack.last() );
}
if ( html == last ) {
throw $sanitizeMinErr('badparse', "The sanitizer was unable to parse the following block " +
"of html: {0}", html);
}
last = html;
}
// Clean up any remaining tags
parseEndTag();
function parseStartTag( tag, tagName, rest, unary ) {
tagName = angular.lowercase(tagName);
if ( blockElements[ tagName ] ) {
while ( stack.last() && inlineElements[ stack.last() ] ) {
parseEndTag( "", stack.last() );
}
}
if ( optionalEndTagElements[ tagName ] && stack.last() == tagName ) {
parseEndTag( "", tagName );
}
unary = voidElements[ tagName ] || !!unary;
if ( !unary )
stack.push( tagName );
var attrs = {};
rest.replace(ATTR_REGEXP,
function(match, name, doubleQuotedValue, singleQuotedValue, unquotedValue) {
var value = doubleQuotedValue
|| singleQuotedValue
|| unquotedValue
|| '';
attrs[name] = decodeEntities(value);
});
if (handler.start) handler.start( tagName, attrs, unary );
}
function parseEndTag( tag, tagName ) {
var pos = 0, i;
tagName = angular.lowercase(tagName);
if ( tagName )
// Find the closest opened tag of the same type
for ( pos = stack.length - 1; pos >= 0; pos-- )
if ( stack[ pos ] == tagName )
break;
if ( pos >= 0 ) {
// Close all the open elements, up the stack
for ( i = stack.length - 1; i >= pos; i-- )
if (handler.end) handler.end( stack[ i ] );
// Remove the open elements from the stack
stack.length = pos;
}
}
}
/**
* decodes all entities into a regular string
* @param value
* @returns {string} A string with decoded entities.
*/
var hiddenPre=document.createElement("pre");
function decodeEntities(value) {
if (!value) {
return '';
}
// Note: IE8 does not preserve spaces at the start/end of innerHTML
var spaceRe = /^(\s*)([\s\S]*?)(\s*)$/;
var parts = spaceRe.exec(value);
parts[0] = '';
if (parts[2]) {
hiddenPre.innerHTML=parts[2].replace(/</g,"&lt;");
parts[2] = hiddenPre.innerText || hiddenPre.textContent;
}
return parts.join('');
}
/**
* Escapes all potentially dangerous characters, so that the
* resulting string can be safely inserted into attribute or
* element text.
* @param value
* @returns escaped text
*/
function encodeEntities(value) {
return value.
replace(/&/g, '&amp;').
replace(NON_ALPHANUMERIC_REGEXP, function(value){
return '&#' + value.charCodeAt(0) + ';';
}).
replace(/</g, '&lt;').
replace(/>/g, '&gt;');
}
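// Editor's sketch of the expected behaviour (assumes the entity replacements
// above; not part of the original source):
//   encodeEntities('<b>"hi" & bye</b>');
//   // => '&lt;b&gt;&#34;hi&#34; &amp; bye&lt;/b&gt;'
// The double quote falls outside the safe range of NON_ALPHANUMERIC_REGEXP
// and is therefore emitted as a numeric character reference.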
/**
* create an HTML/XML writer which writes to a buffer
* @param {Array} buf use buf.join('') to get the sanitized html string
* @returns {object} in the form of {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* }
*/
function htmlSanitizeWriter(buf, uriValidator){
var ignore = false;
var out = angular.bind(buf, buf.push);
return {
start: function(tag, attrs, unary){
tag = angular.lowercase(tag);
if (!ignore && specialElements[tag]) {
ignore = tag;
}
if (!ignore && validElements[tag] === true) {
out('<');
out(tag);
angular.forEach(attrs, function(value, key){
var lkey=angular.lowercase(key);
var isImage = (tag === 'img' && lkey === 'src') || (lkey === 'background');
if (validAttrs[lkey] === true &&
(uriAttrs[lkey] !== true || uriValidator(value, isImage))) {
out(' ');
out(key);
out('="');
out(encodeEntities(value));
out('"');
}
});
out(unary ? '/>' : '>');
}
},
end: function(tag){
tag = angular.lowercase(tag);
if (!ignore && validElements[tag] === true) {
out('</');
out(tag);
out('>');
}
if (tag == ignore) {
ignore = false;
}
},
chars: function(chars){
if (!ignore) {
out(encodeEntities(chars));
}
}
};
}
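// Editor's usage sketch (not part of the original source): the writer is the
// `handler` consumed by htmlParser; a permissive uriValidator is assumed here
// purely for illustration:
//   var buf = [];
//   htmlParser('<b onclick="evil()">hi</b>',
//              htmlSanitizeWriter(buf, function() { return true; }));
//   buf.join('');  // => '<b>hi</b>' (onclick is not in validAttrs, so it is dropped)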
// define ngSanitize module and register $sanitize service
angular.module('ngSanitize', []).provider('$sanitize', $SanitizeProvider);
/* global sanitizeText: false */
/**
* @ngdoc filter
* @name ngSanitize.filter:linky
* @function
*
* @description
* Finds links in text input and turns them into html links. Supports http/https/ftp/mailto and
* plain email address links.
*
* Requires the {@link ngSanitize `ngSanitize`} module to be installed.
*
* @param {string} text Input text.
* @param {string} target Window (_blank|_self|_parent|_top) or named frame to open links in.
* @returns {string} Html-linkified text.
*
* @usage
<span ng-bind-html="linky_expression | linky"></span>
*
* @example
<doc:example module="ngSanitize">
<doc:source>
<script>
function Ctrl($scope) {
$scope.snippet =
'Pretty text with some links:\n'+
'http://angularjs.org/,\n'+
'mailto:[email protected],\n'+
'[email protected],\n'+
'and one more: ftp://127.0.0.1/.';
$scope.snippetWithTarget = 'http://angularjs.org/';
}
</script>
<div ng-controller="Ctrl">
Snippet: <textarea ng-model="snippet" cols="60" rows="3"></textarea>
<table>
<tr>
<td>Filter</td>
<td>Source</td>
<td>Rendered</td>
</tr>
<tr id="linky-filter">
<td>linky filter</td>
<td>
<pre><div ng-bind-html="snippet | linky"><br></div></pre>
</td>
<td>
<div ng-bind-html="snippet | linky"></div>
</td>
</tr>
<tr id="linky-target">
<td>linky target</td>
<td>
<pre><div ng-bind-html="snippetWithTarget | linky:'_blank'"><br></div></pre>
</td>
<td>
<div ng-bind-html="snippetWithTarget | linky:'_blank'"></div>
</td>
</tr>
<tr id="escaped-html">
<td>no filter</td>
<td><pre><div ng-bind="snippet"><br></div></pre></td>
<td><div ng-bind="snippet"></div></td>
</tr>
</table>
</doc:source>
<doc:scenario>
it('should linkify the snippet with urls', function() {
expect(using('#linky-filter').binding('snippet | linky')).
toBe('Pretty text with some links: ' +
'<a href="http://angularjs.org/">http://angularjs.org/</a>, ' +
'<a href="mailto:[email protected]">[email protected]</a>, ' +
'<a href="mailto:[email protected]">[email protected]</a>, ' +
'and one more: <a href="ftp://127.0.0.1/">ftp://127.0.0.1/</a>.');
});
it ('should not linkify snippet without the linky filter', function() {
expect(using('#escaped-html').binding('snippet')).
toBe("Pretty text with some links:\n" +
"http://angularjs.org/,\n" +
"mailto:[email protected],\n" +
"[email protected],\n" +
"and one more: ftp://127.0.0.1/.");
});
it('should update', function() {
input('snippet').enter('new http://link.');
expect(using('#linky-filter').binding('snippet | linky')).
toBe('new <a href="http://link">http://link</a>.');
expect(using('#escaped-html').binding('snippet')).toBe('new http://link.');
});
it('should work with the target property', function() {
expect(using('#linky-target').binding("snippetWithTarget | linky:'_blank'")).
toBe('<a target="_blank" href="http://angularjs.org/">http://angularjs.org/</a>');
});
</doc:scenario>
</doc:example>
*/
angular.module('ngSanitize').filter('linky', ['$sanitize', function($sanitize) {
var LINKY_URL_REGEXP =
/((ftp|https?):\/\/|(mailto:)?[A-Za-z0-9._%+-]+@)\S*[^\s.;,(){}<>]/,
MAILTO_REGEXP = /^mailto:/;
return function(text, target) {
if (!text) return text;
var match;
var raw = text;
var html = [];
var url;
var i;
while ((match = raw.match(LINKY_URL_REGEXP))) {
// We cannot end in these characters, as they are sometimes found at the end of a sentence
url = match[0];
// if we did not match ftp/http/mailto then assume mailto
if (match[2] == match[3]) url = 'mailto:' + url;
i = match.index;
addText(raw.substr(0, i));
addLink(url, match[0].replace(MAILTO_REGEXP, ''));
raw = raw.substring(i + match[0].length);
}
addText(raw);
return $sanitize(html.join(''));
function addText(text) {
if (!text) {
return;
}
html.push(sanitizeText(text));
}
function addLink(url, text) {
html.push('<a ');
if (angular.isDefined(target)) {
html.push('target="');
html.push(target);
html.push('" ');
}
html.push('href="');
html.push(url);
html.push('">');
addText(text);
html.push('</a>');
}
};
}]);
})(window, window.angular);
| htmlParser | identifier_name |
angular-sanitize.js | /**
* @license AngularJS v1.2.3
* (c) 2010-2014 Google, Inc. http://angularjs.org
* License: MIT
*/
(function(window, angular, undefined) {'use strict';
var $sanitizeMinErr = angular.$$minErr('$sanitize');
/**
* @ngdoc overview
* @name ngSanitize
* @description
*
* # ngSanitize
*
* The `ngSanitize` module provides functionality to sanitize HTML.
*
* {@installModule sanitize}
*
* <div doc-module-components="ngSanitize"></div>
*
* See {@link ngSanitize.$sanitize `$sanitize`} for usage.
*/
/*
* HTML Parser By Misko Hevery ([email protected])
* based on: HTML Parser By John Resig (ejohn.org)
* Original code by Erik Arvidsson, Mozilla Public License
* http://erik.eae.net/simplehtmlparser/simplehtmlparser.js
*
* // Use like so:
* htmlParser(htmlString, {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* });
*
*/
/**
* @ngdoc service
* @name ngSanitize.$sanitize
* @function
*
* @description
* The input is sanitized by parsing the html into tokens. All safe tokens (from a whitelist) are
* then serialized back to properly escaped html string. This means that no unsafe input can make
* it into the returned string, however, since our parser is more strict than a typical browser
* parser, it's possible that some obscure input, which would be recognized as valid HTML by a
* browser, won't make it through the sanitizer.
* The whitelist is configured using the functions `aHrefSanitizationWhitelist` and
* `imgSrcSanitizationWhitelist` of {@link ng.$compileProvider `$compileProvider`}.
*
* @param {string} html Html input.
* @returns {string} Sanitized html.
*
* @example
<doc:example module="ngSanitize">
<doc:source>
<script>
function Ctrl($scope, $sce) {
$scope.snippet =
'<p style="color:blue">an html\n' +
'<em onmouseover="this.textContent=\'PWN3D!\'">click here</em>\n' +
'snippet</p>'; | <div ng-controller="Ctrl">
Snippet: <textarea ng-model="snippet" cols="60" rows="3"></textarea>
<table>
<tr>
<td>Directive</td>
<td>How</td>
<td>Source</td>
<td>Rendered</td>
</tr>
<tr id="bind-html-with-sanitize">
<td>ng-bind-html</td>
<td>Automatically uses $sanitize</td>
<td><pre><div ng-bind-html="snippet"><br/></div></pre></td>
<td><div ng-bind-html="snippet"></div></td>
</tr>
<tr id="bind-html-with-trust">
<td>ng-bind-html</td>
<td>Bypass $sanitize by explicitly trusting the dangerous value</td>
<td>
<pre><div ng-bind-html="deliberatelyTrustDangerousSnippet()">
</div></pre>
</td>
<td><div ng-bind-html="deliberatelyTrustDangerousSnippet()"></div></td>
</tr>
<tr id="bind-default">
<td>ng-bind</td>
<td>Automatically escapes</td>
<td><pre><div ng-bind="snippet"><br/></div></pre></td>
<td><div ng-bind="snippet"></div></td>
</tr>
</table>
</div>
</doc:source>
<doc:scenario>
it('should sanitize the html snippet by default', function() {
expect(using('#bind-html-with-sanitize').element('div').html()).
toBe('<p>an html\n<em>click here</em>\nsnippet</p>');
});
it('should inline raw snippet if bound to a trusted value', function() {
expect(using('#bind-html-with-trust').element("div").html()).
toBe("<p style=\"color:blue\">an html\n" +
"<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" +
"snippet</p>");
});
it('should escape snippet without any filter', function() {
expect(using('#bind-default').element('div').html()).
toBe("<p style=\"color:blue\">an html\n" +
"<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" +
"snippet</p>");
});
it('should update', function() {
input('snippet').enter('new <b onclick="alert(1)">text</b>');
expect(using('#bind-html-with-sanitize').element('div').html()).toBe('new <b>text</b>');
expect(using('#bind-html-with-trust').element('div').html()).toBe(
'new <b onclick="alert(1)">text</b>');
expect(using('#bind-default').element('div').html()).toBe(
"new <b onclick=\"alert(1)\">text</b>");
});
</doc:scenario>
</doc:example>
*/
function $SanitizeProvider() {
this.$get = ['$$sanitizeUri', function($$sanitizeUri) {
return function(html) {
var buf = [];
htmlParser(html, htmlSanitizeWriter(buf, function(uri, isImage) {
return !/^unsafe/.test($$sanitizeUri(uri, isImage));
}));
return buf.join('');
};
}];
}
function sanitizeText(chars) {
var buf = [];
var writer = htmlSanitizeWriter(buf, angular.noop);
writer.chars(chars);
return buf.join('');
}
// Regular Expressions for parsing tags and attributes
var START_TAG_REGEXP =
/^<\s*([\w:-]+)((?:\s+[\w:-]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)\s*>/,
END_TAG_REGEXP = /^<\s*\/\s*([\w:-]+)[^>]*>/,
ATTR_REGEXP = /([\w:-]+)(?:\s*=\s*(?:(?:"((?:[^"])*)")|(?:'((?:[^'])*)')|([^>\s]+)))?/g,
BEGIN_TAG_REGEXP = /^</,
BEGING_END_TAGE_REGEXP = /^<\s*\//,
COMMENT_REGEXP = /<!--(.*?)-->/g,
DOCTYPE_REGEXP = /<!DOCTYPE([^>]*?)>/i,
CDATA_REGEXP = /<!\[CDATA\[(.*?)]]>/g,
// Match everything outside of normal chars and " (quote character)
NON_ALPHANUMERIC_REGEXP = /([^\#-~| |!])/g;
// Good source of info about elements and attributes
// http://dev.w3.org/html5/spec/Overview.html#semantics
// http://simon.html5.org/html-elements
// Safe Void Elements - HTML5
// http://dev.w3.org/html5/spec/Overview.html#void-elements
var voidElements = makeMap("area,br,col,hr,img,wbr");
// Elements that you can, intentionally, leave open (and which close themselves)
// http://dev.w3.org/html5/spec/Overview.html#optional-tags
var optionalEndTagBlockElements = makeMap("colgroup,dd,dt,li,p,tbody,td,tfoot,th,thead,tr"),
optionalEndTagInlineElements = makeMap("rp,rt"),
optionalEndTagElements = angular.extend({},
optionalEndTagInlineElements,
optionalEndTagBlockElements);
// Safe Block Elements - HTML5
var blockElements = angular.extend({}, optionalEndTagBlockElements, makeMap("address,article," +
"aside,blockquote,caption,center,del,dir,div,dl,figure,figcaption,footer,h1,h2,h3,h4,h5," +
"h6,header,hgroup,hr,ins,map,menu,nav,ol,pre,script,section,table,ul"));
// Inline Elements - HTML5
var inlineElements = angular.extend({}, optionalEndTagInlineElements, makeMap("a,abbr,acronym,b," +
"bdi,bdo,big,br,cite,code,del,dfn,em,font,i,img,ins,kbd,label,map,mark,q,ruby,rp,rt,s," +
"samp,small,span,strike,strong,sub,sup,time,tt,u,var"));
// Special Elements (can contain anything)
var specialElements = makeMap("script,style");
var validElements = angular.extend({},
voidElements,
blockElements,
inlineElements,
optionalEndTagElements);
// Attributes that contain URIs and hence need to be sanitized
var uriAttrs = makeMap("background,cite,href,longdesc,src,usemap");
var validAttrs = angular.extend({}, uriAttrs, makeMap(
'abbr,align,alt,axis,bgcolor,border,cellpadding,cellspacing,class,clear,'+
'color,cols,colspan,compact,coords,dir,face,headers,height,hreflang,hspace,'+
'ismap,lang,language,nohref,nowrap,rel,rev,rows,rowspan,rules,'+
'scope,scrolling,shape,span,start,summary,target,title,type,'+
'valign,value,vspace,width'));
function makeMap(str) {
var obj = {}, items = str.split(','), i;
for (i = 0; i < items.length; i++) obj[items[i]] = true;
return obj;
}
/**
* @example
* htmlParser(htmlString, {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* });
*
* @param {string} html HTML string to parse.
* @param {object} handler Object providing start, end, chars and comment callbacks.
*/
function htmlParser( html, handler ) {
var index, chars, match, stack = [], last = html;
stack.last = function() { return stack[ stack.length - 1 ]; };
while ( html ) {
chars = true;
// Make sure we're not in a script or style element
if ( !stack.last() || !specialElements[ stack.last() ] ) {
// Comment
if ( html.indexOf("<!--") === 0 ) {
// comments containing -- are not allowed unless they terminate the comment
index = html.indexOf("--", 4);
if ( index >= 0 && html.lastIndexOf("-->", index) === index) {
if (handler.comment) handler.comment( html.substring( 4, index ) );
html = html.substring( index + 3 );
chars = false;
}
// DOCTYPE
} else if ( DOCTYPE_REGEXP.test(html) ) {
match = html.match( DOCTYPE_REGEXP );
if ( match ) {
html = html.replace( match[0] , '');
chars = false;
}
// end tag
} else if ( BEGING_END_TAGE_REGEXP.test(html) ) {
match = html.match( END_TAG_REGEXP );
if ( match ) {
html = html.substring( match[0].length );
match[0].replace( END_TAG_REGEXP, parseEndTag );
chars = false;
}
// start tag
} else if ( BEGIN_TAG_REGEXP.test(html) ) {
match = html.match( START_TAG_REGEXP );
if ( match ) {
html = html.substring( match[0].length );
match[0].replace( START_TAG_REGEXP, parseStartTag );
chars = false;
}
}
if ( chars ) {
index = html.indexOf("<");
var text = index < 0 ? html : html.substring( 0, index );
html = index < 0 ? "" : html.substring( index );
if (handler.chars) handler.chars( decodeEntities(text) );
}
} else {
html = html.replace(new RegExp("(.*)<\\s*\\/\\s*" + stack.last() + "[^>]*>", 'i'),
function(all, text){
text = text.replace(COMMENT_REGEXP, "$1").replace(CDATA_REGEXP, "$1");
if (handler.chars) handler.chars( decodeEntities(text) );
return "";
});
parseEndTag( "", stack.last() );
}
if ( html == last ) {
throw $sanitizeMinErr('badparse', "The sanitizer was unable to parse the following block " +
"of html: {0}", html);
}
last = html;
}
// Clean up any remaining tags
parseEndTag();
function parseStartTag( tag, tagName, rest, unary ) {
tagName = angular.lowercase(tagName);
if ( blockElements[ tagName ] ) {
while ( stack.last() && inlineElements[ stack.last() ] ) {
parseEndTag( "", stack.last() );
}
}
if ( optionalEndTagElements[ tagName ] && stack.last() == tagName ) {
parseEndTag( "", tagName );
}
unary = voidElements[ tagName ] || !!unary;
if ( !unary )
stack.push( tagName );
var attrs = {};
rest.replace(ATTR_REGEXP,
function(match, name, doubleQuotedValue, singleQuotedValue, unquotedValue) {
var value = doubleQuotedValue
|| singleQuotedValue
|| unquotedValue
|| '';
attrs[name] = decodeEntities(value);
});
if (handler.start) handler.start( tagName, attrs, unary );
}
function parseEndTag( tag, tagName ) {
var pos = 0, i;
tagName = angular.lowercase(tagName);
if ( tagName )
// Find the closest opened tag of the same type
for ( pos = stack.length - 1; pos >= 0; pos-- )
if ( stack[ pos ] == tagName )
break;
if ( pos >= 0 ) {
// Close all the open elements, up the stack
for ( i = stack.length - 1; i >= pos; i-- )
if (handler.end) handler.end( stack[ i ] );
// Remove the open elements from the stack
stack.length = pos;
}
}
}
/**
* decodes all entities into a regular string
* @param value
* @returns {string} A string with decoded entities.
*/
var hiddenPre=document.createElement("pre");
function decodeEntities(value) {
if (!value) {
return '';
}
// Note: IE8 does not preserve spaces at the start/end of innerHTML
var spaceRe = /^(\s*)([\s\S]*?)(\s*)$/;
var parts = spaceRe.exec(value);
parts[0] = '';
if (parts[2]) {
hiddenPre.innerHTML=parts[2].replace(/</g,"&lt;");
parts[2] = hiddenPre.innerText || hiddenPre.textContent;
}
return parts.join('');
}
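// Editor's sketch (not part of the original source): the hidden <pre> lets
// the browser do the actual decoding, while spaceRe preserves leading and
// trailing whitespace that IE8 would otherwise drop:
//   decodeEntities('  a &amp; b  ');  // => '  a & b  '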
/**
* Escapes all potentially dangerous characters, so that the
* resulting string can be safely inserted into attribute or
* element text.
* @param value
* @returns escaped text
*/
function encodeEntities(value) {
return value.
replace(/&/g, '&amp;').
replace(NON_ALPHANUMERIC_REGEXP, function(value){
return '&#' + value.charCodeAt(0) + ';';
}).
replace(/</g, '&lt;').
replace(/>/g, '&gt;');
}
/**
* create an HTML/XML writer which writes to a buffer
* @param {Array} buf use buf.join('') to get the sanitized html string
* @returns {object} in the form of {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* }
*/
function htmlSanitizeWriter(buf, uriValidator){
var ignore = false;
var out = angular.bind(buf, buf.push);
return {
start: function(tag, attrs, unary){
tag = angular.lowercase(tag);
if (!ignore && specialElements[tag]) {
ignore = tag;
}
if (!ignore && validElements[tag] === true) {
out('<');
out(tag);
angular.forEach(attrs, function(value, key){
var lkey=angular.lowercase(key);
var isImage = (tag === 'img' && lkey === 'src') || (lkey === 'background');
if (validAttrs[lkey] === true &&
(uriAttrs[lkey] !== true || uriValidator(value, isImage))) {
out(' ');
out(key);
out('="');
out(encodeEntities(value));
out('"');
}
});
out(unary ? '/>' : '>');
}
},
end: function(tag){
tag = angular.lowercase(tag);
if (!ignore && validElements[tag] === true) {
out('</');
out(tag);
out('>');
}
if (tag == ignore) {
ignore = false;
}
},
chars: function(chars){
if (!ignore) {
out(encodeEntities(chars));
}
}
};
}
// define ngSanitize module and register $sanitize service
angular.module('ngSanitize', []).provider('$sanitize', $SanitizeProvider);
/* global sanitizeText: false */
/**
* @ngdoc filter
* @name ngSanitize.filter:linky
* @function
*
* @description
* Finds links in text input and turns them into html links. Supports http/https/ftp/mailto and
* plain email address links.
*
* Requires the {@link ngSanitize `ngSanitize`} module to be installed.
*
* @param {string} text Input text.
* @param {string} target Window (_blank|_self|_parent|_top) or named frame to open links in.
* @returns {string} Html-linkified text.
*
* @usage
<span ng-bind-html="linky_expression | linky"></span>
*
* @example
<doc:example module="ngSanitize">
<doc:source>
<script>
function Ctrl($scope) {
$scope.snippet =
'Pretty text with some links:\n'+
'http://angularjs.org/,\n'+
'mailto:[email protected],\n'+
'[email protected],\n'+
'and one more: ftp://127.0.0.1/.';
$scope.snippetWithTarget = 'http://angularjs.org/';
}
</script>
<div ng-controller="Ctrl">
Snippet: <textarea ng-model="snippet" cols="60" rows="3"></textarea>
<table>
<tr>
<td>Filter</td>
<td>Source</td>
<td>Rendered</td>
</tr>
<tr id="linky-filter">
<td>linky filter</td>
<td>
<pre><div ng-bind-html="snippet | linky"><br></div></pre>
</td>
<td>
<div ng-bind-html="snippet | linky"></div>
</td>
</tr>
<tr id="linky-target">
<td>linky target</td>
<td>
<pre><div ng-bind-html="snippetWithTarget | linky:'_blank'"><br></div></pre>
</td>
<td>
<div ng-bind-html="snippetWithTarget | linky:'_blank'"></div>
</td>
</tr>
<tr id="escaped-html">
<td>no filter</td>
<td><pre><div ng-bind="snippet"><br></div></pre></td>
<td><div ng-bind="snippet"></div></td>
</tr>
</table>
</doc:source>
<doc:scenario>
it('should linkify the snippet with urls', function() {
expect(using('#linky-filter').binding('snippet | linky')).
toBe('Pretty text with some links: ' +
'<a href="http://angularjs.org/">http://angularjs.org/</a>, ' +
'<a href="mailto:[email protected]">[email protected]</a>, ' +
'<a href="mailto:[email protected]">[email protected]</a>, ' +
'and one more: <a href="ftp://127.0.0.1/">ftp://127.0.0.1/</a>.');
});
it ('should not linkify snippet without the linky filter', function() {
expect(using('#escaped-html').binding('snippet')).
toBe("Pretty text with some links:\n" +
"http://angularjs.org/,\n" +
"mailto:[email protected],\n" +
"[email protected],\n" +
"and one more: ftp://127.0.0.1/.");
});
it('should update', function() {
input('snippet').enter('new http://link.');
expect(using('#linky-filter').binding('snippet | linky')).
toBe('new <a href="http://link">http://link</a>.');
expect(using('#escaped-html').binding('snippet')).toBe('new http://link.');
});
it('should work with the target property', function() {
expect(using('#linky-target').binding("snippetWithTarget | linky:'_blank'")).
toBe('<a target="_blank" href="http://angularjs.org/">http://angularjs.org/</a>');
});
</doc:scenario>
</doc:example>
*/
angular.module('ngSanitize').filter('linky', ['$sanitize', function($sanitize) {
var LINKY_URL_REGEXP =
/((ftp|https?):\/\/|(mailto:)?[A-Za-z0-9._%+-]+@)\S*[^\s.;,(){}<>]/,
MAILTO_REGEXP = /^mailto:/;
return function(text, target) {
if (!text) return text;
var match;
var raw = text;
var html = [];
var url;
var i;
while ((match = raw.match(LINKY_URL_REGEXP))) {
// We cannot end in these characters, as they are sometimes found at the end of a sentence
url = match[0];
// if we did not match ftp/http/mailto then assume mailto
if (match[2] == match[3]) url = 'mailto:' + url;
i = match.index;
addText(raw.substr(0, i));
addLink(url, match[0].replace(MAILTO_REGEXP, ''));
raw = raw.substring(i + match[0].length);
}
addText(raw);
return $sanitize(html.join(''));
function addText(text) {
if (!text) {
return;
}
html.push(sanitizeText(text));
}
function addLink(url, text) {
html.push('<a ');
if (angular.isDefined(target)) {
html.push('target="');
html.push(target);
html.push('" ');
}
html.push('href="');
html.push(url);
html.push('">');
addText(text);
html.push('</a>');
}
};
}]);
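// Editor's usage sketch (not part of the original source): inside an app the
// filter is normally resolved through $filter:
//   var linky = $filter('linky');
//   linky('visit http://angularjs.org/ today', '_blank');
//   // => 'visit <a target="_blank" href="http://angularjs.org/">http://angularjs.org/</a> today'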
})(window, window.angular); | $scope.deliberatelyTrustDangerousSnippet = function() {
return $sce.trustAsHtml($scope.snippet);
};
}
</script> | random_line_split |
angular-sanitize.js | /**
* @license AngularJS v1.2.3
* (c) 2010-2014 Google, Inc. http://angularjs.org
* License: MIT
*/
(function(window, angular, undefined) {'use strict';
var $sanitizeMinErr = angular.$$minErr('$sanitize');
/**
* @ngdoc overview
* @name ngSanitize
* @description
*
* # ngSanitize
*
* The `ngSanitize` module provides functionality to sanitize HTML.
*
* {@installModule sanitize}
*
* <div doc-module-components="ngSanitize"></div>
*
* See {@link ngSanitize.$sanitize `$sanitize`} for usage.
*/
/*
* HTML Parser By Misko Hevery ([email protected])
* based on: HTML Parser By John Resig (ejohn.org)
* Original code by Erik Arvidsson, Mozilla Public License
* http://erik.eae.net/simplehtmlparser/simplehtmlparser.js
*
* // Use like so:
* htmlParser(htmlString, {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* });
*
*/
/**
* @ngdoc service
* @name ngSanitize.$sanitize
* @function
*
* @description
* The input is sanitized by parsing the html into tokens. All safe tokens (from a whitelist) are
* then serialized back to properly escaped html string. This means that no unsafe input can make
* it into the returned string, however, since our parser is more strict than a typical browser
* parser, it's possible that some obscure input, which would be recognized as valid HTML by a
* browser, won't make it through the sanitizer.
* The whitelist is configured using the functions `aHrefSanitizationWhitelist` and
* `imgSrcSanitizationWhitelist` of {@link ng.$compileProvider `$compileProvider`}.
*
* @param {string} html Html input.
* @returns {string} Sanitized html.
*
* @example
<doc:example module="ngSanitize">
<doc:source>
<script>
function Ctrl($scope, $sce) {
$scope.snippet =
'<p style="color:blue">an html\n' +
'<em onmouseover="this.textContent=\'PWN3D!\'">click here</em>\n' +
'snippet</p>';
$scope.deliberatelyTrustDangerousSnippet = function() {
return $sce.trustAsHtml($scope.snippet);
};
}
</script>
<div ng-controller="Ctrl">
Snippet: <textarea ng-model="snippet" cols="60" rows="3"></textarea>
<table>
<tr>
<td>Directive</td>
<td>How</td>
<td>Source</td>
<td>Rendered</td>
</tr>
<tr id="bind-html-with-sanitize">
<td>ng-bind-html</td>
<td>Automatically uses $sanitize</td>
<td><pre><div ng-bind-html="snippet"><br/></div></pre></td>
<td><div ng-bind-html="snippet"></div></td>
</tr>
<tr id="bind-html-with-trust">
<td>ng-bind-html</td>
<td>Bypass $sanitize by explicitly trusting the dangerous value</td>
<td>
<pre><div ng-bind-html="deliberatelyTrustDangerousSnippet()">
</div></pre>
</td>
<td><div ng-bind-html="deliberatelyTrustDangerousSnippet()"></div></td>
</tr>
<tr id="bind-default">
<td>ng-bind</td>
<td>Automatically escapes</td>
<td><pre><div ng-bind="snippet"><br/></div></pre></td>
<td><div ng-bind="snippet"></div></td>
</tr>
</table>
</div>
</doc:source>
<doc:scenario>
it('should sanitize the html snippet by default', function() {
expect(using('#bind-html-with-sanitize').element('div').html()).
toBe('<p>an html\n<em>click here</em>\nsnippet</p>');
});
it('should inline raw snippet if bound to a trusted value', function() {
expect(using('#bind-html-with-trust').element("div").html()).
toBe("<p style=\"color:blue\">an html\n" +
"<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" +
"snippet</p>");
});
it('should escape snippet without any filter', function() {
expect(using('#bind-default').element('div').html()).
toBe("<p style=\"color:blue\">an html\n" +
"<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" +
"snippet</p>");
});
it('should update', function() {
input('snippet').enter('new <b onclick="alert(1)">text</b>');
expect(using('#bind-html-with-sanitize').element('div').html()).toBe('new <b>text</b>');
expect(using('#bind-html-with-trust').element('div').html()).toBe(
'new <b onclick="alert(1)">text</b>');
expect(using('#bind-default').element('div').html()).toBe(
"new <b onclick=\"alert(1)\">text</b>");
});
</doc:scenario>
</doc:example>
*/
function $SanitizeProvider() {
this.$get = ['$$sanitizeUri', function($$sanitizeUri) {
return function(html) {
var buf = [];
htmlParser(html, htmlSanitizeWriter(buf, function(uri, isImage) {
return !/^unsafe/.test($$sanitizeUri(uri, isImage));
}));
return buf.join('');
};
}];
}
function sanitizeText(chars) {
var buf = [];
var writer = htmlSanitizeWriter(buf, angular.noop);
writer.chars(chars);
return buf.join('');
}
// Regular Expressions for parsing tags and attributes
var START_TAG_REGEXP =
/^<\s*([\w:-]+)((?:\s+[\w:-]+(?:\s*=\s*(?:(?:"[^"]*")|(?:'[^']*')|[^>\s]+))?)*)\s*(\/?)\s*>/,
END_TAG_REGEXP = /^<\s*\/\s*([\w:-]+)[^>]*>/,
ATTR_REGEXP = /([\w:-]+)(?:\s*=\s*(?:(?:"((?:[^"])*)")|(?:'((?:[^'])*)')|([^>\s]+)))?/g,
BEGIN_TAG_REGEXP = /^</,
BEGING_END_TAGE_REGEXP = /^<\s*\//,
COMMENT_REGEXP = /<!--(.*?)-->/g,
DOCTYPE_REGEXP = /<!DOCTYPE([^>]*?)>/i,
CDATA_REGEXP = /<!\[CDATA\[(.*?)]]>/g,
// Match everything outside of normal chars and " (quote character)
NON_ALPHANUMERIC_REGEXP = /([^\#-~| |!])/g;
// Good source of info about elements and attributes
// http://dev.w3.org/html5/spec/Overview.html#semantics
// http://simon.html5.org/html-elements
// Safe Void Elements - HTML5
// http://dev.w3.org/html5/spec/Overview.html#void-elements
var voidElements = makeMap("area,br,col,hr,img,wbr");
// Elements that you can, intentionally, leave open (and which close themselves)
// http://dev.w3.org/html5/spec/Overview.html#optional-tags
var optionalEndTagBlockElements = makeMap("colgroup,dd,dt,li,p,tbody,td,tfoot,th,thead,tr"),
optionalEndTagInlineElements = makeMap("rp,rt"),
optionalEndTagElements = angular.extend({},
optionalEndTagInlineElements,
optionalEndTagBlockElements);
// Safe Block Elements - HTML5
var blockElements = angular.extend({}, optionalEndTagBlockElements, makeMap("address,article," +
"aside,blockquote,caption,center,del,dir,div,dl,figure,figcaption,footer,h1,h2,h3,h4,h5," +
"h6,header,hgroup,hr,ins,map,menu,nav,ol,pre,script,section,table,ul"));
// Inline Elements - HTML5
var inlineElements = angular.extend({}, optionalEndTagInlineElements, makeMap("a,abbr,acronym,b," +
"bdi,bdo,big,br,cite,code,del,dfn,em,font,i,img,ins,kbd,label,map,mark,q,ruby,rp,rt,s," +
"samp,small,span,strike,strong,sub,sup,time,tt,u,var"));
// Special Elements (can contain anything)
var specialElements = makeMap("script,style");
var validElements = angular.extend({},
voidElements,
blockElements,
inlineElements,
optionalEndTagElements);
// Attributes that contain URIs and hence need to be sanitized
var uriAttrs = makeMap("background,cite,href,longdesc,src,usemap");
var validAttrs = angular.extend({}, uriAttrs, makeMap(
'abbr,align,alt,axis,bgcolor,border,cellpadding,cellspacing,class,clear,'+
'color,cols,colspan,compact,coords,dir,face,headers,height,hreflang,hspace,'+
'ismap,lang,language,nohref,nowrap,rel,rev,rows,rowspan,rules,'+
'scope,scrolling,shape,span,start,summary,target,title,type,'+
'valign,value,vspace,width'));
function makeMap(str) {
var obj = {}, items = str.split(','), i;
for (i = 0; i < items.length; i++) obj[items[i]] = true;
return obj;
}
/**
* @example
* htmlParser(htmlString, {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* });
*
* @param {string} html HTML string to parse.
* @param {object} handler Object providing start, end, chars and comment callbacks.
*/
function htmlParser( html, handler ) {
var index, chars, match, stack = [], last = html;
stack.last = function() { return stack[ stack.length - 1 ]; };
while ( html ) {
chars = true;
// Make sure we're not in a script or style element
if ( !stack.last() || !specialElements[ stack.last() ] ) {
// Comment
if ( html.indexOf("<!--") === 0 ) {
// comments containing -- are not allowed unless they terminate the comment
index = html.indexOf("--", 4);
if ( index >= 0 && html.lastIndexOf("-->", index) === index) {
if (handler.comment) handler.comment( html.substring( 4, index ) );
html = html.substring( index + 3 );
chars = false;
}
// DOCTYPE
} else if ( DOCTYPE_REGEXP.test(html) ) {
match = html.match( DOCTYPE_REGEXP );
if ( match ) {
html = html.replace( match[0] , '');
chars = false;
}
// end tag
} else if ( BEGING_END_TAGE_REGEXP.test(html) ) {
match = html.match( END_TAG_REGEXP );
if ( match ) {
html = html.substring( match[0].length );
match[0].replace( END_TAG_REGEXP, parseEndTag );
chars = false;
}
// start tag
} else if ( BEGIN_TAG_REGEXP.test(html) ) {
match = html.match( START_TAG_REGEXP );
if ( match ) {
html = html.substring( match[0].length );
match[0].replace( START_TAG_REGEXP, parseStartTag );
chars = false;
}
}
if ( chars ) {
index = html.indexOf("<");
var text = index < 0 ? html : html.substring( 0, index );
html = index < 0 ? "" : html.substring( index );
if (handler.chars) handler.chars( decodeEntities(text) );
}
} else {
html = html.replace(new RegExp("(.*)<\\s*\\/\\s*" + stack.last() + "[^>]*>", 'i'),
function(all, text){
text = text.replace(COMMENT_REGEXP, "$1").replace(CDATA_REGEXP, "$1");
if (handler.chars) handler.chars( decodeEntities(text) );
return "";
});
parseEndTag( "", stack.last() );
}
if ( html == last ) {
throw $sanitizeMinErr('badparse', "The sanitizer was unable to parse the following block " +
"of html: {0}", html);
}
last = html;
}
// Clean up any remaining tags
parseEndTag();
function parseStartTag( tag, tagName, rest, unary ) {
tagName = angular.lowercase(tagName);
if ( blockElements[ tagName ] ) {
while ( stack.last() && inlineElements[ stack.last() ] ) {
parseEndTag( "", stack.last() );
}
}
if ( optionalEndTagElements[ tagName ] && stack.last() == tagName ) {
parseEndTag( "", tagName );
}
unary = voidElements[ tagName ] || !!unary;
if ( !unary )
stack.push( tagName );
var attrs = {};
rest.replace(ATTR_REGEXP,
function(match, name, doubleQuotedValue, singleQuotedValue, unquotedValue) {
var value = doubleQuotedValue
|| singleQuotedValue
|| unquotedValue
|| '';
attrs[name] = decodeEntities(value);
});
if (handler.start) handler.start( tagName, attrs, unary );
}
function parseEndTag( tag, tagName ) |
}
/**
* decodes all entities into a regular string
* @param value
* @returns {string} A string with decoded entities.
*/
var hiddenPre=document.createElement("pre");
function decodeEntities(value) {
if (!value) {
return '';
}
// Note: IE8 does not preserve spaces at the start/end of innerHTML
var spaceRe = /^(\s*)([\s\S]*?)(\s*)$/;
var parts = spaceRe.exec(value);
parts[0] = '';
if (parts[2]) {
hiddenPre.innerHTML=parts[2].replace(/</g,"&lt;");
parts[2] = hiddenPre.innerText || hiddenPre.textContent;
}
return parts.join('');
}
/**
* Escapes all potentially dangerous characters, so that the
* resulting string can be safely inserted into attribute or
* element text.
* @param value
* @returns escaped text
*/
function encodeEntities(value) {
return value.
replace(/&/g, '&amp;').
replace(NON_ALPHANUMERIC_REGEXP, function(value){
return '&#' + value.charCodeAt(0) + ';';
}).
replace(/</g, '&lt;').
replace(/>/g, '&gt;');
}
/**
* create an HTML/XML writer which writes to a buffer
* @param {Array} buf use buf.join('') to get the sanitized html string
* @returns {object} in the form of {
* start: function(tag, attrs, unary) {},
* end: function(tag) {},
* chars: function(text) {},
* comment: function(text) {}
* }
*/
function htmlSanitizeWriter(buf, uriValidator){
var ignore = false;
var out = angular.bind(buf, buf.push);
return {
start: function(tag, attrs, unary){
tag = angular.lowercase(tag);
if (!ignore && specialElements[tag]) {
ignore = tag;
}
if (!ignore && validElements[tag] === true) {
out('<');
out(tag);
angular.forEach(attrs, function(value, key){
var lkey=angular.lowercase(key);
var isImage = (tag === 'img' && lkey === 'src') || (lkey === 'background');
if (validAttrs[lkey] === true &&
(uriAttrs[lkey] !== true || uriValidator(value, isImage))) {
out(' ');
out(key);
out('="');
out(encodeEntities(value));
out('"');
}
});
out(unary ? '/>' : '>');
}
},
end: function(tag){
tag = angular.lowercase(tag);
if (!ignore && validElements[tag] === true) {
out('</');
out(tag);
out('>');
}
if (tag == ignore) {
ignore = false;
}
},
chars: function(chars){
if (!ignore) {
out(encodeEntities(chars));
}
}
};
}
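// Editor's note (sketch, not part of the original source): `ignore` latches
// while a special element is open, so script/style content never reaches the
// buffer even though the surrounding markup does:
//   var buf = [];
//   htmlParser('<div><script>evil()</script>ok</div>',
//              htmlSanitizeWriter(buf, function() { return true; }));
//   buf.join('');  // => '<div>ok</div>'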
// define ngSanitize module and register $sanitize service
angular.module('ngSanitize', []).provider('$sanitize', $SanitizeProvider);
/* global sanitizeText: false */
/**
* @ngdoc filter
* @name ngSanitize.filter:linky
* @function
*
* @description
* Finds links in text input and turns them into html links. Supports http/https/ftp/mailto and
* plain email address links.
*
* Requires the {@link ngSanitize `ngSanitize`} module to be installed.
*
* @param {string} text Input text.
* @param {string} target Window (_blank|_self|_parent|_top) or named frame to open links in.
* @returns {string} Html-linkified text.
*
* @usage
<span ng-bind-html="linky_expression | linky"></span>
*
* @example
<doc:example module="ngSanitize">
<doc:source>
<script>
function Ctrl($scope) {
$scope.snippet =
'Pretty text with some links:\n'+
'http://angularjs.org/,\n'+
'mailto:[email protected],\n'+
'[email protected],\n'+
'and one more: ftp://127.0.0.1/.';
$scope.snippetWithTarget = 'http://angularjs.org/';
}
</script>
<div ng-controller="Ctrl">
Snippet: <textarea ng-model="snippet" cols="60" rows="3"></textarea>
<table>
<tr>
<td>Filter</td>
<td>Source</td>
<td>Rendered</td>
</tr>
<tr id="linky-filter">
<td>linky filter</td>
<td>
<pre><div ng-bind-html="snippet | linky"><br></div></pre>
</td>
<td>
<div ng-bind-html="snippet | linky"></div>
</td>
</tr>
<tr id="linky-target">
<td>linky target</td>
<td>
<pre><div ng-bind-html="snippetWithTarget | linky:'_blank'"><br></div></pre>
</td>
<td>
<div ng-bind-html="snippetWithTarget | linky:'_blank'"></div>
</td>
</tr>
<tr id="escaped-html">
<td>no filter</td>
<td><pre><div ng-bind="snippet"><br></div></pre></td>
<td><div ng-bind="snippet"></div></td>
</tr>
</table>
</doc:source>
<doc:scenario>
it('should linkify the snippet with urls', function() {
expect(using('#linky-filter').binding('snippet | linky')).
toBe('Pretty text with some links: ' +
'<a href="http://angularjs.org/">http://angularjs.org/</a>, ' +
'<a href="mailto:[email protected]">[email protected]</a>, ' +
'<a href="mailto:[email protected]">[email protected]</a>, ' +
'and one more: <a href="ftp://127.0.0.1/">ftp://127.0.0.1/</a>.');
});
it ('should not linkify snippet without the linky filter', function() {
expect(using('#escaped-html').binding('snippet')).
toBe("Pretty text with some links:\n" +
"http://angularjs.org/,\n" +
"mailto:[email protected],\n" +
"[email protected],\n" +
"and one more: ftp://127.0.0.1/.");
});
it('should update', function() {
input('snippet').enter('new http://link.');
expect(using('#linky-filter').binding('snippet | linky')).
toBe('new <a href="http://link">http://link</a>.');
expect(using('#escaped-html').binding('snippet')).toBe('new http://link.');
});
it('should work with the target property', function() {
expect(using('#linky-target').binding("snippetWithTarget | linky:'_blank'")).
toBe('<a target="_blank" href="http://angularjs.org/">http://angularjs.org/</a>');
});
</doc:scenario>
</doc:example>
*/
angular.module('ngSanitize').filter('linky', ['$sanitize', function($sanitize) {
var LINKY_URL_REGEXP =
/((ftp|https?):\/\/|(mailto:)?[A-Za-z0-9._%+-]+@)\S*[^\s.;,(){}<>]/,
MAILTO_REGEXP = /^mailto:/;
return function(text, target) {
if (!text) return text;
var match;
var raw = text;
var html = [];
var url;
var i;
while ((match = raw.match(LINKY_URL_REGEXP))) {
// We cannot end in these characters, as they are sometimes found at the end of a sentence
url = match[0];
// if we did not match ftp/http/mailto then assume mailto
if (match[2] == match[3]) url = 'mailto:' + url;
i = match.index;
addText(raw.substr(0, i));
addLink(url, match[0].replace(MAILTO_REGEXP, ''));
raw = raw.substring(i + match[0].length);
}
addText(raw);
return $sanitize(html.join(''));
function addText(text) {
if (!text) {
return;
}
html.push(sanitizeText(text));
}
function addLink(url, text) {
html.push('<a ');
if (angular.isDefined(target)) {
html.push('target="');
html.push(target);
html.push('" ');
}
html.push('href="');
html.push(url);
html.push('">');
addText(text);
html.push('</a>');
}
};
}]);
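// Editor's sketch of the mailto branch (not part of the original source):
// when neither a scheme nor an explicit "mailto:" matched, match[2] and
// match[3] are both undefined, so a bare address is linked as mailto:
//   $filter('linky')('contact [email protected]');
//   // => 'contact <a href="mailto:[email protected]">[email protected]</a>'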
})(window, window.angular);
| {
var pos = 0, i;
tagName = angular.lowercase(tagName);
if ( tagName )
// Find the closest opened tag of the same type
for ( pos = stack.length - 1; pos >= 0; pos-- )
if ( stack[ pos ] == tagName )
break;
if ( pos >= 0 ) {
// Close all the open elements, up the stack
for ( i = stack.length - 1; i >= pos; i-- )
if (handler.end) handler.end( stack[ i ] );
// Remove the open elements from the stack
stack.length = pos;
}
} | identifier_body |
inspectdb.py | from __future__ import unicode_literals
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.utils import six
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def | (self, options):
connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s.startswith("u'") and s[1:] or s
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
yield ''
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
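# Editor's sketch (not part of the original command), typical results:
#   normalize_col_name('user_id', [], True)
#   # -> ('user', {}, [])  (relation column: the trailing "_id" is dropped)
#   normalize_col_name('class', [], False)
#   # -> ('class_field', {'db_column': 'class'},
#   #     ['Field renamed because it was a Python reserved word.'])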
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
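# Editor's sketch (not part of the original command): a varchar(75) column
# description typically resolves to ('CharField', {'max_length': 75}, []),
# while an unrecognized type falls back to
# ('TextField', {}, ['This field type is a guess.']).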
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [" class Meta:",
" db_table = '%s'" % table_name,
""]
| handle_inspection | identifier_name |
inspectdb.py | from __future__ import unicode_literals
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.utils import six
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
|
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
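# Editor's sketch (not part of the original command): a clashing column name
# gets a numeric suffix, e.g.
#   normalize_col_name('name', ['name'], False)
#   # -> ('name_0', {'db_column': 'name'},
#   #     ['Field renamed because of name conflict.'])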
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [" class Meta:",
" db_table = '%s'" % table_name,
""]
| connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s.startswith("u'") and s[1:] or s
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
yield ''
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line | identifier_body |
inspectdb.py | from __future__ import unicode_literals
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.utils import six
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s.startswith("u'") and s[1:] or s
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
yield ''
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "'self'" if relations[i][1] == table_name else table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = models.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if field_type not in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
|
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
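# Illustrative outcomes of the rules above for non-relation columns
# (hypothetical names):
#   'User Name' -> 'user_name'   (lowercased, '\W' replaced, db_column kept)
#   'class'     -> 'class_field' (Python reserved word)
#   '2nd'       -> 'number_2nd'  (not a valid Python identifier)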
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [" class Meta:",
" db_table = '%s'" % table_name,
""]
| if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name | conditional_block |
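The middle cell above is the foreign-key branch of normalize_col_name; a standalone sketch of its effect on hypothetical column names:

def demo_relation_rename(col_name):
    # Mirrors the conditional block above: a relation column named like
    # 'author_id' yields the field name 'author'; any other relation
    # column keeps an explicit db_column so the mapping is preserved.
    field_params = {}
    new_name = col_name.lower()
    if new_name.endswith('_id'):
        new_name = new_name[:-3]
    else:
        field_params['db_column'] = col_name
    return new_name, field_params

# demo_relation_rename('author_id') -> ('author', {})
# demo_relation_rename('author')    -> ('author', {'db_column': 'author'})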
inspectdb.py | from __future__ import unicode_literals
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.utils import six
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to ' | db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s.startswith("u'") and s[1:] or s
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
yield ''
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "'self'" if relations[i][1] == table_name else table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = models.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if field_type not in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [" class Meta:",
" db_table = '%s'" % table_name,
""] | 'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
| random_line_split |
MapAddIndexCodec.ts | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable max-len */
import {BitsUtil} from '../util/BitsUtil';
import {ClientMessage, Frame, PARTITION_ID_OFFSET} from '../protocol/ClientMessage';
import {StringCodec} from './builtin/StringCodec';
import {InternalIndexConfig} from '../config/IndexConfig';
import {IndexConfigCodec} from './custom/IndexConfigCodec';
// hex: 0x012900
const REQUEST_MESSAGE_TYPE = 76032;
// hex: 0x012901
// RESPONSE_MESSAGE_TYPE = 76033
const REQUEST_INITIAL_FRAME_SIZE = PARTITION_ID_OFFSET + BitsUtil.INT_SIZE_IN_BYTES;
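// (For reference, 0x012900 === 76032, matching the hex notes above. The
// initial request frame is presumably sized as the partition-id offset plus
// one int; the authoritative header layout is defined by ClientMessage.)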
/** @internal */
export class MapAddIndexCodec {
static | (name: string, indexConfig: InternalIndexConfig): ClientMessage {
const clientMessage = ClientMessage.createForEncode();
clientMessage.setRetryable(false);
const initialFrame = Frame.createInitialFrame(REQUEST_INITIAL_FRAME_SIZE);
clientMessage.addFrame(initialFrame);
clientMessage.setMessageType(REQUEST_MESSAGE_TYPE);
clientMessage.setPartitionId(-1);
StringCodec.encode(clientMessage, name);
IndexConfigCodec.encode(clientMessage, indexConfig);
return clientMessage;
}
}
| encodeRequest | identifier_name |
MapAddIndexCodec.ts | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable max-len */
import {BitsUtil} from '../util/BitsUtil';
import {ClientMessage, Frame, PARTITION_ID_OFFSET} from '../protocol/ClientMessage';
import {StringCodec} from './builtin/StringCodec';
import {InternalIndexConfig} from '../config/IndexConfig';
import {IndexConfigCodec} from './custom/IndexConfigCodec';
// hex: 0x012900
const REQUEST_MESSAGE_TYPE = 76032;
// hex: 0x012901
// RESPONSE_MESSAGE_TYPE = 76033
const REQUEST_INITIAL_FRAME_SIZE = PARTITION_ID_OFFSET + BitsUtil.INT_SIZE_IN_BYTES;
/** @internal */
export class MapAddIndexCodec {
static encodeRequest(name: string, indexConfig: InternalIndexConfig): ClientMessage {
const clientMessage = ClientMessage.createForEncode();
clientMessage.setRetryable(false);
const initialFrame = Frame.createInitialFrame(REQUEST_INITIAL_FRAME_SIZE);
clientMessage.addFrame(initialFrame);
clientMessage.setMessageType(REQUEST_MESSAGE_TYPE);
clientMessage.setPartitionId(-1);
StringCodec.encode(clientMessage, name); | } | IndexConfigCodec.encode(clientMessage, indexConfig);
return clientMessage;
} | random_line_split |
MapAddIndexCodec.ts | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable max-len */
import {BitsUtil} from '../util/BitsUtil';
import {ClientMessage, Frame, PARTITION_ID_OFFSET} from '../protocol/ClientMessage';
import {StringCodec} from './builtin/StringCodec';
import {InternalIndexConfig} from '../config/IndexConfig';
import {IndexConfigCodec} from './custom/IndexConfigCodec';
// hex: 0x012900
const REQUEST_MESSAGE_TYPE = 76032;
// hex: 0x012901
// RESPONSE_MESSAGE_TYPE = 76033
const REQUEST_INITIAL_FRAME_SIZE = PARTITION_ID_OFFSET + BitsUtil.INT_SIZE_IN_BYTES;
/** @internal */
export class MapAddIndexCodec {
static encodeRequest(name: string, indexConfig: InternalIndexConfig): ClientMessage |
}
| {
const clientMessage = ClientMessage.createForEncode();
clientMessage.setRetryable(false);
const initialFrame = Frame.createInitialFrame(REQUEST_INITIAL_FRAME_SIZE);
clientMessage.addFrame(initialFrame);
clientMessage.setMessageType(REQUEST_MESSAGE_TYPE);
clientMessage.setPartitionId(-1);
StringCodec.encode(clientMessage, name);
IndexConfigCodec.encode(clientMessage, indexConfig);
return clientMessage;
} | identifier_body |
TableHead.js | "use strict";
var _interopRequireWildcard = require("@babel/runtime/helpers/interopRequireWildcard");
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = exports.styles = void 0;
var _extends2 = _interopRequireDefault(require("@babel/runtime/helpers/extends"));
var _objectWithoutProperties2 = _interopRequireDefault(require("@babel/runtime/helpers/objectWithoutProperties"));
var React = _interopRequireWildcard(require("react"));
var _propTypes = _interopRequireDefault(require("prop-types"));
var _clsx = _interopRequireDefault(require("clsx"));
var _withStyles = _interopRequireDefault(require("../styles/withStyles"));
var _Tablelvl2Context = _interopRequireDefault(require("../Table/Tablelvl2Context"));
var styles = {
/* Styles applied to the root element. */
root: {
display: 'table-header-group'
}
};
exports.styles = styles;
var tablelvl2 = {
variant: 'head'
};
var defaultComponent = 'thead';
var TableHead = /*#__PURE__*/React.forwardRef(function TableHead(props, ref) {
var classes = props.classes,
className = props.className,
_props$component = props.component,
Component = _props$component === void 0 ? defaultComponent : _props$component,
other = (0, _objectWithoutProperties2.default)(props, ["classes", "className", "component"]);
return /*#__PURE__*/React.createElement(_Tablelvl2Context.default.Provider, {
value: tablelvl2
}, /*#__PURE__*/React.createElement(Component, (0, _extends2.default)({
className: (0, _clsx.default)(classes.root, className),
ref: ref, | }, other)));
});
process.env.NODE_ENV !== "production" ? TableHead.propTypes = {
// ----------------------------- Warning --------------------------------
// | These PropTypes are generated from the TypeScript type definitions |
// | To update them edit the d.ts file and run "yarn proptypes" |
// ----------------------------------------------------------------------
/**
* The content of the component, normally `TableRow`.
*/
children: _propTypes.default.node,
/**
* Override or extend the styles applied to the component.
* See [CSS API](#css) below for more details.
*/
classes: _propTypes.default.object,
/**
* @ignore
*/
className: _propTypes.default.string,
/**
* The component used for the root node.
* Either a string to use a HTML element or a component.
*/
component: _propTypes.default
/* @typescript-to-proptypes-ignore */
.elementType
} : void 0;
var _default = (0, _withStyles.default)(styles, {
name: 'MuiTableHead'
})(TableHead);
exports.default = _default; | role: Component === defaultComponent ? null : 'rowgroup' | random_line_split |
jstool.py | #!/usr/bin/python
import os
import sys
import argparse
import requests
import subprocess | class bcolors:
HEADER = '\033[90m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class Console:
def __init__(self):
self.verbose = False
def log(self, string):
if self.verbose:
print string
console = Console()
def filter_no_console(line):
if 'console' in line:
return False
return True
def consolidate(input_filename, output_filename, filter_functions=None):
# read all filenames from input file
filenames = [line.rstrip('\n') for line in open(input_filename)]
# concat all lines in each file to the output file
with open(output_filename, 'w') as outfile:
for filename in filenames:
if filename.startswith('#') or len(filename) <= 0:
continue
console.log(bcolors.HEADER + filename + bcolors.ENDC)
with open(filename) as infile:
# write a header
outfile.write("/*\n* " + filename + "\n*/\n")
# write contents
for index, line in enumerate(infile):
# apply filter functions
if isinstance(filter_functions, list) and len(filter_functions) > 0:
add_line = True
for filter_function in filter_functions:
if not filter_function(line):
add_line = False
break
if add_line:
outfile.write(line)
else:
console.log('- line ' + str(index) + ': ' + bcolors.FAIL + line.lstrip().rstrip() + bcolors.ENDC)
# no filters
else:
outfile.write(line)
# newline
outfile.write("\n")
def compression_level_to_string(optimization_level):
if optimization_level >= 3:
compilation_level = 'ADVANCED_OPTIMIZATIONS'
elif optimization_level >= 2:
compilation_level = 'SIMPLE_OPTIMIZATIONS'
else:
compilation_level = 'WHITESPACE_ONLY'
return compilation_level
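# e.g.: compression_level_to_string(0) -> 'WHITESPACE_ONLY'
#       compression_level_to_string(2) -> 'SIMPLE_OPTIMIZATIONS'
#       compression_level_to_string(3) -> 'ADVANCED_OPTIMIZATIONS'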
def get_minified_filename(filename):
return os.path.splitext(filename)[0] + '.min.js'
def compress_local(filename, optimization_level, compiler_path):
# java -jar compiler.jar --js hello.js --js_output_file hello-compiled.js
console.log('compiling with ' + compiler_path)
subprocess.call(['java',
'-jar', compiler_path,
'--js', filename,
'--js_output_file', filename,
'--compilation_level', compression_level_to_string(optimization_level)
])
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def compress_remote(filename, optimization_level):
SERVICE_URL = 'http://closure-compiler.appspot.com/compile'
console.log('compiling with google closure API: ' + SERVICE_URL)
with open(filename, 'r') as file:
javascript = file.read()
data = {
'js_code': javascript,
'output_format': 'text',
'output_info': 'compiled_code',
'compilation_level': compression_level_to_string(optimization_level)
}
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
r = requests.post(SERVICE_URL, data=data, headers=headers)
result = r.text
with open(filename, 'w') as outfile:
outfile.write(result)
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-v, --verbose', dest='verbose', default=False, action='store_true', help='detailed program output')
parser.add_argument('-i, --input', required=True, dest='input_file', type=str, help='input file (required), containing one filename per line to compile')
parser.add_argument('-o, --output', dest='output_file', default='output.js', type=str, help='output file')
parser.add_argument('-c, --compression', default=0, dest='compress', type=int, help='compression level of output file\n0: no compression\n1: strip whitespace\n2: simple\n3: advanced')
parser.add_argument('--compiler', default=None, dest='compiler_path', type=str, help='path to closure compiler jar file. If not specified, online closure API will be used instead')
parser.add_argument('--filter-console', default=False, dest='no_console', help='strips console calls', action='store_true')
args = parser.parse_args()
console.verbose = args.verbose
filters=[]
if args.no_console:
filters.append(filter_no_console)
output_filename = args.output_file
consolidate(input_filename=args.input_file, output_filename=output_filename, filter_functions=filters)
min_output_filename = get_minified_filename(output_filename)
if args.compress > 0:
# Seed the *.min.js file with the consolidated output first; the
# compressors read and rewrite that file in place.
shutil.copyfile(output_filename, min_output_filename)
if args.compiler_path is not None:
compress_local(min_output_filename, optimization_level=args.compress, compiler_path=args.compiler_path)
else:
compress_remote(min_output_filename, optimization_level=args.compress)
else:
# no compression was done, but we still want *.min.js
shutil.copyfile(output_filename, min_output_filename)
if __name__ == "__main__":
main(sys.argv[1:]) | import shutil
| random_line_split |
jstool.py | #!/usr/bin/python
import os
import sys
import argparse
import requests
import subprocess
import shutil
class bcolors:
HEADER = '\033[90m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class Console:
def __init__(self):
self.verbose = False
def log(self, string):
if self.verbose:
print string
console = Console()
def filter_no_console(line):
if 'console' in line:
return False
return True
def consolidate(input_filename, output_filename, filter_functions=None):
# read all filenames from input file
filenames = [line.rstrip('\n') for line in open(input_filename)]
# concat all lines in each file to the output file
with open(output_filename, 'w') as outfile:
for filename in filenames:
if filename.startswith('#') or len(filename) <= 0:
continue
console.log(bcolors.HEADER + filename + bcolors.ENDC)
with open(filename) as infile:
# write a header
outfile.write("/*\n* " + filename + "\n*/\n")
# write contents
for index, line in enumerate(infile):
# apply filter functions
if isinstance(filter_functions, list) and len(filter_functions) > 0:
add_line = True
for filter_function in filter_functions:
if not filter_function(line):
add_line = False
break
if add_line:
|
else:
console.log('- line ' + str(index) + ': ' + bcolors.FAIL + line.lstrip().rstrip() + bcolors.ENDC)
# no filters
else:
outfile.write(line)
# newline
outfile.write("\n")
def compression_level_to_string(optimization_level):
if optimization_level >= 3:
compilation_level = 'ADVANCED_OPTIMIZATIONS'
elif optimization_level >= 2:
compilation_level = 'SIMPLE_OPTIMIZATIONS'
else:
compilation_level = 'WHITESPACE_ONLY'
return compilation_level
def get_minified_filename(filename):
return os.path.splitext(filename)[0] + '.min.js'
def compress_local(filename, optimization_level, compiler_path):
# java -jar compiler.jar --js hello.js --js_output_file hello-compiled.js
console.log('compiling with ' + compiler_path)
subprocess.call(['java',
'-jar', compiler_path,
'--js', filename,
'--js_output_file', filename,
'--compilation_level', compression_level_to_string(optimization_level)
])
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def compress_remote(filename, optimization_level):
SERVICE_URL = 'http://closure-compiler.appspot.com/compile'
console.log('compiling with google closure API: ' + SERVICE_URL)
with open(filename, 'r') as file:
javascript = file.read()
data = {
'js_code': javascript,
'output_format': 'text',
'output_info': 'compiled_code',
'compilation_level': compression_level_to_string(optimization_level)
}
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
r = requests.post(SERVICE_URL, data=data, headers=headers)
result = r.text
with open(filename, 'w') as outfile:
outfile.write(result)
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-v, --verbose', dest='verbose', default=False, action='store_true', help='detailed program output')
parser.add_argument('-i, --input', required=True, dest='input_file', type=str, help='input file (required), containing one filename per line to compile')
parser.add_argument('-o, --output', dest='output_file', default='output.js', type=str, help='output file')
parser.add_argument('-c, --compression', default=0, dest='compress', type=int, help='compression level of output file\n0: no compression\n1: strip whitespace\n2: simple\n3: advanced')
parser.add_argument('--compiler', default=None, dest='compiler_path', type=str, help='path to closure compiler jar file. If not specified, online closure API will be used instead')
parser.add_argument('--filter-console', default=False, dest='no_console', help='strips console calls', action='store_true')
args = parser.parse_args()
console.verbose = args.verbose
filters=[]
if args.no_console:
filters.append(filter_no_console)
output_filename = args.output_file
consolidate(input_filename=args.input_file, output_filename=output_filename, filter_functions=filters)
min_output_filename = get_minified_filename(output_filename)
if args.compress > 0:
# Seed the *.min.js file with the consolidated output first; the
# compressors read and rewrite that file in place.
shutil.copyfile(output_filename, min_output_filename)
if args.compiler_path is not None:
compress_local(min_output_filename, optimization_level=args.compress, compiler_path=args.compiler_path)
else:
compress_remote(min_output_filename, optimization_level=args.compress)
else:
# no compression was done, but we still want *.min.js
shutil.copyfile(output_filename, min_output_filename)
if __name__ == "__main__":
main(sys.argv[1:]) | outfile.write(line) | conditional_block |
jstool.py | #!/usr/bin/python
import os
import sys
import argparse
import requests
import subprocess
import shutil
class bcolors:
HEADER = '\033[90m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class Console:
def __init__(self):
self.verbose = False
def log(self, string):
if self.verbose:
print string
console = Console()
def filter_no_console(line):
if 'console' in line:
return False
return True
def consolidate(input_filename, output_filename, filter_functions=None):
# read all filenames from input file
filenames = [line.rstrip('\n') for line in open(input_filename)]
# concat all lines in each file to the output file
with open(output_filename, 'w') as outfile:
for filename in filenames:
if filename.startswith('#') or len(filename) <= 0:
continue
console.log(bcolors.HEADER + filename + bcolors.ENDC)
with open(filename) as infile:
# write a header
outfile.write("/*\n* " + filename + "\n*/\n")
# write contents
for index, line in enumerate(infile):
# apply filter functions
if isinstance(filter_functions, list) and len(filter_functions) > 0:
add_line = True
for filter_function in filter_functions:
if not filter_function(line):
add_line = False
break
if add_line:
outfile.write(line)
else:
console.log('- line ' + str(index) + ': ' + bcolors.FAIL + line.lstrip().rstrip() + bcolors.ENDC)
# no filters
else:
outfile.write(line)
# newline
outfile.write("\n")
def compression_level_to_string(optimization_level):
if optimization_level >= 3:
compilation_level = 'ADVANCED_OPTIMIZATIONS'
elif optimization_level >= 2:
compilation_level = 'SIMPLE_OPTIMIZATIONS'
else:
compilation_level = 'WHITESPACE_ONLY'
return compilation_level
def get_minified_filename(filename):
return os.path.splitext(filename)[0] + '.min.js'
def compress_local(filename, optimization_level, compiler_path):
# java -jar compiler.jar --js hello.js --js_output_file hello-compiled.js
console.log('compiling with ' + compiler_path)
subprocess.call(['java',
'-jar', compiler_path,
'--js', filename,
'--js_output_file', filename,
'--compilation_level', compression_level_to_string(optimization_level)
])
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def compress_remote(filename, optimization_level):
SERVICE_URL = 'http://closure-compiler.appspot.com/compile'
console.log('compiling with google closure API: ' + SERVICE_URL)
with open(filename, 'r') as file:
javascript = file.read()
data = {
'js_code': javascript,
'output_format': 'text',
'output_info': 'compiled_code',
'compilation_level': compression_level_to_string(optimization_level)
}
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
r = requests.post(SERVICE_URL, data=data, headers=headers)
result = r.text
with open(filename, 'w') as outfile:
outfile.write(result)
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def | (argv):
parser = argparse.ArgumentParser()
parser.add_argument('-v, --verbose', dest='verbose', default=False, action='store_true', help='detailed program output')
parser.add_argument('-i, --input', required=True, dest='input_file', type=str, help='input file (required), containing one filename per line to compile')
parser.add_argument('-o, --output', dest='output_file', default='output.js', type=str, help='output file')
parser.add_argument('-c, --compression', default=0, dest='compress', type=int, help='compression level of output file\n0: no compression\n1: strip whitespace\n2: simple\n3: advanced')
parser.add_argument('--compiler', default=None, dest='compiler_path', type=str, help='path to closure compiler jar file. If not specified, online closure API will be used instead')
parser.add_argument('--filter-console', default=False, dest='no_console', help='strips console calls', action='store_true')
args = parser.parse_args()
console.verbose = args.verbose
filters=[]
if args.no_console:
filters.append(filter_no_console)
output_filename = args.output_file
consolidate(input_filename=args.input_file, output_filename=output_filename, filter_functions=filters)
min_output_filename = get_minified_filename(output_filename)
if args.compress > 0:
# Seed the *.min.js file with the consolidated output first; the
# compressors read and rewrite that file in place.
shutil.copyfile(output_filename, min_output_filename)
if args.compiler_path is not None:
compress_local(min_output_filename, optimization_level=args.compress, compiler_path=args.compiler_path)
else:
compress_remote(min_output_filename, optimization_level=args.compress)
else:
# no compression was done, but we still want *.min.js
shutil.copyfile(output_filename, min_output_filename)
if __name__ == "__main__":
main(sys.argv[1:]) | main | identifier_name |
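A quick check of the console filter defined above — a sketch that assumes jstool is importable as a module:

from jstool import filter_no_console

def test_filter_no_console():
    # consolidate() drops any line mentioning 'console'.
    assert filter_no_console('var x = 1;')
    assert not filter_no_console('console.log(x);')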
jstool.py | #!/usr/bin/python
import os
import sys
import argparse
import requests
import subprocess
import shutil
class bcolors:
HEADER = '\033[90m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class Console:
def __init__(self):
self.verbose = False
def log(self, string):
if self.verbose:
print string
console = Console()
def filter_no_console(line):
if 'console' in line:
return False
return True
def consolidate(input_filename, output_filename, filter_functions=None):
# read all filenames from input file
filenames = [line.rstrip('\n') for line in open(input_filename)]
# concat all lines in each file to the output file
with open(output_filename, 'w') as outfile:
for filename in filenames:
if filename.startswith('#') or len(filename) <= 0:
continue
console.log(bcolors.HEADER + filename + bcolors.ENDC)
with open(filename) as infile:
# write a header
outfile.write("/*\n* " + filename + "\n*/\n")
# write contents
for index, line in enumerate(infile):
# apply filter functions
if isinstance(filter_functions, list) and len(filter_functions) > 0:
add_line = True
for filter_function in filter_functions:
if not filter_function(line):
add_line = False
break
if add_line:
outfile.write(line)
else:
console.log('- line ' + str(index) + ': ' + bcolors.FAIL + line.lstrip().rstrip() + bcolors.ENDC)
# no filters
else:
outfile.write(line)
# newline
outfile.write("\n")
def compression_level_to_string(optimization_level):
if optimization_level >= 3:
compilation_level = 'ADVANCED_OPTIMIZATIONS'
elif optimization_level >= 2:
compilation_level = 'SIMPLE_OPTIMIZATIONS'
else:
compilation_level = 'WHITESPACE_ONLY'
return compilation_level
def get_minified_filename(filename):
return os.path.splitext(filename)[0] + '.min.js'
def compress_local(filename, optimization_level, compiler_path):
# java -jar compiler.jar --js hello.js --js_output_file hello-compiled.js
console.log('compiling with ' + compiler_path)
subprocess.call(['java',
'-jar', compiler_path,
'--js', filename,
'--js_output_file', filename,
'--compilation_level', compression_level_to_string(optimization_level)
])
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def compress_remote(filename, optimization_level):
|
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-v, --verbose', dest='verbose', default=False, action='store_true', help='detailed program output')
parser.add_argument('-i, --input', required=True, dest='input_file', type=str, help='input file (required), containing one filename per line to compile')
parser.add_argument('-o, --output', dest='output_file', default='output.js', type=str, help='output file')
parser.add_argument('-c, --compression', default=0, dest='compress', type=int, help='compression level of output file\n0: no compression\n1: strip whitespace\n2: simple\n3: advanced')
parser.add_argument('--compiler', default=None, dest='compiler_path', type=str, help='path to closure compiler jar file. If not specified, online closure API will be used instead')
parser.add_argument('--filter-console', default=False, dest='no_console', help='strips console calls', action='store_true')
args = parser.parse_args()
console.verbose = args.verbose
filters=[]
if args.no_console:
filters.append(filter_no_console)
output_filename = args.output_file
consolidate(input_filename=args.input_file, output_filename=output_filename, filter_functions=filters)
min_output_filename = get_minified_filename(output_filename)
if args.compress > 0:
# Seed the *.min.js file with the consolidated output first; the
# compressors read and rewrite that file in place.
shutil.copyfile(output_filename, min_output_filename)
if args.compiler_path is not None:
compress_local(min_output_filename, optimization_level=args.compress, compiler_path=args.compiler_path)
else:
compress_remote(min_output_filename, optimization_level=args.compress)
else:
# no compression was done, but we still want *.min.js
shutil.copyfile(output_filename, min_output_filename)
if __name__ == "__main__":
main(sys.argv[1:]) | SERVICE_URL = 'http://closure-compiler.appspot.com/compile'
console.log('compiling with google closure API: ' + SERVICE_URL)
with open(filename, 'r') as file:
javascript = file.read()
data = {
'js_code': javascript,
'output_format': 'text',
'output_info': 'compiled_code',
'compilation_level': compression_level_to_string(optimization_level)
}
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
r = requests.post(SERVICE_URL, data=data, headers=headers)
result = r.text
with open(filename, 'w') as outfile:
outfile.write(result)
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC) | identifier_body |
struct-style-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-command:set print union on
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print case1
// gdb-check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}}
// gdb-command:print case2
// gdb-check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}}
// gdb-command:print case3
// gdb-check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}}
// gdb-command:print univariant
// gdb-check:$4 = {a = -1}
#![allow(unused_variable)]
#![feature(struct_variant)]
// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
// the size of the discriminant value is machine dependent, it has to be taken into account when
// the datatype layout should be predictable, as in this case.
enum | {
Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
Case2 { a: u64, b: u32, c: u32},
Case3 { a: u64, b: u64 }
}
enum Univariant {
TheOnlyCase { a: i64 }
}
fn main() {
// In order to avoid endianess trouble all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
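// (Sanity check for the repeated-byte encodings above — e.g. in Python,
// int('01111100' * 8, 2) == 8970181431921507452 and
// int('01111100' * 2, 2) == 31868; illustrative only, not part of the test.)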
let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3 = Case3 { a: 0, b: 6438275382588823897 };
let univariant = TheOnlyCase { a: -1 };
zzz();
}
fn zzz() {()}
| Regular | identifier_name |
struct-style-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-command:set print union on
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print case1
// gdb-check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}}
// gdb-command:print case2
// gdb-check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}}
// gdb-command:print case3
// gdb-check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}}
// gdb-command:print univariant
// gdb-check:$4 = {a = -1} | // the size of the discriminant value is machine dependent, this has be taken into account when
// datatype layout should be predictable as in this case.
enum Regular {
Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
Case2 { a: u64, b: u32, c: u32},
Case3 { a: u64, b: u64 }
}
enum Univariant {
TheOnlyCase { a: i64 }
}
fn main() {
// In order to avoid endianess trouble all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3 = Case3 { a: 0, b: 6438275382588823897 };
let univariant = TheOnlyCase { a: -1 };
zzz();
}
fn zzz() {()} |
#![allow(unused_variable)]
#![feature(struct_variant)]
// The first element is to ensure proper alignment, irrespective of the machines word size. Since | random_line_split |
template.py | #!/usr/bin/env python
import re
class Templates:
TOKENS = re.compile('([A-Za-z]+|[^ ])')
SIMPLE = {
'l': '_n.l.ptb()',
'r': '_n.r.ptb()',
'<': 'addr(_n)',
'>': 'addl(_n)',
}
def compile(self, template):
python = self.parse(self.TOKENS.findall(template))
return eval("lambda _n: %s" % python)
def parse(self, tokens):
t = tokens.pop(0)
if t in '([':
if t == '(':
label = "'%s'" % tokens.pop(0)
args = self.parse_args(tokens, ')')
elif t == '[':
label = 'None'
args = self.parse_args(tokens, ']')
return 'PTB(_n, %s, %s)' % (label, ', '.join(args))
elif t in self.SIMPLE:
return self.SIMPLE[t]
else:
raise SyntaxError, "unknown token '%s'" % t
def parse_args(self, tokens, delimiter):
args = []
while tokens:
if tokens[0] == delimiter:
|
args.append(self.parse(tokens))
raise SyntaxError, "missing closing '%s'" % delimiter
templates = Templates()
t = templates.compile("<")
| tokens.pop(0)
return args | conditional_block |
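Worked examples of what the compiler above produces — a sketch; PTB, addr and addl are assumed to exist in the scope where the compiled lambda runs:

# templates.compile("( NP l r )") builds and eval()s the source string
#   "lambda _n: PTB(_n, 'NP', _n.l.ptb(), _n.r.ptb())"
# while templates.compile("<") builds
#   "lambda _n: addr(_n)"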
template.py | #!/usr/bin/env python
import re
class Templates:
TOKENS = re.compile('([A-Za-z]+|[^ ])')
SIMPLE = {
'l': '_n.l.ptb()',
'r': '_n.r.ptb()',
'<': 'addr(_n)',
'>': 'addl(_n)',
}
def compile(self, template):
python = self.parse(self.TOKENS.findall(template))
return eval("lambda _n: %s" % python)
def parse(self, tokens):
t = tokens.pop(0)
if t in '([':
if t == '(':
label = "'%s'" % tokens.pop(0)
args = self.parse_args(tokens, ')')
elif t == '[':
label = 'None'
args = self.parse_args(tokens, ']')
return 'PTB(_n, %s, %s)' % (label, ', '.join(args))
elif t in self.SIMPLE:
return self.SIMPLE[t]
else:
raise SyntaxError, "unknown token '%s'" % t
def | (self, tokens, delimiter):
args = []
while tokens:
if tokens[0] == delimiter:
tokens.pop(0)
return args
args.append(self.parse(tokens))
raise SyntaxError, "missing closing '%s'" % delimiter
templates = Templates()
t = templates.compile("<")
| parse_args | identifier_name |
template.py | #!/usr/bin/env python
import re
class Templates:
TOKENS = re.compile('([A-Za-z]+|[^ ])')
SIMPLE = {
'l': '_n.l.ptb()', | def compile(self, template):
python = self.parse(self.TOKENS.findall(template))
return eval("lambda _n: %s" % python)
def parse(self, tokens):
t = tokens.pop(0)
if t in '([':
if t == '(':
label = "'%s'" % tokens.pop(0)
args = self.parse_args(tokens, ')')
elif t == '[':
label = 'None'
args = self.parse_args(tokens, ']')
return 'PTB(_n, %s, %s)' % (label, ', '.join(args))
elif t in self.SIMPLE:
return self.SIMPLE[t]
else:
raise SyntaxError, "unknown token '%s'" % t
def parse_args(self, tokens, delimiter):
args = []
while tokens:
if tokens[0] == delimiter:
tokens.pop(0)
return args
args.append(self.parse(tokens))
raise SyntaxError, "missing closing '%s'" % delimiter
templates = Templates()
t = templates.compile("<") | 'r': '_n.r.ptb()',
'<': 'addr(_n)',
'>': 'addl(_n)',
}
| random_line_split |
template.py | #!/usr/bin/env python
import re
class Templates:
TOKENS = re.compile('([A-Za-z]+|[^ ])')
SIMPLE = {
'l': '_n.l.ptb()',
'r': '_n.r.ptb()',
'<': 'addr(_n)',
'>': 'addl(_n)',
}
def compile(self, template):
|
def parse(self, tokens):
t = tokens.pop(0)
if t in '([':
if t == '(':
label = "'%s'" % tokens.pop(0)
args = self.parse_args(tokens, ')')
elif t == '[':
label = 'None'
args = self.parse_args(tokens, ']')
return 'PTB(_n, %s, %s)' % (label, ', '.join(args))
elif t in self.SIMPLE:
return self.SIMPLE[t]
else:
raise SyntaxError, "unknown token '%s'" % t
def parse_args(self, tokens, delimiter):
args = []
while tokens:
if tokens[0] == delimiter:
tokens.pop(0)
return args
args.append(self.parse(tokens))
raise SyntaxError, "missing closing '%s'" % delimiter
templates = Templates()
t = templates.compile("<")
| python = self.parse(self.TOKENS.findall(template))
return eval("lambda _n: %s" % python) | identifier_body |
inspect-drawer.spec.ts | import { e2e } from '@grafana/e2e';
const PANEL_UNDER_TEST = '2 yaxis and axis labels';
e2e.scenario({
describeName: 'Inspect drawer tests',
itName: 'Tests various Inspect Drawer scenarios',
addScenarioDataSource: false,
addScenarioDashBoard: false,
skipScenario: false,
scenario: () => {
// @ts-ignore some typing issue
e2e().on('uncaught:exception', (err) => {
if (err.stack?.indexOf("TypeError: Cannot read property 'getText' of null") !== -1) {
// On occasion the monaco editor will not have had time to unload properly when we change the tab,
// and then the e2e test fails with the uncaught:exception error:
// TypeError: Cannot read property 'getText' of null
// at Object.ai [as getFoldingRanges] (http://localhost:3001/public/build/monaco-json.worker.js:2:215257)
// at e.getFoldingRanges (http://localhost:3001/public/build/monaco-json.worker.js:2:221188)
// at e.fmr (http://localhost:3001/public/build/monaco-json.worker.js:2:116605)
// at e._handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:7414)
// at Object.handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:7018)
// at e._handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:5038)
// at e.handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:4606)
// at e.onmessage (http://localhost:3001/public/build/monaco-json.worker.js:2:7097)
// at Tt.self.onmessage (http://localhost:3001/public/build/monaco-json.worker.js:2:117109)
// return false to prevent the error from
// failing this test
return false;
}
return true;
});
const viewPortWidth = e2e.config().viewportWidth;
e2e.flows.openDashboard({ uid: '5SdHCadmz' });
// testing opening inspect drawer directly by clicking on Inspect in header menu
e2e.flows.openPanelMenuItem(e2e.flows.PanelMenuItems.Inspect, PANEL_UNDER_TEST);
expectDrawerTabsAndContent();
expectDrawerExpandAndContract(viewPortWidth);
expectDrawerClose();
expectSubMenuScenario('Data');
expectSubMenuScenario('Query');
expectSubMenuScenario('Panel JSON', 'JSON');
e2e.flows.openPanelMenuItem(e2e.flows.PanelMenuItems.Edit, PANEL_UNDER_TEST);
e2e.components.QueryTab.queryInspectorButton().should('be.visible').click();
e2e.components.Drawer.General.title(`Inspect: ${PANEL_UNDER_TEST}`)
.should('be.visible')
.within(() => {
e2e.components.Tab.title('Query').should('be.visible');
// query should be the active tab
e2e.components.Tab.active().should('have.text', 'Query');
});
e2e.components.PanelInspector.Query.content().should('be.visible');
},
});
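// Helper assertions used by the scenario above; each verifies one piece of
// drawer behavior (tabs/content, expand/contract, close, sub-menu entry).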
const expectDrawerTabsAndContent = () => {
e2e.components.Drawer.General.title(`Inspect: ${PANEL_UNDER_TEST}`)
.should('be.visible')
.within(() => {
e2e.components.Tab.title('Data').should('be.visible');
// data should be the active tab
e2e.components.Tab.active().within((li: JQuery<HTMLLIElement>) => {
expect(li.text()).equals('Data');
});
e2e.components.PanelInspector.Data.content().should('be.visible');
e2e.components.PanelInspector.Stats.content().should('not.exist');
e2e.components.PanelInspector.Json.content().should('not.exist');
e2e.components.PanelInspector.Query.content().should('not.exist');
// other tabs should also be visible, click on each to see if we get any console errors
e2e.components.Tab.title('Stats').should('be.visible').click();
e2e.components.PanelInspector.Stats.content().should('be.visible');
e2e.components.PanelInspector.Data.content().should('not.exist');
e2e.components.PanelInspector.Json.content().should('not.exist');
e2e.components.PanelInspector.Query.content().should('not.exist');
e2e.components.Tab.title('JSON').should('be.visible').click();
e2e.components.PanelInspector.Json.content().should('be.visible');
e2e.components.PanelInspector.Data.content().should('not.exist');
e2e.components.PanelInspector.Stats.content().should('not.exist');
e2e.components.PanelInspector.Query.content().should('not.exist');
e2e.components.Tab.title('Query').should('be.visible').click();
e2e.components.PanelInspector.Query.content().should('be.visible');
e2e.components.PanelInspector.Data.content().should('not.exist');
e2e.components.PanelInspector.Stats.content().should('not.exist');
e2e.components.PanelInspector.Json.content().should('not.exist');
});
};
const expectDrawerClose = () => {
// close using close button
e2e.components.Drawer.General.close().click();
e2e.components.Drawer.General.title(`Inspect: ${PANEL_UNDER_TEST}`).should('not.exist');
};
const expectDrawerExpandAndContract = (viewPortWidth: number) => {
// try expand button
// drawer should take up half the screen
e2e.components.Drawer.General.rcContentWrapper()
.should('be.visible')
.should('have.css', 'width', `${viewPortWidth / 2}px`);
e2e.components.Drawer.General.expand().click();
e2e.components.Drawer.General.contract().should('be.visible');
| // drawer should take up the whole screen
e2e.components.Drawer.General.rcContentWrapper()
.should('be.visible')
.should('have.css', 'width', `${viewPortWidth}px`);
// try contract button
e2e.components.Drawer.General.contract().click();
e2e.components.Drawer.General.expand().should('be.visible');
e2e.components.Drawer.General.rcContentWrapper()
.should('be.visible')
.should('have.css', 'width', `${viewPortWidth / 2}px`);
};
const expectSubMenuScenario = (subMenu: string, tabTitle?: string) => {
tabTitle = tabTitle ?? subMenu;
// testing opening inspect drawer from sub menus under Inspect in header menu
e2e.components.Panels.Panel.title(PANEL_UNDER_TEST).scrollIntoView().should('be.visible').click();
// sub menus are in the DOM but not visible and because there is no hover support in Cypress force click
// https://github.com/cypress-io/cypress-example-recipes/blob/master/examples/testing-dom__hover-hidden-elements/cypress/integration/hover-hidden-elements-spec.js
e2e.components.Panels.Panel.headerItems(subMenu).click({ force: true });
// data should be the default tab
e2e.components.Tab.title(tabTitle).should('be.visible');
e2e.components.Tab.active().should('have.text', tabTitle);
expectDrawerClose();
}; | random_line_split |
|
inspect-drawer.spec.ts | import { e2e } from '@grafana/e2e';
const PANEL_UNDER_TEST = '2 yaxis and axis labels';
e2e.scenario({
describeName: 'Inspect drawer tests',
itName: 'Tests various Inspect Drawer scenarios',
addScenarioDataSource: false,
addScenarioDashBoard: false,
skipScenario: false,
scenario: () => {
// @ts-ignore some typing issue
e2e().on('uncaught:exception', (err) => {
if (err.stack?.indexOf("TypeError: Cannot read property 'getText' of null") !== -1) |
return true;
});
const viewPortWidth = e2e.config().viewportWidth;
e2e.flows.openDashboard({ uid: '5SdHCadmz' });
// testing opening inspect drawer directly by clicking on Inspect in header menu
e2e.flows.openPanelMenuItem(e2e.flows.PanelMenuItems.Inspect, PANEL_UNDER_TEST);
expectDrawerTabsAndContent();
expectDrawerExpandAndContract(viewPortWidth);
expectDrawerClose();
expectSubMenuScenario('Data');
expectSubMenuScenario('Query');
expectSubMenuScenario('Panel JSON', 'JSON');
e2e.flows.openPanelMenuItem(e2e.flows.PanelMenuItems.Edit, PANEL_UNDER_TEST);
e2e.components.QueryTab.queryInspectorButton().should('be.visible').click();
e2e.components.Drawer.General.title(`Inspect: ${PANEL_UNDER_TEST}`)
.should('be.visible')
.within(() => {
e2e.components.Tab.title('Query').should('be.visible');
// query should be the active tab
e2e.components.Tab.active().should('have.text', 'Query');
});
e2e.components.PanelInspector.Query.content().should('be.visible');
},
});
const expectDrawerTabsAndContent = () => {
e2e.components.Drawer.General.title(`Inspect: ${PANEL_UNDER_TEST}`)
.should('be.visible')
.within(() => {
e2e.components.Tab.title('Data').should('be.visible');
// data should be the active tab
e2e.components.Tab.active().within((li: JQuery<HTMLLIElement>) => {
expect(li.text()).equals('Data');
});
e2e.components.PanelInspector.Data.content().should('be.visible');
e2e.components.PanelInspector.Stats.content().should('not.exist');
e2e.components.PanelInspector.Json.content().should('not.exist');
e2e.components.PanelInspector.Query.content().should('not.exist');
// other tabs should also be visible, click on each to see if we get any console errors
e2e.components.Tab.title('Stats').should('be.visible').click();
e2e.components.PanelInspector.Stats.content().should('be.visible');
e2e.components.PanelInspector.Data.content().should('not.exist');
e2e.components.PanelInspector.Json.content().should('not.exist');
e2e.components.PanelInspector.Query.content().should('not.exist');
e2e.components.Tab.title('JSON').should('be.visible').click();
e2e.components.PanelInspector.Json.content().should('be.visible');
e2e.components.PanelInspector.Data.content().should('not.exist');
e2e.components.PanelInspector.Stats.content().should('not.exist');
e2e.components.PanelInspector.Query.content().should('not.exist');
e2e.components.Tab.title('Query').should('be.visible').click();
e2e.components.PanelInspector.Query.content().should('be.visible');
e2e.components.PanelInspector.Data.content().should('not.exist');
e2e.components.PanelInspector.Stats.content().should('not.exist');
e2e.components.PanelInspector.Json.content().should('not.exist');
});
};
const expectDrawerClose = () => {
// close using close button
e2e.components.Drawer.General.close().click();
e2e.components.Drawer.General.title(`Inspect: ${PANEL_UNDER_TEST}`).should('not.exist');
};
const expectDrawerExpandAndContract = (viewPortWidth: number) => {
// try expand button
// drawer should take up half the screen
e2e.components.Drawer.General.rcContentWrapper()
.should('be.visible')
.should('have.css', 'width', `${viewPortWidth / 2}px`);
e2e.components.Drawer.General.expand().click();
e2e.components.Drawer.General.contract().should('be.visible');
// drawer should take up the whole screen
e2e.components.Drawer.General.rcContentWrapper()
.should('be.visible')
.should('have.css', 'width', `${viewPortWidth}px`);
// try contract button
e2e.components.Drawer.General.contract().click();
e2e.components.Drawer.General.expand().should('be.visible');
e2e.components.Drawer.General.rcContentWrapper()
.should('be.visible')
.should('have.css', 'width', `${viewPortWidth / 2}px`);
};
const expectSubMenuScenario = (subMenu: string, tabTitle?: string) => {
tabTitle = tabTitle ?? subMenu;
// testing opening inspect drawer from sub menus under Inspect in header menu
e2e.components.Panels.Panel.title(PANEL_UNDER_TEST).scrollIntoView().should('be.visible').click();
// sub menus are in the DOM but not visible; Cypress has no hover support, so force the click
// https://github.com/cypress-io/cypress-example-recipes/blob/master/examples/testing-dom__hover-hidden-elements/cypress/integration/hover-hidden-elements-spec.js
e2e.components.Panels.Panel.headerItems(subMenu).click({ force: true });
// data should be the default tab
e2e.components.Tab.title(tabTitle).should('be.visible');
e2e.components.Tab.active().should('have.text', tabTitle);
expectDrawerClose();
};
| {
// Occasionally the monaco editor does not have time to unload properly when the tab changes,
// and the e2e test then fails with the uncaught exception:
// TypeError: Cannot read property 'getText' of null
// at Object.ai [as getFoldingRanges] (http://localhost:3001/public/build/monaco-json.worker.js:2:215257)
// at e.getFoldingRanges (http://localhost:3001/public/build/monaco-json.worker.js:2:221188)
// at e.fmr (http://localhost:3001/public/build/monaco-json.worker.js:2:116605)
// at e._handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:7414)
// at Object.handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:7018)
// at e._handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:5038)
// at e.handleMessage (http://localhost:3001/public/build/monaco-json.worker.js:2:4606)
// at e.onmessage (http://localhost:3001/public/build/monaco-json.worker.js:2:7097)
// at Tt.self.onmessage (http://localhost:3001/public/build/monaco-json.worker.js:2:117109)
// return false to prevent the error from
// failing this test
return false;
} | conditional_block |
parameters.rs | use http_types::Method;
use std::borrow::Cow;
#[derive(Clone, Debug)]
pub enum Parameters {
Query(String),
Body(String),
}
use std::{
iter::Map,
slice::Iter,
};
// This newtype is currently only used internally, but we might want to move it elsewhere,
// where its genericity could make it more useful. We could also aim to reduce the number of
// conversions in requests by having a type that only maps parameters once unless changed.
type ParamsMapper<'a, 'p, S, T> = Map<Iter<'a, (Cow<'p, str>, S)>, fn(&(Cow<'p, str>, S)) -> T>;
impl Parameters {
pub fn new<S: AsRef<str>>(method: &Method, params: &[(Cow<str>, S)]) -> Self {
let params_s = Self::params_to_query(params);
if Self::method_requires_body(method) {
Parameters::Body(params_s)
} else {
Parameters::Query(params_s)
}
}
#[inline]
fn | (method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE => false,
_ => true,
}
}
pub fn path_and_query<'p>(&self, path: &'p str) -> Cow<'p, str> {
self.query().map_or_else(|| Cow::Borrowed(path),
|q| {
let mut url = path.to_string();
url.push('?');
url.push_str(q);
Cow::Owned(url)
})
}
pub fn uri_and_body<'p>(&self, path: &'p str) -> (Cow<'p, str>, Option<&str>) {
(self.path_and_query(path), self.body())
}
pub fn query(&self) -> Option<&str> {
match self {
Parameters::Query(query) => Some(query.as_str()),
_ => None,
}
}
pub fn body(&self) -> Option<&str> {
match self {
Parameters::Body(body) => Some(body.as_str()),
_ => None,
}
}
pub fn into_inner(self) -> String {
match self {
Parameters::Query(s) => s,
Parameters::Body(s) => s,
}
}
pub fn as_mut_string(&mut self) -> &mut String {
match self {
Parameters::Query(ref mut query) => query,
Parameters::Body(ref mut body) => body,
}
}
pub fn query_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Query(ref mut query) => Some(query),
_ => None,
}
}
pub fn body_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Body(ref mut body) => Some(body),
_ => None,
}
}
pub fn push<S: AsRef<str>>(&mut self, extra_params: &[(Cow<str>, S)]) {
let q = Self::params_to_query(extra_params);
let s = self.as_mut_string();
if !s.is_empty() {
s.push('&');
}
s.push_str(q.as_str());
}
fn params_to_string_collection<'p, 'a: 'p, S: AsRef<str>>(params: &'a [(Cow<str>, S)])
-> ParamsMapper<'a, 'p, S, String> {
params.iter()
.map(|(k, v)| [k.as_ref().as_ref(), "=", v.as_ref()].concat())
}
fn params_to_vec<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> Vec<String> {
Self::params_to_string_collection(params).collect()
}
fn params_to_query<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> String {
Self::params_to_vec(params).join("&")
}
}
#[cfg(all(test, feature = "nightly"))]
mod benches {
use super::*;
use test::Bencher;
#[bench]
fn bench_params_to_query(b: &mut Bencher) {
let params_str = (1..10).map(|i| (format!("unakey{}", i), format!("unvalor{}", i)))
.collect::<Vec<_>>();
let params = params_str.iter()
.map(|(k, v)| (k.as_str().into(), v.as_str()))
.collect::<Vec<(Cow<str>, &str)>>();
b.iter(|| Parameters::params_to_query(¶ms));
}
}
| method_requires_body | identifier_name |
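// A quick usage sketch for the Parameters type above; the parameter values are
// invented for illustration, and Method comes from http_types as in the snippet.
fn parameters_demo() {
    use std::borrow::Cow;
    let params: Vec<(Cow<str>, &str)> = vec![("q".into(), "rust"), ("page".into(), "2")];
    // GET carries no body, so the pairs are encoded as a query string.
    let get = Parameters::new(&Method::GET, &params);
    assert_eq!(get.query(), Some("q=rust&page=2"));
    assert_eq!(get.path_and_query("/search"), "/search?q=rust&page=2");
    // POST requires a body, so the same pairs land there instead.
    let post = Parameters::new(&Method::POST, &params);
    assert_eq!(post.body(), Some("q=rust&page=2"));
}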
parameters.rs | use http_types::Method;
use std::borrow::Cow;
#[derive(Clone, Debug)]
pub enum Parameters {
Query(String),
Body(String),
}
use std::{
iter::Map,
slice::Iter,
};
// This newtype is currently only used internally, but we might want to move it elsewhere,
// where its genericity could make it more useful. We could also aim to reduce the number of
// conversions in requests by having a type that only maps parameters once unless changed.
type ParamsMapper<'a, 'p, S, T> = Map<Iter<'a, (Cow<'p, str>, S)>, fn(&(Cow<'p, str>, S)) -> T>;
impl Parameters {
pub fn new<S: AsRef<str>>(method: &Method, params: &[(Cow<str>, S)]) -> Self {
let params_s = Self::params_to_query(params);
if Self::method_requires_body(method) {
Parameters::Body(params_s)
} else {
Parameters::Query(params_s)
}
}
#[inline]
fn method_requires_body(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE => false,
_ => true,
}
}
pub fn path_and_query<'p>(&self, path: &'p str) -> Cow<'p, str> {
self.query().map_or_else(|| Cow::Borrowed(path),
|q| {
let mut url = path.to_string();
url.push('?');
url.push_str(q);
Cow::Owned(url)
})
}
pub fn uri_and_body<'p>(&self, path: &'p str) -> (Cow<'p, str>, Option<&str>) {
(self.path_and_query(path), self.body())
}
pub fn query(&self) -> Option<&str> {
match self {
Parameters::Query(query) => Some(query.as_str()),
_ => None,
}
}
pub fn body(&self) -> Option<&str> {
match self {
Parameters::Body(body) => Some(body.as_str()),
_ => None,
}
}
pub fn into_inner(self) -> String {
match self {
Parameters::Query(s) => s,
Parameters::Body(s) => s,
}
}
pub fn as_mut_string(&mut self) -> &mut String {
match self {
Parameters::Query(ref mut query) => query,
Parameters::Body(ref mut body) => body,
}
}
pub fn query_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Query(ref mut query) => Some(query),
_ => None,
}
}
pub fn body_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Body(ref mut body) => Some(body),
_ => None,
}
}
pub fn push<S: AsRef<str>>(&mut self, extra_params: &[(Cow<str>, S)]) {
let q = Self::params_to_query(extra_params);
let s = self.as_mut_string();
if !s.is_empty() {
s.push('&');
}
s.push_str(q.as_str());
}
fn params_to_string_collection<'p, 'a: 'p, S: AsRef<str>>(params: &'a [(Cow<str>, S)])
-> ParamsMapper<'a, 'p, S, String> {
params.iter()
.map(|(k, v)| [k.as_ref().as_ref(), "=", v.as_ref()].concat())
}
fn params_to_vec<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> Vec<String> {
Self::params_to_string_collection(params).collect()
}
fn params_to_query<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> String {
Self::params_to_vec(params).join("&")
}
}
#[cfg(all(test, feature = "nightly"))]
mod benches {
use super::*;
use test::Bencher;
#[bench]
fn bench_params_to_query(b: &mut Bencher) |
}
| {
let params_str = (1..10).map(|i| (format!("unakey{}", i), format!("unvalor{}", i)))
.collect::<Vec<_>>();
let params = params_str.iter()
.map(|(k, v)| (k.as_str().into(), v.as_str()))
.collect::<Vec<(Cow<str>, &str)>>();
b.iter(|| Parameters::params_to_query(¶ms));
} | identifier_body |
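// A follow-up sketch for `push` and `uri_and_body` (values invented): pushing
// onto an existing Query keeps everything in the query string, and the body
// slot stays empty.
fn push_demo() {
    use std::borrow::Cow;
    let mut p = Parameters::Query("a=1".to_string());
    let extra: Vec<(Cow<str>, &str)> = vec![("b".into(), "2")];
    p.push(&extra); // appends with '&' since the string is non-empty
    let (uri, body) = p.uri_and_body("/items");
    assert_eq!(uri, "/items?a=1&b=2");
    assert_eq!(body, None);
}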
parameters.rs | use http_types::Method;
use std::borrow::Cow;
#[derive(Clone, Debug)]
pub enum Parameters {
Query(String),
Body(String),
}
use std::{
iter::Map,
slice::Iter,
};
// This newtype is currently only used internally, but we might want to move it elsewhere,
// where its genericity could make it more useful. We could also aim to reduce the number of
// conversions in requests by having a type that only maps parameters once unless changed.
type ParamsMapper<'a, 'p, S, T> = Map<Iter<'a, (Cow<'p, str>, S)>, fn(&(Cow<'p, str>, S)) -> T>;
impl Parameters {
pub fn new<S: AsRef<str>>(method: &Method, params: &[(Cow<str>, S)]) -> Self {
let params_s = Self::params_to_query(params);
if Self::method_requires_body(method) {
Parameters::Body(params_s)
} else {
Parameters::Query(params_s)
}
}
#[inline]
fn method_requires_body(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE => false,
_ => true,
}
}
pub fn path_and_query<'p>(&self, path: &'p str) -> Cow<'p, str> {
self.query().map_or_else(|| Cow::Borrowed(path),
|q| {
let mut url = path.to_string();
url.push('?');
url.push_str(q);
Cow::Owned(url)
})
}
pub fn uri_and_body<'p>(&self, path: &'p str) -> (Cow<'p, str>, Option<&str>) {
(self.path_and_query(path), self.body())
}
pub fn query(&self) -> Option<&str> {
match self {
Parameters::Query(query) => Some(query.as_str()),
_ => None,
}
}
pub fn body(&self) -> Option<&str> {
match self {
Parameters::Body(body) => Some(body.as_str()),
_ => None,
}
}
pub fn into_inner(self) -> String {
match self {
Parameters::Query(s) => s,
Parameters::Body(s) => s,
}
}
pub fn as_mut_string(&mut self) -> &mut String {
match self {
Parameters::Query(ref mut query) => query,
Parameters::Body(ref mut body) => body,
}
}
pub fn query_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Query(ref mut query) => Some(query),
_ => None,
}
}
pub fn body_as_mut_string(&mut self) -> Option<&mut String> {
match self {
Parameters::Body(ref mut body) => Some(body),
_ => None,
}
}
pub fn push<S: AsRef<str>>(&mut self, extra_params: &[(Cow<str>, S)]) {
let q = Self::params_to_query(extra_params);
let s = self.as_mut_string();
if !s.is_empty() {
s.push('&');
}
s.push_str(q.as_str());
}
fn params_to_string_collection<'p, 'a: 'p, S: AsRef<str>>(params: &'a [(Cow<str>, S)])
-> ParamsMapper<'a, 'p, S, String> {
params.iter()
.map(|(k, v)| [k.as_ref().as_ref(), "=", v.as_ref()].concat())
}
fn params_to_vec<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> Vec<String> { | fn params_to_query<S: AsRef<str>>(params: &[(Cow<str>, S)]) -> String {
Self::params_to_vec(params).join("&")
}
}
#[cfg(all(test, feature = "nightly"))]
mod benches {
use super::*;
use test::Bencher;
#[bench]
fn bench_params_to_query(b: &mut Bencher) {
let params_str = (1..10).map(|i| (format!("unakey{}", i), format!("unvalor{}", i)))
.collect::<Vec<_>>();
let params = params_str.iter()
.map(|(k, v)| (k.as_str().into(), v.as_str()))
.collect::<Vec<(Cow<str>, &str)>>();
b.iter(|| Parameters::params_to_query(¶ms));
}
} | Self::params_to_string_collection(params).collect()
}
| random_line_split |
total_count.rs | use collectors::{Collector, DocumentMatch};
#[derive(Debug)]
pub struct TotalCountCollector {
total_count: u64,
}
impl TotalCountCollector {
pub fn new() -> TotalCountCollector {
TotalCountCollector {
total_count: 0,
}
}
pub fn get_total_count(&self) -> u64 {
self.total_count
}
}
impl Collector for TotalCountCollector {
fn needs_score(&self) -> bool {
false
} |
#[cfg(test)]
mod tests {
use collectors::{Collector, DocumentMatch};
use super::TotalCountCollector;
#[test]
fn test_total_count_collector_initial_state() {
let collector = TotalCountCollector::new();
assert_eq!(collector.get_total_count(), 0);
}
#[test]
fn test_total_count_collector_needs_score() {
let collector = TotalCountCollector::new();
assert_eq!(collector.needs_score(), false);
}
#[test]
fn test_total_count_collector_collect() {
let mut collector = TotalCountCollector::new();
collector.collect(DocumentMatch::new_unscored(0));
collector.collect(DocumentMatch::new_unscored(1));
collector.collect(DocumentMatch::new_unscored(2));
assert_eq!(collector.get_total_count(), 3);
}
} |
fn collect(&mut self, _doc: DocumentMatch) {
self.total_count += 1;
}
} | random_line_split |
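// A minimal sketch of how a search loop might drive the collector above; the
// iterator of DocumentMatch values is hypothetical, but the calls mirror the
// unit tests in this file.
fn count_matches<I: Iterator<Item = DocumentMatch>>(matches: I) -> u64 {
    let mut collector = TotalCountCollector::new();
    for doc in matches {
        collector.collect(doc); // each match bumps total_count by one
    }
    collector.get_total_count()
}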
total_count.rs | use collectors::{Collector, DocumentMatch};
#[derive(Debug)]
pub struct TotalCountCollector {
total_count: u64,
}
impl TotalCountCollector {
pub fn new() -> TotalCountCollector {
TotalCountCollector {
total_count: 0,
}
}
pub fn get_total_count(&self) -> u64 {
self.total_count
}
}
impl Collector for TotalCountCollector {
fn needs_score(&self) -> bool {
false
}
fn collect(&mut self, _doc: DocumentMatch) {
self.total_count += 1;
}
}
#[cfg(test)]
mod tests {
use collectors::{Collector, DocumentMatch};
use super::TotalCountCollector;
#[test]
fn test_total_count_collector_initial_state() {
let collector = TotalCountCollector::new();
assert_eq!(collector.get_total_count(), 0);
}
#[test]
fn | () {
let collector = TotalCountCollector::new();
assert_eq!(collector.needs_score(), false);
}
#[test]
fn test_total_count_collector_collect() {
let mut collector = TotalCountCollector::new();
collector.collect(DocumentMatch::new_unscored(0));
collector.collect(DocumentMatch::new_unscored(1));
collector.collect(DocumentMatch::new_unscored(2));
assert_eq!(collector.get_total_count(), 3);
}
}
| test_total_count_collector_needs_score | identifier_name |
typedefs.obj.ts | /*
* Power BI Visualizations
*
* Copyright (c) Microsoft Corporation
* All rights reserved.
* MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy | * copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
///<reference path="../../VisualsCommon/obj/VisualsCommon.d.ts"/>
///<reference path="../../VisualsData/obj/VisualsData.d.ts"/>
///<reference path="../../Visuals/obj/Visuals.d.ts"/> | * of this software and associated documentation files (the ""Software""), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | random_line_split |
DialogFooter.tsx | import { forwardRef, HTMLAttributes } from "react";
import cn from "classnames";
import { bem } from "@react-md/utils";
/**
* An optional alignment for the content within the footer. Since the majority
* of dialog footers are used to contain action buttons, the default alignment
* is near the end.
*
* @remarks \@since 3.1.0
*/
export type DialogFooterAlignment =
| "none"
| "start"
| "end"
| "between"
| "stacked-start"
| "stacked-end";
export interface DialogFooterProps extends HTMLAttributes<HTMLDivElement> {
/** {@inheritDoc DialogFooterAlignment} */
align?: DialogFooterAlignment;
}
const block = bem("rmd-dialog");
export const DialogFooter = forwardRef<HTMLDivElement, DialogFooterProps>(
function DialogFooter({ children, className, align = "end", ...props }, ref) {
return (
<footer
{...props}
ref={ref}
className={ | (
block("footer", {
flex: align !== "none",
"flex-v": align === "stacked-start" || align === "stacked-end",
start: align === "start" || align === "stacked-start",
between: align === "between",
end: align === "end" || align === "stacked-end",
}),
className
)}
>
{children}
</footer>
);
}
);
| cn | identifier_name |
DialogFooter.tsx | import { forwardRef, HTMLAttributes } from "react";
import cn from "classnames";
import { bem } from "@react-md/utils";
/**
* An optional alignment for the content within the footer. Since the majority
* of dialog footers are used to contain action buttons, the default alignment
* is near the end.
*
* @remarks \@since 3.1.0
*/
export type DialogFooterAlignment =
| "none"
| "start"
| "end"
| "between"
| "stacked-start"
| "stacked-end";
export interface DialogFooterProps extends HTMLAttributes<HTMLDivElement> {
/** {@inheritDoc DialogFooterAlignment} */
align?: DialogFooterAlignment;
}
const block = bem("rmd-dialog");
export const DialogFooter = forwardRef<HTMLDivElement, DialogFooterProps>(
function DialogFooter({ children, className, align = "end", ...props }, ref) {
return (
<footer
{...props}
ref={ref}
className={cn(
block("footer", {
flex: align !== "none",
"flex-v": align === "stacked-start" || align === "stacked-end",
start: align === "start" || align === "stacked-start",
between: align === "between",
end: align === "end" || align === "stacked-end",
}),
className
)}
>
|
</footer>
);
}
);
| {children} | identifier_body |
DialogFooter.tsx | import { forwardRef, HTMLAttributes } from "react";
import cn from "classnames";
import { bem } from "@react-md/utils";
/**
* An optional alignment for the content within the footer. Since the majority
* of dialog footers are used to contain action buttons, the default alignment
* is near the end.
*
* @remarks \@since 3.1.0
*/
export type DialogFooterAlignment =
| "none"
| "start"
| "end"
| "between"
| "stacked-start"
| "stacked-end";
export interface DialogFooterProps extends HTMLAttributes<HTMLDivElement> {
/** {@inheritDoc DialogFooterAlignment} */
align?: DialogFooterAlignment;
}
const block = bem("rmd-dialog");
export const DialogFooter = forwardRef<HTMLDivElement, DialogFooterProps>(
function DialogFooter({ children, className, align = "end", ...props }, ref) {
return ( | flex: align !== "none",
"flex-v": align === "stacked-start" || align === "stacked-end",
start: align === "start" || align === "stacked-start",
between: align === "between",
end: align === "end" || align === "stacked-end",
}),
className
)}
>
{children}
</footer>
);
}
); | <footer
{...props}
ref={ref}
className={cn(
block("footer", { | random_line_split |
region_BM.py | """Auto-generated file, do not edit by hand. BM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BM = PhoneMetadata(id='BM', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='(?:441|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='441(?:[46]\\d\\d|5(?:4\\d|60|89))\\d{4}', example_number='4414123456', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='441(?:[2378]\\d|5[0-39])\\d{5}', example_number='4413701234', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='52(?:3(?:[2-46-9][02-9]\\d|5(?:[02-46-9]\\d|5[0-46-9]))|4(?:[2-478][02-9]\\d|5(?:[034]\\d|2[024-9]|5[0-46-9])|6(?:0[1-9]|[2-9]\\d)|9(?:[05-9]\\d|2[0-5]|49)))\\d{4}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[12]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
national_prefix='1',
national_prefix_for_parsing='1|([2-8]\\d{6})$',
national_prefix_transform_rule='441\\1',
leading_digits='441', | mobile_number_portable_region=True) | random_line_split |
|
test_buffers_cleaning.py | from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething
def test_ordered_cleaning(tconf):
global_view_no = 2
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=global_view_no,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica._consensus_data.view_no = global_view_no
total = []
num_requests_per_view = 3
for viewNo in range(global_view_no + 1):
|
# gc is called after a stable checkpoint; since no request is executed
# in this test, start it manually
replica._ordering_service.gc(100)
# Requests with a view lower than the previous view
# should not be in ordered
assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])
def test_primary_names_cleaning(tconf):
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=0,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica.primaryName = "Node1:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node2:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0"), (1, "Node2:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node3:0"
assert list(replica.primaryNames.items()) == \
[(1, "Node2:0"), (2, "Node3:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node4:0"
assert list(replica.primaryNames.items()) == \
[(2, "Node3:0"), (3, "Node4:0")]
| for seqNo in range(num_requests_per_view):
reqId = viewNo, seqNo
replica._ordering_service._add_to_ordered(*reqId)
total.append(reqId) | conditional_block |
test_buffers_cleaning.py | from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething
def test_ordered_cleaning(tconf):
global_view_no = 2
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=global_view_no,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica._consensus_data.view_no = global_view_no
total = []
num_requests_per_view = 3
for viewNo in range(global_view_no + 1):
for seqNo in range(num_requests_per_view):
reqId = viewNo, seqNo
replica._ordering_service._add_to_ordered(*reqId)
total.append(reqId)
# gc is called after a stable checkpoint; since no request is executed
# in this test, start it manually
replica._ordering_service.gc(100)
# Requests with a view lower than the previous view
# should not be in ordered
assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])
def test_primary_names_cleaning(tconf):
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=0,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica) | node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node2:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0"), (1, "Node2:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node3:0"
assert list(replica.primaryNames.items()) == \
[(1, "Node2:0"), (2, "Node3:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node4:0"
assert list(replica.primaryNames.items()) == \
[(2, "Node3:0"), (3, "Node4:0")] |
replica.primaryName = "Node1:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0")]
| random_line_split |
test_buffers_cleaning.py | from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething
def | (tconf):
global_view_no = 2
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=global_view_no,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica._consensus_data.view_no = global_view_no
total = []
num_requests_per_view = 3
for viewNo in range(global_view_no + 1):
for seqNo in range(num_requests_per_view):
reqId = viewNo, seqNo
replica._ordering_service._add_to_ordered(*reqId)
total.append(reqId)
# gc is called after a stable checkpoint; since no request is executed
# in this test, start it manually
replica._ordering_service.gc(100)
# Requests with a view lower than the previous view
# should not be in ordered
assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])
def test_primary_names_cleaning(tconf):
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=0,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica.primaryName = "Node1:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node2:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0"), (1, "Node2:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node3:0"
assert list(replica.primaryNames.items()) == \
[(1, "Node2:0"), (2, "Node3:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node4:0"
assert list(replica.primaryNames.items()) == \
[(2, "Node3:0"), (3, "Node4:0")]
| test_ordered_cleaning | identifier_name |
test_buffers_cleaning.py | from plenum.common.event_bus import InternalBus
from plenum.common.startable import Mode
from plenum.common.timer import QueueTimer
from plenum.common.util import get_utc_epoch
from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector
from plenum.server.database_manager import DatabaseManager
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.testing_utils import FakeSomething
def test_ordered_cleaning(tconf):
global_view_no = 2
node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=global_view_no,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica._consensus_data.view_no = global_view_no
total = []
num_requests_per_view = 3
for viewNo in range(global_view_no + 1):
for seqNo in range(num_requests_per_view):
reqId = viewNo, seqNo
replica._ordering_service._add_to_ordered(*reqId)
total.append(reqId)
# gc is called after a stable checkpoint; since no request is executed
# in this test, start it manually
replica._ordering_service.gc(100)
# Requests with a view lower than the previous view
# should not be in ordered
assert len(replica._ordering_service.ordered) == len(total[num_requests_per_view:])
def test_primary_names_cleaning(tconf):
| node = FakeSomething(
name="fake node",
ledger_ids=[0],
viewNo=0,
utc_epoch=get_utc_epoch,
get_validators=lambda: [],
db_manager=DatabaseManager(),
requests=[],
mode=Mode.participating,
timer=QueueTimer(),
quorums=Quorums(4),
write_manager=None,
poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
primaries_selector=RoundRobinConstantNodesPrimariesSelector(["Alpha", "Beta", "Gamma", "Delta"])
)
bls_bft_replica = FakeSomething(
gc=lambda *args: None,
)
replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
replica.primaryName = "Node1:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node2:0"
assert list(replica.primaryNames.items()) == \
[(0, "Node1:0"), (1, "Node2:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node3:0"
assert list(replica.primaryNames.items()) == \
[(1, "Node2:0"), (2, "Node3:0")]
node.viewNo += 1
replica._consensus_data.view_no = node.viewNo
replica.primaryName = "Node4:0"
assert list(replica.primaryNames.items()) == \
[(2, "Node3:0"), (3, "Node4:0")] | identifier_body |
|
destructure-trait-ref.rs | // The regression test for #15031 to make sure destructuring trait
// reference work properly.
#![feature(box_patterns)]
#![feature(box_syntax)]
trait T { fn foo(&self) | }
impl T for isize {}
fn main() {
// For an expression of the form:
//
// let &...&x = &..&SomeTrait;
//
// Say we have n `&`s on the left-hand side and m `&`s on the right-hand side; then:
// if n < m, we are golden;
// if n == m, it's a "type cannot be dereferenced" error;
// if n > m, it's a type mismatch error.
// n < m
let &x = &(&1isize as &dyn T);
let &x = &&(&1isize as &dyn T);
let &&x = &&(&1isize as &dyn T);
// n == m
let &x = &1isize as &dyn T; //~ ERROR type `&dyn T` cannot be dereferenced
let &&x = &(&1isize as &dyn T); //~ ERROR type `&dyn T` cannot be dereferenced
let box x = box 1isize as Box<dyn T>;
//~^ ERROR type `Box<dyn T>` cannot be dereferenced
// n > m
let &&x = &1isize as &dyn T;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let &&&x = &(&1isize as &dyn T);
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let box box x = box 1isize as Box<dyn T>;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found struct `Box<_>`
}
| {} | identifier_body |
destructure-trait-ref.rs | // The regression test for #15031 to make sure destructuring trait
// references work properly.
#![feature(box_patterns)]
#![feature(box_syntax)]
trait T { fn | (&self) {} }
impl T for isize {}
fn main() {
// For an expression of the form:
//
// let &...&x = &..&SomeTrait;
//
// Say we have n `&`s on the left-hand side and m `&`s on the right-hand side; then:
// if n < m, we are golden;
// if n == m, it's a "type cannot be dereferenced" error;
// if n > m, it's a type mismatch error.
// n < m
let &x = &(&1isize as &dyn T);
let &x = &&(&1isize as &dyn T);
let &&x = &&(&1isize as &dyn T);
// n == m
let &x = &1isize as &dyn T; //~ ERROR type `&dyn T` cannot be dereferenced
let &&x = &(&1isize as &dyn T); //~ ERROR type `&dyn T` cannot be dereferenced
let box x = box 1isize as Box<dyn T>;
//~^ ERROR type `Box<dyn T>` cannot be dereferenced
// n > m
let &&x = &1isize as &dyn T;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let &&&x = &(&1isize as &dyn T);
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let box box x = box 1isize as Box<dyn T>;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found struct `Box<_>`
}
| foo | identifier_name |
destructure-trait-ref.rs | // The regression test for #15031 to make sure destructuring trait
// references work properly.
#![feature(box_patterns)] | trait T { fn foo(&self) {} }
impl T for isize {}
fn main() {
// For an expression of the form:
//
// let &...&x = &..&SomeTrait;
//
// Say we have n `&`s on the left-hand side and m `&`s on the right-hand side; then:
// if n < m, we are golden;
// if n == m, it's a "type cannot be dereferenced" error;
// if n > m, it's a type mismatch error.
// n < m
let &x = &(&1isize as &dyn T);
let &x = &&(&1isize as &dyn T);
let &&x = &&(&1isize as &dyn T);
// n == m
let &x = &1isize as &dyn T; //~ ERROR type `&dyn T` cannot be dereferenced
let &&x = &(&1isize as &dyn T); //~ ERROR type `&dyn T` cannot be dereferenced
let box x = box 1isize as Box<dyn T>;
//~^ ERROR type `Box<dyn T>` cannot be dereferenced
// n > m
let &&x = &1isize as &dyn T;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let &&&x = &(&1isize as &dyn T);
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found reference `&_`
let box box x = box 1isize as Box<dyn T>;
//~^ ERROR mismatched types
//~| expected trait object `dyn T`
//~| found struct `Box<_>`
} | #![feature(box_syntax)]
| random_line_split |
detector.ts | import { BrowserDetectInfo, OsDefinition, OsDefinitionInterface } from './browser-detect.interface';
import { browsers, os, osVersions } from './definitions';
import { mobilePrefixRegExp, mobileRegExp } from './regexp';
import Process = NodeJS.Process;
export class Detector {
private userAgent: string;
public constructor(
userAgent: string,
private navigator?: Navigator,
private process?: Process
) {
this.userAgent = userAgent
? userAgent
: this.navigator ? (navigator.userAgent || navigator.vendor) : '';
}
public detect(): BrowserDetectInfo {
if (this.process && !this.userAgent) {
const version = this.process.version.slice(1).split('.').slice(0, 3);
const versionTail = Array.prototype.slice.call(version, 1).join('') || '0';
return {
name: 'node',
version: version.join('.'),
versionNumber: parseFloat(`${version[0]}.${versionTail}`),
mobile: false,
os: this.process.platform
};
}
if (!this.userAgent)
this.handleMissingError();
return {
...this.checkBrowser(),
...this.checkMobile(),
...this.checkOs()
}; | }
private checkBrowser(): BrowserDetectInfo {
return browsers
.filter(definition => (<RegExp>definition[1]).test(this.userAgent))
.map(definition => {
const match = (<RegExp>definition[1]).exec(this.userAgent);
const version = match && match[1].split(/[._]/).slice(0, 3);
const versionTails = Array.prototype.slice.call(version, 1).join('') || '0';
if (version && version.length < 3)
Array.prototype.push.apply(version, version.length === 1 ? [0, 0] : [0]);
return {
name: String(definition[0]),
version: version.join('.'),
versionNumber: Number(`${version[0]}.${versionTails}`)
};
})
.shift();
}
private checkMobile(): BrowserDetectInfo {
const agentPrefix = this.userAgent.substr(0, 4);
const mobile = mobileRegExp.test(this.userAgent) || mobilePrefixRegExp.test(agentPrefix);
return { mobile };
}
private checkOs(): BrowserDetectInfo {
return os
.map(definition => {
const name = (<OsDefinitionInterface>definition).name || <string>definition;
const pattern = this.getOsPattern(definition);
return {
name,
pattern,
value: RegExp(
`\\b${
pattern.replace(/([ -])(?!$)/g, '$1?')
}(?:x?[\\d._]+|[ \\w.]*)`,
'i'
).exec(this.userAgent)
};
})
.filter(definition => definition.value)
.map(definition => {
let os = definition.value[0] || '';
let osSuffix: string;
if (
definition.pattern &&
definition.name &&
/^Win/i.test(os) &&
!/^Windows Phone /i.test(os) &&
(osSuffix = osVersions[os.replace(/[^\d.]/g, '')])
)
os = `Windows ${osSuffix}`;
if (definition.pattern && definition.name)
os = os.replace(RegExp(definition.pattern, 'i'), definition.name);
os = os
.replace(/ ce$/i, ' CE')
.replace(/\bhpw/i, 'web')
.replace(/\bMacintosh\b/, 'Mac OS')
.replace(/_PowerPC\b/i, ' OS')
.replace(/\b(OS X) [^ \d]+/i, '$1')
.replace(/\bMac (OS X)\b/, '$1')
.replace(/\/(\d)/, ' $1')
.replace(/_/g, '.')
.replace(/(?: BePC|[ .]*fc[ \d.]+)$/i, '')
.replace(/\bx86\.64\b/gi, 'x86_64')
.replace(/\b(Windows Phone) OS\b/, '$1')
.replace(/\b(Chrome OS \w+) [\d.]+\b/, '$1')
.split(' on ')[0]
.trim();
os = /^(?:webOS|i(?:OS|P))/.test(os)
? os
: (os.charAt(0).toUpperCase() + os.slice(1));
return { os };
})
.shift();
}
private getOsPattern(definition: OsDefinition): string {
const definitionInterface = <OsDefinitionInterface>definition;
return (
typeof definition === 'string'
? <string>definition
: undefined
) ||
definitionInterface.pattern ||
definitionInterface.name;
}
private handleMissingError() {
throw new Error('Please give user-agent.\n> browser(navigator.userAgent or res.headers[\'user-agent\']).');
}
} | random_line_split |
|
detector.ts | import { BrowserDetectInfo, OsDefinition, OsDefinitionInterface } from './browser-detect.interface';
import { browsers, os, osVersions } from './definitions';
import { mobilePrefixRegExp, mobileRegExp } from './regexp';
import Process = NodeJS.Process;
export class Detector {
private userAgent: string;
public constructor(
userAgent: string,
private navigator?: Navigator,
private process?: Process
) {
this.userAgent = userAgent
? userAgent
: this.navigator ? (navigator.userAgent || navigator.vendor) : '';
}
public detect(): BrowserDetectInfo {
if (this.process && !this.userAgent) {
const version = this.process.version.slice(1).split('.').slice(0, 3);
const versionTail = Array.prototype.slice.call(version, 1).join('') || '0';
return {
name: 'node',
version: version.join('.'),
versionNumber: parseFloat(`${version[0]}.${versionTail}`),
mobile: false,
os: this.process.platform
};
}
if (!this.userAgent)
this.handleMissingError();
return {
...this.checkBrowser(),
...this.checkMobile(),
...this.checkOs()
};
}
private checkBrowser(): BrowserDetectInfo {
return browsers
.filter(definition => (<RegExp>definition[1]).test(this.userAgent))
.map(definition => {
const match = (<RegExp>definition[1]).exec(this.userAgent);
const version = match && match[1].split(/[._]/).slice(0, 3);
const versionTails = Array.prototype.slice.call(version, 1).join('') || '0';
if (version && version.length < 3)
Array.prototype.push.apply(version, version.length === 1 ? [0, 0] : [0]);
return {
name: String(definition[0]),
version: version.join('.'),
versionNumber: Number(`${version[0]}.${versionTails}`)
};
})
.shift();
}
private checkMobile(): BrowserDetectInfo {
const agentPrefix = this.userAgent.substr(0, 4);
const mobile = mobileRegExp.test(this.userAgent) || mobilePrefixRegExp.test(agentPrefix);
return { mobile };
}
private checkOs(): BrowserDetectInfo {
return os
.map(definition => {
const name = (<OsDefinitionInterface>definition).name || <string>definition;
const pattern = this.getOsPattern(definition);
return {
name,
pattern,
value: RegExp(
`\\b${
pattern.replace(/([ -])(?!$)/g, '$1?')
}(?:x?[\\d._]+|[ \\w.]*)`,
'i'
).exec(this.userAgent)
};
})
.filter(definition => definition.value)
.map(definition => {
let os = definition.value[0] || '';
let osSuffix: string;
if (
definition.pattern &&
definition.name &&
/^Win/i.test(os) &&
!/^Windows Phone /i.test(os) &&
(osSuffix = osVersions[os.replace(/[^\d.]/g, '')])
)
os = `Windows ${osSuffix}`;
if (definition.pattern && definition.name)
os = os.replace(RegExp(definition.pattern, 'i'), definition.name);
os = os
.replace(/ ce$/i, ' CE')
.replace(/\bhpw/i, 'web')
.replace(/\bMacintosh\b/, 'Mac OS')
.replace(/_PowerPC\b/i, ' OS')
.replace(/\b(OS X) [^ \d]+/i, '$1')
.replace(/\bMac (OS X)\b/, '$1')
.replace(/\/(\d)/, ' $1')
.replace(/_/g, '.')
.replace(/(?: BePC|[ .]*fc[ \d.]+)$/i, '')
.replace(/\bx86\.64\b/gi, 'x86_64')
.replace(/\b(Windows Phone) OS\b/, '$1')
.replace(/\b(Chrome OS \w+) [\d.]+\b/, '$1')
.split(' on ')[0]
.trim();
os = /^(?:webOS|i(?:OS|P))/.test(os)
? os
: (os.charAt(0).toUpperCase() + os.slice(1));
return { os };
})
.shift();
}
private getOsPattern(definition: OsDefinition): string |
private handleMissingError() {
throw new Error('Please give user-agent.\n> browser(navigator.userAgent or res.headers[\'user-agent\']).');
}
}
| {
const definitionInterface = <OsDefinitionInterface>definition;
return (
typeof definition === 'string'
? <string>definition
: undefined
) ||
definitionInterface.pattern ||
definitionInterface.name;
} | identifier_body |
detector.ts | import { BrowserDetectInfo, OsDefinition, OsDefinitionInterface } from './browser-detect.interface';
import { browsers, os, osVersions } from './definitions';
import { mobilePrefixRegExp, mobileRegExp } from './regexp';
import Process = NodeJS.Process;
export class Detector {
private userAgent: string;
public constructor(
userAgent: string,
private navigator?: Navigator,
private process?: Process
) {
this.userAgent = userAgent
? userAgent
: this.navigator ? (navigator.userAgent || navigator.vendor) : '';
}
public detect(): BrowserDetectInfo {
if (this.process && !this.userAgent) {
const version = this.process.version.slice(1).split('.').slice(0, 3);
const versionTail = Array.prototype.slice.call(version, 1).join('') || '0';
return {
name: 'node',
version: version.join('.'),
versionNumber: parseFloat(`${version[0]}.${versionTail}`),
mobile: false,
os: this.process.platform
};
}
if (!this.userAgent)
this.handleMissingError();
return {
...this.checkBrowser(),
...this.checkMobile(),
...this.checkOs()
};
}
private checkBrowser(): BrowserDetectInfo {
return browsers
.filter(definition => (<RegExp>definition[1]).test(this.userAgent))
.map(definition => {
const match = (<RegExp>definition[1]).exec(this.userAgent);
const version = match && match[1].split(/[._]/).slice(0, 3);
const versionTails = Array.prototype.slice.call(version, 1).join('') || '0';
if (version && version.length < 3)
Array.prototype.push.apply(version, version.length === 1 ? [0, 0] : [0]);
return {
name: String(definition[0]),
version: version.join('.'),
versionNumber: Number(`${version[0]}.${versionTails}`)
};
})
.shift();
}
private checkMobile(): BrowserDetectInfo {
const agentPrefix = this.userAgent.substr(0, 4);
const mobile = mobileRegExp.test(this.userAgent) || mobilePrefixRegExp.test(agentPrefix);
return { mobile };
}
private checkOs(): BrowserDetectInfo {
return os
.map(definition => {
const name = (<OsDefinitionInterface>definition).name || <string>definition;
const pattern = this.getOsPattern(definition);
return {
name,
pattern,
value: RegExp(
`\\b${
pattern.replace(/([ -])(?!$)/g, '$1?')
}(?:x?[\\d._]+|[ \\w.]*)`,
'i'
).exec(this.userAgent)
};
})
.filter(definition => definition.value)
.map(definition => {
let os = definition.value[0] || '';
let osSuffix: string;
if (
definition.pattern &&
definition.name &&
/^Win/i.test(os) &&
!/^Windows Phone /i.test(os) &&
(osSuffix = osVersions[os.replace(/[^\d.]/g, '')])
)
os = `Windows ${osSuffix}`;
if (definition.pattern && definition.name)
os = os.replace(RegExp(definition.pattern, 'i'), definition.name);
os = os
.replace(/ ce$/i, ' CE')
.replace(/\bhpw/i, 'web')
.replace(/\bMacintosh\b/, 'Mac OS')
.replace(/_PowerPC\b/i, ' OS')
.replace(/\b(OS X) [^ \d]+/i, '$1')
.replace(/\bMac (OS X)\b/, '$1')
.replace(/\/(\d)/, ' $1')
.replace(/_/g, '.')
.replace(/(?: BePC|[ .]*fc[ \d.]+)$/i, '')
.replace(/\bx86\.64\b/gi, 'x86_64')
.replace(/\b(Windows Phone) OS\b/, '$1')
.replace(/\b(Chrome OS \w+) [\d.]+\b/, '$1')
.split(' on ')[0]
.trim();
os = /^(?:webOS|i(?:OS|P))/.test(os)
? os
: (os.charAt(0).toUpperCase() + os.slice(1));
return { os };
})
.shift();
}
private | (definition: OsDefinition): string {
const definitionInterface = <OsDefinitionInterface>definition;
return (
typeof definition === 'string'
? <string>definition
: undefined
) ||
definitionInterface.pattern ||
definitionInterface.name;
}
private handleMissingError() {
throw new Error('Please give user-agent.\n> browser(navigator.userAgent or res.headers[\'user-agent\']).');
}
}
| getOsPattern | identifier_name |
lib.rs | // Helianto -- static website generator
// Copyright © 2015-2016 Mickaël RAYBAUD-ROIG
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate chrono;
extern crate handlebars;
extern crate num;
extern crate pulldown_cmark;
extern crate regex;
extern crate serde;
extern crate toml;
extern crate walkdir;
#[macro_use]
extern crate log;
mod document;
mod error;
mod generators;
pub mod metadata;
pub mod readers;
mod settings;
mod site;
mod templates;
mod utils;
use handlebars::Handlebars;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use walkdir::{DirEntry, WalkDir};
pub use crate::document::{Document, DocumentContent, DocumentMetadata};
pub use crate::error::{Error, Result};
pub use crate::generators::Generator;
use crate::readers::Reader;
pub use crate::settings::Settings;
pub use crate::site::Site;
use crate::templates::Context;
pub struct Compiler {
pub settings: Settings,
pub site: Site,
handlebars: Handlebars<'static>,
readers: HashMap<String, Rc<dyn Reader>>,
generators: Vec<Rc<dyn Generator>>,
documents: HashMap<String, Rc<DocumentMetadata>>,
}
impl Compiler {
pub fn new(settings: &Settings) -> Compiler {
let mut compiler = Compiler {
settings: settings.clone(),
readers: HashMap::new(),
handlebars: Handlebars::new(),
site: Site::new(settings),
documents: HashMap::new(),
generators: Vec::new(),
};
compiler.add_reader::<readers::MarkdownReader>();
compiler.add_generator::<generators::IndexGenerator>();
compiler
}
fn check_settings(&self) -> Result<()> {
let Settings {
ref source_dir,
ref output_dir,
..
} = self.settings;
if !source_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be an existing directory", source_dir.display()),
});
}
if output_dir.exists() && !output_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be a directory", output_dir.display()),
});
}
Ok(())
}
pub fn get_reader(&self, path: &Path) -> Option<Rc<dyn Reader>> {
path.extension()
.and_then(|extension| extension.to_str())
.and_then(|extension_str| self.readers.get(extension_str))
.cloned()
}
pub fn add_reader<T: Reader + 'static>(&mut self) {
let reader = Rc::new(T::new(&self.settings));
for &extension in T::extensions() {
self.readers.insert(extension.into(), reader.clone());
}
}
pub fn add_generator<T: Generator + 'static>(&mut self) {
self.generators.push(Rc::new(T::new()));
}
fn load_templates(&mut self) -> Result<()> {
self.handlebars.clear_templates();
templates::register_helpers(&mut self.handlebars);
let loader = &mut templates::Loader::new(&mut self.handlebars);
loader.load_builtin_templates();
let templates_dir = self.settings.source_dir.join("_layouts");
if templates_dir.is_dir() {
loader.load_templates(&templates_dir);
}
Ok(())
}
fn render_context(&self, context: Context, path: &Path) -> Result<()> {
let output: String = self.handlebars.render("page.html", &context)
.map_err(|err| Error::Render {
cause: Box::new(err)
})?;
let dest_file = self.settings.output_dir.join(&path);
let dest_dir = dest_file.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| {
let mut fd = File::create(&dest_file)?;
fd.write_all(output.as_ref())?;
fd.sync_data()?;
Ok(())
})
.map_err(|err| Error::Output {
dest: dest_dir.into(),
cause: Box::new(err),
})
}
fn build_document(&mut self, reader: Rc<dyn Reader>, path: &Path) -> Result<()> {
let (body, metadata) = reader.load(path)?;
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| relpath.with_extension("html"))
.unwrap();
let document = Document {
metadata: DocumentMetadata {
url: dest.to_str().unwrap().into(),
.. DocumentMetadata::from_raw(metadata.into_iter())?
},
content: DocumentContent::from(body),
};
debug!(
"Rendering document {} in {} ...",
path.display(),
dest.display()
);
self.render_context(Context::new(&self.site, &document), &dest)
.and_then(|_| {
self.documents.insert(
dest.to_str().unwrap().into(),
Rc::new(document.metadata.clone()),
);
Ok(())
})
}
fn copy_file(&mut self, path: &Path) -> Result<()> {
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| self.settings.output_dir.join(relpath))
.unwrap();
let dest_dir = dest.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| fs::copy(path, &dest))
.and_then(|_| {
debug!("Copying {} to {}", path.display(), dest.display());
Ok(())
})
.map_err(|err| Error::Copy {
from: path.into(),
to: dest_dir.into(),
cause: Box::new(err),
})
}
fn run_generators(&mut self) -> Result<()> {
let documents: Vec<Rc<DocumentMetadata>> = self.documents.values().cloned().collect();
for generator in self.generators.iter() {
let generated_docs = generator.generate(documents.as_ref())?;
for generated_doc in generated_docs.iter() {
if self.documents.contains_key(&generated_doc.metadata.url) {
continue;
}
trace!("Running generator");
let dest = utils::remove_path_prefix(&generated_doc.metadata.url);
if let Err(e) = self.render_context(Context::new(&self.site, generated_doc), &dest)
{
return Err(e);
}
}
}
Ok(())
}
pub fn ru | mut self) -> Result<()> {
self.check_settings()?;
self.load_templates()?;
let entries = WalkDir::new(&self.settings.source_dir)
.min_depth(1)
.max_depth(self.settings.max_depth)
.follow_links(self.settings.follow_links)
.into_iter();
for entry in entries.filter_entry(filter_entry) {
let entry = match entry {
Err(_) => continue,
Ok(e) => {
if e.file_type().is_dir() {
continue;
} else {
PathBuf::from(e.path())
}
}
};
let result = match self.get_reader(&entry) {
Some(reader) => self.build_document(reader.clone(), &entry),
None => self.copy_file(&entry),
};
if let Err(err) = result {
error!("{}", err);
}
}
self.run_generators()?;
Ok(())
}
}
pub fn filter_entry(entry: &DirEntry) -> bool {
let file_type = entry.file_type();
if file_type.is_dir() || file_type.is_file() {
utils::is_public(&entry.path())
} else {
false
}
}
| n(& | identifier_name |
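// A minimal sketch of driving the Compiler above end to end; how the Settings
// value is obtained is an assumption (anything with the fields used in this
// file works).
fn build_site(settings: &Settings) -> Result<()> {
    let mut compiler = Compiler::new(settings); // registers the Markdown reader and index generator
    compiler.run() // loads templates, walks source_dir, renders documents, copies the rest
}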
lib.rs | // Helianto -- static website generator
// Copyright © 2015-2016 Mickaël RAYBAUD-ROIG
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate chrono;
extern crate handlebars;
extern crate num;
extern crate pulldown_cmark;
extern crate regex;
extern crate serde;
extern crate toml;
extern crate walkdir;
#[macro_use]
extern crate log;
mod document;
mod error;
mod generators;
pub mod metadata;
pub mod readers;
mod settings;
mod site;
mod templates;
mod utils;
use handlebars::Handlebars;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use walkdir::{DirEntry, WalkDir};
pub use crate::document::{Document, DocumentContent, DocumentMetadata};
pub use crate::error::{Error, Result};
pub use crate::generators::Generator;
use crate::readers::Reader;
pub use crate::settings::Settings;
pub use crate::site::Site;
use crate::templates::Context;
pub struct Compiler {
pub settings: Settings,
pub site: Site,
handlebars: Handlebars<'static>,
readers: HashMap<String, Rc<dyn Reader>>,
generators: Vec<Rc<dyn Generator>>,
documents: HashMap<String, Rc<DocumentMetadata>>,
}
impl Compiler {
pub fn new(settings: &Settings) -> Compiler {
let mut compiler = Compiler {
settings: settings.clone(),
readers: HashMap::new(),
handlebars: Handlebars::new(),
site: Site::new(settings),
documents: HashMap::new(),
generators: Vec::new(),
};
compiler.add_reader::<readers::MarkdownReader>();
compiler.add_generator::<generators::IndexGenerator>();
compiler
}
fn check_settings(&self) -> Result<()> {
let Settings {
ref source_dir,
ref output_dir,
..
} = self.settings;
if !source_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be an existing directory", source_dir.display()),
});
}
if output_dir.exists() && !output_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be a directory", output_dir.display()),
});
}
Ok(())
}
pub fn get_reader(&self, path: &Path) -> Option<Rc<dyn Reader>> {
path.extension()
.and_then(|extension| extension.to_str())
.and_then(|extension_str| self.readers.get(extension_str))
.cloned()
}
pub fn add_reader<T: Reader + 'static>(&mut self) {
let reader = Rc::new(T::new(&self.settings));
for &extension in T::extensions() {
self.readers.insert(extension.into(), reader.clone());
}
}
pub fn add_generator<T: Generator + 'static>(&mut self) {
self.generators.push(Rc::new(T::new()));
}
fn load_templates(&mut self) -> Result<()> {
self.handlebars.clear_templates();
templates::register_helpers(&mut self.handlebars);
let loader = &mut templates::Loader::new(&mut self.handlebars);
loader.load_builtin_templates();
let templates_dir = self.settings.source_dir.join("_layouts");
if templates_dir.is_dir() {
loader.load_templates(&templates_dir);
}
Ok(())
}
fn render_context(&self, context: Context, path: &Path) -> Result<()> {
let output: String = self.handlebars.render("page.html", &context)
.map_err(|err| Error::Render {
cause: Box::new(err)
})?;
let dest_file = self.settings.output_dir.join(&path);
let dest_dir = dest_file.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| {
let mut fd = File::create(&dest_file)?;
fd.write_all(output.as_ref())?;
fd.sync_data()?;
Ok(())
})
.map_err(|err| Error::Output {
dest: dest_dir.into(),
cause: Box::new(err),
})
}
fn build_document(&mut self, reader: Rc<dyn Reader>, path: &Path) -> Result<()> {
let (body, metadata) = reader.load(path)?;
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| relpath.with_extension("html"))
.unwrap();
let document = Document {
metadata: DocumentMetadata {
url: dest.to_str().unwrap().into(),
.. DocumentMetadata::from_raw(metadata.into_iter())?
},
content: DocumentContent::from(body),
};
debug!(
"Rendering document {} in {} ...",
path.display(),
dest.display()
);
self.render_context(Context::new(&self.site, &document), &dest) | .and_then(|_| {
self.documents.insert(
dest.to_str().unwrap().into(),
Rc::new(document.metadata.clone()),
);
Ok(())
})
}
fn copy_file(&mut self, path: &Path) -> Result<()> {
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| self.settings.output_dir.join(relpath))
.unwrap();
let dest_dir = dest.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| fs::copy(path, &dest))
.and_then(|_| {
debug!("Copying {} to {}", path.display(), dest.display());
Ok(())
})
.map_err(|err| Error::Copy {
from: path.into(),
to: dest_dir.into(),
cause: Box::new(err),
})
}
fn run_generators(&mut self) -> Result<()> {
let documents: Vec<Rc<DocumentMetadata>> = self.documents.values().cloned().collect();
for generator in self.generators.iter() {
let generated_docs = generator.generate(documents.as_ref())?;
for generated_doc in generated_docs.iter() {
if self.documents.contains_key(&generated_doc.metadata.url) {
continue;
}
trace!("Running generator");
let dest = utils::remove_path_prefix(&generated_doc.metadata.url);
if let Err(e) = self.render_context(Context::new(&self.site, generated_doc), &dest)
{
return Err(e);
}
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
self.check_settings()?;
self.load_templates()?;
let entries = WalkDir::new(&self.settings.source_dir)
.min_depth(1)
.max_depth(self.settings.max_depth)
.follow_links(self.settings.follow_links)
.into_iter();
for entry in entries.filter_entry(filter_entry) {
let entry = match entry {
Err(_) => continue,
Ok(e) => {
if e.file_type().is_dir() {
continue;
} else {
PathBuf::from(e.path())
}
}
};
let result = match self.get_reader(&entry) {
Some(reader) => self.build_document(reader.clone(), &entry),
None => self.copy_file(&entry),
};
if let Err(err) = result {
error!("{}", err);
}
}
self.run_generators()?;
Ok(())
}
}
pub fn filter_entry(entry: &DirEntry) -> bool {
let file_type = entry.file_type();
if file_type.is_dir() || file_type.is_file() {
utils::is_public(&entry.path())
} else {
false
}
} | random_line_split |
|
lib.rs | // Helianto -- static website generator
// Copyright © 2015-2016 Mickaël RAYBAUD-ROIG
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate chrono;
extern crate handlebars;
extern crate num;
extern crate pulldown_cmark;
extern crate regex;
extern crate serde;
extern crate toml;
extern crate walkdir;
#[macro_use]
extern crate log;
mod document;
mod error;
mod generators;
pub mod metadata;
pub mod readers;
mod settings;
mod site;
mod templates;
mod utils;
use handlebars::Handlebars;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use walkdir::{DirEntry, WalkDir};
pub use crate::document::{Document, DocumentContent, DocumentMetadata};
pub use crate::error::{Error, Result};
pub use crate::generators::Generator;
use crate::readers::Reader;
pub use crate::settings::Settings;
pub use crate::site::Site;
use crate::templates::Context;
pub struct Compiler {
pub settings: Settings,
pub site: Site,
handlebars: Handlebars<'static>,
readers: HashMap<String, Rc<dyn Reader>>,
generators: Vec<Rc<dyn Generator>>,
documents: HashMap<String, Rc<DocumentMetadata>>,
}
impl Compiler {
pub fn new(settings: &Settings) -> Compiler {
let mut compiler = Compiler {
settings: settings.clone(),
readers: HashMap::new(),
handlebars: Handlebars::new(),
site: Site::new(settings),
documents: HashMap::new(),
generators: Vec::new(),
};
compiler.add_reader::<readers::MarkdownReader>();
compiler.add_generator::<generators::IndexGenerator>();
compiler
}
fn check_settings(&self) -> Result<()> {
let Settings {
ref source_dir,
ref output_dir,
..
} = self.settings;
if !source_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be an existing directory", source_dir.display()),
});
}
if output_dir.exists() && !output_dir.is_dir() {
return Err(Error::Settings {
message: format!("{} must be a directory", output_dir.display()),
});
}
Ok(())
}
pub fn get_reader(&self, path: &Path) -> Option<Rc<dyn Reader>> {
path.extension()
.and_then(|extension| extension.to_str())
.and_then(|extension_str| self.readers.get(extension_str))
.cloned()
}
pub fn add_reader<T: Reader + 'static>(&mut self) {
| pub fn add_generator<T: Generator + 'static>(&mut self) {
self.generators.push(Rc::new(T::new()));
}
fn load_templates(&mut self) -> Result<()> {
self.handlebars.clear_templates();
templates::register_helpers(&mut self.handlebars);
let loader = &mut templates::Loader::new(&mut self.handlebars);
loader.load_builtin_templates();
let templates_dir = self.settings.source_dir.join("_layouts");
if templates_dir.is_dir() {
loader.load_templates(&templates_dir);
}
Ok(())
}
fn render_context(&self, context: Context, path: &Path) -> Result<()> {
let output: String = self.handlebars.render("page.html", &context)
.map_err(|err| Error::Render {
cause: Box::new(err)
})?;
let dest_file = self.settings.output_dir.join(&path);
let dest_dir = dest_file.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| {
let mut fd = File::create(&dest_file)?;
fd.write_all(output.as_ref())?;
fd.sync_data()?;
Ok(())
})
.map_err(|err| Error::Output {
dest: dest_dir.into(),
cause: Box::new(err),
})
}
fn build_document(&mut self, reader: Rc<dyn Reader>, path: &Path) -> Result<()> {
let (body, metadata) = reader.load(path)?;
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| relpath.with_extension("html"))
.unwrap();
let document = Document {
metadata: DocumentMetadata {
url: dest.to_str().unwrap().into(),
.. DocumentMetadata::from_raw(metadata.into_iter())?
},
content: DocumentContent::from(body),
};
debug!(
"Rendering document {} in {} ...",
path.display(),
dest.display()
);
self.render_context(Context::new(&self.site, &document), &dest)
.and_then(|_| {
self.documents.insert(
dest.to_str().unwrap().into(),
Rc::new(document.metadata.clone()),
);
Ok(())
})
}
fn copy_file(&mut self, path: &Path) -> Result<()> {
let dest = path
.strip_prefix(&self.settings.source_dir)
.map(|relpath| self.settings.output_dir.join(relpath))
.unwrap();
let dest_dir = dest.parent().unwrap();
fs::create_dir_all(&dest_dir)
.and_then(|_| fs::copy(path, &dest))
.and_then(|_| {
debug!("Copying {} to {}", path.display(), dest.display());
Ok(())
})
.map_err(|err| Error::Copy {
from: path.into(),
to: dest_dir.into(),
cause: Box::new(err),
})
}
fn run_generators(&mut self) -> Result<()> {
let documents: Vec<Rc<DocumentMetadata>> = self.documents.values().cloned().collect();
for generator in self.generators.iter() {
let generated_docs = generator.generate(documents.as_ref())?;
for generated_doc in generated_docs.iter() {
if self.documents.contains_key(&generated_doc.metadata.url) {
continue;
}
trace!("Running generator");
let dest = utils::remove_path_prefix(&generated_doc.metadata.url);
if let Err(e) = self.render_context(Context::new(&self.site, generated_doc), &dest)
{
return Err(e);
}
}
}
Ok(())
}
pub fn run(&mut self) -> Result<()> {
self.check_settings()?;
self.load_templates()?;
let entries = WalkDir::new(&self.settings.source_dir)
.min_depth(1)
.max_depth(self.settings.max_depth)
.follow_links(self.settings.follow_links)
.into_iter();
for entry in entries.filter_entry(filter_entry) {
let entry = match entry {
Err(_) => continue,
Ok(e) => {
if e.file_type().is_dir() {
continue;
} else {
PathBuf::from(e.path())
}
}
};
let result = match self.get_reader(&entry) {
Some(reader) => self.build_document(reader.clone(), &entry),
None => self.copy_file(&entry),
};
if let Err(err) = result {
error!("{}", err);
}
}
self.run_generators()?;
Ok(())
}
}
pub fn filter_entry(entry: &DirEntry) -> bool {
let file_type = entry.file_type();
if file_type.is_dir() || file_type.is_file() {
utils::is_public(&entry.path())
} else {
false
}
}
| let reader = Rc::new(T::new(&self.settings));
for &extension in T::extensions() {
self.readers.insert(extension.into(), reader.clone());
}
}
| identifier_body |
route_user.py | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from client import app
from . import render
from flask_login import login_required
@app.route("/user/profile")
@login_required
def user_profile():
return render("/user/profile.html")
@app.route("/user/hackathon")
@login_required
def user_hackathon_list():
return render("/user/team.html") | random_line_split |
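A note on the `render` helper these views import: it is not defined in this module. Below is a hypothetical minimal sketch of what it might look like, assuming it wraps Flask's `render_template`; the leading-slash handling is inferred from the call sites, not code from this project.

# Hypothetical sketch of the imported `render` helper (not part of this
# module). Assumes the "/user/profile.html"-style paths used by the views
# resolve inside Flask's template folder once the leading slash is stripped.
from flask import render_template

def render(template_path, **context):
    return render_template(template_path.lstrip("/"), **context)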
|
route_user.py | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from client import app
from . import render
from flask_login import login_required
@app.route("/user/profile")
@login_required
def user_profile():
return render("/user/profile.html")
@app.route("/user/hackathon")
@login_required
def user_hackathon_list():
r | eturn render("/user/team.html")
| identifier_body |
|
route_user.py | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from client import app
from . import render
from flask_login import login_required
@app.route("/user/profile")
@login_required
def u | ):
return render("/user/profile.html")
@app.route("/user/hackathon")
@login_required
def user_hackathon_list():
return render("/user/team.html")
| ser_profile( | identifier_name |
smtpd.py | import asyncore
import email
import email.policy
import re
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from django.db import connections
from hc.api.models import Check
RE_UUID = re.compile(
"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$"
)
def _match(subject, keywords):
for s in keywords.split(","):
s = s.strip()
if s and s in subject:
return True
return False
def _process_message(remote_addr, mailfrom, mailto, data):
to_parts = mailto.split("@")
code = to_parts[0]
if not RE_UUID.match(code):
return f"Not an UUID: {code}"
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
return f"Check not found: {code}"
action = "success"
if check.subject or check.subject_fail:
action = "ign"
# Specify policy, the default policy does not decode encoded headers:
data_str = data.decode(errors="replace")
parsed = email.message_from_string(data_str, policy=email.policy.SMTP)
subject = parsed.get("subject", "")
if check.subject_fail and _match(subject, check.subject_fail):
action = "fail"
elif check.subject and _match(subject, check.subject):
action = "success"
ua = "Email from %s" % mailfrom
check.ping(remote_addr, "email", "", ua, data, action)
return f"Processed ping for {code}"
class Listener(SMTPServer):
def __init__(self, localaddr, stdout):
|
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
# get a new db connection in case the old one has timed out:
connections.close_all()
result = _process_message(peer[0], mailfrom, rcpttos[0], data)
self.stdout.write(result)
class Command(BaseCommand):
help = "Listen for ping emails"
def add_arguments(self, parser):
parser.add_argument(
"--host", help="ip address to listen on, default 0.0.0.0", default="0.0.0.0"
)
parser.add_argument(
"--port", help="port to listen on, default 25", type=int, default=25
)
def handle(self, host, port, *args, **options):
_ = Listener((host, port), self.stdout)
print("Starting SMTP listener on %s:%d ..." % (host, port))
asyncore.loop()
| self.stdout = stdout
super(Listener, self).__init__(localaddr, None, decode_data=False) | identifier_body |
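Worth noting: `smtpd` and `asyncore` are deprecated and were removed in Python 3.12, so a modern port of this listener would likely sit on the third-party `aiosmtpd` package. A rough, hedged sketch of that wiring follows; the handler name and the print-based logging are assumptions, not part of this project.

# Hedged aiosmtpd port of the Listener above; reuses _process_message from
# this module. hostname/port mirror the defaults in Command.add_arguments.
from aiosmtpd.controller import Controller

class PingHandler:
    async def handle_DATA(self, server, session, envelope):
        # session.peer, envelope.mail_from, envelope.rcpt_tos and
        # envelope.content map onto peer, mailfrom, rcpttos and data.
        result = _process_message(
            session.peer[0],
            envelope.mail_from,
            envelope.rcpt_tos[0],
            envelope.content,
        )
        print(result)
        return "250 Message accepted for delivery"

controller = Controller(PingHandler(), hostname="0.0.0.0", port=25)
controller.start()  # serves on a background thread until controller.stop()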
smtpd.py | import asyncore
import email
import email.policy
import re
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from django.db import connections
from hc.api.models import Check
RE_UUID = re.compile(
"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$"
)
def _match(subject, keywords):
for s in keywords.split(","):
|
return False
def _process_message(remote_addr, mailfrom, mailto, data):
to_parts = mailto.split("@")
code = to_parts[0]
if not RE_UUID.match(code):
return f"Not an UUID: {code}"
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
return f"Check not found: {code}"
action = "success"
if check.subject or check.subject_fail:
action = "ign"
# Specify policy, the default policy does not decode encoded headers:
data_str = data.decode(errors="replace")
parsed = email.message_from_string(data_str, policy=email.policy.SMTP)
subject = parsed.get("subject", "")
if check.subject_fail and _match(subject, check.subject_fail):
action = "fail"
elif check.subject and _match(subject, check.subject):
action = "success"
ua = "Email from %s" % mailfrom
check.ping(remote_addr, "email", "", ua, data, action)
return f"Processed ping for {code}"
class Listener(SMTPServer):
def __init__(self, localaddr, stdout):
self.stdout = stdout
super(Listener, self).__init__(localaddr, None, decode_data=False)
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
# get a new db connection in case the old one has timed out:
connections.close_all()
result = _process_message(peer[0], mailfrom, rcpttos[0], data)
self.stdout.write(result)
class Command(BaseCommand):
help = "Listen for ping emails"
def add_arguments(self, parser):
parser.add_argument(
"--host", help="ip address to listen on, default 0.0.0.0", default="0.0.0.0"
)
parser.add_argument(
"--port", help="port to listen on, default 25", type=int, default=25
)
def handle(self, host, port, *args, **options):
_ = Listener((host, port), self.stdout)
print("Starting SMTP listener on %s:%d ..." % (host, port))
asyncore.loop()
| s = s.strip()
if s and s in subject:
return True | conditional_block |
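The `conditional_block` hole above is `_match`'s keyword loop: plain, case-sensitive substring containment over comma-separated keywords, with blank entries skipped. A quick illustration with made-up subjects and keyword lists:

# Illustration of _match semantics (example values are invented).
assert _match("Nightly backup FAILED", "FAILED, error")   # substring hit
assert not _match("Nightly backup failed", "FAILED")      # case-sensitive
assert not _match("All good", " , ")                      # blank keywords are skipped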
smtpd.py | import asyncore
import email
import email.policy
import re
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from django.db import connections
from hc.api.models import Check
RE_UUID = re.compile(
"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$"
)
def _match(subject, keywords):
for s in keywords.split(","):
s = s.strip()
if s and s in subject:
return True
return False
def _process_message(remote_addr, mailfrom, mailto, data):
to_parts = mailto.split("@")
code = to_parts[0]
| check = Check.objects.get(code=code)
except Check.DoesNotExist:
return f"Check not found: {code}"
action = "success"
if check.subject or check.subject_fail:
action = "ign"
# Specify policy, the default policy does not decode encoded headers:
data_str = data.decode(errors="replace")
parsed = email.message_from_string(data_str, policy=email.policy.SMTP)
subject = parsed.get("subject", "")
if check.subject_fail and _match(subject, check.subject_fail):
action = "fail"
elif check.subject and _match(subject, check.subject):
action = "success"
ua = "Email from %s" % mailfrom
check.ping(remote_addr, "email", "", ua, data, action)
return f"Processed ping for {code}"
class Listener(SMTPServer):
def __init__(self, localaddr, stdout):
self.stdout = stdout
super(Listener, self).__init__(localaddr, None, decode_data=False)
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
# get a new db connection in case the old one has timed out:
connections.close_all()
result = _process_message(peer[0], mailfrom, rcpttos[0], data)
self.stdout.write(result)
class Command(BaseCommand):
help = "Listen for ping emails"
def add_arguments(self, parser):
parser.add_argument(
"--host", help="ip address to listen on, default 0.0.0.0", default="0.0.0.0"
)
parser.add_argument(
"--port", help="port to listen on, default 25", type=int, default=25
)
def handle(self, host, port, *args, **options):
_ = Listener((host, port), self.stdout)
print("Starting SMTP listener on %s:%d ..." % (host, port))
asyncore.loop() | if not RE_UUID.match(code):
return f"Not an UUID: {code}"
try: | random_line_split |
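The recipient address encodes the check's UUID as its local part, and `RE_UUID` gates the database lookup. A small demonstration — the address is a made-up example:

# The local part of the ping address must be a version-4 UUID.
mailto = "[email protected]"
code = mailto.split("@")[0]
assert RE_UUID.match(code) is not None
assert RE_UUID.match("not-a-uuid") is None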
smtpd.py | import asyncore
import email
import email.policy
import re
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from django.db import connections
from hc.api.models import Check
RE_UUID = re.compile(
"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$"
)
def _match(subject, keywords):
for s in keywords.split(","):
s = s.strip()
if s and s in subject:
return True
return False
def _process_message(remote_addr, mailfrom, mailto, data):
to_parts = mailto.split("@")
code = to_parts[0]
if not RE_UUID.match(code):
return f"Not an UUID: {code}"
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
return f"Check not found: {code}"
action = "success"
if check.subject or check.subject_fail:
action = "ign"
# Specify policy, the default policy does not decode encoded headers:
data_str = data.decode(errors="replace")
parsed = email.message_from_string(data_str, policy=email.policy.SMTP)
subject = parsed.get("subject", "")
if check.subject_fail and _match(subject, check.subject_fail):
action = "fail"
elif check.subject and _match(subject, check.subject):
action = "success"
ua = "Email from %s" % mailfrom
check.ping(remote_addr, "email", "", ua, data, action)
return f"Processed ping for {code}"
class Listener(SMTPServer):
def | (self, localaddr, stdout):
self.stdout = stdout
super(Listener, self).__init__(localaddr, None, decode_data=False)
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
# get a new db connection in case the old one has timed out:
connections.close_all()
result = _process_message(peer[0], mailfrom, rcpttos[0], data)
self.stdout.write(result)
class Command(BaseCommand):
help = "Listen for ping emails"
def add_arguments(self, parser):
parser.add_argument(
"--host", help="ip address to listen on, default 0.0.0.0", default="0.0.0.0"
)
parser.add_argument(
"--port", help="port to listen on, default 25", type=int, default=25
)
def handle(self, host, port, *args, **options):
_ = Listener((host, port), self.stdout)
print("Starting SMTP listener on %s:%d ..." % (host, port))
asyncore.loop()
| __init__ | identifier_name |
wizard.js | /**
* Angular Wizard directive
* Copyright (c) 2014 Genadijus Paleckis ([email protected])
* License: MIT
* GIT: https://github.com/sickelap/angular-wizard
*
* Example usage:
*
* In your template add tag:
* <div wizard="wizardConfig"></div>
*
* Then in your controller create wizard configuration:
*
* $scope.wizardConfig = {
* steps: [
* {
* title: 'Step #1',
* titleNext: 'Next: Step #2',
* templateUrl: 'wizard_step1.html',
* callback: function(){ return true; }
* },
* {
* title: 'Step #2',
* titlePrev: 'Back: Step #1',
* titleNext: 'Next: Step #3',
* templateUrl: 'wizard_step1.html',
* callback: function(){ return true; }
* },
* {
* title: 'Step #3',
* titlePrev: 'Back: Step #2',
* titleNext: 'Finish',
* templateUrl: 'wizard_step1.html',
* callback: function(){ return true; }
* }
* ]
* }
*
*/
angular.module('wizard', ['wizardStep'])
.directive('wizard', function () {
var template = '' +
'<div class="wizard">' +
' <ol class="wizard-breadcrumbs">' +
' <li ng-repeat="s in config.steps" class="{{ s.position }}">' +
' <a href="#" ng-click="gotoStep($index)" ng-if="s.position===\'past\'">{{ s.title }}</a>' +
' <span ng-if="s.position!==\'past\'">{{ s.title }}</span>' +
' </li>' +
' </ol>' +
' <ul class="wizard-steps">' +
' <li ng-repeat="s in config.steps" ng-show="isCurrent($index)">' +
' <div data-wizard-step="s"></div>' +
' </li>' +
' </ul>' +
'</div>';
var linkFn = function (scope) {
scope.currentStep = 0;
/**
* set correct position for all the steps in the wizard
*/
angular.forEach(scope.config.steps, function (step, index) {
step.position = (index === 0) ? 'current' : 'future';
});
scope.isCurrent = function (index) {
return (scope.config.steps[index].position === 'current');
};
scope.gotoStep = function (transitionTo) {
if (transitionTo > scope.currentStep) {
var step = scope.config.steps[scope.currentStep];
if (typeof step.callback === 'function') {
if (true !== step.callback()) {
return;
}
}
}
if (transitionTo >= scope.config.steps.length) |
if (transitionTo < 0) {
return; // first step
}
scope.currentStep = transitionTo;
angular.forEach(scope.config.steps, function (step, index) {
if (index < scope.currentStep) {
step.position = 'past';
}
if (index === scope.currentStep) {
step.position = 'current';
}
if (index > scope.currentStep) {
step.position = 'future';
}
});
};
};
var controllerFn = ['$scope', function ($scope) {
return {
goBack: function () {
$scope.gotoStep($scope.currentStep - 1);
},
goNext: function () {
$scope.gotoStep($scope.currentStep + 1);
}
};
}];
return {
replace: true,
scope: {
config: '=wizard'
},
template: template,
link: linkFn,
controller: controllerFn
};
})
;
angular.module('wizardStep', [])
.directive('wizardStep', ['$templateCache', '$compile', function ($templateCache, $compile) {
var template = '' +
'<div class="wizard-step">' +
' <div class="wizard-step-nav">' +
' <a href="#" class="wizard-step-nav-prev" ng-click="goBack()" ng-if="config.titlePrev">{{ config.titlePrev }}</a>' +
' <a href="#" class="wizard-step-nav-next" ng-click="goNext()" ng-if="config.titleNext">{{ config.titleNext }}</a>' +
' </div>' +
'</div>';
var linkFn = function (scope, element, attr, wizard) {
/**
* interpolate template into the step's DOM
*/
element.prepend($templateCache.get(scope.config.templateUrl));
$compile(element.contents())(scope);
/**
* Expose step configuration items directly to the scope.
* This way we can access step's configuration properties directly,
* for instance {{title}} instead of {{config.title}}.
*/
angular.forEach(scope.config, function (value, key) {
scope[key] = value;
});
scope.goBack = function () {
wizard.goBack();
};
scope.goNext = function () {
// find a form in the scope
var formElm = element.find('form');
var form = scope[formElm.prop('name')];
/**
* By default step will be valid even if form element does not exist
* @type {boolean}
*/
var formValid = true;
if (form) { // Do we have a form with controller?
form.$setDirty();
formValid = form.$valid;
}
if (formValid) {
wizard.goNext();
}
};
};
return {
restrict: 'A',
require: '^wizard',
scope: {
config: '=wizardStep'
},
template: template,
link: linkFn
};
}])
; | {
return; // last step
} | conditional_block |
wizard.js | /**
* Angular Wizard directive
* Copyright (c) 2014 Genadijus Paleckis ([email protected])
* License: MIT
* GIT: https://github.com/sickelap/angular-wizard
*
* Example usage:
*
* In your template add tag:
* <div wizard="wizardConfig"></div>
*
* Then in your controller create wizard configuration:
*
* $scope.wizardConfig = {
* steps: [
* {
* title: 'Step #1',
* titleNext: 'Next: Step #2',
* templateUrl: 'wizard_step1.html',
* callback: function(){ return true; }
* },
* {
* title: 'Step #2',
* titlePrev: 'Back: Step #1',
* titleNext: 'Next: Step #3',
* templateUrl: 'wizard_step1.html',
* callback: function(){ return true; }
* },
* {
* title: 'Step #3',
* titlePrev: 'Back: Step #2',
* titleNext: 'Finish',
* templateUrl: 'wizard_step1.html',
* callback: function(){ return true; }
* }
* ]
* }
*
*/
angular.module('wizard', ['wizardStep'])
.directive('wizard', function () {
var template = '' +
'<div class="wizard">' +
' <ol class="wizard-breadcrumbs">' +
' <li ng-repeat="s in config.steps" class="{{ s.position }}">' +
' <a href="#" ng-click="gotoStep($index)" ng-if="s.position===\'past\'">{{ s.title }}</a>' +
' <span ng-if="s.position!==\'past\'">{{ s.title }}</span>' +
' </li>' + | ' </li>' +
' </ul>' +
'</div>';
var linkFn = function (scope) {
scope.currentStep = 0;
/**
* set correct position for all the steps in the wizard
*/
angular.forEach(scope.config.steps, function (step, index) {
step.position = (index === 0) ? 'current' : 'future';
});
scope.isCurrent = function (index) {
return (scope.config.steps[index].position === 'current');
};
scope.gotoStep = function (transitionTo) {
if (transitionTo > scope.currentStep) {
var step = scope.config.steps[scope.currentStep];
if (typeof step.callback === 'function') {
if (true !== step.callback()) {
return;
}
}
}
if (transitionTo >= scope.config.steps.length) {
return; // last step
}
if (transitionTo < 0) {
return; // first step
}
scope.currentStep = transitionTo;
angular.forEach(scope.config.steps, function (step, index) {
if (index < scope.currentStep) {
step.position = 'past';
}
if (index === scope.currentStep) {
step.position = 'current';
}
if (index > scope.currentStep) {
step.position = 'future';
}
});
};
};
var controllerFn = ['$scope', function ($scope) {
return {
goBack: function () {
$scope.gotoStep($scope.currentStep - 1);
},
goNext: function () {
$scope.gotoStep($scope.currentStep + 1);
}
};
}];
return {
replace: true,
scope: {
config: '=wizard'
},
template: template,
link: linkFn,
controller: controllerFn
};
})
;
angular.module('wizardStep', [])
.directive('wizardStep', ['$templateCache', '$compile', function ($templateCache, $compile) {
var template = '' +
'<div class="wizard-step">' +
' <div class="wizard-step-nav">' +
' <a href="#" class="wizard-step-nav-prev" ng-click="goBack()" ng-if="config.titlePrev">{{ config.titlePrev }}</a>' +
' <a href="#" class="wizard-step-nav-next" ng-click="goNext()" ng-if="config.titleNext">{{ config.titleNext }}</a>' +
' </div>' +
'</div>';
var linkFn = function (scope, element, attr, wizard) {
/**
* interpolate template into the step's DOM
*/
element.prepend($templateCache.get(scope.config.templateUrl));
$compile(element.contents())(scope);
/**
* Expose step configuration items directly to the scope.
* This way we can access step's configuration properties directly,
* for instance {{title}} instead of {{config.title}}.
*/
angular.forEach(scope.config, function (value, key) {
scope[key] = value;
});
scope.goBack = function () {
wizard.goBack();
};
scope.goNext = function () {
// find a form in the scope
var formElm = element.find('form');
var form = scope[formElm.prop('name')];
/**
* By default step will be valid even if form element does not exist
* @type {boolean}
*/
var formValid = true;
if (form) { // Do we have a form with controller?
form.$setDirty();
formValid = form.$valid;
}
if (formValid) {
wizard.goNext();
}
};
};
return {
restrict: 'A',
require: '^wizard',
scope: {
config: '=wizardStep'
},
template: template,
link: linkFn
};
}])
; | ' </ol>' +
' <ul class="wizard-steps">' +
' <li ng-repeat="s in config.steps" ng-show="isCurrent($index)">' +
' <div data-wizard-step="s"></div>' + | random_line_split |
orphan.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use rustc::traits;
use rustc::ty::{self, TyCtxt};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut orphan = OrphanChecker { tcx };
tcx.hir().krate().visit_all_item_likes(&mut orphan);
}
struct OrphanChecker<'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> {
/// Checks exactly one impl for orphan rules and other such
/// restrictions. In this fn, it can happen that multiple errors
/// apply to a specific impl, so just return after reporting one
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item) {
let def_id = self.tcx.hir().local_def_id(item.id);
// "Trait" impl
if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node {
debug!("coherence2::orphan check: trait impl {}",
self.tcx.hir().node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let cm = self.tcx.sess.source_map();
let sp = cm.def_span(item.span);
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => { | .span_label(sp, "impl doesn't use types inside crate")
.note("the impl does not reference any types defined in this crate")
.note("define and implement a trait or new type instead")
.emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
struct_span_err!(self.tcx.sess,
sp,
E0210,
"type parameter `{}` must be used as the type parameter \
for some local type (e.g., `MyStruct<{}>`)",
param_ty,
param_ty)
.span_label(sp,
format!("type parameter `{}` must be used as the type \
parameter for some local type", param_ty))
.note("only traits defined in the current crate can be implemented \
for a type parameter")
.emit();
return;
}
}
// In addition to the above rules, we restrict impls of auto traits
// so that they can only be implemented on nominal types, such as structs,
// enums or foreign types. To see why this restriction exists, consider the
// following example (#22978). Imagine that crate A defines an auto trait
// `Foo` and a fn that operates on pairs of types:
//
// ```
// // Crate A
// auto trait Foo { }
// fn two_foos<A:Foo,B:Foo>(..) {
// one_foo::<(A,B)>(..)
// }
// fn one_foo<T:Foo>(..) { .. }
// ```
//
// This type-checks fine; in particular the fn
// `two_foos` is able to conclude that `(A,B):Foo`
// because `A:Foo` and `B:Foo`.
//
// Now imagine that crate B comes along and does the following:
//
// ```
// struct A { }
// struct B { }
// impl Foo for A { }
// impl Foo for B { }
// impl !Send for (A, B) { }
// ```
//
// This final impl is legal according to the orphan
// rules, but it invalidates the reasoning from
// `two_foos` above.
debug!("trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
trait_ref,
trait_def_id,
self.tcx.trait_is_auto(trait_def_id));
if self.tcx.trait_is_auto(trait_def_id) &&
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::Adt(self_def, _) => Some(self_def.did),
ty::Foreign(did) => Some(did),
_ => None,
};
let msg = match opt_self_def_id {
// We only want to permit nominal types, but not *all* nominal types.
// They must be local to the current crate, so that people
// can't do `unsafe impl Send for Rc<SomethingLocal>` or
// `impl !Send for Box<SomethingLocalAndSend>`.
Some(self_def_id) => {
if self_def_id.is_local() {
None
} else {
Some((
format!("cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
self.tcx.item_path_str(trait_def_id)),
"can't implement cross-crate trait for type in another crate"
))
}
}
_ => {
Some((format!("cross-crate traits with a default impl, like `{}`, can \
only be implemented for a struct/enum type, not `{}`",
self.tcx.item_path_str(trait_def_id),
self_ty),
"can't implement cross-crate trait with a default impl for \
non-struct/enum type"))
}
};
if let Some((msg, label)) = msg {
struct_span_err!(self.tcx.sess, sp, E0321, "{}", msg)
.span_label(sp, label)
.emit();
return;
}
}
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
} | struct_span_err!(self.tcx.sess,
sp,
E0117,
"only traits defined in the current crate can be \
implemented for arbitrary types") | random_line_split |
orphan.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use rustc::traits;
use rustc::ty::{self, TyCtxt};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) |
struct OrphanChecker<'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> {
/// Checks exactly one impl for orphan rules and other such
/// restrictions. In this fn, it can happen that multiple errors
/// apply to a specific impl, so just return after reporting one
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item) {
let def_id = self.tcx.hir().local_def_id(item.id);
// "Trait" impl
if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node {
debug!("coherence2::orphan check: trait impl {}",
self.tcx.hir().node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let cm = self.tcx.sess.source_map();
let sp = cm.def_span(item.span);
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => {
struct_span_err!(self.tcx.sess,
sp,
E0117,
"only traits defined in the current crate can be \
implemented for arbitrary types")
.span_label(sp, "impl doesn't use types inside crate")
.note("the impl does not reference any types defined in this crate")
.note("define and implement a trait or new type instead")
.emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
struct_span_err!(self.tcx.sess,
sp,
E0210,
"type parameter `{}` must be used as the type parameter \
for some local type (e.g., `MyStruct<{}>`)",
param_ty,
param_ty)
.span_label(sp,
format!("type parameter `{}` must be used as the type \
parameter for some local type", param_ty))
.note("only traits defined in the current crate can be implemented \
for a type parameter")
.emit();
return;
}
}
// In addition to the above rules, we restrict impls of auto traits
// so that they can only be implemented on nominal types, such as structs,
// enums or foreign types. To see why this restriction exists, consider the
// following example (#22978). Imagine that crate A defines an auto trait
// `Foo` and a fn that operates on pairs of types:
//
// ```
// // Crate A
// auto trait Foo { }
// fn two_foos<A:Foo,B:Foo>(..) {
// one_foo::<(A,B)>(..)
// }
// fn one_foo<T:Foo>(..) { .. }
// ```
//
// This type-checks fine; in particular the fn
// `two_foos` is able to conclude that `(A,B):Foo`
// because `A:Foo` and `B:Foo`.
//
// Now imagine that crate B comes along and does the following:
//
// ```
// struct A { }
// struct B { }
// impl Foo for A { }
// impl Foo for B { }
// impl !Send for (A, B) { }
// ```
//
// This final impl is legal according to the orphan
// rules, but it invalidates the reasoning from
// `two_foos` above.
debug!("trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
trait_ref,
trait_def_id,
self.tcx.trait_is_auto(trait_def_id));
if self.tcx.trait_is_auto(trait_def_id) &&
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::Adt(self_def, _) => Some(self_def.did),
ty::Foreign(did) => Some(did),
_ => None,
};
let msg = match opt_self_def_id {
// We only want to permit nominal types, but not *all* nominal types.
// They must be local to the current crate, so that people
// can't do `unsafe impl Send for Rc<SomethingLocal>` or
// `impl !Send for Box<SomethingLocalAndSend>`.
Some(self_def_id) => {
if self_def_id.is_local() {
None
} else {
Some((
format!("cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
self.tcx.item_path_str(trait_def_id)),
"can't implement cross-crate trait for type in another crate"
))
}
}
_ => {
Some((format!("cross-crate traits with a default impl, like `{}`, can \
only be implemented for a struct/enum type, not `{}`",
self.tcx.item_path_str(trait_def_id),
self_ty),
"can't implement cross-crate trait with a default impl for \
non-struct/enum type"))
}
};
if let Some((msg, label)) = msg {
struct_span_err!(self.tcx.sess, sp, E0321, "{}", msg)
.span_label(sp, label)
.emit();
return;
}
}
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
}
| {
let mut orphan = OrphanChecker { tcx };
tcx.hir().krate().visit_all_item_likes(&mut orphan);
} | identifier_body |
orphan.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Orphan checker: every impl either implements a trait defined in this
//! crate or pertains to a type defined in this crate.
use rustc::traits;
use rustc::ty::{self, TyCtxt};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut orphan = OrphanChecker { tcx };
tcx.hir().krate().visit_all_item_likes(&mut orphan);
}
struct | <'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> {
/// Checks exactly one impl for orphan rules and other such
/// restrictions. In this fn, it can happen that multiple errors
/// apply to a specific impl, so just return after reporting one
/// to prevent inundating the user with a bunch of similar error
/// reports.
fn visit_item(&mut self, item: &hir::Item) {
let def_id = self.tcx.hir().local_def_id(item.id);
// "Trait" impl
if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node {
debug!("coherence2::orphan check: trait impl {}",
self.tcx.hir().node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
let cm = self.tcx.sess.source_map();
let sp = cm.def_span(item.span);
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => {
struct_span_err!(self.tcx.sess,
sp,
E0117,
"only traits defined in the current crate can be \
implemented for arbitrary types")
.span_label(sp, "impl doesn't use types inside crate")
.note("the impl does not reference any types defined in this crate")
.note("define and implement a trait or new type instead")
.emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
struct_span_err!(self.tcx.sess,
sp,
E0210,
"type parameter `{}` must be used as the type parameter \
for some local type (e.g., `MyStruct<{}>`)",
param_ty,
param_ty)
.span_label(sp,
format!("type parameter `{}` must be used as the type \
parameter for some local type", param_ty))
.note("only traits defined in the current crate can be implemented \
for a type parameter")
.emit();
return;
}
}
// In addition to the above rules, we restrict impls of auto traits
// so that they can only be implemented on nominal types, such as structs,
// enums or foreign types. To see why this restriction exists, consider the
// following example (#22978). Imagine that crate A defines an auto trait
// `Foo` and a fn that operates on pairs of types:
//
// ```
// // Crate A
// auto trait Foo { }
// fn two_foos<A:Foo,B:Foo>(..) {
// one_foo::<(A,B)>(..)
// }
// fn one_foo<T:Foo>(..) { .. }
// ```
//
// This type-checks fine; in particular the fn
// `two_foos` is able to conclude that `(A,B):Foo`
// because `A:Foo` and `B:Foo`.
//
// Now imagine that crate B comes along and does the following:
//
// ```
// struct A { }
// struct B { }
// impl Foo for A { }
// impl Foo for B { }
// impl !Send for (A, B) { }
// ```
//
// This final impl is legal according to the orphan
// rules, but it invalidates the reasoning from
// `two_foos` above.
debug!("trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
trait_ref,
trait_def_id,
self.tcx.trait_is_auto(trait_def_id));
if self.tcx.trait_is_auto(trait_def_id) &&
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::Adt(self_def, _) => Some(self_def.did),
ty::Foreign(did) => Some(did),
_ => None,
};
let msg = match opt_self_def_id {
// We only want to permit nominal types, but not *all* nominal types.
// They must be local to the current crate, so that people
// can't do `unsafe impl Send for Rc<SomethingLocal>` or
// `impl !Send for Box<SomethingLocalAndSend>`.
Some(self_def_id) => {
if self_def_id.is_local() {
None
} else {
Some((
format!("cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
self.tcx.item_path_str(trait_def_id)),
"can't implement cross-crate trait for type in another crate"
))
}
}
_ => {
Some((format!("cross-crate traits with a default impl, like `{}`, can \
only be implemented for a struct/enum type, not `{}`",
self.tcx.item_path_str(trait_def_id),
self_ty),
"can't implement cross-crate trait with a default impl for \
non-struct/enum type"))
}
};
if let Some((msg, label)) = msg {
struct_span_err!(self.tcx.sess, sp, E0321, "{}", msg)
.span_label(sp, label)
.emit();
return;
}
}
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
}
| OrphanChecker | identifier_name |
sre_constants.py | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20140917
from _sre import MAXREPEAT, MAXGROUPS
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
"""Exception raised for invalid regular expressions.
Attributes:
msg: The unformatted error message
pattern: The regular expression pattern
pos: The index in the pattern where compilation failed (may be None)
lineno: The line corresponding to pos (may be None)
colno: The column corresponding to pos (may be None)
"""
def | (self, msg, pattern=None, pos=None):
self.msg = msg
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
msg = '%s at position %d' % (msg, pos)
if isinstance(pattern, str):
newline = '\n'
else:
newline = b'\n'
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
if newline in pattern:
msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
else:
self.lineno = self.colno = None
super().__init__(msg)
class _NamedIntConstant(int):
def __new__(cls, value, name):
self = super(_NamedIntConstant, cls).__new__(cls, value)
self.name = name
return self
def __str__(self):
return self.name
__repr__ = __str__
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(names):
names = names.strip().split()
items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
globals().update({item.name: item for item in items})
return items
# operators
# failure=0 success=1 (just because it looks better that way :-)
OPCODES = _makecodes("""
FAILURE SUCCESS
ANY ANY_ALL
ASSERT ASSERT_NOT
AT
BRANCH
CALL
CATEGORY
CHARSET BIGCHARSET
GROUPREF GROUPREF_EXISTS GROUPREF_IGNORE
IN IN_IGNORE
INFO
JUMP
LITERAL LITERAL_IGNORE
MARK
MAX_UNTIL
MIN_UNTIL
NOT_LITERAL NOT_LITERAL_IGNORE
NEGATE
RANGE
REPEAT
REPEAT_ONE
SUBPATTERN
MIN_REPEAT_ONE
RANGE_IGNORE
MIN_REPEAT MAX_REPEAT
""")
del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
# positions
ATCODES = _makecodes("""
AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
AT_BOUNDARY AT_NON_BOUNDARY
AT_END AT_END_LINE AT_END_STRING
AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
""")
# categories
CHCODES = _makecodes("""
CATEGORY_DIGIT CATEGORY_NOT_DIGIT
CATEGORY_SPACE CATEGORY_NOT_SPACE
CATEGORY_WORD CATEGORY_NOT_WORD
CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
""")
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE,
RANGE: RANGE_IGNORE,
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d)
for item in items:
f.write("#define %s_%s %d\n" % (prefix, item, item))
with open("sre_constants.h", "w") as f:
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
print("done")
| __init__ | identifier_name |
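The `_NamedIntConstant`/`_makecodes` pair is the trick that keeps opcodes usable both as integers and as readable names; a short demonstration, assuming the module is importable as `sre_constants` (as in CPython's Lib/):

# _NamedIntConstant values are real ints whose str()/repr() is their name.
import sre_constants as sc

print(sc.OPCODES[0], int(sc.OPCODES[0]))            # FAILURE 0
print(sc.LITERAL == sc.OPCODES.index(sc.LITERAL))   # True: the value doubles as the list index
print(repr(sc.MAXREPEAT))                           # MAXREPEAT, still an int subclass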
sre_constants.py | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20140917
from _sre import MAXREPEAT, MAXGROUPS
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
"""Exception raised for invalid regular expressions.
Attributes:
msg: The unformatted error message
pattern: The regular expression pattern
pos: The index in the pattern where compilation failed (may be None)
lineno: The line corresponding to pos (may be None)
colno: The column corresponding to pos (may be None)
"""
def __init__(self, msg, pattern=None, pos=None):
self.msg = msg
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
msg = '%s at position %d' % (msg, pos)
if isinstance(pattern, str):
newline = '\n'
else:
newline = b'\n'
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
if newline in pattern:
msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
else:
self.lineno = self.colno = None
super().__init__(msg)
class _NamedIntConstant(int):
def __new__(cls, value, name):
self = super(_NamedIntConstant, cls).__new__(cls, value)
self.name = name
return self
def __str__(self):
return self.name
__repr__ = __str__
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(names):
names = names.strip().split()
items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
globals().update({item.name: item for item in items})
return items
# operators
# failure=0 success=1 (just because it looks better that way :-)
OPCODES = _makecodes("""
FAILURE SUCCESS
ANY ANY_ALL
ASSERT ASSERT_NOT
AT
BRANCH
CALL
CATEGORY
CHARSET BIGCHARSET
GROUPREF GROUPREF_EXISTS GROUPREF_IGNORE
IN IN_IGNORE
INFO
JUMP
LITERAL LITERAL_IGNORE
MARK
MAX_UNTIL
MIN_UNTIL
NOT_LITERAL NOT_LITERAL_IGNORE
NEGATE
RANGE
REPEAT
REPEAT_ONE
SUBPATTERN
| MIN_REPEAT MAX_REPEAT
""")
del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
# positions
ATCODES = _makecodes("""
AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
AT_BOUNDARY AT_NON_BOUNDARY
AT_END AT_END_LINE AT_END_STRING
AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
""")
# categories
CHCODES = _makecodes("""
CATEGORY_DIGIT CATEGORY_NOT_DIGIT
CATEGORY_SPACE CATEGORY_NOT_SPACE
CATEGORY_WORD CATEGORY_NOT_WORD
CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
""")
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE,
RANGE: RANGE_IGNORE,
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d)
for item in items:
f.write("#define %s_%s %d\n" % (prefix, item, item))
with open("sre_constants.h", "w") as f:
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
print("done") | MIN_REPEAT_ONE
RANGE_IGNORE
| random_line_split |
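A short illustration of what the `_makecodes`/`_NamedIntConstant` machinery above produces — a minimal sketch assuming the module is importable as `sre_constants`; each constant is an int that stringifies to its own name, so the opcode tables can be indexed by the constants themselves:

# Sketch only; exact numeric values depend on declaration order above.
from sre_constants import OPCODES, LITERAL, LITERAL_IGNORE, OP_IGNORE

print(LITERAL, int(LITERAL))                 # "LITERAL" plus its numeric opcode
assert OPCODES[LITERAL] is LITERAL           # a value doubles as its own list index
assert OP_IGNORE[LITERAL] is LITERAL_IGNORE  # ignore-case replacement lookup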
sre_constants.py | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20140917
from _sre import MAXREPEAT, MAXGROUPS
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
"""Exception raised for invalid regular expressions.
Attributes:
msg: The unformatted error message
pattern: The regular expression pattern
pos: The index in the pattern where compilation failed (may be None)
lineno: The line corresponding to pos (may be None)
colno: The column corresponding to pos (may be None)
"""
def __init__(self, msg, pattern=None, pos=None):
self.msg = msg
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
msg = '%s at position %d' % (msg, pos)
if isinstance(pattern, str):
newline = '\n'
else:
|
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
if newline in pattern:
msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
else:
self.lineno = self.colno = None
super().__init__(msg)
class _NamedIntConstant(int):
def __new__(cls, value, name):
self = super(_NamedIntConstant, cls).__new__(cls, value)
self.name = name
return self
def __str__(self):
return self.name
__repr__ = __str__
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(names):
names = names.strip().split()
items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
globals().update({item.name: item for item in items})
return items
# operators
# failure=0 success=1 (just because it looks better that way :-)
OPCODES = _makecodes("""
FAILURE SUCCESS
ANY ANY_ALL
ASSERT ASSERT_NOT
AT
BRANCH
CALL
CATEGORY
CHARSET BIGCHARSET
GROUPREF GROUPREF_EXISTS GROUPREF_IGNORE
IN IN_IGNORE
INFO
JUMP
LITERAL LITERAL_IGNORE
MARK
MAX_UNTIL
MIN_UNTIL
NOT_LITERAL NOT_LITERAL_IGNORE
NEGATE
RANGE
REPEAT
REPEAT_ONE
SUBPATTERN
MIN_REPEAT_ONE
RANGE_IGNORE
MIN_REPEAT MAX_REPEAT
""")
del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
# positions
ATCODES = _makecodes("""
AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
AT_BOUNDARY AT_NON_BOUNDARY
AT_END AT_END_LINE AT_END_STRING
AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
""")
# categories
CHCODES = _makecodes("""
CATEGORY_DIGIT CATEGORY_NOT_DIGIT
CATEGORY_SPACE CATEGORY_NOT_SPACE
CATEGORY_WORD CATEGORY_NOT_WORD
CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
""")
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE,
RANGE: RANGE_IGNORE,
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d)
for item in items:
f.write("#define %s_%s %d\n" % (prefix, item, item))
with open("sre_constants.h", "w") as f:
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
print("done")
| newline = b'\n' | conditional_block |
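To see the line/column bookkeeping in the `error` class above in action, a small sketch (again assuming the module is importable as `sre_constants`):

from sre_constants import error

# pos 5 points at '(' in a pattern whose second line starts after the '\n'.
e = error('bad character', pattern='ab\ncd(', pos=5)
assert (e.lineno, e.colno) == (2, 3)
print(e)  # bad character at position 5 (line 2, column 3)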
sre_constants.py | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20140917
from _sre import MAXREPEAT, MAXGROUPS
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
"""Exception raised for invalid regular expressions.
Attributes:
msg: The unformatted error message
pattern: The regular expression pattern
pos: The index in the pattern where compilation failed (may be None)
lineno: The line corresponding to pos (may be None)
colno: The column corresponding to pos (may be None)
"""
def __init__(self, msg, pattern=None, pos=None):
self.msg = msg
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
msg = '%s at position %d' % (msg, pos)
if isinstance(pattern, str):
newline = '\n'
else:
newline = b'\n'
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
if newline in pattern:
msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
else:
self.lineno = self.colno = None
super().__init__(msg)
class _NamedIntConstant(int):
def __new__(cls, value, name):
|
def __str__(self):
return self.name
__repr__ = __str__
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(names):
names = names.strip().split()
items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
globals().update({item.name: item for item in items})
return items
# operators
# failure=0 success=1 (just because it looks better that way :-)
OPCODES = _makecodes("""
FAILURE SUCCESS
ANY ANY_ALL
ASSERT ASSERT_NOT
AT
BRANCH
CALL
CATEGORY
CHARSET BIGCHARSET
GROUPREF GROUPREF_EXISTS GROUPREF_IGNORE
IN IN_IGNORE
INFO
JUMP
LITERAL LITERAL_IGNORE
MARK
MAX_UNTIL
MIN_UNTIL
NOT_LITERAL NOT_LITERAL_IGNORE
NEGATE
RANGE
REPEAT
REPEAT_ONE
SUBPATTERN
MIN_REPEAT_ONE
RANGE_IGNORE
MIN_REPEAT MAX_REPEAT
""")
del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
# positions
ATCODES = _makecodes("""
AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
AT_BOUNDARY AT_NON_BOUNDARY
AT_END AT_END_LINE AT_END_STRING
AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
""")
# categories
CHCODES = _makecodes("""
CATEGORY_DIGIT CATEGORY_NOT_DIGIT
CATEGORY_SPACE CATEGORY_NOT_SPACE
CATEGORY_WORD CATEGORY_NOT_WORD
CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
""")
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE,
RANGE: RANGE_IGNORE,
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d)
for item in items:
f.write("#define %s_%s %d\n" % (prefix, item, item))
with open("sre_constants.h", "w") as f:
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
print("done")
| self = super(_NamedIntConstant, cls).__new__(cls, value)
self.name = name
return self | identifier_body |
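The SRE_FLAG_* values above are distinct powers of two, so they combine bitwise; in CPython they also line up with the public `re` module flag values. A quick check:

import re
from sre_constants import SRE_FLAG_IGNORECASE, SRE_FLAG_MULTILINE

flags = SRE_FLAG_IGNORECASE | SRE_FLAG_MULTILINE
assert flags == (re.IGNORECASE | re.MULTILINE) == 10
assert flags & SRE_FLAG_MULTILINE  # bit tests, the same way the engine checks them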
quality_stats.py | #!/usr/bin/python -tt
# Quality scores from fastx
# Website: http://hannonlab.cshl.edu/fastx_toolkit/
# Import OS features to run external programs
import os
import glob
v = "Version 0.1"
# Versions:
# 0.1 - Simple script to run cutadapt on all of the files
fastq_indir = "/home/chris/transcriptome/fastq/trimmed/" | os.system('fastx_quality_stats -i "%s/Sample_1_L001_trimmed.fastq" -o "%s/Sample_1_L001_trimmed.txt"' % (fastq_indir, fastq_outdir))
os.system('fastx_quality_stats -i "%s/Sample_1_L002_trimmed.fastq" -o "%s/Sample_1_L002_trimmed.txt"' % (fastq_indir, fastq_outdir)) | fastq_outdir = "/home/chris/transcriptome/fastq/reports/quality stats"
# Sample 1
print "Analyzing Sample 1..." | random_line_split |
kpabenc_adapt_hybrid.py | from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
"""
>>> from charm.schemes.abenc.abenc_lsw08 import KPabe
>>> group = PairingGroup('SS512')
>>> kpabe = KPabe(group)
>>> hyb_abe = HybridABEnc(kpabe, group)
>>> access_policy = ['ONE', 'TWO', 'THREE']
>>> access_key = '((FOUR or THREE) and (TWO or ONE))'
>>> msg = b"hello world this is an important message."
>>> (master_public_key, master_key) = hyb_abe.setup()
>>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
>>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
>>> hyb_abe.decrypt(cipher_text, secret_key)
b'hello world this is an important message.'
"""
def __init__(self, scheme, groupObj):
ABEnc.__init__(self)
global abenc
# check properties (TODO)
abenc = scheme
self.group = groupObj
def setup(self):
return abenc.setup()
def keygen(self, pk, mk, object):
return abenc.keygen(pk, mk, object)
def encrypt(self, pk, M, object):
key = self.group.random(GT)
c1 = abenc.encrypt(pk, key, object)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, ct, sk):
c1, c2 = ct['c1'], ct['c2']
key = abenc.decrypt(c1, sk)
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
return cipher.decrypt(c2)
def main():
groupObj = PairingGroup('SS512')
kpabe = KPabe(groupObj)
hyb_abe = HybridABEnc(kpabe, groupObj)
access_key = '((ONE or TWO) and THREE)'
access_policy = ['ONE', 'TWO', 'THREE']
message = b"hello world this is an important message."
(pk, mk) = hyb_abe.setup() | if debug: print("pk => ", pk)
if debug: print("mk => ", mk)
sk = hyb_abe.keygen(pk, mk, access_key)
if debug: print("sk => ", sk)
ct = hyb_abe.encrypt(pk, message, access_policy)
mdec = hyb_abe.decrypt(ct, sk)
assert mdec == message, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
debug = True
main() | random_line_split |
|
kpabenc_adapt_hybrid.py |
from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
"""
>>> from charm.schemes.abenc.abenc_lsw08 import KPabe
>>> group = PairingGroup('SS512')
>>> kpabe = KPabe(group)
>>> hyb_abe = HybridABEnc(kpabe, group)
>>> access_policy = ['ONE', 'TWO', 'THREE']
>>> access_key = '((FOUR or THREE) and (TWO or ONE))'
>>> msg = b"hello world this is an important message."
>>> (master_public_key, master_key) = hyb_abe.setup()
>>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
>>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
>>> hyb_abe.decrypt(cipher_text, secret_key)
b'hello world this is an important message.'
"""
def __init__(self, scheme, groupObj):
ABEnc.__init__(self)
global abenc
# check properties (TODO)
abenc = scheme
self.group = groupObj
def setup(self):
return abenc.setup()
def keygen(self, pk, mk, object):
return abenc.keygen(pk, mk, object)
def encrypt(self, pk, M, object):
key = self.group.random(GT)
c1 = abenc.encrypt(pk, key, object)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, ct, sk):
|
def main():
groupObj = PairingGroup('SS512')
kpabe = KPabe(groupObj)
hyb_abe = HybridABEnc(kpabe, groupObj)
access_key = '((ONE or TWO) and THREE)'
access_policy = ['ONE', 'TWO', 'THREE']
message = b"hello world this is an important message."
(pk, mk) = hyb_abe.setup()
if debug: print("pk => ", pk)
if debug: print("mk => ", mk)
sk = hyb_abe.keygen(pk, mk, access_key)
if debug: print("sk => ", sk)
ct = hyb_abe.encrypt(pk, message, access_policy)
mdec = hyb_abe.decrypt(ct, sk)
assert mdec == message, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
debug = True
main()
| c1, c2 = ct['c1'], ct['c2']
key = abenc.decrypt(c1, sk)
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
return cipher.decrypt(c2) | identifier_body |
kpabenc_adapt_hybrid.py |
from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
"""
>>> from charm.schemes.abenc.abenc_lsw08 import KPabe
>>> group = PairingGroup('SS512')
>>> kpabe = KPabe(group)
>>> hyb_abe = HybridABEnc(kpabe, group)
>>> access_policy = ['ONE', 'TWO', 'THREE']
>>> access_key = '((FOUR or THREE) and (TWO or ONE))'
>>> msg = b"hello world this is an important message."
>>> (master_public_key, master_key) = hyb_abe.setup()
>>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
>>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
>>> hyb_abe.decrypt(cipher_text, secret_key)
b'hello world this is an important message.'
"""
def __init__(self, scheme, groupObj):
ABEnc.__init__(self)
global abenc
# check properties (TODO)
abenc = scheme
self.group = groupObj
def setup(self):
return abenc.setup()
def keygen(self, pk, mk, object):
return abenc.keygen(pk, mk, object)
def encrypt(self, pk, M, object):
key = self.group.random(GT)
c1 = abenc.encrypt(pk, key, object)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, ct, sk):
c1, c2 = ct['c1'], ct['c2']
key = abenc.decrypt(c1, sk)
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
return cipher.decrypt(c2)
def main():
groupObj = PairingGroup('SS512')
kpabe = KPabe(groupObj)
hyb_abe = HybridABEnc(kpabe, groupObj)
access_key = '((ONE or TWO) and THREE)'
access_policy = ['ONE', 'TWO', 'THREE']
message = b"hello world this is an important message."
(pk, mk) = hyb_abe.setup()
if debug: print("pk => ", pk)
if debug: print("mk => ", mk)
sk = hyb_abe.keygen(pk, mk, access_key)
if debug: print("sk => ", sk)
ct = hyb_abe.encrypt(pk, message, access_policy)
mdec = hyb_abe.decrypt(ct, sk)
assert mdec == message, "Failed Decryption!!!"
if debug: |
if __name__ == "__main__":
debug = True
main()
| print("Successful Decryption!!!") | conditional_block |
kpabenc_adapt_hybrid.py |
from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
"""
>>> from charm.schemes.abenc.abenc_lsw08 import KPabe
>>> group = PairingGroup('SS512')
>>> kpabe = KPabe(group)
>>> hyb_abe = HybridABEnc(kpabe, group)
>>> access_policy = ['ONE', 'TWO', 'THREE']
>>> access_key = '((FOUR or THREE) and (TWO or ONE))'
>>> msg = b"hello world this is an important message."
>>> (master_public_key, master_key) = hyb_abe.setup()
>>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
>>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
>>> hyb_abe.decrypt(cipher_text, secret_key)
b'hello world this is an important message.'
"""
def __init__(self, scheme, groupObj):
ABEnc.__init__(self)
global abenc
# check properties (TODO)
abenc = scheme
self.group = groupObj
def setup(self):
return abenc.setup()
def keygen(self, pk, mk, object):
return abenc.keygen(pk, mk, object)
def encrypt(self, pk, M, object):
key = self.group.random(GT)
c1 = abenc.encrypt(pk, key, object)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, ct, sk):
c1, c2 = ct['c1'], ct['c2']
key = abenc.decrypt(c1, sk)
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
return cipher.decrypt(c2)
def | ():
groupObj = PairingGroup('SS512')
kpabe = KPabe(groupObj)
hyb_abe = HybridABEnc(kpabe, groupObj)
access_key = '((ONE or TWO) and THREE)'
access_policy = ['ONE', 'TWO', 'THREE']
message = b"hello world this is an important message."
(pk, mk) = hyb_abe.setup()
if debug: print("pk => ", pk)
if debug: print("mk => ", mk)
sk = hyb_abe.keygen(pk, mk, access_key)
if debug: print("sk => ", sk)
ct = hyb_abe.encrypt(pk, message, access_policy)
mdec = hyb_abe.decrypt(ct, sk)
assert mdec == message, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
debug = True
main()
| main | identifier_name |
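HybridABEnc above is the usual KEM/DEM construction: the ABE scheme encapsulates a fresh random group element, and a symmetric cipher keyed from it carries the payload. The same shape in a scheme-agnostic sketch, using the `cryptography` package's Fernet for the symmetric layer; `wrap_key`/`unwrap_key` are hypothetical stand-ins for the ABE encrypt/decrypt calls, not part of Charm:

from cryptography.fernet import Fernet

def hybrid_encrypt(wrap_key, message):
    sym_key = Fernet.generate_key()                  # fresh per-message key
    return {'c1': wrap_key(sym_key),                 # KEM: key under the policy
            'c2': Fernet(sym_key).encrypt(message)}  # DEM: bulk payload

def hybrid_decrypt(unwrap_key, ct):
    sym_key = unwrap_key(ct['c1'])
    return Fernet(sym_key).decrypt(ct['c2'])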
global-plugin.d.ts | // Type definitions for [~LIBRARY NAME~] [~OPTIONAL VERSION NUMBER~]
// Project: [~PROJECT NAME~]
// Definitions by: [~YOUR NAME~] <[~YOUR URL~]>
/*~ This template shows how to create a global plugin */
/*~ Write a declaration for the original type and add the new members.
 *~ For example, here a 'toBinaryString' method with two overloads is
 *~ added to the built-in `number` type
 */
interface Number {
toBinaryString(opts?: MyLibrary.BinaryFormatOptions): string;
toBinaryString(callback: MyLibrary.BinaryFormatCallback, opts?: MyLibrary.BinaryFormatOptions): string;
}
/*~ If you need to declare several types, place them in a namespace to keep
 *~ additions to the global namespace to a minimum
 */
declare namespace MyLibrary { | prefix?: string;
padding: number;
}
} | type BinaryFormatCallback = (n: number) => string;
interface BinaryFormatOptions { | random_line_split |
sha1.js | /**
* Copyright 2013 Google, Inc.
* @fileoverview Calculate SHA1 hash of the given content.
*/
goog.provide("adapt.sha1");
goog.require("adapt.base");
/**
* @param {number} n
* @return {string} big-endian byte sequence
*/
adapt.sha1.encode32 = function(n) {
return String.fromCharCode((n >>> 24)&0xFF, (n >>> 16)&0xFF, (n >>> 8)&0xFF, n&0xFF);
};
/**
* @param {string} bytes big-endian byte sequence
* @return {number}
*/
adapt.sha1.decode32 = function(bytes) {
// Important facts: "".charCodeAt(0) == NaN, NaN & 0xFF == 0
var b0 = bytes.charCodeAt(0) & 0xFF;
var b1 = bytes.charCodeAt(1) & 0xFF;
var b2 = bytes.charCodeAt(2) & 0xFF;
var b3 = bytes.charCodeAt(3) & 0xFF;
return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {Array.<number>} big-endian uint32 numbers representing sha1 hash
*/
adapt.sha1.bytesToSHA1Int32 = function(bytes) {
var sb = new adapt.base.StringBuffer();
sb.append(bytes);
var appendCount = (55 - bytes.length) & 63;
sb.append('\u0080');
while (appendCount > 0) {
appendCount--;
sb.append('\0');
}
sb.append('\0\0\0\0');
sb.append(adapt.sha1.encode32(bytes.length*8));
bytes = sb.toString();
var h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0];
var w = /** @type Array.<number> */ ([]);
var i;
for (var bi = 0; bi < bytes.length; bi += 64) {
for (i = 0; i < 16; i++) {
w[i] = adapt.sha1.decode32(bytes.substr(bi + 4*i, 4));
}
for ( ; i < 80; i++) {
var q = w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16];
w[i] = (q << 1) | (q >>> 31);
}
var a = h[0];
var b = h[1];
var c = h[2];
var d = h[3];
var e = h[4];
var f;
for (i = 0; i < 80; i++) {
if (i < 20) {
f = ((b & c) | (~b & d)) + 0x5A827999;
} else if (i < 40) | else if (i < 60) {
f = ((b & c) | (b & d) | (c & d)) + 0x8F1BBCDC;
} else {
f = (b ^ c ^ d) + 0xCA62C1D6;
}
f += ((a << 5) | (a >>> 27)) + e + w[i];
e = d;
d = c;
c = (b << 30) | (b >>> 2);
b = a;
a = f;
}
h[0] = (h[0] + a) | 0;
h[1] = (h[1] + b) | 0;
h[2] = (h[2] + c) | 0;
h[3] = (h[3] + d) | 0;
h[4] = (h[4] + e) | 0;
}
return h;
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {Array.<number>} uint8 numbers representing sha1 hash
*/
adapt.sha1.bytesToSHA1Int8 = function(bytes) {
var h = adapt.sha1.bytesToSHA1Int32(bytes);
var res = [];
for (var i = 0; i < h.length; i++) {
var n = h[i];
res.push((n >>> 24)&0xFF, (n >>> 16)&0xFF, (n >>> 8)&0xFF, n&0xFF);
}
return res;
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {string} chars with codes 0 - 255 equal to SHA1 hash of the input
*/
adapt.sha1.bytesToSHA1Bytes = function(bytes) {
var h = adapt.sha1.bytesToSHA1Int32(bytes);
var sb = new adapt.base.StringBuffer();
for (var i = 0; i < h.length; i++) {
sb.append(adapt.sha1.encode32(h[i]));
}
return sb.toString();
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {string} hex-encoded SHA1 hash
*/
adapt.sha1.bytesToSHA1Hex = function(bytes) {
var sha1 = adapt.sha1.bytesToSHA1Bytes(bytes);
var sb = new adapt.base.StringBuffer();
for (var i = 0; i < sha1.length; i++) {
sb.append((sha1.charCodeAt(i)|0x100).toString(16).substr(1));
}
return sb.toString();
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {string} base64-encoded SHA1 hash of the input
*/
adapt.sha1.bytesToSHA1Base64 = function(bytes) {
var sha1 = adapt.sha1.bytesToSHA1Bytes(bytes);
var sb = new adapt.base.StringBuffer();
adapt.base.appendBase64(sb, sha1);
return sb.toString();
};
| {
f = (b ^ c ^ d) + 0x6ED9EBA1;
} | conditional_block |
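The four round-function branches above implement the four 20-round stages of SHA-1, so the output can be cross-checked against a standard FIPS 180 test vector. A Python check (hashlib is only the reference here, not part of the library above):

import hashlib

assert hashlib.sha1(b"abc").hexdigest() == \
    "a9993e364706816aba3e25717850c26c9cd0d89d"
# adapt.sha1.bytesToSHA1Hex("abc") should return the same hex string.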
sha1.js | /**
* Copyright 2013 Google, Inc.
* @fileoverview Calculate SHA1 hash of the given content.
*/
goog.provide("adapt.sha1");
goog.require("adapt.base");
/**
* @param {number} n
* @return {string} big-endian byte sequence
*/
adapt.sha1.encode32 = function(n) {
return String.fromCharCode((n >>> 24)&0xFF, (n >>> 16)&0xFF, (n >>> 8)&0xFF, n&0xFF);
};
/**
* @param {string} bytes big-endian byte sequence
* @return {number}
*/
adapt.sha1.decode32 = function(bytes) {
// Important facts: "".charCodeAt(0) == NaN, NaN & 0xFF == 0
var b0 = bytes.charCodeAt(0) & 0xFF;
var b1 = bytes.charCodeAt(1) & 0xFF;
var b2 = bytes.charCodeAt(2) & 0xFF;
var b3 = bytes.charCodeAt(3) & 0xFF;
return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {Array.<number>} big-endian uint32 numbers representing sha1 hash
*/
adapt.sha1.bytesToSHA1Int32 = function(bytes) {
var sb = new adapt.base.StringBuffer();
sb.append(bytes);
var appendCount = (55 - bytes.length) & 63;
sb.append('\u0080');
while (appendCount > 0) {
appendCount--;
sb.append('\0');
}
sb.append('\0\0\0\0');
sb.append(adapt.sha1.encode32(bytes.length*8));
bytes = sb.toString();
var h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0];
var w = /** @type Array.<number> */ ([]);
var i;
for (var bi = 0; bi < bytes.length; bi += 64) {
for (i = 0; i < 16; i++) {
w[i] = adapt.sha1.decode32(bytes.substr(bi + 4*i, 4));
}
for ( ; i < 80; i++) {
var q = w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16];
w[i] = (q << 1) | (q >>> 31);
}
var a = h[0];
var b = h[1];
var c = h[2];
var d = h[3];
var e = h[4];
var f;
for (i = 0; i < 80; i++) {
if (i < 20) {
f = ((b & c) | (~b & d)) + 0x5A827999;
} else if (i < 40) {
f = (b ^ c ^ d) + 0x6ED9EBA1;
} else if (i < 60) {
f = ((b & c) | (b & d) | (c & d)) + 0x8F1BBCDC;
} else {
f = (b ^ c ^ d) + 0xCA62C1D6;
}
f += ((a << 5) | (a >>> 27)) + e + w[i];
e = d; | c = (b << 30) | (b >>> 2);
b = a;
a = f;
}
h[0] = (h[0] + a) | 0;
h[1] = (h[1] + b) | 0;
h[2] = (h[2] + c) | 0;
h[3] = (h[3] + d) | 0;
h[4] = (h[4] + e) | 0;
}
return h;
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {Array.<number>} uint8 numbers representing sha1 hash
*/
adapt.sha1.bytesToSHA1Int8 = function(bytes) {
var h = adapt.sha1.bytesToSHA1Int32(bytes);
var res = [];
for (var i = 0; i < h.length; i++) {
var n = h[i];
res.push((n >>> 24)&0xFF, (n >>> 16)&0xFF, (n >>> 8)&0xFF, n&0xFF);
}
return res;
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {string} chars with codes 0 - 255 equal to SHA1 hash of the input
*/
adapt.sha1.bytesToSHA1Bytes = function(bytes) {
var h = adapt.sha1.bytesToSHA1Int32(bytes);
var sb = new adapt.base.StringBuffer();
for (var i = 0; i < h.length; i++) {
sb.append(adapt.sha1.encode32(h[i]));
}
return sb.toString();
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {string} hex-encoded SHA1 hash
*/
adapt.sha1.bytesToSHA1Hex = function(bytes) {
var sha1 = adapt.sha1.bytesToSHA1Bytes(bytes);
var sb = new adapt.base.StringBuffer();
for (var i = 0; i < sha1.length; i++) {
sb.append((sha1.charCodeAt(i)|0x100).toString(16).substr(1));
}
return sb.toString();
};
/**
* @param {string} bytes chars with codes 0 - 255 that represent message byte values
* @return {string} base64-encoded SHA1 hash of the input
*/
adapt.sha1.bytesToSHA1Base64 = function(bytes) {
var sha1 = adapt.sha1.bytesToSHA1Bytes(bytes);
var sb = new adapt.base.StringBuffer();
adapt.base.appendBase64(sb, sha1);
return sb.toString();
}; | d = c; | random_line_split |
renderWithLoadProgress.tsx | import { Spinner, SpinnerProps } from "@artsy/palette"
import * as React from "react"
import { QueryRenderer, Container as RelayContainer } from "react-relay"
import styled from "styled-components"
import createLogger from "v2/Utils/logger"
type ReadyState = Parameters<
React.ComponentProps<typeof QueryRenderer>["render"]
>[0]
/**
* WARNING: Do _not_ change this element to something common like a div. If the
* element of this container is the same as the element used in the RelayContainer
* then rehydration can fail and cause the RelayContainer to receive styles
* from the SpinnerContainer and Spinner.
*/
const SpinnerContainer = styled.figure`
align-items: center;
display: flex;
flex-direction: column;
justify-content: center;
position: relative;
`
const RouteSpinnerContainer = styled.figure`
width: 100%;
height: 100px;
position: relative;
`
export const RouteSpinner = () => {
return (
<RouteSpinnerContainer className={LoadingClassName}>
<Spinner />
</RouteSpinnerContainer>
)
}
export const LoadingClassName = "relay-loading"
const handleError = error => {
// In tests we want errors to clearly bubble up.
if (typeof jest !== "undefined") {
throw error
}
const logger = createLogger("Artsy/Relay/renderWithLoadProgress")
if (error.message) {
logger.error(error.message)
}
const networkError = error as any
if (networkError.response && networkError.response._bodyInit) {
const body = networkError.response._bodyInit
try {
const data = JSON.parse(body)
console.error(`Metaphysics Error data:`, data)
logger.error(data)
} catch (e) {
logger.error("Metaphysics Error could not be parsed.", e)
}
}
}
export type LoadProgressRenderer<P> = (
// FIXME: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/37950
readyState: ReadyState
) => React.ReactElement<RelayContainer<P>> | null
export function renderWithLoadProgress<P>(
Container: RelayContainer<P>,
initialProps: object = {},
wrapperProps: object = {},
spinnerProps: SpinnerProps = {
delay: 1000,
}
): LoadProgressRenderer<P> | {
// TODO: We need design for retrying or the approval to use the iOS design.
// See also: https://artsyproduct.atlassian.net/browse/PLATFORM-1272
return ({ error, props, retry }) => {
if (error) {
// TODO: Should we add a callback here so that containers can gracefully
// handle an error state?
handleError(error)
return null
} else if (props) {
return <Container {...initialProps} {...(props as any)} />
} else {
return (
<SpinnerContainer className={LoadingClassName} {...wrapperProps}>
<Spinner {...spinnerProps} />
</SpinnerContainer>
)
}
}
} | identifier_body |
|
renderWithLoadProgress.tsx | import { Spinner, SpinnerProps } from "@artsy/palette"
import * as React from "react"
import { QueryRenderer, Container as RelayContainer } from "react-relay"
import styled from "styled-components"
import createLogger from "v2/Utils/logger"
| * WARNING: Do _not_ change this element to something common like a div. If the
* element of this container is the same as the element used in the RelayContainer
* then rehydration can fail and cause the RelayContainer to receive styles
* from the SpinnerContainer and Spinner.
*/
const SpinnerContainer = styled.figure`
align-items: center;
display: flex;
flex-direction: column;
justify-content: center;
position: relative;
`
const RouteSpinnerContainer = styled.figure`
width: 100%;
height: 100px;
position: relative;
`
export const RouteSpinner = () => {
return (
<RouteSpinnerContainer className={LoadingClassName}>
<Spinner />
</RouteSpinnerContainer>
)
}
export const LoadingClassName = "relay-loading"
const handleError = error => {
// In tests we want errors to clearly bubble up.
if (typeof jest !== "undefined") {
throw error
}
const logger = createLogger("Artsy/Relay/renderWithLoadProgress")
if (error.message) {
logger.error(error.message)
}
const networkError = error as any
if (networkError.response && networkError.response._bodyInit) {
const body = networkError.response._bodyInit
try {
const data = JSON.parse(body)
console.error(`Metaphysics Error data:`, data)
logger.error(data)
} catch (e) {
logger.error("Metaphysics Error could not be parsed.", e)
}
}
}
export type LoadProgressRenderer<P> = (
// FIXME: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/37950
readyState: ReadyState
) => React.ReactElement<RelayContainer<P>> | null
export function renderWithLoadProgress<P>(
Container: RelayContainer<P>,
initialProps: object = {},
wrapperProps: object = {},
spinnerProps: SpinnerProps = {
delay: 1000,
}
): LoadProgressRenderer<P> {
// TODO: We need design for retrying or the approval to use the iOS design.
// See also: https://artsyproduct.atlassian.net/browse/PLATFORM-1272
return ({ error, props, retry }) => {
if (error) {
// TODO: Should we add a callback here so that containers can gracefully
// handle an error state?
handleError(error)
return null
} else if (props) {
return <Container {...initialProps} {...(props as any)} />
} else {
return (
<SpinnerContainer className={LoadingClassName} {...wrapperProps}>
<Spinner {...spinnerProps} />
</SpinnerContainer>
)
}
}
} | type ReadyState = Parameters<
React.ComponentProps<typeof QueryRenderer>["render"]
>[0]
/** | random_line_split |
renderWithLoadProgress.tsx | import { Spinner, SpinnerProps } from "@artsy/palette"
import * as React from "react"
import { QueryRenderer, Container as RelayContainer } from "react-relay"
import styled from "styled-components"
import createLogger from "v2/Utils/logger"
type ReadyState = Parameters<
React.ComponentProps<typeof QueryRenderer>["render"]
>[0]
/**
* WARNING: Do _not_ change this element to something common like a div. If the
* element of this container is the same as the element used in the RelayContainer
* then rehydration can fail and cause the RelayContainer to receive styles
* from the SpinnerContainer and Spinner.
*/
const SpinnerContainer = styled.figure`
align-items: center;
display: flex;
flex-direction: column;
justify-content: center;
position: relative;
`
const RouteSpinnerContainer = styled.figure`
width: 100%;
height: 100px;
position: relative;
`
export const RouteSpinner = () => {
return (
<RouteSpinnerContainer className={LoadingClassName}>
<Spinner />
</RouteSpinnerContainer>
)
}
export const LoadingClassName = "relay-loading"
const handleError = error => {
// In tests we want errors to clearly bubble up.
if (typeof jest !== "undefined") {
throw error
}
const logger = createLogger("Artsy/Relay/renderWithLoadProgress")
if (error.message) {
logger.error(error.message)
}
const networkError = error as any
if (networkError.response && networkError.response._bodyInit) |
}
export type LoadProgressRenderer<P> = (
// FIXME: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/37950
readyState: ReadyState
) => React.ReactElement<RelayContainer<P>> | null
export function renderWithLoadProgress<P>(
Container: RelayContainer<P>,
initialProps: object = {},
wrapperProps: object = {},
spinnerProps: SpinnerProps = {
delay: 1000,
}
): LoadProgressRenderer<P> {
// TODO: We need design for retrying or the approval to use the iOS design.
// See also: https://artsyproduct.atlassian.net/browse/PLATFORM-1272
return ({ error, props, retry }) => {
if (error) {
// TODO: Should we add a callback here so that containers can gracefully
// handle an error state?
handleError(error)
return null
} else if (props) {
return <Container {...initialProps} {...(props as any)} />
} else {
return (
<SpinnerContainer className={LoadingClassName} {...wrapperProps}>
<Spinner {...spinnerProps} />
</SpinnerContainer>
)
}
}
}
| {
const body = networkError.response._bodyInit
try {
const data = JSON.parse(body)
console.error(`Metaphysics Error data:`, data)
logger.error(data)
} catch (e) {
logger.error("Metaphysics Error could not be parsed.", e)
}
} | conditional_block |
renderWithLoadProgress.tsx | import { Spinner, SpinnerProps } from "@artsy/palette"
import * as React from "react"
import { QueryRenderer, Container as RelayContainer } from "react-relay"
import styled from "styled-components"
import createLogger from "v2/Utils/logger"
type ReadyState = Parameters<
React.ComponentProps<typeof QueryRenderer>["render"]
>[0]
/**
* WARNING: Do _not_ change this element to something common like a div. If the
* element of this container is the same as the element used in the RelayContainer
* then rehydration can fail and cause the RelayContainer to receive styles
* from the SpinnerContainer and Spinner.
*/
const SpinnerContainer = styled.figure`
align-items: center;
display: flex;
flex-direction: column;
justify-content: center;
position: relative;
`
const RouteSpinnerContainer = styled.figure`
width: 100%;
height: 100px;
position: relative;
`
export const RouteSpinner = () => {
return (
<RouteSpinnerContainer className={LoadingClassName}>
<Spinner />
</RouteSpinnerContainer>
)
}
export const LoadingClassName = "relay-loading"
const handleError = error => {
// In tests we want errors to clearly bubble up.
if (typeof jest !== "undefined") {
throw error
}
const logger = createLogger("Artsy/Relay/renderWithLoadProgress")
if (error.message) {
logger.error(error.message)
}
const networkError = error as any
if (networkError.response && networkError.response._bodyInit) {
const body = networkError.response._bodyInit
try {
const data = JSON.parse(body)
console.error(`Metaphysics Error data:`, data)
logger.error(data)
} catch (e) {
logger.error("Metaphysics Error could not be parsed.", e)
}
}
}
export type LoadProgressRenderer<P> = (
// FIXME: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/37950
readyState: ReadyState
) => React.ReactElement<RelayContainer<P>> | null
export function | <P>(
Container: RelayContainer<P>,
initialProps: object = {},
wrapperProps: object = {},
spinnerProps: SpinnerProps = {
delay: 1000,
}
): LoadProgressRenderer<P> {
// TODO: We need design for retrying or the approval to use the iOS design.
// See also: https://artsyproduct.atlassian.net/browse/PLATFORM-1272
return ({ error, props, retry }) => {
if (error) {
// TODO: Should we add a callback here so that containers can gracefully
// handle an error state?
handleError(error)
return null
} else if (props) {
return <Container {...initialProps} {...(props as any)} />
} else {
return (
<SpinnerContainer className={LoadingClassName} {...wrapperProps}>
<Spinner {...spinnerProps} />
</SpinnerContainer>
)
}
}
}
| renderWithLoadProgress | identifier_name |
authenticator.js | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* @fileoverview An UI component to authenciate to Chrome. The component hosts
* IdP web pages in a webview. A client who is interested in monitoring
* authentication events should pass a listener object of type
* cr.login.GaiaAuthHost.Listener as defined in this file. After initialization,
* call {@code load} to start the authentication flow.
*/
cr.define('cr.login', function() {
'use strict';
// TODO(rogerta): should use gaia URL from GaiaUrls::gaia_url() instead
// of hardcoding the prod URL here. As is, this does not work with staging
// environments.
var IDP_ORIGIN = 'https://accounts.google.com/';
var IDP_PATH = 'ServiceLogin?skipvpage=true&sarp=1&rm=hide';
var CONTINUE_URL =
'chrome-extension://mfffpogegjflfpflabcdkioaeobkgjik/success.html';
var SIGN_IN_HEADER = 'google-accounts-signin';
var EMBEDDED_FORM_HEADER = 'google-accounts-embedded';
var SAML_HEADER = 'google-accounts-saml';
var LOCATION_HEADER = 'location';
/**
* The source URL parameter for the constrained signin flow.
*/
var CONSTRAINED_FLOW_SOURCE = 'chrome';
/**
* Enum for the authorization mode, must match AuthMode defined in
* chrome/browser/ui/webui/inline_login_ui.cc.
* @enum {number}
*/
var AuthMode = {
DEFAULT: 0,
OFFLINE: 1,
DESKTOP: 2
};
/**
* Enum for the authorization type.
* @enum {number}
*/
var AuthFlow = {
DEFAULT: 0,
SAML: 1
};
/**
* Initializes the authenticator component.
* @param {webview|string} webview The webview element or its ID to host IdP
* web pages.
* @constructor
*/
function | (webview) {
this.webview_ = typeof webview == 'string' ? $(webview) : webview;
assert(this.webview_);
this.email_ = null;
this.password_ = null;
this.gaiaId_ = null;
this.sessionIndex_ = null;
this.chooseWhatToSync_ = false;
this.skipForNow_ = false;
this.authFlow_ = AuthFlow.DEFAULT;
this.loaded_ = false;
this.idpOrigin_ = null;
this.continueUrl_ = null;
this.continueUrlWithoutParams_ = null;
this.initialFrameUrl_ = null;
this.reloadUrl_ = null;
this.trusted_ = true;
}
// TODO(guohui,xiyuan): no need to inherit EventTarget once we deprecate the
// old event-based signin flow.
Authenticator.prototype = Object.create(cr.EventTarget.prototype);
/**
* Loads the authenticator component with the given parameters.
* @param {AuthMode} authMode Authorization mode.
* @param {Object} data Parameters for the authorization flow.
*/
Authenticator.prototype.load = function(authMode, data) {
this.idpOrigin_ = data.gaiaUrl || IDP_ORIGIN;
this.continueUrl_ = data.continueUrl || CONTINUE_URL;
this.continueUrlWithoutParams_ =
this.continueUrl_.substring(0, this.continueUrl_.indexOf('?')) ||
this.continueUrl_;
this.isConstrainedWindow_ = data.constrained == '1';
this.initialFrameUrl_ = this.constructInitialFrameUrl_(data);
this.reloadUrl_ = data.frameUrl || this.initialFrameUrl_;
this.authFlow_ = AuthFlow.DEFAULT;
this.webview_.src = this.reloadUrl_;
this.webview_.addEventListener(
'newwindow', this.onNewWindow_.bind(this));
this.webview_.addEventListener(
'loadstop', this.onLoadStop_.bind(this));
this.webview_.request.onCompleted.addListener(
this.onRequestCompleted_.bind(this),
{urls: ['*://*/*', this.continueUrlWithoutParams_ + '*'],
types: ['main_frame']},
['responseHeaders']);
this.webview_.request.onHeadersReceived.addListener(
this.onHeadersReceived_.bind(this),
{urls: [this.idpOrigin_ + '*'], types: ['main_frame']},
['responseHeaders']);
window.addEventListener(
'message', this.onMessageFromWebview_.bind(this), false);
window.addEventListener(
'focus', this.onFocus_.bind(this), false);
window.addEventListener(
'popstate', this.onPopState_.bind(this), false);
};
/**
* Reloads the authenticator component.
*/
Authenticator.prototype.reload = function() {
this.webview_.src = this.reloadUrl_;
this.authFlow_ = AuthFlow.DEFAULT;
};
Authenticator.prototype.constructInitialFrameUrl_ = function(data) {
var url = this.idpOrigin_ + (data.gaiaPath || IDP_PATH);
url = appendParam(url, 'continue', this.continueUrl_);
url = appendParam(url, 'service', data.service);
if (data.hl)
url = appendParam(url, 'hl', data.hl);
if (data.email)
url = appendParam(url, 'Email', data.email);
if (this.isConstrainedWindow_)
url = appendParam(url, 'source', CONSTRAINED_FLOW_SOURCE);
return url;
};
/**
* Invoked when a main frame request in the webview has completed.
* @private
*/
Authenticator.prototype.onRequestCompleted_ = function(details) {
var currentUrl = details.url;
if (currentUrl.lastIndexOf(this.continueUrlWithoutParams_, 0) == 0) {
if (currentUrl.indexOf('ntp=1') >= 0)
this.skipForNow_ = true;
this.onAuthCompleted_();
return;
}
if (currentUrl.indexOf('https') != 0)
this.trusted_ = false;
if (this.isConstrainedWindow_) {
var isEmbeddedPage = false;
if (this.idpOrigin_ && currentUrl.lastIndexOf(this.idpOrigin_) == 0) {
var headers = details.responseHeaders;
for (var i = 0; headers && i < headers.length; ++i) {
if (headers[i].name.toLowerCase() == EMBEDDED_FORM_HEADER) {
isEmbeddedPage = true;
break;
}
}
}
if (!isEmbeddedPage) {
this.dispatchEvent(new CustomEvent('resize', {detail: currentUrl}));
return;
}
}
this.updateHistoryState_(currentUrl);
// Posts a message to IdP pages to initiate communication.
if (currentUrl.lastIndexOf(this.idpOrigin_) == 0)
this.webview_.contentWindow.postMessage({}, currentUrl);
};
/**
* Manually updates the history. Invoked upon completion of a webview
* navigation.
* @param {string} url Request URL.
* @private
*/
Authenticator.prototype.updateHistoryState_ = function(url) {
if (history.state && history.state.url != url)
history.pushState({url: url}, '');
else
history.replaceState({url: url}, '');
};
/**
* Invoked when the sign-in page takes focus.
* @param {object} e The focus event being triggered.
* @private
*/
Authenticator.prototype.onFocus_ = function(e) {
this.webview_.focus();
};
/**
* Invoked when the history state is changed.
* @param {object} e The popstate event being triggered.
* @private
*/
Authenticator.prototype.onPopState_ = function(e) {
var state = e.state;
if (state && state.url)
this.webview_.src = state.url;
};
/**
* Invoked when headers are received in the main frame of the webview. It
* 1) reads the authenticated user info from a signin header,
* 2) signals the start of a saml flow upon receiving a saml header.
* @return {!Object} Modified request headers.
* @private
*/
Authenticator.prototype.onHeadersReceived_ = function(details) {
var headers = details.responseHeaders;
for (var i = 0; headers && i < headers.length; ++i) {
var header = headers[i];
var headerName = header.name.toLowerCase();
if (headerName == SIGN_IN_HEADER) {
var headerValues = header.value.toLowerCase().split(',');
var signinDetails = {};
headerValues.forEach(function(e) {
var pair = e.split('=');
signinDetails[pair[0].trim()] = pair[1].trim();
});
// Removes "" around.
var email = signinDetails['email'].slice(1, -1);
if (this.email_ != email) {
this.email_ = email;
// Clears the scraped password if the email has changed.
this.password_ = null;
}
this.gaiaId_ = signinDetails['obfuscatedid'].slice(1, -1);
this.sessionIndex_ = signinDetails['sessionindex'];
} else if (headerName == SAML_HEADER) {
this.authFlow_ = AuthFlow.SAML;
} else if (headerName == LOCATION_HEADER) {
// If the "choose what to sync" checkbox was clicked, then the continue
// URL will contain a source=3 field.
var location = decodeURIComponent(header.value);
this.chooseWhatToSync_ = !!location.match(/(\?|&)source=3($|&)/);
}
}
};
/**
* Invoked when an HTML5 message is received from the webview element.
* @param {object} e Payload of the received HTML5 message.
* @private
*/
Authenticator.prototype.onMessageFromWebview_ = function(e) {
// The event origin does not have a trailing slash.
if (e.origin != this.idpOrigin_.substring(0, this.idpOrigin_.length - 1)) {
return;
}
var msg = e.data;
if (msg.method == 'attemptLogin') {
this.email_ = msg.email;
this.password_ = msg.password;
this.chooseWhatToSync_ = msg.chooseWhatToSync;
}
};
/**
* Invoked to process authentication completion.
* @private
*/
Authenticator.prototype.onAuthCompleted_ = function() {
if (!this.email_ && !this.skipForNow_) {
this.webview_.src = this.initialFrameUrl_;
return;
}
this.dispatchEvent(
new CustomEvent('authCompleted',
{detail: {email: this.email_,
gaiaId: this.gaiaId_,
password: this.password_,
usingSAML: this.authFlow_ == AuthFlow.SAML,
chooseWhatToSync: this.chooseWhatToSync_,
skipForNow: this.skipForNow_,
sessionIndex: this.sessionIndex_ || '',
trusted: this.trusted_}}));
};
/**
* Invoked when the webview attempts to open a new window.
* @private
*/
Authenticator.prototype.onNewWindow_ = function(e) {
this.dispatchEvent(new CustomEvent('newWindow', {detail: e}));
};
/**
* Invoked when the webview finishes loading a page.
* @private
*/
Authenticator.prototype.onLoadStop_ = function(e) {
if (!this.loaded_) {
this.loaded_ = true;
this.webview_.focus();
this.dispatchEvent(new Event('ready'));
}
};
Authenticator.AuthFlow = AuthFlow;
Authenticator.AuthMode = AuthMode;
return {
// TODO(guohui, xiyuan): Rename GaiaAuthHost to Authenticator once the old
// iframe-based flow is deprecated.
GaiaAuthHost: Authenticator
};
});
| Authenticator | identifier_name |
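The `google-accounts-signin` header handled in `onHeadersReceived_` above is a comma-separated list of key=value pairs. A Python sketch of that parsing, mirroring the lowercasing and trimming done in the JavaScript; the header value below is a made-up example in the same format:

def parse_signin_header(value):
    details = {}
    for pair in value.lower().split(','):
        key, _, val = pair.partition('=')
        details[key.strip()] = val.strip()
    return details

d = parse_signin_header('email="user@example.com",obfuscatedid="12345",sessionindex=0')
assert d['email'].strip('"') == 'user@example.com'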