| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
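Each row below follows the standard fill-in-the-middle (FIM) layout: `middle` is the span cut out of the original source file, `prefix` and `suffix` are the text before and after that span, and `fim_type` records which of the four span kinds was removed (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). A minimal sketch of how a row could be reassembled and sanity-checked is shown below; the `row` dict shape and helper names are illustrative only, not the API of any particular dataset loader.

```python
# Illustrative sketch (assumed row layout, not a specific library API):
# a FIM row reassembles to the original file as prefix + middle + suffix.
FIM_TYPES = {"identifier_name", "identifier_body",
             "conditional_block", "random_line_split"}

def reassemble(row: dict) -> str:
    """Rebuild the original source text from one FIM row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def check_row(row: dict) -> None:
    """Basic sanity checks on one row of the table above."""
    assert row["fim_type"] in FIM_TYPES
    assert row["middle"] in reassemble(row)

# Hypothetical example shaped like the first row (rpc_mock.py, identifier_body).
example = {
    "file_name": "rpc_mock.py",
    "prefix": "    def getinfo(self):\n        ",
    "middle": 'print("No info")',
    "suffix": "\n    def getrawtransaction(self, tx_id, verbose=1):",
    "fim_type": "identifier_body",
}
check_row(example)
```

A model trained or evaluated on such rows is shown `prefix` and `suffix` and asked to produce `middle`.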
rpc_mock.py
|
from bitcoingraph.bitcoind import BitcoinProxy, BitcoindException
from pathlib import Path
import json
TEST_DATA_PATH = "tests/data"
class BitcoinProxyMock(BitcoinProxy):
def __init__(self, host=None, port=None):
super().__init__(host, port)
self.heights = {}
self.blocks = {}
self.txs = {}
self.load_testdata()
# Load test data into local dicts
def load_testdata(self):
p = Path(TEST_DATA_PATH)
files = [x for x in p.iterdir()
if x.is_file() and x.name.endswith('json')]
for f in files:
if f.name.startswith("block"):
height = f.name[6:-5]
with f.open() as jf:
raw_block = json.load(jf)
block_hash = raw_block['hash']
self.heights[int(height)] = block_hash
self.blocks[block_hash] = raw_block
elif f.name.startswith("tx"):
tx_hash = f.name[3:-5]
with f.open() as jf:
raw_block = json.load(jf)
self.txs[tx_hash] = raw_block
# Override production proxy methods
def getblock(self, block_hash):
if block_hash not in self.blocks:
raise BitcoindException("Unknown block", block_hash)
else:
return self.blocks[block_hash]
def getblockcount(self):
return max(self.heights.keys())
def getblockhash(self, block_height):
if block_height not in self.heights:
raise BitcoindException("Unknown height", block_height)
else:
return self.heights[block_height]
def getinfo(self):
|
def getrawtransaction(self, tx_id, verbose=1):
if tx_id not in self.txs:
raise BitcoindException("Unknown transaction", tx_id)
else:
return self.txs[tx_id]
def getrawtransactions(self, tx_ids, verbose=1):
results = []
for tx_id in tx_ids:
results.append(self.getrawtransaction(tx_id, verbose))
return results
|
print("No info")
|
identifier_body
|
rpc_mock.py
|
from bitcoingraph.bitcoind import BitcoinProxy, BitcoindException
from pathlib import Path
import json
TEST_DATA_PATH = "tests/data"
class BitcoinProxyMock(BitcoinProxy):
def __init__(self, host=None, port=None):
super().__init__(host, port)
self.heights = {}
self.blocks = {}
self.txs = {}
self.load_testdata()
# Load test data into local dicts
def
|
(self):
p = Path(TEST_DATA_PATH)
files = [x for x in p.iterdir()
if x.is_file() and x.name.endswith('json')]
for f in files:
if f.name.startswith("block"):
height = f.name[6:-5]
with f.open() as jf:
raw_block = json.load(jf)
block_hash = raw_block['hash']
self.heights[int(height)] = block_hash
self.blocks[block_hash] = raw_block
elif f.name.startswith("tx"):
tx_hash = f.name[3:-5]
with f.open() as jf:
raw_block = json.load(jf)
self.txs[tx_hash] = raw_block
# Override production proxy methods
def getblock(self, block_hash):
if block_hash not in self.blocks:
raise BitcoindException("Unknown block", block_hash)
else:
return self.blocks[block_hash]
def getblockcount(self):
return max(self.heights.keys())
def getblockhash(self, block_height):
if block_height not in self.heights:
raise BitcoindException("Unknown height", block_height)
else:
return self.heights[block_height]
def getinfo(self):
print("No info")
def getrawtransaction(self, tx_id, verbose=1):
if tx_id not in self.txs:
raise BitcoindException("Unknown transaction", tx_id)
else:
return self.txs[tx_id]
def getrawtransactions(self, tx_ids, verbose=1):
results = []
for tx_id in tx_ids:
results.append(self.getrawtransaction(tx_id, verbose))
return results
|
load_testdata
|
identifier_name
|
rpc_mock.py
|
from bitcoingraph.bitcoind import BitcoinProxy, BitcoindException
from pathlib import Path
import json
TEST_DATA_PATH = "tests/data"
class BitcoinProxyMock(BitcoinProxy):
def __init__(self, host=None, port=None):
super().__init__(host, port)
self.heights = {}
self.blocks = {}
self.txs = {}
self.load_testdata()
# Load test data into local dicts
def load_testdata(self):
p = Path(TEST_DATA_PATH)
files = [x for x in p.iterdir()
if x.is_file() and x.name.endswith('json')]
|
height = f.name[6:-5]
with f.open() as jf:
raw_block = json.load(jf)
block_hash = raw_block['hash']
self.heights[int(height)] = block_hash
self.blocks[block_hash] = raw_block
elif f.name.startswith("tx"):
tx_hash = f.name[3:-5]
with f.open() as jf:
raw_block = json.load(jf)
self.txs[tx_hash] = raw_block
# Override production proxy methods
def getblock(self, block_hash):
if block_hash not in self.blocks:
raise BitcoindException("Unknown block", block_hash)
else:
return self.blocks[block_hash]
def getblockcount(self):
return max(self.heights.keys())
def getblockhash(self, block_height):
if block_height not in self.heights:
raise BitcoindException("Unknown height", block_height)
else:
return self.heights[block_height]
def getinfo(self):
print("No info")
def getrawtransaction(self, tx_id, verbose=1):
if tx_id not in self.txs:
raise BitcoindException("Unknown transaction", tx_id)
else:
return self.txs[tx_id]
def getrawtransactions(self, tx_ids, verbose=1):
results = []
for tx_id in tx_ids:
results.append(self.getrawtransaction(tx_id, verbose))
return results
|
for f in files:
if f.name.startswith("block"):
|
random_line_split
|
body.rs
|
use std::io::Read;
use std::fs::File;
use std::fmt;
/// Body type for a request.
#[derive(Debug)]
pub struct Body {
reader: Kind,
}
impl Body {
/// Instantiate a `Body` from a reader.
///
/// # Note
///
/// While allowing for many types to be used, these bodies do not have
/// a way to reset to the beginning and be reused. This means that when
/// encountering a 307 or 308 status code, instead of repeating the
/// request at the new location, the `Response` will be returned with
/// the redirect status code set.
///
/// A `Body` constructed from a set of bytes, like `String` or `Vec<u8>`,
/// is stored differently and can be reused.
pub fn new<R: Read + Send + 'static>(reader: R) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), None),
}
}
/// Create a `Body` from a `Reader` where we can predict the size in
/// advance, but where we don't want to load the data in memory. This
/// is useful if we need to ensure `Content-Length` is passed with the
/// request.
pub fn sized<R: Read + Send + 'static>(reader: R, len: u64) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), Some(len)),
}
}
/*
pub fn chunked(reader: ()) -> Body {
unimplemented!()
}
*/
}
// useful for tests, but not publicly exposed
#[cfg(test)]
pub fn read_to_string(mut body: Body) -> ::std::io::Result<String> {
let mut s = String::new();
match body.reader {
Kind::Reader(ref mut reader, _) => {
reader.read_to_string(&mut s)
}
Kind::Bytes(ref mut bytes) => {
(&**bytes).read_to_string(&mut s)
}
}.map(|_| s)
}
enum Kind {
Reader(Box<Read + Send>, Option<u64>),
Bytes(Vec<u8>),
}
impl From<Vec<u8>> for Body {
#[inline]
fn from(v: Vec<u8>) -> Body {
Body {
reader: Kind::Bytes(v),
}
}
}
impl From<String> for Body {
#[inline]
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a [u8]> for Body {
#[inline]
fn from(s: &'a [u8]) -> Body {
s.to_vec().into()
}
}
impl<'a> From<&'a str> for Body {
#[inline]
fn from(s: &'a str) -> Body {
s.as_bytes().into()
}
}
impl From<File> for Body {
#[inline]
fn from(f: File) -> Body {
let len = f.metadata().map(|m| m.len()).ok();
Body {
reader: Kind::Reader(Box::new(f), len),
}
}
}
impl fmt::Debug for Kind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Kind::Reader(_, ref v) => f.debug_tuple("Kind::Reader").field(&"_").field(v).finish(),
&Kind::Bytes(ref v) => f.debug_tuple("Kind::Bytes").field(v).finish(),
}
}
}
// Wraps a `std::io::Write`.
//pub struct Pipe(Kind);
pub fn as_hyper_body<'a>(body: &'a mut Body) -> ::hyper::client::Body<'a>
|
pub fn can_reset(body: &Body) -> bool {
match body.reader {
Kind::Bytes(_) => true,
Kind::Reader(..) => false,
}
}
|
{
match body.reader {
Kind::Bytes(ref bytes) => {
let len = bytes.len();
::hyper::client::Body::BufBody(bytes, len)
}
Kind::Reader(ref mut reader, len_opt) => {
match len_opt {
Some(len) => ::hyper::client::Body::SizedBody(reader, len),
None => ::hyper::client::Body::ChunkedBody(reader),
}
}
}
}
|
identifier_body
|
body.rs
|
use std::io::Read;
use std::fs::File;
use std::fmt;
/// Body type for a request.
#[derive(Debug)]
pub struct Body {
reader: Kind,
}
impl Body {
/// Instantiate a `Body` from a reader.
///
/// # Note
///
/// While allowing for many types to be used, these bodies do not have
/// a way to reset to the beginning and be reused. This means that when
/// encountering a 307 or 308 status code, instead of repeating the
/// request at the new location, the `Response` will be returned with
/// the redirect status code set.
///
/// A `Body` constructed from a set of bytes, like `String` or `Vec<u8>`,
/// is stored differently and can be reused.
pub fn new<R: Read + Send + 'static>(reader: R) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), None),
}
}
/// Create a `Body` from a `Reader` where we can predict the size in
/// advance, but where we don't want to load the data in memory. This
/// is useful if we need to ensure `Content-Length` is passed with the
/// request.
pub fn sized<R: Read + Send + 'static>(reader: R, len: u64) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), Some(len)),
}
}
/*
pub fn chunked(reader: ()) -> Body {
unimplemented!()
}
*/
}
// useful for tests, but not publicly exposed
#[cfg(test)]
pub fn read_to_string(mut body: Body) -> ::std::io::Result<String> {
let mut s = String::new();
match body.reader {
Kind::Reader(ref mut reader, _) =>
|
Kind::Bytes(ref mut bytes) => {
(&**bytes).read_to_string(&mut s)
}
}.map(|_| s)
}
enum Kind {
Reader(Box<Read + Send>, Option<u64>),
Bytes(Vec<u8>),
}
impl From<Vec<u8>> for Body {
#[inline]
fn from(v: Vec<u8>) -> Body {
Body {
reader: Kind::Bytes(v),
}
}
}
impl From<String> for Body {
#[inline]
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a [u8]> for Body {
#[inline]
fn from(s: &'a [u8]) -> Body {
s.to_vec().into()
}
}
impl<'a> From<&'a str> for Body {
#[inline]
fn from(s: &'a str) -> Body {
s.as_bytes().into()
}
}
impl From<File> for Body {
#[inline]
fn from(f: File) -> Body {
let len = f.metadata().map(|m| m.len()).ok();
Body {
reader: Kind::Reader(Box::new(f), len),
}
}
}
impl fmt::Debug for Kind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Kind::Reader(_, ref v) => f.debug_tuple("Kind::Reader").field(&"_").field(v).finish(),
&Kind::Bytes(ref v) => f.debug_tuple("Kind::Bytes").field(v).finish(),
}
}
}
// Wraps a `std::io::Write`.
//pub struct Pipe(Kind);
pub fn as_hyper_body<'a>(body: &'a mut Body) -> ::hyper::client::Body<'a> {
match body.reader {
Kind::Bytes(ref bytes) => {
let len = bytes.len();
::hyper::client::Body::BufBody(bytes, len)
}
Kind::Reader(ref mut reader, len_opt) => {
match len_opt {
Some(len) => ::hyper::client::Body::SizedBody(reader, len),
None => ::hyper::client::Body::ChunkedBody(reader),
}
}
}
}
pub fn can_reset(body: &Body) -> bool {
match body.reader {
Kind::Bytes(_) => true,
Kind::Reader(..) => false,
}
}
|
{
reader.read_to_string(&mut s)
}
|
conditional_block
|
body.rs
|
use std::io::Read;
use std::fs::File;
use std::fmt;
/// Body type for a request.
#[derive(Debug)]
pub struct Body {
reader: Kind,
}
impl Body {
/// Instantiate a `Body` from a reader.
///
/// # Note
///
/// While allowing for many types to be used, these bodies do not have
/// a way to reset to the beginning and be reused. This means that when
/// encountering a 307 or 308 status code, instead of repeating the
/// request at the new location, the `Response` will be returned with
/// the redirect status code set.
///
/// A `Body` constructed from a set of bytes, like `String` or `Vec<u8>`,
/// is stored differently and can be reused.
pub fn new<R: Read + Send + 'static>(reader: R) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), None),
}
}
/// Create a `Body` from a `Reader` where we can predict the size in
/// advance, but where we don't want to load the data in memory. This
/// is useful if we need to ensure `Content-Length` is passed with the
/// request.
pub fn sized<R: Read + Send + 'static>(reader: R, len: u64) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), Some(len)),
}
}
/*
pub fn chunked(reader: ()) -> Body {
unimplemented!()
}
*/
}
// useful for tests, but not publicly exposed
#[cfg(test)]
pub fn read_to_string(mut body: Body) -> ::std::io::Result<String> {
let mut s = String::new();
match body.reader {
Kind::Reader(ref mut reader, _) => {
reader.read_to_string(&mut s)
}
Kind::Bytes(ref mut bytes) => {
(&**bytes).read_to_string(&mut s)
}
}.map(|_| s)
}
enum Kind {
Reader(Box<Read + Send>, Option<u64>),
Bytes(Vec<u8>),
}
impl From<Vec<u8>> for Body {
#[inline]
fn from(v: Vec<u8>) -> Body {
Body {
reader: Kind::Bytes(v),
}
}
}
impl From<String> for Body {
#[inline]
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a [u8]> for Body {
#[inline]
fn from(s: &'a [u8]) -> Body {
s.to_vec().into()
}
}
impl<'a> From<&'a str> for Body {
#[inline]
fn from(s: &'a str) -> Body {
s.as_bytes().into()
}
}
impl From<File> for Body {
#[inline]
fn from(f: File) -> Body {
let len = f.metadata().map(|m| m.len()).ok();
Body {
reader: Kind::Reader(Box::new(f), len),
}
}
}
impl fmt::Debug for Kind {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Kind::Reader(_, ref v) => f.debug_tuple("Kind::Reader").field(&"_").field(v).finish(),
&Kind::Bytes(ref v) => f.debug_tuple("Kind::Bytes").field(v).finish(),
}
}
}
// Wraps a `std::io::Write`.
//pub struct Pipe(Kind);
pub fn as_hyper_body<'a>(body: &'a mut Body) -> ::hyper::client::Body<'a> {
match body.reader {
Kind::Bytes(ref bytes) => {
let len = bytes.len();
::hyper::client::Body::BufBody(bytes, len)
}
Kind::Reader(ref mut reader, len_opt) => {
match len_opt {
Some(len) => ::hyper::client::Body::SizedBody(reader, len),
None => ::hyper::client::Body::ChunkedBody(reader),
}
}
}
}
pub fn can_reset(body: &Body) -> bool {
match body.reader {
Kind::Bytes(_) => true,
Kind::Reader(..) => false,
}
}
|
fmt
|
identifier_name
|
body.rs
|
use std::io::Read;
use std::fs::File;
use std::fmt;
/// Body type for a request.
#[derive(Debug)]
pub struct Body {
reader: Kind,
}
impl Body {
/// Instantiate a `Body` from a reader.
///
/// # Note
///
/// While allowing for many types to be used, these bodies do not have
/// a way to reset to the beginning and be reused. This means that when
/// encountering a 307 or 308 status code, instead of repeating the
/// request at the new location, the `Response` will be returned with
/// the redirect status code set.
///
/// A `Body` constructed from a set of bytes, like `String` or `Vec<u8>`,
/// is stored differently and can be reused.
pub fn new<R: Read + Send + 'static>(reader: R) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), None),
}
}
/// Create a `Body` from a `Reader` where we can predict the size in
/// advance, but where we don't want to load the data in memory. This
/// is useful if we need to ensure `Content-Length` is passed with the
/// request.
pub fn sized<R: Read + Send + 'static>(reader: R, len: u64) -> Body {
Body {
reader: Kind::Reader(Box::new(reader), Some(len)),
}
}
/*
pub fn chunked(reader: ()) -> Body {
unimplemented!()
}
*/
}
// useful for tests, but not publicly exposed
#[cfg(test)]
pub fn read_to_string(mut body: Body) -> ::std::io::Result<String> {
let mut s = String::new();
match body.reader {
Kind::Reader(ref mut reader, _) => {
reader.read_to_string(&mut s)
}
Kind::Bytes(ref mut bytes) => {
(&**bytes).read_to_string(&mut s)
}
}.map(|_| s)
}
enum Kind {
Reader(Box<Read + Send>, Option<u64>),
Bytes(Vec<u8>),
}
impl From<Vec<u8>> for Body {
#[inline]
fn from(v: Vec<u8>) -> Body {
Body {
reader: Kind::Bytes(v),
}
}
}
impl From<String> for Body {
#[inline]
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a [u8]> for Body {
#[inline]
fn from(s: &'a [u8]) -> Body {
s.to_vec().into()
}
}
impl<'a> From<&'a str> for Body {
#[inline]
fn from(s: &'a str) -> Body {
s.as_bytes().into()
}
}
impl From<File> for Body {
|
}
}
}
impl fmt::Debug for Kind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Kind::Reader(_, ref v) => f.debug_tuple("Kind::Reader").field(&"_").field(v).finish(),
&Kind::Bytes(ref v) => f.debug_tuple("Kind::Bytes").field(v).finish(),
}
}
}
// Wraps a `std::io::Write`.
//pub struct Pipe(Kind);
pub fn as_hyper_body<'a>(body: &'a mut Body) -> ::hyper::client::Body<'a> {
match body.reader {
Kind::Bytes(ref bytes) => {
let len = bytes.len();
::hyper::client::Body::BufBody(bytes, len)
}
Kind::Reader(ref mut reader, len_opt) => {
match len_opt {
Some(len) => ::hyper::client::Body::SizedBody(reader, len),
None => ::hyper::client::Body::ChunkedBody(reader),
}
}
}
}
pub fn can_reset(body: &Body) -> bool {
match body.reader {
Kind::Bytes(_) => true,
Kind::Reader(..) => false,
}
}
|
#[inline]
fn from(f: File) -> Body {
let len = f.metadata().map(|m| m.len()).ok();
Body {
reader: Kind::Reader(Box::new(f), len),
|
random_line_split
|
go_thrift_gen.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.process_handler import subprocess
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
sources_globs = ('**/*',)
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', type=str, advanced=True, fingerprint=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', type=target_option, advanced=True,
help='Use this thrift import on symbolic defs.')
register('--multiple-files-per-target-override', advanced=True, fingerprint=True,
help='If set, multiple thrift files will be allowed per target, regardless of '
'thrift version. Otherwise, only versions greater than 0.10.0 will be assumed to '
'support multiple files.')
@classmethod
def subsystem_dependencies(cls):
return super(GoThriftGen, cls).subsystem_dependencies() + (Thrift.scoped(cls),)
@property
def
|
(self):
return self._thrift.select(context=self.context)
@property
def _thrift_version(self):
return self._thrift.version(context=self.context)
@memoized_property
def _thrift(self):
return Thrift.scoped_instance(self)
@memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_target
if thrift_import_target is None:
raise TaskError('Option thrift_import_target in scope {} must be set.'.format(
self.options_scope))
thrift_imports = self.context.resolve(thrift_import_target)
return thrift_imports
@memoized_property
def _service_deps(self):
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
return namespace.group(1)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_method
def _validate_supports_more_than_one_source(self):
# Support for doing the right thing with multiple files landed in
# https://issues.apache.org/jira/browse/THRIFT-3776; first available in 0.10.0
if self.get_options().multiple_files_per_target_override:
return
required_version = '0.10.0'
if Revision.semver(self._thrift_version) < Revision.semver(required_version):
raise TaskError('A single .thrift source file is supported per go_thrift_library with thrift '
'version `{}`: upgrade to at least `{}` to support multiple files.'.format(
self._thrift_version, required_version))
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
if thrift_import is None:
raise TaskError('Option thrift_import in scope {} must be set.'.format(self.options_scope))
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
self._validate_supports_more_than_one_source()
for source in all_sources:
file_cmd = target_cmd + [os.path.join(get_buildroot(), source)]
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(file_cmd)) as workunit:
result = subprocess.call(file_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
|
_thrift_binary
|
identifier_name
|
go_thrift_gen.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
|
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.process_handler import subprocess
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
sources_globs = ('**/*',)
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', type=str, advanced=True, fingerprint=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', type=target_option, advanced=True,
help='Use this thrift import on symbolic defs.')
register('--multiple-files-per-target-override', advanced=True, fingerprint=True,
help='If set, multiple thrift files will be allowed per target, regardless of '
'thrift version. Otherwise, only versions greater than 0.10.0 will be assumed to '
'support multiple files.')
@classmethod
def subsystem_dependencies(cls):
return super(GoThriftGen, cls).subsystem_dependencies() + (Thrift.scoped(cls),)
@property
def _thrift_binary(self):
return self._thrift.select(context=self.context)
@property
def _thrift_version(self):
return self._thrift.version(context=self.context)
@memoized_property
def _thrift(self):
return Thrift.scoped_instance(self)
@memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_target
if thrift_import_target is None:
raise TaskError('Option thrift_import_target in scope {} must be set.'.format(
self.options_scope))
thrift_imports = self.context.resolve(thrift_import_target)
return thrift_imports
@memoized_property
def _service_deps(self):
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
return namespace.group(1)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_method
def _validate_supports_more_than_one_source(self):
# Support for doing the right thing with multiple files landed in
# https://issues.apache.org/jira/browse/THRIFT-3776; first available in 0.10.0
if self.get_options().multiple_files_per_target_override:
return
required_version = '0.10.0'
if Revision.semver(self._thrift_version) < Revision.semver(required_version):
raise TaskError('A single .thrift source file is supported per go_thrift_library with thrift '
'version `{}`: upgrade to at least `{}` to support multiple files.'.format(
self._thrift_version, required_version))
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
if thrift_import is None:
raise TaskError('Option thrift_import in scope {} must be set.'.format(self.options_scope))
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
self._validate_supports_more_than_one_source()
for source in all_sources:
file_cmd = target_cmd + [os.path.join(get_buildroot(), source)]
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(file_cmd)) as workunit:
result = subprocess.call(file_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
|
from pants.task.simple_codegen_task import SimpleCodegenTask
|
random_line_split
|
go_thrift_gen.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.process_handler import subprocess
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
sources_globs = ('**/*',)
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', type=str, advanced=True, fingerprint=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', type=target_option, advanced=True,
help='Use this thrift import on symbolic defs.')
register('--multiple-files-per-target-override', advanced=True, fingerprint=True,
help='If set, multiple thrift files will be allowed per target, regardless of '
'thrift version. Otherwise, only versions greater than 0.10.0 will be assumed to '
'support multiple files.')
@classmethod
def subsystem_dependencies(cls):
return super(GoThriftGen, cls).subsystem_dependencies() + (Thrift.scoped(cls),)
@property
def _thrift_binary(self):
return self._thrift.select(context=self.context)
@property
def _thrift_version(self):
return self._thrift.version(context=self.context)
@memoized_property
def _thrift(self):
return Thrift.scoped_instance(self)
@memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_target
if thrift_import_target is None:
raise TaskError('Option thrift_import_target in scope {} must be set.'.format(
self.options_scope))
thrift_imports = self.context.resolve(thrift_import_target)
return thrift_imports
@memoized_property
def _service_deps(self):
|
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
return namespace.group(1)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_method
def _validate_supports_more_than_one_source(self):
# Support for doing the right thing with multiple files landed in
# https://issues.apache.org/jira/browse/THRIFT-3776; first available in 0.10.0
if self.get_options().multiple_files_per_target_override:
return
required_version = '0.10.0'
if Revision.semver(self._thrift_version) < Revision.semver(required_version):
raise TaskError('A single .thrift source file is supported per go_thrift_library with thrift '
'version `{}`: upgrade to at least `{}` to support multiple files.'.format(
self._thrift_version, required_version))
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
if thrift_import is None:
raise TaskError('Option thrift_import in scope {} must be set.'.format(self.options_scope))
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
self._validate_supports_more_than_one_source()
for source in all_sources:
file_cmd = target_cmd + [os.path.join(get_buildroot(), source)]
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(file_cmd)) as workunit:
result = subprocess.call(file_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
|
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
|
identifier_body
|
go_thrift_gen.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.process_handler import subprocess
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
sources_globs = ('**/*',)
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', type=str, advanced=True, fingerprint=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', type=target_option, advanced=True,
help='Use this thrift import on symbolic defs.')
register('--multiple-files-per-target-override', advanced=True, fingerprint=True,
help='If set, multiple thrift files will be allowed per target, regardless of '
'thrift version. Otherwise, only versions greater than 0.10.0 will be assumed to '
'support multiple files.')
@classmethod
def subsystem_dependencies(cls):
return super(GoThriftGen, cls).subsystem_dependencies() + (Thrift.scoped(cls),)
@property
def _thrift_binary(self):
return self._thrift.select(context=self.context)
@property
def _thrift_version(self):
return self._thrift.version(context=self.context)
@memoized_property
def _thrift(self):
return Thrift.scoped_instance(self)
@memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_target
if thrift_import_target is None:
raise TaskError('Option thrift_import_target in scope {} must be set.'.format(
self.options_scope))
thrift_imports = self.context.resolve(thrift_import_target)
return thrift_imports
@memoized_property
def _service_deps(self):
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
return namespace.group(1)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_method
def _validate_supports_more_than_one_source(self):
# Support for doing the right thing with multiple files landed in
# https://issues.apache.org/jira/browse/THRIFT-3776; first available in 0.10.0
if self.get_options().multiple_files_per_target_override:
return
required_version = '0.10.0'
if Revision.semver(self._thrift_version) < Revision.semver(required_version):
raise TaskError('A single .thrift source file is supported per go_thrift_library with thrift '
'version `{}`: upgrade to at least `{}` to support multiple files.'.format(
self._thrift_version, required_version))
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import)
if thrift_import is None:
raise TaskError('Option thrift_import in scope {} must be set.'.format(self.options_scope))
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
self._validate_supports_more_than_one_source()
for source in all_sources:
|
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
|
file_cmd = target_cmd + [os.path.join(get_buildroot(), source)]
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(file_cmd)) as workunit:
result = subprocess.call(file_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
|
conditional_block
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use document_loader::{DocumentLoader, LoadType};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding::HTMLImageElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::ServoParserBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, Root, RootedReference};
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::document::{Document, DocumentSource, IsHTMLDocument};
use dom::globalscope::GlobalScope;
use dom::htmlformelement::HTMLFormElement;
use dom::htmlimageelement::HTMLImageElement;
use dom::htmlscriptelement::HTMLScriptElement;
use dom::node::{Node, document_from_node, window_from_node};
use encoding::all::UTF_8;
use encoding::types::{DecoderTrap, Encoding};
use html5ever::tokenizer::buffer_queue::BufferQueue;
use hyper::header::ContentType;
use hyper::mime::{Mime, SubLevel, TopLevel};
use hyper_serde::Serde;
use msg::constellation_msg::PipelineId;
use net_traits::{FetchMetadata, FetchResponseListener, Metadata, NetworkError};
use network_listener::PreInvoke;
use profile_traits::time::{TimerMetadata, TimerMetadataFrameType};
use profile_traits::time::{TimerMetadataReflowType, ProfilerCategory, profile};
use script_thread::ScriptThread;
use servo_url::ServoUrl;
use std::cell::Cell;
use std::mem;
use util::resource_files::read_resource_file;
mod html;
mod xml;
#[dom_struct]
/// The parser maintains two input streams: one for input from script through
/// document.write(), and one for input from network.
///
/// There is no concrete representation of the insertion point, instead it
/// always points to just before the next character from the network input,
/// with all of the script input before itself.
///
/// ```text
/// ... script input ... | ... network input ...
/// ^
/// insertion point
/// ```
pub struct ServoParser {
reflector: Reflector,
/// The document associated with this parser.
document: JS<Document>,
/// The pipeline associated with this parse, unavailable if this parse
/// does not correspond to a page load.
pipeline: Option<PipelineId>,
/// Input received from network.
#[ignore_heap_size_of = "Defined in html5ever"]
network_input: DOMRefCell<BufferQueue>,
/// Input received from script. Used only to support document.write().
#[ignore_heap_size_of = "Defined in html5ever"]
script_input: DOMRefCell<BufferQueue>,
/// The tokenizer of this parser.
tokenizer: DOMRefCell<Tokenizer>,
/// Whether to expect any further input from the associated network request.
last_chunk_received: Cell<bool>,
/// Whether this parser should avoid passing any further data to the tokenizer.
suspended: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#script-nesting-level
script_nesting_level: Cell<usize>,
}
#[derive(PartialEq)]
enum LastChunkState {
Received,
NotReceived,
}
impl ServoParser {
pub fn parse_html_document(
document: &Document,
input: DOMString,
url: ServoUrl,
owner: Option<PipelineId>) {
let parser = ServoParser::new(
document,
owner,
Tokenizer::Html(self::html::Tokenizer::new(document, url, None)),
LastChunkState::NotReceived);
parser.parse_chunk(String::from(input));
}
// https://html.spec.whatwg.org/multipage/#parsing-html-fragments
pub fn parse_html_fragment(
context_node: &Node,
input: DOMString,
output: &Node) {
let window = window_from_node(context_node);
let context_document = document_from_node(context_node);
let url = context_document.url();
// Step 1.
let loader = DocumentLoader::new(&*context_document.loader());
let document = Document::new(&window, None, Some(url.clone()),
IsHTMLDocument::HTMLDocument,
None, None,
DocumentSource::FromParser,
loader,
None, None);
// Step 2.
document.set_quirks_mode(context_document.quirks_mode());
// Step 11.
let form = context_node.inclusive_ancestors()
.find(|element| element.is::<HTMLFormElement>());
let fragment_context = FragmentContext {
context_elem: context_node,
form_elem: form.r(),
};
let parser = ServoParser::new(
&document,
None,
Tokenizer::Html(
self::html::Tokenizer::new(&document, url.clone(), Some(fragment_context))),
LastChunkState::Received);
parser.parse_chunk(String::from(input));
// Step 14.
let root_element = document.GetDocumentElement().expect("no document element");
for child in root_element.upcast::<Node>().children() {
output.AppendChild(&child).unwrap();
}
}
pub fn parse_xml_document(
document: &Document,
input: DOMString,
url: ServoUrl,
owner: Option<PipelineId>) {
let parser = ServoParser::new(
document,
owner,
Tokenizer::Xml(self::xml::Tokenizer::new(document, url)),
LastChunkState::NotReceived);
parser.parse_chunk(String::from(input));
}
|
}
/// Corresponds to the latter part of the "Otherwise" branch of the 'An end
/// tag whose tag name is "script"' of
/// https://html.spec.whatwg.org/multipage/#parsing-main-incdata
///
/// This first moves everything from the script input to the beginning of
/// the network input, effectively resetting the insertion point to just
/// before the next character to be consumed.
///
///
/// ```text
/// | ... script input ... network input ...
/// ^
/// insertion point
/// ```
pub fn resume_with_pending_parsing_blocking_script(&self, script: &HTMLScriptElement) {
assert!(self.suspended.get());
self.suspended.set(false);
mem::swap(&mut *self.script_input.borrow_mut(), &mut *self.network_input.borrow_mut());
while let Some(chunk) = self.script_input.borrow_mut().pop_front() {
self.network_input.borrow_mut().push_back(chunk);
}
let script_nesting_level = self.script_nesting_level.get();
assert_eq!(script_nesting_level, 0);
self.script_nesting_level.set(script_nesting_level + 1);
script.execute();
self.script_nesting_level.set(script_nesting_level);
if !self.suspended.get() {
self.parse_sync();
}
}
/// Steps 6-8 of https://html.spec.whatwg.org/multipage/#document.write()
pub fn write(&self, text: Vec<DOMString>) {
assert!(self.script_nesting_level.get() > 0);
if self.document.get_pending_parsing_blocking_script().is_some() {
// There is already a pending parsing blocking script so the
// parser is suspended, we just append everything to the
// script input and abort these steps.
for chunk in text {
self.script_input.borrow_mut().push_back(String::from(chunk).into());
}
return;
}
// There is no pending parsing blocking script, so all previous calls
// to document.write() should have seen their entire input tokenized
// and processed, with nothing pushed to the parser script input.
assert!(self.script_input.borrow().is_empty());
let mut input = BufferQueue::new();
for chunk in text {
input.push_back(String::from(chunk).into());
}
self.tokenize(|tokenizer| tokenizer.feed(&mut input));
if self.suspended.get() {
// Parser got suspended, insert remaining input at end of
// script input, following anything written by scripts executed
// reentrantly during this call.
while let Some(chunk) = input.pop_front() {
self.script_input.borrow_mut().push_back(chunk);
}
return;
}
assert!(input.is_empty());
}
#[allow(unrooted_must_root)]
fn new_inherited(
document: &Document,
pipeline: Option<PipelineId>,
tokenizer: Tokenizer,
last_chunk_state: LastChunkState)
-> Self {
ServoParser {
reflector: Reflector::new(),
document: JS::from_ref(document),
pipeline: pipeline,
network_input: DOMRefCell::new(BufferQueue::new()),
script_input: DOMRefCell::new(BufferQueue::new()),
tokenizer: DOMRefCell::new(tokenizer),
last_chunk_received: Cell::new(last_chunk_state == LastChunkState::Received),
suspended: Default::default(),
script_nesting_level: Default::default(),
}
}
#[allow(unrooted_must_root)]
fn new(
document: &Document,
pipeline: Option<PipelineId>,
tokenizer: Tokenizer,
last_chunk_state: LastChunkState)
-> Root<Self> {
reflect_dom_object(
box ServoParser::new_inherited(document, pipeline, tokenizer, last_chunk_state),
document.window(),
ServoParserBinding::Wrap)
}
fn push_input_chunk(&self, chunk: String) {
self.network_input.borrow_mut().push_back(chunk.into());
}
fn parse_sync(&self) {
let metadata = TimerMetadata {
url: self.document.url().as_str().into(),
iframe: TimerMetadataFrameType::RootWindow,
incremental: TimerMetadataReflowType::FirstReflow,
};
let profiler_category = self.tokenizer.borrow().profiler_category();
profile(profiler_category,
Some(metadata),
self.document.window().upcast::<GlobalScope>().time_profiler_chan().clone(),
|| self.do_parse_sync())
}
fn do_parse_sync(&self) {
assert!(self.script_input.borrow().is_empty());
// This parser will continue to parse while there is either pending input or
// the parser remains unsuspended.
self.tokenize(|tokenizer| tokenizer.feed(&mut *self.network_input.borrow_mut()));
if self.suspended.get() {
return;
}
assert!(self.network_input.borrow().is_empty());
if self.last_chunk_received.get() {
self.finish();
}
}
fn parse_chunk(&self, input: String) {
self.document.set_current_parser(Some(self));
self.push_input_chunk(input);
if !self.suspended.get() {
self.parse_sync();
}
}
fn tokenize<F>(&self, mut feed: F)
where F: FnMut(&mut Tokenizer) -> Result<(), Root<HTMLScriptElement>>
{
loop {
assert!(!self.suspended.get());
self.document.reflow_if_reflow_timer_expired();
let script = match feed(&mut *self.tokenizer.borrow_mut()) {
Ok(()) => return,
Err(script) => script,
};
let script_nesting_level = self.script_nesting_level.get();
self.script_nesting_level.set(script_nesting_level + 1);
script.prepare();
self.script_nesting_level.set(script_nesting_level);
if self.document.get_pending_parsing_blocking_script().is_some() {
self.suspended.set(true);
return;
}
}
}
fn finish(&self) {
assert!(!self.suspended.get());
assert!(self.last_chunk_received.get());
assert!(self.script_input.borrow().is_empty());
assert!(self.network_input.borrow().is_empty());
self.tokenizer.borrow_mut().end();
debug!("finished parsing");
self.document.set_current_parser(None);
if let Some(pipeline) = self.pipeline {
ScriptThread::parsing_complete(pipeline);
}
}
}
#[derive(HeapSizeOf, JSTraceable)]
#[must_root]
enum Tokenizer {
Html(self::html::Tokenizer),
Xml(self::xml::Tokenizer),
}
impl Tokenizer {
fn feed(&mut self, input: &mut BufferQueue) -> Result<(), Root<HTMLScriptElement>> {
match *self {
Tokenizer::Html(ref mut tokenizer) => tokenizer.feed(input),
Tokenizer::Xml(ref mut tokenizer) => tokenizer.feed(input),
}
}
fn end(&mut self) {
match *self {
Tokenizer::Html(ref mut tokenizer) => tokenizer.end(),
Tokenizer::Xml(ref mut tokenizer) => tokenizer.end(),
}
}
fn set_plaintext_state(&mut self) {
match *self {
Tokenizer::Html(ref mut tokenizer) => tokenizer.set_plaintext_state(),
Tokenizer::Xml(_) => unimplemented!(),
}
}
fn profiler_category(&self) -> ProfilerCategory {
match *self {
Tokenizer::Html(_) => ProfilerCategory::ScriptParseHTML,
Tokenizer::Xml(_) => ProfilerCategory::ScriptParseXML,
}
}
}
/// The context required for asynchronously fetching a document
/// and parsing it progressively.
pub struct ParserContext {
/// The parser that initiated the request.
parser: Option<Trusted<ServoParser>>,
/// Is this a synthesized document
is_synthesized_document: bool,
/// The pipeline associated with this document.
id: PipelineId,
/// The URL for this document.
url: ServoUrl,
}
impl ParserContext {
pub fn new(id: PipelineId, url: ServoUrl) -> ParserContext {
ParserContext {
parser: None,
is_synthesized_document: false,
id: id,
url: url,
}
}
}
impl FetchResponseListener for ParserContext {
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self,
meta_result: Result<FetchMetadata, NetworkError>) {
let mut ssl_error = None;
let metadata = match meta_result {
Ok(meta) => {
Some(match meta {
FetchMetadata::Unfiltered(m) => m,
FetchMetadata::Filtered { unsafe_, .. } => unsafe_
})
},
Err(NetworkError::SslValidation(url, reason)) => {
ssl_error = Some(reason);
let mut meta = Metadata::default(url);
let mime: Option<Mime> = "text/html".parse().ok();
meta.set_content_type(mime.as_ref());
Some(meta)
},
Err(_) => None,
};
let content_type =
metadata.clone().and_then(|meta| meta.content_type).map(Serde::into_inner);
let parser = match ScriptThread::page_headers_available(&self.id,
metadata) {
Some(parser) => parser,
None => return,
};
self.parser = Some(Trusted::new(&*parser));
match content_type {
Some(ContentType(Mime(TopLevel::Image, _, _))) => {
self.is_synthesized_document = true;
let page = "<html><body></body></html>".into();
parser.push_input_chunk(page);
parser.parse_sync();
let doc = &parser.document;
let doc_body = Root::upcast::<Node>(doc.GetBody().unwrap());
let img = HTMLImageElement::new(local_name!("img"), None, doc);
img.SetSrc(DOMString::from(self.url.to_string()));
doc_body.AppendChild(&Root::upcast::<Node>(img)).expect("Appending failed");
},
Some(ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) => {
// https://html.spec.whatwg.org/multipage/#read-text
let page = "<pre>\n".into();
parser.push_input_chunk(page);
parser.parse_sync();
parser.tokenizer.borrow_mut().set_plaintext_state();
},
Some(ContentType(Mime(TopLevel::Text, SubLevel::Html, _))) => { // Handle text/html
if let Some(reason) = ssl_error {
self.is_synthesized_document = true;
let page_bytes = read_resource_file("badcert.html").unwrap();
let page = String::from_utf8(page_bytes).unwrap();
let page = page.replace("${reason}", &reason);
parser.push_input_chunk(page);
parser.parse_sync();
}
},
Some(ContentType(Mime(TopLevel::Text, SubLevel::Xml, _))) => {}, // Handle text/xml
Some(ContentType(Mime(toplevel, sublevel, _))) => {
if toplevel.as_str() == "application" && sublevel.as_str() == "xhtml+xml" {
// Handle xhtml (application/xhtml+xml).
return;
}
// Show warning page for unknown mime types.
let page = format!("<html><body><p>Unknown content type ({}/{}).</p></body></html>",
toplevel.as_str(), sublevel.as_str());
self.is_synthesized_document = true;
parser.push_input_chunk(page);
parser.parse_sync();
},
None => {
// No content-type header.
// Merge with #4212 when fixed.
}
}
}
fn process_response_chunk(&mut self, payload: Vec<u8>) {
if !self.is_synthesized_document {
// FIXME: use Vec<u8> (html5ever #34)
let data = UTF_8.decode(&payload, DecoderTrap::Replace).unwrap();
let parser = match self.parser.as_ref() {
Some(parser) => parser.root(),
None => return,
};
parser.parse_chunk(data);
}
}
fn process_response_eof(&mut self, status: Result<(), NetworkError>) {
let parser = match self.parser.as_ref() {
Some(parser) => parser.root(),
None => return,
};
if let Err(NetworkError::Internal(ref reason)) = status {
// Show an error page for network errors,
// certificate errors are handled earlier.
self.is_synthesized_document = true;
let page_bytes = read_resource_file("neterror.html").unwrap();
let page = String::from_utf8(page_bytes).unwrap();
let page = page.replace("${reason}", reason);
parser.push_input_chunk(page);
parser.parse_sync();
} else if let Err(err) = status {
// TODO(Savago): we should send a notification to callers #5463.
debug!("Failed to load page URL {}, error: {:?}", self.url, err);
}
parser.document
.finish_load(LoadType::PageSource(self.url.clone()));
parser.last_chunk_received.set(true);
if !parser.suspended.get() {
parser.parse_sync();
}
}
}
impl PreInvoke for ParserContext {}
pub struct FragmentContext<'a> {
pub context_elem: &'a Node,
pub form_elem: Option<&'a Node>,
}
|
pub fn script_nesting_level(&self) -> usize {
self.script_nesting_level.get()
|
random_line_split
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use document_loader::{DocumentLoader, LoadType};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding::HTMLImageElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::ServoParserBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, Root, RootedReference};
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::document::{Document, DocumentSource, IsHTMLDocument};
use dom::globalscope::GlobalScope;
use dom::htmlformelement::HTMLFormElement;
use dom::htmlimageelement::HTMLImageElement;
use dom::htmlscriptelement::HTMLScriptElement;
use dom::node::{Node, document_from_node, window_from_node};
use encoding::all::UTF_8;
use encoding::types::{DecoderTrap, Encoding};
use html5ever::tokenizer::buffer_queue::BufferQueue;
use hyper::header::ContentType;
use hyper::mime::{Mime, SubLevel, TopLevel};
use hyper_serde::Serde;
use msg::constellation_msg::PipelineId;
use net_traits::{FetchMetadata, FetchResponseListener, Metadata, NetworkError};
use network_listener::PreInvoke;
use profile_traits::time::{TimerMetadata, TimerMetadataFrameType};
use profile_traits::time::{TimerMetadataReflowType, ProfilerCategory, profile};
use script_thread::ScriptThread;
use servo_url::ServoUrl;
use std::cell::Cell;
use std::mem;
use util::resource_files::read_resource_file;
mod html;
mod xml;
#[dom_struct]
/// The parser maintains two input streams: one for input from script through
/// document.write(), and one for input from network.
///
/// There is no concrete representation of the insertion point, instead it
/// always points to just before the next character from the network input,
/// with all of the script input before itself.
///
/// ```text
/// ... script input ... | ... network input ...
/// ^
/// insertion point
/// ```
pub struct ServoParser {
reflector: Reflector,
/// The document associated with this parser.
document: JS<Document>,
/// The pipeline associated with this parse, unavailable if this parse
/// does not correspond to a page load.
pipeline: Option<PipelineId>,
/// Input received from network.
#[ignore_heap_size_of = "Defined in html5ever"]
network_input: DOMRefCell<BufferQueue>,
/// Input received from script. Used only to support document.write().
#[ignore_heap_size_of = "Defined in html5ever"]
script_input: DOMRefCell<BufferQueue>,
/// The tokenizer of this parser.
tokenizer: DOMRefCell<Tokenizer>,
/// Whether to expect any further input from the associated network request.
last_chunk_received: Cell<bool>,
/// Whether this parser should avoid passing any further data to the tokenizer.
suspended: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#script-nesting-level
script_nesting_level: Cell<usize>,
}
#[derive(PartialEq)]
enum
|
{
Received,
NotReceived,
}
impl ServoParser {
pub fn parse_html_document(
document: &Document,
input: DOMString,
url: ServoUrl,
owner: Option<PipelineId>) {
let parser = ServoParser::new(
document,
owner,
Tokenizer::Html(self::html::Tokenizer::new(document, url, None)),
LastChunkState::NotReceived);
parser.parse_chunk(String::from(input));
}
// https://html.spec.whatwg.org/multipage/#parsing-html-fragments
pub fn parse_html_fragment(
context_node: &Node,
input: DOMString,
output: &Node) {
let window = window_from_node(context_node);
let context_document = document_from_node(context_node);
let url = context_document.url();
// Step 1.
let loader = DocumentLoader::new(&*context_document.loader());
let document = Document::new(&window, None, Some(url.clone()),
IsHTMLDocument::HTMLDocument,
None, None,
DocumentSource::FromParser,
loader,
None, None);
// Step 2.
document.set_quirks_mode(context_document.quirks_mode());
// Step 11.
let form = context_node.inclusive_ancestors()
.find(|element| element.is::<HTMLFormElement>());
let fragment_context = FragmentContext {
context_elem: context_node,
form_elem: form.r(),
};
let parser = ServoParser::new(
&document,
None,
Tokenizer::Html(
self::html::Tokenizer::new(&document, url.clone(), Some(fragment_context))),
LastChunkState::Received);
parser.parse_chunk(String::from(input));
// Step 14.
let root_element = document.GetDocumentElement().expect("no document element");
for child in root_element.upcast::<Node>().children() {
output.AppendChild(&child).unwrap();
}
}
pub fn parse_xml_document(
document: &Document,
input: DOMString,
url: ServoUrl,
owner: Option<PipelineId>) {
let parser = ServoParser::new(
document,
owner,
Tokenizer::Xml(self::xml::Tokenizer::new(document, url)),
LastChunkState::NotReceived);
parser.parse_chunk(String::from(input));
}
pub fn script_nesting_level(&self) -> usize {
self.script_nesting_level.get()
}
/// Corresponds to the latter part of the "Otherwise" branch of the 'An end
/// tag whose tag name is "script"' of
/// https://html.spec.whatwg.org/multipage/#parsing-main-incdata
///
/// This first moves everything from the script input to the beginning of
/// the network input, effectively resetting the insertion point to just
/// before the next character to be consumed.
///
///
/// ```text
/// | ... script input ... network input ...
/// ^
/// insertion point
/// ```
pub fn resume_with_pending_parsing_blocking_script(&self, script: &HTMLScriptElement) {
assert!(self.suspended.get());
self.suspended.set(false);
mem::swap(&mut *self.script_input.borrow_mut(), &mut *self.network_input.borrow_mut());
while let Some(chunk) = self.script_input.borrow_mut().pop_front() {
self.network_input.borrow_mut().push_back(chunk);
}
let script_nesting_level = self.script_nesting_level.get();
assert_eq!(script_nesting_level, 0);
self.script_nesting_level.set(script_nesting_level + 1);
script.execute();
self.script_nesting_level.set(script_nesting_level);
if !self.suspended.get() {
self.parse_sync();
}
}
/// Steps 6-8 of https://html.spec.whatwg.org/multipage/#document.write()
pub fn write(&self, text: Vec<DOMString>) {
assert!(self.script_nesting_level.get() > 0);
if self.document.get_pending_parsing_blocking_script().is_some() {
// There is already a pending parsing blocking script so the
// parser is suspended, we just append everything to the
// script input and abort these steps.
for chunk in text {
self.script_input.borrow_mut().push_back(String::from(chunk).into());
}
return;
}
// There is no pending parsing blocking script, so all previous calls
// to document.write() should have seen their entire input tokenized
// and processed, with nothing pushed to the parser script input.
assert!(self.script_input.borrow().is_empty());
let mut input = BufferQueue::new();
for chunk in text {
input.push_back(String::from(chunk).into());
}
self.tokenize(|tokenizer| tokenizer.feed(&mut input));
if self.suspended.get() {
// Parser got suspended, insert remaining input at end of
// script input, following anything written by scripts executed
// reentrantly during this call.
while let Some(chunk) = input.pop_front() {
self.script_input.borrow_mut().push_back(chunk);
}
return;
}
assert!(input.is_empty());
}
#[allow(unrooted_must_root)]
fn new_inherited(
document: &Document,
pipeline: Option<PipelineId>,
tokenizer: Tokenizer,
last_chunk_state: LastChunkState)
-> Self {
ServoParser {
reflector: Reflector::new(),
document: JS::from_ref(document),
pipeline: pipeline,
network_input: DOMRefCell::new(BufferQueue::new()),
script_input: DOMRefCell::new(BufferQueue::new()),
tokenizer: DOMRefCell::new(tokenizer),
last_chunk_received: Cell::new(last_chunk_state == LastChunkState::Received),
suspended: Default::default(),
script_nesting_level: Default::default(),
}
}
#[allow(unrooted_must_root)]
fn new(
document: &Document,
pipeline: Option<PipelineId>,
tokenizer: Tokenizer,
last_chunk_state: LastChunkState)
-> Root<Self> {
reflect_dom_object(
box ServoParser::new_inherited(document, pipeline, tokenizer, last_chunk_state),
document.window(),
ServoParserBinding::Wrap)
}
fn push_input_chunk(&self, chunk: String) {
self.network_input.borrow_mut().push_back(chunk.into());
}
fn parse_sync(&self) {
let metadata = TimerMetadata {
url: self.document.url().as_str().into(),
iframe: TimerMetadataFrameType::RootWindow,
incremental: TimerMetadataReflowType::FirstReflow,
};
let profiler_category = self.tokenizer.borrow().profiler_category();
profile(profiler_category,
Some(metadata),
self.document.window().upcast::<GlobalScope>().time_profiler_chan().clone(),
|| self.do_parse_sync())
}
fn do_parse_sync(&self) {
assert!(self.script_input.borrow().is_empty());
// This parser will continue to parse while there is either pending input or
// the parser remains unsuspended.
self.tokenize(|tokenizer| tokenizer.feed(&mut *self.network_input.borrow_mut()));
if self.suspended.get() {
return;
}
assert!(self.network_input.borrow().is_empty());
if self.last_chunk_received.get() {
self.finish();
}
}
fn parse_chunk(&self, input: String) {
self.document.set_current_parser(Some(self));
self.push_input_chunk(input);
if !self.suspended.get() {
self.parse_sync();
}
}
fn tokenize<F>(&self, mut feed: F)
where F: FnMut(&mut Tokenizer) -> Result<(), Root<HTMLScriptElement>>
{
loop {
assert!(!self.suspended.get());
self.document.reflow_if_reflow_timer_expired();
let script = match feed(&mut *self.tokenizer.borrow_mut()) {
Ok(()) => return,
Err(script) => script,
};
let script_nesting_level = self.script_nesting_level.get();
self.script_nesting_level.set(script_nesting_level + 1);
script.prepare();
self.script_nesting_level.set(script_nesting_level);
if self.document.get_pending_parsing_blocking_script().is_some() {
self.suspended.set(true);
return;
}
}
}
fn finish(&self) {
assert!(!self.suspended.get());
assert!(self.last_chunk_received.get());
assert!(self.script_input.borrow().is_empty());
assert!(self.network_input.borrow().is_empty());
self.tokenizer.borrow_mut().end();
debug!("finished parsing");
self.document.set_current_parser(None);
if let Some(pipeline) = self.pipeline {
ScriptThread::parsing_complete(pipeline);
}
}
}
#[derive(HeapSizeOf, JSTraceable)]
#[must_root]
enum Tokenizer {
Html(self::html::Tokenizer),
Xml(self::xml::Tokenizer),
}
impl Tokenizer {
fn feed(&mut self, input: &mut BufferQueue) -> Result<(), Root<HTMLScriptElement>> {
match *self {
Tokenizer::Html(ref mut tokenizer) => tokenizer.feed(input),
Tokenizer::Xml(ref mut tokenizer) => tokenizer.feed(input),
}
}
fn end(&mut self) {
match *self {
Tokenizer::Html(ref mut tokenizer) => tokenizer.end(),
Tokenizer::Xml(ref mut tokenizer) => tokenizer.end(),
}
}
fn set_plaintext_state(&mut self) {
match *self {
Tokenizer::Html(ref mut tokenizer) => tokenizer.set_plaintext_state(),
Tokenizer::Xml(_) => unimplemented!(),
}
}
fn profiler_category(&self) -> ProfilerCategory {
match *self {
Tokenizer::Html(_) => ProfilerCategory::ScriptParseHTML,
Tokenizer::Xml(_) => ProfilerCategory::ScriptParseXML,
}
}
}
/// The context required for asynchronously fetching a document
/// and parsing it progressively.
pub struct ParserContext {
/// The parser that initiated the request.
parser: Option<Trusted<ServoParser>>,
/// Is this a synthesized document
is_synthesized_document: bool,
/// The pipeline associated with this document.
id: PipelineId,
/// The URL for this document.
url: ServoUrl,
}
impl ParserContext {
pub fn new(id: PipelineId, url: ServoUrl) -> ParserContext {
ParserContext {
parser: None,
is_synthesized_document: false,
id: id,
url: url,
}
}
}
impl FetchResponseListener for ParserContext {
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self,
meta_result: Result<FetchMetadata, NetworkError>) {
let mut ssl_error = None;
let metadata = match meta_result {
Ok(meta) => {
Some(match meta {
FetchMetadata::Unfiltered(m) => m,
FetchMetadata::Filtered { unsafe_, .. } => unsafe_
})
},
Err(NetworkError::SslValidation(url, reason)) => {
ssl_error = Some(reason);
let mut meta = Metadata::default(url);
let mime: Option<Mime> = "text/html".parse().ok();
meta.set_content_type(mime.as_ref());
Some(meta)
},
Err(_) => None,
};
let content_type =
metadata.clone().and_then(|meta| meta.content_type).map(Serde::into_inner);
let parser = match ScriptThread::page_headers_available(&self.id,
metadata) {
Some(parser) => parser,
None => return,
};
self.parser = Some(Trusted::new(&*parser));
match content_type {
Some(ContentType(Mime(TopLevel::Image, _, _))) => {
self.is_synthesized_document = true;
let page = "<html><body></body></html>".into();
parser.push_input_chunk(page);
parser.parse_sync();
let doc = &parser.document;
let doc_body = Root::upcast::<Node>(doc.GetBody().unwrap());
let img = HTMLImageElement::new(local_name!("img"), None, doc);
img.SetSrc(DOMString::from(self.url.to_string()));
doc_body.AppendChild(&Root::upcast::<Node>(img)).expect("Appending failed");
},
Some(ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) => {
// https://html.spec.whatwg.org/multipage/#read-text
let page = "<pre>\n".into();
parser.push_input_chunk(page);
parser.parse_sync();
parser.tokenizer.borrow_mut().set_plaintext_state();
},
Some(ContentType(Mime(TopLevel::Text, SubLevel::Html, _))) => { // Handle text/html
if let Some(reason) = ssl_error {
self.is_synthesized_document = true;
let page_bytes = read_resource_file("badcert.html").unwrap();
let page = String::from_utf8(page_bytes).unwrap();
let page = page.replace("${reason}", &reason);
parser.push_input_chunk(page);
parser.parse_sync();
}
},
Some(ContentType(Mime(TopLevel::Text, SubLevel::Xml, _))) => {}, // Handle text/xml
Some(ContentType(Mime(toplevel, sublevel, _))) => {
if toplevel.as_str() == "application" && sublevel.as_str() == "xhtml+xml" {
// Handle xhtml (application/xhtml+xml).
return;
}
// Show warning page for unknown mime types.
let page = format!("<html><body><p>Unknown content type ({}/{}).</p></body></html>",
toplevel.as_str(), sublevel.as_str());
self.is_synthesized_document = true;
parser.push_input_chunk(page);
parser.parse_sync();
},
None => {
// No content-type header.
// Merge with #4212 when fixed.
}
}
}
fn process_response_chunk(&mut self, payload: Vec<u8>) {
if !self.is_synthesized_document {
// FIXME: use Vec<u8> (html5ever #34)
let data = UTF_8.decode(&payload, DecoderTrap::Replace).unwrap();
let parser = match self.parser.as_ref() {
Some(parser) => parser.root(),
None => return,
};
parser.parse_chunk(data);
}
}
fn process_response_eof(&mut self, status: Result<(), NetworkError>) {
let parser = match self.parser.as_ref() {
Some(parser) => parser.root(),
None => return,
};
if let Err(NetworkError::Internal(ref reason)) = status {
// Show an error page for network errors,
// certificate errors are handled earlier.
self.is_synthesized_document = true;
let page_bytes = read_resource_file("neterror.html").unwrap();
let page = String::from_utf8(page_bytes).unwrap();
let page = page.replace("${reason}", reason);
parser.push_input_chunk(page);
parser.parse_sync();
} else if let Err(err) = status {
// TODO(Savago): we should send a notification to callers #5463.
debug!("Failed to load page URL {}, error: {:?}", self.url, err);
}
parser.document
.finish_load(LoadType::PageSource(self.url.clone()));
parser.last_chunk_received.set(true);
if !parser.suspended.get() {
parser.parse_sync();
}
}
}
impl PreInvoke for ParserContext {}
pub struct FragmentContext<'a> {
pub context_elem: &'a Node,
pub form_elem: Option<&'a Node>,
}
|
LastChunkState
|
identifier_name
|
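To make the insertion-point description in the mod.rs doc comments above concrete, here is a small illustrative sketch in Python (not part of Servo); the queue contents are made up, and only the swap-then-drain ordering mirrors resume_with_pending_parsing_blocking_script:

from collections import deque

network_input = deque(["<p>net-1</p>", "<p>net-2</p>"])
script_input = deque(["<p>doc.write-1</p>"])

# Swap the queues, then drain the old network input behind the script input,
# so everything written by document.write() sits just before the next
# network character (i.e. at the insertion point).
network_input, script_input = script_input, network_input
while script_input:
    network_input.append(script_input.popleft())

print(list(network_input))  # ['<p>doc.write-1</p>', '<p>net-1</p>', '<p>net-2</p>']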
socialMediaMetaTag.ts
|
module ngX.components {
ngX.Component({
module: "ngX.components",
selector: "social-media-meta-tag",
template: [
"<!-- Update your html tag to include the itemscope and itemtype attributes. --> ",
"<html itemscope itemtype='http://schema.org/Article'> ",
" ",
"<!-- Place this data between the <head> tags of your website --> ",
"<title>Page Title. Maximum length 60-70 characters</title> ",
"<meta name='description' content='Page description. No longer than 155 characters.' /> ",
" ",
"<!-- Schema.org markup for Google+ --> ",
"<meta itemprop='name' content='The Name or Title Here'> ",
"<meta itemprop='description' content='This is the page description'> ",
"<meta itemprop='image' content='http://www.example.com/image.jpg'> ",
" ",
"<!-- Twitter Card data --> ",
"<meta name='twitter:card' content='summary_large_image'> ",
"<meta name='twitter:site' content='@publisher_handle'> ",
"<meta name='twitter:title' content='Page Title'> ",
"<meta name='twitter:description' content='Page description less than 200 characters'> ",
"<meta name='twitter:creator' content='@author_handle'> ",
"<!-- Twitter summary card with large image must be at least 280x150px --> ",
"<meta name='twitter:image:src' content='http://www.example.com/image.html'> ",
" ",
|
"<meta property='og:title' content='Title Here' /> ",
"<meta property='og:type' content='article' /> ",
"<meta property='og:url' content='http://www.example.com/' /> ",
"<meta property='og:image' content='http://example.com/image.jpg' /> ",
"<meta property='og:description' content='Description Here' /> ",
"<meta property='og:site_name' content='Site Name, i.e. Moz' /> ",
"<meta property='article:published_time' content='2013-09-17T05:59:00+01:00' /> ",
"<meta property='article:modified_time' content='2013-09-16T19:08:47+01:00' /> ",
"<meta property='article:section' content='Article Section' /> ",
"<meta property='article:tag' content='Article Tag' /> ",
"<meta property='fb:admins' content='Facebook numberic ID' /> "
].join(" ")
});
}
|
"<!-- Open Graph data --> ",
|
random_line_split
|
logger.ts
|
// import * as DailyRotateFile from 'winston-daily-rotate-file';
// import { NullTransport } from 'winston-null';
// import * as winston from 'winston';
// import * as fs from 'fs';
// import { workspace } from "vscode";
// const logFolder: string | undefined = workspace.getConfiguration("hg").get<string>("serverLogFolder");
// const timestampFormat = () => (new Date()).toLocaleTimeString();
// let transports: winston.TransportInstance[] = [];
// if (logFolder) {
// // ensure log folder exists
// if (!fs.existsSync(logFolder)) {
// fs.mkdirSync(logFolder);
// }
// transports = [
// new DailyRotateFile({
// filename: `${logFolder}/-results.log`,
// timestamp: timestampFormat,
// datePattern: 'yyyy-MM-dd',
// prepend: true,
|
// ]
// }
// else {
// transports = [
// new NullTransport()
// ]
// }
// export const logger = new winston.Logger({ transports });
/* */
|
// level: 'debug'
// })
|
random_line_split
|
imgsearch.py
|
import os, sys, argparse
from os import listdir
from os.path import isfile, join
from os import walk
from dd_client import DD
from annoy import AnnoyIndex
import shelve
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--index",help="repository of images to be indexed")
parser.add_argument("--index-batch-size",type=int,help="size of image batch when indexing",default=1)
parser.add_argument("--search",help="image input file for similarity search")
parser.add_argument("--search-size",help="number of nearest neighbors",type=int,default=10)
args = parser.parse_args()
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
|
def image_resize(imgfile,width):
imgquery = cv2.imread(imgfile)
r = width / imgquery.shape[1]
dim = (int(width), int(imgquery.shape[0] * r))
small = cv2.resize(imgquery,dim)
return small
host = 'localhost'
sname = 'imgserv'
description = 'image classification'
mllib = 'caffe'
mltype = 'unsupervised'
extract_layer = 'loss3/classifier'
#extract_layer = 'pool5/7x7_s1'
nclasses = 1000
layer_size = 1000 # default output code size
width = height = 224
binarized = False
dd = DD(host)
dd.set_return_format(dd.RETURN_PYTHON)
ntrees = 100
metric = 'angular' # or 'euclidean'
# creating ML service
model_repo = os.getcwd() + '/model'
model = {'repository':model_repo,'templates':'../templates/caffe/'}
parameters_input = {'connector':'image','width':width,'height':height}
parameters_mllib = {'nclasses':nclasses,'template':'googlenet'}
parameters_output = {}
dd.put_service(sname,model,description,mllib,
parameters_input,parameters_mllib,parameters_output,mltype)
# reset call params
parameters_input = {}
parameters_mllib = {'gpu':True,'extract_layer':extract_layer}
parameters_output = {'binarized':binarized}
if args.index:
try:
os.remove('names.bin')
except:
pass
s = shelve.open('names.bin')
# list files in image repository
c = 0
onlyfiles = []
for (dirpath, dirnames, filenames) in walk(args.index):
nfilenames = []
for f in filenames:
nfilenames.append(dirpath + '/' + f)
onlyfiles.extend(nfilenames)
for x in batch(onlyfiles,args.index_batch_size):
sys.stdout.write('\r'+str(c)+'/'+str(len(onlyfiles)))
sys.stdout.flush()
classif = dd.post_predict(sname,x,parameters_input,parameters_mllib,parameters_output)
for p in classif['body']['predictions']:
if c == 0:
layer_size = len(p['vals'])
s['layer_size'] = layer_size
t = AnnoyIndex(layer_size,metric) # prepare index
t.add_item(c,p['vals'])
s[str(c)] = p['uri']
c = c + 1
#if c >= 10000:
# break
print 'building index...\n'
print 'layer_size=',layer_size
t.build(ntrees)
t.save('index.ann')
s.close()
if args.search:
s = shelve.open('names.bin')
u = AnnoyIndex(s['layer_size'],metric)
u.load('index.ann')
data = [args.search]
classif = dd.post_predict(sname,data,parameters_input,parameters_mllib,parameters_output)
near = u.get_nns_by_vector(classif['body']['predictions'][0]['vals'],args.search_size,include_distances=True)
print near
near_names = []
for n in near[0]:
near_names.append(s[str(n)])
print near_names
cv2.imshow('query',image_resize(args.search,224.0))
cv2.waitKey(0)
for n in near_names:
cv2.imshow('res',image_resize(n,224.0))
cv2.waitKey(0)
dd.delete_service(sname,clear='')
|
yield iterable[ndx:min(ndx + n, l)]
|
conditional_block
|
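For reference, a minimal self-contained usage sketch of the batch() generator defined in imgsearch.py above; the sample filenames are invented for illustration:

def batch(iterable, n=1):
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]

chunks = list(batch(['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg', 'e.jpg'], n=2))
print(chunks)  # [['a.jpg', 'b.jpg'], ['c.jpg', 'd.jpg'], ['e.jpg']]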
imgsearch.py
|
import os, sys, argparse
from os import listdir
from os.path import isfile, join
from os import walk
from dd_client import DD
from annoy import AnnoyIndex
import shelve
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--index",help="repository of images to be indexed")
parser.add_argument("--index-batch-size",type=int,help="size of image batch when indexing",default=1)
parser.add_argument("--search",help="image input file for similarity search")
parser.add_argument("--search-size",help="number of nearest neighbors",type=int,default=10)
args = parser.parse_args()
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def image_resize(imgfile,width):
imgquery = cv2.imread(imgfile)
r = width / imgquery.shape[1]
dim = (int(width), int(imgquery.shape[0] * r))
small = cv2.resize(imgquery,dim)
return small
host = 'localhost'
sname = 'imgserv'
description = 'image classification'
mllib = 'caffe'
mltype = 'unsupervised'
extract_layer = 'loss3/classifier'
#extract_layer = 'pool5/7x7_s1'
nclasses = 1000
layer_size = 1000 # default output code size
width = height = 224
binarized = False
dd = DD(host)
dd.set_return_format(dd.RETURN_PYTHON)
ntrees = 100
metric = 'angular' # or 'euclidean'
# creating ML service
model_repo = os.getcwd() + '/model'
model = {'repository':model_repo,'templates':'../templates/caffe/'}
parameters_input = {'connector':'image','width':width,'height':height}
parameters_mllib = {'nclasses':nclasses,'template':'googlenet'}
parameters_output = {}
dd.put_service(sname,model,description,mllib,
parameters_input,parameters_mllib,parameters_output,mltype)
# reset call params
parameters_input = {}
parameters_mllib = {'gpu':True,'extract_layer':extract_layer}
parameters_output = {'binarized':binarized}
if args.index:
try:
os.remove('names.bin')
except:
pass
s = shelve.open('names.bin')
# list files in image repository
c = 0
onlyfiles = []
for (dirpath, dirnames, filenames) in walk(args.index):
nfilenames = []
for f in filenames:
nfilenames.append(dirpath + '/' + f)
onlyfiles.extend(nfilenames)
for x in batch(onlyfiles,args.index_batch_size):
sys.stdout.write('\r'+str(c)+'/'+str(len(onlyfiles)))
sys.stdout.flush()
classif = dd.post_predict(sname,x,parameters_input,parameters_mllib,parameters_output)
for p in classif['body']['predictions']:
if c == 0:
layer_size = len(p['vals'])
s['layer_size'] = layer_size
t = AnnoyIndex(layer_size,metric) # prepare index
t.add_item(c,p['vals'])
s[str(c)] = p['uri']
c = c + 1
#if c >= 10000:
# break
print 'building index...\n'
print 'layer_size=',layer_size
t.build(ntrees)
t.save('index.ann')
s.close()
if args.search:
s = shelve.open('names.bin')
u = AnnoyIndex(s['layer_size'],metric)
u.load('index.ann')
data = [args.search]
classif = dd.post_predict(sname,data,parameters_input,parameters_mllib,parameters_output)
near = u.get_nns_by_vector(classif['body']['predictions'][0]['vals'],args.search_size,include_distances=True)
print near
near_names = []
for n in near[0]:
|
cv2.waitKey(0)
for n in near_names:
cv2.imshow('res',image_resize(n,224.0))
cv2.waitKey(0)
dd.delete_service(sname,clear='')
|
near_names.append(s[str(n)])
print near_names
cv2.imshow('query',image_resize(args.search,224.0))
|
random_line_split
|
imgsearch.py
|
import os, sys, argparse
from os import listdir
from os.path import isfile, join
from os import walk
from dd_client import DD
from annoy import AnnoyIndex
import shelve
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--index",help="repository of images to be indexed")
parser.add_argument("--index-batch-size",type=int,help="size of image batch when indexing",default=1)
parser.add_argument("--search",help="image input file for similarity search")
parser.add_argument("--search-size",help="number of nearest neighbors",type=int,default=10)
args = parser.parse_args()
def
|
(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def image_resize(imgfile,width):
imgquery = cv2.imread(imgfile)
r = width / imgquery.shape[1]
dim = (int(width), int(imgquery.shape[0] * r))
small = cv2.resize(imgquery,dim)
return small
host = 'localhost'
sname = 'imgserv'
description = 'image classification'
mllib = 'caffe'
mltype = 'unsupervised'
extract_layer = 'loss3/classifier'
#extract_layer = 'pool5/7x7_s1'
nclasses = 1000
layer_size = 1000 # default output code size
width = height = 224
binarized = False
dd = DD(host)
dd.set_return_format(dd.RETURN_PYTHON)
ntrees = 100
metric = 'angular' # or 'euclidean'
# creating ML service
model_repo = os.getcwd() + '/model'
model = {'repository':model_repo,'templates':'../templates/caffe/'}
parameters_input = {'connector':'image','width':width,'height':height}
parameters_mllib = {'nclasses':nclasses,'template':'googlenet'}
parameters_output = {}
dd.put_service(sname,model,description,mllib,
parameters_input,parameters_mllib,parameters_output,mltype)
# reset call params
parameters_input = {}
parameters_mllib = {'gpu':True,'extract_layer':extract_layer}
parameters_output = {'binarized':binarized}
if args.index:
try:
os.remove('names.bin')
except:
pass
s = shelve.open('names.bin')
# list files in image repository
c = 0
onlyfiles = []
for (dirpath, dirnames, filenames) in walk(args.index):
nfilenames = []
for f in filenames:
nfilenames.append(dirpath + '/' + f)
onlyfiles.extend(nfilenames)
for x in batch(onlyfiles,args.index_batch_size):
sys.stdout.write('\r'+str(c)+'/'+str(len(onlyfiles)))
sys.stdout.flush()
classif = dd.post_predict(sname,x,parameters_input,parameters_mllib,parameters_output)
for p in classif['body']['predictions']:
if c == 0:
layer_size = len(p['vals'])
s['layer_size'] = layer_size
t = AnnoyIndex(layer_size,metric) # prepare index
t.add_item(c,p['vals'])
s[str(c)] = p['uri']
c = c + 1
#if c >= 10000:
# break
print 'building index...\n'
print 'layer_size=',layer_size
t.build(ntrees)
t.save('index.ann')
s.close()
if args.search:
s = shelve.open('names.bin')
u = AnnoyIndex(s['layer_size'],metric)
u.load('index.ann')
data = [args.search]
classif = dd.post_predict(sname,data,parameters_input,parameters_mllib,parameters_output)
near = u.get_nns_by_vector(classif['body']['predictions'][0]['vals'],args.search_size,include_distances=True)
print near
near_names = []
for n in near[0]:
near_names.append(s[str(n)])
print near_names
cv2.imshow('query',image_resize(args.search,224.0))
cv2.waitKey(0)
for n in near_names:
cv2.imshow('res',image_resize(n,224.0))
cv2.waitKey(0)
dd.delete_service(sname,clear='')
|
batch
|
identifier_name
|
imgsearch.py
|
import os, sys, argparse
from os import listdir
from os.path import isfile, join
from os import walk
from dd_client import DD
from annoy import AnnoyIndex
import shelve
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--index",help="repository of images to be indexed")
parser.add_argument("--index-batch-size",type=int,help="size of image batch when indexing",default=1)
parser.add_argument("--search",help="image input file for similarity search")
parser.add_argument("--search-size",help="number of nearest neighbors",type=int,default=10)
args = parser.parse_args()
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def image_resize(imgfile,width):
|
host = 'localhost'
sname = 'imgserv'
description = 'image classification'
mllib = 'caffe'
mltype = 'unsupervised'
extract_layer = 'loss3/classifier'
#extract_layer = 'pool5/7x7_s1'
nclasses = 1000
layer_size = 1000 # default output code size
width = height = 224
binarized = False
dd = DD(host)
dd.set_return_format(dd.RETURN_PYTHON)
ntrees = 100
metric = 'angular' # or 'euclidean'
# creating ML service
model_repo = os.getcwd() + '/model'
model = {'repository':model_repo,'templates':'../templates/caffe/'}
parameters_input = {'connector':'image','width':width,'height':height}
parameters_mllib = {'nclasses':nclasses,'template':'googlenet'}
parameters_output = {}
dd.put_service(sname,model,description,mllib,
parameters_input,parameters_mllib,parameters_output,mltype)
# reset call params
parameters_input = {}
parameters_mllib = {'gpu':True,'extract_layer':extract_layer}
parameters_output = {'binarized':binarized}
if args.index:
try:
os.remove('names.bin')
except:
pass
s = shelve.open('names.bin')
# list files in image repository
c = 0
onlyfiles = []
for (dirpath, dirnames, filenames) in walk(args.index):
nfilenames = []
for f in filenames:
nfilenames.append(dirpath + '/' + f)
onlyfiles.extend(nfilenames)
for x in batch(onlyfiles,args.index_batch_size):
sys.stdout.write('\r'+str(c)+'/'+str(len(onlyfiles)))
sys.stdout.flush()
classif = dd.post_predict(sname,x,parameters_input,parameters_mllib,parameters_output)
for p in classif['body']['predictions']:
if c == 0:
layer_size = len(p['vals'])
s['layer_size'] = layer_size
t = AnnoyIndex(layer_size,metric) # prepare index
t.add_item(c,p['vals'])
s[str(c)] = p['uri']
c = c + 1
#if c >= 10000:
# break
print 'building index...\n'
print 'layer_size=',layer_size
t.build(ntrees)
t.save('index.ann')
s.close()
if args.search:
s = shelve.open('names.bin')
u = AnnoyIndex(s['layer_size'],metric)
u.load('index.ann')
data = [args.search]
classif = dd.post_predict(sname,data,parameters_input,parameters_mllib,parameters_output)
near = u.get_nns_by_vector(classif['body']['predictions'][0]['vals'],args.search_size,include_distances=True)
print near
near_names = []
for n in near[0]:
near_names.append(s[str(n)])
print near_names
cv2.imshow('query',image_resize(args.search,224.0))
cv2.waitKey(0)
for n in near_names:
cv2.imshow('res',image_resize(n,224.0))
cv2.waitKey(0)
dd.delete_service(sname,clear='')
|
imgquery = cv2.imread(imgfile)
r = width / imgquery.shape[1]
dim = (int(width), int(imgquery.shape[0] * r))
small = cv2.resize(imgquery,dim)
return small
|
identifier_body
|
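For context, a minimal self-contained sketch of the Annoy build/save/load/query cycle that imgsearch.py relies on; the 4-dimensional vectors and the 'toy.ann' file name are hypothetical and only illustrate the API:

from annoy import AnnoyIndex

layer_size = 4
metric = 'angular'

# Build and persist a tiny index from made-up feature vectors.
t = AnnoyIndex(layer_size, metric)
t.add_item(0, [1.0, 0.0, 0.0, 0.0])
t.add_item(1, [0.0, 1.0, 0.0, 0.0])
t.build(10)        # 10 trees; more trees trade index size for recall
t.save('toy.ann')

# Load it back and query for the 2 nearest neighbours of a query vector.
u = AnnoyIndex(layer_size, metric)
u.load('toy.ann')
near = u.get_nns_by_vector([0.9, 0.1, 0.0, 0.0], 2, include_distances=True)
print(near)        # ([0, 1], [dist_to_0, dist_to_1])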
conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Qcloud COS SDK for Python 3 documentation build configuration file, created by
# cookiecutter pipproject
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
|
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Qcloud COS SDK for Python 3'
copyright = '2016, Dan Su'
author = 'Dan Su'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'Qcloud COS SDK for Python 3 v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Qcloud COS SDK for Python 3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Qcloud COS SDK for Python 3.tex', 'Qcloud COS SDK for Python 3 Documentation',
'Dan Su', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Qcloud COS SDK for Python 3', 'Qcloud COS SDK for Python 3 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Qcloud COS SDK for Python 3', 'Qcloud COS SDK for Python 3 Documentation',
author, 'Qcloud COS SDK for Python 3', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
|
random_line_split
|
handlers.rs
|
use x11::xlib;
use window_system::WindowSystem;
use libc::{c_ulong};
pub struct KeyPressedHandler;
pub struct MapRequestHandler;
fn create_some_window(window_system: &WindowSystem, width: u32, height: u32, x: i32, y: i32) -> c_ulong {
let border_width = 2;
unsafe {
let border = xlib::XWhitePixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let background = xlib::XBlackPixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let window = xlib::XCreateSimpleWindow(window_system.display,
window_system.root,
|
border_width,
border,background);
xlib::XSelectInput(window_system.display,
window,
xlib::SubstructureNotifyMask | xlib::SubstructureRedirectMask);
return window;
}
}
impl MapRequestHandler {
pub fn new() -> MapRequestHandler {
return MapRequestHandler;
}
pub fn handle(&self, event: xlib::XEvent, window_system: &WindowSystem) {
let event = xlib::XMapRequestEvent::from(event);
let height: u32;
let width: u32;
let mut x: i32 = 0;
let y: i32 = 0;
if window_system.count.get() == 0 {
width = window_system.info.width as u32;
height = window_system.info.height as u32;
}
else {
width = (window_system.info.width / 2) as u32;
height = window_system.info.height as u32;
x = width as i32;
}
// create frame as a new parent for the window to be mapped
let frame = create_some_window(window_system, width, height, x, y);
unsafe {
// resize window to fit parent
xlib::XResizeWindow(window_system.display, event.window, width as u32, height as u32);
// make frame window parent of window to be mapped
xlib::XReparentWindow(window_system.display, event.window, frame, 0, 0);
// show frame
xlib::XMapWindow(window_system.display, frame);
// show window inside frame
xlib::XMapWindow(window_system.display, event.window);
}
window_system.count.set(window_system.count.get() + 1);
}
}
impl KeyPressedHandler {
pub fn new() -> KeyPressedHandler {
return KeyPressedHandler;
}
pub fn handle(&self, event: xlib::XEvent) {
let event = xlib::XKeyPressedEvent::from(event);
println!("KeyPressed {}", event.keycode);
}
}
|
x,
y,
width,
height,
|
random_line_split
|
handlers.rs
|
use x11::xlib;
use window_system::WindowSystem;
use libc::{c_ulong};
pub struct KeyPressedHandler;
pub struct MapRequestHandler;
fn create_some_window(window_system: &WindowSystem, width: u32, height: u32, x: i32, y: i32) -> c_ulong {
let border_width = 2;
unsafe {
let border = xlib::XWhitePixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let background = xlib::XBlackPixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let window = xlib::XCreateSimpleWindow(window_system.display,
window_system.root,
x,
y,
width,
height,
border_width,
border,background);
xlib::XSelectInput(window_system.display,
window,
xlib::SubstructureNotifyMask | xlib::SubstructureRedirectMask);
return window;
}
}
impl MapRequestHandler {
pub fn new() -> MapRequestHandler {
return MapRequestHandler;
}
pub fn handle(&self, event: xlib::XEvent, window_system: &WindowSystem) {
let event = xlib::XMapRequestEvent::from(event);
let height: u32;
let width: u32;
let mut x: i32 = 0;
let y: i32 = 0;
if window_system.count.get() == 0
|
else {
width = (window_system.info.width / 2) as u32;
height = window_system.info.height as u32;
x = width as i32;
}
// create frame as a new parent for the window to be mapped
let frame = create_some_window(window_system, width, height, x, y);
unsafe {
// resize window to fit parent
xlib::XResizeWindow(window_system.display, event.window, width as u32, height as u32);
// make frame window parent of window to be mapped
xlib::XReparentWindow(window_system.display, event.window, frame, 0, 0);
// show frame
xlib::XMapWindow(window_system.display, frame);
// show window inside frame
xlib::XMapWindow(window_system.display, event.window);
}
window_system.count.set(window_system.count.get() + 1);
}
}
impl KeyPressedHandler {
pub fn new() -> KeyPressedHandler {
return KeyPressedHandler;
}
pub fn handle(&self, event: xlib::XEvent) {
let event = xlib::XKeyPressedEvent::from(event);
println!("KeyPressed {}", event.keycode);
}
}
|
{
width = window_system.info.width as u32;
height = window_system.info.height as u32;
}
|
conditional_block
|
handlers.rs
|
use x11::xlib;
use window_system::WindowSystem;
use libc::{c_ulong};
pub struct KeyPressedHandler;
pub struct
|
;
fn create_some_window(window_system: &WindowSystem, width: u32, height: u32, x: i32, y: i32) -> c_ulong {
let border_width = 2;
unsafe {
let border = xlib::XWhitePixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let background = xlib::XBlackPixel(window_system.display,
xlib::XDefaultScreen(window_system.display));
let window = xlib::XCreateSimpleWindow(window_system.display,
window_system.root,
x,
y,
width,
height,
border_width,
border,background);
xlib::XSelectInput(window_system.display,
window,
xlib::SubstructureNotifyMask | xlib::SubstructureRedirectMask);
return window;
}
}
impl MapRequestHandler {
pub fn new() -> MapRequestHandler {
return MapRequestHandler;
}
pub fn handle(&self, event: xlib::XEvent, window_system: &WindowSystem) {
let event = xlib::XMapRequestEvent::from(event);
let height: u32;
let width: u32;
let mut x: i32 = 0;
let y: i32 = 0;
if window_system.count.get() == 0 {
width = window_system.info.width as u32;
height = window_system.info.height as u32;
}
else {
width = (window_system.info.width / 2) as u32;
height = window_system.info.height as u32;
x = width as i32;
}
// create frame as a new parent for the window to be mapped
let frame = create_some_window(window_system, width, height, x, y);
unsafe {
// resize window to fit parent
xlib::XResizeWindow(window_system.display, event.window, width as u32, height as u32);
// make frame window parent of window to be mapped
xlib::XReparentWindow(window_system.display, event.window, frame, 0, 0);
// show frame
xlib::XMapWindow(window_system.display, frame);
// show window inside frame
xlib::XMapWindow(window_system.display, event.window);
}
window_system.count.set(window_system.count.get() + 1);
}
}
impl KeyPressedHandler {
pub fn new() -> KeyPressedHandler {
return KeyPressedHandler;
}
pub fn handle(&self, event: xlib::XEvent) {
let event = xlib::XKeyPressedEvent::from(event);
println!("KeyPressed {}", event.keycode);
}
}
|
MapRequestHandler
|
identifier_name
|
Music.ts
|
function MusicDemo()
|
{
const bottomViewport = new Viewport(0, 1)
const buttons = [{
Label: "Classical",
Action: () => Music.Set(Content.Demos.Music.Classical)
}, {
Label: "Synth",
Action: () => Music.Set(Content.Demos.Music.Synth)
}, {
Label: "Easy L.",
Action: () => Music.Set(Content.Demos.Music.EasyListening)
}, {
Label: "Stop",
Action: () => Music.Stop()
}]
for (const button of buttons) {
const buttonGroup = new Group(bottomViewport, () => {
staticSprite.Play(Content.Buttons.Narrow.Pressed)
button.Action()
})
buttonGroup.Move(IndexOf(buttons, button) * (WidthVirtualPixels - ButtonNarrowWidth) / (buttons.length - 1) + ButtonNarrowWidth / 2, HeightVirtualPixels - ButtonHeight / 2)
const staticSprite = new Sprite(buttonGroup)
staticSprite.Loop(Content.Buttons.Narrow.Unpressed)
FontBig.Write(buttonGroup, button.Label, HorizontalAlignment.Middle, VerticalAlignment.Middle)
}
return () => {
Music.Stop()
bottomViewport.Delete()
}
}
|
identifier_body
|
|
Music.ts
|
function
|
() {
const bottomViewport = new Viewport(0, 1)
const buttons = [{
Label: "Classical",
Action: () => Music.Set(Content.Demos.Music.Classical)
}, {
Label: "Synth",
Action: () => Music.Set(Content.Demos.Music.Synth)
}, {
Label: "Easy L.",
Action: () => Music.Set(Content.Demos.Music.EasyListening)
}, {
Label: "Stop",
Action: () => Music.Stop()
}]
for (const button of buttons) {
const buttonGroup = new Group(bottomViewport, () => {
staticSprite.Play(Content.Buttons.Narrow.Pressed)
button.Action()
})
buttonGroup.Move(IndexOf(buttons, button) * (WidthVirtualPixels - ButtonNarrowWidth) / (buttons.length - 1) + ButtonNarrowWidth / 2, HeightVirtualPixels - ButtonHeight / 2)
const staticSprite = new Sprite(buttonGroup)
staticSprite.Loop(Content.Buttons.Narrow.Unpressed)
FontBig.Write(buttonGroup, button.Label, HorizontalAlignment.Middle, VerticalAlignment.Middle)
}
return () => {
Music.Stop()
bottomViewport.Delete()
}
}
|
MusicDemo
|
identifier_name
|
Music.ts
|
function MusicDemo() {
const bottomViewport = new Viewport(0, 1)
const buttons = [{
Label: "Classical",
Action: () => Music.Set(Content.Demos.Music.Classical)
}, {
Label: "Synth",
Action: () => Music.Set(Content.Demos.Music.Synth)
}, {
Label: "Easy L.",
Action: () => Music.Set(Content.Demos.Music.EasyListening)
}, {
Label: "Stop",
Action: () => Music.Stop()
}]
for (const button of buttons) {
const buttonGroup = new Group(bottomViewport, () => {
staticSprite.Play(Content.Buttons.Narrow.Pressed)
|
staticSprite.Loop(Content.Buttons.Narrow.Unpressed)
FontBig.Write(buttonGroup, button.Label, HorizontalAlignment.Middle, VerticalAlignment.Middle)
}
return () => {
Music.Stop()
bottomViewport.Delete()
}
}
|
button.Action()
})
buttonGroup.Move(IndexOf(buttons, button) * (WidthVirtualPixels - ButtonNarrowWidth) / (buttons.length - 1) + ButtonNarrowWidth / 2, HeightVirtualPixels - ButtonHeight / 2)
const staticSprite = new Sprite(buttonGroup)
|
random_line_split
|
main.rs
|
#![macro_use]
extern crate clap;
use std::fs;
fn main()
|
{
let matches = app_from_crate!()
.subcommand(SubCommand::with_name("create-combine-lists")
.arg(Arg::with_name("DIR")
.required(true)
.index(1)
)
).get_matches();
if let Some(matches) = matches.subcommand_matches("create-combine-lists") {
let mut bases = HashMap::new();
let d = matches.value_of("DIR").unwrap();
let paths = fs::read_dir(d);
for p in paths {
// check for a pattern match
if !p.starts_with("G") {
continue;
}
if !p.ends_with(".MP4") {
continue;
}
// 0123456789AB
// GX030293.MP4
if p.len() != 0xc {
continue;
}
let s = match p.to_str() {
Some(s) => s,
None => continue,
}
// P (x264) or X (x265)
// note: we could probably mix these.
let k = s[1];
// GXaa1234 - aa - the index of clip
let i = s[2..4];
// GX01bbbb - bbbb - the number of the video (composed of clips)
let n = s[4..8];
bases.entry((k, n)).
}
}
println!("Hello, world!");
}
|
identifier_body
|
|
main.rs
|
#![macro_use]
extern crate clap;
use std::fs;
fn main() {
let matches = app_from_crate!()
.subcommand(SubCommand::with_name("create-combine-lists")
.arg(Arg::with_name("DIR")
.required(true)
.index(1)
)
).get_matches();
if let Some(matches) = matches.subcommand_matches("create-combine-lists") {
let mut bases = HashMap::new();
let d = matches.value_of("DIR").unwrap();
let paths = fs::read_dir(d);
for p in paths {
// check for a pattern match
if !p.starts_with("G") {
continue;
}
if !p.ends_with(".MP4") {
continue;
}
// 0123456789AB
// GX030293.MP4
if p.len() != 0xc {
continue;
}
let s = match p.to_str() {
Some(s) => s,
None => continue,
}
// P (x264) or X (x265)
|
let n = s[4..8];
bases.entry((k, n)).
}
}
println!("Hello, world!");
}
|
// note: we could probably mix these.
let k = s[1];
// GXaa1234 - aa - the index of clip
let i = s[2..4];
// GX01bbbb - bbbb - the number of the video (composed of clips)
|
random_line_split
|
main.rs
|
#![macro_use]
extern crate clap;
use std::fs;
fn
|
() {
let matches = app_from_crate!()
.subcommand(SubCommand::with_name("create-combine-lists")
.arg(Arg::with_name("DIR")
.required(true)
.index(1)
)
).get_matches();
if let Some(matches) = matches.subcommand_matches("create-combine-lists") {
let mut bases = HashMap::new();
let d = matches.value_of("DIR").unwrap();
let paths = fs::read_dir(d);
for p in paths {
// check for a pattern match
if !p.starts_with("G") {
continue;
}
if !p.ends_with(".MP4") {
continue;
}
// 0123456789AB
// GX030293.MP4
if p.len() != 0xc {
continue;
}
let s = match p.to_str() {
Some(s) => s,
None => continue,
}
// P (x264) or X (x265)
// note: we could probably mix these.
let k = s[1];
// GXaa1234 - aa - the index of clip
let i = s[2..4];
// GX01bbbb - bbbb - the number of the video (composed of clips)
let n = s[4..8];
bases.entry((k, n)).
}
}
println!("Hello, world!");
}
|
main
|
identifier_name
|
main.rs
|
#![macro_use]
extern crate clap;
use std::fs;
fn main() {
let matches = app_from_crate!()
.subcommand(SubCommand::with_name("create-combine-lists")
.arg(Arg::with_name("DIR")
.required(true)
.index(1)
)
).get_matches();
if let Some(matches) = matches.subcommand_matches("create-combine-lists") {
let mut bases = HashMap::new();
let d = matches.value_of("DIR").unwrap();
let paths = fs::read_dir(d);
for p in paths {
// check for a pattern match
if !p.starts_with("G") {
continue;
}
if !p.ends_with(".MP4") {
continue;
}
// 0123456789AB
// GX030293.MP4
if p.len() != 0xc
|
let s = match p.to_str() {
Some(s) => s,
None => continue,
}
// P (x264) or X (x265)
// note: we could probably mix these.
let k = s[1];
// GXaa1234 - aa - the index of clip
let i = s[2..4];
// GX01bbbb - bbbb - the number of the video (composed of clips)
let n = s[4..8];
bases.entry((k, n)).
}
}
println!("Hello, world!");
}
|
{
continue;
}
|
conditional_block
|
auth.repository.js
|
const jwt = require("jsonwebtoken");
module.exports = (UserModel, Config, CryptoHelper) => {
function _transformUser (user) {
return {
"id": user._id,
"name": user.name,
"isAdmin": user.isAdmin,
"created": user.createdAt,
"updated": user.updatedAt
}
}
async function signIn (username, userpassword) {
// search for username
const foundUser = await UserModel.findOne({ name: username }).lean();
// validate founduser
if (!foundUser) throw new Error("Authentication failed. User not found.")
|
const token = await jwt.sign(foundUser, Config.tokenSecret, {
expiresIn: Config.tokenExpires,
issuer: Config.tokenIssuer
});
// delete password before returning the user
delete foundUser.passwordHash;
// return token and user
return {
token: token,
user: this._transformUser(foundUser)
}
}
async function signUp (username, userpassword, isAdmin) {
// try to find the user
const foundUser = await UserModel.findOne({name: username}).lean();
// check if user is available
if (foundUser) throw new Error("Username is already registered. Please use another Username.");
// create new User object
const newUser = new UserModel({
name: username,
passwordHash: CryptoHelper.encrypt(userpassword),
isAdmin: isAdmin
});
// save new User object
const createdUser = await newUser.save();
// delete password before returning the user
delete createdUser.passwordHash;
// return user
return {
user: this._transformUser(createdUser)
}
}
return {
_transformUser,
signIn,
signUp
}
}
|
if (userpassword !== CryptoHelper.decrypt(foundUser.passwordHash)) throw new Error("Authentication failed. Wrong password.");
// create a token
|
random_line_split
|
auth.repository.js
|
const jwt = require("jsonwebtoken");
module.exports = (UserModel, Config, CryptoHelper) => {
function _transformUser (user) {
return {
"id": user._id,
"name": user.name,
"isAdmin": user.isAdmin,
"created": user.createdAt,
"updated": user.updatedAt
}
}
async function signIn (username, userpassword) {
// search for username
const foundUser = await UserModel.findOne({ name: username }).lean();
// validate founduser
if (!foundUser) throw new Error("Authentication failed. User not found.")
if (userpassword !== CryptoHelper.decrypt(foundUser.passwordHash)) throw new Error("Authentication failed. Wrong password.");
// create a token
const token = await jwt.sign(foundUser, Config.tokenSecret, {
expiresIn: Config.tokenExpires,
issuer: Config.tokenIssuer
});
// delete password before returning the user
delete foundUser.passwordHash;
// return token and user
return {
token: token,
user: this._transformUser(foundUser)
}
}
async function signUp (username, userpassword, isAdmin)
|
return {
_transformUser,
signIn,
signUp
}
}
|
{
// try to find the user
const foundUser = await UserModel.findOne({name: username}).lean();
// check if user is available
if (foundUser) throw new Error("Username is already registered. Please use another Username.");
// create new User object
const newUser = new UserModel({
name: username,
passwordHash: CryptoHelper.encrypt(userpassword),
isAdmin: isAdmin
});
// save new User object
const createdUser = await newUser.save();
// delete password before returning the user
delete createdUser.passwordHash;
// return user
return {
user: this._transformUser(createdUser)
}
}
|
identifier_body
|
auth.repository.js
|
const jwt = require("jsonwebtoken");
module.exports = (UserModel, Config, CryptoHelper) => {
function _transformUser (user) {
return {
"id": user._id,
"name": user.name,
"isAdmin": user.isAdmin,
"created": user.createdAt,
"updated": user.updatedAt
}
}
async function
|
(username, userpassword) {
// search for username
const foundUser = await UserModel.findOne({ name: username }).lean();
// validate founduser
if (!foundUser) throw new Error("Authentication failed. User not found.")
if (userpassword !== CryptoHelper.decrypt(foundUser.passwordHash)) throw new Error("Authentication failed. Wrong password.");
// create a token
const token = await jwt.sign(foundUser, Config.tokenSecret, {
expiresIn: Config.tokenExpires,
issuer: Config.tokenIssuer
});
// delete password before returning the user
delete foundUser.passwordHash;
// return token and user
return {
token: token,
user: this._transformUser(foundUser)
}
}
async function signUp (username, userpassword, isAdmin) {
// try to find the user
const foundUser = await UserModel.findOne({name: username}).lean();
// check if user is available
if (foundUser) throw new Error("Username is already registered. Please use another Username.");
// create new User object
const newUser = new UserModel({
name: username,
passwordHash: CryptoHelper.encrypt(userpassword),
isAdmin: isAdmin
});
// save new User object
const createdUser = await newUser.save();
// delete password before returning the user
delete createdUser.passwordHash;
// return user
return {
user: this._transformUser(createdUser)
}
}
return {
_transformUser,
signIn,
signUp
}
}
|
signIn
|
identifier_name
|
mod.rs
|
use crate::{
commands::CommandHelpers,
entity::{EntityRef, Realm},
player_output::PlayerOutput,
};
mod enter_room_command;
pub use enter_room_command::enter_room;
pub fn
|
<F>(
f: F,
) -> Box<dyn Fn(&mut Realm, EntityRef, CommandHelpers) -> Vec<PlayerOutput>>
where
F: Fn(&mut Realm, EntityRef, CommandHelpers) -> Result<Vec<PlayerOutput>, String> + 'static,
{
Box::new(
move |realm, player_ref, helpers| match realm.player(player_ref) {
Some(player) if player.is_admin() => match f(realm, player_ref, helpers) {
Ok(output) => output,
Err(mut message) => {
message.push('\n');
let mut output: Vec<PlayerOutput> = Vec::new();
push_output_string!(output, player_ref, message);
output
}
},
_ => {
let mut output: Vec<PlayerOutput> = Vec::new();
push_output_str!(output, player_ref, "You are not an admin.");
output
}
},
)
}
|
wrap_admin_command
|
identifier_name
|
mod.rs
|
use crate::{
commands::CommandHelpers,
entity::{EntityRef, Realm},
player_output::PlayerOutput,
};
mod enter_room_command;
pub use enter_room_command::enter_room;
pub fn wrap_admin_command<F>(
f: F,
) -> Box<dyn Fn(&mut Realm, EntityRef, CommandHelpers) -> Vec<PlayerOutput>>
where
F: Fn(&mut Realm, EntityRef, CommandHelpers) -> Result<Vec<PlayerOutput>, String> + 'static,
|
{
Box::new(
move |realm, player_ref, helpers| match realm.player(player_ref) {
Some(player) if player.is_admin() => match f(realm, player_ref, helpers) {
Ok(output) => output,
Err(mut message) => {
message.push('\n');
let mut output: Vec<PlayerOutput> = Vec::new();
push_output_string!(output, player_ref, message);
output
}
},
_ => {
let mut output: Vec<PlayerOutput> = Vec::new();
push_output_str!(output, player_ref, "You are not an admin.");
output
}
},
)
}
|
identifier_body
|
|
mod.rs
|
use crate::{
commands::CommandHelpers,
entity::{EntityRef, Realm},
player_output::PlayerOutput,
};
mod enter_room_command;
pub use enter_room_command::enter_room;
pub fn wrap_admin_command<F>(
f: F,
) -> Box<dyn Fn(&mut Realm, EntityRef, CommandHelpers) -> Vec<PlayerOutput>>
where
F: Fn(&mut Realm, EntityRef, CommandHelpers) -> Result<Vec<PlayerOutput>, String> + 'static,
{
Box::new(
move |realm, player_ref, helpers| match realm.player(player_ref) {
Some(player) if player.is_admin() => match f(realm, player_ref, helpers) {
Ok(output) => output,
Err(mut message) => {
message.push('\n');
|
push_output_string!(output, player_ref, message);
output
}
},
_ => {
let mut output: Vec<PlayerOutput> = Vec::new();
push_output_str!(output, player_ref, "You are not an admin.");
output
}
},
)
}
|
let mut output: Vec<PlayerOutput> = Vec::new();
|
random_line_split
|
jquery.contextmenu.js
|
/// <reference path="../intellisense/jquery-1.2.6-vsdoc-cn.js" />
/* --------------------------------------------------
Parameter description
option: {width:Number, items:Array, onShow:Function, rule:JSON}
Member syntax (three forms) -- para.items
-> {text:String, icon:String, type:String, alias:String, width:Number, items:Array} -- menu group
-> {text:String, icon:String, type:String, alias:String, action:Function } -- menu item
-> {type:String} -- menu separator
--------------------------------------------------*/
(function($) {
function returnfalse() { return false; };
$.fn.contextmenu = funct
|
option = $.extend({ alias: "cmroot", width: 150 }, option);
var ruleName = null, target = null,
groups = {}, mitems = {}, actions = {}, showGroups = [],
itemTpl = "<div class='b-m-$[type]' unselectable=on><nobr unselectable=on><img src='$[icon]' align='absmiddle'/><span unselectable=on>$[text]</span></nobr></div>";
var gTemplet = $("<div/>").addClass("b-m-mpanel").attr("unselectable", "on").css("display", "none");
var iTemplet = $("<div/>").addClass("b-m-item").attr("unselectable", "on");
var sTemplet = $("<div/>").addClass("b-m-split");
//Create a menu group
var buildGroup = function(obj) {
groups[obj.alias] = this;
this.gidx = obj.alias;
this.id = obj.alias;
if (obj.disable) {
this.disable = obj.disable;
this.className = "b-m-idisable";
}
$(this).width(obj.width).click(returnfalse).mousedown(returnfalse).appendTo($("body"));
obj = null;
return this;
};
var buildItem = function(obj) {
var T = this;
T.title = obj.text;
T.idx = obj.alias;
T.gidx = obj.gidx;
T.data = obj;
T.innerHTML = itemTpl.replace(/\$\[([^\]]+)\]/g, function() {
return obj[arguments[1]];
});
if (obj.disable) {
T.disable = obj.disable;
T.className = "b-m-idisable";
}
obj.items && (T.group = true);
obj.action && (actions[obj.alias] = obj.action);
mitems[obj.alias] = T;
T = obj = null;
return this;
};
//Add menu items
var addItems = function(gidx, items) {
var tmp = null;
for (var i = 0; i < items.length; i++) {
if (items[i].type == "splitLine") {
//Menu separator
tmp = sTemplet.clone()[0];
} else {
items[i].gidx = gidx;
if (items[i].type == "group") {
//Menu group
buildGroup.apply(gTemplet.clone()[0], [items[i]]);
arguments.callee(items[i].alias, items[i].items);
items[i].type = "arrow";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
} else {
//Menu item
items[i].type = "ibody";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
$(tmp).click(function(e) {
if (!this.disable) {
if ($.isFunction(actions[this.idx])) {
actions[this.idx].call(this, target);
}
hideMenuPane();
}
return false;
});
} //Endif
$(tmp).bind("contextmenu", returnfalse).hover(overItem, outItem);
} //Endif
groups[gidx].appendChild(tmp);
tmp = items[i] = items[i].items = null;
} //Endfor
gidx = items = null;
};
var overItem = function(e) {
//If the menu item is disabled
if (this.disable)
return false;
hideMenuPane.call(groups[this.gidx]);
//If this is a menu group
if (this.group) {
var pos = $(this).offset();
var width = $(this).outerWidth();
showMenuGroup.apply(groups[this.idx], [pos, width]);
}
this.className = "b-m-ifocus";
return false;
};
//Menu item loses focus
var outItem = function(e) {
//If the menu item is disabled
if (this.disable )
return false;
if (!this.group) {
//Menu item
this.className = "b-m-item";
} //Endif
return false;
};
//Show the given menu group at the specified position
var showMenuGroup = function(pos, width) {
var bwidth = $("body").width();
var bheight = document.documentElement.clientHeight;
var mwidth = $(this).outerWidth();
var mheight = $(this).outerHeight();
pos.left = (pos.left + width + mwidth > bwidth) ? (pos.left - mwidth < 0 ? 0 : pos.left - mwidth) : pos.left + width;
pos.top = (pos.top + mheight > bheight) ? (pos.top - mheight + (width > 0 ? 25 : 0) < 0 ? 0 : pos.top - mheight + (width > 0 ? 25 : 0)) : pos.top;
$(this).css(pos).show();
showGroups.push(this.gidx);
};
//Hide menu groups
var hideMenuPane = function() {
var alias = null;
for (var i = showGroups.length - 1; i >= 0; i--) {
if (showGroups[i] == this.gidx)
break;
alias = showGroups.pop();
groups[alias].style.display = "none";
mitems[alias] && (mitems[alias].className = "b-m-item");
} //Endfor
//CollectGarbage();
};
function applyRule(rule) {
if (ruleName && ruleName == rule.name)
return false;
for (var i in mitems)
disable(i, !rule.disable);
for (var i = 0; i < rule.items.length; i++)
disable(rule.items[i], rule.disable);
ruleName = rule.name;
};
function disable(alias, disabled) {
var item = mitems[alias];
item.className = (item.disable = item.lastChild.disabled = disabled) ? "b-m-idisable" : "b-m-item";
};
/** Show the right-click context menu */
function showMenu(e, menutarget) {
target = menutarget;
showMenuGroup.call(groups[this.id], { left: e.pageX, top: e.pageY }, 0);
$(document).one('mousedown', hideMenuPane);
}
var $root = $("#" + option.alias);
var root = null;
if ($root.length == 0) {
root = buildGroup.apply(gTemplet.clone()[0], [option]);
root.applyrule = applyRule;
root.showMenu = showMenu;
addItems(option.alias, option.items);
}
else {
root = $root[0];
}
var me = $(this).each(function() {
return $(this).bind('contextmenu', function(e) {
var bShowContext = (option.onContextMenu && $.isFunction(option.onContextMenu)) ? option.onContextMenu.call(this, e) : true;
if (bShowContext) {
if (option.onShow && $.isFunction(option.onShow)) {
option.onShow.call(this, root);
}
root.showMenu(e, this);
}
return false;
});
});
//Apply the display rule
if (option.rule) {
applyRule(option.rule);
}
gTemplet = iTemplet = sTemplet = itemTpl = buildGroup = buildItem = null;
addItems = overItem = outItem = null;
//CollectGarbage();
return me;
}
})(jQuery);
|
ion(option) {
|
identifier_body
|
jquery.contextmenu.js
|
/// <reference path="../intellisense/jquery-1.2.6-vsdoc-cn.js" />
/* --------------------------------------------------
Parameter description
option: {width:Number, items:Array, onShow:Function, rule:JSON}
Member syntax (three forms) -- para.items
-> {text:String, icon:String, type:String, alias:String, width:Number, items:Array} -- menu group
-> {text:String, icon:String, type:String, alias:String, action:Function } -- menu item
-> {type:String} -- menu separator
--------------------------------------------------*/
(function($) {
function returnfalse() { return false; };
$.fn.contextmenu = function(option) {
option = $.extend({ alias: "cmroot", width: 150 }, option);
var ruleName = null, target = null,
groups = {}, mitems = {}, actions = {}, showGroups = [],
itemTpl = "<div class='b-m-$[type]' unselectable=on><nobr unselectable=on><img src='$[icon]' align='absmiddle'/><span unselectable=on>$[text]</span></nobr></div>";
var gTemplet = $("<div/>").addClass("b-m-mpanel").attr("unselectable", "on").css("display", "none");
var iTemplet = $("<div/>").addClass("b-m-item").attr("unselectable", "on");
var sTemplet = $("<div/>").addClass("b-m-split");
//Create a menu group
var buildGroup = function(obj) {
groups[obj.alias] = this;
this.gidx = obj.alias;
this.id = obj.alias;
if (obj.disable) {
this.disable = obj.disable;
this.className = "b-m-idisable";
}
$(this).width(obj.width).click(returnfalse).mousedown(returnfalse).appendTo($("body"));
obj = null;
return this;
};
var buildItem = function(obj) {
var T = this;
T.title = obj.text;
T.idx = obj.alias;
T.gidx = obj.gidx;
T.data = obj;
T.innerHTML = itemTpl.replace(/\$\[([^\]]+)\]/g, function() {
return obj[arguments[1]];
});
if (obj.disable) {
T.disable = obj.disable;
T.className = "b-m-idisable";
}
obj.items && (T.group = true);
obj.action && (actions[obj.alias] = obj.action);
mitems[obj.alias] = T;
T = obj = null;
return this;
};
//Add menu items
var addItems = function(gidx, items) {
var tmp = null;
for (var i = 0; i < items.length; i++) {
if (items[i].type == "splitLine") {
//Menu separator
tmp = sTemplet.clone()[0];
} else {
items[i].gidx = gidx;
if (items[i].type == "group") {
//Menu group
buildGroup.apply(gTemplet.clone()[0], [items[i]]);
arguments.callee(items[i].alias, items[i].items);
items[i].type = "arrow";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
} else {
//Menu item
items[i].type = "ibody";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
$(tmp).click(function(e) {
if (!this.disable) {
if ($.isFunction(actions[this.idx])) {
actions[this.idx].call(this, target);
}
hideMenuPane();
}
return false;
});
} //Endif
$(tmp).bind("contextmenu", returnfalse).hover(overItem, outItem);
} //Endif
groups[gidx].appendChild(tmp);
tmp = items[i] = items[i].items = null;
} //Endfor
gidx = items = null;
};
var overItem = function(e) {
//If the menu item is disabled
if (this.disable)
return false;
hideMenuPane.call(groups[this.gidx]);
//If this is a menu group
if (this.group) {
var pos = $(this).offset();
var width = $(this).outerWidth();
showMenuGroup.apply(groups[this.idx], [pos, width]);
}
this.className = "b-m-ifocus";
return false;
};
//Menu item loses focus
var outItem = function(e) {
//If the menu item is disabled
if (this.disable )
return false;
if (!this.group) {
//Menu item
this.className = "b-m-item";
} //Endif
return false;
};
//Show the given menu group at the specified position
var showMenuGroup = function(pos, width) {
var bwidth = $("body").width();
var bheight = document.documentElement.clientHeight;
var mwidth = $(this).outerWidth();
var mheight = $(this).outerHeight();
pos.left = (pos.left + width + mwidth > bwidth) ? (pos.left - mwidth < 0 ? 0 : pos.left - mwidth) : pos.left + width;
pos.top = (pos.top + mheight > bheight) ? (pos.top - mheight + (width > 0 ? 25 : 0) < 0 ? 0 : pos.top - mheight + (width > 0 ? 25 : 0)) : pos.top;
$(this).css(pos).show();
showGroups.push(this.gidx);
};
//Hide menu groups
var hideMenuPane = function() {
var alias = null;
for (var i = showGroups.length - 1; i >= 0; i--) {
if (showGroups[i] == this.gidx)
break;
alias = showGroups.pop();
groups[alias].style.display = "none";
mitems[alias] && (mitems[alias].className = "b-m-item");
} //Endfor
//CollectGarbage();
};
function applyRule(rule) {
if (ruleName && ruleName == rule.name)
return false;
for (var i in mitems)
disable(i, !rule.disable);
for (var i = 0; i < rule.items.length; i++)
disable(rule.items[i], rule.disable);
ruleName = rule.name;
};
function disable(alias, disabled) {
var item = mitems[alias];
item.className = (item.disable = item.lastChild.disabled = disabled) ? "b-m-idisable" : "b-m-item";
};
/** Show the right-click context menu */
function showMenu(e, menutarget) {
target = menutarget;
showMenuGroup.call(groups[this.id], { left: e.pageX, top: e.pageY }, 0);
$(document).one('mousedown', hideMenuPane);
|
}
var $root = $("#" + option.alias);
var root = null;
if ($root.length == 0) {
root = buildGroup.apply(gTemplet.clone()[0], [option]);
root.applyrule = applyRule;
root.showMenu = showMenu;
addItems(option.alias, option.items);
}
else {
root = $root[0];
}
var me = $(this).each(function() {
return $(this).bind('contextmenu', function(e) {
var bShowContext = (option.onContextMenu && $.isFunction(option.onContextMenu)) ? option.onContextMenu.call(this, e) : true;
if (bShowContext) {
if (option.onShow && $.isFunction(option.onShow)) {
option.onShow.call(this, root);
}
root.showMenu(e, this);
}
return false;
});
});
//Apply the display rule
if (option.rule) {
applyRule(option.rule);
}
gTemplet = iTemplet = sTemplet = itemTpl = buildGroup = buildItem = null;
addItems = overItem = outItem = null;
//CollectGarbage();
return me;
}
})(jQuery);
|
identifier_name
|
|
jquery.contextmenu.js
|
/// <reference path="../intellisense/jquery-1.2.6-vsdoc-cn.js" />
/* --------------------------------------------------
Parameter description
option: {width:Number, items:Array, onShow:Function, rule:JSON}
Member syntax (three forms) -- para.items
-> {text:String, icon:String, type:String, alias:String, width:Number, items:Array} -- menu group
-> {text:String, icon:String, type:String, alias:String, action:Function } -- menu item
-> {type:String} -- menu separator
--------------------------------------------------*/
(function($) {
function returnfalse() { return false; };
$.fn.contextmenu = function(option) {
option = $.extend({ alias: "cmroot", width: 150 }, option);
var ruleName = null, target = null,
groups = {}, mitems = {}, actions = {}, showGroups = [],
itemTpl = "<div class='b-m-$[type]' unselectable=on><nobr unselectable=on><img src='$[icon]' align='absmiddle'/><span unselectable=on>$[text]</span></nobr></div>";
var gTemplet = $("<div/>").addClass("b-m-mpanel").attr("unselectable", "on").css("display", "none");
var iTemplet = $("<div/>").addClass("b-m-item").attr("unselectable", "on");
var sTemplet = $("<div/>").addClass("b-m-split");
//Create a menu group
var buildGroup = function(obj) {
groups[obj.alias] = this;
this.gidx = obj.alias;
this.id = obj.alias;
if (obj.disable) {
this.disable = obj.disable;
this.className = "b-m-idisable";
}
$(this).width(obj.width).click(returnfalse).mousedown(returnfalse).appendTo($("body"));
obj = null;
return this;
};
var buildItem = function(obj) {
var T = this;
T.title = obj.text;
T.idx = obj.alias;
T.gidx = obj.gidx;
T.data = obj;
T.innerHTML = itemTpl.replace(/\$\[([^\]]+)\]/g, function() {
return obj[arguments[1]];
});
if (obj.disable) {
T.disable = obj.disable;
T.className = "b-m-idisable";
}
obj.items && (T.group = true);
obj.action && (actions[obj.alias] = obj.action);
mitems[obj.alias] = T;
T = obj = null;
return this;
};
//Add menu items
var addItems = function(gidx, items) {
var tmp = null;
for (var i = 0; i < items.length; i++) {
if (items[i].type == "splitLine") {
//Menu separator
tmp = sTemplet.clone()[0];
} else {
items[i].gidx = gidx;
if (items[i].type == "group") {
//Menu group
buildGroup.apply(gTemplet.clone()[0], [items[i]]);
arguments.callee(items[i].alias, items[i].items);
items[i].type = "arrow";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
} else {
//Menu item
items[i].type = "ibody";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
$(tmp).click(function(e) {
if (!this.disable) {
if ($.isFunction(actions[this.idx])) {
actions[this.idx].call(this, target);
}
hideMenuPane();
}
return false;
});
} //Endif
$(tmp).bind("contextmenu", returnfalse).hover(overItem, outItem);
} //Endif
groups[gidx].appendChild(tmp);
tmp = items[i] = items[i].items = null;
} //Endfor
gidx = items = null;
};
var overItem = function(e) {
//If the menu item is disabled
if (this.disable)
return false;
hideMenuPane.call(groups[this.gidx]);
//If this is a menu group
if (this.group) {
var pos = $(this).offset();
var width = $(this).outerWidth();
showMenuGroup.apply(groups[this.idx], [pos, width]);
}
this.className = "b-m-ifocus";
return false;
};
//Menu item loses focus
var outItem = function(e) {
//If the menu item is disabled
if (this.disable )
return false;
if (!this.group) {
//Menu item
this.className = "b-m-item";
} //Endif
return false;
};
//Show the given menu group at the specified position
var showMenuGroup = function(pos, width) {
var bwidth = $("body").width();
var bheight = document.documentElement.clientHeight;
var mwidth = $(this).outerWidth();
var mheight = $(this).outerHeight();
pos.left = (pos.left + width + mwidth > bwidth) ? (pos.left - mwidth < 0 ? 0 : pos.left - mwidth) : pos.left + width;
pos.top = (pos.top + mheight > bheight) ? (pos.top - mheight + (width > 0 ? 25 : 0) < 0 ? 0 : pos.top - mheight + (width > 0 ? 25 : 0)) : pos.top;
$(this).css(pos).show();
showGroups.push(this.gidx);
};
//Hide menu groups
var hideMenuPane = function() {
var alias = null;
for (var i = showGroups.length - 1; i >= 0; i--) {
if (showGroups[i] == this.gidx)
break;
alias = showGroups.pop();
groups[alias].style.display = "none";
mitems[alias] && (mitems[alias].className = "b-m-item");
} //Endfor
//CollectGarbage();
};
function applyRule(rule) {
if (ruleName && ruleName == rule.name)
return false;
for (var i in mitems)
disable(i, !rule.disable);
for (var i = 0; i < rule.items.length; i++)
disable(rule.items[i], rule.disable);
ruleName = rule.name;
};
function disable(alias, disabled) {
var item = mitems[alias];
item.className = (item.disable = item.lastChild.disabled = disabled) ? "b-m-idisable" : "b-m-item";
};
/** Show the right-click context menu */
function showMenu(e, menutarget) {
target = menutarget;
showMenuGroup.call(groups[this.id], { left: e.pageX, top: e.pageY }, 0);
$(document).one('mousedown', hideMenuPane);
}
|
if ($root.length == 0) {
root = buildGroup.apply(gTemplet.clone()[0], [option]);
root.applyrule = applyRule;
root.showMenu = showMenu;
addItems(option.alias, option.items);
}
else {
root = $root[0];
}
var me = $(this).each(function() {
return $(this).bind('contextmenu', function(e) {
var bShowContext = (option.onContextMenu && $.isFunction(option.onContextMenu)) ? option.onContextMenu.call(this, e) : true;
if (bShowContext) {
if (option.onShow && $.isFunction(option.onShow)) {
option.onShow.call(this, root);
}
root.showMenu(e, this);
}
return false;
});
});
//Apply the display rule
if (option.rule) {
applyRule(option.rule);
}
gTemplet = iTemplet = sTemplet = itemTpl = buildGroup = buildItem = null;
addItems = overItem = outItem = null;
//CollectGarbage();
return me;
}
})(jQuery);
|
var $root = $("#" + option.alias);
var root = null;
|
random_line_split
|
jquery.contextmenu.js
|
/// <reference path="../intellisense/jquery-1.2.6-vsdoc-cn.js" />
/* --------------------------------------------------
Parameter description
option: {width:Number, items:Array, onShow:Function, rule:JSON}
Member syntax (three forms) -- para.items
-> {text:String, icon:String, type:String, alias:String, width:Number, items:Array} -- menu group
-> {text:String, icon:String, type:String, alias:String, action:Function } -- menu item
-> {type:String} -- menu separator
--------------------------------------------------*/
(function($) {
function returnfalse() { return false; };
$.fn.contextmenu = function(option) {
option = $.extend({ alias: "cmroot", width: 150 }, option);
var ruleName = null, target = null,
groups = {}, mitems = {}, actions = {}, showGroups = [],
itemTpl = "<div class='b-m-$[type]' unselectable=on><nobr unselectable=on><img src='$[icon]' align='absmiddle'/><span unselectable=on>$[text]</span></nobr></div>";
var gTemplet = $("<div/>").addClass("b-m-mpanel").attr("unselectable", "on").css("display", "none");
var iTemplet = $("<div/>").addClass("b-m-item").attr("unselectable", "on");
var sTemplet = $("<div/>").addClass("b-m-split");
//Create a menu group
var buildGroup = function(obj) {
groups[obj.alias] = this;
this.gidx = obj.alias;
this.id = obj.alias;
if (obj.disable) {
this.disable = obj.disable;
this.className = "b-m-idisable";
}
$(this).width(obj.width).click(returnfalse).mousedown(returnfalse).appendTo($("body"));
obj = null;
return this;
};
var buildItem = function(obj) {
var T = this;
T.title = obj.text;
T.idx = obj.alias;
T.gidx = obj.gidx;
T.data = obj;
T.innerHTML = itemTpl.replace(/\$\[([^\]]+)\]/g, function() {
return obj[arguments[1]];
});
if (obj.disable) {
T.disable = obj.disable;
T.className = "b-m-idisable";
}
obj.items && (T.group = true);
obj.action && (actions[obj.alias] = obj.action);
mitems[obj.alias] = T;
T = obj = null;
return this;
};
//Add menu items
var addItems = function(gidx, items) {
var tmp = null;
for (var i = 0; i < items.length; i++) {
if (items[i].type == "splitLine") {
//Menu separator
tmp = sTemplet.clone()[0];
} else {
items[i].gidx = gidx;
if (items[i]
|
.items = null;
} //Endfor
gidx = items = null;
};
var overItem = function(e) {
//If the menu item is disabled
if (this.disable)
return false;
hideMenuPane.call(groups[this.gidx]);
//If this is a menu group
if (this.group) {
var pos = $(this).offset();
var width = $(this).outerWidth();
showMenuGroup.apply(groups[this.idx], [pos, width]);
}
this.className = "b-m-ifocus";
return false;
};
//Menu item loses focus
var outItem = function(e) {
//If the menu item is disabled
if (this.disable )
return false;
if (!this.group) {
//Menu item
this.className = "b-m-item";
} //Endif
return false;
};
//Show the given menu group at the specified position
var showMenuGroup = function(pos, width) {
var bwidth = $("body").width();
var bheight = document.documentElement.clientHeight;
var mwidth = $(this).outerWidth();
var mheight = $(this).outerHeight();
pos.left = (pos.left + width + mwidth > bwidth) ? (pos.left - mwidth < 0 ? 0 : pos.left - mwidth) : pos.left + width;
pos.top = (pos.top + mheight > bheight) ? (pos.top - mheight + (width > 0 ? 25 : 0) < 0 ? 0 : pos.top - mheight + (width > 0 ? 25 : 0)) : pos.top;
$(this).css(pos).show();
showGroups.push(this.gidx);
};
//Hide menu groups
var hideMenuPane = function() {
var alias = null;
for (var i = showGroups.length - 1; i >= 0; i--) {
if (showGroups[i] == this.gidx)
break;
alias = showGroups.pop();
groups[alias].style.display = "none";
mitems[alias] && (mitems[alias].className = "b-m-item");
} //Endfor
//CollectGarbage();
};
function applyRule(rule) {
if (ruleName && ruleName == rule.name)
return false;
for (var i in mitems)
disable(i, !rule.disable);
for (var i = 0; i < rule.items.length; i++)
disable(rule.items[i], rule.disable);
ruleName = rule.name;
};
function disable(alias, disabled) {
var item = mitems[alias];
item.className = (item.disable = item.lastChild.disabled = disabled) ? "b-m-idisable" : "b-m-item";
};
/** Show the right-click context menu */
function showMenu(e, menutarget) {
target = menutarget;
showMenuGroup.call(groups[this.id], { left: e.pageX, top: e.pageY }, 0);
$(document).one('mousedown', hideMenuPane);
}
var $root = $("#" + option.alias);
var root = null;
if ($root.length == 0) {
root = buildGroup.apply(gTemplet.clone()[0], [option]);
root.applyrule = applyRule;
root.showMenu = showMenu;
addItems(option.alias, option.items);
}
else {
root = $root[0];
}
var me = $(this).each(function() {
return $(this).bind('contextmenu', function(e) {
var bShowContext = (option.onContextMenu && $.isFunction(option.onContextMenu)) ? option.onContextMenu.call(this, e) : true;
if (bShowContext) {
if (option.onShow && $.isFunction(option.onShow)) {
option.onShow.call(this, root);
}
root.showMenu(e, this);
}
return false;
});
});
//Apply the display rule
if (option.rule) {
applyRule(option.rule);
}
gTemplet = iTemplet = sTemplet = itemTpl = buildGroup = buildItem = null;
addItems = overItem = outItem = null;
//CollectGarbage();
return me;
}
})(jQuery);
|
.type == "group") {
//Menu group
buildGroup.apply(gTemplet.clone()[0], [items[i]]);
arguments.callee(items[i].alias, items[i].items);
items[i].type = "arrow";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
} else {
//Menu item
items[i].type = "ibody";
tmp = buildItem.apply(iTemplet.clone()[0], [items[i]]);
$(tmp).click(function(e) {
if (!this.disable) {
if ($.isFunction(actions[this.idx])) {
actions[this.idx].call(this, target);
}
hideMenuPane();
}
return false;
});
} //Endif
$(tmp).bind("contextmenu", returnfalse).hover(overItem, outItem);
} //Endif
groups[gidx].appendChild(tmp);
tmp = items[i] = items[i]
|
conditional_block
|
data_models.py
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import data_models
class Interface(data_models.BaseDataModel):
def __init__(self, id=None, compute_id=None, network_id=None,
fixed_ips=None, port_id=None):
self.id = id
self.compute_id = compute_id
self.network_id = network_id
self.port_id = port_id
self.fixed_ips = fixed_ips
class Delta(data_models.BaseDataModel):
def __init__(self, amphora_id=None, compute_id=None,
add_nics=None, delete_nics=None):
self.compute_id = compute_id
self.amphora_id = amphora_id
self.add_nics = add_nics
self.delete_nics = delete_nics
class Network(data_models.BaseDataModel):
def __init__(self, id=None, name=None, subnets=None,
project_id=None, admin_state_up=None, mtu=None,
provider_network_type=None,
provider_physical_network=None,
provider_segmentation_id=None,
router_external=None):
self.id = id
self.name = name
self.subnets = subnets
self.project_id = project_id
self.admin_state_up = admin_state_up
self.provider_network_type = provider_network_type
self.provider_physical_network = provider_physical_network
self.provider_segmentation_id = provider_segmentation_id
self.router_external = router_external
self.mtu = mtu
class Subnet(data_models.BaseDataModel):
def __init__(self, id=None, name=None, network_id=None, project_id=None,
gateway_ip=None, cidr=None, ip_version=None):
self.id = id
self.name = name
self.network_id = network_id
self.project_id = project_id
self.gateway_ip = gateway_ip
self.cidr = cidr
self.ip_version = ip_version
class Port(data_models.BaseDataModel):
def __init__(self, id=None, name=None, device_id=None, device_owner=None,
mac_address=None, network_id=None, status=None,
project_id=None, admin_state_up=None, fixed_ips=None,
network=None):
self.id = id
self.name = name
self.device_id = device_id
self.device_owner = device_owner
self.mac_address = mac_address
self.network_id = network_id
self.status = status
self.project_id = project_id
self.admin_state_up = admin_state_up
self.fixed_ips = fixed_ips or []
self.network = network
def get_subnet_id(self, fixed_ip_address):
for fixed_ip in self.fixed_ips:
if fixed_ip.ip_address == fixed_ip_address:
return fixed_ip.subnet_id
class
|
(data_models.BaseDataModel):
def __init__(self, subnet_id=None, ip_address=None, subnet=None):
self.subnet_id = subnet_id
self.ip_address = ip_address
self.subnet = subnet
class AmphoraNetworkConfig(data_models.BaseDataModel):
def __init__(self, amphora=None, vip_subnet=None, vip_port=None,
vrrp_subnet=None, vrrp_port=None, ha_subnet=None,
ha_port=None):
self.amphora = amphora
self.vip_subnet = vip_subnet
self.vip_port = vip_port
self.vrrp_subnet = vrrp_subnet
self.vrrp_port = vrrp_port
self.ha_subnet = ha_subnet
self.ha_port = ha_port
|
FixedIP
|
identifier_name
|
data_models.py
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import data_models
class Interface(data_models.BaseDataModel):
|
class Delta(data_models.BaseDataModel):
def __init__(self, amphora_id=None, compute_id=None,
add_nics=None, delete_nics=None):
self.compute_id = compute_id
self.amphora_id = amphora_id
self.add_nics = add_nics
self.delete_nics = delete_nics
class Network(data_models.BaseDataModel):
def __init__(self, id=None, name=None, subnets=None,
project_id=None, admin_state_up=None, mtu=None,
provider_network_type=None,
provider_physical_network=None,
provider_segmentation_id=None,
router_external=None):
self.id = id
self.name = name
self.subnets = subnets
self.project_id = project_id
self.admin_state_up = admin_state_up
self.provider_network_type = provider_network_type
self.provider_physical_network = provider_physical_network
self.provider_segmentation_id = provider_segmentation_id
self.router_external = router_external
self.mtu = mtu
class Subnet(data_models.BaseDataModel):
def __init__(self, id=None, name=None, network_id=None, project_id=None,
gateway_ip=None, cidr=None, ip_version=None):
self.id = id
self.name = name
self.network_id = network_id
self.project_id = project_id
self.gateway_ip = gateway_ip
self.cidr = cidr
self.ip_version = ip_version
class Port(data_models.BaseDataModel):
def __init__(self, id=None, name=None, device_id=None, device_owner=None,
mac_address=None, network_id=None, status=None,
project_id=None, admin_state_up=None, fixed_ips=None,
network=None):
self.id = id
self.name = name
self.device_id = device_id
self.device_owner = device_owner
self.mac_address = mac_address
self.network_id = network_id
self.status = status
self.project_id = project_id
self.admin_state_up = admin_state_up
self.fixed_ips = fixed_ips or []
self.network = network
def get_subnet_id(self, fixed_ip_address):
for fixed_ip in self.fixed_ips:
if fixed_ip.ip_address == fixed_ip_address:
return fixed_ip.subnet_id
class FixedIP(data_models.BaseDataModel):
def __init__(self, subnet_id=None, ip_address=None, subnet=None):
self.subnet_id = subnet_id
self.ip_address = ip_address
self.subnet = subnet
class AmphoraNetworkConfig(data_models.BaseDataModel):
def __init__(self, amphora=None, vip_subnet=None, vip_port=None,
vrrp_subnet=None, vrrp_port=None, ha_subnet=None,
ha_port=None):
self.amphora = amphora
self.vip_subnet = vip_subnet
self.vip_port = vip_port
self.vrrp_subnet = vrrp_subnet
self.vrrp_port = vrrp_port
self.ha_subnet = ha_subnet
self.ha_port = ha_port
|
def __init__(self, id=None, compute_id=None, network_id=None,
fixed_ips=None, port_id=None):
self.id = id
self.compute_id = compute_id
self.network_id = network_id
self.port_id = port_id
self.fixed_ips = fixed_ips
|
identifier_body
|
data_models.py
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import data_models
class Interface(data_models.BaseDataModel):
def __init__(self, id=None, compute_id=None, network_id=None,
fixed_ips=None, port_id=None):
self.id = id
self.compute_id = compute_id
self.network_id = network_id
self.port_id = port_id
self.fixed_ips = fixed_ips
class Delta(data_models.BaseDataModel):
def __init__(self, amphora_id=None, compute_id=None,
add_nics=None, delete_nics=None):
self.compute_id = compute_id
self.amphora_id = amphora_id
self.add_nics = add_nics
self.delete_nics = delete_nics
class Network(data_models.BaseDataModel):
def __init__(self, id=None, name=None, subnets=None,
project_id=None, admin_state_up=None, mtu=None,
provider_network_type=None,
provider_physical_network=None,
provider_segmentation_id=None,
router_external=None):
self.id = id
self.name = name
self.subnets = subnets
self.project_id = project_id
self.admin_state_up = admin_state_up
self.provider_network_type = provider_network_type
self.provider_physical_network = provider_physical_network
self.provider_segmentation_id = provider_segmentation_id
self.router_external = router_external
self.mtu = mtu
class Subnet(data_models.BaseDataModel):
def __init__(self, id=None, name=None, network_id=None, project_id=None,
gateway_ip=None, cidr=None, ip_version=None):
self.id = id
self.name = name
self.network_id = network_id
self.project_id = project_id
self.gateway_ip = gateway_ip
self.cidr = cidr
self.ip_version = ip_version
class Port(data_models.BaseDataModel):
def __init__(self, id=None, name=None, device_id=None, device_owner=None,
mac_address=None, network_id=None, status=None,
project_id=None, admin_state_up=None, fixed_ips=None,
network=None):
self.id = id
self.name = name
self.device_id = device_id
self.device_owner = device_owner
self.mac_address = mac_address
self.network_id = network_id
self.status = status
self.project_id = project_id
self.admin_state_up = admin_state_up
self.fixed_ips = fixed_ips or []
self.network = network
def get_subnet_id(self, fixed_ip_address):
for fixed_ip in self.fixed_ips:
if fixed_ip.ip_address == fixed_ip_address:
return fixed_ip.subnet_id
class FixedIP(data_models.BaseDataModel):
def __init__(self, subnet_id=None, ip_address=None, subnet=None):
self.subnet_id = subnet_id
self.ip_address = ip_address
self.subnet = subnet
class AmphoraNetworkConfig(data_models.BaseDataModel):
def __init__(self, amphora=None, vip_subnet=None, vip_port=None,
|
self.vip_port = vip_port
self.vrrp_subnet = vrrp_subnet
self.vrrp_port = vrrp_port
self.ha_subnet = ha_subnet
self.ha_port = ha_port
|
vrrp_subnet=None, vrrp_port=None, ha_subnet=None,
ha_port=None):
self.amphora = amphora
self.vip_subnet = vip_subnet
|
random_line_split
|
data_models.py
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\
common import data_models
class Interface(data_models.BaseDataModel):
def __init__(self, id=None, compute_id=None, network_id=None,
fixed_ips=None, port_id=None):
self.id = id
self.compute_id = compute_id
self.network_id = network_id
self.port_id = port_id
self.fixed_ips = fixed_ips
class Delta(data_models.BaseDataModel):
def __init__(self, amphora_id=None, compute_id=None,
add_nics=None, delete_nics=None):
self.compute_id = compute_id
self.amphora_id = amphora_id
self.add_nics = add_nics
self.delete_nics = delete_nics
class Network(data_models.BaseDataModel):
def __init__(self, id=None, name=None, subnets=None,
project_id=None, admin_state_up=None, mtu=None,
provider_network_type=None,
provider_physical_network=None,
provider_segmentation_id=None,
router_external=None):
self.id = id
self.name = name
self.subnets = subnets
self.project_id = project_id
self.admin_state_up = admin_state_up
self.provider_network_type = provider_network_type
self.provider_physical_network = provider_physical_network
self.provider_segmentation_id = provider_segmentation_id
self.router_external = router_external
self.mtu = mtu
class Subnet(data_models.BaseDataModel):
def __init__(self, id=None, name=None, network_id=None, project_id=None,
gateway_ip=None, cidr=None, ip_version=None):
self.id = id
self.name = name
self.network_id = network_id
self.project_id = project_id
self.gateway_ip = gateway_ip
self.cidr = cidr
self.ip_version = ip_version
class Port(data_models.BaseDataModel):
def __init__(self, id=None, name=None, device_id=None, device_owner=None,
mac_address=None, network_id=None, status=None,
project_id=None, admin_state_up=None, fixed_ips=None,
network=None):
self.id = id
self.name = name
self.device_id = device_id
self.device_owner = device_owner
self.mac_address = mac_address
self.network_id = network_id
self.status = status
self.project_id = project_id
self.admin_state_up = admin_state_up
self.fixed_ips = fixed_ips or []
self.network = network
def get_subnet_id(self, fixed_ip_address):
for fixed_ip in self.fixed_ips:
if fixed_ip.ip_address == fixed_ip_address:
|
class FixedIP(data_models.BaseDataModel):
def __init__(self, subnet_id=None, ip_address=None, subnet=None):
self.subnet_id = subnet_id
self.ip_address = ip_address
self.subnet = subnet
class AmphoraNetworkConfig(data_models.BaseDataModel):
def __init__(self, amphora=None, vip_subnet=None, vip_port=None,
vrrp_subnet=None, vrrp_port=None, ha_subnet=None,
ha_port=None):
self.amphora = amphora
self.vip_subnet = vip_subnet
self.vip_port = vip_port
self.vrrp_subnet = vrrp_subnet
self.vrrp_port = vrrp_port
self.ha_subnet = ha_subnet
self.ha_port = ha_port
|
return fixed_ip.subnet_id
|
conditional_block
|
index.js
|
'use strict';
var yeoman = require('yeoman-generator');
var chalk = require('chalk');
var yosay = require('yosay');
module.exports = yeoman.Base.extend({
|
var prompts = [{
type: 'input',
name: 'name',
message: 'Your project name',
//Defaults to the project's folder name if the input is skipped
default: this.appname
}];
return this.prompt(prompts).then(function (answers) {
this.props = answers;
this.log(answers.name);
}.bind(this));
},
writing: function () {
this.fs.copy(
this.templatePath('app'),
this.destinationPath(this.props.name+'/app')
);
this.fs.copy(
this.templatePath('configs'),
this.destinationPath(this.props.name+'/configs')
);
this.fs.copyTpl(
this.templatePath('_README'),
this.destinationPath(this.props.name+'/README.md'), {
name: this.props.name
}
);
this.fs.copy(
this.templatePath('babelrc'),
this.destinationPath(this.props.name+'/.babelrc')
);
this.fs.copy(
this.templatePath('eslintrc'),
this.destinationPath(this.props.name+'/.eslintrc')
);
this.fs.copy(
this.templatePath('gitignore'),
this.destinationPath(this.props.name+'/.gitignore')
);
this.fs.copyTpl(
this.templatePath('_package.json'),
this.destinationPath(this.props.name+'/package.json'), {
name: this.props.name
}
);
this.fs.copy(
this.templatePath('server.js'),
this.destinationPath(this.props.name+'/server.js')
);
this.fs.copy(
this.templatePath('user.yml.example'),
this.destinationPath(this.props.name+'/user.yml.example')
);
},
install: function () {
var elementDir = process.cwd() + '/' + this.props.name;
process.chdir(elementDir);
var prompts = [{
type: 'confirm',
name: 'install',
message: 'Would you like to install dependencies?',
default: true
}];
return this.prompt(prompts).then(function (props) {
if(props.install){
this.installDependencies();
}
}.bind(this));
},
end: function() {
this.log("All Done!");
},
});
|
prompting: function () {
this.log(yosay(
'Welcome to the ' + chalk.red('generator-react-app-boilerplate') + ' generator!'
));
|
random_line_split
|
index.js
|
'use strict';
var yeoman = require('yeoman-generator');
var chalk = require('chalk');
var yosay = require('yosay');
module.exports = yeoman.Base.extend({
prompting: function () {
this.log(yosay(
'Welcome to the ' + chalk.red('generator-react-app-boilerplate') + ' generator!'
));
var prompts = [{
type: 'input',
name: 'name',
message: 'Your project name',
//Defaults to the project's folder name if the input is skipped
default: this.appname
}];
return this.prompt(prompts).then(function (answers) {
this.props = answers;
this.log(answers.name);
}.bind(this));
},
writing: function () {
this.fs.copy(
this.templatePath('app'),
this.destinationPath(this.props.name+'/app')
);
this.fs.copy(
this.templatePath('configs'),
this.destinationPath(this.props.name+'/configs')
);
this.fs.copyTpl(
this.templatePath('_README'),
this.destinationPath(this.props.name+'/README.md'), {
name: this.props.name
}
);
this.fs.copy(
this.templatePath('babelrc'),
this.destinationPath(this.props.name+'/.babelrc')
);
this.fs.copy(
this.templatePath('eslintrc'),
this.destinationPath(this.props.name+'/.eslintrc')
);
this.fs.copy(
this.templatePath('gitignore'),
this.destinationPath(this.props.name+'/.gitignore')
);
this.fs.copyTpl(
this.templatePath('_package.json'),
this.destinationPath(this.props.name+'/package.json'), {
name: this.props.name
}
);
this.fs.copy(
this.templatePath('server.js'),
this.destinationPath(this.props.name+'/server.js')
);
this.fs.copy(
this.templatePath('user.yml.example'),
this.destinationPath(this.props.name+'/user.yml.example')
);
},
install: function () {
var elementDir = process.cwd() + '/' + this.props.name;
process.chdir(elementDir);
var prompts = [{
type: 'confirm',
name: 'install',
message: 'Would you like to install dependencies?',
default: true
}];
return this.prompt(prompts).then(function (props) {
if(props.install)
|
}.bind(this));
},
end: function() {
this.log("All Done!");
},
});
|
{
this.installDependencies();
}
|
conditional_block
|
future-reserved-words.ts
|
/*
* SonarQube JavaScript Plugin
* Copyright (C) 2011-2021 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
// https://jira.sonarsource.com/browse/RSPEC-1527
import { Rule, Scope } from 'eslint';
const futureReservedWords = [
'implements',
'interface',
'package',
'private',
'protected',
'public',
'enum',
'class',
'const',
'export',
'extends',
'import',
'super',
'let',
'static',
'yield',
'await',
];
export const rule: Rule.RuleModule = {
create(context: Rule.RuleContext) {
function checkVariable(variable: Scope.Variable) {
if (variable.defs.length > 0)
|
}
function checkVariablesByScope(scope: Scope.Scope) {
scope.variables.filter(v => futureReservedWords.includes(v.name)).forEach(checkVariable);
scope.childScopes.forEach(childScope => {
checkVariablesByScope(childScope);
});
}
return {
'Program:exit': () => {
checkVariablesByScope(context.getScope());
},
};
},
};
|
{
const def = variable.defs[0].name;
context.report({
node: def,
message:
`Rename "${variable.name}" identifier to prevent potential conflicts ` +
`with future evolutions of the JavaScript language.`,
});
}
|
conditional_block
|
future-reserved-words.ts
|
/*
* SonarQube JavaScript Plugin
* Copyright (C) 2011-2021 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
// https://jira.sonarsource.com/browse/RSPEC-1527
import { Rule, Scope } from 'eslint';
const futureReservedWords = [
'implements',
'interface',
'package',
'private',
'protected',
'public',
'enum',
'class',
'const',
'export',
'extends',
'import',
'super',
'let',
'static',
'yield',
'await',
];
export const rule: Rule.RuleModule = {
create(context: Rule.RuleContext) {
function
|
(variable: Scope.Variable) {
if (variable.defs.length > 0) {
const def = variable.defs[0].name;
context.report({
node: def,
message:
`Rename "${variable.name}" identifier to prevent potential conflicts ` +
`with future evolutions of the JavaScript language.`,
});
}
}
function checkVariablesByScope(scope: Scope.Scope) {
scope.variables.filter(v => futureReservedWords.includes(v.name)).forEach(checkVariable);
scope.childScopes.forEach(childScope => {
checkVariablesByScope(childScope);
});
}
return {
'Program:exit': () => {
checkVariablesByScope(context.getScope());
},
};
},
};
|
checkVariable
|
identifier_name
|
future-reserved-words.ts
|
/*
* SonarQube JavaScript Plugin
* Copyright (C) 2011-2021 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
// https://jira.sonarsource.com/browse/RSPEC-1527
import { Rule, Scope } from 'eslint';
const futureReservedWords = [
'implements',
'interface',
'package',
'private',
'protected',
'public',
'enum',
'class',
'const',
'export',
'extends',
'import',
'super',
'let',
'static',
'yield',
'await',
];
export const rule: Rule.RuleModule = {
create(context: Rule.RuleContext) {
function checkVariable(variable: Scope.Variable)
|
function checkVariablesByScope(scope: Scope.Scope) {
scope.variables.filter(v => futureReservedWords.includes(v.name)).forEach(checkVariable);
scope.childScopes.forEach(childScope => {
checkVariablesByScope(childScope);
});
}
return {
'Program:exit': () => {
checkVariablesByScope(context.getScope());
},
};
},
};
|
{
if (variable.defs.length > 0) {
const def = variable.defs[0].name;
context.report({
node: def,
message:
`Rename "${variable.name}" identifier to prevent potential conflicts ` +
`with future evolutions of the JavaScript language.`,
});
}
}
|
identifier_body
|
future-reserved-words.ts
|
/*
* SonarQube JavaScript Plugin
* Copyright (C) 2011-2021 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
// https://jira.sonarsource.com/browse/RSPEC-1527
import { Rule, Scope } from 'eslint';
const futureReservedWords = [
'implements',
'interface',
'package',
'private',
'protected',
'public',
'enum',
'class',
'const',
'export',
'extends',
'import',
'super',
'let',
'static',
'yield',
'await',
];
export const rule: Rule.RuleModule = {
create(context: Rule.RuleContext) {
function checkVariable(variable: Scope.Variable) {
if (variable.defs.length > 0) {
const def = variable.defs[0].name;
context.report({
node: def,
message:
`Rename "${variable.name}" identifier to prevent potential conflicts ` +
`with future evolutions of the JavaScript language.`,
});
}
|
function checkVariablesByScope(scope: Scope.Scope) {
scope.variables.filter(v => futureReservedWords.includes(v.name)).forEach(checkVariable);
scope.childScopes.forEach(childScope => {
checkVariablesByScope(childScope);
});
}
return {
'Program:exit': () => {
checkVariablesByScope(context.getScope());
},
};
},
};
|
}
|
random_line_split
|
rand_util.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::rand;
// random uint less than n
fn under(r : rand::rng, n : uint) -> uint {
assert!(n != 0u); r.next() as uint % n
}
// random choice from a vec
fn choice<T:copy>(r : rand::rng, v : ~[T]) -> T {
assert!(vec::len(v) != 0u); v[under(r, vec::len(v))]
}
// 1 in n chance of being true
fn
|
(r : rand::rng, n : uint) -> bool { under(r, n) == 0u }
// shuffle a vec in place
fn shuffle<T>(r : rand::rng, &v : ~[T]) {
let i = vec::len(v);
while i >= 2u {
// Loop invariant: elements with index >= i have been locked in place.
i -= 1u;
vec::swap(v, i, under(r, i + 1u)); // Lock element i in place.
}
}
// create a shuffled copy of a vec
fn shuffled<T:copy>(r : rand::rng, v : ~[T]) -> ~[T] {
let w = vec::to_mut(v);
shuffle(r, w);
vec::from_mut(w) // Shouldn't this happen automatically?
}
// sample from a population without replacement
//fn sample<T>(r : rand::rng, pop : ~[T], k : uint) -> ~[T] { fail!() }
// Two ways to make a weighted choice.
// * weighted_choice is O(number of choices) time
// * weighted_vec is O(total weight) space
type weighted<T> = { weight: uint, item: T };
fn weighted_choice<T:copy>(r : rand::rng, v : ~[weighted<T>]) -> T {
assert!(vec::len(v) != 0u);
let total = 0u;
for {weight: weight, item: _} in v {
total += weight;
}
assert!(total >= 0u);
let chosen = under(r, total);
let so_far = 0u;
for {weight: weight, item: item} in v {
so_far += weight;
if so_far > chosen {
return item;
}
}
core::unreachable();
}
fn weighted_vec<T:copy>(v : ~[weighted<T>]) -> ~[T] {
let r = ~[];
for {weight: weight, item: item} in v {
let i = 0u;
while i < weight {
r.push(item);
i += 1u;
}
}
r
}
fn main()
{
let r = rand::mk_rng();
log(error, under(r, 5u));
log(error, choice(r, ~[10, 20, 30]));
log(error, if unlikely(r, 5u) { "unlikely" } else { "likely" });
let mut a = ~[1, 2, 3];
shuffle(r, a);
log(error, a);
let i = 0u;
let v = ~[
{weight:1u, item:"low"},
{weight:8u, item:"middle"},
{weight:1u, item:"high"}
];
let w = weighted_vec(v);
while i < 1000u {
log(error, "Immed: " + weighted_choice(r, v));
log(error, "Fast: " + choice(r, w));
i += 1u;
}
}
|
unlikely
|
identifier_name
|
rand_util.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::rand;
// random uint less than n
fn under(r : rand::rng, n : uint) -> uint {
assert!(n != 0u); r.next() as uint % n
}
// random choice from a vec
fn choice<T:copy>(r : rand::rng, v : ~[T]) -> T {
assert!(vec::len(v) != 0u); v[under(r, vec::len(v))]
}
// 1 in n chance of being true
fn unlikely(r : rand::rng, n : uint) -> bool { under(r, n) == 0u }
// shuffle a vec in place
fn shuffle<T>(r : rand::rng, &v : ~[T]) {
let i = vec::len(v);
while i >= 2u {
// Loop invariant: elements with index >= i have been locked in place.
i -= 1u;
vec::swap(v, i, under(r, i + 1u)); // Lock element i in place.
}
}
// create a shuffled copy of a vec
fn shuffled<T:copy>(r : rand::rng, v : ~[T]) -> ~[T] {
let w = vec::to_mut(v);
shuffle(r, w);
vec::from_mut(w) // Shouldn't this happen automatically?
}
// sample from a population without replacement
//fn sample<T>(r : rand::rng, pop : ~[T], k : uint) -> ~[T] { fail!() }
// Two ways to make a weighted choice.
// * weighted_choice is O(number of choices) time
// * weighted_vec is O(total weight) space
type weighted<T> = { weight: uint, item: T };
fn weighted_choice<T:copy>(r : rand::rng, v : ~[weighted<T>]) -> T {
assert!(vec::len(v) != 0u);
let total = 0u;
for {weight: weight, item: _} in v {
total += weight;
}
assert!(total >= 0u);
let chosen = under(r, total);
let so_far = 0u;
for {weight: weight, item: item} in v {
so_far += weight;
if so_far > chosen {
return item;
}
}
core::unreachable();
}
fn weighted_vec<T:copy>(v : ~[weighted<T>]) -> ~[T] {
let r = ~[];
for {weight: weight, item: item} in v {
let i = 0u;
while i < weight {
r.push(item);
i += 1u;
}
}
r
}
fn main()
{
let r = rand::mk_rng();
log(error, under(r, 5u));
log(error, choice(r, ~[10, 20, 30]));
log(error, if unlikely(r, 5u) { "unlikely" } else
|
);
let mut a = ~[1, 2, 3];
shuffle(r, a);
log(error, a);
let i = 0u;
let v = ~[
{weight:1u, item:"low"},
{weight:8u, item:"middle"},
{weight:1u, item:"high"}
];
let w = weighted_vec(v);
while i < 1000u {
log(error, "Immed: " + weighted_choice(r, v));
log(error, "Fast: " + choice(r, w));
i += 1u;
}
}
|
{ "likely" }
|
conditional_block
|
rand_util.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::rand;
// random uint less than n
fn under(r : rand::rng, n : uint) -> uint {
assert!(n != 0u); r.next() as uint % n
}
// random choice from a vec
fn choice<T:copy>(r : rand::rng, v : ~[T]) -> T {
assert!(vec::len(v) != 0u); v[under(r, vec::len(v))]
}
// 1 in n chance of being true
fn unlikely(r : rand::rng, n : uint) -> bool { under(r, n) == 0u }
// shuffle a vec in place
fn shuffle<T>(r : rand::rng, &v : ~[T]) {
let i = vec::len(v);
while i >= 2u {
// Loop invariant: elements with index >= i have been locked in place.
i -= 1u;
vec::swap(v, i, under(r, i + 1u)); // Lock element i in place.
}
}
// create a shuffled copy of a vec
fn shuffled<T:copy>(r : rand::rng, v : ~[T]) -> ~[T] {
let w = vec::to_mut(v);
shuffle(r, w);
vec::from_mut(w) // Shouldn't this happen automatically?
}
// sample from a population without replacement
//fn sample<T>(r : rand::rng, pop : ~[T], k : uint) -> ~[T] { fail!() }
// Two ways to make a weighted choice.
// * weighted_choice is O(number of choices) time
|
assert!(vec::len(v) != 0u);
let total = 0u;
for {weight: weight, item: _} in v {
total += weight;
}
assert!(total >= 0u);
let chosen = under(r, total);
let so_far = 0u;
for {weight: weight, item: item} in v {
so_far += weight;
if so_far > chosen {
return item;
}
}
core::unreachable();
}
fn weighted_vec<T:copy>(v : ~[weighted<T>]) -> ~[T] {
let r = ~[];
for {weight: weight, item: item} in v {
let i = 0u;
while i < weight {
r.push(item);
i += 1u;
}
}
r
}
fn main()
{
let r = rand::mk_rng();
log(error, under(r, 5u));
log(error, choice(r, ~[10, 20, 30]));
log(error, if unlikely(r, 5u) { "unlikely" } else { "likely" });
let mut a = ~[1, 2, 3];
shuffle(r, a);
log(error, a);
let i = 0u;
let v = ~[
{weight:1u, item:"low"},
{weight:8u, item:"middle"},
{weight:1u, item:"high"}
];
let w = weighted_vec(v);
while i < 1000u {
log(error, "Immed: " + weighted_choice(r, v));
log(error, "Fast: " + choice(r, w));
i += 1u;
}
}
|
// * weighted_vec is O(total weight) space
type weighted<T> = { weight: uint, item: T };
fn weighted_choice<T:copy>(r : rand::rng, v : ~[weighted<T>]) -> T {
|
random_line_split
|
rand_util.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::rand;
// random uint less than n
fn under(r : rand::rng, n : uint) -> uint {
assert!(n != 0u); r.next() as uint % n
}
// random choice from a vec
fn choice<T:copy>(r : rand::rng, v : ~[T]) -> T {
assert!(vec::len(v) != 0u); v[under(r, vec::len(v))]
}
// 1 in n chance of being true
fn unlikely(r : rand::rng, n : uint) -> bool { under(r, n) == 0u }
// shuffle a vec in place
fn shuffle<T>(r : rand::rng, &v : ~[T]) {
let i = vec::len(v);
while i >= 2u {
// Loop invariant: elements with index >= i have been locked in place.
i -= 1u;
vec::swap(v, i, under(r, i + 1u)); // Lock element i in place.
}
}
// create a shuffled copy of a vec
fn shuffled<T:copy>(r : rand::rng, v : ~[T]) -> ~[T] {
let w = vec::to_mut(v);
shuffle(r, w);
vec::from_mut(w) // Shouldn't this happen automatically?
}
// sample from a population without replacement
//fn sample<T>(r : rand::rng, pop : ~[T], k : uint) -> ~[T] { fail!() }
// Two ways to make a weighted choice.
// * weighted_choice is O(number of choices) time
// * weighted_vec is O(total weight) space
type weighted<T> = { weight: uint, item: T };
fn weighted_choice<T:copy>(r : rand::rng, v : ~[weighted<T>]) -> T {
assert!(vec::len(v) != 0u);
let total = 0u;
for {weight: weight, item: _} in v {
total += weight;
}
assert!(total >= 0u);
let chosen = under(r, total);
let so_far = 0u;
for {weight: weight, item: item} in v {
so_far += weight;
if so_far > chosen {
return item;
}
}
core::unreachable();
}
fn weighted_vec<T:copy>(v : ~[weighted<T>]) -> ~[T] {
let r = ~[];
for {weight: weight, item: item} in v {
let i = 0u;
while i < weight {
r.push(item);
i += 1u;
}
}
r
}
fn main()
|
{
let r = rand::mk_rng();
log(error, under(r, 5u));
log(error, choice(r, ~[10, 20, 30]));
log(error, if unlikely(r, 5u) { "unlikely" } else { "likely" });
let mut a = ~[1, 2, 3];
shuffle(r, a);
log(error, a);
let i = 0u;
let v = ~[
{weight:1u, item:"low"},
{weight:8u, item:"middle"},
{weight:1u, item:"high"}
];
let w = weighted_vec(v);
while i < 1000u {
log(error, "Immed: " + weighted_choice(r, v));
log(error, "Fast: " + choice(r, w));
i += 1u;
}
}
|
identifier_body
|
|
lib.rs
|
#![doc(html_root_url="https://docs.rs/phf_generator/0.7.20")]
extern crate phf_shared;
extern crate rand;
use phf_shared::PhfHash;
use rand::{SeedableRng, XorShiftRng, Rng};
const DEFAULT_LAMBDA: usize = 5;
const FIXED_SEED: [u32; 4] = [3141592653, 589793238, 462643383, 2795028841];
pub struct
|
{
pub key: u64,
pub disps: Vec<(u32, u32)>,
pub map: Vec<usize>,
}
pub fn generate_hash<H: PhfHash>(entries: &[H]) -> HashState {
let mut rng = XorShiftRng::from_seed(FIXED_SEED);
loop {
if let Some(s) = try_generate_hash(entries, &mut rng) {
return s;
}
}
}
fn try_generate_hash<H: PhfHash>(entries: &[H], rng: &mut XorShiftRng) -> Option<HashState> {
struct Bucket {
idx: usize,
keys: Vec<usize>,
}
struct Hashes {
g: u32,
f1: u32,
f2: u32,
}
let key = rng.gen();
let hashes: Vec<_> = entries.iter()
.map(|entry| {
let hash = phf_shared::hash(entry, key);
let (g, f1, f2) = phf_shared::split(hash);
Hashes {
g: g,
f1: f1,
f2: f2,
}
})
.collect();
let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA;
let mut buckets = (0..buckets_len)
.map(|i| {
Bucket {
idx: i,
keys: vec![],
}
})
.collect::<Vec<_>>();
for (i, hash) in hashes.iter().enumerate() {
buckets[(hash.g % (buckets_len as u32)) as usize].keys.push(i);
}
// Sort descending
buckets.sort_by(|a, b| a.keys.len().cmp(&b.keys.len()).reverse());
let table_len = entries.len();
let mut map = vec![None; table_len];
let mut disps = vec![(0u32, 0u32); buckets_len];
// store whether an element from the bucket being placed is
// located at a certain position, to allow for efficient overlap
// checks. It works by storing the generation in each cell and
// each new placement-attempt is a new generation, so you can tell
// if this is legitimately full by checking that the generations
// are equal. (A u64 is far too large to overflow in a reasonable
// time for current hardware.)
let mut try_map = vec![0u64; table_len];
let mut generation = 0u64;
// the actual values corresponding to the markers above, as
// (index, key) pairs, for adding to the main map once we've
// chosen the right disps.
let mut values_to_add = vec![];
'buckets: for bucket in &buckets {
for d1 in 0..(table_len as u32) {
'disps: for d2 in 0..(table_len as u32) {
values_to_add.clear();
generation += 1;
for &key in &bucket.keys {
let idx = (phf_shared::displace(hashes[key].f1, hashes[key].f2, d1, d2) %
(table_len as u32)) as usize;
if map[idx].is_some() || try_map[idx] == generation {
continue 'disps;
}
try_map[idx] = generation;
values_to_add.push((idx, key));
}
// We've picked a good set of disps
disps[bucket.idx] = (d1, d2);
for &(idx, key) in &values_to_add {
map[idx] = Some(key);
}
continue 'buckets;
}
}
// Unable to find displacements for a bucket
return None;
}
Some(HashState {
key: key,
disps: disps,
map: map.into_iter().map(|i| i.unwrap()).collect(),
})
}
|
HashState
|
identifier_name
|
lib.rs
|
#![doc(html_root_url="https://docs.rs/phf_generator/0.7.20")]
extern crate phf_shared;
extern crate rand;
use phf_shared::PhfHash;
use rand::{SeedableRng, XorShiftRng, Rng};
const DEFAULT_LAMBDA: usize = 5;
const FIXED_SEED: [u32; 4] = [3141592653, 589793238, 462643383, 2795028841];
pub struct HashState {
pub key: u64,
pub disps: Vec<(u32, u32)>,
pub map: Vec<usize>,
}
pub fn generate_hash<H: PhfHash>(entries: &[H]) -> HashState
|
fn try_generate_hash<H: PhfHash>(entries: &[H], rng: &mut XorShiftRng) -> Option<HashState> {
struct Bucket {
idx: usize,
keys: Vec<usize>,
}
struct Hashes {
g: u32,
f1: u32,
f2: u32,
}
let key = rng.gen();
let hashes: Vec<_> = entries.iter()
.map(|entry| {
let hash = phf_shared::hash(entry, key);
let (g, f1, f2) = phf_shared::split(hash);
Hashes {
g: g,
f1: f1,
f2: f2,
}
})
.collect();
let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA;
let mut buckets = (0..buckets_len)
.map(|i| {
Bucket {
idx: i,
keys: vec![],
}
})
.collect::<Vec<_>>();
for (i, hash) in hashes.iter().enumerate() {
buckets[(hash.g % (buckets_len as u32)) as usize].keys.push(i);
}
// Sort descending
buckets.sort_by(|a, b| a.keys.len().cmp(&b.keys.len()).reverse());
let table_len = entries.len();
let mut map = vec![None; table_len];
let mut disps = vec![(0u32, 0u32); buckets_len];
// store whether an element from the bucket being placed is
// located at a certain position, to allow for efficient overlap
// checks. It works by storing the generation in each cell and
// each new placement-attempt is a new generation, so you can tell
// if this is legitimately full by checking that the generations
// are equal. (A u64 is far too large to overflow in a reasonable
// time for current hardware.)
let mut try_map = vec![0u64; table_len];
let mut generation = 0u64;
// the actual values corresponding to the markers above, as
// (index, key) pairs, for adding to the main map once we've
// chosen the right disps.
let mut values_to_add = vec![];
'buckets: for bucket in &buckets {
for d1 in 0..(table_len as u32) {
'disps: for d2 in 0..(table_len as u32) {
values_to_add.clear();
generation += 1;
for &key in &bucket.keys {
let idx = (phf_shared::displace(hashes[key].f1, hashes[key].f2, d1, d2) %
(table_len as u32)) as usize;
if map[idx].is_some() || try_map[idx] == generation {
continue 'disps;
}
try_map[idx] = generation;
values_to_add.push((idx, key));
}
// We've picked a good set of disps
disps[bucket.idx] = (d1, d2);
for &(idx, key) in &values_to_add {
map[idx] = Some(key);
}
continue 'buckets;
}
}
// Unable to find displacements for a bucket
return None;
}
Some(HashState {
key: key,
disps: disps,
map: map.into_iter().map(|i| i.unwrap()).collect(),
})
}
|
{
let mut rng = XorShiftRng::from_seed(FIXED_SEED);
loop {
if let Some(s) = try_generate_hash(entries, &mut rng) {
return s;
}
}
}
|
identifier_body
|
lib.rs
|
#![doc(html_root_url="https://docs.rs/phf_generator/0.7.20")]
extern crate phf_shared;
extern crate rand;
use phf_shared::PhfHash;
use rand::{SeedableRng, XorShiftRng, Rng};
const DEFAULT_LAMBDA: usize = 5;
const FIXED_SEED: [u32; 4] = [3141592653, 589793238, 462643383, 2795028841];
pub struct HashState {
pub key: u64,
pub disps: Vec<(u32, u32)>,
pub map: Vec<usize>,
}
pub fn generate_hash<H: PhfHash>(entries: &[H]) -> HashState {
let mut rng = XorShiftRng::from_seed(FIXED_SEED);
loop {
if let Some(s) = try_generate_hash(entries, &mut rng) {
return s;
}
}
}
fn try_generate_hash<H: PhfHash>(entries: &[H], rng: &mut XorShiftRng) -> Option<HashState> {
struct Bucket {
idx: usize,
keys: Vec<usize>,
}
struct Hashes {
g: u32,
f1: u32,
f2: u32,
}
let key = rng.gen();
let hashes: Vec<_> = entries.iter()
.map(|entry| {
let hash = phf_shared::hash(entry, key);
let (g, f1, f2) = phf_shared::split(hash);
Hashes {
g: g,
f1: f1,
f2: f2,
}
})
.collect();
let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA;
let mut buckets = (0..buckets_len)
.map(|i| {
Bucket {
idx: i,
keys: vec![],
}
|
}
// Sort descending
buckets.sort_by(|a, b| a.keys.len().cmp(&b.keys.len()).reverse());
let table_len = entries.len();
let mut map = vec![None; table_len];
let mut disps = vec![(0u32, 0u32); buckets_len];
// store whether an element from the bucket being placed is
// located at a certain position, to allow for efficient overlap
// checks. It works by storing the generation in each cell and
// each new placement-attempt is a new generation, so you can tell
// if this is legitimately full by checking that the generations
// are equal. (A u64 is far too large to overflow in a reasonable
// time for current hardware.)
let mut try_map = vec![0u64; table_len];
let mut generation = 0u64;
// the actual values corresponding to the markers above, as
// (index, key) pairs, for adding to the main map once we've
// chosen the right disps.
let mut values_to_add = vec![];
'buckets: for bucket in &buckets {
for d1 in 0..(table_len as u32) {
'disps: for d2 in 0..(table_len as u32) {
values_to_add.clear();
generation += 1;
for &key in &bucket.keys {
let idx = (phf_shared::displace(hashes[key].f1, hashes[key].f2, d1, d2) %
(table_len as u32)) as usize;
if map[idx].is_some() || try_map[idx] == generation {
continue 'disps;
}
try_map[idx] = generation;
values_to_add.push((idx, key));
}
// We've picked a good set of disps
disps[bucket.idx] = (d1, d2);
for &(idx, key) in &values_to_add {
map[idx] = Some(key);
}
continue 'buckets;
}
}
// Unable to find displacements for a bucket
return None;
}
Some(HashState {
key: key,
disps: disps,
map: map.into_iter().map(|i| i.unwrap()).collect(),
})
}
|
})
.collect::<Vec<_>>();
for (i, hash) in hashes.iter().enumerate() {
buckets[(hash.g % (buckets_len as u32)) as usize].keys.push(i);
|
random_line_split
|
lib.rs
|
#![doc(html_root_url="https://docs.rs/phf_generator/0.7.20")]
extern crate phf_shared;
extern crate rand;
use phf_shared::PhfHash;
use rand::{SeedableRng, XorShiftRng, Rng};
const DEFAULT_LAMBDA: usize = 5;
const FIXED_SEED: [u32; 4] = [3141592653, 589793238, 462643383, 2795028841];
pub struct HashState {
pub key: u64,
pub disps: Vec<(u32, u32)>,
pub map: Vec<usize>,
}
pub fn generate_hash<H: PhfHash>(entries: &[H]) -> HashState {
let mut rng = XorShiftRng::from_seed(FIXED_SEED);
loop {
if let Some(s) = try_generate_hash(entries, &mut rng)
|
}
}
fn try_generate_hash<H: PhfHash>(entries: &[H], rng: &mut XorShiftRng) -> Option<HashState> {
struct Bucket {
idx: usize,
keys: Vec<usize>,
}
struct Hashes {
g: u32,
f1: u32,
f2: u32,
}
let key = rng.gen();
let hashes: Vec<_> = entries.iter()
.map(|entry| {
let hash = phf_shared::hash(entry, key);
let (g, f1, f2) = phf_shared::split(hash);
Hashes {
g: g,
f1: f1,
f2: f2,
}
})
.collect();
let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA;
let mut buckets = (0..buckets_len)
.map(|i| {
Bucket {
idx: i,
keys: vec![],
}
})
.collect::<Vec<_>>();
for (i, hash) in hashes.iter().enumerate() {
buckets[(hash.g % (buckets_len as u32)) as usize].keys.push(i);
}
// Sort descending
buckets.sort_by(|a, b| a.keys.len().cmp(&b.keys.len()).reverse());
let table_len = entries.len();
let mut map = vec![None; table_len];
let mut disps = vec![(0u32, 0u32); buckets_len];
// store whether an element from the bucket being placed is
// located at a certain position, to allow for efficient overlap
// checks. It works by storing the generation in each cell and
// each new placement-attempt is a new generation, so you can tell
// if this is legitimately full by checking that the generations
// are equal. (A u64 is far too large to overflow in a reasonable
// time for current hardware.)
let mut try_map = vec![0u64; table_len];
let mut generation = 0u64;
// the actual values corresponding to the markers above, as
// (index, key) pairs, for adding to the main map once we've
// chosen the right disps.
let mut values_to_add = vec![];
'buckets: for bucket in &buckets {
for d1 in 0..(table_len as u32) {
'disps: for d2 in 0..(table_len as u32) {
values_to_add.clear();
generation += 1;
for &key in &bucket.keys {
let idx = (phf_shared::displace(hashes[key].f1, hashes[key].f2, d1, d2) %
(table_len as u32)) as usize;
if map[idx].is_some() || try_map[idx] == generation {
continue 'disps;
}
try_map[idx] = generation;
values_to_add.push((idx, key));
}
// We've picked a good set of disps
disps[bucket.idx] = (d1, d2);
for &(idx, key) in &values_to_add {
map[idx] = Some(key);
}
continue 'buckets;
}
}
// Unable to find displacements for a bucket
return None;
}
Some(HashState {
key: key,
disps: disps,
map: map.into_iter().map(|i| i.unwrap()).collect(),
})
}
|
{
return s;
}
|
conditional_block
|
Tabs.tsx
|
import React, { useEffect, useRef, useState } from 'react';
import { cx } from '../../cx';
import './Tabs.css';
export type TabProps = {
children: React.ReactNode;
title: string;
};
const getTabId = (index: number, suffix?: string) =>
[`tab-${index}`, suffix].filter(Boolean).join('-');
export function Tabs({ children }) {
const firstRender = useRef(true);
const [currentTab, setCurrentTab] = useState(0);
const tabsRefs = useRef<HTMLElement[]>([]);
useEffect(() => {
if (!firstRender.current && tabsRefs.current) {
tabsRefs.current[currentTab].focus();
}
}, [currentTab]);
useEffect(() => {
firstRender.current = false;
}, []);
const onKeyDown = ({ key }: React.KeyboardEvent) => {
if (key === 'ArrowLeft') {
setCurrentTab(Math.max(0, currentTab - 1));
} else if (key === 'ArrowRight')
|
};
return (
<div className="Tabs">
<div role="tablist" className="Tabs-header">
{React.Children.map<React.ReactChild, React.ReactElement<TabProps>>(
children,
(child, index) => {
const isSelected = currentTab === index;
return (
<button
role="tab"
aria-selected={isSelected}
aria-controls={getTabId(index, 'item')}
id={getTabId(index, 'title')}
tabIndex={isSelected ? 0 : -1}
className={cx('Tabs-title', isSelected && 'Tabs-title--active')}
ref={(element) => (tabsRefs.current[index] = element!)}
key={getTabId(index)}
onClick={() => setCurrentTab(index)}
onKeyDown={onKeyDown}
>
{child.props.title}
</button>
);
}
)}
</div>
<div className="Tabs-list">
{React.Children.map(children, (child, index) => (
<div
tabIndex={0}
role="tabpanel"
id={getTabId(index, 'item')}
aria-labelledby={getTabId(index, 'title')}
hidden={currentTab !== index}
key={getTabId(index)}
>
{child}
</div>
))}
</div>
</div>
);
}
export function Tab({ children }: TabProps) {
return <>{children}</>;
}
|
{
setCurrentTab(
Math.min(currentTab + 1, React.Children.count(children) - 1)
);
}
|
conditional_block
|
Tabs.tsx
|
import React, { useEffect, useRef, useState } from 'react';
import { cx } from '../../cx';
import './Tabs.css';
|
};
const getTabId = (index: number, suffix?: string) =>
[`tab-${index}`, suffix].filter(Boolean).join('-');
export function Tabs({ children }) {
const firstRender = useRef(true);
const [currentTab, setCurrentTab] = useState(0);
const tabsRefs = useRef<HTMLElement[]>([]);
useEffect(() => {
if (!firstRender.current && tabsRefs.current) {
tabsRefs.current[currentTab].focus();
}
}, [currentTab]);
useEffect(() => {
firstRender.current = false;
}, []);
const onKeyDown = ({ key }: React.KeyboardEvent) => {
if (key === 'ArrowLeft') {
setCurrentTab(Math.max(0, currentTab - 1));
} else if (key === 'ArrowRight') {
setCurrentTab(
Math.min(currentTab + 1, React.Children.count(children) - 1)
);
}
};
return (
<div className="Tabs">
<div role="tablist" className="Tabs-header">
{React.Children.map<React.ReactChild, React.ReactElement<TabProps>>(
children,
(child, index) => {
const isSelected = currentTab === index;
return (
<button
role="tab"
aria-selected={isSelected}
aria-controls={getTabId(index, 'item')}
id={getTabId(index, 'title')}
tabIndex={isSelected ? 0 : -1}
className={cx('Tabs-title', isSelected && 'Tabs-title--active')}
ref={(element) => (tabsRefs.current[index] = element!)}
key={getTabId(index)}
onClick={() => setCurrentTab(index)}
onKeyDown={onKeyDown}
>
{child.props.title}
</button>
);
}
)}
</div>
<div className="Tabs-list">
{React.Children.map(children, (child, index) => (
<div
tabIndex={0}
role="tabpanel"
id={getTabId(index, 'item')}
aria-labelledby={getTabId(index, 'title')}
hidden={currentTab !== index}
key={getTabId(index)}
>
{child}
</div>
))}
</div>
</div>
);
}
export function Tab({ children }: TabProps) {
return <>{children}</>;
}
|
export type TabProps = {
children: React.ReactNode;
title: string;
|
random_line_split
|
convert_acewiki.py
|
#! /usr/bin/env python
# AceWiki data converter
# Author: Kaarel Kaljurand
# Version: 2012-02-23
#
# This script provides the conversion of a given AceWiki data file
# into other formats, e.g. JSON and GF.
#
# Examples:
#
# python convert_acewiki.py --in geo.acewikidata > Geo.json
# python convert_acewiki.py --in geo.acewikidata --format gfabs --name Geo > Geo.gf
# python convert_acewiki.py --in geo.acewikidata --format gfconc --name Geo > GeoEng.gf
# python convert_acewiki.py --in geo.acewikidata --format sentences > Geo.ace.txt
#
import sys
import argparse
import os
import re
import time
import simplejson
from string import Template
# Regular expression patterns
pattern_sep = re.compile('^\s*$')
pattern_name = re.compile('^([0-9]+)$')
pattern_type = re.compile('^type:([a-z]+)$')
pattern_words = re.compile('^words:(.+);$')
pattern_sentence = re.compile('^(c|\||#) (.+)$')
pattern_token = re.compile('^<([0-9]+),([0-9]+)>$')
# GF-specific templates and strings
# TODO: put these into a different module
gf = {}
gf['propername'] = ['PN', 'awPN']
gf['noun'] = ['CN', 'awCN']
gf['nounof'] = ['CN', 'awCNof']
gf['trverb'] = ['V2', 'awV2']
gf['tradj'] = ['A2', 'awA2']
gf['mainpage'] = ['Dummy', 'awDummy']
template_abs = Template("""
abstract ${name} = Attempto ** {
fun
""")
template_conc = Template("""
concrete ${name}Eng of $name = AttemptoEng **
open SyntaxEng, ParadigmsEng, IrregEng, (C = ConstructX) in {
-- TODO: review these, maybe we can do better
-- We use ~ as a dummy symbol which represents all the forms
-- that we do not want to actually generate.
-- This seems to be less confusing than using an empty string.
oper awCN : (_,_:Str) -> CN = \\sg,pl -> mkCN (ParadigmsEng.mkN sg pl) ;
oper awCNof : (_:Str) -> CN = \\x -> mkCN (ParadigmsEng.mkN x "~") ;
oper awPN : (_,_,_,_:Str) -> PN = \\x,d1,d2,d3 -> mkPN x ;
oper awV2 : (_,_,_:Str) -> V2 = \\goes,go,gone -> mkV2 (ParadigmsEng.mkV go goes "~" gone "~") ;
oper awA2 : (_:Str) -> A2 = \\x -> mkA2 (mkA x) (mkPrep "") ;
lin""")
def parse_acewiki(path):
|
def out_gf_abs(data, name):
"""
Outputs the data as GF abstract syntax file with the given name.
"""
print template_abs.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
[gf_type, gf_oper] = gf[type]
print 'w{}_{} : {} ;'.format(id, gf_type, gf_type)
print '}'
def gf_quote(str):
"""
Replaces underscores with spaces, escapes double quotes, and quotes the string.
"""
return '"' + str.replace("_", " ").replace('"', '\\"') + '"'
def rewrite_words(type, words):
"""
Removes 'by' from past participles ('trverb').
Removes 'of' from of-constructs ('nounof').
This is currently needed only for the GF output.
"""
if type == 'trverb':
[sg, pl, vbg] = words
vbg = re.sub(r' by$', '', vbg)
return [sg, pl, vbg]
elif type == 'nounof':
[nounof] = words
return [re.sub(r' of$', '', nounof)]
return words
def rewrite_token(data, token):
"""
If the given token is a function word then
returns it lowercased (unless it is a variable),
otherwise
returns the wordform that corresponds to the ID-representation.
"""
m = pattern_token.match(token)
if m is None:
if token in ['X', 'Y', 'Z']:
return token
return token.lower()
else:
try:
article_id = int(m.group(1))
wordform_id = int(m.group(2))
return data[article_id]['words'][wordform_id].replace('_', ' ')
except:
print >> sys.stderr, 'Warning: Bad token ID: {}'.format(token)
return token
def out_gf_conc(data, name):
"""
Outputs the data as GF concrete syntax file with the given name.
"""
print template_conc.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
words = rewrite_words(type, data[id]['words'])
words_as_str = ' '.join([gf_quote(x) for x in words])
[gf_type, gf_oper] = gf[type]
print 'w{}_{} = {} {} ;'.format(id, gf_type, gf_oper, words_as_str)
print '}'
def out_sentences(data):
"""
Outputs the data as a list of sentences (excl. comments).
The token IDs are resolved to the wordforms.
"""
for id in sorted(data):
if 'sentences' in data[id]:
for s in data[id]['sentences']:
if s['type'] == 'c':
continue
print ' '.join([rewrite_token(data, x) for x in s['content']])
# Commandline arguments parsing
parser = argparse.ArgumentParser(description='AceWiki data file converter')
parser.add_argument('-i', '--in', type=str, action='store', dest='file_in',
help='file that contains AceWiki data (OBLIGATORY)')
parser.add_argument('-n', '--name', type=str, action='store', dest='name',
default="Acewiki",
help='name of the grammar, used for GF outputs (default: Acewiki)')
parser.add_argument('-f', '--format', type=str, action='store', dest='fmt',
default="json",
help='output format, one of {json,gfabs,gfconc,sentences} (default: json)')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.1')
args = parser.parse_args()
# TODO: there is probably a better way to do this
if args.file_in is None:
print >> sys.stderr, 'ERROR: argument -i/--in is not specified'
exit()
data = parse_acewiki(args.file_in)
if args.fmt == "json":
print simplejson.dumps(data)
elif args.fmt == "gfabs":
out_gf_abs(data, args.name)
elif args.fmt == "gfconc":
out_gf_conc(data, args.name)
elif args.fmt == "sentences":
out_sentences(data)
else:
print >> sys.stderr, 'ERROR: Unsupported format: {}'.format(args.fmt)
|
"""
Parses AceWiki data file into a Python data structure.
TODO: Currently does not change the token representation (i.e. <id1,id2>).
Assumes that article IDs are integers.
This way we get an ordering for articles which results in a stable output.
"""
data = {}
id = -1
f = open(path, 'r')
for line in f:
line = line.strip()
m = pattern_name.match(line)
if m is not None:
id = int(m.group(1))
data[id] = {}
continue
m = pattern_type.match(line)
if m is not None:
data[id]['type'] = m.group(1)
continue
m = pattern_words.match(line)
if m is not None:
data[id]['words'] = m.group(1).split(';')
continue
m = pattern_sentence.match(line)
if m is not None:
if 'sentences' not in data[id]:
data[id]['sentences'] = []
data[id]['sentences'].append({ 'type': m.group(1), 'content' : m.group(2).split(' ') })
continue
f.close()
return data
|
identifier_body
|
convert_acewiki.py
|
#! /usr/bin/env python
# AceWiki data converter
# Author: Kaarel Kaljurand
# Version: 2012-02-23
#
# This script provides the conversion of a given AceWiki data file
# into other formats, e.g. JSON and GF.
#
# Examples:
#
# python convert_acewiki.py --in geo.acewikidata > Geo.json
# python convert_acewiki.py --in geo.acewikidata --format gfabs --name Geo > Geo.gf
# python convert_acewiki.py --in geo.acewikidata --format gfconc --name Geo > GeoEng.gf
# python convert_acewiki.py --in geo.acewikidata --format sentences > Geo.ace.txt
#
import sys
import argparse
import os
import re
import time
import simplejson
from string import Template
# Regular expression patterns
pattern_sep = re.compile('^\s*$')
pattern_name = re.compile('^([0-9]+)$')
pattern_type = re.compile('^type:([a-z]+)$')
pattern_words = re.compile('^words:(.+);$')
pattern_sentence = re.compile('^(c|\||#) (.+)$')
pattern_token = re.compile('^<([0-9]+),([0-9]+)>$')
# GF-specific templates and strings
# TODO: put these into a different module
gf = {}
gf['propername'] = ['PN', 'awPN']
gf['noun'] = ['CN', 'awCN']
gf['nounof'] = ['CN', 'awCNof']
gf['trverb'] = ['V2', 'awV2']
gf['tradj'] = ['A2', 'awA2']
gf['mainpage'] = ['Dummy', 'awDummy']
template_abs = Template("""
abstract ${name} = Attempto ** {
fun
""")
template_conc = Template("""
concrete ${name}Eng of $name = AttemptoEng **
open SyntaxEng, ParadigmsEng, IrregEng, (C = ConstructX) in {
-- TODO: review these, maybe we can do better
-- We use ~ as a dummy symbol which represents all the forms
-- that we do not want to actually generate.
-- This seems to be less confusing than using an empty string.
oper awCN : (_,_:Str) -> CN = \\sg,pl -> mkCN (ParadigmsEng.mkN sg pl) ;
oper awCNof : (_:Str) -> CN = \\x -> mkCN (ParadigmsEng.mkN x "~") ;
oper awPN : (_,_,_,_:Str) -> PN = \\x,d1,d2,d3 -> mkPN x ;
oper awV2 : (_,_,_:Str) -> V2 = \\goes,go,gone -> mkV2 (ParadigmsEng.mkV go goes "~" gone "~") ;
oper awA2 : (_:Str) -> A2 = \\x -> mkA2 (mkA x) (mkPrep "") ;
lin""")
def parse_acewiki(path):
"""
Parses AceWiki data file into a Python data structure.
TODO: Currently does not change the token representation (i.e. <id1,id2>).
Assumes that article IDs are integers.
This way we get an ordering for articles which results in a stable output.
"""
data = {}
id = -1
f = open(path, 'r')
for line in f:
line = line.strip()
m = pattern_name.match(line)
if m is not None:
id = int(m.group(1))
data[id] = {}
continue
m = pattern_type.match(line)
if m is not None:
data[id]['type'] = m.group(1)
continue
m = pattern_words.match(line)
if m is not None:
data[id]['words'] = m.group(1).split(';')
continue
m = pattern_sentence.match(line)
if m is not None:
if 'sentences' not in data[id]:
data[id]['sentences'] = []
data[id]['sentences'].append({ 'type': m.group(1), 'content' : m.group(2).split(' ') })
continue
f.close()
return data
def out_gf_abs(data, name):
"""
Outputs the data as GF abstract syntax file with the given name.
"""
print template_abs.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
[gf_type, gf_oper] = gf[type]
print 'w{}_{} : {} ;'.format(id, gf_type, gf_type)
print '}'
def gf_quote(str):
"""
Replaces underscores with spaces, escapes double quotes, and quotes the string.
"""
return '"' + str.replace("_", " ").replace('"', '\\"') + '"'
def rewrite_words(type, words):
"""
Removes 'by' from past participles ('trverb').
Removes 'of' from of-constructs ('nounof').
This is currently needed only for the GF output.
"""
if type == 'trverb':
[sg, pl, vbg] = words
vbg = re.sub(r' by$', '', vbg)
return [sg, pl, vbg]
elif type == 'nounof':
[nounof] = words
return [re.sub(r' of$', '', nounof)]
return words
def rewrite_token(data, token):
"""
If the given token is a function word then
returns it lowercased (unless it is a variable),
otherwise
returns the wordform that corresponds to the ID-representation.
"""
m = pattern_token.match(token)
if m is None:
if token in ['X', 'Y', 'Z']:
return token
return token.lower()
else:
try:
article_id = int(m.group(1))
wordform_id = int(m.group(2))
return data[article_id]['words'][wordform_id].replace('_', ' ')
except:
print >> sys.stderr, 'Warning: Bad token ID: {}'.format(token)
return token
def out_gf_conc(data, name):
"""
Outputs the data as GF concrete syntax file with the given name.
"""
print template_conc.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
words = rewrite_words(type, data[id]['words'])
words_as_str = ' '.join([gf_quote(x) for x in words])
[gf_type, gf_oper] = gf[type]
print 'w{}_{} = {} {} ;'.format(id, gf_type, gf_oper, words_as_str)
print '}'
def out_sentences(data):
"""
Outputs the data as a list of sentences (excl. comments).
The token IDs are resolved to the wordforms.
"""
for id in sorted(data):
if 'sentences' in data[id]:
|
# Commandline arguments parsing
parser = argparse.ArgumentParser(description='AceWiki data file converter')
parser.add_argument('-i', '--in', type=str, action='store', dest='file_in',
help='file that contains AceWiki data (OBLIGATORY)')
parser.add_argument('-n', '--name', type=str, action='store', dest='name',
default="Acewiki",
help='name of the grammar, used for GF outputs (default: Acewiki)')
parser.add_argument('-f', '--format', type=str, action='store', dest='fmt',
default="json",
help='output format, one of {json,gfabs,gfconc,sentences} (default: json)')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.1')
args = parser.parse_args()
# TODO: there is probably a better way to do this
if args.file_in is None:
print >> sys.stderr, 'ERROR: argument -i/--in is not specified'
exit()
data = parse_acewiki(args.file_in)
if args.fmt == "json":
print simplejson.dumps(data)
elif args.fmt == "gfabs":
out_gf_abs(data, args.name)
elif args.fmt == "gfconc":
out_gf_conc(data, args.name)
elif args.fmt == "sentences":
out_sentences(data)
else:
print >> sys.stderr, 'ERROR: Unsupported format: {}'.format(args.fmt)
|
for s in data[id]['sentences']:
if s['type'] == 'c':
continue
print ' '.join([rewrite_token(data, x) for x in s['content']])
|
conditional_block
|
convert_acewiki.py
|
#! /usr/bin/env python
# AceWiki data converter
# Author: Kaarel Kaljurand
# Version: 2012-02-23
#
# This script provides the conversion of a given AceWiki data file
# into other formats, e.g. JSON and GF.
#
# Examples:
#
# python convert_acewiki.py --in geo.acewikidata > Geo.json
# python convert_acewiki.py --in geo.acewikidata --format gfabs --name Geo > Geo.gf
# python convert_acewiki.py --in geo.acewikidata --format gfconc --name Geo > GeoEng.gf
# python convert_acewiki.py --in geo.acewikidata --format sentences > Geo.ace.txt
#
import sys
import argparse
import os
import re
import time
import simplejson
from string import Template
# Regular expression patterns
pattern_sep = re.compile('^\s*$')
pattern_name = re.compile('^([0-9]+)$')
pattern_type = re.compile('^type:([a-z]+)$')
pattern_words = re.compile('^words:(.+);$')
pattern_sentence = re.compile('^(c|\||#) (.+)$')
pattern_token = re.compile('^<([0-9]+),([0-9]+)>$')
# GF-specific templates and strings
# TODO: put these into a different module
gf = {}
gf['propername'] = ['PN', 'awPN']
gf['noun'] = ['CN', 'awCN']
gf['nounof'] = ['CN', 'awCNof']
gf['trverb'] = ['V2', 'awV2']
gf['tradj'] = ['A2', 'awA2']
gf['mainpage'] = ['Dummy', 'awDummy']
template_abs = Template("""
abstract ${name} = Attempto ** {
fun
""")
template_conc = Template("""
concrete ${name}Eng of $name = AttemptoEng **
open SyntaxEng, ParadigmsEng, IrregEng, (C = ConstructX) in {
-- TODO: review these, maybe we can do better
-- We use ~ as a dummy symbol which represents all the forms
-- that we do not want to actually generate.
-- This seems to be less confusing than using an empty string.
oper awCN : (_,_:Str) -> CN = \\sg,pl -> mkCN (ParadigmsEng.mkN sg pl) ;
oper awCNof : (_:Str) -> CN = \\x -> mkCN (ParadigmsEng.mkN x "~") ;
oper awPN : (_,_,_,_:Str) -> PN = \\x,d1,d2,d3 -> mkPN x ;
oper awV2 : (_,_,_:Str) -> V2 = \\goes,go,gone -> mkV2 (ParadigmsEng.mkV go goes "~" gone "~") ;
oper awA2 : (_:Str) -> A2 = \\x -> mkA2 (mkA x) (mkPrep "") ;
lin""")
def parse_acewiki(path):
"""
Parses AceWiki data file into a Python data structure.
TODO: Currently does not change the token representation (i.e. <id1,id2>).
Assumes that article IDs are integers.
This way we get an ordering for articles which results in a stable output.
"""
data = {}
id = -1
f = open(path, 'r')
for line in f:
line = line.strip()
m = pattern_name.match(line)
if m is not None:
id = int(m.group(1))
data[id] = {}
continue
m = pattern_type.match(line)
if m is not None:
data[id]['type'] = m.group(1)
continue
m = pattern_words.match(line)
if m is not None:
data[id]['words'] = m.group(1).split(';')
continue
m = pattern_sentence.match(line)
if m is not None:
if 'sentences' not in data[id]:
data[id]['sentences'] = []
data[id]['sentences'].append({ 'type': m.group(1), 'content' : m.group(2).split(' ') })
continue
f.close()
return data
|
def out_gf_abs(data, name):
"""
Outputs the data as GF abstract syntax file with the given name.
"""
print template_abs.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
[gf_type, gf_oper] = gf[type]
print 'w{}_{} : {} ;'.format(id, gf_type, gf_type)
print '}'
def gf_quote(str):
"""
Replaces underscores with spaces, escapes double quotes, and quotes the string.
"""
return '"' + str.replace("_", " ").replace('"', '\\"') + '"'
def rewrite_words(type, words):
"""
Removes 'by' from past participles ('trverb').
Removes 'of' from of-constructs ('nounof').
This is currently needed only for the GF output.
"""
if type == 'trverb':
[sg, pl, vbg] = words
vbg = re.sub(r' by$', '', vbg)
return [sg, pl, vbg]
elif type == 'nounof':
[nounof] = words
return [re.sub(r' of$', '', nounof)]
return words
def rewrite_token(data, token):
"""
If the given token is a function word then
returns it lowercased (unless it is a variable),
otherwise
returns the wordform that corresponds to the ID-representation.
"""
m = pattern_token.match(token)
if m is None:
if token in ['X', 'Y', 'Z']:
return token
return token.lower()
else:
try:
article_id = int(m.group(1))
wordform_id = int(m.group(2))
return data[article_id]['words'][wordform_id].replace('_', ' ')
except:
print >> sys.stderr, 'Warning: Bad token ID: {}'.format(token)
return token
def out_gf_conc(data, name):
"""
Outputs the data as GF concrete syntax file with the given name.
"""
print template_conc.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
words = rewrite_words(type, data[id]['words'])
words_as_str = ' '.join([gf_quote(x) for x in words])
[gf_type, gf_oper] = gf[type]
print 'w{}_{} = {} {} ;'.format(id, gf_type, gf_oper, words_as_str)
print '}'
def out_sentences(data):
"""
Outputs the data as a list of sentences (excl. comments).
The token IDs are resolved to the wordforms.
"""
for id in sorted(data):
if 'sentences' in data[id]:
for s in data[id]['sentences']:
if s['type'] == 'c':
continue
print ' '.join([rewrite_token(data, x) for x in s['content']])
# Commandline arguments parsing
parser = argparse.ArgumentParser(description='AceWiki data file converter')
parser.add_argument('-i', '--in', type=str, action='store', dest='file_in',
help='file that contains AceWiki data (OBLIGATORY)')
parser.add_argument('-n', '--name', type=str, action='store', dest='name',
default="Acewiki",
help='name of the grammar, used for GF outputs (default: Acewiki)')
parser.add_argument('-f', '--format', type=str, action='store', dest='fmt',
default="json",
help='output format, one of {json,gfabs,gfconc,sentences} (default: json)')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.1')
args = parser.parse_args()
# TODO: there is probably a better way to do this
if args.file_in is None:
print >> sys.stderr, 'ERROR: argument -i/--in is not specified'
exit()
data = parse_acewiki(args.file_in)
if args.fmt == "json":
print simplejson.dumps(data)
elif args.fmt == "gfabs":
out_gf_abs(data, args.name)
elif args.fmt == "gfconc":
out_gf_conc(data, args.name)
elif args.fmt == "sentences":
out_sentences(data)
else:
print >> sys.stderr, 'ERROR: Unsupported format: {}'.format(args.fmt)
|
random_line_split
|
|
convert_acewiki.py
|
#! /usr/bin/env python
# AceWiki data converter
# Author: Kaarel Kaljurand
# Version: 2012-02-23
#
# This script provides the conversion of a given AceWiki data file
# into other formats, e.g. JSON and GF.
#
# Examples:
#
# python convert_acewiki.py --in geo.acewikidata > Geo.json
# python convert_acewiki.py --in geo.acewikidata --format gfabs --name Geo > Geo.gf
# python convert_acewiki.py --in geo.acewikidata --format gfconc --name Geo > GeoEng.gf
# python convert_acewiki.py --in geo.acewikidata --format sentences > Geo.ace.txt
#
import sys
import argparse
import os
import re
import time
import simplejson
from string import Template
# Regular expression patterns
pattern_sep = re.compile('^\s*$')
pattern_name = re.compile('^([0-9]+)$')
pattern_type = re.compile('^type:([a-z]+)$')
pattern_words = re.compile('^words:(.+);$')
pattern_sentence = re.compile('^(c|\||#) (.+)$')
pattern_token = re.compile('^<([0-9]+),([0-9]+)>$')
# GF-specific templates and strings
# TODO: put these into a different module
gf = {}
gf['propername'] = ['PN', 'awPN']
gf['noun'] = ['CN', 'awCN']
gf['nounof'] = ['CN', 'awCNof']
gf['trverb'] = ['V2', 'awV2']
gf['tradj'] = ['A2', 'awA2']
gf['mainpage'] = ['Dummy', 'awDummy']
template_abs = Template("""
abstract ${name} = Attempto ** {
fun
""")
template_conc = Template("""
concrete ${name}Eng of $name = AttemptoEng **
open SyntaxEng, ParadigmsEng, IrregEng, (C = ConstructX) in {
-- TODO: review these, maybe we can do better
-- We use ~ as a dummy symbol which represents all the forms
-- that we do not want to actually generate.
-- This seems to be less confusing than using an empty string.
oper awCN : (_,_:Str) -> CN = \\sg,pl -> mkCN (ParadigmsEng.mkN sg pl) ;
oper awCNof : (_:Str) -> CN = \\x -> mkCN (ParadigmsEng.mkN x "~") ;
oper awPN : (_,_,_,_:Str) -> PN = \\x,d1,d2,d3 -> mkPN x ;
oper awV2 : (_,_,_:Str) -> V2 = \\goes,go,gone -> mkV2 (ParadigmsEng.mkV go goes "~" gone "~") ;
oper awA2 : (_:Str) -> A2 = \\x -> mkA2 (mkA x) (mkPrep "") ;
lin""")
def parse_acewiki(path):
"""
Parses AceWiki data file into a Python data structure.
TODO: Currently does not change the token representation (i.e. <id1,id2>).
Assumes that article IDs are integers.
This way we get an ordering for articles which results in a stable output.
"""
data = {}
id = -1
f = open(path, 'r')
for line in f:
line = line.strip()
m = pattern_name.match(line)
if m is not None:
id = int(m.group(1))
data[id] = {}
continue
m = pattern_type.match(line)
if m is not None:
data[id]['type'] = m.group(1)
continue
m = pattern_words.match(line)
if m is not None:
data[id]['words'] = m.group(1).split(';')
continue
m = pattern_sentence.match(line)
if m is not None:
if 'sentences' not in data[id]:
data[id]['sentences'] = []
data[id]['sentences'].append({ 'type': m.group(1), 'content' : m.group(2).split(' ') })
continue
f.close()
return data
def out_gf_abs(data, name):
"""
Outputs the data as GF abstract syntax file with the given name.
"""
print template_abs.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
[gf_type, gf_oper] = gf[type]
print 'w{}_{} : {} ;'.format(id, gf_type, gf_type)
print '}'
def gf_quote(str):
"""
Replaces underscores with spaces, escapes double quotes, and quotes the string.
"""
return '"' + str.replace("_", " ").replace('"', '\\"') + '"'
def rewrite_words(type, words):
"""
Removes 'by' from past participles ('trverb').
Removes 'of' from of-constructs ('nounof').
This is currently needed only for the GF output.
"""
if type == 'trverb':
[sg, pl, vbg] = words
vbg = re.sub(r' by$', '', vbg)
return [sg, pl, vbg]
elif type == 'nounof':
[nounof] = words
return [re.sub(r' of$', '', nounof)]
return words
def
|
(data, token):
"""
If the given token is a function word then
returns it lowercased (unless it is a variable),
otherwise
returns the wordform that corresponds to the ID-representation.
"""
m = pattern_token.match(token)
if m is None:
if token in ['X', 'Y', 'Z']:
return token
return token.lower()
else:
try:
article_id = int(m.group(1))
wordform_id = int(m.group(2))
return data[article_id]['words'][wordform_id].replace('_', ' ')
except:
print >> sys.stderr, 'Warning: Bad token ID: {}'.format(token)
return token
def out_gf_conc(data, name):
"""
Outputs the data as GF concrete syntax file with the given name.
"""
print template_conc.substitute(name = name)
for id in sorted(data):
if 'words' in data[id]:
type = data[id]['type']
words = rewrite_words(type, data[id]['words'])
words_as_str = ' '.join([gf_quote(x) for x in words])
[gf_type, gf_oper] = gf[type]
print 'w{}_{} = {} {} ;'.format(id, gf_type, gf_oper, words_as_str)
print '}'
def out_sentences(data):
"""
Outputs the data as a list of sentences (excl. comments).
The token IDs are resolved to the wordforms.
"""
for id in sorted(data):
if 'sentences' in data[id]:
for s in data[id]['sentences']:
if s['type'] == 'c':
continue
print ' '.join([rewrite_token(data, x) for x in s['content']])
# Commandline arguments parsing
parser = argparse.ArgumentParser(description='AceWiki data file converter')
parser.add_argument('-i', '--in', type=str, action='store', dest='file_in',
help='file that contains AceWiki data (OBLIGATORY)')
parser.add_argument('-n', '--name', type=str, action='store', dest='name',
default="Acewiki",
help='name of the grammar, used for GF outputs (default: Acewiki)')
parser.add_argument('-f', '--format', type=str, action='store', dest='fmt',
default="json",
help='output format, one of {json,gfabs,gfconc,sentences} (default: json)')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.1')
args = parser.parse_args()
# TODO: there is probably a better way to do this
if args.file_in is None:
print >> sys.stderr, 'ERROR: argument -i/--in is not specified'
exit()
data = parse_acewiki(args.file_in)
if args.fmt == "json":
print simplejson.dumps(data)
elif args.fmt == "gfabs":
out_gf_abs(data, args.name)
elif args.fmt == "gfconc":
out_gf_conc(data, args.name)
elif args.fmt == "sentences":
out_sentences(data)
else:
print >> sys.stderr, 'ERROR: Unsupported format: {}'.format(args.fmt)
|
rewrite_token
|
identifier_name
|
ngdevmode_debug_spec.ts
|
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {CommonModule} from '@angular/common';
import {Component} from '@angular/core';
import {getLContext} from '@angular/core/src/render3/context_discovery';
import {getComponentLView} from '@angular/core/src/render3/util/discovery_utils';
import {createNamedArrayType} from '@angular/core/src/util/named_array_type';
import {TestBed} from '@angular/core/testing';
import {onlyInIvy} from '@angular/private/testing';
const supportsArraySubclassing =
createNamedArrayType('SupportsArraySubclassing').name === 'SupportsArraySubclassing';
|
template: `
<ul>
<li *ngIf="true">item</li>
</ul>
`
})
class MyApp {
}
TestBed.configureTestingModule({declarations: [MyApp], imports: [CommonModule]});
const fixture = TestBed.createComponent(MyApp);
const rootLView = getLContext(fixture.nativeElement)!.lView;
expect(rootLView.constructor.name).toEqual('LRootView');
const componentLView = getComponentLView(fixture.componentInstance);
expect(componentLView.constructor.name).toEqual('LComponentView_MyApp');
const element: HTMLElement = fixture.nativeElement;
fixture.detectChanges();
const li = element.querySelector('li')!;
const embeddedLView = getLContext(li)!.lView;
expect(embeddedLView.constructor.name).toEqual('LEmbeddedView_MyApp_li_1');
});
});
});
|
onlyInIvy('Debug information exist in ivy only').describe('ngDevMode debug', () => {
describe('LViewDebug', () => {
supportsArraySubclassing && it('should name LView based on type', () => {
@Component({
|
random_line_split
|
ngdevmode_debug_spec.ts
|
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {CommonModule} from '@angular/common';
import {Component} from '@angular/core';
import {getLContext} from '@angular/core/src/render3/context_discovery';
import {getComponentLView} from '@angular/core/src/render3/util/discovery_utils';
import {createNamedArrayType} from '@angular/core/src/util/named_array_type';
import {TestBed} from '@angular/core/testing';
import {onlyInIvy} from '@angular/private/testing';
const supportsArraySubclassing =
createNamedArrayType('SupportsArraySubclassing').name === 'SupportsArraySubclassing';
onlyInIvy('Debug information exist in ivy only').describe('ngDevMode debug', () => {
describe('LViewDebug', () => {
supportsArraySubclassing && it('should name LView based on type', () => {
@Component({
template: `
<ul>
<li *ngIf="true">item</li>
</ul>
`
})
class
|
{
}
TestBed.configureTestingModule({declarations: [MyApp], imports: [CommonModule]});
const fixture = TestBed.createComponent(MyApp);
const rootLView = getLContext(fixture.nativeElement)!.lView;
expect(rootLView.constructor.name).toEqual('LRootView');
const componentLView = getComponentLView(fixture.componentInstance);
expect(componentLView.constructor.name).toEqual('LComponentView_MyApp');
const element: HTMLElement = fixture.nativeElement;
fixture.detectChanges();
const li = element.querySelector('li')!;
const embeddedLView = getLContext(li)!.lView;
expect(embeddedLView.constructor.name).toEqual('LEmbeddedView_MyApp_li_1');
});
});
});
|
MyApp
|
identifier_name
|
problem2.py
|
import matplotlib.pyplot as plt
import numpy as np
|
mean = 0
variance = 1
sigma = math.sqrt(variance)
def drawSampleNormal(sampleSize):
samples = np.random.normal(mean, sigma, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
plt.plot(bins,mlab.normpdf(bins,mean,sigma))
plt.show()
plt.savefig("normal_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNormal(500)
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
samples = np.random.gamma(alpha, beta, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
plt.plot(bins, pdf, linewidth=2, color='r')
plt.show()
plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)
|
import matplotlib.mlab as mlab
import math
import scipy.special as sps
|
random_line_split
|
problem2.py
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.special as sps
mean = 0
variance = 1
sigma = math.sqrt(variance)
def
|
(sampleSize):
samples = np.random.normal(mean, sigma, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
plt.plot(bins,mlab.normpdf(bins,mean,sigma))
plt.show()
plt.savefig("normal_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNormal(500)
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
samples = np.random.gamma(alpha, beta, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
plt.plot(bins, pdf, linewidth=2, color='r')
plt.show()
plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)
|
drawSampleNormal
|
identifier_name
|
problem2.py
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.special as sps
mean = 0
variance = 1
sigma = math.sqrt(variance)
def drawSampleNormal(sampleSize):
samples = np.random.normal(mean, sigma, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
plt.plot(bins,mlab.normpdf(bins,mean,sigma))
plt.show()
plt.savefig("normal_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNormal(500)
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
|
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)
|
samples = np.random.gamma(alpha, beta, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
plt.plot(bins, pdf, linewidth=2, color='r')
plt.show()
plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
plt.clf()
|
identifier_body
|
data-types.js
|
'use strict';
const _ = require('lodash');
const moment = require('moment-timezone');
module.exports = BaseTypes => {
BaseTypes.ABSTRACT.prototype.dialectTypes = 'https://mariadb.com/kb/en/library/resultset/#field-types';
/**
* types: [buffer_type, ...]
* @see documentation : https://mariadb.com/kb/en/library/resultset/#field-types
* @see connector implementation : https://github.com/MariaDB/mariadb-connector-nodejs/blob/master/lib/const/field-type.js
*/
BaseTypes.DATE.types.mariadb = ['DATETIME'];
BaseTypes.STRING.types.mariadb = ['VAR_STRING'];
BaseTypes.CHAR.types.mariadb = ['STRING'];
BaseTypes.TEXT.types.mariadb = ['BLOB'];
BaseTypes.TINYINT.types.mariadb = ['TINY'];
BaseTypes.SMALLINT.types.mariadb = ['SHORT'];
BaseTypes.MEDIUMINT.types.mariadb = ['INT24'];
BaseTypes.INTEGER.types.mariadb = ['LONG'];
BaseTypes.BIGINT.types.mariadb = ['LONGLONG'];
BaseTypes.FLOAT.types.mariadb = ['FLOAT'];
BaseTypes.TIME.types.mariadb = ['TIME'];
BaseTypes.DATEONLY.types.mariadb = ['DATE'];
BaseTypes.BOOLEAN.types.mariadb = ['TINY'];
BaseTypes.BLOB.types.mariadb = ['TINYBLOB', 'BLOB', 'LONGBLOB'];
BaseTypes.DECIMAL.types.mariadb = ['NEWDECIMAL'];
BaseTypes.UUID.types.mariadb = false;
BaseTypes.ENUM.types.mariadb = false;
BaseTypes.REAL.types.mariadb = ['DOUBLE'];
BaseTypes.DOUBLE.types.mariadb = ['DOUBLE'];
BaseTypes.GEOMETRY.types.mariadb = ['GEOMETRY'];
BaseTypes.JSON.types.mariadb = ['JSON'];
class DECIMAL extends BaseTypes.DECIMAL {
toSql() {
let definition = super.toSql();
if (this._unsigned) {
definition += ' UNSIGNED';
}
if (this._zerofill) {
definition += ' ZEROFILL';
}
return definition;
}
}
class DATE extends BaseTypes.DATE {
toSql() {
return `DATETIME${this._length ? `(${this._length})` : ''}`;
}
_stringify(date, options) {
date = this._applyTimezone(date, options);
return date.format('YYYY-MM-DD HH:mm:ss.SSS');
}
static parse(value, options) {
value = value.string();
if (value === null) {
return value;
}
if (moment.tz.zone(options.timezone)) {
value = moment.tz(value, options.timezone).toDate();
}
else {
value = new Date(`${value} ${options.timezone}`);
}
return value;
}
}
class DATEONLY extends BaseTypes.DATEONLY {
static parse(value) {
return value.string();
}
}
class UUID extends BaseTypes.UUID {
toSql() {
return 'CHAR(36) BINARY';
}
}
class
|
extends BaseTypes.GEOMETRY {
constructor(type, srid) {
super(type, srid);
if (_.isEmpty(this.type)) {
this.sqlType = this.key;
}
else {
this.sqlType = this.type;
}
}
toSql() {
return this.sqlType;
}
}
class ENUM extends BaseTypes.ENUM {
toSql(options) {
return `ENUM(${this.values.map(value => options.escape(value)).join(', ')})`;
}
}
class JSONTYPE extends BaseTypes.JSON {
_stringify(value, options) {
return options.operation === 'where' && typeof value === 'string' ? value
: JSON.stringify(value);
}
}
return {
ENUM,
DATE,
DATEONLY,
UUID,
GEOMETRY,
DECIMAL,
JSON: JSONTYPE
};
};
|
GEOMETRY
|
identifier_name
|
data-types.js
|
'use strict';
const _ = require('lodash');
const moment = require('moment-timezone');
module.exports = BaseTypes => {
BaseTypes.ABSTRACT.prototype.dialectTypes = 'https://mariadb.com/kb/en/library/resultset/#field-types';
/**
* types: [buffer_type, ...]
* @see documentation : https://mariadb.com/kb/en/library/resultset/#field-types
* @see connector implementation : https://github.com/MariaDB/mariadb-connector-nodejs/blob/master/lib/const/field-type.js
*/
BaseTypes.DATE.types.mariadb = ['DATETIME'];
BaseTypes.STRING.types.mariadb = ['VAR_STRING'];
BaseTypes.CHAR.types.mariadb = ['STRING'];
BaseTypes.TEXT.types.mariadb = ['BLOB'];
BaseTypes.TINYINT.types.mariadb = ['TINY'];
BaseTypes.SMALLINT.types.mariadb = ['SHORT'];
|
BaseTypes.FLOAT.types.mariadb = ['FLOAT'];
BaseTypes.TIME.types.mariadb = ['TIME'];
BaseTypes.DATEONLY.types.mariadb = ['DATE'];
BaseTypes.BOOLEAN.types.mariadb = ['TINY'];
BaseTypes.BLOB.types.mariadb = ['TINYBLOB', 'BLOB', 'LONGBLOB'];
BaseTypes.DECIMAL.types.mariadb = ['NEWDECIMAL'];
BaseTypes.UUID.types.mariadb = false;
BaseTypes.ENUM.types.mariadb = false;
BaseTypes.REAL.types.mariadb = ['DOUBLE'];
BaseTypes.DOUBLE.types.mariadb = ['DOUBLE'];
BaseTypes.GEOMETRY.types.mariadb = ['GEOMETRY'];
BaseTypes.JSON.types.mariadb = ['JSON'];
class DECIMAL extends BaseTypes.DECIMAL {
toSql() {
let definition = super.toSql();
if (this._unsigned) {
definition += ' UNSIGNED';
}
if (this._zerofill) {
definition += ' ZEROFILL';
}
return definition;
}
}
class DATE extends BaseTypes.DATE {
toSql() {
return `DATETIME${this._length ? `(${this._length})` : ''}`;
}
_stringify(date, options) {
date = this._applyTimezone(date, options);
return date.format('YYYY-MM-DD HH:mm:ss.SSS');
}
static parse(value, options) {
value = value.string();
if (value === null) {
return value;
}
if (moment.tz.zone(options.timezone)) {
value = moment.tz(value, options.timezone).toDate();
}
else {
value = new Date(`${value} ${options.timezone}`);
}
return value;
}
}
class DATEONLY extends BaseTypes.DATEONLY {
static parse(value) {
return value.string();
}
}
class UUID extends BaseTypes.UUID {
toSql() {
return 'CHAR(36) BINARY';
}
}
class GEOMETRY extends BaseTypes.GEOMETRY {
constructor(type, srid) {
super(type, srid);
if (_.isEmpty(this.type)) {
this.sqlType = this.key;
}
else {
this.sqlType = this.type;
}
}
toSql() {
return this.sqlType;
}
}
class ENUM extends BaseTypes.ENUM {
toSql(options) {
return `ENUM(${this.values.map(value => options.escape(value)).join(', ')})`;
}
}
class JSONTYPE extends BaseTypes.JSON {
_stringify(value, options) {
return options.operation === 'where' && typeof value === 'string' ? value
: JSON.stringify(value);
}
}
return {
ENUM,
DATE,
DATEONLY,
UUID,
GEOMETRY,
DECIMAL,
JSON: JSONTYPE
};
};
|
BaseTypes.MEDIUMINT.types.mariadb = ['INT24'];
BaseTypes.INTEGER.types.mariadb = ['LONG'];
BaseTypes.BIGINT.types.mariadb = ['LONGLONG'];
|
random_line_split
|
data-types.js
|
'use strict';
const _ = require('lodash');
const moment = require('moment-timezone');
module.exports = BaseTypes => {
BaseTypes.ABSTRACT.prototype.dialectTypes = 'https://mariadb.com/kb/en/library/resultset/#field-types';
/**
* types: [buffer_type, ...]
* @see documentation : https://mariadb.com/kb/en/library/resultset/#field-types
* @see connector implementation : https://github.com/MariaDB/mariadb-connector-nodejs/blob/master/lib/const/field-type.js
*/
BaseTypes.DATE.types.mariadb = ['DATETIME'];
BaseTypes.STRING.types.mariadb = ['VAR_STRING'];
BaseTypes.CHAR.types.mariadb = ['STRING'];
BaseTypes.TEXT.types.mariadb = ['BLOB'];
BaseTypes.TINYINT.types.mariadb = ['TINY'];
BaseTypes.SMALLINT.types.mariadb = ['SHORT'];
BaseTypes.MEDIUMINT.types.mariadb = ['INT24'];
BaseTypes.INTEGER.types.mariadb = ['LONG'];
BaseTypes.BIGINT.types.mariadb = ['LONGLONG'];
BaseTypes.FLOAT.types.mariadb = ['FLOAT'];
BaseTypes.TIME.types.mariadb = ['TIME'];
BaseTypes.DATEONLY.types.mariadb = ['DATE'];
BaseTypes.BOOLEAN.types.mariadb = ['TINY'];
BaseTypes.BLOB.types.mariadb = ['TINYBLOB', 'BLOB', 'LONGBLOB'];
BaseTypes.DECIMAL.types.mariadb = ['NEWDECIMAL'];
BaseTypes.UUID.types.mariadb = false;
BaseTypes.ENUM.types.mariadb = false;
BaseTypes.REAL.types.mariadb = ['DOUBLE'];
BaseTypes.DOUBLE.types.mariadb = ['DOUBLE'];
BaseTypes.GEOMETRY.types.mariadb = ['GEOMETRY'];
BaseTypes.JSON.types.mariadb = ['JSON'];
class DECIMAL extends BaseTypes.DECIMAL {
toSql() {
let definition = super.toSql();
if (this._unsigned)
|
if (this._zerofill) {
definition += ' ZEROFILL';
}
return definition;
}
}
class DATE extends BaseTypes.DATE {
toSql() {
return `DATETIME${this._length ? `(${this._length})` : ''}`;
}
_stringify(date, options) {
date = this._applyTimezone(date, options);
return date.format('YYYY-MM-DD HH:mm:ss.SSS');
}
static parse(value, options) {
value = value.string();
if (value === null) {
return value;
}
if (moment.tz.zone(options.timezone)) {
value = moment.tz(value, options.timezone).toDate();
}
else {
value = new Date(`${value} ${options.timezone}`);
}
return value;
}
}
class DATEONLY extends BaseTypes.DATEONLY {
static parse(value) {
return value.string();
}
}
class UUID extends BaseTypes.UUID {
toSql() {
return 'CHAR(36) BINARY';
}
}
class GEOMETRY extends BaseTypes.GEOMETRY {
constructor(type, srid) {
super(type, srid);
if (_.isEmpty(this.type)) {
this.sqlType = this.key;
}
else {
this.sqlType = this.type;
}
}
toSql() {
return this.sqlType;
}
}
class ENUM extends BaseTypes.ENUM {
toSql(options) {
return `ENUM(${this.values.map(value => options.escape(value)).join(', ')})`;
}
}
class JSONTYPE extends BaseTypes.JSON {
_stringify(value, options) {
return options.operation === 'where' && typeof value === 'string' ? value
: JSON.stringify(value);
}
}
return {
ENUM,
DATE,
DATEONLY,
UUID,
GEOMETRY,
DECIMAL,
JSON: JSONTYPE
};
};
|
{
definition += ' UNSIGNED';
}
|
conditional_block
|
594422d373ee_fip_qos.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
from neutron_lib.db import constants as db_const
from neutron.db import migration
"""fip qos
Revision ID: 594422d373ee
Revises: 7d32f979895f
Create Date: 2016-04-26 17:16:10.323756
"""
# revision identifiers, used by Alembic.
revision = '594422d373ee'
down_revision = '7d32f979895f'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.QUEENS]
def upgrade():
|
op.create_table(
'qos_fip_policy_bindings',
sa.Column('policy_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('fip_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('floatingips.id', ondelete='CASCADE'),
nullable=False, unique=True))
|
identifier_body
|
|
594422d373ee_fip_qos.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
from neutron_lib.db import constants as db_const
from neutron.db import migration
"""fip qos
Revision ID: 594422d373ee
Revises: 7d32f979895f
Create Date: 2016-04-26 17:16:10.323756
"""
# revision identifiers, used by Alembic.
revision = '594422d373ee'
down_revision = '7d32f979895f'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.QUEENS]
def
|
():
op.create_table(
'qos_fip_policy_bindings',
sa.Column('policy_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('fip_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('floatingips.id', ondelete='CASCADE'),
nullable=False, unique=True))
|
upgrade
|
identifier_name
|
594422d373ee_fip_qos.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
from neutron_lib.db import constants as db_const
from neutron.db import migration
"""fip qos
Revision ID: 594422d373ee
Revises: 7d32f979895f
Create Date: 2016-04-26 17:16:10.323756
"""
# revision identifiers, used by Alembic.
revision = '594422d373ee'
down_revision = '7d32f979895f'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.QUEENS]
def upgrade():
op.create_table(
'qos_fip_policy_bindings',
sa.Column('policy_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
|
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('floatingips.id', ondelete='CASCADE'),
nullable=False, unique=True))
|
nullable=False),
sa.Column('fip_id',
|
random_line_split
|
map_data.ts
|
MapData = function (dungeonConfig) {
return createMapData(dungeonConfig);
};
function
|
(dungeonConfig) {
var _width = ROT.RNG.getUniformInt(dungeonConfig.minWidth, dungeonConfig.maxWidth);
var _height = ROT.RNG.getUniformInt(dungeonConfig.minHeight, dungeonConfig.maxHeight);
var _tiles = new Array(_width);
for (var x = 0; x < _width; x++) {
_tiles[x] = new Array(_height);
}
var getWidth = function () {
return _width;
};
var getHeight = function () {
return _height;
};
var getTiles = function () {
return _tiles;
};
var addTile = function (tile, x, y) {
tile.setPosition(x, y);
_tiles[x][y] = tile;
};
var getTile = function (x, y) {
return _tiles[x][y];
};
var map = {};
map.getWidth = getWidth;
map.getHeight = getHeight;
map.getTiles = getTiles;
map.addTile = addTile;
map.getTile = getTile;
return map;
}
|
createMapData
|
identifier_name
|
map_data.ts
|
MapData = function (dungeonConfig) {
return createMapData(dungeonConfig);
};
function createMapData(dungeonConfig) {
var _width = ROT.RNG.getUniformInt(dungeonConfig.minWidth, dungeonConfig.maxWidth);
var _height = ROT.RNG.getUniformInt(dungeonConfig.minHeight, dungeonConfig.maxHeight);
var _tiles = new Array(_width);
for (var x = 0; x < _width; x++)
|
var getWidth = function () {
return _width;
};
var getHeight = function () {
return _height;
};
var getTiles = function () {
return _tiles;
};
var addTile = function (tile, x, y) {
tile.setPosition(x, y);
_tiles[x][y] = tile;
};
var getTile = function (x, y) {
return _tiles[x][y];
};
var map = {};
map.getWidth = getWidth;
map.getHeight = getHeight;
map.getTiles = getTiles;
map.addTile = addTile;
map.getTile = getTile;
return map;
}
|
{
_tiles[x] = new Array(_height);
}
|
conditional_block
|
map_data.ts
|
MapData = function (dungeonConfig) {
return createMapData(dungeonConfig);
};
function createMapData(dungeonConfig)
|
{
var _width = ROT.RNG.getUniformInt(dungeonConfig.minWidth, dungeonConfig.maxWidth);
var _height = ROT.RNG.getUniformInt(dungeonConfig.minHeight, dungeonConfig.maxHeight);
var _tiles = new Array(_width);
for (var x = 0; x < _width; x++) {
_tiles[x] = new Array(_height);
}
var getWidth = function () {
return _width;
};
var getHeight = function () {
return _height;
};
var getTiles = function () {
return _tiles;
};
var addTile = function (tile, x, y) {
tile.setPosition(x, y);
_tiles[x][y] = tile;
};
var getTile = function (x, y) {
return _tiles[x][y];
};
var map = {};
map.getWidth = getWidth;
map.getHeight = getHeight;
map.getTiles = getTiles;
map.addTile = addTile;
map.getTile = getTile;
return map;
}
|
identifier_body
|
|
map_data.ts
|
MapData = function (dungeonConfig) {
return createMapData(dungeonConfig);
};
function createMapData(dungeonConfig) {
var _width = ROT.RNG.getUniformInt(dungeonConfig.minWidth, dungeonConfig.maxWidth);
var _height = ROT.RNG.getUniformInt(dungeonConfig.minHeight, dungeonConfig.maxHeight);
var _tiles = new Array(_width);
for (var x = 0; x < _width; x++) {
_tiles[x] = new Array(_height);
}
var getWidth = function () {
return _width;
};
var getHeight = function () {
return _height;
};
var getTiles = function () {
|
var addTile = function (tile, x, y) {
tile.setPosition(x, y);
_tiles[x][y] = tile;
};
var getTile = function (x, y) {
return _tiles[x][y];
};
var map = {};
map.getWidth = getWidth;
map.getHeight = getHeight;
map.getTiles = getTiles;
map.addTile = addTile;
map.getTile = getTile;
return map;
}
|
return _tiles;
};
|
random_line_split
|
dropck_vec_cycle_checked.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Reject mixing cyclic structure and Drop when using Vec.
//
// (Compare against compile-fail/dropck_arr_cycle_checked.rs)
use std::cell::Cell;
use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
}
}
mod id {
use s;
#[derive(Debug)]
pub struct Id {
orig_count: usize,
count: usize,
}
impl Id {
pub fn new() -> Id {
let c = s::next_count();
println!("building Id {}", c);
Id { orig_count: c, count: c }
}
pub fn count(&self) -> usize {
println!("Id::count on {} returns {}", self.orig_count, self.count);
self.count
}
}
impl Drop for Id {
fn drop(&mut self) {
println!("dropping Id {}", self.count);
self.count = 0;
}
}
}
trait HasId {
fn count(&self) -> usize;
}
#[derive(Debug)]
struct CheckId<T:HasId> {
v: T
}
#[allow(non_snake_case)]
fn CheckId<T:HasId>(t: T) -> CheckId<T> { CheckId{ v: t } }
impl<T:HasId> Drop for CheckId<T> {
|
}
#[derive(Debug)]
struct C<'a> {
id: Id,
v: Vec<CheckId<Cell<Option<&'a C<'a>>>>>,
}
impl<'a> HasId for Cell<Option<&'a C<'a>>> {
fn count(&self) -> usize {
match self.get() {
None => 1,
Some(c) => c.id.count(),
}
}
}
impl<'a> C<'a> {
fn new() -> C<'a> {
C { id: Id::new(), v: Vec::new() }
}
}
fn f() {
let (mut c1, mut c2, mut c3);
c1 = C::new();
c2 = C::new();
c3 = C::new();
c1.v.push(CheckId(Cell::new(None)));
c1.v.push(CheckId(Cell::new(None)));
c2.v.push(CheckId(Cell::new(None)));
c2.v.push(CheckId(Cell::new(None)));
c3.v.push(CheckId(Cell::new(None)));
c3.v.push(CheckId(Cell::new(None)));
c1.v[0].v.set(Some(&c2)); //~ ERROR `c2` does not live long enough
c1.v[1].v.set(Some(&c3)); //~ ERROR `c3` does not live long enough
c2.v[0].v.set(Some(&c2)); //~ ERROR `c2` does not live long enough
c2.v[1].v.set(Some(&c3)); //~ ERROR `c3` does not live long enough
c3.v[0].v.set(Some(&c1)); //~ ERROR `c1` does not live long enough
c3.v[1].v.set(Some(&c2)); //~ ERROR `c2` does not live long enough
}
fn main() {
f();
}
|
fn drop(&mut self) {
assert!(self.v.count() > 0);
}
|
random_line_split
|
dropck_vec_cycle_checked.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Reject mixing cyclic structure and Drop when using Vec.
//
// (Compare against compile-fail/dropck_arr_cycle_checked.rs)
use std::cell::Cell;
use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
}
}
mod id {
use s;
#[derive(Debug)]
pub struct Id {
orig_count: usize,
count: usize,
}
impl Id {
pub fn
|
() -> Id {
let c = s::next_count();
println!("building Id {}", c);
Id { orig_count: c, count: c }
}
pub fn count(&self) -> usize {
println!("Id::count on {} returns {}", self.orig_count, self.count);
self.count
}
}
impl Drop for Id {
fn drop(&mut self) {
println!("dropping Id {}", self.count);
self.count = 0;
}
}
}
trait HasId {
fn count(&self) -> usize;
}
#[derive(Debug)]
struct CheckId<T:HasId> {
v: T
}
#[allow(non_snake_case)]
fn CheckId<T:HasId>(t: T) -> CheckId<T> { CheckId{ v: t } }
impl<T:HasId> Drop for CheckId<T> {
fn drop(&mut self) {
assert!(self.v.count() > 0);
}
}
#[derive(Debug)]
struct C<'a> {
id: Id,
v: Vec<CheckId<Cell<Option<&'a C<'a>>>>>,
}
impl<'a> HasId for Cell<Option<&'a C<'a>>> {
fn count(&self) -> usize {
match self.get() {
None => 1,
Some(c) => c.id.count(),
}
}
}
impl<'a> C<'a> {
fn new() -> C<'a> {
C { id: Id::new(), v: Vec::new() }
}
}
fn f() {
let (mut c1, mut c2, mut c3);
c1 = C::new();
c2 = C::new();
c3 = C::new();
c1.v.push(CheckId(Cell::new(None)));
c1.v.push(CheckId(Cell::new(None)));
c2.v.push(CheckId(Cell::new(None)));
c2.v.push(CheckId(Cell::new(None)));
c3.v.push(CheckId(Cell::new(None)));
c3.v.push(CheckId(Cell::new(None)));
c1.v[0].v.set(Some(&c2)); //~ ERROR `c2` does not live long enough
c1.v[1].v.set(Some(&c3)); //~ ERROR `c3` does not live long enough
c2.v[0].v.set(Some(&c2)); //~ ERROR `c2` does not live long enough
c2.v[1].v.set(Some(&c3)); //~ ERROR `c3` does not live long enough
c3.v[0].v.set(Some(&c1)); //~ ERROR `c1` does not live long enough
c3.v[1].v.set(Some(&c2)); //~ ERROR `c2` does not live long enough
}
fn main() {
f();
}
|
new
|
identifier_name
|
tags_include.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify operation of <gcb-include> custom tag."""
__author__ = 'Mike Gainer ([email protected])'
import os
import StringIO
import appengine_config
from models import courses
from tests.functional import actions
COURSE_NAME = 'test_course'
COURSE_TITLE = 'Test Course'
ADMIN_EMAIL = '[email protected]'
PRE_INCLUDE = 'XXX'
POST_INCLUDE = 'YYY'
HTML_DIR = os.path.join(appengine_config.BUNDLE_ROOT, 'assets/html')
HTML_FILE = 'test.html'
HTML_PATH = os.path.join(HTML_DIR, HTML_FILE)
GCB_INCLUDE = (PRE_INCLUDE +
'<gcb-include path="/assets/html/%s" ' +
'instanceid="uODxjWHTxxIC"></gcb-include>' +
POST_INCLUDE)
LESSON_URL = '/test_course/unit?unit=1&lesson=2'
class TagsInclude(actions.TestBase):
def setUp(self):
super(TagsInclude, self).setUp()
self.context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.context)
self.unit = self.course.add_unit()
self.unit.title = 'The Unit'
self.unit.now_available = True
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'The Lesson'
self.lesson.now_available = True
self.lesson.objectives = GCB_INCLUDE % HTML_FILE
self.course.save()
def tearDown(self):
self.context.fs.delete(HTML_PATH)
def _set_content(self, content):
self.context.fs.put(HTML_PATH, StringIO.StringIO(content))
def _expect_content(self, expected, response):
expected = '%s<div>%s</div>%s' % (PRE_INCLUDE, expected, POST_INCLUDE)
self.assertIn(expected, response.body)
def test_missing_file_gives_error(self):
self.lesson.objectives = GCB_INCLUDE % 'no_such_file.html'
self.course.save()
response = self.get(LESSON_URL)
self.assertIn('Invalid HTML tag: no_such_file.html', response.body)
def test_file_from_actual_filesystem(self):
# Note: This has the potential to cause a test flake: Adding an
# actual file to the filesystem and then removing it may cause
# ETL tests to complain - they saw the file, then failed to copy
# it because it went away.
simple_content = 'Fiery the angels fell'
if not os.path.isdir(HTML_DIR):
os.mkdir(HTML_DIR)
with open(HTML_PATH, 'w') as fp:
fp.write(simple_content)
response = self.get(LESSON_URL)
os.unlink(HTML_PATH)
self._expect_content(simple_content, response)
def test_simple(self):
simple_content = 'Deep thunder rolled around their shores'
self._set_content(simple_content)
response = self.get(LESSON_URL)
self._expect_content(simple_content, response)
def test_content_containing_tags(self):
content = '<h1>This is a test</h1><p>This is only a test.</p>'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content(content, response)
def test_jinja_base_path(self):
content = '{{ base_path }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('assets/html', response)
def test_jinja_course_base(self):
content = '{{ gcb_course_base }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('http://localhost/test_course/', response)
def test_jinja_course_title(self):
|
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('Test Course', response)
def test_inclusion(self):
content = 'Hello, World!'
sub_path = os.path.join(
appengine_config.BUNDLE_ROOT, HTML_DIR, 'sub.html')
self.context.fs.put(sub_path, StringIO.StringIO(content))
self._set_content('{% include "sub.html" %}')
try:
response = self.get(LESSON_URL)
self._expect_content(content, response)
finally:
self.context.fs.delete(sub_path)
|
content = '{{ course_info.course.title }}'
|
random_line_split
|
tags_include.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify operation of <gcb-include> custom tag."""
__author__ = 'Mike Gainer ([email protected])'
import os
import StringIO
import appengine_config
from models import courses
from tests.functional import actions
COURSE_NAME = 'test_course'
COURSE_TITLE = 'Test Course'
ADMIN_EMAIL = '[email protected]'
PRE_INCLUDE = 'XXX'
POST_INCLUDE = 'YYY'
HTML_DIR = os.path.join(appengine_config.BUNDLE_ROOT, 'assets/html')
HTML_FILE = 'test.html'
HTML_PATH = os.path.join(HTML_DIR, HTML_FILE)
GCB_INCLUDE = (PRE_INCLUDE +
'<gcb-include path="/assets/html/%s" ' +
'instanceid="uODxjWHTxxIC"></gcb-include>' +
POST_INCLUDE)
LESSON_URL = '/test_course/unit?unit=1&lesson=2'
class TagsInclude(actions.TestBase):
def setUp(self):
super(TagsInclude, self).setUp()
self.context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.context)
self.unit = self.course.add_unit()
self.unit.title = 'The Unit'
self.unit.now_available = True
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'The Lesson'
self.lesson.now_available = True
self.lesson.objectives = GCB_INCLUDE % HTML_FILE
self.course.save()
def tearDown(self):
self.context.fs.delete(HTML_PATH)
def _set_content(self, content):
self.context.fs.put(HTML_PATH, StringIO.StringIO(content))
def _expect_content(self, expected, response):
expected = '%s<div>%s</div>%s' % (PRE_INCLUDE, expected, POST_INCLUDE)
self.assertIn(expected, response.body)
def test_missing_file_gives_error(self):
self.lesson.objectives = GCB_INCLUDE % 'no_such_file.html'
self.course.save()
response = self.get(LESSON_URL)
self.assertIn('Invalid HTML tag: no_such_file.html', response.body)
def test_file_from_actual_filesystem(self):
# Note: This has the potential to cause a test flake: Adding an
# actual file to the filesystem and then removing it may cause
# ETL tests to complain - they saw the file, then failed to copy
# it because it went away.
simple_content = 'Fiery the angels fell'
if not os.path.isdir(HTML_DIR):
|
with open(HTML_PATH, 'w') as fp:
fp.write(simple_content)
response = self.get(LESSON_URL)
os.unlink(HTML_PATH)
self._expect_content(simple_content, response)
def test_simple(self):
simple_content = 'Deep thunder rolled around their shores'
self._set_content(simple_content)
response = self.get(LESSON_URL)
self._expect_content(simple_content, response)
def test_content_containing_tags(self):
content = '<h1>This is a test</h1><p>This is only a test.</p>'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content(content, response)
def test_jinja_base_path(self):
content = '{{ base_path }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('assets/html', response)
def test_jinja_course_base(self):
content = '{{ gcb_course_base }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('http://localhost/test_course/', response)
def test_jinja_course_title(self):
content = '{{ course_info.course.title }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('Test Course', response)
def test_inclusion(self):
content = 'Hello, World!'
sub_path = os.path.join(
appengine_config.BUNDLE_ROOT, HTML_DIR, 'sub.html')
self.context.fs.put(sub_path, StringIO.StringIO(content))
self._set_content('{% include "sub.html" %}')
try:
response = self.get(LESSON_URL)
self._expect_content(content, response)
finally:
self.context.fs.delete(sub_path)
|
os.mkdir(HTML_DIR)
|
conditional_block
|
tags_include.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify operation of <gcb-include> custom tag."""
__author__ = 'Mike Gainer ([email protected])'
import os
import StringIO
import appengine_config
from models import courses
from tests.functional import actions
COURSE_NAME = 'test_course'
COURSE_TITLE = 'Test Course'
ADMIN_EMAIL = '[email protected]'
PRE_INCLUDE = 'XXX'
POST_INCLUDE = 'YYY'
HTML_DIR = os.path.join(appengine_config.BUNDLE_ROOT, 'assets/html')
HTML_FILE = 'test.html'
HTML_PATH = os.path.join(HTML_DIR, HTML_FILE)
GCB_INCLUDE = (PRE_INCLUDE +
'<gcb-include path="/assets/html/%s" ' +
'instanceid="uODxjWHTxxIC"></gcb-include>' +
POST_INCLUDE)
LESSON_URL = '/test_course/unit?unit=1&lesson=2'
class TagsInclude(actions.TestBase):
|
def setUp(self):
super(TagsInclude, self).setUp()
self.context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.context)
self.unit = self.course.add_unit()
self.unit.title = 'The Unit'
self.unit.now_available = True
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'The Lesson'
self.lesson.now_available = True
self.lesson.objectives = GCB_INCLUDE % HTML_FILE
self.course.save()
def tearDown(self):
self.context.fs.delete(HTML_PATH)
def _set_content(self, content):
self.context.fs.put(HTML_PATH, StringIO.StringIO(content))
def _expect_content(self, expected, response):
expected = '%s<div>%s</div>%s' % (PRE_INCLUDE, expected, POST_INCLUDE)
self.assertIn(expected, response.body)
def test_missing_file_gives_error(self):
self.lesson.objectives = GCB_INCLUDE % 'no_such_file.html'
self.course.save()
response = self.get(LESSON_URL)
self.assertIn('Invalid HTML tag: no_such_file.html', response.body)
def test_file_from_actual_filesystem(self):
# Note: This has the potential to cause a test flake: Adding an
# actual file to the filesystem and then removing it may cause
# ETL tests to complain - they saw the file, then failed to copy
# it because it went away.
simple_content = 'Fiery the angels fell'
if not os.path.isdir(HTML_DIR):
os.mkdir(HTML_DIR)
with open(HTML_PATH, 'w') as fp:
fp.write(simple_content)
response = self.get(LESSON_URL)
os.unlink(HTML_PATH)
self._expect_content(simple_content, response)
def test_simple(self):
simple_content = 'Deep thunder rolled around their shores'
self._set_content(simple_content)
response = self.get(LESSON_URL)
self._expect_content(simple_content, response)
def test_content_containing_tags(self):
content = '<h1>This is a test</h1><p>This is only a test.</p>'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content(content, response)
def test_jinja_base_path(self):
content = '{{ base_path }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('assets/html', response)
def test_jinja_course_base(self):
content = '{{ gcb_course_base }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('http://localhost/test_course/', response)
def test_jinja_course_title(self):
content = '{{ course_info.course.title }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('Test Course', response)
def test_inclusion(self):
content = 'Hello, World!'
sub_path = os.path.join(
appengine_config.BUNDLE_ROOT, HTML_DIR, 'sub.html')
self.context.fs.put(sub_path, StringIO.StringIO(content))
self._set_content('{% include "sub.html" %}')
try:
response = self.get(LESSON_URL)
self._expect_content(content, response)
finally:
self.context.fs.delete(sub_path)
|
identifier_body
|
|
tags_include.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify operation of <gcb-include> custom tag."""
__author__ = 'Mike Gainer ([email protected])'
import os
import StringIO
import appengine_config
from models import courses
from tests.functional import actions
COURSE_NAME = 'test_course'
COURSE_TITLE = 'Test Course'
ADMIN_EMAIL = '[email protected]'
PRE_INCLUDE = 'XXX'
POST_INCLUDE = 'YYY'
HTML_DIR = os.path.join(appengine_config.BUNDLE_ROOT, 'assets/html')
HTML_FILE = 'test.html'
HTML_PATH = os.path.join(HTML_DIR, HTML_FILE)
GCB_INCLUDE = (PRE_INCLUDE +
'<gcb-include path="/assets/html/%s" ' +
'instanceid="uODxjWHTxxIC"></gcb-include>' +
POST_INCLUDE)
LESSON_URL = '/test_course/unit?unit=1&lesson=2'
class TagsInclude(actions.TestBase):
def setUp(self):
super(TagsInclude, self).setUp()
self.context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.context)
self.unit = self.course.add_unit()
self.unit.title = 'The Unit'
self.unit.now_available = True
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'The Lesson'
self.lesson.now_available = True
self.lesson.objectives = GCB_INCLUDE % HTML_FILE
self.course.save()
def tearDown(self):
self.context.fs.delete(HTML_PATH)
def _set_content(self, content):
self.context.fs.put(HTML_PATH, StringIO.StringIO(content))
def
|
(self, expected, response):
expected = '%s<div>%s</div>%s' % (PRE_INCLUDE, expected, POST_INCLUDE)
self.assertIn(expected, response.body)
def test_missing_file_gives_error(self):
self.lesson.objectives = GCB_INCLUDE % 'no_such_file.html'
self.course.save()
response = self.get(LESSON_URL)
self.assertIn('Invalid HTML tag: no_such_file.html', response.body)
def test_file_from_actual_filesystem(self):
# Note: This has the potential to cause a test flake: Adding an
# actual file to the filesystem and then removing it may cause
# ETL tests to complain - they saw the file, then failed to copy
# it because it went away.
simple_content = 'Fiery the angels fell'
if not os.path.isdir(HTML_DIR):
os.mkdir(HTML_DIR)
with open(HTML_PATH, 'w') as fp:
fp.write(simple_content)
response = self.get(LESSON_URL)
os.unlink(HTML_PATH)
self._expect_content(simple_content, response)
def test_simple(self):
simple_content = 'Deep thunder rolled around their shores'
self._set_content(simple_content)
response = self.get(LESSON_URL)
self._expect_content(simple_content, response)
def test_content_containing_tags(self):
content = '<h1>This is a test</h1><p>This is only a test.</p>'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content(content, response)
def test_jinja_base_path(self):
content = '{{ base_path }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('assets/html', response)
def test_jinja_course_base(self):
content = '{{ gcb_course_base }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('http://localhost/test_course/', response)
def test_jinja_course_title(self):
content = '{{ course_info.course.title }}'
self._set_content(content)
response = self.get(LESSON_URL)
self._expect_content('Test Course', response)
def test_inclusion(self):
content = 'Hello, World!'
sub_path = os.path.join(
appengine_config.BUNDLE_ROOT, HTML_DIR, 'sub.html')
self.context.fs.put(sub_path, StringIO.StringIO(content))
self._set_content('{% include "sub.html" %}')
try:
response = self.get(LESSON_URL)
self._expect_content(content, response)
finally:
self.context.fs.delete(sub_path)
|
_expect_content
|
identifier_name
|
html2html.rs
|
// Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Parse and re-serialize a HTML5 document.
///
/// This is meant to produce the exact same output (ignoring stderr) as
///
/// java -classpath htmlparser-1.4.jar nu.validator.htmlparser.tools.HTML2HTML
///
/// where htmlparser-1.4.jar comes from http://about.validator.nu/htmlparser/
extern crate html5ever;
use std::io;
|
use html5ever::driver::ParseOpts;
use html5ever::tree_builder::TreeBuilderOpts;
use html5ever::{parse, one_input, serialize};
fn main() {
let input = io::stdin().read_to_string().unwrap();
let dom: RcDom = parse(one_input(input), ParseOpts {
tree_builder: TreeBuilderOpts {
drop_doctype: true,
..Default::default()
},
..Default::default()
});
// The validator.nu HTML2HTML always prints a doctype at the very beginning.
io::stdout().write_str("<!DOCTYPE html>\n")
.ok().expect("writing DOCTYPE failed");
serialize(&mut io::stdout(), &dom.document, Default::default())
.ok().expect("serialization failed");
}
|
use std::default::Default;
use html5ever::sink::rcdom::RcDom;
|
random_line_split
|
html2html.rs
|
// Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Parse and re-serialize a HTML5 document.
///
/// This is meant to produce the exact same output (ignoring stderr) as
///
/// java -classpath htmlparser-1.4.jar nu.validator.htmlparser.tools.HTML2HTML
///
/// where htmlparser-1.4.jar comes from http://about.validator.nu/htmlparser/
extern crate html5ever;
use std::io;
use std::default::Default;
use html5ever::sink::rcdom::RcDom;
use html5ever::driver::ParseOpts;
use html5ever::tree_builder::TreeBuilderOpts;
use html5ever::{parse, one_input, serialize};
fn main()
|
{
let input = io::stdin().read_to_string().unwrap();
let dom: RcDom = parse(one_input(input), ParseOpts {
tree_builder: TreeBuilderOpts {
drop_doctype: true,
..Default::default()
},
..Default::default()
});
// The validator.nu HTML2HTML always prints a doctype at the very beginning.
io::stdout().write_str("<!DOCTYPE html>\n")
.ok().expect("writing DOCTYPE failed");
serialize(&mut io::stdout(), &dom.document, Default::default())
.ok().expect("serialization failed");
}
|
identifier_body
|
|
html2html.rs
|
// Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Parse and re-serialize a HTML5 document.
///
/// This is meant to produce the exact same output (ignoring stderr) as
///
/// java -classpath htmlparser-1.4.jar nu.validator.htmlparser.tools.HTML2HTML
///
/// where htmlparser-1.4.jar comes from http://about.validator.nu/htmlparser/
extern crate html5ever;
use std::io;
use std::default::Default;
use html5ever::sink::rcdom::RcDom;
use html5ever::driver::ParseOpts;
use html5ever::tree_builder::TreeBuilderOpts;
use html5ever::{parse, one_input, serialize};
fn
|
() {
let input = io::stdin().read_to_string().unwrap();
let dom: RcDom = parse(one_input(input), ParseOpts {
tree_builder: TreeBuilderOpts {
drop_doctype: true,
..Default::default()
},
..Default::default()
});
// The validator.nu HTML2HTML always prints a doctype at the very beginning.
io::stdout().write_str("<!DOCTYPE html>\n")
.ok().expect("writing DOCTYPE failed");
serialize(&mut io::stdout(), &dom.document, Default::default())
.ok().expect("serialization failed");
}
|
main
|
identifier_name
|
lib.rs
|
#![feature(no_std,core_intrinsics)]
#![no_std]
#[macro_export]
macro_rules! impl_fmt
{
(@as_item $($i:item)*) => {$($i)*};
($( /*$(<($($params:tt)+)>)* */ $tr:ident($s:ident, $f:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl/*$(<$($params)+>)* */ ::std::fmt::$tr for $t {
fn fmt(&$s, $f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
$( $code )*
}
}
})+
};
}
#[macro_export]
macro_rules! impl_conv {
|
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
(@match_ $( $(<($($params:tt)+)>)* Into<$dst:ty>($self_:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::Into<$dst> for $t {
fn into($self_) -> $dst {
$($code)*
}
}
})+
};
(@match_ $( $(<($($params:tt)+)>)* AsRef<$dst:ty>($self_:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::AsRef<$dst> for $t {
fn as_ref($self_) -> &$dst {
$($code)*
}
}
})+
};
($( $(<($($params:tt)+)>)* $name:ident<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_conv!{ @match_ $(<($($params:tt)+)>)* $name<$src>($v) for $t { $($code)* } })+
};
}
#[macro_export]
macro_rules! impl_from {
(@as_item $($i:item)*) => {$($i)*};
($( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
}
pub fn type_name<T: ?::core::marker::Sized>() -> &'static str {
// SAFE: Intrinsic with no sideeffect
unsafe { ::core::intrinsics::type_name::<T>() }
}
#[macro_export]
macro_rules! type_name {
($t:ty) => ( $crate::type_name::<$t>() );
}
#[macro_export]
macro_rules! todo
{
( $s:expr ) => ( panic!( concat!("TODO: ",$s) ) );
( $s:expr, $($v:tt)* ) => ( panic!( concat!("TODO: ",$s), $($v)* ) );
}
/// Override libcore's `try!` macro with one that backs onto `From`
#[macro_export]
macro_rules! try {
($e:expr) => (
match $e {
Ok(v) => v,
Err(e) => return Err(From::from(e)),
}
);
}
|
(@as_item $($i:item)*) => {$($i)*};
(@match_ $( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
|
random_line_split
|
lib.rs
|
#![feature(no_std,core_intrinsics)]
#![no_std]
#[macro_export]
macro_rules! impl_fmt
{
(@as_item $($i:item)*) => {$($i)*};
($( /*$(<($($params:tt)+)>)* */ $tr:ident($s:ident, $f:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl/*$(<$($params)+>)* */ ::std::fmt::$tr for $t {
fn fmt(&$s, $f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
$( $code )*
}
}
})+
};
}
#[macro_export]
macro_rules! impl_conv {
(@as_item $($i:item)*) => {$($i)*};
(@match_ $( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
(@match_ $( $(<($($params:tt)+)>)* Into<$dst:ty>($self_:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::Into<$dst> for $t {
fn into($self_) -> $dst {
$($code)*
}
}
})+
};
(@match_ $( $(<($($params:tt)+)>)* AsRef<$dst:ty>($self_:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::AsRef<$dst> for $t {
fn as_ref($self_) -> &$dst {
$($code)*
}
}
})+
};
($( $(<($($params:tt)+)>)* $name:ident<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_conv!{ @match_ $(<($($params:tt)+)>)* $name<$src>($v) for $t { $($code)* } })+
};
}
#[macro_export]
macro_rules! impl_from {
(@as_item $($i:item)*) => {$($i)*};
($( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
}
pub fn type_name<T: ?::core::marker::Sized>() -> &'static str
|
#[macro_export]
macro_rules! type_name {
($t:ty) => ( $crate::type_name::<$t>() );
}
#[macro_export]
macro_rules! todo
{
( $s:expr ) => ( panic!( concat!("TODO: ",$s) ) );
( $s:expr, $($v:tt)* ) => ( panic!( concat!("TODO: ",$s), $($v)* ) );
}
/// Override libcore's `try!` macro with one that backs onto `From`
#[macro_export]
macro_rules! try {
($e:expr) => (
match $e {
Ok(v) => v,
Err(e) => return Err(From::from(e)),
}
);
}
|
{
// SAFE: Intrinsic with no sideeffect
unsafe { ::core::intrinsics::type_name::<T>() }
}
|
identifier_body
|
lib.rs
|
#![feature(no_std,core_intrinsics)]
#![no_std]
#[macro_export]
macro_rules! impl_fmt
{
(@as_item $($i:item)*) => {$($i)*};
($( /*$(<($($params:tt)+)>)* */ $tr:ident($s:ident, $f:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl/*$(<$($params)+>)* */ ::std::fmt::$tr for $t {
fn fmt(&$s, $f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
$( $code )*
}
}
})+
};
}
#[macro_export]
macro_rules! impl_conv {
(@as_item $($i:item)*) => {$($i)*};
(@match_ $( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
(@match_ $( $(<($($params:tt)+)>)* Into<$dst:ty>($self_:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::Into<$dst> for $t {
fn into($self_) -> $dst {
$($code)*
}
}
})+
};
(@match_ $( $(<($($params:tt)+)>)* AsRef<$dst:ty>($self_:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::AsRef<$dst> for $t {
fn as_ref($self_) -> &$dst {
$($code)*
}
}
})+
};
($( $(<($($params:tt)+)>)* $name:ident<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_conv!{ @match_ $(<($($params:tt)+)>)* $name<$src>($v) for $t { $($code)* } })+
};
}
#[macro_export]
macro_rules! impl_from {
(@as_item $($i:item)*) => {$($i)*};
($( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::std::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
}
pub fn
|
<T: ?::core::marker::Sized>() -> &'static str {
// SAFE: Intrinsic with no sideeffect
unsafe { ::core::intrinsics::type_name::<T>() }
}
#[macro_export]
macro_rules! type_name {
($t:ty) => ( $crate::type_name::<$t>() );
}
#[macro_export]
macro_rules! todo
{
( $s:expr ) => ( panic!( concat!("TODO: ",$s) ) );
( $s:expr, $($v:tt)* ) => ( panic!( concat!("TODO: ",$s), $($v)* ) );
}
/// Override libcore's `try!` macro with one that backs onto `From`
#[macro_export]
macro_rules! try {
($e:expr) => (
match $e {
Ok(v) => v,
Err(e) => return Err(From::from(e)),
}
);
}
|
type_name
|
identifier_name
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffxparser(RPackage):
|
"""Affymetrix File Parsing SDK
Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR). It
provides methods for fast and memory efficient parsing of Affymetrix
files using the Affymetrix' Fusion SDK. Both ASCII- and binary-based
files are supported. Currently, there are methods for reading chip
definition file (CDF) and a cell intensity file (CEL). These files can
be read either in full or in part. For example, probe signals from a few
probesets can be extracted very quickly from a set of CEL files into a
convenient list structure."""
homepage = "https://bioconductor.org/packages/affxparser"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.62.0', commit='b3e988e5c136c3f1a064e1da13730b403c8704c0')
version('1.56.0', commit='20d27701ad2bdfacf34d857bb8ecb4f505b4d056')
version('1.54.0', commit='dce83d23599a964086a84ced4afd13fc43e7cd4f')
version('1.52.0', commit='8e0c4b89ee1cb4ff95f58a5dd947249dc718bc58')
version('1.50.0', commit='01ef641727eadc2cc17b5dbb0b1432364436e3d5')
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('[email protected]:', type=('build', 'run'))
|
identifier_body
|
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffxparser(RPackage):
"""Affymetrix File Parsing SDK
Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR). It
provides methods for fast and memory efficient parsing of Affymetrix
files using the Affymetrix' Fusion SDK. Both ASCII- and binary-based
files are supported. Currently, there are methods for reading chip
|
definition file (CDF) and a cell intensity file (CEL). These files can
be read either in full or in part. For example, probe signals from a few
probesets can be extracted very quickly from a set of CEL files into a
convenient list structure."""
homepage = "https://bioconductor.org/packages/affxparser"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.62.0', commit='b3e988e5c136c3f1a064e1da13730b403c8704c0')
version('1.56.0', commit='20d27701ad2bdfacf34d857bb8ecb4f505b4d056')
version('1.54.0', commit='dce83d23599a964086a84ced4afd13fc43e7cd4f')
version('1.52.0', commit='8e0c4b89ee1cb4ff95f58a5dd947249dc718bc58')
version('1.50.0', commit='01ef641727eadc2cc17b5dbb0b1432364436e3d5')
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('[email protected]:', type=('build', 'run'))
|
random_line_split
|
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class
|
(RPackage):
"""Affymetrix File Parsing SDK
Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR). It
provides methods for fast and memory efficient parsing of Affymetrix
files using the Affymetrix' Fusion SDK. Both ASCII- and binary-based
files are supported. Currently, there are methods for reading chip
definition file (CDF) and a cell intensity file (CEL). These files can
be read either in full or in part. For example, probe signals from a few
probesets can be extracted very quickly from a set of CEL files into a
convenient list structure."""
homepage = "https://bioconductor.org/packages/affxparser"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.62.0', commit='b3e988e5c136c3f1a064e1da13730b403c8704c0')
version('1.56.0', commit='20d27701ad2bdfacf34d857bb8ecb4f505b4d056')
version('1.54.0', commit='dce83d23599a964086a84ced4afd13fc43e7cd4f')
version('1.52.0', commit='8e0c4b89ee1cb4ff95f58a5dd947249dc718bc58')
version('1.50.0', commit='01ef641727eadc2cc17b5dbb0b1432364436e3d5')
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('[email protected]:', type=('build', 'run'))
|
RAffxparser
|
identifier_name
|
FIPBruteForce.py
|
import ftplib
def connect(host, user, password):
|
def main():
# Variables
targetHostAddress = '10.0.0.24'
userName = 'bwayne'
passwordsFilePath = 'passwords.txt'
# Try to connect using anonymous credentials
print('[+] Using anonymous credentials for ' + targetHostAddress)
if connect(targetHostAddress, 'anonymous', '[email protected]'):
print('[*] FTP Anonymous log on succeeded on host ' + targetHostAddress)
else:
print('[*] FTP Anonymous log on failed on host ' + targetHostAddress)
# Try brute force using dictionary
# Open passwords file
passwordsFile = open(passwordsFilePath, 'r')
for line in passwordsFile.readlines():
password = line.strip('\r').strip('\n')
print('Testing: ' + str(password))
if(connect(targetHostAddress, userName, password)):
#Password found
print('[*] FTP log on succeeded on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
exit(0)
else:
print('[*] FTP log on failed on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
if __name__ == "__main__":
main()
|
try:
ftp = ftplib.FTP(host)
ftp.login(user, password)
ftp.quit()
return True
except:
return False
|
identifier_body
|
FIPBruteForce.py
|
import ftplib
def connect(host, user, password):
try:
ftp = ftplib.FTP(host)
ftp.login(user, password)
ftp.quit()
return True
except:
return False
def main():
# Variables
targetHostAddress = '10.0.0.24'
userName = 'bwayne'
passwordsFilePath = 'passwords.txt'
# Try to connect using anonymous credentials
print('[+] Using anonymous credentials for ' + targetHostAddress)
if connect(targetHostAddress, 'anonymous', '[email protected]'):
print('[*] FTP Anonymous log on succeeded on host ' + targetHostAddress)
else:
print('[*] FTP Anonymous log on failed on host ' + targetHostAddress)
# Try brute force using dictionary
# Open passwords file
passwordsFile = open(passwordsFilePath, 'r')
for line in passwordsFile.readlines():
password = line.strip('\r').strip('\n')
print('Testing: ' + str(password))
if(connect(targetHostAddress, userName, password)):
#Password found
print('[*] FTP log on succeeded on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
|
print('[*] FTP log on failed on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
if __name__ == "__main__":
main()
|
exit(0)
else:
|
random_line_split
|
FIPBruteForce.py
|
import ftplib
def connect(host, user, password):
try:
ftp = ftplib.FTP(host)
ftp.login(user, password)
ftp.quit()
return True
except:
return False
def
|
():
# Variables
targetHostAddress = '10.0.0.24'
userName = 'bwayne'
passwordsFilePath = 'passwords.txt'
# Try to connect using anonymous credentials
print('[+] Using anonymous credentials for ' + targetHostAddress)
if connect(targetHostAddress, 'anonymous', '[email protected]'):
print('[*] FTP Anonymous log on succeeded on host ' + targetHostAddress)
else:
print('[*] FTP Anonymous log on failed on host ' + targetHostAddress)
# Try brute force using dictionary
# Open passwords file
passwordsFile = open(passwordsFilePath, 'r')
for line in passwordsFile.readlines():
password = line.strip('\r').strip('\n')
print('Testing: ' + str(password))
if(connect(targetHostAddress, userName, password)):
#Password found
print('[*] FTP log on succeeded on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
exit(0)
else:
print('[*] FTP log on failed on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
if __name__ == "__main__":
main()
|
main
|
identifier_name
|
FIPBruteForce.py
|
import ftplib
def connect(host, user, password):
try:
ftp = ftplib.FTP(host)
ftp.login(user, password)
ftp.quit()
return True
except:
return False
def main():
# Variables
targetHostAddress = '10.0.0.24'
userName = 'bwayne'
passwordsFilePath = 'passwords.txt'
# Try to connect using anonymous credentials
print('[+] Using anonymous credentials for ' + targetHostAddress)
if connect(targetHostAddress, 'anonymous', '[email protected]'):
|
else:
print('[*] FTP Anonymous log on failed on host ' + targetHostAddress)
# Try brute force using dictionary
# Open passwords file
passwordsFile = open(passwordsFilePath, 'r')
for line in passwordsFile.readlines():
password = line.strip('\r').strip('\n')
print('Testing: ' + str(password))
if(connect(targetHostAddress, userName, password)):
#Password found
print('[*] FTP log on succeeded on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
exit(0)
else:
print('[*] FTP log on failed on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
if __name__ == "__main__":
main()
|
print('[*] FTP Anonymous log on succeeded on host ' + targetHostAddress)
|
conditional_block
|
tables.js
|
import DB from '../db';
import * as types from '../constants/tablesConstants';
import { stopFetching, internalInitTable } from './currentTable';
export function setCurrentTable(tableName) {
return {
type: types.SET_CURRENT_TABLE,
tableName
};
}
export function changeTableName(newTableName) {
return {
type: types.CHANGE_TABLE_NAME,
newTableName
};
}
export function createTable(tableName, i = -1) {
return dispatch => new Promise((resolve, reject) => {
// eslint-disable-next-line no-param-reassign
DB.createTable(tableName)
.then(
() => {
dispatch({
type: types.CREATE_TABLE,
tableName
});
resolve(tableName);
},
(error) => {
// if tableName is occupied it sends reject with
// incremented counter to pick new table name
// recursively
if (error.search('already exists')) {
const j = i + 1;
reject(j);
}
}
);
});
}
export function dropTable(tableName)
|
export function truncateTable(tableName, restartIdentity) {
return (dispatch) => {
dispatch({
type: 'tables/TRUNCATE_TABLE'
});
DB.truncateTable(tableName, restartIdentity);
};
}
function internalGetTables(dispatch, clear) {
return new Promise((resolve, reject) => {
if (clear) {
dispatch({ type: types.GET_TABLES, tables: [] });
}
DB.getTables()
.then(
(tables) => {
if (tables.length) {
return DB.getTableOid(tables);
}
return tables;
},
(error) => {
reject(error);
}
)
.then(
(tables) => {
if (tables.length) {
return DB.getForeignKeys(tables);
}
dispatch(stopFetching());
return tables;
}
)
.then(
(tables) => {
dispatch({
type: types.GET_TABLES,
tables
});
resolve(tables.length ? tables[0].table_name : '');
}
);
});
}
export function getTables(clear = undefined) {
return dispatch => (internalGetTables(dispatch, clear));
}
export function reloadTables() {
return (dispatch, getState) => {
const currentTableState = getState().currentTable;
const tableName = currentTableState.tableName;
dispatch({ type: types.CLEAR_TABLES });
internalGetTables(dispatch)
.then(
() => {
if (tableName) {
dispatch({
type: types.SET_CURRENT_TABLE,
tableName
});
const page = currentTableState.page;
const order = currentTableState.order;
const filters = currentTableState.filters;
internalInitTable(dispatch, getState, { tableName, page, order, filters });
}
}
);
};
}
export function clearTables() {
return {
type: types.CLEAR_TABLES
};
}
export function searchTables(keyword) {
return {
type: types.SEARCH_TABLES,
keyword
};
}
|
{
return (dispatch) => {
dispatch({
type: 'tables/DROP_TABLE'
});
DB.dropTable(tableName);
};
}
|
identifier_body
|
tables.js
|
import DB from '../db';
import * as types from '../constants/tablesConstants';
import { stopFetching, internalInitTable } from './currentTable';
export function setCurrentTable(tableName) {
return {
type: types.SET_CURRENT_TABLE,
tableName
};
}
export function changeTableName(newTableName) {
return {
type: types.CHANGE_TABLE_NAME,
|
}
export function createTable(tableName, i = -1) {
return dispatch => new Promise((resolve, reject) => {
// eslint-disable-next-line no-param-reassign
DB.createTable(tableName)
.then(
() => {
dispatch({
type: types.CREATE_TABLE,
tableName
});
resolve(tableName);
},
(error) => {
// if tableName is occupied it sends reject with
// incremented counter to pick new table name
// recursively
if (error.search('already exists')) {
const j = i + 1;
reject(j);
}
}
);
});
}
export function dropTable(tableName) {
return (dispatch) => {
dispatch({
type: 'tables/DROP_TABLE'
});
DB.dropTable(tableName);
};
}
export function truncateTable(tableName, restartIdentity) {
return (dispatch) => {
dispatch({
type: 'tables/TRUNCATE_TABLE'
});
DB.truncateTable(tableName, restartIdentity);
};
}
function internalGetTables(dispatch, clear) {
return new Promise((resolve, reject) => {
if (clear) {
dispatch({ type: types.GET_TABLES, tables: [] });
}
DB.getTables()
.then(
(tables) => {
if (tables.length) {
return DB.getTableOid(tables);
}
return tables;
},
(error) => {
reject(error);
}
)
.then(
(tables) => {
if (tables.length) {
return DB.getForeignKeys(tables);
}
dispatch(stopFetching());
return tables;
}
)
.then(
(tables) => {
dispatch({
type: types.GET_TABLES,
tables
});
resolve(tables.length ? tables[0].table_name : '');
}
);
});
}
export function getTables(clear = undefined) {
return dispatch => (internalGetTables(dispatch, clear));
}
export function reloadTables() {
return (dispatch, getState) => {
const currentTableState = getState().currentTable;
const tableName = currentTableState.tableName;
dispatch({ type: types.CLEAR_TABLES });
internalGetTables(dispatch)
.then(
() => {
if (tableName) {
dispatch({
type: types.SET_CURRENT_TABLE,
tableName
});
const page = currentTableState.page;
const order = currentTableState.order;
const filters = currentTableState.filters;
internalInitTable(dispatch, getState, { tableName, page, order, filters });
}
}
);
};
}
export function clearTables() {
return {
type: types.CLEAR_TABLES
};
}
export function searchTables(keyword) {
return {
type: types.SEARCH_TABLES,
keyword
};
}
|
newTableName
};
|
random_line_split
|