file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
handlers.py | """App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def | (instance, key):
"""Store message limit in Redis."""
old_message_limit = instance._loaded_values.get("message_limit")
if old_message_limit == instance.message_limit:
return
rclient = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_QUOTA_DB
)
if instance.message_limit is None:
# delete existing key
if rclient.hexists(constants.REDIS_HASHNAME, key):
rclient.hdel(constants.REDIS_HASHNAME, key)
return
if old_message_limit is not None:
diff = instance.message_limit - old_message_limit
else:
diff = instance.message_limit
rclient.hincrby(constants.REDIS_HASHNAME, key, diff)
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
"""Store domain message limit in Redis."""
set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
"""Store mailbox message limit in Redis."""
set_message_limit(instance, instance.full_address)
| set_message_limit | identifier_name |
handlers.py | """App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def set_message_limit(instance, key):
"""Store message limit in Redis."""
old_message_limit = instance._loaded_values.get("message_limit")
if old_message_limit == instance.message_limit:
return
rclient = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_QUOTA_DB
) | if rclient.hexists(constants.REDIS_HASHNAME, key):
rclient.hdel(constants.REDIS_HASHNAME, key)
return
if old_message_limit is not None:
diff = instance.message_limit - old_message_limit
else:
diff = instance.message_limit
rclient.hincrby(constants.REDIS_HASHNAME, key, diff)
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
"""Store domain message limit in Redis."""
set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
"""Store mailbox message limit in Redis."""
set_message_limit(instance, instance.full_address) | if instance.message_limit is None:
# delete existing key | random_line_split |
handlers.py | """App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def set_message_limit(instance, key):
"""Store message limit in Redis."""
old_message_limit = instance._loaded_values.get("message_limit")
if old_message_limit == instance.message_limit:
return
rclient = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_QUOTA_DB
)
if instance.message_limit is None:
# delete existing key
if rclient.hexists(constants.REDIS_HASHNAME, key):
rclient.hdel(constants.REDIS_HASHNAME, key)
return
if old_message_limit is not None:
diff = instance.message_limit - old_message_limit
else:
diff = instance.message_limit
rclient.hincrby(constants.REDIS_HASHNAME, key, diff)
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
"""Store domain message limit in Redis."""
set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
| """Store mailbox message limit in Redis."""
set_message_limit(instance, instance.full_address) | identifier_body |
|
jinja_template.py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def | (self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
with build_utils.TempDir() as temp_dir:
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
build_utils.ZipDir(outputs_zip, temp_dir)
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='The template files to process.')
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to '
'the input. Required if --output-zip is given.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
build_utils.AddDepfileOption(parser)
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.depfile:
output = options.output or options.outputs_zip
deps = processor.GetLoadedTemplates()
build_utils.WriteDepfile(options.depfile, output, deps)
if __name__ == '__main__':
main()
| __init__ | identifier_name |
jinja_template.py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def __init__(self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
with build_utils.TempDir() as temp_dir:
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
build_utils.ZipDir(outputs_zip, temp_dir)
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='The template files to process.')
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to ' | 'the input. Required if --output-zip is given.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
build_utils.AddDepfileOption(parser)
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.depfile:
output = options.output or options.outputs_zip
deps = processor.GetLoadedTemplates()
build_utils.WriteDepfile(options.depfile, output, deps)
if __name__ == '__main__':
main() | random_line_split |
|
jinja_template.py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def __init__(self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
|
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
with build_utils.TempDir() as temp_dir:
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
build_utils.ZipDir(outputs_zip, temp_dir)
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='The template files to process.')
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to '
'the input. Required if --output-zip is given.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
build_utils.AddDepfileOption(parser)
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.depfile:
output = options.output or options.outputs_zip
deps = processor.GetLoadedTemplates()
build_utils.WriteDepfile(options.depfile, output, deps)
if __name__ == '__main__':
main()
| template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template | conditional_block |
jinja_template.py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def __init__(self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
|
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='The template files to process.')
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to '
'the input. Required if --output-zip is given.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
build_utils.AddDepfileOption(parser)
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.depfile:
output = options.output or options.outputs_zip
deps = processor.GetLoadedTemplates()
build_utils.WriteDepfile(options.depfile, output, deps)
if __name__ == '__main__':
main()
| with build_utils.TempDir() as temp_dir:
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
build_utils.ZipDir(outputs_zip, temp_dir) | identifier_body |
bigquery.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import pandas as pd
from sqlalchemy import literal_column
from sqlalchemy.sql.expression import ColumnClause
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.models.core import Database # pragma: no cover
class BigQueryEngineSpec(BaseEngineSpec):
"""Engine spec for Google's BigQuery
As contributed by @mxmzdlv on issue #945"""
engine = "bigquery"
engine_name = "Google BigQuery"
max_column_name_length = 128
"""
https://www.python.org/dev/peps/pep-0249/#arraysize
raw_connections bypass the pybigquery query execution context and deal with
raw dbapi connection directly.
If this value is not set, the default value is set to 1, as described here,
https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/dbapi/cursor.html#Cursor
The default value of 5000 is derived from the pybigquery.
https://github.com/mxmzdlv/pybigquery/blob/d214bb089ca0807ca9aaa6ce4d5a01172d40264e/pybigquery/sqlalchemy_bigquery.py#L102
"""
arraysize = 5000
_date_trunc_functions = {
"DATE": "DATE_TRUNC",
"DATETIME": "DATETIME_TRUNC",
"TIME": "TIME_TRUNC",
"TIMESTAMP": "TIMESTAMP_TRUNC",
}
_time_grain_expressions = {
None: "{col}",
"PT1S": "{func}({col}, SECOND)",
"PT1M": "{func}({col}, MINUTE)",
"PT1H": "{func}({col}, HOUR)",
"P1D": "{func}({col}, DAY)",
"P1W": "{func}({col}, WEEK)",
"P1M": "{func}({col}, MONTH)",
"P0.25Y": "{func}({col}, QUARTER)",
"P1Y": "{func}({col}, YEAR)",
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST('{dttm.date().isoformat()}' AS DATE)"
if tt == utils.TemporalType.DATETIME:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS DATETIME)"""
if tt == utils.TemporalType.TIME:
return f"""CAST('{dttm.strftime("%H:%M:%S.%f")}' AS TIME)"""
if tt == utils.TemporalType.TIMESTAMP:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS TIMESTAMP)"""
return None
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
data = super().fetch_data(cursor, limit)
# Support type BigQuery Row, introduced here PR #4071
# google.cloud.bigquery.table.Row
if data and type(data[0]).__name__ == "Row":
data = [r.values() for r in data] # type: ignore
return data
@staticmethod
def _mutate_label(label: str) -> str:
"""
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param label: Expected expression label
:return: Conditionally mutated label
"""
label_hashed = "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
# if label starts with number, add underscore as first character
label_mutated = "_" + label if re.match(r"^\d", label) else label
# replace non-alphanumeric characters with underscores
label_mutated = re.sub(r"[^\w]+", "_", label_mutated)
if label_mutated != label:
# add first 5 chars from md5 hash to label to avoid possible collisions
label_mutated += label_hashed[:6]
return label_mutated
@classmethod
def _truncate_label(cls, label: str) -> str:
"""BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is prefixed
to the md5 hash of the original label.
:param label: expected expression label
:return: truncated label
"""
return "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
partitions_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "partition"
]
cluster_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "clustering"
]
return {
"partitions": {"cols": partitions_columns},
"clustering": {"cols": cluster_columns},
}
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
"""
BigQuery dialect requires us to not use backtick in the fieldname which are
nested.
Using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
Also explicility specifying column names so we don't encounter duplicate
column names in the result.
"""
return [
literal_column(c["name"]).label(c["name"].replace(".", "__")) for c in cols
]
@classmethod
def epoch_to_dttm(cls) -> str:
return "TIMESTAMP_SECONDS({col})"
@classmethod
def epoch_ms_to_dttm(cls) -> str:
|
@classmethod
def df_to_sql(cls, df: pd.DataFrame, **kwargs: Any) -> None:
"""
Upload data from a Pandas DataFrame to BigQuery. Calls
`DataFrame.to_gbq()` which requires `pandas_gbq` to be installed.
:param df: Dataframe with data to be uploaded
:param kwargs: kwargs to be passed to to_gbq() method. Requires that `schema`,
`name` and `con` are present in kwargs. `name` and `schema` are combined
and passed to `to_gbq()` as `destination_table`.
"""
try:
import pandas_gbq
from google.oauth2 import service_account
except ImportError:
raise Exception(
"Could not import libraries `pandas_gbq` or `google.oauth2`, which are "
"required to be installed in your environment in order "
"to upload data to BigQuery"
)
if not ("name" in kwargs and "schema" in kwargs and "con" in kwargs):
raise Exception("name, schema and con need to be defined in kwargs")
gbq_kwargs = {}
gbq_kwargs["project_id"] = kwargs["con"].engine.url.host
gbq_kwargs["destination_table"] = f"{kwargs.pop('schema')}.{kwargs.pop('name')}"
# add credentials if they are set on the SQLAlchemy Dialect:
creds = kwargs["con"].dialect.credentials_info
if creds:
credentials = service_account.Credentials.from_service_account_info(creds)
gbq_kwargs["credentials"] = credentials
# Only pass through supported kwargs
supported_kwarg_keys = {"if_exists"}
for key in supported_kwarg_keys:
if key in kwargs:
gbq_kwargs[key] = kwargs[key]
pandas_gbq.to_gbq(df, **gbq_kwargs)
| return "TIMESTAMP_MILLIS({col})" | identifier_body |
bigquery.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import pandas as pd
from sqlalchemy import literal_column
from sqlalchemy.sql.expression import ColumnClause
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.models.core import Database # pragma: no cover
class BigQueryEngineSpec(BaseEngineSpec):
"""Engine spec for Google's BigQuery
As contributed by @mxmzdlv on issue #945"""
engine = "bigquery"
engine_name = "Google BigQuery"
max_column_name_length = 128
"""
https://www.python.org/dev/peps/pep-0249/#arraysize
raw_connections bypass the pybigquery query execution context and deal with
raw dbapi connection directly.
If this value is not set, the default value is set to 1, as described here,
https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/dbapi/cursor.html#Cursor
The default value of 5000 is derived from the pybigquery.
https://github.com/mxmzdlv/pybigquery/blob/d214bb089ca0807ca9aaa6ce4d5a01172d40264e/pybigquery/sqlalchemy_bigquery.py#L102
"""
arraysize = 5000
_date_trunc_functions = {
"DATE": "DATE_TRUNC",
"DATETIME": "DATETIME_TRUNC",
"TIME": "TIME_TRUNC",
"TIMESTAMP": "TIMESTAMP_TRUNC",
}
_time_grain_expressions = {
None: "{col}",
"PT1S": "{func}({col}, SECOND)",
"PT1M": "{func}({col}, MINUTE)",
"PT1H": "{func}({col}, HOUR)",
"P1D": "{func}({col}, DAY)",
"P1W": "{func}({col}, WEEK)",
"P1M": "{func}({col}, MONTH)",
"P0.25Y": "{func}({col}, QUARTER)",
"P1Y": "{func}({col}, YEAR)",
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST('{dttm.date().isoformat()}' AS DATE)"
if tt == utils.TemporalType.DATETIME:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS DATETIME)"""
if tt == utils.TemporalType.TIME:
return f"""CAST('{dttm.strftime("%H:%M:%S.%f")}' AS TIME)"""
if tt == utils.TemporalType.TIMESTAMP:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS TIMESTAMP)"""
return None
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
data = super().fetch_data(cursor, limit)
# Support type BigQuery Row, introduced here PR #4071
# google.cloud.bigquery.table.Row
if data and type(data[0]).__name__ == "Row":
data = [r.values() for r in data] # type: ignore
return data
@staticmethod
def _mutate_label(label: str) -> str:
"""
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param label: Expected expression label
:return: Conditionally mutated label
"""
label_hashed = "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
# if label starts with number, add underscore as first character
label_mutated = "_" + label if re.match(r"^\d", label) else label
# replace non-alphanumeric characters with underscores
label_mutated = re.sub(r"[^\w]+", "_", label_mutated)
if label_mutated != label:
# add first 5 chars from md5 hash to label to avoid possible collisions
label_mutated += label_hashed[:6]
return label_mutated
@classmethod
def _truncate_label(cls, label: str) -> str:
"""BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is prefixed
to the md5 hash of the original label.
:param label: expected expression label
:return: truncated label
"""
return "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
@classmethod
def | (
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
partitions_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "partition"
]
cluster_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "clustering"
]
return {
"partitions": {"cols": partitions_columns},
"clustering": {"cols": cluster_columns},
}
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
"""
BigQuery dialect requires us to not use backtick in the fieldname which are
nested.
Using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
Also explicility specifying column names so we don't encounter duplicate
column names in the result.
"""
return [
literal_column(c["name"]).label(c["name"].replace(".", "__")) for c in cols
]
@classmethod
def epoch_to_dttm(cls) -> str:
return "TIMESTAMP_SECONDS({col})"
@classmethod
def epoch_ms_to_dttm(cls) -> str:
return "TIMESTAMP_MILLIS({col})"
@classmethod
def df_to_sql(cls, df: pd.DataFrame, **kwargs: Any) -> None:
"""
Upload data from a Pandas DataFrame to BigQuery. Calls
`DataFrame.to_gbq()` which requires `pandas_gbq` to be installed.
:param df: Dataframe with data to be uploaded
:param kwargs: kwargs to be passed to to_gbq() method. Requires that `schema`,
`name` and `con` are present in kwargs. `name` and `schema` are combined
and passed to `to_gbq()` as `destination_table`.
"""
try:
import pandas_gbq
from google.oauth2 import service_account
except ImportError:
raise Exception(
"Could not import libraries `pandas_gbq` or `google.oauth2`, which are "
"required to be installed in your environment in order "
"to upload data to BigQuery"
)
if not ("name" in kwargs and "schema" in kwargs and "con" in kwargs):
raise Exception("name, schema and con need to be defined in kwargs")
gbq_kwargs = {}
gbq_kwargs["project_id"] = kwargs["con"].engine.url.host
gbq_kwargs["destination_table"] = f"{kwargs.pop('schema')}.{kwargs.pop('name')}"
# add credentials if they are set on the SQLAlchemy Dialect:
creds = kwargs["con"].dialect.credentials_info
if creds:
credentials = service_account.Credentials.from_service_account_info(creds)
gbq_kwargs["credentials"] = credentials
# Only pass through supported kwargs
supported_kwarg_keys = {"if_exists"}
for key in supported_kwarg_keys:
if key in kwargs:
gbq_kwargs[key] = kwargs[key]
pandas_gbq.to_gbq(df, **gbq_kwargs)
| extra_table_metadata | identifier_name |
bigquery.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import pandas as pd
from sqlalchemy import literal_column
from sqlalchemy.sql.expression import ColumnClause
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.models.core import Database # pragma: no cover
class BigQueryEngineSpec(BaseEngineSpec):
"""Engine spec for Google's BigQuery
As contributed by @mxmzdlv on issue #945"""
engine = "bigquery"
engine_name = "Google BigQuery"
max_column_name_length = 128
"""
https://www.python.org/dev/peps/pep-0249/#arraysize
raw_connections bypass the pybigquery query execution context and deal with
raw dbapi connection directly.
If this value is not set, the default value is set to 1, as described here,
https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/dbapi/cursor.html#Cursor
The default value of 5000 is derived from the pybigquery.
https://github.com/mxmzdlv/pybigquery/blob/d214bb089ca0807ca9aaa6ce4d5a01172d40264e/pybigquery/sqlalchemy_bigquery.py#L102
"""
arraysize = 5000
_date_trunc_functions = {
"DATE": "DATE_TRUNC",
"DATETIME": "DATETIME_TRUNC",
"TIME": "TIME_TRUNC",
"TIMESTAMP": "TIMESTAMP_TRUNC",
}
_time_grain_expressions = {
None: "{col}",
"PT1S": "{func}({col}, SECOND)",
"PT1M": "{func}({col}, MINUTE)",
"PT1H": "{func}({col}, HOUR)",
"P1D": "{func}({col}, DAY)",
"P1W": "{func}({col}, WEEK)",
"P1M": "{func}({col}, MONTH)",
"P0.25Y": "{func}({col}, QUARTER)",
"P1Y": "{func}({col}, YEAR)",
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST('{dttm.date().isoformat()}' AS DATE)"
if tt == utils.TemporalType.DATETIME:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS DATETIME)"""
if tt == utils.TemporalType.TIME:
return f"""CAST('{dttm.strftime("%H:%M:%S.%f")}' AS TIME)"""
if tt == utils.TemporalType.TIMESTAMP:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS TIMESTAMP)"""
return None
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
data = super().fetch_data(cursor, limit)
# Support type BigQuery Row, introduced here PR #4071
# google.cloud.bigquery.table.Row
if data and type(data[0]).__name__ == "Row":
data = [r.values() for r in data] # type: ignore
return data
@staticmethod
def _mutate_label(label: str) -> str:
"""
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param label: Expected expression label
:return: Conditionally mutated label
"""
label_hashed = "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
# if label starts with number, add underscore as first character
label_mutated = "_" + label if re.match(r"^\d", label) else label
# replace non-alphanumeric characters with underscores
label_mutated = re.sub(r"[^\w]+", "_", label_mutated)
if label_mutated != label:
# add first 5 chars from md5 hash to label to avoid possible collisions
label_mutated += label_hashed[:6]
return label_mutated
@classmethod
def _truncate_label(cls, label: str) -> str:
"""BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is prefixed
to the md5 hash of the original label.
:param label: expected expression label
:return: truncated label
"""
return "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
partitions_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "partition"
]
cluster_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "clustering"
]
return {
"partitions": {"cols": partitions_columns},
"clustering": {"cols": cluster_columns},
}
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
"""
BigQuery dialect requires us to not use backtick in the fieldname which are
nested.
Using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
Also explicility specifying column names so we don't encounter duplicate
column names in the result.
"""
return [
literal_column(c["name"]).label(c["name"].replace(".", "__")) for c in cols
]
@classmethod
def epoch_to_dttm(cls) -> str:
return "TIMESTAMP_SECONDS({col})"
@classmethod
def epoch_ms_to_dttm(cls) -> str:
return "TIMESTAMP_MILLIS({col})"
@classmethod
def df_to_sql(cls, df: pd.DataFrame, **kwargs: Any) -> None:
"""
Upload data from a Pandas DataFrame to BigQuery. Calls
`DataFrame.to_gbq()` which requires `pandas_gbq` to be installed.
:param df: Dataframe with data to be uploaded
:param kwargs: kwargs to be passed to to_gbq() method. Requires that `schema`,
`name` and `con` are present in kwargs. `name` and `schema` are combined
and passed to `to_gbq()` as `destination_table`.
"""
try:
import pandas_gbq
from google.oauth2 import service_account
except ImportError:
raise Exception(
"Could not import libraries `pandas_gbq` or `google.oauth2`, which are "
"required to be installed in your environment in order "
"to upload data to BigQuery"
)
if not ("name" in kwargs and "schema" in kwargs and "con" in kwargs):
|
gbq_kwargs = {}
gbq_kwargs["project_id"] = kwargs["con"].engine.url.host
gbq_kwargs["destination_table"] = f"{kwargs.pop('schema')}.{kwargs.pop('name')}"
# add credentials if they are set on the SQLAlchemy Dialect:
creds = kwargs["con"].dialect.credentials_info
if creds:
credentials = service_account.Credentials.from_service_account_info(creds)
gbq_kwargs["credentials"] = credentials
# Only pass through supported kwargs
supported_kwarg_keys = {"if_exists"}
for key in supported_kwarg_keys:
if key in kwargs:
gbq_kwargs[key] = kwargs[key]
pandas_gbq.to_gbq(df, **gbq_kwargs)
| raise Exception("name, schema and con need to be defined in kwargs") | conditional_block |
bigquery.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import pandas as pd
from sqlalchemy import literal_column
from sqlalchemy.sql.expression import ColumnClause
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.models.core import Database # pragma: no cover
class BigQueryEngineSpec(BaseEngineSpec):
"""Engine spec for Google's BigQuery
As contributed by @mxmzdlv on issue #945"""
engine = "bigquery"
engine_name = "Google BigQuery"
max_column_name_length = 128
"""
https://www.python.org/dev/peps/pep-0249/#arraysize
raw_connections bypass the pybigquery query execution context and deal with
raw dbapi connection directly.
If this value is not set, the default value is set to 1, as described here,
https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/dbapi/cursor.html#Cursor
The default value of 5000 is derived from the pybigquery.
https://github.com/mxmzdlv/pybigquery/blob/d214bb089ca0807ca9aaa6ce4d5a01172d40264e/pybigquery/sqlalchemy_bigquery.py#L102
"""
arraysize = 5000
_date_trunc_functions = {
"DATE": "DATE_TRUNC",
"DATETIME": "DATETIME_TRUNC",
"TIME": "TIME_TRUNC",
"TIMESTAMP": "TIMESTAMP_TRUNC",
}
_time_grain_expressions = {
None: "{col}",
"PT1S": "{func}({col}, SECOND)",
"PT1M": "{func}({col}, MINUTE)",
"PT1H": "{func}({col}, HOUR)",
"P1D": "{func}({col}, DAY)",
"P1W": "{func}({col}, WEEK)",
"P1M": "{func}({col}, MONTH)",
"P0.25Y": "{func}({col}, QUARTER)",
"P1Y": "{func}({col}, YEAR)",
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST('{dttm.date().isoformat()}' AS DATE)"
if tt == utils.TemporalType.DATETIME:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS DATETIME)"""
if tt == utils.TemporalType.TIME:
return f"""CAST('{dttm.strftime("%H:%M:%S.%f")}' AS TIME)"""
if tt == utils.TemporalType.TIMESTAMP:
return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS TIMESTAMP)"""
return None
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
data = super().fetch_data(cursor, limit)
# Support type BigQuery Row, introduced here PR #4071
# google.cloud.bigquery.table.Row
if data and type(data[0]).__name__ == "Row":
data = [r.values() for r in data] # type: ignore
return data
@staticmethod
def _mutate_label(label: str) -> str:
"""
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param label: Expected expression label
:return: Conditionally mutated label
"""
label_hashed = "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
# if label starts with number, add underscore as first character
label_mutated = "_" + label if re.match(r"^\d", label) else label
# replace non-alphanumeric characters with underscores | return label_mutated
@classmethod
def _truncate_label(cls, label: str) -> str:
"""BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is prefixed
to the md5 hash of the original label.
:param label: expected expression label
:return: truncated label
"""
return "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
@classmethod
def extra_table_metadata(
cls, database: "Database", table_name: str, schema_name: str
) -> Dict[str, Any]:
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
partitions_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "partition"
]
cluster_columns = [
index.get("column_names", [])
for index in indexes
if index.get("name") == "clustering"
]
return {
"partitions": {"cols": partitions_columns},
"clustering": {"cols": cluster_columns},
}
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:
"""
BigQuery dialect requires us to not use backtick in the fieldname which are
nested.
Using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
Also explicility specifying column names so we don't encounter duplicate
column names in the result.
"""
return [
literal_column(c["name"]).label(c["name"].replace(".", "__")) for c in cols
]
@classmethod
def epoch_to_dttm(cls) -> str:
return "TIMESTAMP_SECONDS({col})"
@classmethod
def epoch_ms_to_dttm(cls) -> str:
return "TIMESTAMP_MILLIS({col})"
@classmethod
def df_to_sql(cls, df: pd.DataFrame, **kwargs: Any) -> None:
"""
Upload data from a Pandas DataFrame to BigQuery. Calls
`DataFrame.to_gbq()` which requires `pandas_gbq` to be installed.
:param df: Dataframe with data to be uploaded
:param kwargs: kwargs to be passed to to_gbq() method. Requires that `schema`,
`name` and `con` are present in kwargs. `name` and `schema` are combined
and passed to `to_gbq()` as `destination_table`.
"""
try:
import pandas_gbq
from google.oauth2 import service_account
except ImportError:
raise Exception(
"Could not import libraries `pandas_gbq` or `google.oauth2`, which are "
"required to be installed in your environment in order "
"to upload data to BigQuery"
)
if not ("name" in kwargs and "schema" in kwargs and "con" in kwargs):
raise Exception("name, schema and con need to be defined in kwargs")
gbq_kwargs = {}
gbq_kwargs["project_id"] = kwargs["con"].engine.url.host
gbq_kwargs["destination_table"] = f"{kwargs.pop('schema')}.{kwargs.pop('name')}"
# add credentials if they are set on the SQLAlchemy Dialect:
creds = kwargs["con"].dialect.credentials_info
if creds:
credentials = service_account.Credentials.from_service_account_info(creds)
gbq_kwargs["credentials"] = credentials
# Only pass through supported kwargs
supported_kwarg_keys = {"if_exists"}
for key in supported_kwarg_keys:
if key in kwargs:
gbq_kwargs[key] = kwargs[key]
pandas_gbq.to_gbq(df, **gbq_kwargs) | label_mutated = re.sub(r"[^\w]+", "_", label_mutated)
if label_mutated != label:
# add first 5 chars from md5 hash to label to avoid possible collisions
label_mutated += label_hashed[:6]
| random_line_split |
bingrep_dump.py | # BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
idc.Exit(code)
def get_short_function_name(function):
return function.replace("?", "")[:100]
def mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def cPickle_dump(filename, data):
with open(filename, "wb") as f:
cPickle.dump(data, f)
def print_cfg(cfg):
for block in cfg:
print "[%02d]" % block.id,
print hex(block.startEA),
succs = list(block.succs())
print "(succs(%d): " % len(succs),
for i in range(len(succs)):
sys.stdout.write(hex(succs[i].startEA))
if i < len(succs) - 1:
sys.stdout.write(", ")
print ")"
def output_cfg_as_png_rec(g, block, memo):
functions1, dummy = get_marks(block, 0)
hashed_label1 = hash_label(functions1)
label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
for b in list(block.succs()):
functions2, dummy = get_marks(b, 0)
hashed_label2 = hash_label(functions2)
label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
if b.startEA not in memo:
memo.append(b.startEA)
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold'))
output_cfg_as_png_rec(g, b, memo)
else:
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold, dotted'))
def output_cfg_as_png(cfg, filename, overwrite_flag):
blocks_src = {}
blocks_dst = {}
block = cfg[0]
f_name = GetFunctionName(block.startEA)
if not overwrite_flag and os.path.exists(filename):
return
g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
size = "21"
g.set_rankdir('TB')
g.set_size(size)
g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
g.add_node(pydot.Node('edge', color='lightgrey'))
memo = []
output_cfg_as_png_rec(g, block, memo)
g.write_png(filename)
def get_cfg(function_start, function_end):
f_name = GetFunctionName(function_start)
cfg = idaapi.FlowChart(idaapi.get_func(function_start))
return list(cfg)
def get_cfgs():
cfgs = []
for ea in Segments():
functions = list(Functions(SegStart(ea), SegEnd(ea)))
functions.append(SegEnd(ea))
for i in range(len(functions) - 1):
function_start = functions[i]
function_end = functions[i+1]
cfg = get_cfg(function_start, function_end)
cfgs.append(cfg)
return cfgs
def hash_label(marks):
tmp = sorted(set(marks))
tmp = "".join(tmp)
tmp = tmp.upper()
def rot13(string):
return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF, [0]+list(string))
hashed_label = rot13(tmp)
hashed_label = hashed_label & 0xFFFFFFFF
return hashed_label
def get_marks(block, gamma):
marks = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
if mnem not in ["call"]:
for buf in (opnd[1], opnd[2]):
if buf:
match = re.search("([\dA-F]+)h", buf)
if match:
magic = int(match.group(1), 16)
if 0x00001000 <= magic <= 0xffffffff:
marks.append(hex(magic))
for buf in (opnd[0], opnd[1], opnd[2]):
if buf:
match = re.search("offset (a[\S]+)", buf)
if match:
offset_a = match.group(1)
if offset_a[:4] == "asc_": continue
marks.append(offset_a)
continue
else:
gamma += 1
if opnd[0][:4] == "sub_": continue
if opnd[0][0] in ["?", "$"]: continue
if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
if opnd[0].find("cookie") >= 0: continue
marks.append(opnd[0])
continue
return marks, gamma
def get_mnems(block):
mnems = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
buf = " "
for o in opnd:
if not o: break
elif o in ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx", "dx", "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh", "esi", "edi", "si", "di", "esp", "ebp"]:
buf += "reg "
elif o[:3] == "xmm": buf += "reg "
elif o.find("[") >= 0: buf += "mem "
elif o[:6] == "offset": buf += "off "
elif o[:4] == "loc_": buf += "loc "
elif o[:4] == "sub_": buf += "sub "
elif o.isdigit(): buf += "num "
elif re.match("[\da-fA-F]+h", o): buf += "num "
elif o[:6] == "dword_": buf += "dwd "
else: buf += "lbl "
mnems.append(mnem + buf)
return mnems
def cfg_to_cft_rec(block, memo, abr):
|
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='Overwrite file')
args = parser.parse_args()
function = args.function
f_image = args.f_image
f_all = args.f_all
f_overwrite = args.f_overwrite
main(function, f_image, f_all, f_overwrite)
#idascript_exit()
| (alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2
return tree, (alpha, beta, gamma), mnems | identifier_body |
bingrep_dump.py | # BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
    """Terminate the running IDA instance, passing *code* as the exit status."""
    idc.Exit(code)
def get_short_function_name(function):
    """Return *function* with '?' characters removed, truncated to 100 chars.

    Used to turn (possibly mangled) function names into safe, bounded
    file-name stems.
    """
    sanitized = function.replace("?", "")
    return sanitized[:100]
def mkdir(dirname):
    """Create directory *dirname*; a no-op when it already exists.

    The original exists-then-create sequence was racy (TOCTOU) and also
    silently skipped creation when a non-directory file occupied the path,
    hiding a real error.  Attempt the creation and only swallow the failure
    when a directory is actually there.
    """
    try:
        os.mkdir(dirname)
    except OSError:
        # Lost a race (directory appeared) -> fine; anything else -> re-raise.
        if not os.path.isdir(dirname):
            raise
def cPickle_dump(filename, data):
    """Serialize *data* into *filename* with cPickle (binary mode)."""
    with open(filename, "wb") as out:
        cPickle.dump(data, out)
def print_cfg(cfg):
    """Debug helper: print one line per basic block with its id, start
    address and the addresses of its successors.

    Python 2 trailing-comma prints keep the cursor on the same line, so each
    block's fields accumulate into a single output line.
    """
    for block in cfg:
        print "[%02d]" % block.id,
        print hex(block.startEA),
        succs = list(block.succs())
        print "(succs(%d): " % len(succs),
        # Successor addresses, comma-separated, written directly to stdout to
        # avoid the implicit spacing of the print statement.
        for i in range(len(succs)):
            sys.stdout.write(hex(succs[i].startEA))
            if i < len(succs) - 1:
                sys.stdout.write(", ")
        print ")"
def output_cfg_as_png_rec(g, block, memo):
    """Recursively add *block* and its successor edges to pydot graph *g*.

    Each node is labelled with the block's start address plus the 8-digit hex
    hash of its marks.  *memo* collects already-visited start addresses:
    edges to visited blocks are drawn dotted and not followed again.
    """
    def node_label(blk):
        marks, _ = get_marks(blk, 0)
        return hex(blk.startEA) + ("\n%08x" % hash_label(marks))

    src_label = node_label(block)
    g.add_node(pydot.Node(src_label, fontcolor='#FFFFFF', color='#333399'))
    for succ in list(block.succs()):
        dst_label = node_label(succ)
        if succ.startEA in memo:
            # Back/cross edge to an already-drawn block: dotted, no recursion.
            g.add_edge(pydot.Edge(src_label, dst_label, color='#333399', style='bold, dotted'))
        else:
            memo.append(succ.startEA)
            g.add_edge(pydot.Edge(src_label, dst_label, color='#333399', style='bold'))
            output_cfg_as_png_rec(g, succ, memo)
def output_cfg_as_png(cfg, filename, overwrite_flag):
    """Render *cfg* (list of IDA basic blocks) as a PNG graph in *filename*.

    Does nothing when *filename* already exists and *overwrite_flag* is
    false.  (Dead locals blocks_src/blocks_dst and an unused
    GetFunctionName lookup were removed; their results were never read.)
    """
    if not overwrite_flag and os.path.exists(filename):
        return
    block = cfg[0]
    g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
    size = "21"
    g.set_rankdir('TB')
    g.set_size(size)
    # Graph-wide default node/edge styling.
    g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
    g.add_node(pydot.Node('edge', color='lightgrey'))
    memo = []
    output_cfg_as_png_rec(g, block, memo)
    g.write_png(filename)
def get_cfg(function_start, function_end):
    """Return the basic blocks (IDA FlowChart) of the function starting at
    *function_start*.

    *function_end* is unused but kept so existing callers keep working.
    (Dead local f_name removed: GetFunctionName is a pure lookup whose
    result was never read.)
    """
    return list(idaapi.FlowChart(idaapi.get_func(function_start)))
def get_cfgs():
    """Collect the CFG (list of basic blocks) of every function in every
    segment of the loaded binary."""
    cfgs = []
    for seg_ea in Segments():
        # Function start addresses, with the segment end appended so each
        # function can be paired with the next boundary.
        boundaries = list(Functions(SegStart(seg_ea), SegEnd(seg_ea)))
        boundaries.append(SegEnd(seg_ea))
        for start, end in zip(boundaries, boundaries[1:]):
            cfgs.append(get_cfg(start, end))
    return cfgs
def hash_label(marks):
    """Hash a collection of mark strings into a 32-bit value.

    Marks are deduplicated, sorted and upper-cased first, so the result is
    independent of order and multiplicity.  The fold is a rotate-by-19 /
    add-char scheme kept to 32 bits.
    """
    text = "".join(sorted(set(marks))).upper()
    h = 0
    for ch in text:
        h = ((h >> 13 | h << 19) + ord(ch)) & 0xFFFFFFFF
    return h & 0xFFFFFFFF
def get_marks(block, gamma):
    """Collect characteristic 'marks' from the instructions of *block*.

    For non-call instructions, marks are (a) immediate hex constants in the
    2nd/3rd operands whose value lies in [0x1000, 0xffffffff] (likely magic
    numbers) and (b) 'offset aXxx' references (IDA's auto-names for string
    literals), skipping the generic 'asc_' names.  For call instructions,
    the callee name is recorded unless it is an anonymous sub_*, a mangled
    name, a register (indirect call), an SEH prologue/epilogue helper, or a
    stack-cookie routine; *gamma* counts every call seen either way.

    Returns (marks, gamma).
    """
    marks = []
    for head in Heads(block.startEA, block.endEA):
        mnem = GetMnem(head)
        opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
        if mnem not in ["call"]:
            # Immediate operands rendered like '0DEADh' -> keep plausible
            # magic constants.
            for buf in (opnd[1], opnd[2]):
                if buf:
                    match = re.search("([\dA-F]+)h", buf)
                    if match:
                        magic = int(match.group(1), 16)
                        if 0x00001000 <= magic <= 0xffffffff:
                            marks.append(hex(magic))
            # String-literal references ('offset aHello'); 'asc_' names carry
            # no semantic information, so they are skipped.
            for buf in (opnd[0], opnd[1], opnd[2]):
                if buf:
                    match = re.search("offset (a[\S]+)", buf)
                    if match:
                        offset_a = match.group(1)
                        if offset_a[:4] == "asc_": continue
                        marks.append(offset_a)
            continue
        else:
            gamma += 1  # every call site counts, even filtered ones
            if opnd[0][:4] == "sub_": continue
            if opnd[0][0] in ["?", "$"]: continue
            if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
            if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
            if opnd[0].find("cookie") >= 0: continue
            marks.append(opnd[0])
            continue
    return marks, gamma
def get_mnems(block):
    """Return normalized 'mnemonic + operand-kind' strings for every
    instruction in *block*, with each operand abstracted to a coarse
    category (reg/mem/off/loc/sub/num/dwd/lbl)."""
    gp_regs = ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx", "dx",
               "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh",
               "esi", "edi", "si", "di", "esp", "ebp"]

    def classify(o):
        # Order matters: first matching category wins.
        if o in gp_regs: return "reg "
        if o[:3] == "xmm": return "reg "
        if o.find("[") >= 0: return "mem "
        if o[:6] == "offset": return "off "
        if o[:4] == "loc_": return "loc "
        if o[:4] == "sub_": return "sub "
        if o.isdigit(): return "num "
        if re.match("[\da-fA-F]+h", o): return "num "
        if o[:6] == "dword_": return "dwd "
        return "lbl "

    mnems = []
    for head in Heads(block.startEA, block.endEA):
        mnem = GetMnem(head)
        operands = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
        buf = " "
        for o in operands:
            if not o:
                break  # no further operands
            buf += classify(o)
        mnems.append(mnem + buf)
    return mnems
def cfg_to_cft_rec(block, memo, abr):
(alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2 | return tree, (alpha, beta, gamma), mnems
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='Overwrite file')
args = parser.parse_args()
function = args.function
f_image = args.f_image
f_all = args.f_all
f_overwrite = args.f_overwrite
main(function, f_image, f_all, f_overwrite)
#idascript_exit() | random_line_split |
|
bingrep_dump.py | # BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
idc.Exit(code)
def get_short_function_name(function):
return function.replace("?", "")[:100]
def mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def cPickle_dump(filename, data):
with open(filename, "wb") as f:
cPickle.dump(data, f)
def print_cfg(cfg):
for block in cfg:
print "[%02d]" % block.id,
print hex(block.startEA),
succs = list(block.succs())
print "(succs(%d): " % len(succs),
for i in range(len(succs)):
sys.stdout.write(hex(succs[i].startEA))
if i < len(succs) - 1:
sys.stdout.write(", ")
print ")"
def output_cfg_as_png_rec(g, block, memo):
functions1, dummy = get_marks(block, 0)
hashed_label1 = hash_label(functions1)
label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
for b in list(block.succs()):
functions2, dummy = get_marks(b, 0)
hashed_label2 = hash_label(functions2)
label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
if b.startEA not in memo:
memo.append(b.startEA)
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold'))
output_cfg_as_png_rec(g, b, memo)
else:
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold, dotted'))
def output_cfg_as_png(cfg, filename, overwrite_flag):
blocks_src = {}
blocks_dst = {}
block = cfg[0]
f_name = GetFunctionName(block.startEA)
if not overwrite_flag and os.path.exists(filename):
return
g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
size = "21"
g.set_rankdir('TB')
g.set_size(size)
g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
g.add_node(pydot.Node('edge', color='lightgrey'))
memo = []
output_cfg_as_png_rec(g, block, memo)
g.write_png(filename)
def get_cfg(function_start, function_end):
f_name = GetFunctionName(function_start)
cfg = idaapi.FlowChart(idaapi.get_func(function_start))
return list(cfg)
def get_cfgs():
cfgs = []
for ea in Segments():
|
return cfgs
def hash_label(marks):
tmp = sorted(set(marks))
tmp = "".join(tmp)
tmp = tmp.upper()
def rot13(string):
return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF, [0]+list(string))
hashed_label = rot13(tmp)
hashed_label = hashed_label & 0xFFFFFFFF
return hashed_label
def get_marks(block, gamma):
marks = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
if mnem not in ["call"]:
for buf in (opnd[1], opnd[2]):
if buf:
match = re.search("([\dA-F]+)h", buf)
if match:
magic = int(match.group(1), 16)
if 0x00001000 <= magic <= 0xffffffff:
marks.append(hex(magic))
for buf in (opnd[0], opnd[1], opnd[2]):
if buf:
match = re.search("offset (a[\S]+)", buf)
if match:
offset_a = match.group(1)
if offset_a[:4] == "asc_": continue
marks.append(offset_a)
continue
else:
gamma += 1
if opnd[0][:4] == "sub_": continue
if opnd[0][0] in ["?", "$"]: continue
if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
if opnd[0].find("cookie") >= 0: continue
marks.append(opnd[0])
continue
return marks, gamma
def get_mnems(block):
mnems = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
buf = " "
for o in opnd:
if not o: break
elif o in ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx", "dx", "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh", "esi", "edi", "si", "di", "esp", "ebp"]:
buf += "reg "
elif o[:3] == "xmm": buf += "reg "
elif o.find("[") >= 0: buf += "mem "
elif o[:6] == "offset": buf += "off "
elif o[:4] == "loc_": buf += "loc "
elif o[:4] == "sub_": buf += "sub "
elif o.isdigit(): buf += "num "
elif re.match("[\da-fA-F]+h", o): buf += "num "
elif o[:6] == "dword_": buf += "dwd "
else: buf += "lbl "
mnems.append(mnem + buf)
return mnems
def cfg_to_cft_rec(block, memo, abr):
(alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2
return tree, (alpha, beta, gamma), mnems
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='Overwrite file')
args = parser.parse_args()
function = args.function
f_image = args.f_image
f_all = args.f_all
f_overwrite = args.f_overwrite
main(function, f_image, f_all, f_overwrite)
#idascript_exit()
| functions = list(Functions(SegStart(ea), SegEnd(ea)))
functions.append(SegEnd(ea))
for i in range(len(functions) - 1):
function_start = functions[i]
function_end = functions[i+1]
cfg = get_cfg(function_start, function_end)
cfgs.append(cfg) | conditional_block |
bingrep_dump.py | # BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
idc.Exit(code)
def get_short_function_name(function):
return function.replace("?", "")[:100]
def mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def cPickle_dump(filename, data):
with open(filename, "wb") as f:
cPickle.dump(data, f)
def print_cfg(cfg):
for block in cfg:
print "[%02d]" % block.id,
print hex(block.startEA),
succs = list(block.succs())
print "(succs(%d): " % len(succs),
for i in range(len(succs)):
sys.stdout.write(hex(succs[i].startEA))
if i < len(succs) - 1:
sys.stdout.write(", ")
print ")"
def output_cfg_as_png_rec(g, block, memo):
functions1, dummy = get_marks(block, 0)
hashed_label1 = hash_label(functions1)
label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
for b in list(block.succs()):
functions2, dummy = get_marks(b, 0)
hashed_label2 = hash_label(functions2)
label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
if b.startEA not in memo:
memo.append(b.startEA)
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold'))
output_cfg_as_png_rec(g, b, memo)
else:
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold, dotted'))
def output_cfg_as_png(cfg, filename, overwrite_flag):
blocks_src = {}
blocks_dst = {}
block = cfg[0]
f_name = GetFunctionName(block.startEA)
if not overwrite_flag and os.path.exists(filename):
return
g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
size = "21"
g.set_rankdir('TB')
g.set_size(size)
g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
g.add_node(pydot.Node('edge', color='lightgrey'))
memo = []
output_cfg_as_png_rec(g, block, memo)
g.write_png(filename)
def get_cfg(function_start, function_end):
f_name = GetFunctionName(function_start)
cfg = idaapi.FlowChart(idaapi.get_func(function_start))
return list(cfg)
def get_cfgs():
cfgs = []
for ea in Segments():
functions = list(Functions(SegStart(ea), SegEnd(ea)))
functions.append(SegEnd(ea))
for i in range(len(functions) - 1):
function_start = functions[i]
function_end = functions[i+1]
cfg = get_cfg(function_start, function_end)
cfgs.append(cfg)
return cfgs
def hash_label(marks):
tmp = sorted(set(marks))
tmp = "".join(tmp)
tmp = tmp.upper()
def rot13(string):
return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF, [0]+list(string))
hashed_label = rot13(tmp)
hashed_label = hashed_label & 0xFFFFFFFF
return hashed_label
def get_marks(block, gamma):
marks = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
if mnem not in ["call"]:
for buf in (opnd[1], opnd[2]):
if buf:
match = re.search("([\dA-F]+)h", buf)
if match:
magic = int(match.group(1), 16)
if 0x00001000 <= magic <= 0xffffffff:
marks.append(hex(magic))
for buf in (opnd[0], opnd[1], opnd[2]):
if buf:
match = re.search("offset (a[\S]+)", buf)
if match:
offset_a = match.group(1)
if offset_a[:4] == "asc_": continue
marks.append(offset_a)
continue
else:
gamma += 1
if opnd[0][:4] == "sub_": continue
if opnd[0][0] in ["?", "$"]: continue
if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
if opnd[0].find("cookie") >= 0: continue
marks.append(opnd[0])
continue
return marks, gamma
def get_mnems(block):
mnems = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
buf = " "
for o in opnd:
if not o: break
elif o in ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx", "dx", "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh", "esi", "edi", "si", "di", "esp", "ebp"]:
buf += "reg "
elif o[:3] == "xmm": buf += "reg "
elif o.find("[") >= 0: buf += "mem "
elif o[:6] == "offset": buf += "off "
elif o[:4] == "loc_": buf += "loc "
elif o[:4] == "sub_": buf += "sub "
elif o.isdigit(): buf += "num "
elif re.match("[\da-fA-F]+h", o): buf += "num "
elif o[:6] == "dword_": buf += "dwd "
else: buf += "lbl "
mnems.append(mnem + buf)
return mnems
def cfg_to_cft_rec(block, memo, abr):
(alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2
return tree, (alpha, beta, gamma), mnems
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def | (cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='Overwrite file')
args = parser.parse_args()
function = args.function
f_image = args.f_image
f_all = args.f_all
f_overwrite = args.f_overwrite
main(function, f_image, f_all, f_overwrite)
#idascript_exit()
| dump_function_info | identifier_name |
series_rolling_median.py | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def | ():
series = pd.Series([4, 3, 5, 2, 6]) # Series of 4, 3, 5, 2, 6
out_series = series.rolling(3).median()
return out_series # Expect series of NaN, NaN, 4.0, 3.0, 5.0
print(series_rolling_median())
| series_rolling_median | identifier_name |
series_rolling_median.py | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_rolling_median():
series = pd.Series([4, 3, 5, 2, 6]) # Series of 4, 3, 5, 2, 6 | return out_series # Expect series of NaN, NaN, 4.0, 3.0, 5.0
print(series_rolling_median()) | out_series = series.rolling(3).median()
| random_line_split |
series_rolling_median.py | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_rolling_median():
|
print(series_rolling_median())
| series = pd.Series([4, 3, 5, 2, 6]) # Series of 4, 3, 5, 2, 6
out_series = series.rolling(3).median()
return out_series # Expect series of NaN, NaN, 4.0, 3.0, 5.0 | identifier_body |
Core.UI.Datepicker.js | // --
// Copyright (C) 2001-2016 OTRS AG, http://otrs.com/
// --
// This software comes with ABSOLUTELY NO WARRANTY. For details, see
// the enclosed file COPYING for license information (AGPL). If you
// did not receive this file, see http://www.gnu.org/licenses/agpl.txt.
// --
"use strict";
var Core = Core || {};
Core.UI = Core.UI || {};
/**
* @namespace Core.UI.Datepicker
* @memberof Core.UI
* @author OTRS AG
* @description
* This namespace contains the datepicker functions.
*/
Core.UI.Datepicker = (function (TargetNS) {
/**
* @private
* @name VacationDays
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Vacation days, defined in SysConfig.
*/
var VacationDays,
/**
* @private
* @name VacationDaysOneTime
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* One time vacations, defined in SysConfig.
*/
VacationDaysOneTime,
/**
* @private
* @name LocalizationData
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Translations.
*/
LocalizationData,
/**
* @private
* @name DatepickerCount
* @memberof Core.UI.Datepicker
* @member {Number}
* @description
* Number of initialized datepicker.
*/
DatepickerCount = 0;
if (!Core.Debug.CheckDependency('Core.UI.Datepicker', '$([]).datepicker', 'jQuery UI datepicker')) {
return false;
}
/**
* @private
* @name CheckDate
* @memberof Core.UI.Datepicker
* @function
* @returns {Array} First element is always true, second element contains the name of a CSS class, third element a description for the date.
* @param {DateObject} DateObject - A JS date object to check.
* @description
* Check if date is on of the defined vacation days.
*/
function CheckDate(DateObject) {
var DayDescription = '',
DayClass = '';
// Get defined days from Config, if not done already
if (typeof VacationDays === 'undefined') {
VacationDays = Core.Config.Get('Datepicker.VacationDays').TimeVacationDays;
}
if (typeof VacationDaysOneTime === 'undefined') {
VacationDaysOneTime = Core.Config.Get('Datepicker.VacationDays').TimeVacationDaysOneTime;
}
// Check if date is one of the vacation days
if (typeof VacationDays[DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDays[DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDays[DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
// Check if date is one of the one time vacation days
if (typeof VacationDaysOneTime[DateObject.getFullYear()] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
if (DayClass.length) {
return [true, DayClass, DayDescription];
}
else {
return [true, ''];
}
}
/**
* @name Init
* @memberof Core.UI.Datepicker
* @function
* @returns {Boolean} false, if Parameter Element is not of the correct type.
* @param {jQueryObject|Object} Element - The jQuery object of a text input field which should get a datepicker.
* Or a hash with the Keys 'Year', 'Month' and 'Day' and as values the jQueryObjects of the select drop downs.
* @description
* This function initializes the datepicker on the defined elements.
*/
TargetNS.Init = function (Element) {
var $DatepickerElement,
HasDateSelectBoxes = false,
Options,
ErrorMessage;
if (typeof Element.VacationDays === 'object') {
Core.Config.Set('Datepicker.VacationDays', Element.VacationDays);
}
/**
* @private
* @name LeadingZero
* @memberof Core.UI.Datepicker.Init
* @function
* @returns {String} A number with leading zero, if needed.
* @param {Number} Number - A number to convert.
* @description
* Converts a one digit number to a string with leading zero.
*/
function LeadingZero(Number) |
if (typeof LocalizationData === 'undefined') {
LocalizationData = Core.Config.Get('Datepicker.Localization');
if (typeof LocalizationData === 'undefined') {
throw new Core.Exception.ApplicationError('Datepicker localization data could not be found!', 'InternalError');
}
}
// Increment number of initialized datepickers on this site
DatepickerCount++;
// Check, if datepicker is used with three input element or with three select boxes
if (typeof Element === 'object' &&
typeof Element.Day !== 'undefined' &&
typeof Element.Month !== 'undefined' &&
typeof Element.Year !== 'undefined' &&
isJQueryObject(Element.Day, Element.Month, Element.Year) &&
// Sometimes it can happen that BuildDateSelection was called without placing the full date selection.
// Ignore in this case.
Element.Day.length
) {
$DatepickerElement = $('<input>').attr('type', 'hidden').attr('id', 'Datepicker' + DatepickerCount);
Element.Year.after($DatepickerElement);
if (Element.Day.is('select') && Element.Month.is('select') && Element.Year.is('select')) {
HasDateSelectBoxes = true;
}
}
else {
return false;
}
// Define options hash
Options = {
beforeShowDay: function (DateObject) {
return CheckDate(DateObject);
},
showOn: 'focus',
prevText: LocalizationData.PrevText,
nextText: LocalizationData.NextText,
firstDay: Element.WeekDayStart,
showMonthAfterYear: 0,
monthNames: LocalizationData.MonthNames,
monthNamesShort: LocalizationData.MonthNamesShort,
dayNames: LocalizationData.DayNames,
dayNamesShort: LocalizationData.DayNamesShort,
dayNamesMin: LocalizationData.DayNamesMin,
isRTL: LocalizationData.IsRTL
};
Options.onSelect = function (DateText, Instance) {
var Year = Instance.selectedYear,
Month = Instance.selectedMonth + 1,
Day = Instance.selectedDay;
// Update the three select boxes
if (HasDateSelectBoxes) {
Element.Year.find('option[value=' + Year + ']').prop('selected', true);
Element.Month.find('option[value=' + Month + ']').prop('selected', true);
Element.Day.find('option[value=' + Day + ']').prop('selected', true);
}
else {
Element.Year.val(Year);
Element.Month.val(LeadingZero(Month));
Element.Day.val(LeadingZero(Day));
}
};
Options.beforeShow = function (Input) {
$(Input).val('');
return {
defaultDate: new Date(Element.Year.val(), Element.Month.val() - 1, Element.Day.val())
};
};
$DatepickerElement.datepicker(Options);
// Add some DOM notes to the datepicker, but only if it was not initialized previously.
// Check if one additional DOM node is already present.
if (!$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').length) {
// add datepicker icon and click event
$DatepickerElement.after('<a href="#" class="DatepickerIcon" id="' + Element.Day.attr('id') + 'DatepickerIcon" title="' + LocalizationData.IconText + '"><i class="fa fa-calendar"></i></a>');
if (Element.DateInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateInFuture');
}
else if (Element.DateNotInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateNotInFuture');
}
else {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessage');
}
// Add validation error messages for all dateselection elements
Element.Year
.after('<div id="' + Element.Day.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Month.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Year.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
// only insert time element error messages if time elements are present
if (Element.Hour && Element.Hour.length) {
Element.Hour
.after('<div id="' + Element.Hour.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Minute.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
}
}
$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').unbind('click.Datepicker').bind('click.Datepicker', function () {
$DatepickerElement.datepicker('show');
return false;
});
// do not show the datepicker container div.
$('#ui-datepicker-div').hide();
};
return TargetNS;
}(Core.UI.Datepicker || {}));
| {
if (Number.toString().length === 1) {
return '0' + Number;
}
else {
return Number;
}
} | identifier_body |
Core.UI.Datepicker.js | // --
// Copyright (C) 2001-2016 OTRS AG, http://otrs.com/
// --
// This software comes with ABSOLUTELY NO WARRANTY. For details, see
// the enclosed file COPYING for license information (AGPL). If you
// did not receive this file, see http://www.gnu.org/licenses/agpl.txt.
// --
"use strict";
var Core = Core || {};
Core.UI = Core.UI || {};
/**
* @namespace Core.UI.Datepicker
* @memberof Core.UI
* @author OTRS AG
* @description
* This namespace contains the datepicker functions.
*/
Core.UI.Datepicker = (function (TargetNS) {
/**
* @private
* @name VacationDays
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Vacation days, defined in SysConfig.
*/
var VacationDays,
/**
* @private
* @name VacationDaysOneTime
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* One time vacations, defined in SysConfig.
*/
VacationDaysOneTime,
/**
* @private
* @name LocalizationData
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Translations.
*/
LocalizationData,
/**
* @private
* @name DatepickerCount
* @memberof Core.UI.Datepicker
* @member {Number}
* @description
* Number of initialized datepicker.
*/
DatepickerCount = 0;
if (!Core.Debug.CheckDependency('Core.UI.Datepicker', '$([]).datepicker', 'jQuery UI datepicker')) {
return false;
}
/**
* @private
* @name CheckDate
* @memberof Core.UI.Datepicker
* @function
* @returns {Array} First element is always true, second element contains the name of a CSS class, third element a description for the date.
* @param {DateObject} DateObject - A JS date object to check.
* @description
* Check if date is on of the defined vacation days.
*/
function CheckDate(DateObject) {
var DayDescription = '',
DayClass = '';
// Get defined days from Config, if not done already
if (typeof VacationDays === 'undefined') {
VacationDays = Core.Config.Get('Datepicker.VacationDays').TimeVacationDays;
}
if (typeof VacationDaysOneTime === 'undefined') {
VacationDaysOneTime = Core.Config.Get('Datepicker.VacationDays').TimeVacationDaysOneTime;
}
// Check if date is one of the vacation days
if (typeof VacationDays[DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDays[DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDays[DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
// Check if date is one of the one time vacation days
if (typeof VacationDaysOneTime[DateObject.getFullYear()] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
if (DayClass.length) {
return [true, DayClass, DayDescription];
}
else {
return [true, ''];
}
}
/**
* @name Init
* @memberof Core.UI.Datepicker
* @function
* @returns {Boolean} false, if Parameter Element is not of the correct type.
* @param {jQueryObject|Object} Element - The jQuery object of a text input field which should get a datepicker.
* Or a hash with the Keys 'Year', 'Month' and 'Day' and as values the jQueryObjects of the select drop downs.
* @description
* This function initializes the datepicker on the defined elements.
*/ |
var $DatepickerElement,
HasDateSelectBoxes = false,
Options,
ErrorMessage;
if (typeof Element.VacationDays === 'object') {
Core.Config.Set('Datepicker.VacationDays', Element.VacationDays);
}
/**
* @private
* @name LeadingZero
* @memberof Core.UI.Datepicker.Init
* @function
* @returns {String} A number with leading zero, if needed.
* @param {Number} Number - A number to convert.
* @description
* Converts a one digit number to a string with leading zero.
*/
function LeadingZero(Number) {
if (Number.toString().length === 1) {
return '0' + Number;
}
else {
return Number;
}
}
if (typeof LocalizationData === 'undefined') {
LocalizationData = Core.Config.Get('Datepicker.Localization');
if (typeof LocalizationData === 'undefined') {
throw new Core.Exception.ApplicationError('Datepicker localization data could not be found!', 'InternalError');
}
}
// Increment number of initialized datepickers on this site
DatepickerCount++;
// Check, if datepicker is used with three input element or with three select boxes
if (typeof Element === 'object' &&
typeof Element.Day !== 'undefined' &&
typeof Element.Month !== 'undefined' &&
typeof Element.Year !== 'undefined' &&
isJQueryObject(Element.Day, Element.Month, Element.Year) &&
// Sometimes it can happen that BuildDateSelection was called without placing the full date selection.
// Ignore in this case.
Element.Day.length
) {
$DatepickerElement = $('<input>').attr('type', 'hidden').attr('id', 'Datepicker' + DatepickerCount);
Element.Year.after($DatepickerElement);
if (Element.Day.is('select') && Element.Month.is('select') && Element.Year.is('select')) {
HasDateSelectBoxes = true;
}
}
else {
return false;
}
// Define options hash
Options = {
beforeShowDay: function (DateObject) {
return CheckDate(DateObject);
},
showOn: 'focus',
prevText: LocalizationData.PrevText,
nextText: LocalizationData.NextText,
firstDay: Element.WeekDayStart,
showMonthAfterYear: 0,
monthNames: LocalizationData.MonthNames,
monthNamesShort: LocalizationData.MonthNamesShort,
dayNames: LocalizationData.DayNames,
dayNamesShort: LocalizationData.DayNamesShort,
dayNamesMin: LocalizationData.DayNamesMin,
isRTL: LocalizationData.IsRTL
};
Options.onSelect = function (DateText, Instance) {
var Year = Instance.selectedYear,
Month = Instance.selectedMonth + 1,
Day = Instance.selectedDay;
// Update the three select boxes
if (HasDateSelectBoxes) {
Element.Year.find('option[value=' + Year + ']').prop('selected', true);
Element.Month.find('option[value=' + Month + ']').prop('selected', true);
Element.Day.find('option[value=' + Day + ']').prop('selected', true);
}
else {
Element.Year.val(Year);
Element.Month.val(LeadingZero(Month));
Element.Day.val(LeadingZero(Day));
}
};
Options.beforeShow = function (Input) {
$(Input).val('');
return {
defaultDate: new Date(Element.Year.val(), Element.Month.val() - 1, Element.Day.val())
};
};
$DatepickerElement.datepicker(Options);
// Add some DOM notes to the datepicker, but only if it was not initialized previously.
// Check if one additional DOM node is already present.
if (!$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').length) {
// add datepicker icon and click event
$DatepickerElement.after('<a href="#" class="DatepickerIcon" id="' + Element.Day.attr('id') + 'DatepickerIcon" title="' + LocalizationData.IconText + '"><i class="fa fa-calendar"></i></a>');
if (Element.DateInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateInFuture');
}
else if (Element.DateNotInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateNotInFuture');
}
else {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessage');
}
// Add validation error messages for all dateselection elements
Element.Year
.after('<div id="' + Element.Day.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Month.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Year.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
// only insert time element error messages if time elements are present
if (Element.Hour && Element.Hour.length) {
Element.Hour
.after('<div id="' + Element.Hour.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Minute.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
}
}
$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').unbind('click.Datepicker').bind('click.Datepicker', function () {
$DatepickerElement.datepicker('show');
return false;
});
// do not show the datepicker container div.
$('#ui-datepicker-div').hide();
};
return TargetNS;
}(Core.UI.Datepicker || {})); | TargetNS.Init = function (Element) { | random_line_split |
Core.UI.Datepicker.js | // --
// Copyright (C) 2001-2016 OTRS AG, http://otrs.com/
// --
// This software comes with ABSOLUTELY NO WARRANTY. For details, see
// the enclosed file COPYING for license information (AGPL). If you
// did not receive this file, see http://www.gnu.org/licenses/agpl.txt.
// --
"use strict";
var Core = Core || {};
Core.UI = Core.UI || {};
/**
* @namespace Core.UI.Datepicker
* @memberof Core.UI
* @author OTRS AG
* @description
* This namespace contains the datepicker functions.
*/
Core.UI.Datepicker = (function (TargetNS) {
/**
* @private
* @name VacationDays
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Vacation days, defined in SysConfig.
*/
var VacationDays,
/**
* @private
* @name VacationDaysOneTime
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* One time vacations, defined in SysConfig.
*/
VacationDaysOneTime,
/**
* @private
* @name LocalizationData
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Translations.
*/
LocalizationData,
/**
* @private
* @name DatepickerCount
* @memberof Core.UI.Datepicker
* @member {Number}
* @description
* Number of initialized datepicker.
*/
DatepickerCount = 0;
if (!Core.Debug.CheckDependency('Core.UI.Datepicker', '$([]).datepicker', 'jQuery UI datepicker')) {
return false;
}
/**
* @private
* @name CheckDate
* @memberof Core.UI.Datepicker
* @function
* @returns {Array} First element is always true, second element contains the name of a CSS class, third element a description for the date.
* @param {DateObject} DateObject - A JS date object to check.
* @description
* Check if date is on of the defined vacation days.
*/
function CheckDate(DateObject) {
var DayDescription = '',
DayClass = '';
// Get defined days from Config, if not done already
if (typeof VacationDays === 'undefined') {
VacationDays = Core.Config.Get('Datepicker.VacationDays').TimeVacationDays;
}
if (typeof VacationDaysOneTime === 'undefined') {
VacationDaysOneTime = Core.Config.Get('Datepicker.VacationDays').TimeVacationDaysOneTime;
}
// Check if date is one of the vacation days
if (typeof VacationDays[DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDays[DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDays[DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
// Check if date is one of the one time vacation days
if (typeof VacationDaysOneTime[DateObject.getFullYear()] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
if (DayClass.length) {
return [true, DayClass, DayDescription];
}
else {
return [true, ''];
}
}
/**
* @name Init
* @memberof Core.UI.Datepicker
* @function
* @returns {Boolean} false, if Parameter Element is not of the correct type.
* @param {jQueryObject|Object} Element - The jQuery object of a text input field which should get a datepicker.
* Or a hash with the Keys 'Year', 'Month' and 'Day' and as values the jQueryObjects of the select drop downs.
* @description
* This function initializes the datepicker on the defined elements.
*/
TargetNS.Init = function (Element) {
var $DatepickerElement,
HasDateSelectBoxes = false,
Options,
ErrorMessage;
if (typeof Element.VacationDays === 'object') {
Core.Config.Set('Datepicker.VacationDays', Element.VacationDays);
}
/**
* @private
* @name LeadingZero
* @memberof Core.UI.Datepicker.Init
* @function
* @returns {String} A number with leading zero, if needed.
* @param {Number} Number - A number to convert.
* @description
* Converts a one digit number to a string with leading zero.
*/
function LeadingZero(Number) {
if (Number.toString().length === 1) {
return '0' + Number;
}
else {
return Number;
}
}
if (typeof LocalizationData === 'undefined') {
LocalizationData = Core.Config.Get('Datepicker.Localization');
if (typeof LocalizationData === 'undefined') {
throw new Core.Exception.ApplicationError('Datepicker localization data could not be found!', 'InternalError');
}
}
// Increment number of initialized datepickers on this site
DatepickerCount++;
// Check, if datepicker is used with three input element or with three select boxes
if (typeof Element === 'object' &&
typeof Element.Day !== 'undefined' &&
typeof Element.Month !== 'undefined' &&
typeof Element.Year !== 'undefined' &&
isJQueryObject(Element.Day, Element.Month, Element.Year) &&
// Sometimes it can happen that BuildDateSelection was called without placing the full date selection.
// Ignore in this case.
Element.Day.length
) {
$DatepickerElement = $('<input>').attr('type', 'hidden').attr('id', 'Datepicker' + DatepickerCount);
Element.Year.after($DatepickerElement);
if (Element.Day.is('select') && Element.Month.is('select') && Element.Year.is('select')) {
HasDateSelectBoxes = true;
}
}
else {
return false;
}
// Define options hash
Options = {
beforeShowDay: function (DateObject) {
return CheckDate(DateObject);
},
showOn: 'focus',
prevText: LocalizationData.PrevText,
nextText: LocalizationData.NextText,
firstDay: Element.WeekDayStart,
showMonthAfterYear: 0,
monthNames: LocalizationData.MonthNames,
monthNamesShort: LocalizationData.MonthNamesShort,
dayNames: LocalizationData.DayNames,
dayNamesShort: LocalizationData.DayNamesShort,
dayNamesMin: LocalizationData.DayNamesMin,
isRTL: LocalizationData.IsRTL
};
Options.onSelect = function (DateText, Instance) {
var Year = Instance.selectedYear,
Month = Instance.selectedMonth + 1,
Day = Instance.selectedDay;
// Update the three select boxes
if (HasDateSelectBoxes) {
Element.Year.find('option[value=' + Year + ']').prop('selected', true);
Element.Month.find('option[value=' + Month + ']').prop('selected', true);
Element.Day.find('option[value=' + Day + ']').prop('selected', true);
}
else |
};
Options.beforeShow = function (Input) {
$(Input).val('');
return {
defaultDate: new Date(Element.Year.val(), Element.Month.val() - 1, Element.Day.val())
};
};
$DatepickerElement.datepicker(Options);
// Add some DOM notes to the datepicker, but only if it was not initialized previously.
// Check if one additional DOM node is already present.
if (!$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').length) {
// add datepicker icon and click event
$DatepickerElement.after('<a href="#" class="DatepickerIcon" id="' + Element.Day.attr('id') + 'DatepickerIcon" title="' + LocalizationData.IconText + '"><i class="fa fa-calendar"></i></a>');
if (Element.DateInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateInFuture');
}
else if (Element.DateNotInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateNotInFuture');
}
else {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessage');
}
// Add validation error messages for all dateselection elements
Element.Year
.after('<div id="' + Element.Day.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Month.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Year.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
// only insert time element error messages if time elements are present
if (Element.Hour && Element.Hour.length) {
Element.Hour
.after('<div id="' + Element.Hour.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Minute.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
}
}
$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').unbind('click.Datepicker').bind('click.Datepicker', function () {
$DatepickerElement.datepicker('show');
return false;
});
// do not show the datepicker container div.
$('#ui-datepicker-div').hide();
};
return TargetNS;
}(Core.UI.Datepicker || {}));
| {
Element.Year.val(Year);
Element.Month.val(LeadingZero(Month));
Element.Day.val(LeadingZero(Day));
} | conditional_block |
Core.UI.Datepicker.js | // --
// Copyright (C) 2001-2016 OTRS AG, http://otrs.com/
// --
// This software comes with ABSOLUTELY NO WARRANTY. For details, see
// the enclosed file COPYING for license information (AGPL). If you
// did not receive this file, see http://www.gnu.org/licenses/agpl.txt.
// --
"use strict";
var Core = Core || {};
Core.UI = Core.UI || {};
/**
* @namespace Core.UI.Datepicker
* @memberof Core.UI
* @author OTRS AG
* @description
* This namespace contains the datepicker functions.
*/
Core.UI.Datepicker = (function (TargetNS) {
/**
* @private
* @name VacationDays
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Vacation days, defined in SysConfig.
*/
var VacationDays,
/**
* @private
* @name VacationDaysOneTime
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* One time vacations, defined in SysConfig.
*/
VacationDaysOneTime,
/**
* @private
* @name LocalizationData
* @memberof Core.UI.Datepicker
* @member {Object}
* @description
* Translations.
*/
LocalizationData,
/**
* @private
* @name DatepickerCount
* @memberof Core.UI.Datepicker
* @member {Number}
* @description
* Number of initialized datepicker.
*/
DatepickerCount = 0;
if (!Core.Debug.CheckDependency('Core.UI.Datepicker', '$([]).datepicker', 'jQuery UI datepicker')) {
return false;
}
/**
* @private
* @name CheckDate
* @memberof Core.UI.Datepicker
* @function
* @returns {Array} First element is always true, second element contains the name of a CSS class, third element a description for the date.
* @param {DateObject} DateObject - A JS date object to check.
* @description
* Check if date is on of the defined vacation days.
*/
function CheckDate(DateObject) {
var DayDescription = '',
DayClass = '';
// Get defined days from Config, if not done already
if (typeof VacationDays === 'undefined') {
VacationDays = Core.Config.Get('Datepicker.VacationDays').TimeVacationDays;
}
if (typeof VacationDaysOneTime === 'undefined') {
VacationDaysOneTime = Core.Config.Get('Datepicker.VacationDays').TimeVacationDaysOneTime;
}
// Check if date is one of the vacation days
if (typeof VacationDays[DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDays[DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDays[DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
// Check if date is one of the one time vacation days
if (typeof VacationDaysOneTime[DateObject.getFullYear()] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1] !== 'undefined' &&
typeof VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()] !== 'undefined') {
DayDescription += VacationDaysOneTime[DateObject.getFullYear()][DateObject.getMonth() + 1][DateObject.getDate()];
DayClass = 'Highlight ';
}
if (DayClass.length) {
return [true, DayClass, DayDescription];
}
else {
return [true, ''];
}
}
/**
* @name Init
* @memberof Core.UI.Datepicker
* @function
* @returns {Boolean} false, if Parameter Element is not of the correct type.
* @param {jQueryObject|Object} Element - The jQuery object of a text input field which should get a datepicker.
* Or a hash with the Keys 'Year', 'Month' and 'Day' and as values the jQueryObjects of the select drop downs.
* @description
* This function initializes the datepicker on the defined elements.
*/
TargetNS.Init = function (Element) {
var $DatepickerElement,
HasDateSelectBoxes = false,
Options,
ErrorMessage;
if (typeof Element.VacationDays === 'object') {
Core.Config.Set('Datepicker.VacationDays', Element.VacationDays);
}
/**
* @private
* @name LeadingZero
* @memberof Core.UI.Datepicker.Init
* @function
* @returns {String} A number with leading zero, if needed.
* @param {Number} Number - A number to convert.
* @description
* Converts a one digit number to a string with leading zero.
*/
function | (Number) {
if (Number.toString().length === 1) {
return '0' + Number;
}
else {
return Number;
}
}
if (typeof LocalizationData === 'undefined') {
LocalizationData = Core.Config.Get('Datepicker.Localization');
if (typeof LocalizationData === 'undefined') {
throw new Core.Exception.ApplicationError('Datepicker localization data could not be found!', 'InternalError');
}
}
// Increment number of initialized datepickers on this site
DatepickerCount++;
// Check, if datepicker is used with three input element or with three select boxes
if (typeof Element === 'object' &&
typeof Element.Day !== 'undefined' &&
typeof Element.Month !== 'undefined' &&
typeof Element.Year !== 'undefined' &&
isJQueryObject(Element.Day, Element.Month, Element.Year) &&
// Sometimes it can happen that BuildDateSelection was called without placing the full date selection.
// Ignore in this case.
Element.Day.length
) {
$DatepickerElement = $('<input>').attr('type', 'hidden').attr('id', 'Datepicker' + DatepickerCount);
Element.Year.after($DatepickerElement);
if (Element.Day.is('select') && Element.Month.is('select') && Element.Year.is('select')) {
HasDateSelectBoxes = true;
}
}
else {
return false;
}
// Define options hash
Options = {
beforeShowDay: function (DateObject) {
return CheckDate(DateObject);
},
showOn: 'focus',
prevText: LocalizationData.PrevText,
nextText: LocalizationData.NextText,
firstDay: Element.WeekDayStart,
showMonthAfterYear: 0,
monthNames: LocalizationData.MonthNames,
monthNamesShort: LocalizationData.MonthNamesShort,
dayNames: LocalizationData.DayNames,
dayNamesShort: LocalizationData.DayNamesShort,
dayNamesMin: LocalizationData.DayNamesMin,
isRTL: LocalizationData.IsRTL
};
Options.onSelect = function (DateText, Instance) {
var Year = Instance.selectedYear,
Month = Instance.selectedMonth + 1,
Day = Instance.selectedDay;
// Update the three select boxes
if (HasDateSelectBoxes) {
Element.Year.find('option[value=' + Year + ']').prop('selected', true);
Element.Month.find('option[value=' + Month + ']').prop('selected', true);
Element.Day.find('option[value=' + Day + ']').prop('selected', true);
}
else {
Element.Year.val(Year);
Element.Month.val(LeadingZero(Month));
Element.Day.val(LeadingZero(Day));
}
};
Options.beforeShow = function (Input) {
$(Input).val('');
return {
defaultDate: new Date(Element.Year.val(), Element.Month.val() - 1, Element.Day.val())
};
};
$DatepickerElement.datepicker(Options);
// Add some DOM notes to the datepicker, but only if it was not initialized previously.
// Check if one additional DOM node is already present.
if (!$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').length) {
// add datepicker icon and click event
$DatepickerElement.after('<a href="#" class="DatepickerIcon" id="' + Element.Day.attr('id') + 'DatepickerIcon" title="' + LocalizationData.IconText + '"><i class="fa fa-calendar"></i></a>');
if (Element.DateInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateInFuture');
}
else if (Element.DateNotInFuture) {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessageDateNotInFuture');
}
else {
ErrorMessage = Core.Config.Get('Datepicker.ErrorMessage');
}
// Add validation error messages for all dateselection elements
Element.Year
.after('<div id="' + Element.Day.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Month.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Year.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
// only insert time element error messages if time elements are present
if (Element.Hour && Element.Hour.length) {
Element.Hour
.after('<div id="' + Element.Hour.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>')
.after('<div id="' + Element.Minute.attr('id') + 'Error" class="TooltipErrorMessage"><p>' + ErrorMessage + '</p></div>');
}
}
$('#' + Core.App.EscapeSelector(Element.Day.attr('id')) + 'DatepickerIcon').unbind('click.Datepicker').bind('click.Datepicker', function () {
$DatepickerElement.datepicker('show');
return false;
});
// do not show the datepicker container div.
$('#ui-datepicker-div').hide();
};
return TargetNS;
}(Core.UI.Datepicker || {}));
| LeadingZero | identifier_name |
_schema.py | #!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def __init__(self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
"""Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema()
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcraft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
messages = [e.message]
path = []
while e.absolute_path:
element = e.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element)
else:
|
if path:
messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if e.cause:
messages.append('({})'.format(e.cause))
raise SnapcraftSchemaError(' '.join(messages))
| path.append(str(element)) | conditional_block |
_schema.py | #!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def __init__(self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
"""Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema()
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcraft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
messages = [e.message]
path = []
while e.absolute_path:
element = e.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element) | messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if e.cause:
messages.append('({})'.format(e.cause))
raise SnapcraftSchemaError(' '.join(messages)) | else:
path.append(str(element))
if path: | random_line_split |
_schema.py | #!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def __init__(self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
|
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcraft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
messages = [e.message]
path = []
while e.absolute_path:
element = e.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element)
else:
path.append(str(element))
if path:
messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if e.cause:
messages.append('({})'.format(e.cause))
raise SnapcraftSchemaError(' '.join(messages))
| """Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema() | identifier_body |
_schema.py | #!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def | (self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
"""Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema()
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcraft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
messages = [e.message]
path = []
while e.absolute_path:
element = e.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element)
else:
path.append(str(element))
if path:
messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if e.cause:
messages.append('({})'.format(e.cause))
raise SnapcraftSchemaError(' '.join(messages))
| __init__ | identifier_name |
usage-builder.js | 'use strict';
const { extend } = require('underscore');
const dbclient = require('abacus-dbclient');
const { testCollectedUsageID, testResourceID, testOrganizationID, testSpaceID, testConsumerID, testPlanID,
testResourceType, testAccountID, testMeteringPlanID, testRatingPlanID,
testPricingPlanID } = require('./fixtures/usageDocumentFieldsConstants');
const _commonBlueprint = {
collected_usage_id: testCollectedUsageID,
resource_id: testResourceID,
organization_id: testOrganizationID,
space_id: testSpaceID,
consumer_id: testConsumerID,
plan_id: testPlanID,
resource_type: testResourceType,
account_id: testAccountID,
metering_plan_id: testMeteringPlanID,
rating_plan_id: testRatingPlanID,
pricing_plan_id: testPricingPlanID | const buildUsage = (...builders) => {
const usage = {};
for(let builder of builders)
builder(usage);
return extend(usage, {
id: dbclient.kturi(usage.resource_instance_id, usage.processed)
});
};
const withEndTimestamp = (timestamp) => (usage) => usage.end = timestamp;
const withStartTimestamp = (timestamp) => (usage) => usage.start = timestamp;
const withProcessedTimestamp = (timestamp) => (usage) => usage.processed = timestamp;
const withBlueprint = (blueprint) => (usage) => extend(usage, blueprint);
const withDefaultBlueprint = () => (usage) => extend(usage, _commonBlueprint);
const withResourceInstanceId = (resourceInstanceId) => (usage) => usage.resource_instance_id = resourceInstanceId;
const withAccumulatedUsage = (accumulatedUsage) => (usage) => usage.accumulated_usage = accumulatedUsage;
const buildAccumulatedUsage = (...builders) => {
const accumulatedUsage = { windows: [[null], [null], [null], [null, null, null, null, null, null], [null, null]] };
for(let builder of builders)
builder(accumulatedUsage);
return accumulatedUsage;
};
const withMetricName = (metricName) => (accumulatedUsage) => accumulatedUsage.metric = metricName;
const withCurrentDayQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[3][0] = { quantity: quantity };
const withPreviousDayQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[3][1] = { quantity: quantity };
const withCurrentMonthQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[4][0] = { quantity: quantity };
module.exports = {
buildUsage, withEndTimestamp, withStartTimestamp, withProcessedTimestamp, withBlueprint, withDefaultBlueprint,
withResourceInstanceId, withAccumulatedUsage, buildAccumulatedUsage, withMetricName, withCurrentDayQuantity,
withCurrentMonthQuantity, withPreviousDayQuantity
}; | };
| random_line_split |
index.js | /*global describe, it, assert, clone */
/*jshint browser: true */
describe('clone', function () {
'use strict';
it('should clone an object', function () {
var obj = {
cats: true,
dogs: 2,
fish: [ 0, 1, 2 ]
};
var c = clone(obj);
obj.cats = false;
obj.dogs = 1;
obj.fish[0] = 'stuff';
assert.strictEqual(c.cats, true);
assert.strictEqual(c.dogs, 2);
assert.deepEqual(c.fish, [0, 1, 2]);
});
it('should clone nested objects', function () {
var obj = {
cats: {
fred: 1,
billy: 2,
meow: true
},
dogs: {
spot: 1,
max: 2,
woof: [ 0, 1, 2 ]
},
fish: [ 0, 1, 2 ]
};
var c = clone(obj);
obj.cats.fred = 47;
obj.dogs = 47;
obj.fish[0] = 'stuff';
assert.deepEqual(c.cats, {
fred: 1,
billy: 2,
meow: true
});
assert.deepEqual(c.dogs, {
spot: 1,
max: 2,
woof: [ 0, 1, 2 ]
});
assert.deepEqual(c.fish, [ 0, 1, 2]);
});
it('should clone objects with methods', function () {
var obj = {
cats: function () { return 'meow'; },
dogs: function () { return 'woof'; }
};
var c = clone(obj);
assert.strictEqual(obj.cats, c.cats);
assert.strictEqual(obj.dogs, c.dogs);
obj.cats = function () {};
obj.dogs = function () {};
assert.notStrictEqual(obj.cats, c.cats);
assert.notStrictEqual(obj.dogs, c.dogs);
});
it('should clone prototypes', function () {
function Cat(name) {
this.name = name;
} | Cat.prototype.bark = function () {
return 'cats dont bark';
};
var cat = new Cat('Fred'),
c = clone(cat);
assert.deepEqual(cat.name, c.name);
assert.deepEqual(Cat.prototype.bark, c.bark);
assert.deepEqual(Cat.prototype.meow, c.meow);
});
}); |
Cat.prototype.meow = function () {
return 'meow';
};
| random_line_split |
index.js | /*global describe, it, assert, clone */
/*jshint browser: true */
describe('clone', function () {
'use strict';
it('should clone an object', function () {
var obj = {
cats: true,
dogs: 2,
fish: [ 0, 1, 2 ]
};
var c = clone(obj);
obj.cats = false;
obj.dogs = 1;
obj.fish[0] = 'stuff';
assert.strictEqual(c.cats, true);
assert.strictEqual(c.dogs, 2);
assert.deepEqual(c.fish, [0, 1, 2]);
});
it('should clone nested objects', function () {
var obj = {
cats: {
fred: 1,
billy: 2,
meow: true
},
dogs: {
spot: 1,
max: 2,
woof: [ 0, 1, 2 ]
},
fish: [ 0, 1, 2 ]
};
var c = clone(obj);
obj.cats.fred = 47;
obj.dogs = 47;
obj.fish[0] = 'stuff';
assert.deepEqual(c.cats, {
fred: 1,
billy: 2,
meow: true
});
assert.deepEqual(c.dogs, {
spot: 1,
max: 2,
woof: [ 0, 1, 2 ]
});
assert.deepEqual(c.fish, [ 0, 1, 2]);
});
it('should clone objects with methods', function () {
var obj = {
cats: function () { return 'meow'; },
dogs: function () { return 'woof'; }
};
var c = clone(obj);
assert.strictEqual(obj.cats, c.cats);
assert.strictEqual(obj.dogs, c.dogs);
obj.cats = function () {};
obj.dogs = function () {};
assert.notStrictEqual(obj.cats, c.cats);
assert.notStrictEqual(obj.dogs, c.dogs);
});
it('should clone prototypes', function () {
function Cat(name) |
Cat.prototype.meow = function () {
return 'meow';
};
Cat.prototype.bark = function () {
return 'cats dont bark';
};
var cat = new Cat('Fred'),
c = clone(cat);
assert.deepEqual(cat.name, c.name);
assert.deepEqual(Cat.prototype.bark, c.bark);
assert.deepEqual(Cat.prototype.meow, c.meow);
});
});
| {
this.name = name;
} | identifier_body |
index.js | /*global describe, it, assert, clone */
/*jshint browser: true */
describe('clone', function () {
'use strict';
it('should clone an object', function () {
var obj = {
cats: true,
dogs: 2,
fish: [ 0, 1, 2 ]
};
var c = clone(obj);
obj.cats = false;
obj.dogs = 1;
obj.fish[0] = 'stuff';
assert.strictEqual(c.cats, true);
assert.strictEqual(c.dogs, 2);
assert.deepEqual(c.fish, [0, 1, 2]);
});
it('should clone nested objects', function () {
var obj = {
cats: {
fred: 1,
billy: 2,
meow: true
},
dogs: {
spot: 1,
max: 2,
woof: [ 0, 1, 2 ]
},
fish: [ 0, 1, 2 ]
};
var c = clone(obj);
obj.cats.fred = 47;
obj.dogs = 47;
obj.fish[0] = 'stuff';
assert.deepEqual(c.cats, {
fred: 1,
billy: 2,
meow: true
});
assert.deepEqual(c.dogs, {
spot: 1,
max: 2,
woof: [ 0, 1, 2 ]
});
assert.deepEqual(c.fish, [ 0, 1, 2]);
});
it('should clone objects with methods', function () {
var obj = {
cats: function () { return 'meow'; },
dogs: function () { return 'woof'; }
};
var c = clone(obj);
assert.strictEqual(obj.cats, c.cats);
assert.strictEqual(obj.dogs, c.dogs);
obj.cats = function () {};
obj.dogs = function () {};
assert.notStrictEqual(obj.cats, c.cats);
assert.notStrictEqual(obj.dogs, c.dogs);
});
it('should clone prototypes', function () {
function | (name) {
this.name = name;
}
Cat.prototype.meow = function () {
return 'meow';
};
Cat.prototype.bark = function () {
return 'cats dont bark';
};
var cat = new Cat('Fred'),
c = clone(cat);
assert.deepEqual(cat.name, c.name);
assert.deepEqual(Cat.prototype.bark, c.bark);
assert.deepEqual(Cat.prototype.meow, c.meow);
});
});
| Cat | identifier_name |
how_sexy_is_your_name.py | SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25, | name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
return 'THE ULTIMATE SEXIEST'
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY' | 'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def sexy_name(name): | random_line_split |
how_sexy_is_your_name.py | SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25,
'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def | (name):
name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
return 'THE ULTIMATE SEXIEST'
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY'
| sexy_name | identifier_name |
how_sexy_is_your_name.py | SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25,
'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def sexy_name(name):
| name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
return 'THE ULTIMATE SEXIEST'
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY' | identifier_body |
|
how_sexy_is_your_name.py | SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25,
'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def sexy_name(name):
name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
|
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY'
| return 'THE ULTIMATE SEXIEST' | conditional_block |
assoc-types.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type="lib"]
// @has assoc_types/trait.Index.html
pub trait Index<I: ?Sized> {
// @has - '//*[@id="associatedtype.Output"]//code' 'type Output: ?Sized'
type Output: ?Sized;
// @has - '//*[@id="tymethod.index"]//code' \
// "fn index<'a>(&'a self, index: I) -> &'a Self::Output"
fn index<'a>(&'a self, index: I) -> &'a Self::Output;
}
// @has assoc_types/fn.use_output.html
// @has - '//*[@class="rust fn"]' '-> &T::Output'
pub fn use_output<T: Index<usize>>(obj: &T, index: usize) -> &T::Output {
obj.index(index)
}
pub trait Feed {
type Input;
}
// @has assoc_types/fn.use_input.html
// @has - '//*[@class="rust fn"]' 'T::Input'
pub fn use_input<T: Feed>(_feed: &T, _element: T::Input) |
// @has assoc_types/fn.cmp_input.html
// @has - '//*[@class="rust fn"]' 'where T::Input: PartialEq<U::Input>'
pub fn cmp_input<T: Feed, U: Feed>(a: &T::Input, b: &U::Input) -> bool
where T::Input: PartialEq<U::Input>
{
a == b
}
| { } | identifier_body |
assoc-types.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type="lib"]
// @has assoc_types/trait.Index.html
pub trait Index<I: ?Sized> {
// @has - '//*[@id="associatedtype.Output"]//code' 'type Output: ?Sized'
type Output: ?Sized;
// @has - '//*[@id="tymethod.index"]//code' \
// "fn index<'a>(&'a self, index: I) -> &'a Self::Output"
fn index<'a>(&'a self, index: I) -> &'a Self::Output;
}
// @has assoc_types/fn.use_output.html
// @has - '//*[@class="rust fn"]' '-> &T::Output'
pub fn | <T: Index<usize>>(obj: &T, index: usize) -> &T::Output {
obj.index(index)
}
pub trait Feed {
type Input;
}
// @has assoc_types/fn.use_input.html
// @has - '//*[@class="rust fn"]' 'T::Input'
pub fn use_input<T: Feed>(_feed: &T, _element: T::Input) { }
// @has assoc_types/fn.cmp_input.html
// @has - '//*[@class="rust fn"]' 'where T::Input: PartialEq<U::Input>'
pub fn cmp_input<T: Feed, U: Feed>(a: &T::Input, b: &U::Input) -> bool
where T::Input: PartialEq<U::Input>
{
a == b
}
| use_output | identifier_name |
assoc-types.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | // option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type="lib"]
// @has assoc_types/trait.Index.html
pub trait Index<I: ?Sized> {
// @has - '//*[@id="associatedtype.Output"]//code' 'type Output: ?Sized'
type Output: ?Sized;
// @has - '//*[@id="tymethod.index"]//code' \
// "fn index<'a>(&'a self, index: I) -> &'a Self::Output"
fn index<'a>(&'a self, index: I) -> &'a Self::Output;
}
// @has assoc_types/fn.use_output.html
// @has - '//*[@class="rust fn"]' '-> &T::Output'
pub fn use_output<T: Index<usize>>(obj: &T, index: usize) -> &T::Output {
obj.index(index)
}
pub trait Feed {
type Input;
}
// @has assoc_types/fn.use_input.html
// @has - '//*[@class="rust fn"]' 'T::Input'
pub fn use_input<T: Feed>(_feed: &T, _element: T::Input) { }
// @has assoc_types/fn.cmp_input.html
// @has - '//*[@class="rust fn"]' 'where T::Input: PartialEq<U::Input>'
pub fn cmp_input<T: Feed, U: Feed>(a: &T::Input, b: &U::Input) -> bool
where T::Input: PartialEq<U::Input>
{
a == b
} | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
display-final-after-month-is-over.py | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python | import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString) |
from participantCollection import ParticipantCollection
import re
import datetime | random_line_split |
display-final-after-month-is-over.py | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
|
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer | identifier_body |
display-final-after-month-is-over.py | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def | ():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| templateForParticipants | identifier_name |
display-final-after-month-is-over.py | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
|
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| answer += "/u/" + participant.name
answer += "\n\n" | conditional_block |
underscore_const_names.rs | // Copyright 2012-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![feature(const_let)]
#![feature(underscore_const_names)]
trait Trt {}
struct Str {}
impl Trt for Str {}
macro_rules! check_impl {
($struct:ident,$trait:ident) => {
const _ : () = {
use std::marker::PhantomData;
struct ImplementsTrait<T: $trait>(PhantomData<T>);
let _ = ImplementsTrait::<$struct>(PhantomData);
()
};
}
}
#[deny(unused)]
const _ : () = ();
const _ : i32 = 42;
const _ : Str = Str{};
check_impl!(Str, Trt);
check_impl!(Str, Trt);
fn main() | {
check_impl!(Str, Trt);
check_impl!(Str, Trt);
} | identifier_body |
|
underscore_const_names.rs | // Copyright 2012-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![feature(const_let)]
#![feature(underscore_const_names)]
trait Trt {}
struct Str {}
impl Trt for Str {}
macro_rules! check_impl {
($struct:ident,$trait:ident) => {
const _ : () = {
use std::marker::PhantomData;
struct ImplementsTrait<T: $trait>(PhantomData<T>);
let _ = ImplementsTrait::<$struct>(PhantomData);
()
};
}
}
#[deny(unused)]
const _ : () = ();
const _ : i32 = 42;
const _ : Str = Str{};
check_impl!(Str, Trt);
check_impl!(Str, Trt);
fn | () {
check_impl!(Str, Trt);
check_impl!(Str, Trt);
}
| main | identifier_name |
underscore_const_names.rs | // Copyright 2012-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
trait Trt {}
struct Str {}
impl Trt for Str {}
macro_rules! check_impl {
($struct:ident,$trait:ident) => {
const _ : () = {
use std::marker::PhantomData;
struct ImplementsTrait<T: $trait>(PhantomData<T>);
let _ = ImplementsTrait::<$struct>(PhantomData);
()
};
}
}
#[deny(unused)]
const _ : () = ();
const _ : i32 = 42;
const _ : Str = Str{};
check_impl!(Str, Trt);
check_impl!(Str, Trt);
fn main() {
check_impl!(Str, Trt);
check_impl!(Str, Trt);
} | // compile-pass
#![feature(const_let)]
#![feature(underscore_const_names)] | random_line_split |
walk-jqueue-2.js | (function () {
"use strict"
// Array.prototype.forEachAsync(next, item, i, collection)
require('futures/forEachAsync');
var fs = require('fs'),
EventEmitter = require('events').EventEmitter;
// 2010-11-25 [email protected]
function create(pathname, cb) {
var emitter = new EventEmitter(),
q = [],
queue = [q],
curpath;
function walk() {
fs.readdir(curpath, function(err, files) {
if (err) {
emitter.emit('error', curpath, err);
}
// XXX bug was here. next() was omitted
if (!files || 0 == files.length) {
return next();
}
var stats = [];
emitter.emit('names', curpath, files, stats);
files.forEachAsync(function (cont, file) {
emitter.emit('name', curpath, file);
fs.lstat(curpath + '/' + file, function (err, stat) {
if (err) {
emitter.emit('error', curpath, err);
}
if (stat) {
stat.name = file;
stats.push(stat);
emitter.emit('stat', curpath, file, stat);
}
cont();
});
}).then(function () {
var dirs = []
emitter.emit('stats', curpath, files, stats);
stats.forEach(function (stat) {
if (stat.isDirectory()) {
dirs.push(stat.name);
}
});
dirs.forEach(fullPath);
queue.push(q = dirs);
next();
});
});
}
function next() {
if (q.length) {
curpath = q.pop();
return walk();
}
if (queue.length -= 1) {
q = queue[queue.length-1];
return next();
}
emitter.emit('end');
}
function | (v,i,o) {
o[i]= [curpath, '/', v].join('');
}
curpath = pathname;
walk();
return emitter;
}
module.exports = create;
}());
| fullPath | identifier_name |
walk-jqueue-2.js | (function () {
"use strict"
// Array.prototype.forEachAsync(next, item, i, collection)
require('futures/forEachAsync');
var fs = require('fs'),
EventEmitter = require('events').EventEmitter;
// 2010-11-25 [email protected]
function create(pathname, cb) {
var emitter = new EventEmitter(),
q = [],
queue = [q],
curpath;
function walk() {
fs.readdir(curpath, function(err, files) {
if (err) {
emitter.emit('error', curpath, err);
}
// XXX bug was here. next() was omitted
if (!files || 0 == files.length) {
return next();
}
var stats = [];
emitter.emit('names', curpath, files, stats);
files.forEachAsync(function (cont, file) {
emitter.emit('name', curpath, file);
fs.lstat(curpath + '/' + file, function (err, stat) {
if (err) |
if (stat) {
stat.name = file;
stats.push(stat);
emitter.emit('stat', curpath, file, stat);
}
cont();
});
}).then(function () {
var dirs = []
emitter.emit('stats', curpath, files, stats);
stats.forEach(function (stat) {
if (stat.isDirectory()) {
dirs.push(stat.name);
}
});
dirs.forEach(fullPath);
queue.push(q = dirs);
next();
});
});
}
function next() {
if (q.length) {
curpath = q.pop();
return walk();
}
if (queue.length -= 1) {
q = queue[queue.length-1];
return next();
}
emitter.emit('end');
}
function fullPath(v,i,o) {
o[i]= [curpath, '/', v].join('');
}
curpath = pathname;
walk();
return emitter;
}
module.exports = create;
}());
| {
emitter.emit('error', curpath, err);
} | conditional_block |
walk-jqueue-2.js | (function () {
"use strict"
// Array.prototype.forEachAsync(next, item, i, collection)
require('futures/forEachAsync');
var fs = require('fs'),
EventEmitter = require('events').EventEmitter;
// 2010-11-25 [email protected]
function create(pathname, cb) {
var emitter = new EventEmitter(),
q = [],
queue = [q],
curpath;
function walk() {
fs.readdir(curpath, function(err, files) {
if (err) {
emitter.emit('error', curpath, err);
}
// XXX bug was here. next() was omitted
if (!files || 0 == files.length) {
return next();
}
var stats = [];
emitter.emit('names', curpath, files, stats);
files.forEachAsync(function (cont, file) {
emitter.emit('name', curpath, file);
fs.lstat(curpath + '/' + file, function (err, stat) {
if (err) {
emitter.emit('error', curpath, err);
}
if (stat) {
stat.name = file;
stats.push(stat);
emitter.emit('stat', curpath, file, stat);
}
cont();
});
}).then(function () {
var dirs = []
emitter.emit('stats', curpath, files, stats);
stats.forEach(function (stat) {
if (stat.isDirectory()) {
dirs.push(stat.name);
}
});
dirs.forEach(fullPath);
queue.push(q = dirs);
next();
});
});
}
function next() { | q = queue[queue.length-1];
return next();
}
emitter.emit('end');
}
function fullPath(v,i,o) {
o[i]= [curpath, '/', v].join('');
}
curpath = pathname;
walk();
return emitter;
}
module.exports = create;
}()); | if (q.length) {
curpath = q.pop();
return walk();
}
if (queue.length -= 1) { | random_line_split |
walk-jqueue-2.js | (function () {
"use strict"
// Array.prototype.forEachAsync(next, item, i, collection)
require('futures/forEachAsync');
var fs = require('fs'),
EventEmitter = require('events').EventEmitter;
// 2010-11-25 [email protected]
function create(pathname, cb) {
var emitter = new EventEmitter(),
q = [],
queue = [q],
curpath;
function walk() |
function next() {
if (q.length) {
curpath = q.pop();
return walk();
}
if (queue.length -= 1) {
q = queue[queue.length-1];
return next();
}
emitter.emit('end');
}
function fullPath(v,i,o) {
o[i]= [curpath, '/', v].join('');
}
curpath = pathname;
walk();
return emitter;
}
module.exports = create;
}());
| {
fs.readdir(curpath, function(err, files) {
if (err) {
emitter.emit('error', curpath, err);
}
// XXX bug was here. next() was omitted
if (!files || 0 == files.length) {
return next();
}
var stats = [];
emitter.emit('names', curpath, files, stats);
files.forEachAsync(function (cont, file) {
emitter.emit('name', curpath, file);
fs.lstat(curpath + '/' + file, function (err, stat) {
if (err) {
emitter.emit('error', curpath, err);
}
if (stat) {
stat.name = file;
stats.push(stat);
emitter.emit('stat', curpath, file, stat);
}
cont();
});
}).then(function () {
var dirs = []
emitter.emit('stats', curpath, files, stats);
stats.forEach(function (stat) {
if (stat.isDirectory()) {
dirs.push(stat.name);
}
});
dirs.forEach(fullPath);
queue.push(q = dirs);
next();
});
});
} | identifier_body |
navigation.js | /**
* Custom script that builds a SELECT drop-down with all menu links.
*
* Selects are great since they support CSS3
*
*/
jQuery(document).ready(function(){
jQuery('ul:first').attr("id", "primary-menu");
jQuery("ul#primary-menu > li:has(ul)").addClass("hasChildren");
jQuery("ul#primary-menu > li:has(ul) > ul > li > a").addClass("isChild");
});
jQuery(function() {
jQuery("<select id='rl-navMenu' />").appendTo("#masthead");
jQuery("#rl-navMenu").wrap('<div class="rl-styled-select"></div>'); |
jQuery("nav a").each(function() {
var el = jQuery(this);
jQuery("<option />", {
"value" : el.attr("href"),
"text" : el.text(),
"class" : el.attr("class")
}).appendTo("#masthead select");
});
jQuery("nav select").change(function() {
window.location = jQuery(this).find("option:selected").val();
});
});
jQuery(document).ready(function() {
jQuery("#masthead select option").each(function() {
var el = jQuery(this);
if(el.hasClass("isChild")){
jQuery(el.prepend("- "))
}
});
}); | jQuery("<option />", {
"selected": "selected",
"value" : "",
"text" : "Menu",
}).appendTo("#masthead select"); | random_line_split |
navigation.js | /**
* Custom script that builds a SELECT drop-down with all menu links.
*
* Selects are great since they support CSS3
*
*/
jQuery(document).ready(function(){
jQuery('ul:first').attr("id", "primary-menu");
jQuery("ul#primary-menu > li:has(ul)").addClass("hasChildren");
jQuery("ul#primary-menu > li:has(ul) > ul > li > a").addClass("isChild");
});
jQuery(function() {
jQuery("<select id='rl-navMenu' />").appendTo("#masthead");
jQuery("#rl-navMenu").wrap('<div class="rl-styled-select"></div>');
jQuery("<option />", {
"selected": "selected",
"value" : "",
"text" : "Menu",
}).appendTo("#masthead select");
jQuery("nav a").each(function() {
var el = jQuery(this);
jQuery("<option />", {
"value" : el.attr("href"),
"text" : el.text(),
"class" : el.attr("class")
}).appendTo("#masthead select");
});
jQuery("nav select").change(function() {
window.location = jQuery(this).find("option:selected").val();
});
});
jQuery(document).ready(function() {
jQuery("#masthead select option").each(function() {
var el = jQuery(this);
if(el.hasClass("isChild")) |
});
}); | {
jQuery(el.prepend("- "))
} | conditional_block |
label.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Tuukka Turto
#
# This file is part of satin-python.
#
# pyherc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyherc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satin-python. If not, see <http://www.gnu.org/licenses/>.
"""
Module for testing labels
"""
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from .enumerators import all_widgets
class LabelMatcher(BaseMatcher):
"""
Check if Widget has label with given text
"""
def __init__(self, text):
"""
Default constructor
"""
super(LabelMatcher, self).__init__()
if hasattr(text, 'matches'):
self.text = text
else:
|
def _matches(self, item):
"""
Check if matcher matches item
:param item: object to match against
:returns: True if matching, otherwise False
:rtype: Boolean
"""
widgets = all_widgets(item)
for widget in widgets:
if hasattr(widget, 'text') and self.text.matches(widget.text()):
return True
return False
def describe_to(self, description):
"""
Describe this matcher
"""
description.append('Control with label {0}'.format(self.text))
def describe_mismatch(self, item, mismatch_description):
"""
Describe this mismatch
"""
mismatch_description.append(
'QLabel with text {0} was not found'.format(self.text))
def has_label(text):
"""
Check if Widget has label with given text
"""
return LabelMatcher(text)
| self.text = wrap_matcher(text) | conditional_block |
label.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Tuukka Turto
#
# This file is part of satin-python.
#
# pyherc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyherc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satin-python. If not, see <http://www.gnu.org/licenses/>.
"""
Module for testing labels
"""
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from .enumerators import all_widgets
class | (BaseMatcher):
"""
Check if Widget has label with given text
"""
def __init__(self, text):
"""
Default constructor
"""
super(LabelMatcher, self).__init__()
if hasattr(text, 'matches'):
self.text = text
else:
self.text = wrap_matcher(text)
def _matches(self, item):
"""
Check if matcher matches item
:param item: object to match against
:returns: True if matching, otherwise False
:rtype: Boolean
"""
widgets = all_widgets(item)
for widget in widgets:
if hasattr(widget, 'text') and self.text.matches(widget.text()):
return True
return False
def describe_to(self, description):
"""
Describe this matcher
"""
description.append('Control with label {0}'.format(self.text))
def describe_mismatch(self, item, mismatch_description):
"""
Describe this mismatch
"""
mismatch_description.append(
'QLabel with text {0} was not found'.format(self.text))
def has_label(text):
"""
Check if Widget has label with given text
"""
return LabelMatcher(text)
| LabelMatcher | identifier_name |
label.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Tuukka Turto
#
# This file is part of satin-python.
#
# pyherc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyherc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satin-python. If not, see <http://www.gnu.org/licenses/>.
"""
Module for testing labels
"""
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from .enumerators import all_widgets
class LabelMatcher(BaseMatcher):
"""
Check if Widget has label with given text
"""
def __init__(self, text):
"""
Default constructor
"""
super(LabelMatcher, self).__init__()
if hasattr(text, 'matches'):
self.text = text
else:
self.text = wrap_matcher(text)
def _matches(self, item):
"""
Check if matcher matches item
:param item: object to match against
:returns: True if matching, otherwise False
:rtype: Boolean
"""
widgets = all_widgets(item)
for widget in widgets:
if hasattr(widget, 'text') and self.text.matches(widget.text()):
return True
return False
def describe_to(self, description):
"""
Describe this matcher
"""
description.append('Control with label {0}'.format(self.text))
def describe_mismatch(self, item, mismatch_description):
"""
Describe this mismatch
"""
mismatch_description.append(
'QLabel with text {0} was not found'.format(self.text))
def has_label(text):
| """
Check if Widget has label with given text
"""
return LabelMatcher(text) | identifier_body |
|
label.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Tuukka Turto
#
# This file is part of satin-python. | # (at your option) any later version.
#
# pyherc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satin-python. If not, see <http://www.gnu.org/licenses/>.
"""
Module for testing labels
"""
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from .enumerators import all_widgets
class LabelMatcher(BaseMatcher):
"""
Check if Widget has label with given text
"""
def __init__(self, text):
"""
Default constructor
"""
super(LabelMatcher, self).__init__()
if hasattr(text, 'matches'):
self.text = text
else:
self.text = wrap_matcher(text)
def _matches(self, item):
"""
Check if matcher matches item
:param item: object to match against
:returns: True if matching, otherwise False
:rtype: Boolean
"""
widgets = all_widgets(item)
for widget in widgets:
if hasattr(widget, 'text') and self.text.matches(widget.text()):
return True
return False
def describe_to(self, description):
"""
Describe this matcher
"""
description.append('Control with label {0}'.format(self.text))
def describe_mismatch(self, item, mismatch_description):
"""
Describe this mismatch
"""
mismatch_description.append(
'QLabel with text {0} was not found'.format(self.text))
def has_label(text):
"""
Check if Widget has label with given text
"""
return LabelMatcher(text) | #
# pyherc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or | random_line_split |
ml-levenberg-marquardt-tests.ts | import LM = require('ml-levenberg-marquardt');
function | ([a, b]: number[]) {
return (t: number) => a * Math.sin(b * t);
}
const data = {
x: [
0, Math.PI / 2, Math.PI, Math.PI * 3 / 2
],
y: [
0, 1, 0, -1
]
};
const initialValues = [ 2, 4 ];
const options = {
damping: 1.5,
initialValues,
gradientDifference: 10e-2,
maxIterations: 100,
errorTolerance: 10e-3
};
const fittedParams = LM(data, sinFunction, options);
// this doesn't make sense but it uses fittedParams...
if (fittedParams.iterations < 10) {
fittedParams.parameterValues[0] += fittedParams.parameterError;
}
| sinFunction | identifier_name |
ml-levenberg-marquardt-tests.ts | import LM = require('ml-levenberg-marquardt');
function sinFunction([a, b]: number[]) {
return (t: number) => a * Math.sin(b * t);
}
const data = {
x: [
0, Math.PI / 2, Math.PI, Math.PI * 3 / 2
],
y: [
0, 1, 0, -1
]
};
const initialValues = [ 2, 4 ];
const options = {
damping: 1.5,
initialValues,
gradientDifference: 10e-2,
maxIterations: 100,
errorTolerance: 10e-3
};
const fittedParams = LM(data, sinFunction, options);
// this doesn't make sense but it uses fittedParams...
if (fittedParams.iterations < 10) | {
fittedParams.parameterValues[0] += fittedParams.parameterError;
} | conditional_block |
|
ml-levenberg-marquardt-tests.ts | import LM = require('ml-levenberg-marquardt');
function sinFunction([a, b]: number[]) {
return (t: number) => a * Math.sin(b * t);
}
const data = {
x: [
0, Math.PI / 2, Math.PI, Math.PI * 3 / 2
],
y: [
0, 1, 0, -1
]
};
const initialValues = [ 2, 4 ];
const options = {
damping: 1.5,
initialValues,
gradientDifference: 10e-2,
maxIterations: 100,
errorTolerance: 10e-3
};
const fittedParams = LM(data, sinFunction, options);
// this doesn't make sense but it uses fittedParams... | if (fittedParams.iterations < 10) {
fittedParams.parameterValues[0] += fittedParams.parameterError;
} | random_line_split |
|
ml-levenberg-marquardt-tests.ts | import LM = require('ml-levenberg-marquardt');
function sinFunction([a, b]: number[]) |
const data = {
x: [
0, Math.PI / 2, Math.PI, Math.PI * 3 / 2
],
y: [
0, 1, 0, -1
]
};
const initialValues = [ 2, 4 ];
const options = {
damping: 1.5,
initialValues,
gradientDifference: 10e-2,
maxIterations: 100,
errorTolerance: 10e-3
};
const fittedParams = LM(data, sinFunction, options);
// this doesn't make sense but it uses fittedParams...
if (fittedParams.iterations < 10) {
fittedParams.parameterValues[0] += fittedParams.parameterError;
}
| {
return (t: number) => a * Math.sin(b * t);
} | identifier_body |
fib_fac.py | #-*- coding: utf-8 -*-
def factorial(n):
"""Return the factorial of n"""
if n < 2:
|
return n * factorial(n - 1)
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
def fib_fac(x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac()
| return 1 | conditional_block |
fib_fac.py | #-*- coding: utf-8 -*-
def factorial(n):
|
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
def fib_fac(x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac()
| """Return the factorial of n"""
if n < 2:
return 1
return n * factorial(n - 1) | identifier_body |
fib_fac.py | #-*- coding: utf-8 -*-
def factorial(n):
"""Return the factorial of n"""
if n < 2:
return 1
return n * factorial(n - 1)
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
def | (x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac()
| fib_fac | identifier_name |
fib_fac.py | #-*- coding: utf-8 -*- | return 1
return n * factorial(n - 1)
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
def fib_fac(x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac() |
def factorial(n):
"""Return the factorial of n"""
if n < 2: | random_line_split |
CreateChart.js | var results;
function viewData()
{
var resultsTable = $('<table></table>').attr('id', 'results');
var tableHead = $("<thead></thead>");
var tableBody = $("<tbody></tbody>");
var temp;
$.post("/api/Data/Query/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val()
},
function(data, status){})
.done(function(data)
{
data = JSON.parse(data);
//Take care of the headers
var columnSet = [];
var tableHeadRow = $("<tr></tr>");
for (var key in data[0])
{
if ($.inArray(key, columnSet) == -1)
{
columnSet.push(key);
tableHeadRow.append($('<th/>').html(key));
}
}
tableHead.append(tableHeadRow);
//Take care of the data
for(i = 0; i < 10 || i == data.length; i++)
{
row = data[i];
var tableBodyRow = $("<tr></tr>");
$.each(row, function(j, field)
{
tableBodyRow.append($('<td/>').html(field));
});
tableBody.append(tableBodyRow);
}
buildSelect("Dependent",columnSet,"Dependent");
buildSelect("Independent",columnSet,"Independent");
$('#main').append('<button class="btn grey" type="button" onclick="buildChart()">Build Chart</button>');
$('#main').append('<br>');
$('#main').append('<br>');
$('#main').append('<br>');
results = data;
});
//Append Everything
resultsTable.append(tableHead);
resultsTable.append(tableBody);
resultsTable.addClass("centered");
resultsTable.addClass("highlight");
$('#main').append(resultsTable);
}
function buildChart()
{
$('#myChart').remove();
$('#main').append('<canvas id="myChart" width="500" height="500"></canvas>');
var depend = $('#Dependent').find(":selected").text();
var independ = $('#Independent').find(":selected").text();
var labels = [];
var data = [];
var color = [];
$.each(results, function(j, row)
{
labels.push(row[independ]);
data.push(row[depend]);
color.push(colorGenerator());
});
var data = {
labels: labels,
datasets: [{
data: data,
backgroundColor: color,
hoverBackgroundColor: color}]
};
var ctx = document.getElementById("myChart");
var chart = new Chart(ctx,{
type: 'pie',
data: data,
options: {responsive: false, maintainAspectRatio: false}
});
$('#saveBtn').remove();
$('#main').append('<button class="btn grey" id="saveBtn" type="button" onclick="persistChart()">Save Chart</button>');
$('#main').append('<br>');
}
//
//
//
function buildSelect(ID, options, name)
|
//
//
//
function persistChart()
{
$.post("/api/Charts/Create/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val(),
dependent: $('#Dependent').find(":selected").text(),
independent: $('#Independent').find(":selected").text()
},
function(data, status){})
.done(function(data)
{
alert("Chart Saved");
});
}
| {
var division = $('<div class="input-field col s12" id="'+ID+'"></div>');
var selectBox = $("<select id='"+name+"'></select>");
for(var x in options)
{
var option = $("<option value="+options[x]+">"+options[x]+"</option>");
selectBox.append(option);
}
var option = $('<option value="0" disabled selected>Choose your option</option>');
selectBox.append(option);
var labelBox = $("<label>"+name+"</label>");
division.append(selectBox);
division.append(labelBox);
$('#main').append(division);
$('select').material_select();
} | identifier_body |
CreateChart.js | var results;
function viewData()
{
var resultsTable = $('<table></table>').attr('id', 'results');
var tableHead = $("<thead></thead>");
var tableBody = $("<tbody></tbody>");
var temp;
$.post("/api/Data/Query/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val()
},
function(data, status){})
.done(function(data)
{
data = JSON.parse(data);
//Take care of the headers
var columnSet = [];
var tableHeadRow = $("<tr></tr>");
for (var key in data[0])
{
if ($.inArray(key, columnSet) == -1)
{
columnSet.push(key);
tableHeadRow.append($('<th/>').html(key));
}
}
tableHead.append(tableHeadRow);
//Take care of the data
for(i = 0; i < 10 || i == data.length; i++)
{
row = data[i];
var tableBodyRow = $("<tr></tr>"); | {
tableBodyRow.append($('<td/>').html(field));
});
tableBody.append(tableBodyRow);
}
buildSelect("Dependent",columnSet,"Dependent");
buildSelect("Independent",columnSet,"Independent");
$('#main').append('<button class="btn grey" type="button" onclick="buildChart()">Build Chart</button>');
$('#main').append('<br>');
$('#main').append('<br>');
$('#main').append('<br>');
results = data;
});
//Append Everything
resultsTable.append(tableHead);
resultsTable.append(tableBody);
resultsTable.addClass("centered");
resultsTable.addClass("highlight");
$('#main').append(resultsTable);
}
function buildChart()
{
$('#myChart').remove();
$('#main').append('<canvas id="myChart" width="500" height="500"></canvas>');
var depend = $('#Dependent').find(":selected").text();
var independ = $('#Independent').find(":selected").text();
var labels = [];
var data = [];
var color = [];
$.each(results, function(j, row)
{
labels.push(row[independ]);
data.push(row[depend]);
color.push(colorGenerator());
});
var data = {
labels: labels,
datasets: [{
data: data,
backgroundColor: color,
hoverBackgroundColor: color}]
};
var ctx = document.getElementById("myChart");
var chart = new Chart(ctx,{
type: 'pie',
data: data,
options: {responsive: false, maintainAspectRatio: false}
});
$('#saveBtn').remove();
$('#main').append('<button class="btn grey" id="saveBtn" type="button" onclick="persistChart()">Save Chart</button>');
$('#main').append('<br>');
}
//
//
//
function buildSelect(ID, options, name)
{
var division = $('<div class="input-field col s12" id="'+ID+'"></div>');
var selectBox = $("<select id='"+name+"'></select>");
for(var x in options)
{
var option = $("<option value="+options[x]+">"+options[x]+"</option>");
selectBox.append(option);
}
var option = $('<option value="0" disabled selected>Choose your option</option>');
selectBox.append(option);
var labelBox = $("<label>"+name+"</label>");
division.append(selectBox);
division.append(labelBox);
$('#main').append(division);
$('select').material_select();
}
//
//
//
function persistChart()
{
$.post("/api/Charts/Create/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val(),
dependent: $('#Dependent').find(":selected").text(),
independent: $('#Independent').find(":selected").text()
},
function(data, status){})
.done(function(data)
{
alert("Chart Saved");
});
} | $.each(row, function(j, field) | random_line_split |
CreateChart.js | var results;
function viewData()
{
var resultsTable = $('<table></table>').attr('id', 'results');
var tableHead = $("<thead></thead>");
var tableBody = $("<tbody></tbody>");
var temp;
$.post("/api/Data/Query/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val()
},
function(data, status){})
.done(function(data)
{
data = JSON.parse(data);
//Take care of the headers
var columnSet = [];
var tableHeadRow = $("<tr></tr>");
for (var key in data[0])
{
if ($.inArray(key, columnSet) == -1)
{
columnSet.push(key);
tableHeadRow.append($('<th/>').html(key));
}
}
tableHead.append(tableHeadRow);
//Take care of the data
for(i = 0; i < 10 || i == data.length; i++)
|
buildSelect("Dependent",columnSet,"Dependent");
buildSelect("Independent",columnSet,"Independent");
$('#main').append('<button class="btn grey" type="button" onclick="buildChart()">Build Chart</button>');
$('#main').append('<br>');
$('#main').append('<br>');
$('#main').append('<br>');
results = data;
});
//Append Everything
resultsTable.append(tableHead);
resultsTable.append(tableBody);
resultsTable.addClass("centered");
resultsTable.addClass("highlight");
$('#main').append(resultsTable);
}
function buildChart()
{
$('#myChart').remove();
$('#main').append('<canvas id="myChart" width="500" height="500"></canvas>');
var depend = $('#Dependent').find(":selected").text();
var independ = $('#Independent').find(":selected").text();
var labels = [];
var data = [];
var color = [];
$.each(results, function(j, row)
{
labels.push(row[independ]);
data.push(row[depend]);
color.push(colorGenerator());
});
var data = {
labels: labels,
datasets: [{
data: data,
backgroundColor: color,
hoverBackgroundColor: color}]
};
var ctx = document.getElementById("myChart");
var chart = new Chart(ctx,{
type: 'pie',
data: data,
options: {responsive: false, maintainAspectRatio: false}
});
$('#saveBtn').remove();
$('#main').append('<button class="btn grey" id="saveBtn" type="button" onclick="persistChart()">Save Chart</button>');
$('#main').append('<br>');
}
//
//
//
function buildSelect(ID, options, name)
{
var division = $('<div class="input-field col s12" id="'+ID+'"></div>');
var selectBox = $("<select id='"+name+"'></select>");
for(var x in options)
{
var option = $("<option value="+options[x]+">"+options[x]+"</option>");
selectBox.append(option);
}
var option = $('<option value="0" disabled selected>Choose your option</option>');
selectBox.append(option);
var labelBox = $("<label>"+name+"</label>");
division.append(selectBox);
division.append(labelBox);
$('#main').append(division);
$('select').material_select();
}
//
//
//
function persistChart()
{
$.post("/api/Charts/Create/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val(),
dependent: $('#Dependent').find(":selected").text(),
independent: $('#Independent').find(":selected").text()
},
function(data, status){})
.done(function(data)
{
alert("Chart Saved");
});
}
| {
row = data[i];
var tableBodyRow = $("<tr></tr>");
$.each(row, function(j, field)
{
tableBodyRow.append($('<td/>').html(field));
});
tableBody.append(tableBodyRow);
} | conditional_block |
CreateChart.js | var results;
function viewData()
{
var resultsTable = $('<table></table>').attr('id', 'results');
var tableHead = $("<thead></thead>");
var tableBody = $("<tbody></tbody>");
var temp;
$.post("/api/Data/Query/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val()
},
function(data, status){})
.done(function(data)
{
data = JSON.parse(data);
//Take care of the headers
var columnSet = [];
var tableHeadRow = $("<tr></tr>");
for (var key in data[0])
{
if ($.inArray(key, columnSet) == -1)
{
columnSet.push(key);
tableHeadRow.append($('<th/>').html(key));
}
}
tableHead.append(tableHeadRow);
//Take care of the data
for(i = 0; i < 10 || i == data.length; i++)
{
row = data[i];
var tableBodyRow = $("<tr></tr>");
$.each(row, function(j, field)
{
tableBodyRow.append($('<td/>').html(field));
});
tableBody.append(tableBodyRow);
}
buildSelect("Dependent",columnSet,"Dependent");
buildSelect("Independent",columnSet,"Independent");
$('#main').append('<button class="btn grey" type="button" onclick="buildChart()">Build Chart</button>');
$('#main').append('<br>');
$('#main').append('<br>');
$('#main').append('<br>');
results = data;
});
//Append Everything
resultsTable.append(tableHead);
resultsTable.append(tableBody);
resultsTable.addClass("centered");
resultsTable.addClass("highlight");
$('#main').append(resultsTable);
}
function | ()
{
$('#myChart').remove();
$('#main').append('<canvas id="myChart" width="500" height="500"></canvas>');
var depend = $('#Dependent').find(":selected").text();
var independ = $('#Independent').find(":selected").text();
var labels = [];
var data = [];
var color = [];
$.each(results, function(j, row)
{
labels.push(row[independ]);
data.push(row[depend]);
color.push(colorGenerator());
});
var data = {
labels: labels,
datasets: [{
data: data,
backgroundColor: color,
hoverBackgroundColor: color}]
};
var ctx = document.getElementById("myChart");
var chart = new Chart(ctx,{
type: 'pie',
data: data,
options: {responsive: false, maintainAspectRatio: false}
});
$('#saveBtn').remove();
$('#main').append('<button class="btn grey" id="saveBtn" type="button" onclick="persistChart()">Save Chart</button>');
$('#main').append('<br>');
}
//
//
//
function buildSelect(ID, options, name)
{
var division = $('<div class="input-field col s12" id="'+ID+'"></div>');
var selectBox = $("<select id='"+name+"'></select>");
for(var x in options)
{
var option = $("<option value="+options[x]+">"+options[x]+"</option>");
selectBox.append(option);
}
var option = $('<option value="0" disabled selected>Choose your option</option>');
selectBox.append(option);
var labelBox = $("<label>"+name+"</label>");
division.append(selectBox);
division.append(labelBox);
$('#main').append(division);
$('select').material_select();
}
//
//
//
function persistChart()
{
$.post("/api/Charts/Create/",
{
name: $('#name').val(),
query: $('#query').val(),
server: $('#server').val(),
description: $('#description').val(),
dependent: $('#Dependent').find(":selected").text(),
independent: $('#Independent').find(":selected").text()
},
function(data, status){})
.done(function(data)
{
alert("Chart Saved");
});
}
| buildChart | identifier_name |
mod.rs | use libc::{self, c_int};
#[macro_use]
pub mod dlsym;
#[cfg(any(target_os = "linux", target_os = "android"))]
mod epoll;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use self::epoll::{Events, Selector};
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
mod kqueue;
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
pub use self::kqueue::{Events, Selector};
mod awakener;
mod eventedfd;
mod io;
mod ready;
mod tcp;
mod udp;
#[cfg(feature = "with-deprecated")]
mod uds;
pub use self::awakener::Awakener;
pub use self::eventedfd::EventedFd;
pub use self::io::{Io, set_nonblock};
pub use self::ready::UnixReady;
pub use self::tcp::{TcpStream, TcpListener};
pub use self::udp::UdpSocket;
#[cfg(feature = "with-deprecated")]
pub use self::uds::UnixSocket;
pub use iovec::IoVec;
use std::os::unix::io::FromRawFd;
pub fn pipe() -> ::io::Result<(Io, Io)> {
// Use pipe2 for atomically setting O_CLOEXEC if we can, but otherwise
// just fall back to using `pipe`.
dlsym!(fn pipe2(*mut c_int, c_int) -> c_int);
let mut pipes = [0; 2];
let flags = libc::O_NONBLOCK | libc::O_CLOEXEC;
unsafe {
match pipe2.get() {
Some(pipe2_fn) => {
cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?;
}
None => {
cvt(libc::pipe(pipes.as_mut_ptr()))?;
libc::fcntl(pipes[0], libc::F_SETFL, flags);
libc::fcntl(pipes[1], libc::F_SETFL, flags);
}
}
}
unsafe {
Ok((Io::from_raw_fd(pipes[0]), Io::from_raw_fd(pipes[1])))
}
} | }
impl IsMinusOne for i32 {
fn is_minus_one(&self) -> bool { *self == -1 }
}
impl IsMinusOne for isize {
fn is_minus_one(&self) -> bool { *self == -1 }
}
fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
use std::io;
if t.is_minus_one() {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
} |
trait IsMinusOne {
fn is_minus_one(&self) -> bool; | random_line_split |
mod.rs | use libc::{self, c_int};
#[macro_use]
pub mod dlsym;
#[cfg(any(target_os = "linux", target_os = "android"))]
mod epoll;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use self::epoll::{Events, Selector};
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
mod kqueue;
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
pub use self::kqueue::{Events, Selector};
mod awakener;
mod eventedfd;
mod io;
mod ready;
mod tcp;
mod udp;
#[cfg(feature = "with-deprecated")]
mod uds;
pub use self::awakener::Awakener;
pub use self::eventedfd::EventedFd;
pub use self::io::{Io, set_nonblock};
pub use self::ready::UnixReady;
pub use self::tcp::{TcpStream, TcpListener};
pub use self::udp::UdpSocket;
#[cfg(feature = "with-deprecated")]
pub use self::uds::UnixSocket;
pub use iovec::IoVec;
use std::os::unix::io::FromRawFd;
pub fn pipe() -> ::io::Result<(Io, Io)> {
// Use pipe2 for atomically setting O_CLOEXEC if we can, but otherwise
// just fall back to using `pipe`.
dlsym!(fn pipe2(*mut c_int, c_int) -> c_int);
let mut pipes = [0; 2];
let flags = libc::O_NONBLOCK | libc::O_CLOEXEC;
unsafe {
match pipe2.get() {
Some(pipe2_fn) => {
cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?;
}
None => {
cvt(libc::pipe(pipes.as_mut_ptr()))?;
libc::fcntl(pipes[0], libc::F_SETFL, flags);
libc::fcntl(pipes[1], libc::F_SETFL, flags);
}
}
}
unsafe {
Ok((Io::from_raw_fd(pipes[0]), Io::from_raw_fd(pipes[1])))
}
}
trait IsMinusOne {
fn is_minus_one(&self) -> bool;
}
impl IsMinusOne for i32 {
fn is_minus_one(&self) -> bool { *self == -1 }
}
impl IsMinusOne for isize {
fn is_minus_one(&self) -> bool { *self == -1 }
}
fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
use std::io;
if t.is_minus_one() {
Err(io::Error::last_os_error())
} else |
}
| {
Ok(t)
} | conditional_block |
mod.rs | use libc::{self, c_int};
#[macro_use]
pub mod dlsym;
#[cfg(any(target_os = "linux", target_os = "android"))]
mod epoll;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use self::epoll::{Events, Selector};
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
mod kqueue;
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
pub use self::kqueue::{Events, Selector};
mod awakener;
mod eventedfd;
mod io;
mod ready;
mod tcp;
mod udp;
#[cfg(feature = "with-deprecated")]
mod uds;
pub use self::awakener::Awakener;
pub use self::eventedfd::EventedFd;
pub use self::io::{Io, set_nonblock};
pub use self::ready::UnixReady;
pub use self::tcp::{TcpStream, TcpListener};
pub use self::udp::UdpSocket;
#[cfg(feature = "with-deprecated")]
pub use self::uds::UnixSocket;
pub use iovec::IoVec;
use std::os::unix::io::FromRawFd;
pub fn pipe() -> ::io::Result<(Io, Io)> {
// Use pipe2 for atomically setting O_CLOEXEC if we can, but otherwise
// just fall back to using `pipe`.
dlsym!(fn pipe2(*mut c_int, c_int) -> c_int);
let mut pipes = [0; 2];
let flags = libc::O_NONBLOCK | libc::O_CLOEXEC;
unsafe {
match pipe2.get() {
Some(pipe2_fn) => {
cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?;
}
None => {
cvt(libc::pipe(pipes.as_mut_ptr()))?;
libc::fcntl(pipes[0], libc::F_SETFL, flags);
libc::fcntl(pipes[1], libc::F_SETFL, flags);
}
}
}
unsafe {
Ok((Io::from_raw_fd(pipes[0]), Io::from_raw_fd(pipes[1])))
}
}
trait IsMinusOne {
fn is_minus_one(&self) -> bool;
}
impl IsMinusOne for i32 {
fn is_minus_one(&self) -> bool { *self == -1 }
}
impl IsMinusOne for isize {
fn | (&self) -> bool { *self == -1 }
}
fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
use std::io;
if t.is_minus_one() {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
}
| is_minus_one | identifier_name |
mod.rs | use libc::{self, c_int};
#[macro_use]
pub mod dlsym;
#[cfg(any(target_os = "linux", target_os = "android"))]
mod epoll;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use self::epoll::{Events, Selector};
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
mod kqueue;
#[cfg(any(target_os = "bitrig", target_os = "dragonfly",
target_os = "freebsd", target_os = "ios", target_os = "macos",
target_os = "netbsd", target_os = "openbsd"))]
pub use self::kqueue::{Events, Selector};
mod awakener;
mod eventedfd;
mod io;
mod ready;
mod tcp;
mod udp;
#[cfg(feature = "with-deprecated")]
mod uds;
pub use self::awakener::Awakener;
pub use self::eventedfd::EventedFd;
pub use self::io::{Io, set_nonblock};
pub use self::ready::UnixReady;
pub use self::tcp::{TcpStream, TcpListener};
pub use self::udp::UdpSocket;
#[cfg(feature = "with-deprecated")]
pub use self::uds::UnixSocket;
pub use iovec::IoVec;
use std::os::unix::io::FromRawFd;
pub fn pipe() -> ::io::Result<(Io, Io)> |
trait IsMinusOne {
fn is_minus_one(&self) -> bool;
}
impl IsMinusOne for i32 {
fn is_minus_one(&self) -> bool { *self == -1 }
}
impl IsMinusOne for isize {
fn is_minus_one(&self) -> bool { *self == -1 }
}
fn cvt<T: IsMinusOne>(t: T) -> ::io::Result<T> {
use std::io;
if t.is_minus_one() {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
}
| {
// Use pipe2 for atomically setting O_CLOEXEC if we can, but otherwise
// just fall back to using `pipe`.
dlsym!(fn pipe2(*mut c_int, c_int) -> c_int);
let mut pipes = [0; 2];
let flags = libc::O_NONBLOCK | libc::O_CLOEXEC;
unsafe {
match pipe2.get() {
Some(pipe2_fn) => {
cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?;
}
None => {
cvt(libc::pipe(pipes.as_mut_ptr()))?;
libc::fcntl(pipes[0], libc::F_SETFL, flags);
libc::fcntl(pipes[1], libc::F_SETFL, flags);
}
}
}
unsafe {
Ok((Io::from_raw_fd(pipes[0]), Io::from_raw_fd(pipes[1])))
}
} | identifier_body |
epitopefinder_plotdistributioncomparison.py | #!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
|
if __name__ == '__main__':
main() # run the script
| """Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n") | identifier_body |
epitopefinder_plotdistributioncomparison.py | #!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax') | out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script | random_line_split |
|
epitopefinder_plotdistributioncomparison.py | #!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def | ():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
| main | identifier_name |
epitopefinder_plotdistributioncomparison.py | #!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
|
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
| (site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n) | conditional_block |
mod.rs | mod handlers;
use crate::server::Server;
use log::{debug, error};
use protocol::frame::Framed;
use protocol::Decode;
use runtime::net::TcpStream;
use std::sync::Arc;
const SALT_LEN: usize = 32;
const AES_KEY_LEN: usize = 32;
const AES_IV_LEN: usize = 16;
#[derive(PartialEq, Eq, Debug)]
enum State {
Init,
Logged {
aes_key: [u8; AES_KEY_LEN],
ticket: String,
},
}
pub struct Session {
stream: Framed<TcpStream>,
state: State,
server: Arc<Server>,
}
impl Session {
pub fn new(stream: TcpStream, server: Arc<Server>) -> Self {
Self {
stream: Framed::new(stream),
state: State::Init,
server,
}
}
pub async fn run(mut self) -> std::io::Result<()> |
}
| {
use futures::StreamExt;
use protocol::messages::connection::HelloConnectMessage;
use protocol::messages::connection::IdentificationMessage;
use protocol::messages::connection::ServerSelectionMessage;
use protocol::messages::handshake::ProtocolRequired;
use rand::Rng;
debug!(
"new connection from {:?}",
self.stream.get_ref().peer_addr()
);
self.stream.write(ProtocolRequired {
required_version: 1924,
current_version: 1924,
_phantom: std::marker::PhantomData,
})?;
let salt: String = {
let mut rng = rand::thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.take(SALT_LEN)
.collect()
};
self.stream.write(HelloConnectMessage {
salt: &salt,
key: unsafe {
std::slice::from_raw_parts(
self.server.public_key.as_ptr() as *const i8,
self.server.public_key.len(),
)
},
})?;
self.stream.flush().await?;
while let Some(frame) = self.stream.next().await {
let frame = frame?;
debug!("received message with id {}", frame.id());
match frame.id() {
<IdentificationMessage<'_> as Decode<'_>>::ID => {
match IdentificationMessage::decode(&mut frame.payload()) {
Ok(msg) => self.handle_identification(msg).await?,
Err(err) => error!("decode error: {}", err),
}
}
<ServerSelectionMessage<'_> as Decode<'_>>::ID => {
match ServerSelectionMessage::decode(&mut frame.payload()) {
Ok(msg) => self.handle_server_selection(msg).await?,
Err(err) => error!("decode error: {}", err),
}
}
_ => (),
}
}
Ok(())
} | identifier_body |
mod.rs | mod handlers;
use crate::server::Server;
use log::{debug, error};
use protocol::frame::Framed;
use protocol::Decode;
use runtime::net::TcpStream;
use std::sync::Arc;
const SALT_LEN: usize = 32;
const AES_KEY_LEN: usize = 32;
const AES_IV_LEN: usize = 16;
#[derive(PartialEq, Eq, Debug)]
enum | {
Init,
Logged {
aes_key: [u8; AES_KEY_LEN],
ticket: String,
},
}
pub struct Session {
stream: Framed<TcpStream>,
state: State,
server: Arc<Server>,
}
impl Session {
pub fn new(stream: TcpStream, server: Arc<Server>) -> Self {
Self {
stream: Framed::new(stream),
state: State::Init,
server,
}
}
pub async fn run(mut self) -> std::io::Result<()> {
use futures::StreamExt;
use protocol::messages::connection::HelloConnectMessage;
use protocol::messages::connection::IdentificationMessage;
use protocol::messages::connection::ServerSelectionMessage;
use protocol::messages::handshake::ProtocolRequired;
use rand::Rng;
debug!(
"new connection from {:?}",
self.stream.get_ref().peer_addr()
);
self.stream.write(ProtocolRequired {
required_version: 1924,
current_version: 1924,
_phantom: std::marker::PhantomData,
})?;
let salt: String = {
let mut rng = rand::thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.take(SALT_LEN)
.collect()
};
self.stream.write(HelloConnectMessage {
salt: &salt,
key: unsafe {
std::slice::from_raw_parts(
self.server.public_key.as_ptr() as *const i8,
self.server.public_key.len(),
)
},
})?;
self.stream.flush().await?;
while let Some(frame) = self.stream.next().await {
let frame = frame?;
debug!("received message with id {}", frame.id());
match frame.id() {
<IdentificationMessage<'_> as Decode<'_>>::ID => {
match IdentificationMessage::decode(&mut frame.payload()) {
Ok(msg) => self.handle_identification(msg).await?,
Err(err) => error!("decode error: {}", err),
}
}
<ServerSelectionMessage<'_> as Decode<'_>>::ID => {
match ServerSelectionMessage::decode(&mut frame.payload()) {
Ok(msg) => self.handle_server_selection(msg).await?,
Err(err) => error!("decode error: {}", err),
}
}
_ => (),
}
}
Ok(())
}
}
| State | identifier_name |
mod.rs | mod handlers;
use crate::server::Server;
use log::{debug, error};
use protocol::frame::Framed;
use protocol::Decode;
use runtime::net::TcpStream;
use std::sync::Arc;
const SALT_LEN: usize = 32;
const AES_KEY_LEN: usize = 32;
const AES_IV_LEN: usize = 16;
#[derive(PartialEq, Eq, Debug)]
enum State {
Init,
Logged {
aes_key: [u8; AES_KEY_LEN],
ticket: String,
},
}
pub struct Session {
stream: Framed<TcpStream>,
state: State,
server: Arc<Server>,
}
impl Session {
pub fn new(stream: TcpStream, server: Arc<Server>) -> Self {
Self {
stream: Framed::new(stream),
state: State::Init,
server,
}
}
pub async fn run(mut self) -> std::io::Result<()> {
use futures::StreamExt;
use protocol::messages::connection::HelloConnectMessage;
use protocol::messages::connection::IdentificationMessage;
use protocol::messages::connection::ServerSelectionMessage;
use protocol::messages::handshake::ProtocolRequired;
use rand::Rng;
debug!(
"new connection from {:?}",
self.stream.get_ref().peer_addr()
);
self.stream.write(ProtocolRequired {
required_version: 1924,
current_version: 1924,
_phantom: std::marker::PhantomData,
})?;
let salt: String = {
let mut rng = rand::thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.take(SALT_LEN)
.collect()
};
self.stream.write(HelloConnectMessage {
salt: &salt,
key: unsafe {
std::slice::from_raw_parts(
self.server.public_key.as_ptr() as *const i8,
self.server.public_key.len(),
)
},
})?;
self.stream.flush().await?;
while let Some(frame) = self.stream.next().await {
let frame = frame?;
| match IdentificationMessage::decode(&mut frame.payload()) {
Ok(msg) => self.handle_identification(msg).await?,
Err(err) => error!("decode error: {}", err),
}
}
<ServerSelectionMessage<'_> as Decode<'_>>::ID => {
match ServerSelectionMessage::decode(&mut frame.payload()) {
Ok(msg) => self.handle_server_selection(msg).await?,
Err(err) => error!("decode error: {}", err),
}
}
_ => (),
}
}
Ok(())
}
} | debug!("received message with id {}", frame.id());
match frame.id() {
<IdentificationMessage<'_> as Decode<'_>>::ID => { | random_line_split |
main.rs | extern crate sdl2;
extern crate rustc_serialize;
use sdl2::keycode::KeyCode;
use sdl2::event::Event;
use sdl2::timer::get_ticks;
mod sprite;
mod assets;
mod draw;
mod player;
mod tile;
mod map;
mod physics;
use sprite::Sprite;
use player::Player;
use player::PlayerStatus;
use draw::Draw;
fn | () {
//initialize sdl
let sdl_context = sdl2::init().video().events().build()
.ok().expect("Failed to initialize SDL.");
//create a window
let window = sdl_context.window("Rust-Man", 640, 480)
.position_centered()
.build()
.ok().expect("Failed to create window.");
//create a renderer
let mut renderer = window.renderer().accelerated().build()
.ok().expect("Failed to create accelerated renderer.");
//create a new player
let mut player = Player::new(Sprite::new_from_file("sonic.bmp", &renderer),
PlayerStatus::Stationary);
//start drawing
let mut drawer = renderer.drawer();
drawer.clear();
drawer.present();
//event loop stuff
let mut running = true;
let mut event_pump = sdl_context.event_pump();
let mut prev_time = get_ticks();
let mut delta_t = get_ticks() - prev_time;
while running {
//timer stuff
delta_t = get_ticks() - prev_time;
//limit to 60fps
if delta_t > 16 {
//handle event queue
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: KeyCode::Escape, .. } => {
running = false
},
Event::KeyDown {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::MovingLeft
},
Event::KeyDown {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::MovingRight
},
Event::KeyUp {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::Decelerating
},
Event::KeyUp {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::Decelerating
},
_ => {}
}
}
//move player
player.update();
//draw
drawer.clear();
player.draw(&mut drawer, &None);
drawer.present();
//more timer stuff
prev_time = get_ticks();
}
}
println!("Goodbye, world!");
}
| main | identifier_name |
main.rs | extern crate sdl2;
extern crate rustc_serialize;
use sdl2::keycode::KeyCode;
use sdl2::event::Event;
use sdl2::timer::get_ticks;
mod sprite;
mod assets;
mod draw;
mod player;
mod tile;
mod map;
mod physics;
use sprite::Sprite;
use player::Player;
use player::PlayerStatus;
use draw::Draw;
fn main() | {
//initialize sdl
let sdl_context = sdl2::init().video().events().build()
.ok().expect("Failed to initialize SDL.");
//create a window
let window = sdl_context.window("Rust-Man", 640, 480)
.position_centered()
.build()
.ok().expect("Failed to create window.");
//create a renderer
let mut renderer = window.renderer().accelerated().build()
.ok().expect("Failed to create accelerated renderer.");
//create a new player
let mut player = Player::new(Sprite::new_from_file("sonic.bmp", &renderer),
PlayerStatus::Stationary);
//start drawing
let mut drawer = renderer.drawer();
drawer.clear();
drawer.present();
//event loop stuff
let mut running = true;
let mut event_pump = sdl_context.event_pump();
let mut prev_time = get_ticks();
let mut delta_t = get_ticks() - prev_time;
while running {
//timer stuff
delta_t = get_ticks() - prev_time;
//limit to 60fps
if delta_t > 16 {
//handle event queue
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: KeyCode::Escape, .. } => {
running = false
},
Event::KeyDown {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::MovingLeft
},
Event::KeyDown {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::MovingRight
},
Event::KeyUp {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::Decelerating
},
Event::KeyUp {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::Decelerating
},
_ => {}
}
}
//move player
player.update();
//draw
drawer.clear();
player.draw(&mut drawer, &None);
drawer.present();
//more timer stuff
prev_time = get_ticks();
}
}
println!("Goodbye, world!");
} | identifier_body |
|
main.rs | extern crate sdl2;
extern crate rustc_serialize;
use sdl2::keycode::KeyCode;
use sdl2::event::Event;
use sdl2::timer::get_ticks;
mod sprite;
mod assets;
mod draw;
mod player;
mod tile;
mod map;
mod physics;
use sprite::Sprite;
use player::Player;
use player::PlayerStatus;
use draw::Draw;
fn main() { | let window = sdl_context.window("Rust-Man", 640, 480)
.position_centered()
.build()
.ok().expect("Failed to create window.");
//create a renderer
let mut renderer = window.renderer().accelerated().build()
.ok().expect("Failed to create accelerated renderer.");
//create a new player
let mut player = Player::new(Sprite::new_from_file("sonic.bmp", &renderer),
PlayerStatus::Stationary);
//start drawing
let mut drawer = renderer.drawer();
drawer.clear();
drawer.present();
//event loop stuff
let mut running = true;
let mut event_pump = sdl_context.event_pump();
let mut prev_time = get_ticks();
let mut delta_t = get_ticks() - prev_time;
while running {
//timer stuff
delta_t = get_ticks() - prev_time;
//limit to 60fps
if delta_t > 16 {
//handle event queue
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: KeyCode::Escape, .. } => {
running = false
},
Event::KeyDown {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::MovingLeft
},
Event::KeyDown {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::MovingRight
},
Event::KeyUp {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::Decelerating
},
Event::KeyUp {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::Decelerating
},
_ => {}
}
}
//move player
player.update();
//draw
drawer.clear();
player.draw(&mut drawer, &None);
drawer.present();
//more timer stuff
prev_time = get_ticks();
}
}
println!("Goodbye, world!");
} | //initialize sdl
let sdl_context = sdl2::init().video().events().build()
.ok().expect("Failed to initialize SDL.");
//create a window | random_line_split |
main.rs | extern crate sdl2;
extern crate rustc_serialize;
use sdl2::keycode::KeyCode;
use sdl2::event::Event;
use sdl2::timer::get_ticks;
mod sprite;
mod assets;
mod draw;
mod player;
mod tile;
mod map;
mod physics;
use sprite::Sprite;
use player::Player;
use player::PlayerStatus;
use draw::Draw;
fn main() {
//initialize sdl
let sdl_context = sdl2::init().video().events().build()
.ok().expect("Failed to initialize SDL.");
//create a window
let window = sdl_context.window("Rust-Man", 640, 480)
.position_centered()
.build()
.ok().expect("Failed to create window.");
//create a renderer
let mut renderer = window.renderer().accelerated().build()
.ok().expect("Failed to create accelerated renderer.");
//create a new player
let mut player = Player::new(Sprite::new_from_file("sonic.bmp", &renderer),
PlayerStatus::Stationary);
//start drawing
let mut drawer = renderer.drawer();
drawer.clear();
drawer.present();
//event loop stuff
let mut running = true;
let mut event_pump = sdl_context.event_pump();
let mut prev_time = get_ticks();
let mut delta_t = get_ticks() - prev_time;
while running {
//timer stuff
delta_t = get_ticks() - prev_time;
//limit to 60fps
if delta_t > 16 |
}
println!("Goodbye, world!");
}
| {
//handle event queue
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: KeyCode::Escape, .. } => {
running = false
},
Event::KeyDown {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::MovingLeft
},
Event::KeyDown {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::MovingRight
},
Event::KeyUp {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::Decelerating
},
Event::KeyUp {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::Decelerating
},
_ => {}
}
}
//move player
player.update();
//draw
drawer.clear();
player.draw(&mut drawer, &None);
drawer.present();
//more timer stuff
prev_time = get_ticks();
} | conditional_block |
unsafe_lib.rs | use std::collections::HashMap;
use std::cell::{Cell, RefCell};
use std::hash::Hash;
use std::ops::{Index, IndexMut};
use std::fmt::Debug;
pub struct MutMap<K, V: Default> {
map: HashMap<K, RefCell<V>>,
}
impl<K: Eq + Hash, V: Default> MutMap<K, V> {
pub fn new() -> Self {
MutMap { map: HashMap::new() }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> Index<K> for MutMap<K, V> {
type Output = V;
fn index(&self, idx: K) -> &Self::Output {
let map = &self.map;
if !map.contains_key(&idx) {
panic!("{:?} not found", idx)
}
let cntp = map[&idx].as_ptr();
unsafe { &*cntp }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> IndexMut<K> for MutMap<K, V> {
fn index_mut(&mut self, idx: K) -> &mut Self::Output {
let map = &mut self.map;
if !map.contains_key(&idx) {
map.insert(idx.clone(), RefCell::new(V::default()));
}
let cntp = map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
// Pythonesque Counter implementation
// XXX Move to a separate module
static ZERO: usize = 0;
pub struct Counter<T: Hash + Eq + Clone> {
pub map: HashMap<T, Cell<usize>>,
}
impl<T: Hash + Eq + Clone> Counter<T> {
pub fn new() -> Self {
Counter { map: HashMap::new() }
}
pub fn len(&self) -> usize {
self.map.len()
}
pub fn remove(&mut self, idx: &T) {
self.map.remove(idx);
}
}
impl<T: Hash + Eq + Clone> Index<T> for Counter<T> {
type Output = usize;
fn index(&self, idx: T) -> &Self::Output |
}
impl<T: Hash + Eq + Clone> IndexMut<T> for Counter<T> {
fn index_mut(&mut self, idx: T) -> &mut Self::Output {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
} else {
self.map.insert(idx.clone(), Cell::new(0));
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
}
| {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &*cntp }
} else {
//map.insert(idx, Cell::new(0));
//let mut cntp = map[&idx].as_ptr();
//unsafe {& *cntp}
&ZERO
}
} | identifier_body |
unsafe_lib.rs | use std::collections::HashMap;
use std::cell::{Cell, RefCell};
use std::hash::Hash;
use std::ops::{Index, IndexMut};
use std::fmt::Debug;
pub struct MutMap<K, V: Default> {
map: HashMap<K, RefCell<V>>,
}
impl<K: Eq + Hash, V: Default> MutMap<K, V> {
pub fn new() -> Self {
MutMap { map: HashMap::new() }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> Index<K> for MutMap<K, V> {
type Output = V;
fn index(&self, idx: K) -> &Self::Output {
let map = &self.map;
if !map.contains_key(&idx) {
panic!("{:?} not found", idx)
}
let cntp = map[&idx].as_ptr();
unsafe { &*cntp }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> IndexMut<K> for MutMap<K, V> {
fn index_mut(&mut self, idx: K) -> &mut Self::Output {
let map = &mut self.map;
if !map.contains_key(&idx) {
map.insert(idx.clone(), RefCell::new(V::default()));
}
let cntp = map[&idx].as_ptr();
unsafe { &mut *cntp }
} | // Pythonesque Counter implementation
// XXX Move to a separate module
static ZERO: usize = 0;
pub struct Counter<T: Hash + Eq + Clone> {
pub map: HashMap<T, Cell<usize>>,
}
impl<T: Hash + Eq + Clone> Counter<T> {
pub fn new() -> Self {
Counter { map: HashMap::new() }
}
pub fn len(&self) -> usize {
self.map.len()
}
pub fn remove(&mut self, idx: &T) {
self.map.remove(idx);
}
}
impl<T: Hash + Eq + Clone> Index<T> for Counter<T> {
type Output = usize;
fn index(&self, idx: T) -> &Self::Output {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &*cntp }
} else {
//map.insert(idx, Cell::new(0));
//let mut cntp = map[&idx].as_ptr();
//unsafe {& *cntp}
&ZERO
}
}
}
impl<T: Hash + Eq + Clone> IndexMut<T> for Counter<T> {
fn index_mut(&mut self, idx: T) -> &mut Self::Output {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
} else {
self.map.insert(idx.clone(), Cell::new(0));
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
} | }
| random_line_split |
unsafe_lib.rs | use std::collections::HashMap;
use std::cell::{Cell, RefCell};
use std::hash::Hash;
use std::ops::{Index, IndexMut};
use std::fmt::Debug;
pub struct MutMap<K, V: Default> {
map: HashMap<K, RefCell<V>>,
}
impl<K: Eq + Hash, V: Default> MutMap<K, V> {
pub fn new() -> Self {
MutMap { map: HashMap::new() }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> Index<K> for MutMap<K, V> {
type Output = V;
fn index(&self, idx: K) -> &Self::Output {
let map = &self.map;
if !map.contains_key(&idx) {
panic!("{:?} not found", idx)
}
let cntp = map[&idx].as_ptr();
unsafe { &*cntp }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> IndexMut<K> for MutMap<K, V> {
fn index_mut(&mut self, idx: K) -> &mut Self::Output {
let map = &mut self.map;
if !map.contains_key(&idx) {
map.insert(idx.clone(), RefCell::new(V::default()));
}
let cntp = map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
// Pythonesque Counter implementation
// XXX Move to a separate module
static ZERO: usize = 0;
pub struct Counter<T: Hash + Eq + Clone> {
pub map: HashMap<T, Cell<usize>>,
}
impl<T: Hash + Eq + Clone> Counter<T> {
pub fn | () -> Self {
Counter { map: HashMap::new() }
}
pub fn len(&self) -> usize {
self.map.len()
}
pub fn remove(&mut self, idx: &T) {
self.map.remove(idx);
}
}
impl<T: Hash + Eq + Clone> Index<T> for Counter<T> {
type Output = usize;
fn index(&self, idx: T) -> &Self::Output {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &*cntp }
} else {
//map.insert(idx, Cell::new(0));
//let mut cntp = map[&idx].as_ptr();
//unsafe {& *cntp}
&ZERO
}
}
}
impl<T: Hash + Eq + Clone> IndexMut<T> for Counter<T> {
fn index_mut(&mut self, idx: T) -> &mut Self::Output {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
} else {
self.map.insert(idx.clone(), Cell::new(0));
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
}
| new | identifier_name |
unsafe_lib.rs | use std::collections::HashMap;
use std::cell::{Cell, RefCell};
use std::hash::Hash;
use std::ops::{Index, IndexMut};
use std::fmt::Debug;
pub struct MutMap<K, V: Default> {
map: HashMap<K, RefCell<V>>,
}
impl<K: Eq + Hash, V: Default> MutMap<K, V> {
pub fn new() -> Self {
MutMap { map: HashMap::new() }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> Index<K> for MutMap<K, V> {
type Output = V;
fn index(&self, idx: K) -> &Self::Output {
let map = &self.map;
if !map.contains_key(&idx) {
panic!("{:?} not found", idx)
}
let cntp = map[&idx].as_ptr();
unsafe { &*cntp }
}
}
impl<K: Hash + Eq + Clone + Debug, V: Default> IndexMut<K> for MutMap<K, V> {
fn index_mut(&mut self, idx: K) -> &mut Self::Output {
let map = &mut self.map;
if !map.contains_key(&idx) {
map.insert(idx.clone(), RefCell::new(V::default()));
}
let cntp = map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
// Pythonesque Counter implementation
// XXX Move to a separate module
static ZERO: usize = 0;
pub struct Counter<T: Hash + Eq + Clone> {
pub map: HashMap<T, Cell<usize>>,
}
impl<T: Hash + Eq + Clone> Counter<T> {
pub fn new() -> Self {
Counter { map: HashMap::new() }
}
pub fn len(&self) -> usize {
self.map.len()
}
pub fn remove(&mut self, idx: &T) {
self.map.remove(idx);
}
}
impl<T: Hash + Eq + Clone> Index<T> for Counter<T> {
type Output = usize;
fn index(&self, idx: T) -> &Self::Output {
if self.map.contains_key(&idx) {
let cntp = self.map[&idx].as_ptr();
unsafe { &*cntp }
} else {
//map.insert(idx, Cell::new(0));
//let mut cntp = map[&idx].as_ptr();
//unsafe {& *cntp}
&ZERO
}
}
}
impl<T: Hash + Eq + Clone> IndexMut<T> for Counter<T> {
fn index_mut(&mut self, idx: T) -> &mut Self::Output {
if self.map.contains_key(&idx) | else {
self.map.insert(idx.clone(), Cell::new(0));
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
}
}
}
| {
let cntp = self.map[&idx].as_ptr();
unsafe { &mut *cntp }
} | conditional_block |
index.tsx | import { QueryFilterActions, logQueryFilterReducer } from './logQueryFilterReducer';
import { LogQueryActions, logQueryReducer } from './logQueryReducer';
type State = {
queryFilters: LogQueryFilter;
savedQueries: LogQuery[];
}
const initialState: State = {
queryFilters: { queryText: undefined, dateFilter: undefined, dateRangeFilter: undefined, levelFilter: undefined, exceptionsOnly: false },
savedQueries: [],
};
const AppContext = createContext<{ state: State, dispatch: React.Dispatch<QueryFilterActions | LogQueryActions> }>({
state: initialState,
dispatch: () => null
});
const mainReducer = ({ queryFilters, savedQueries }: State, action: QueryFilterActions | LogQueryActions) => ({
queryFilters: logQueryFilterReducer(queryFilters, action as QueryFilterActions),
savedQueries: logQueryReducer(savedQueries, action as LogQueryActions)
});
const AppProvider: React.FC = ({ children }) => {
const [state, dispatch] = useReducer(mainReducer, initialState);
return (
<AppContext.Provider value={{ state, dispatch }}>
{children}
</AppContext.Provider>
)
};
export { AppProvider, AppContext }; | import React, { createContext, useReducer } from 'react';
import { LogQueryFilter, LogQuery } from '../Models'; | random_line_split |
|
cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
import json
from os.path import dirname, join
import click
from flask.cli import with_appcontext
from invenio_communities.utils import initialize_communities_bucket
from sqlalchemy.orm.exc import NoResultFound
from .communities import loadcommunities
from .files import loaddemofiles, loadlocation
from .grants import loadfp6funders, loadfp6grants
from .licenses import loadlicenses, matchlicenses
from .oai import loadoaisets
from .pages import loadpages
from .records import loaddemorecords
@click.group()
def fixtures():
"""Command for loading fixture data."""
@fixtures.command()
@with_appcontext
def init():
"""Load basic data."""
loadpages()
loadlocation()
loadoaisets()
initialize_communities_bucket()
| @click.option('--force', '-f', is_flag=True, default=False)
@with_appcontext
def loadpages_cli(force):
"""Load pages."""
loadpages(force=force)
click.secho('Created pages', fg='green')
@fixtures.command('loadlocation')
@with_appcontext
def loadlocation_cli():
"""Load data store location."""
loc = loadlocation()
click.secho('Created location {0}'.format(loc.uri), fg='green')
@fixtures.command('loadoaisets')
@with_appcontext
def loadoaisets_cli():
"""Load OAI-PMH sets."""
sets_count = loadoaisets()
click.secho('Created {0} OAI-PMH sets'.format(len(sets_count)), fg='green')
@fixtures.command('loadfp6grants')
@with_appcontext
def loadfp6grants_cli():
"""Load one-off grants."""
loadfp6grants()
@fixtures.command('loadfp6funders')
@with_appcontext
def loadfp6funders_cli():
"""Load one-off funders."""
loadfp6funders()
@fixtures.command('loaddemorecords')
@with_appcontext
def loaddemorecords_cli():
"""Load demo records."""
click.echo('Loading demo data...')
with open(join(dirname(__file__), 'data/records.json'), 'r') as fp:
data = json.load(fp)
click.echo('Sending tasks to queue...')
with click.progressbar(data) as records:
loaddemorecords(records)
click.echo("1. Start Celery:")
click.echo(" celery worker -A zenodo.celery -l INFO")
click.echo("2. After tasks have been processed start reindexing:")
click.echo(" zenodo migration recordsrun")
click.echo(" zenodo migration reindex recid")
click.echo(" zenodo index run -d -c 4")
@fixtures.command('loaddemofiles')
@click.argument('source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@with_appcontext
def loaddemofiles_cli(source):
"""Load demo files."""
loaddemofiles(source)
@fixtures.command('loadlicenses')
@with_appcontext
def loadlicenses_cli():
"""Load Zenodo licenses."""
loadlicenses()
@fixtures.command('matchlicenses')
@click.argument('legacy_source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@click.argument('od_source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@click.argument('destination', type=click.Path(exists=False, dir_okay=False))
def matchlicenses_cli(legacy_source, od_source, destination):
"""Match legacy Zenodo licenses with OpenDefinition.org licenses."""
matchlicenses(legacy_source, od_source, destination)
@fixtures.command('loadcommunities')
@click.argument('owner_email')
@with_appcontext
def loadcommunities_cli(owner_email):
"""Load Zenodo communities."""
try:
loadcommunities(owner_email)
except NoResultFound:
click.echo("Error: Provided owner email does not exist.") | @fixtures.command('loadpages') | random_line_split |
cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
import json
from os.path import dirname, join
import click
from flask.cli import with_appcontext
from invenio_communities.utils import initialize_communities_bucket
from sqlalchemy.orm.exc import NoResultFound
from .communities import loadcommunities
from .files import loaddemofiles, loadlocation
from .grants import loadfp6funders, loadfp6grants
from .licenses import loadlicenses, matchlicenses
from .oai import loadoaisets
from .pages import loadpages
from .records import loaddemorecords
@click.group()
def fixtures():
"""Command for loading fixture data."""
@fixtures.command()
@with_appcontext
def init():
"""Load basic data."""
loadpages()
loadlocation()
loadoaisets()
initialize_communities_bucket()
@fixtures.command('loadpages')
@click.option('--force', '-f', is_flag=True, default=False)
@with_appcontext
def | (force):
"""Load pages."""
loadpages(force=force)
click.secho('Created pages', fg='green')
@fixtures.command('loadlocation')
@with_appcontext
def loadlocation_cli():
"""Load data store location."""
loc = loadlocation()
click.secho('Created location {0}'.format(loc.uri), fg='green')
@fixtures.command('loadoaisets')
@with_appcontext
def loadoaisets_cli():
"""Load OAI-PMH sets."""
sets_count = loadoaisets()
click.secho('Created {0} OAI-PMH sets'.format(len(sets_count)), fg='green')
@fixtures.command('loadfp6grants')
@with_appcontext
def loadfp6grants_cli():
"""Load one-off grants."""
loadfp6grants()
@fixtures.command('loadfp6funders')
@with_appcontext
def loadfp6funders_cli():
"""Load one-off funders."""
loadfp6funders()
@fixtures.command('loaddemorecords')
@with_appcontext
def loaddemorecords_cli():
"""Load demo records."""
click.echo('Loading demo data...')
with open(join(dirname(__file__), 'data/records.json'), 'r') as fp:
data = json.load(fp)
click.echo('Sending tasks to queue...')
with click.progressbar(data) as records:
loaddemorecords(records)
click.echo("1. Start Celery:")
click.echo(" celery worker -A zenodo.celery -l INFO")
click.echo("2. After tasks have been processed start reindexing:")
click.echo(" zenodo migration recordsrun")
click.echo(" zenodo migration reindex recid")
click.echo(" zenodo index run -d -c 4")
@fixtures.command('loaddemofiles')
@click.argument('source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@with_appcontext
def loaddemofiles_cli(source):
"""Load demo files."""
loaddemofiles(source)
@fixtures.command('loadlicenses')
@with_appcontext
def loadlicenses_cli():
"""Load Zenodo licenses."""
loadlicenses()
@fixtures.command('matchlicenses')
@click.argument('legacy_source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@click.argument('od_source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@click.argument('destination', type=click.Path(exists=False, dir_okay=False))
def matchlicenses_cli(legacy_source, od_source, destination):
"""Match legacy Zenodo licenses with OpenDefinition.org licenses."""
matchlicenses(legacy_source, od_source, destination)
@fixtures.command('loadcommunities')
@click.argument('owner_email')
@with_appcontext
def loadcommunities_cli(owner_email):
"""Load Zenodo communities."""
try:
loadcommunities(owner_email)
except NoResultFound:
click.echo("Error: Provided owner email does not exist.")
| loadpages_cli | identifier_name |
cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
import json
from os.path import dirname, join
import click
from flask.cli import with_appcontext
from invenio_communities.utils import initialize_communities_bucket
from sqlalchemy.orm.exc import NoResultFound
from .communities import loadcommunities
from .files import loaddemofiles, loadlocation
from .grants import loadfp6funders, loadfp6grants
from .licenses import loadlicenses, matchlicenses
from .oai import loadoaisets
from .pages import loadpages
from .records import loaddemorecords
@click.group()
def fixtures():
"""Command for loading fixture data."""
@fixtures.command()
@with_appcontext
def init():
"""Load basic data."""
loadpages()
loadlocation()
loadoaisets()
initialize_communities_bucket()
@fixtures.command('loadpages')
@click.option('--force', '-f', is_flag=True, default=False)
@with_appcontext
def loadpages_cli(force):
"""Load pages."""
loadpages(force=force)
click.secho('Created pages', fg='green')
@fixtures.command('loadlocation')
@with_appcontext
def loadlocation_cli():
"""Load data store location."""
loc = loadlocation()
click.secho('Created location {0}'.format(loc.uri), fg='green')
@fixtures.command('loadoaisets')
@with_appcontext
def loadoaisets_cli():
"""Load OAI-PMH sets."""
sets_count = loadoaisets()
click.secho('Created {0} OAI-PMH sets'.format(len(sets_count)), fg='green')
@fixtures.command('loadfp6grants')
@with_appcontext
def loadfp6grants_cli():
"""Load one-off grants."""
loadfp6grants()
@fixtures.command('loadfp6funders')
@with_appcontext
def loadfp6funders_cli():
"""Load one-off funders."""
loadfp6funders()
@fixtures.command('loaddemorecords')
@with_appcontext
def loaddemorecords_cli():
|
@fixtures.command('loaddemofiles')
@click.argument('source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@with_appcontext
def loaddemofiles_cli(source):
"""Load demo files."""
loaddemofiles(source)
@fixtures.command('loadlicenses')
@with_appcontext
def loadlicenses_cli():
"""Load Zenodo licenses."""
loadlicenses()
@fixtures.command('matchlicenses')
@click.argument('legacy_source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@click.argument('od_source', type=click.Path(exists=True, dir_okay=False,
resolve_path=True))
@click.argument('destination', type=click.Path(exists=False, dir_okay=False))
def matchlicenses_cli(legacy_source, od_source, destination):
"""Match legacy Zenodo licenses with OpenDefinition.org licenses."""
matchlicenses(legacy_source, od_source, destination)
@fixtures.command('loadcommunities')
@click.argument('owner_email')
@with_appcontext
def loadcommunities_cli(owner_email):
"""Load Zenodo communities."""
try:
loadcommunities(owner_email)
except NoResultFound:
click.echo("Error: Provided owner email does not exist.")
| """Load demo records."""
click.echo('Loading demo data...')
with open(join(dirname(__file__), 'data/records.json'), 'r') as fp:
data = json.load(fp)
click.echo('Sending tasks to queue...')
with click.progressbar(data) as records:
loaddemorecords(records)
click.echo("1. Start Celery:")
click.echo(" celery worker -A zenodo.celery -l INFO")
click.echo("2. After tasks have been processed start reindexing:")
click.echo(" zenodo migration recordsrun")
click.echo(" zenodo migration reindex recid")
click.echo(" zenodo index run -d -c 4") | identifier_body |
c_win32.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! C definitions used by libnative that don't belong in liblibc
#![allow(type_overflow)]
use libc;
pub static WSADESCRIPTION_LEN: uint = 256;
pub static WSASYS_STATUS_LEN: uint = 128;
pub static FIONBIO: libc::c_long = 0x8004667e;
static FD_SETSIZE: uint = 64;
pub static MSG_DONTWAIT: libc::c_int = 0;
#[repr(C)]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub szDescription: [u8, ..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8, ..WSASYS_STATUS_LEN + 1],
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
}
pub type LPWSADATA = *mut WSADATA;
#[repr(C)]
pub struct fd_set {
fd_count: libc::c_uint,
fd_array: [libc::SOCKET, ..FD_SETSIZE],
}
pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) {
set.fd_array[set.fd_count as uint] = s;
set.fd_count += 1;
}
#[link(name = "ws2_32")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn WSAGetLastError() -> libc::c_int;
pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long,
argp: *mut libc::c_ulong) -> libc::c_int;
pub fn select(nfds: libc::c_int,
readfds: *mut fd_set,
writefds: *mut fd_set,
exceptfds: *mut fd_set,
timeout: *mut libc::timeval) -> libc::c_int;
pub fn getsockopt(sockfd: libc::SOCKET,
level: libc::c_int,
optname: libc::c_int,
optval: *mut libc::c_char,
optlen: *mut libc::c_int) -> libc::c_int;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
}
pub mod compat {
use std::intrinsics::{atomic_store_relaxed, transmute};
use std::iter::Iterator;
use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
extern "system" {
fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
}
// store_func() is idempotent, so using relaxed ordering for the atomics
// should be enough. This way, calling a function in this compatibility
// layer (after it's loaded) shouldn't be any slower than a regular DLL
// call.
unsafe fn store_func(ptr: *mut uint, module: &str, symbol: &str, fallback: uint) {
let module: Vec<u16> = module.utf16_units().collect();
let module = module.append_one(0);
symbol.with_c_str(|symbol| {
let handle = GetModuleHandleW(module.as_ptr());
let func: uint = transmute(GetProcAddress(handle, symbol));
atomic_store_relaxed(ptr, if func == 0 | else {
func
})
})
}
/// Macro for creating a compatibility fallback for a Windows function
///
/// # Example
/// ```
/// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
/// // Fallback implementation
/// })
/// ```
///
/// Note that arguments unused by the fallback implementation should not be called `_` as
/// they are used to be passed to the real function if available.
macro_rules! compat_fn(
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
-> $rettype:ty $fallback:block) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
static mut ptr: extern "system" fn($($argname: $argtype),*) -> $rettype = thunk;
extern "system" fn thunk($($argname: $argtype),*) -> $rettype {
unsafe {
::io::c::compat::store_func(&mut ptr as *mut _ as *mut uint,
stringify!($module),
stringify!($symbol),
fallback as uint);
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
}
extern "system" fn fallback($($argname: $argtype),*) -> $rettype $fallback
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
);
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) $fallback:block) => (
compat_fn!($module::$symbol($($argname: $argtype),*) -> () $fallback)
)
)
/// Compatibility layer for functions in `kernel32.dll`
///
/// Latest versions of Windows this is needed for:
///
/// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
/// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
pub mod kernel32 {
use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
extern "system" {
fn SetLastError(dwErrCode: DWORD);
}
compat_fn!(kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
_lpTargetFileName: LPCWSTR,
_dwFlags: DWORD) -> BOOLEAN {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
compat_fn!(kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
_lpszFilePath: LPCWSTR,
_cchFilePath: DWORD,
_dwFlags: DWORD) -> DWORD {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
}
}
| {
fallback
} | conditional_block |
c_win32.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! C definitions used by libnative that don't belong in liblibc
#![allow(type_overflow)]
use libc;
pub static WSADESCRIPTION_LEN: uint = 256;
pub static WSASYS_STATUS_LEN: uint = 128;
pub static FIONBIO: libc::c_long = 0x8004667e;
static FD_SETSIZE: uint = 64;
pub static MSG_DONTWAIT: libc::c_int = 0;
#[repr(C)]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub szDescription: [u8, ..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8, ..WSASYS_STATUS_LEN + 1],
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
}
pub type LPWSADATA = *mut WSADATA;
#[repr(C)]
pub struct fd_set {
fd_count: libc::c_uint,
fd_array: [libc::SOCKET, ..FD_SETSIZE],
}
pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) {
set.fd_array[set.fd_count as uint] = s;
set.fd_count += 1;
}
#[link(name = "ws2_32")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn WSAGetLastError() -> libc::c_int;
pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long,
argp: *mut libc::c_ulong) -> libc::c_int;
pub fn select(nfds: libc::c_int,
readfds: *mut fd_set,
writefds: *mut fd_set,
exceptfds: *mut fd_set,
timeout: *mut libc::timeval) -> libc::c_int;
pub fn getsockopt(sockfd: libc::SOCKET,
level: libc::c_int,
optname: libc::c_int,
optval: *mut libc::c_char,
optlen: *mut libc::c_int) -> libc::c_int;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
}
pub mod compat {
use std::intrinsics::{atomic_store_relaxed, transmute};
use std::iter::Iterator;
use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
extern "system" {
fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
}
// store_func() is idempotent, so using relaxed ordering for the atomics
// should be enough. This way, calling a function in this compatibility
// layer (after it's loaded) shouldn't be any slower than a regular DLL
// call.
unsafe fn store_func(ptr: *mut uint, module: &str, symbol: &str, fallback: uint) {
let module: Vec<u16> = module.utf16_units().collect();
let module = module.append_one(0);
symbol.with_c_str(|symbol| {
let handle = GetModuleHandleW(module.as_ptr());
let func: uint = transmute(GetProcAddress(handle, symbol));
atomic_store_relaxed(ptr, if func == 0 {
fallback
} else {
func
})
})
}
/// Macro for creating a compatibility fallback for a Windows function
///
/// # Example
/// ```
/// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
/// // Fallback implementation
/// })
/// ```
///
/// Note that arguments unused by the fallback implementation should not be called `_` as
/// they are used to be passed to the real function if available.
macro_rules! compat_fn(
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
-> $rettype:ty $fallback:block) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
static mut ptr: extern "system" fn($($argname: $argtype),*) -> $rettype = thunk;
extern "system" fn thunk($($argname: $argtype),*) -> $rettype {
unsafe {
::io::c::compat::store_func(&mut ptr as *mut _ as *mut uint,
stringify!($module),
stringify!($symbol),
fallback as uint);
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
}
extern "system" fn fallback($($argname: $argtype),*) -> $rettype $fallback
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
);
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) $fallback:block) => (
compat_fn!($module::$symbol($($argname: $argtype),*) -> () $fallback)
)
)
/// Compatibility layer for functions in `kernel32.dll`
///
/// Latest versions of Windows this is needed for:
///
/// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
/// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
pub mod kernel32 {
use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
extern "system" {
fn SetLastError(dwErrCode: DWORD);
}
| unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
compat_fn!(kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
_lpszFilePath: LPCWSTR,
_cchFilePath: DWORD,
_dwFlags: DWORD) -> DWORD {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
}
} | compat_fn!(kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
_lpTargetFileName: LPCWSTR,
_dwFlags: DWORD) -> BOOLEAN { | random_line_split |
c_win32.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! C definitions used by libnative that don't belong in liblibc
#![allow(type_overflow)]
use libc;
pub static WSADESCRIPTION_LEN: uint = 256;
pub static WSASYS_STATUS_LEN: uint = 128;
pub static FIONBIO: libc::c_long = 0x8004667e;
static FD_SETSIZE: uint = 64;
pub static MSG_DONTWAIT: libc::c_int = 0;
#[repr(C)]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub szDescription: [u8, ..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8, ..WSASYS_STATUS_LEN + 1],
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
}
pub type LPWSADATA = *mut WSADATA;
#[repr(C)]
pub struct fd_set {
fd_count: libc::c_uint,
fd_array: [libc::SOCKET, ..FD_SETSIZE],
}
pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) {
set.fd_array[set.fd_count as uint] = s;
set.fd_count += 1;
}
#[link(name = "ws2_32")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn WSAGetLastError() -> libc::c_int;
pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long,
argp: *mut libc::c_ulong) -> libc::c_int;
pub fn select(nfds: libc::c_int,
readfds: *mut fd_set,
writefds: *mut fd_set,
exceptfds: *mut fd_set,
timeout: *mut libc::timeval) -> libc::c_int;
pub fn getsockopt(sockfd: libc::SOCKET,
level: libc::c_int,
optname: libc::c_int,
optval: *mut libc::c_char,
optlen: *mut libc::c_int) -> libc::c_int;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
}
pub mod compat {
use std::intrinsics::{atomic_store_relaxed, transmute};
use std::iter::Iterator;
use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
extern "system" {
fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
}
// store_func() is idempotent, so using relaxed ordering for the atomics
// should be enough. This way, calling a function in this compatibility
// layer (after it's loaded) shouldn't be any slower than a regular DLL
// call.
unsafe fn | (ptr: *mut uint, module: &str, symbol: &str, fallback: uint) {
let module: Vec<u16> = module.utf16_units().collect();
let module = module.append_one(0);
symbol.with_c_str(|symbol| {
let handle = GetModuleHandleW(module.as_ptr());
let func: uint = transmute(GetProcAddress(handle, symbol));
atomic_store_relaxed(ptr, if func == 0 {
fallback
} else {
func
})
})
}
/// Macro for creating a compatibility fallback for a Windows function
///
/// # Example
/// ```
/// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
/// // Fallback implementation
/// })
/// ```
///
/// Note that arguments unused by the fallback implementation should not be called `_` as
/// they are used to be passed to the real function if available.
macro_rules! compat_fn(
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
-> $rettype:ty $fallback:block) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
static mut ptr: extern "system" fn($($argname: $argtype),*) -> $rettype = thunk;
extern "system" fn thunk($($argname: $argtype),*) -> $rettype {
unsafe {
::io::c::compat::store_func(&mut ptr as *mut _ as *mut uint,
stringify!($module),
stringify!($symbol),
fallback as uint);
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
}
extern "system" fn fallback($($argname: $argtype),*) -> $rettype $fallback
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
);
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) $fallback:block) => (
compat_fn!($module::$symbol($($argname: $argtype),*) -> () $fallback)
)
)
/// Compatibility layer for functions in `kernel32.dll`
///
/// Latest versions of Windows this is needed for:
///
/// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
/// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
pub mod kernel32 {
use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
extern "system" {
fn SetLastError(dwErrCode: DWORD);
}
compat_fn!(kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
_lpTargetFileName: LPCWSTR,
_dwFlags: DWORD) -> BOOLEAN {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
compat_fn!(kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
_lpszFilePath: LPCWSTR,
_cchFilePath: DWORD,
_dwFlags: DWORD) -> DWORD {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
}
}
| store_func | identifier_name |
viewporter-tests.ts | /// <reference types="jquery"/>
/// <reference types="google-maps"/>
function test_map() {
viewporter.preventPageScroll = true;
var eventName = viewporter.ACTIVE ? 'viewportready' : "load";
google.maps.event.addDomListener(window, eventName, function () {
var map = new google.maps.Map(document.getElementById("map"), {
zoom: 2,
center: new google.maps.LatLng(10, 0),
mapTypeId: google.maps.MapTypeId.ROADMAP
});
window.addEventListener("resize", viewporter.refresh);
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function (position) {
map.setCenter(new google.maps.LatLng(
position.coords.latitude,
position.coords.longitude
));
map.setZoom(14);
});
}
});
}
function test_resize() {
viewporter.preventPageScroll = true;
document.addEventListener('DOMContentLoaded', function () {
// listen for "resize" events and trigger "refresh" method.
window.addEventListener("resize", function () {
viewporter.refresh();
document.getElementById("events").innerHTML += "resize<br>";
});
if (navigator.geolocation) {
function success(position) |
navigator.geolocation.getCurrentPosition(success);
}
});
}
function test_swipey() {
function rainbow(numOfSteps, step) {
var r, g, b, h = step / numOfSteps, i = ~~(h * 6), f = h * 6 - i, q = 1 - f;
switch (i % 6) {
case 0: r = 1, g = f, b = 0; break;
case 1: r = q, g = 1, b = 0; break;
case 2: r = 0, g = 1, b = f; break;
case 3: r = 0, g = q, b = 1; break;
case 4: r = f, g = 0, b = 1; break;
case 5: r = 1, g = 0, b = q; break;
}
return [((~ ~(r * 255))), (~ ~(g * 255)), (~ ~(b * 255))];
}
function drawingPointer(context, color) {
var clickX = [];
var clickY = [];
var clickDrag = [];
this.painting = false;
var timestamp = null;
this.addPoint = function (x, y, dragging) {
clickX.push(x);
clickY.push(y);
clickDrag.push(dragging);
};
this.start = function () {
this.clear();
this.painting = true;
};
this.clear = function () {
clickX = [];
clickY = [];
clickDrag = [];
timestamp = null;
};
this.stop = function () {
this.painting = false;
timestamp = Date.now();
};
this.redraw = function () {
var opacity = timestamp ? (300 - (Date.now() - timestamp)) / 300 : 1;
if (opacity <= 0) {
this.clear();
return;
}
context.strokeStyle = "rgba(" + color[0] + "," + color[1] + "," + color[2] + "," + opacity + ")";
context.lineJoin = "round";
context.lineWidth = ((<any>window).devicePixelRatio || 1) * 5;
for (var i = 0; i < clickX.length; i++) {
context.beginPath();
if (clickDrag[i] && i) {
context.moveTo(clickX[i - 1], clickY[i - 1]);
} else {
context.moveTo(clickX[i] - 1, clickY[i]);
}
context.lineTo(clickX[i], clickY[i]);
context.closePath();
context.stroke();
}
};
};
$(window).bind(viewporter.ACTIVE ? 'viewportready' : 'load', function () {
var canvas = <HTMLCanvasElement>$('canvas')[0];
var context = canvas.getContext('2d');
var iOS = (/iphone|ipad/i).test(navigator.userAgent);
var pointers = {};
// handle resizing / rotating of the viewport
var width, height;
$(window).bind(viewporter.ACTIVE ? 'viewportchange' : 'resize', function () {
width = canvas.width = window.innerWidth;
height = canvas.height = window.innerHeight
}).trigger(viewporter.ACTIVE ? 'viewportchange' : 'resize');
$('canvas').bind(iOS ? 'touchstart' : 'mousedown', function (e) {
e.preventDefault();
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
// if no pointer has been created for this finger yet, do it
if (!pointers[identifier]) {
pointers[identifier] = new drawingPointer(context, rainbow(8, Object.keys(pointers).length));
}
pointers[identifier].start();
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY);
}
});
$('canvas').bind(iOS ? 'touchmove' : 'mousemove', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier] && pointers[identifier].painting) {
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY, true);
}
}
});
$('canvas').bind(iOS ? 'touchend' : 'mouseup', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier]) {
pointers[identifier].stop();
(function (identifier) {
setTimeout(function () {
delete pointers[identifier];
}, 300);
})(identifier);
}
}
});
window.setInterval(function () {
context.clearRect(0, 0, width, height);
var counter = 0, ratio = (<any>window).devicePixelRatio || 1;
for (var identifier in pointers) {
pointers[identifier].redraw();
counter++;
}
context.font = (10 * ratio) + 'pt Arial';
context.fillText(counter + ' active pointers', 15 * ratio, 25 * ratio);
}, 16);
});
} | {
var coords = [position.coords.latitude, position.coords.longitude]
document.getElementById("coords").innerHTML = coords.join(", ");
} | identifier_body |
viewporter-tests.ts | /// <reference types="jquery"/>
/// <reference types="google-maps"/>
function test_map() {
viewporter.preventPageScroll = true;
var eventName = viewporter.ACTIVE ? 'viewportready' : "load";
google.maps.event.addDomListener(window, eventName, function () {
var map = new google.maps.Map(document.getElementById("map"), {
zoom: 2,
center: new google.maps.LatLng(10, 0),
mapTypeId: google.maps.MapTypeId.ROADMAP
});
window.addEventListener("resize", viewporter.refresh);
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function (position) {
map.setCenter(new google.maps.LatLng(
position.coords.latitude,
position.coords.longitude
));
map.setZoom(14);
});
}
});
}
function test_resize() {
viewporter.preventPageScroll = true;
document.addEventListener('DOMContentLoaded', function () {
// listen for "resize" events and trigger "refresh" method.
window.addEventListener("resize", function () {
viewporter.refresh();
document.getElementById("events").innerHTML += "resize<br>";
});
if (navigator.geolocation) {
function success(position) {
var coords = [position.coords.latitude, position.coords.longitude]
document.getElementById("coords").innerHTML = coords.join(", ");
}
navigator.geolocation.getCurrentPosition(success);
}
});
}
function test_swipey() {
function rainbow(numOfSteps, step) {
var r, g, b, h = step / numOfSteps, i = ~~(h * 6), f = h * 6 - i, q = 1 - f;
switch (i % 6) {
case 0: r = 1, g = f, b = 0; break;
case 1: r = q, g = 1, b = 0; break;
case 2: r = 0, g = 1, b = f; break;
case 3: r = 0, g = q, b = 1; break;
case 4: r = f, g = 0, b = 1; break;
case 5: r = 1, g = 0, b = q; break;
}
return [((~ ~(r * 255))), (~ ~(g * 255)), (~ ~(b * 255))];
}
function drawingPointer(context, color) {
var clickX = [];
var clickY = [];
var clickDrag = [];
this.painting = false;
var timestamp = null;
this.addPoint = function (x, y, dragging) {
clickX.push(x);
clickY.push(y);
clickDrag.push(dragging);
};
this.start = function () {
this.clear();
this.painting = true;
};
this.clear = function () {
clickX = [];
clickY = [];
clickDrag = [];
timestamp = null;
};
this.stop = function () {
this.painting = false;
timestamp = Date.now();
};
this.redraw = function () {
var opacity = timestamp ? (300 - (Date.now() - timestamp)) / 300 : 1;
if (opacity <= 0) {
this.clear();
return;
}
context.strokeStyle = "rgba(" + color[0] + "," + color[1] + "," + color[2] + "," + opacity + ")";
context.lineJoin = "round";
context.lineWidth = ((<any>window).devicePixelRatio || 1) * 5;
for (var i = 0; i < clickX.length; i++) {
context.beginPath();
if (clickDrag[i] && i) {
context.moveTo(clickX[i - 1], clickY[i - 1]);
} else |
context.lineTo(clickX[i], clickY[i]);
context.closePath();
context.stroke();
}
};
};
$(window).bind(viewporter.ACTIVE ? 'viewportready' : 'load', function () {
var canvas = <HTMLCanvasElement>$('canvas')[0];
var context = canvas.getContext('2d');
var iOS = (/iphone|ipad/i).test(navigator.userAgent);
var pointers = {};
// handle resizing / rotating of the viewport
var width, height;
$(window).bind(viewporter.ACTIVE ? 'viewportchange' : 'resize', function () {
width = canvas.width = window.innerWidth;
height = canvas.height = window.innerHeight
}).trigger(viewporter.ACTIVE ? 'viewportchange' : 'resize');
$('canvas').bind(iOS ? 'touchstart' : 'mousedown', function (e) {
e.preventDefault();
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
// if no pointer has been created for this finger yet, do it
if (!pointers[identifier]) {
pointers[identifier] = new drawingPointer(context, rainbow(8, Object.keys(pointers).length));
}
pointers[identifier].start();
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY);
}
});
$('canvas').bind(iOS ? 'touchmove' : 'mousemove', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier] && pointers[identifier].painting) {
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY, true);
}
}
});
$('canvas').bind(iOS ? 'touchend' : 'mouseup', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier]) {
pointers[identifier].stop();
(function (identifier) {
setTimeout(function () {
delete pointers[identifier];
}, 300);
})(identifier);
}
}
});
window.setInterval(function () {
context.clearRect(0, 0, width, height);
var counter = 0, ratio = (<any>window).devicePixelRatio || 1;
for (var identifier in pointers) {
pointers[identifier].redraw();
counter++;
}
context.font = (10 * ratio) + 'pt Arial';
context.fillText(counter + ' active pointers', 15 * ratio, 25 * ratio);
}, 16);
});
} | {
context.moveTo(clickX[i] - 1, clickY[i]);
} | conditional_block |
viewporter-tests.ts | /// <reference types="jquery"/>
/// <reference types="google-maps"/>
function test_map() {
viewporter.preventPageScroll = true;
var eventName = viewporter.ACTIVE ? 'viewportready' : "load";
google.maps.event.addDomListener(window, eventName, function () {
var map = new google.maps.Map(document.getElementById("map"), {
zoom: 2,
center: new google.maps.LatLng(10, 0),
mapTypeId: google.maps.MapTypeId.ROADMAP
});
window.addEventListener("resize", viewporter.refresh);
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function (position) {
map.setCenter(new google.maps.LatLng(
position.coords.latitude,
position.coords.longitude
));
map.setZoom(14);
});
}
});
}
function test_resize() {
viewporter.preventPageScroll = true;
document.addEventListener('DOMContentLoaded', function () {
// listen for "resize" events and trigger "refresh" method.
window.addEventListener("resize", function () {
viewporter.refresh();
document.getElementById("events").innerHTML += "resize<br>";
});
if (navigator.geolocation) {
function success(position) {
var coords = [position.coords.latitude, position.coords.longitude]
document.getElementById("coords").innerHTML = coords.join(", ");
}
navigator.geolocation.getCurrentPosition(success);
}
});
}
function test_swipey() {
function rainbow(numOfSteps, step) {
var r, g, b, h = step / numOfSteps, i = ~~(h * 6), f = h * 6 - i, q = 1 - f;
switch (i % 6) {
case 0: r = 1, g = f, b = 0; break;
case 1: r = q, g = 1, b = 0; break;
case 2: r = 0, g = 1, b = f; break;
case 3: r = 0, g = q, b = 1; break;
case 4: r = f, g = 0, b = 1; break;
case 5: r = 1, g = 0, b = q; break;
}
return [((~ ~(r * 255))), (~ ~(g * 255)), (~ ~(b * 255))];
}
function drawingPointer(context, color) {
var clickX = [];
var clickY = [];
var clickDrag = [];
this.painting = false;
var timestamp = null;
this.addPoint = function (x, y, dragging) {
clickX.push(x);
clickY.push(y);
clickDrag.push(dragging);
};
this.start = function () {
this.clear();
this.painting = true;
};
this.clear = function () {
clickX = [];
clickY = [];
clickDrag = [];
timestamp = null;
};
this.stop = function () {
this.painting = false;
timestamp = Date.now();
};
this.redraw = function () {
var opacity = timestamp ? (300 - (Date.now() - timestamp)) / 300 : 1;
if (opacity <= 0) {
this.clear();
return;
}
context.strokeStyle = "rgba(" + color[0] + "," + color[1] + "," + color[2] + "," + opacity + ")";
context.lineJoin = "round";
context.lineWidth = ((<any>window).devicePixelRatio || 1) * 5;
for (var i = 0; i < clickX.length; i++) {
context.beginPath();
if (clickDrag[i] && i) {
context.moveTo(clickX[i - 1], clickY[i - 1]);
} else {
context.moveTo(clickX[i] - 1, clickY[i]);
}
context.lineTo(clickX[i], clickY[i]);
context.closePath();
context.stroke();
}
};
};
$(window).bind(viewporter.ACTIVE ? 'viewportready' : 'load', function () {
var canvas = <HTMLCanvasElement>$('canvas')[0];
var context = canvas.getContext('2d'); | $(window).bind(viewporter.ACTIVE ? 'viewportchange' : 'resize', function () {
width = canvas.width = window.innerWidth;
height = canvas.height = window.innerHeight
}).trigger(viewporter.ACTIVE ? 'viewportchange' : 'resize');
$('canvas').bind(iOS ? 'touchstart' : 'mousedown', function (e) {
e.preventDefault();
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
// if no pointer has been created for this finger yet, do it
if (!pointers[identifier]) {
pointers[identifier] = new drawingPointer(context, rainbow(8, Object.keys(pointers).length));
}
pointers[identifier].start();
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY);
}
});
$('canvas').bind(iOS ? 'touchmove' : 'mousemove', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier] && pointers[identifier].painting) {
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY, true);
}
}
});
$('canvas').bind(iOS ? 'touchend' : 'mouseup', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier]) {
pointers[identifier].stop();
(function (identifier) {
setTimeout(function () {
delete pointers[identifier];
}, 300);
})(identifier);
}
}
});
window.setInterval(function () {
context.clearRect(0, 0, width, height);
var counter = 0, ratio = (<any>window).devicePixelRatio || 1;
for (var identifier in pointers) {
pointers[identifier].redraw();
counter++;
}
context.font = (10 * ratio) + 'pt Arial';
context.fillText(counter + ' active pointers', 15 * ratio, 25 * ratio);
}, 16);
});
} | var iOS = (/iphone|ipad/i).test(navigator.userAgent);
var pointers = {};
// handle resizing / rotating of the viewport
var width, height; | random_line_split |
viewporter-tests.ts | /// <reference types="jquery"/>
/// <reference types="google-maps"/>
function test_map() {
viewporter.preventPageScroll = true;
var eventName = viewporter.ACTIVE ? 'viewportready' : "load";
google.maps.event.addDomListener(window, eventName, function () {
var map = new google.maps.Map(document.getElementById("map"), {
zoom: 2,
center: new google.maps.LatLng(10, 0),
mapTypeId: google.maps.MapTypeId.ROADMAP
});
window.addEventListener("resize", viewporter.refresh);
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function (position) {
map.setCenter(new google.maps.LatLng(
position.coords.latitude,
position.coords.longitude
));
map.setZoom(14);
});
}
});
}
function | () {
viewporter.preventPageScroll = true;
document.addEventListener('DOMContentLoaded', function () {
// listen for "resize" events and trigger "refresh" method.
window.addEventListener("resize", function () {
viewporter.refresh();
document.getElementById("events").innerHTML += "resize<br>";
});
if (navigator.geolocation) {
function success(position) {
var coords = [position.coords.latitude, position.coords.longitude]
document.getElementById("coords").innerHTML = coords.join(", ");
}
navigator.geolocation.getCurrentPosition(success);
}
});
}
function test_swipey() {
function rainbow(numOfSteps, step) {
var r, g, b, h = step / numOfSteps, i = ~~(h * 6), f = h * 6 - i, q = 1 - f;
switch (i % 6) {
case 0: r = 1, g = f, b = 0; break;
case 1: r = q, g = 1, b = 0; break;
case 2: r = 0, g = 1, b = f; break;
case 3: r = 0, g = q, b = 1; break;
case 4: r = f, g = 0, b = 1; break;
case 5: r = 1, g = 0, b = q; break;
}
return [((~ ~(r * 255))), (~ ~(g * 255)), (~ ~(b * 255))];
}
function drawingPointer(context, color) {
var clickX = [];
var clickY = [];
var clickDrag = [];
this.painting = false;
var timestamp = null;
this.addPoint = function (x, y, dragging) {
clickX.push(x);
clickY.push(y);
clickDrag.push(dragging);
};
this.start = function () {
this.clear();
this.painting = true;
};
this.clear = function () {
clickX = [];
clickY = [];
clickDrag = [];
timestamp = null;
};
this.stop = function () {
this.painting = false;
timestamp = Date.now();
};
this.redraw = function () {
var opacity = timestamp ? (300 - (Date.now() - timestamp)) / 300 : 1;
if (opacity <= 0) {
this.clear();
return;
}
context.strokeStyle = "rgba(" + color[0] + "," + color[1] + "," + color[2] + "," + opacity + ")";
context.lineJoin = "round";
context.lineWidth = ((<any>window).devicePixelRatio || 1) * 5;
for (var i = 0; i < clickX.length; i++) {
context.beginPath();
if (clickDrag[i] && i) {
context.moveTo(clickX[i - 1], clickY[i - 1]);
} else {
context.moveTo(clickX[i] - 1, clickY[i]);
}
context.lineTo(clickX[i], clickY[i]);
context.closePath();
context.stroke();
}
};
};
$(window).bind(viewporter.ACTIVE ? 'viewportready' : 'load', function () {
var canvas = <HTMLCanvasElement>$('canvas')[0];
var context = canvas.getContext('2d');
var iOS = (/iphone|ipad/i).test(navigator.userAgent);
var pointers = {};
// handle resizing / rotating of the viewport
var width, height;
$(window).bind(viewporter.ACTIVE ? 'viewportchange' : 'resize', function () {
width = canvas.width = window.innerWidth;
height = canvas.height = window.innerHeight
}).trigger(viewporter.ACTIVE ? 'viewportchange' : 'resize');
$('canvas').bind(iOS ? 'touchstart' : 'mousedown', function (e) {
e.preventDefault();
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
// if no pointer has been created for this finger yet, do it
if (!pointers[identifier]) {
pointers[identifier] = new drawingPointer(context, rainbow(8, Object.keys(pointers).length));
}
pointers[identifier].start();
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY);
}
});
$('canvas').bind(iOS ? 'touchmove' : 'mousemove', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier] && pointers[identifier].painting) {
pointers[identifier].addPoint(touches[i].pageX, touches[i].pageY, true);
}
}
});
$('canvas').bind(iOS ? 'touchend' : 'mouseup', function (e) {
var touches = iOS ? (<any>e.originalEvent).changedTouches : [e.originalEvent];
var identifier;
for (var i = 0; i < touches.length; i++) {
identifier = touches[i].identifier || 'mouse';
if (pointers[identifier]) {
pointers[identifier].stop();
(function (identifier) {
setTimeout(function () {
delete pointers[identifier];
}, 300);
})(identifier);
}
}
});
window.setInterval(function () {
context.clearRect(0, 0, width, height);
var counter = 0, ratio = (<any>window).devicePixelRatio || 1;
for (var identifier in pointers) {
pointers[identifier].redraw();
counter++;
}
context.font = (10 * ratio) + 'pt Arial';
context.fillText(counter + ' active pointers', 15 * ratio, 25 * ratio);
}, 16);
});
} | test_resize | identifier_name |
tile-stitcher-spec.js | describe('tile-stitcher.js', function() {
describe('stitch', function() {
var ts = tileStitcher('src/tiles/{z}/{x}/{y}.png'),
async = new AsyncSpec(this);
async.it('should stitch tiles together in the proper order', function(done) {
ts.stitch(19084, 24821, 19085, 24822, 16, function(stitchedCanvas) {
ts._getImage('src/tiles/stitched.png', function(img) {
var expectedCanvas = document.createElement('canvas'),
expectedCtx, expectedImageData,
stitchedCtx, stitchedImageData,
i, len;
expectedCanvas.width = img.width;
expectedCanvas.height = img.height;
// Copy the image contents to the canvas
expectedCtx = expectedCanvas.getContext('2d');
expectedCtx.drawImage(img, 0, 0);
expectedImageData = expectedCtx.getImageData(0, 0, img.width, img.height);
stitchedCtx = stitchedCanvas.getContext('2d');
stitchedImageData = stitchedCtx.getImageData(0, 0, stitchedCanvas.width, stitchedCanvas.height);
expect(stitchedImageData.data.length).toBe(expectedImageData.data.length);
for(i=0, len=stitchedImageData.data.length; i<len; i++) |
done();
});
});
});
});
}); | {
expect(stitchedImageData.data[i]).toBe(expectedImageData.data[i]);
} | conditional_block |
tile-stitcher-spec.js | describe('tile-stitcher.js', function() {
describe('stitch', function() {
var ts = tileStitcher('src/tiles/{z}/{x}/{y}.png'),
async = new AsyncSpec(this);
async.it('should stitch tiles together in the proper order', function(done) {
ts.stitch(19084, 24821, 19085, 24822, 16, function(stitchedCanvas) {
ts._getImage('src/tiles/stitched.png', function(img) {
var expectedCanvas = document.createElement('canvas'),
expectedCtx, expectedImageData,
stitchedCtx, stitchedImageData,
i, len;
expectedCanvas.width = img.width;
expectedCanvas.height = img.height;
// Copy the image contents to the canvas
expectedCtx = expectedCanvas.getContext('2d');
expectedCtx.drawImage(img, 0, 0);
expectedImageData = expectedCtx.getImageData(0, 0, img.width, img.height);
stitchedCtx = stitchedCanvas.getContext('2d');
stitchedImageData = stitchedCtx.getImageData(0, 0, stitchedCanvas.width, stitchedCanvas.height);
expect(stitchedImageData.data.length).toBe(expectedImageData.data.length);
for(i=0, len=stitchedImageData.data.length; i<len; i++) {
expect(stitchedImageData.data[i]).toBe(expectedImageData.data[i]);
}
done(); | });
});
}); | });
}); | random_line_split |
ChildNodeInterface.js | /*
* Copyright 2013 The Polymer Authors. All rights reserved.
* Use of this source code is goverened by a BSD-style
* license that can be found in the LICENSE file.
*/
suite('ChildNodeInterface', function() {
function | () {
var tree = {};
var div = tree.div = document.createElement('div');
div.innerHTML = 'a<b></b>c<d></d>e';
var a = tree.a = div.firstChild;
var b = tree.b = a.nextSibling;
var c = tree.c = b.nextSibling;
var d = tree.d = c.nextSibling;
var e = tree.e = d.nextSibling;
var sr = tree.sr = div.createShadowRoot();
sr.innerHTML = 'f<g></g>h<content></content>i<j></j>k';
var f = tree.f = sr.firstChild;
var g = tree.g = f.nextSibling;
var h = tree.h = g.nextSibling;
var content = tree.content = h.nextSibling;
var i = tree.i = content.nextSibling;
var j = tree.j = i.nextSibling;
var k = tree.k = j.nextSibling;
div.offsetHeight; // trigger rendering
return tree;
}
test('nextElementSibling', function() {
var tree = getTree();
assert.equal(tree.b.nextElementSibling, tree.d);
assert.equal(tree.d.nextElementSibling, null);
assert.equal(tree.g.nextElementSibling, tree.content);
assert.equal(tree.content.nextElementSibling, tree.j);
assert.equal(tree.j.nextElementSibling, null);
});
test('previousElementSibling', function() {
var tree = getTree();
assert.equal(tree.b.previousElementSibling, null);
assert.equal(tree.d.previousElementSibling, tree.b);
assert.equal(tree.g.previousElementSibling, null);
assert.equal(tree.content.previousElementSibling, tree.g);
assert.equal(tree.j.previousElementSibling, tree.content);
});
test('remove', function() {
var div = document.createElement('div');
div.innerHTML = '<a></a>';
var a = div.firstChild;
a.remove();
assert.equal(div.firstChild, null);
assert.equal(a.parentNode, null);
// no op.
div.remove();
});
});
| getTree | identifier_name |
ChildNodeInterface.js | /*
* Copyright 2013 The Polymer Authors. All rights reserved.
* Use of this source code is goverened by a BSD-style
* license that can be found in the LICENSE file.
*/
suite('ChildNodeInterface', function() {
function getTree() |
test('nextElementSibling', function() {
var tree = getTree();
assert.equal(tree.b.nextElementSibling, tree.d);
assert.equal(tree.d.nextElementSibling, null);
assert.equal(tree.g.nextElementSibling, tree.content);
assert.equal(tree.content.nextElementSibling, tree.j);
assert.equal(tree.j.nextElementSibling, null);
});
test('previousElementSibling', function() {
var tree = getTree();
assert.equal(tree.b.previousElementSibling, null);
assert.equal(tree.d.previousElementSibling, tree.b);
assert.equal(tree.g.previousElementSibling, null);
assert.equal(tree.content.previousElementSibling, tree.g);
assert.equal(tree.j.previousElementSibling, tree.content);
});
test('remove', function() {
var div = document.createElement('div');
div.innerHTML = '<a></a>';
var a = div.firstChild;
a.remove();
assert.equal(div.firstChild, null);
assert.equal(a.parentNode, null);
// no op.
div.remove();
});
});
| {
var tree = {};
var div = tree.div = document.createElement('div');
div.innerHTML = 'a<b></b>c<d></d>e';
var a = tree.a = div.firstChild;
var b = tree.b = a.nextSibling;
var c = tree.c = b.nextSibling;
var d = tree.d = c.nextSibling;
var e = tree.e = d.nextSibling;
var sr = tree.sr = div.createShadowRoot();
sr.innerHTML = 'f<g></g>h<content></content>i<j></j>k';
var f = tree.f = sr.firstChild;
var g = tree.g = f.nextSibling;
var h = tree.h = g.nextSibling;
var content = tree.content = h.nextSibling;
var i = tree.i = content.nextSibling;
var j = tree.j = i.nextSibling;
var k = tree.k = j.nextSibling;
div.offsetHeight; // trigger rendering
return tree;
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.