| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
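Each row pairs a source file with one fill-in-the-middle (FIM) split of its contents: concatenating prefix, middle and suffix restores the original file, and fim_type records what kind of span was masked (the four classes visible in the rows below are identifier_body, identifier_name, random_line_split and conditional_block). A minimal sketch of that reconstruction, assuming the column names from the header above and using a made-up row purely for illustration:

```python
# Minimal sketch: rebuild the original file text from one FIM row.
# Column names follow the schema above; the example row is hypothetical.
def reconstruct(row: dict) -> str:
    """The masked span goes back between prefix and suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "url-path.ts",
    "prefix": "static ",
    "middle": "getParamList",        # the masked identifier
    "suffix": "(): string[] {",
    "fim_type": "identifier_name",   # one of the 4 classes
}

assert reconstruct(example_row) == "static getParamList(): string[] {"
```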
runtests.py | #!/usr/bin/env python
"""
Custom test runner
If no args or options are given, we run the testsuite as quickly as possible.
If args but no options, we default to using the spec plugin and aborting on
first error/failure.
If options, we ignore defaults and pass options onto Nose.
Examples:
Run all tests (as fast as possible)
$ ./runtests.py
Run all unit tests (using spec output)
$ ./runtests.py tests/unit
Run all checkout unit tests (using spec output)
$ ./runtests.py tests/unit/checkout
Run all tests relating to shipping
$ ./runtests.py --attr=shipping
Re-run failing tests (needs to be run twice to first build the index)
$ ./runtests.py ... --failed
Drop into pdb when a test fails
$ ./runtests.py ... --pdb-failures
"""
import sys
import logging
import warnings
from tests.config import configure
from django.utils.six.moves import map
# No logging
logging.disable(logging.CRITICAL)
def run_tests(verbosity, *test_args):
|
if __name__ == '__main__':
args = sys.argv[1:]
verbosity = 1
if not args:
# If run with no args, try and run the testsuite as fast as possible.
# That means across all cores and with no high-falutin' plugins.
import multiprocessing
try:
num_cores = multiprocessing.cpu_count()
except NotImplementedError:
num_cores = 4 # Guess
args = ['--nocapture', '--stop', '--processes=%s' % num_cores]
else:
# Some args/options specified. Check to see if any nose options have
# been specified. If they have, then don't set any defaults.
has_options = any(map(lambda x: x.startswith('--'), args))
if not has_options:
# Default options:
# --stop Abort on first error/failure
# --nocapture Don't capture STDOUT
args.extend(['--nocapture', '--stop'])
else:
# Remove options as nose will pick these up from sys.argv
for arg in args:
if arg.startswith('--verbosity'):
verbosity = int(arg[-1])
args = [arg for arg in args if not arg.startswith('-')]
configure()
with warnings.catch_warnings():
# The warnings module in default configuration will never cause tests
# to fail, as it never raises an exception. We alter that behaviour by
# turning DeprecationWarnings into exceptions, but exclude warnings
# triggered by third-party libs. Note: The context manager is not thread
# safe. Behaviour with multiple threads is undefined.
warnings.filterwarnings('error', category=DeprecationWarning)
warnings.filterwarnings('error', category=RuntimeWarning)
libs = r'(sorl\.thumbnail.*|bs4.*|webtest.*)'
warnings.filterwarnings(
'ignore', r'.*', DeprecationWarning, libs)
run_tests(verbosity, *args)
| from django_nose import NoseTestSuiteRunner
test_runner = NoseTestSuiteRunner(verbosity=verbosity)
if not test_args:
test_args = ['tests']
num_failures = test_runner.run_tests(test_args)
if num_failures:
sys.exit(num_failures) | identifier_body |
url-path.ts | export class UrlPath {
static ADMIN = 'admin';
static FILTERED_MAP = 'filteredMap';
static INSPECTOR = 'inspector';
static MAIN = 'main';
static REAL_TIME = 'realtime';
static SCATTER_FULL_SCREEN_MODE = 'scatterFullScreenMode';
static THREAD_DUMP = 'threadDump';
static TRANSACTION_DETAIL = 'transactionDetail';
static TRANSACTION_LIST = 'transactionList';
static TRANSACTION_VIEW = 'transactionView';
static BROWSER_NOT_SUPPORT = 'browserNotSupported';
static ERROR = 'error';
static CONFIG = 'config';
constructor() {}
static | (): string[] {
return [
UrlPath.CONFIG,
UrlPath.ADMIN,
UrlPath.ERROR,
UrlPath.FILTERED_MAP,
UrlPath.INSPECTOR,
UrlPath.MAIN,
UrlPath.REAL_TIME,
UrlPath.SCATTER_FULL_SCREEN_MODE,
UrlPath.THREAD_DUMP,
UrlPath.TRANSACTION_DETAIL,
UrlPath.TRANSACTION_LIST,
UrlPath.TRANSACTION_VIEW
];
}
}
export default UrlPath;
| getParamList | identifier_name |
url-path.ts | export class UrlPath {
static ADMIN = 'admin';
static FILTERED_MAP = 'filteredMap';
static INSPECTOR = 'inspector';
static MAIN = 'main';
static REAL_TIME = 'realtime';
static SCATTER_FULL_SCREEN_MODE = 'scatterFullScreenMode';
static THREAD_DUMP = 'threadDump';
static TRANSACTION_DETAIL = 'transactionDetail';
static TRANSACTION_LIST = 'transactionList';
static TRANSACTION_VIEW = 'transactionView';
static BROWSER_NOT_SUPPORT = 'browserNotSupported';
static ERROR = 'error';
static CONFIG = 'config';
constructor() {} | UrlPath.ADMIN,
UrlPath.ERROR,
UrlPath.FILTERED_MAP,
UrlPath.INSPECTOR,
UrlPath.MAIN,
UrlPath.REAL_TIME,
UrlPath.SCATTER_FULL_SCREEN_MODE,
UrlPath.THREAD_DUMP,
UrlPath.TRANSACTION_DETAIL,
UrlPath.TRANSACTION_LIST,
UrlPath.TRANSACTION_VIEW
];
}
}
export default UrlPath; | static getParamList(): string[] {
return [
UrlPath.CONFIG, | random_line_split |
url-path.ts | export class UrlPath {
static ADMIN = 'admin';
static FILTERED_MAP = 'filteredMap';
static INSPECTOR = 'inspector';
static MAIN = 'main';
static REAL_TIME = 'realtime';
static SCATTER_FULL_SCREEN_MODE = 'scatterFullScreenMode';
static THREAD_DUMP = 'threadDump';
static TRANSACTION_DETAIL = 'transactionDetail';
static TRANSACTION_LIST = 'transactionList';
static TRANSACTION_VIEW = 'transactionView';
static BROWSER_NOT_SUPPORT = 'browserNotSupported';
static ERROR = 'error';
static CONFIG = 'config';
constructor() |
static getParamList(): string[] {
return [
UrlPath.CONFIG,
UrlPath.ADMIN,
UrlPath.ERROR,
UrlPath.FILTERED_MAP,
UrlPath.INSPECTOR,
UrlPath.MAIN,
UrlPath.REAL_TIME,
UrlPath.SCATTER_FULL_SCREEN_MODE,
UrlPath.THREAD_DUMP,
UrlPath.TRANSACTION_DETAIL,
UrlPath.TRANSACTION_LIST,
UrlPath.TRANSACTION_VIEW
];
}
}
export default UrlPath;
| {} | identifier_body |
db.py | # Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def | (self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topology(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and returns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error("Unknown error while provisioning topology for "
"tenant %(tenant_id)s. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
self._cleanup(
context, network_id=e.network_id,
router_id=e.router_id, subnets=e.subnets)
raise e.error
def _check_requirements(self, context, tenant_id):
"""Raise if requirements are not met."""
self._get_default_external_network(context)
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network"))
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
"""Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools
def _provision_tenant_private_network(self, context, tenant_id):
"""Create a tenant private network/subnets."""
network = None
try:
network_args = {
'name': 'auto_allocated_network',
'admin_state_up': False,
'tenant_id': tenant_id,
'shared': False
}
network = p_utils.create_network(
self.core_plugin, context, {'network': network_args})
subnets = []
for pool in self._get_supported_subnetpools(context):
subnet_args = {
'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
'network_id': network['id'],
'tenant_id': tenant_id,
'ip_version': pool['ip_version'],
'subnetpool_id': pool['id'],
}
subnets.append(p_utils.create_subnet(
self.core_plugin, context, {'subnet': subnet_args}))
return subnets
except (c_exc.SubnetAllocationError, ValueError,
n_exc.BadRequest, n_exc.NotFound) as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s due to missing or unmet "
"requirements. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
if network:
self._cleanup(context, network['id'])
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide tenant private network"))
except Exception as e:
network_id = network['id'] if network else None
raise exceptions.UnknownProvisioningError(e, network_id=network_id)
def _provision_external_connectivity(
self, context, default_external_network, subnets, tenant_id):
"""Uplink tenant subnet(s) to external network."""
router_args = {
'name': 'auto_allocated_router',
l3_apidef.EXTERNAL_GW_INFO: {
'network_id': default_external_network},
'tenant_id': tenant_id,
'admin_state_up': True
}
router = None
attached_subnets = []
try:
router = self.l3_plugin.create_router(
context, {'router': router_args})
for subnet in subnets:
self.l3_plugin.add_router_interface(
context, router['id'], {'subnet_id': subnet['id']})
attached_subnets.append(subnet)
return router
except n_exc.BadRequest as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s because of router errors. "
"Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
router_id = router['id'] if router else None
self._cleanup(context,
network_id=subnets[0]['network_id'],
router_id=router_id, subnets=attached_subnets)
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide external connectivity"))
except Exception as e:
router_id = router['id'] if router else None
raise exceptions.UnknownProvisioningError(
e, network_id=subnets[0]['network_id'],
router_id=router_id, subnets=subnets)
def _save(self, context, tenant_id, network_id, router_id, subnets):
"""Save auto-allocated topology, or revert in case of DB errors."""
try:
auto_allocate_obj.AutoAllocatedTopology(
context, project_id=tenant_id, network_id=network_id,
router_id=router_id).create()
self.core_plugin.update_network(
context, network_id,
{'network': {'admin_state_up': True}})
except obj_exc.NeutronDbObjectDuplicateEntry:
LOG.debug("Multiple auto-allocated networks detected for "
"tenant %s. Attempting clean up for network %s "
"and router %s.",
tenant_id, network_id, router_id)
self._cleanup(
context, network_id=network_id,
router_id=router_id, subnets=subnets)
network_id = self._get_auto_allocated_network(context, tenant_id)
except Exception as e:
raise exceptions.UnknownProvisioningError(
e, network_id=network_id,
router_id=router_id, subnets=subnets)
return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
context, router_id, {'subnet_id': subnet['id']})
ignore_notfound(self.l3_plugin.delete_router, context, router_id)
if network_id:
ignore_notfound(
self.core_plugin.delete_network, context, network_id)
def ignore_notfound(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
try:
return func(*args, **kwargs)
except n_exc.NotFound:
pass
| delete_auto_allocated_topology | identifier_name |
db.py | # Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def delete_auto_allocated_topology(self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topology(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and returns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error("Unknown error while provisioning topology for "
"tenant %(tenant_id)s. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
self._cleanup(
context, network_id=e.network_id,
router_id=e.router_id, subnets=e.subnets)
raise e.error
def _check_requirements(self, context, tenant_id):
"""Raise if requirements are not met."""
self._get_default_external_network(context)
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
|
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
"""Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools
def _provision_tenant_private_network(self, context, tenant_id):
"""Create a tenant private network/subnets."""
network = None
try:
network_args = {
'name': 'auto_allocated_network',
'admin_state_up': False,
'tenant_id': tenant_id,
'shared': False
}
network = p_utils.create_network(
self.core_plugin, context, {'network': network_args})
subnets = []
for pool in self._get_supported_subnetpools(context):
subnet_args = {
'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
'network_id': network['id'],
'tenant_id': tenant_id,
'ip_version': pool['ip_version'],
'subnetpool_id': pool['id'],
}
subnets.append(p_utils.create_subnet(
self.core_plugin, context, {'subnet': subnet_args}))
return subnets
except (c_exc.SubnetAllocationError, ValueError,
n_exc.BadRequest, n_exc.NotFound) as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s due to missing or unmet "
"requirements. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
if network:
self._cleanup(context, network['id'])
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide tenant private network"))
except Exception as e:
network_id = network['id'] if network else None
raise exceptions.UnknownProvisioningError(e, network_id=network_id)
def _provision_external_connectivity(
self, context, default_external_network, subnets, tenant_id):
"""Uplink tenant subnet(s) to external network."""
router_args = {
'name': 'auto_allocated_router',
l3_apidef.EXTERNAL_GW_INFO: {
'network_id': default_external_network},
'tenant_id': tenant_id,
'admin_state_up': True
}
router = None
attached_subnets = []
try:
router = self.l3_plugin.create_router(
context, {'router': router_args})
for subnet in subnets:
self.l3_plugin.add_router_interface(
context, router['id'], {'subnet_id': subnet['id']})
attached_subnets.append(subnet)
return router
except n_exc.BadRequest as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s because of router errors. "
"Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
router_id = router['id'] if router else None
self._cleanup(context,
network_id=subnets[0]['network_id'],
router_id=router_id, subnets=attached_subnets)
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide external connectivity"))
except Exception as e:
router_id = router['id'] if router else None
raise exceptions.UnknownProvisioningError(
e, network_id=subnets[0]['network_id'],
router_id=router_id, subnets=subnets)
def _save(self, context, tenant_id, network_id, router_id, subnets):
"""Save auto-allocated topology, or revert in case of DB errors."""
try:
auto_allocate_obj.AutoAllocatedTopology(
context, project_id=tenant_id, network_id=network_id,
router_id=router_id).create()
self.core_plugin.update_network(
context, network_id,
{'network': {'admin_state_up': True}})
except obj_exc.NeutronDbObjectDuplicateEntry:
LOG.debug("Multiple auto-allocated networks detected for "
"tenant %s. Attempting clean up for network %s "
"and router %s.",
tenant_id, network_id, router_id)
self._cleanup(
context, network_id=network_id,
router_id=router_id, subnets=subnets)
network_id = self._get_auto_allocated_network(context, tenant_id)
except Exception as e:
raise exceptions.UnknownProvisioningError(
e, network_id=network_id,
router_id=router_id, subnets=subnets)
return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
context, router_id, {'subnet_id': subnet['id']})
ignore_notfound(self.l3_plugin.delete_router, context, router_id)
if network_id:
ignore_notfound(
self.core_plugin.delete_network, context, network_id)
def ignore_notfound(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
try:
return func(*args, **kwargs)
except n_exc.NotFound:
pass
| LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network")) | conditional_block |
db.py | # Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def delete_auto_allocated_topology(self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topology(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and returns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error("Unknown error while provisioning topology for "
"tenant %(tenant_id)s. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
self._cleanup(
context, network_id=e.network_id,
router_id=e.router_id, subnets=e.subnets)
raise e.error
def _check_requirements(self, context, tenant_id):
"""Raise if requirements are not met."""
self._get_default_external_network(context)
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network"))
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
|
def _provision_tenant_private_network(self, context, tenant_id):
"""Create a tenant private network/subnets."""
network = None
try:
network_args = {
'name': 'auto_allocated_network',
'admin_state_up': False,
'tenant_id': tenant_id,
'shared': False
}
network = p_utils.create_network(
self.core_plugin, context, {'network': network_args})
subnets = []
for pool in self._get_supported_subnetpools(context):
subnet_args = {
'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
'network_id': network['id'],
'tenant_id': tenant_id,
'ip_version': pool['ip_version'],
'subnetpool_id': pool['id'],
}
subnets.append(p_utils.create_subnet(
self.core_plugin, context, {'subnet': subnet_args}))
return subnets
except (c_exc.SubnetAllocationError, ValueError,
n_exc.BadRequest, n_exc.NotFound) as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s due to missing or unmet "
"requirements. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
if network:
self._cleanup(context, network['id'])
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide tenant private network"))
except Exception as e:
network_id = network['id'] if network else None
raise exceptions.UnknownProvisioningError(e, network_id=network_id)
def _provision_external_connectivity(
self, context, default_external_network, subnets, tenant_id):
"""Uplink tenant subnet(s) to external network."""
router_args = {
'name': 'auto_allocated_router',
l3_apidef.EXTERNAL_GW_INFO: {
'network_id': default_external_network},
'tenant_id': tenant_id,
'admin_state_up': True
}
router = None
attached_subnets = []
try:
router = self.l3_plugin.create_router(
context, {'router': router_args})
for subnet in subnets:
self.l3_plugin.add_router_interface(
context, router['id'], {'subnet_id': subnet['id']})
attached_subnets.append(subnet)
return router
except n_exc.BadRequest as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s because of router errors. "
"Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
router_id = router['id'] if router else None
self._cleanup(context,
network_id=subnets[0]['network_id'],
router_id=router_id, subnets=attached_subnets)
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide external connectivity"))
except Exception as e:
router_id = router['id'] if router else None
raise exceptions.UnknownProvisioningError(
e, network_id=subnets[0]['network_id'],
router_id=router_id, subnets=subnets)
def _save(self, context, tenant_id, network_id, router_id, subnets):
"""Save auto-allocated topology, or revert in case of DB errors."""
try:
auto_allocate_obj.AutoAllocatedTopology(
context, project_id=tenant_id, network_id=network_id,
router_id=router_id).create()
self.core_plugin.update_network(
context, network_id,
{'network': {'admin_state_up': True}})
except obj_exc.NeutronDbObjectDuplicateEntry:
LOG.debug("Multiple auto-allocated networks detected for "
"tenant %s. Attempting clean up for network %s "
"and router %s.",
tenant_id, network_id, router_id)
self._cleanup(
context, network_id=network_id,
router_id=router_id, subnets=subnets)
network_id = self._get_auto_allocated_network(context, tenant_id)
except Exception as e:
raise exceptions.UnknownProvisioningError(
e, network_id=network_id,
router_id=router_id, subnets=subnets)
return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
context, router_id, {'subnet_id': subnet['id']})
ignore_notfound(self.l3_plugin.delete_router, context, router_id)
if network_id:
ignore_notfound(
self.core_plugin.delete_network, context, network_id)
def ignore_notfound(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
try:
return func(*args, **kwargs)
except n_exc.NotFound:
pass
| """Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools | identifier_body |
db.py | # Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def delete_auto_allocated_topology(self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topology(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and returns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error("Unknown error while provisioning topology for "
"tenant %(tenant_id)s. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
self._cleanup(
context, network_id=e.network_id,
router_id=e.router_id, subnets=e.subnets)
raise e.error
def _check_requirements(self, context, tenant_id):
"""Raise if requirements are not met."""
self._get_default_external_network(context)
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network"))
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
"""Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools
def _provision_tenant_private_network(self, context, tenant_id):
"""Create a tenant private network/subnets."""
network = None
try:
network_args = {
'name': 'auto_allocated_network',
'admin_state_up': False,
'tenant_id': tenant_id,
'shared': False
}
network = p_utils.create_network(
self.core_plugin, context, {'network': network_args})
subnets = []
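            # Create one subnet per available default subnet pool (IPv4 and/or IPv6).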
for pool in self._get_supported_subnetpools(context):
subnet_args = {
'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
'network_id': network['id'],
'tenant_id': tenant_id,
'ip_version': pool['ip_version'],
'subnetpool_id': pool['id'],
}
subnets.append(p_utils.create_subnet(
self.core_plugin, context, {'subnet': subnet_args}))
return subnets
except (c_exc.SubnetAllocationError, ValueError,
n_exc.BadRequest, n_exc.NotFound) as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s due to missing or unmet "
"requirements. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
if network:
self._cleanup(context, network['id'])
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide tenant private network"))
except Exception as e:
network_id = network['id'] if network else None
raise exceptions.UnknownProvisioningError(e, network_id=network_id)
def _provision_external_connectivity(
self, context, default_external_network, subnets, tenant_id):
"""Uplink tenant subnet(s) to external network."""
router_args = {
'name': 'auto_allocated_router',
l3_apidef.EXTERNAL_GW_INFO: {
'network_id': default_external_network},
'tenant_id': tenant_id,
'admin_state_up': True
}
router = None
attached_subnets = []
try:
router = self.l3_plugin.create_router(
context, {'router': router_args})
for subnet in subnets:
self.l3_plugin.add_router_interface(
context, router['id'], {'subnet_id': subnet['id']})
attached_subnets.append(subnet)
return router
except n_exc.BadRequest as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s because of router errors. "
"Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
router_id = router['id'] if router else None
self._cleanup(context,
network_id=subnets[0]['network_id'],
router_id=router_id, subnets=attached_subnets)
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide external connectivity"))
except Exception as e:
router_id = router['id'] if router else None
raise exceptions.UnknownProvisioningError(
e, network_id=subnets[0]['network_id'],
router_id=router_id, subnets=subnets)
def _save(self, context, tenant_id, network_id, router_id, subnets):
"""Save auto-allocated topology, or revert in case of DB errors."""
try:
auto_allocate_obj.AutoAllocatedTopology(
context, project_id=tenant_id, network_id=network_id,
router_id=router_id).create()
self.core_plugin.update_network(
context, network_id,
{'network': {'admin_state_up': True}})
except obj_exc.NeutronDbObjectDuplicateEntry:
LOG.debug("Multiple auto-allocated networks detected for "
"tenant %s. Attempting clean up for network %s "
"and router %s.",
tenant_id, network_id, router_id)
self._cleanup(
context, network_id=network_id,
router_id=router_id, subnets=subnets)
network_id = self._get_auto_allocated_network(context, tenant_id)
except Exception as e:
raise exceptions.UnknownProvisioningError(
e, network_id=network_id,
router_id=router_id, subnets=subnets)
return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
context, router_id, {'subnet_id': subnet['id']})
ignore_notfound(self.l3_plugin.delete_router, context, router_id)
if network_id:
ignore_notfound(
self.core_plugin.delete_network, context, network_id)
def ignore_notfound(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
try: | except n_exc.NotFound:
pass | return func(*args, **kwargs) | random_line_split |
index.tsx | import React from 'react';
import { parse } from 'qs'; | import { withStyles } from '../../helpers/withStylesHelper';
import { setActiveFilterButton } from '../../actions/searchFilter';
import { SearchActions } from '../../actions/actionTypes';
import FilterButton, { FILTER_BUTTON_TYPE } from '../filterButton';
import { AppState } from '../../reducers';
import SearchQueryManager from '../../helpers/searchQueryManager';
import ActionTicketManager from '../../helpers/actionTicketManager';
const s = require('./sortingDropdown.scss');
const SORTING_TYPES: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS[] = ['RELEVANCE', 'NEWEST_FIRST', 'MOST_CITATIONS'];
interface SortingDropdownProps {
dispatch: Dispatch<SearchActions>;
}
function trackSorting(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
ActionTicketManager.trackTicket({
pageType: 'searchResult',
actionType: 'fire',
actionArea: 'sortBar',
actionTag: 'paperSorting',
actionLabel: sortOption,
});
}
function getSortText(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
switch (sortOption) {
case 'RELEVANCE': {
return 'Relevance';
}
case 'NEWEST_FIRST': {
return 'Newest';
}
case 'MOST_CITATIONS': {
return 'Most Citations';
}
default:
return 'Relevance';
}
}
const SortingDropdown: React.FC<
SortingDropdownProps & ReturnType<typeof mapStateToProps> & RouteComponentProps
> = React.memo(props => {
const anchorEl = React.useRef(null);
const queryParams = parse(location.search, { ignoreQueryPrefix: true });
const filter = SearchQueryManager.objectifyPaperFilter(queryParams.filter);
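  // Changing the sort keeps the current query and filter, but resets pagination back to page 1.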
function getNextLocation(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
return {
pathname: '/search',
search: SearchQueryManager.stringifyPapersQuery({
query: props.query,
page: 1,
sort: sortOption,
filter,
}),
};
}
const sortingButtons = SORTING_TYPES.map(types => {
return (
<Button
key={types}
elementType="link"
to={getNextLocation(types)}
variant="text"
color="black"
onClick={() => {
trackSorting(types);
props.dispatch(setActiveFilterButton(null));
}}
fullWidth
>
<span style={{ textAlign: 'left' }}>{getSortText(types)}</span>
</Button>
);
});
return (
<div ref={anchorEl}>
<FilterButton
onClick={() => {
if (props.isActive) {
props.dispatch(setActiveFilterButton(null));
} else {
props.dispatch(setActiveFilterButton(FILTER_BUTTON_TYPE.SORTING));
}
}}
content={getSortText(props.sorting)}
isActive={props.isActive}
selected={false}
/>
<Popover
onClose={() => {
if (props.isActive) {
props.dispatch(setActiveFilterButton(null));
}
}}
anchorOrigin={{
vertical: 'bottom',
horizontal: 'left',
}}
transformOrigin={{
vertical: 'top',
horizontal: 'left',
}}
elevation={0}
transitionDuration={150}
classes={{
paper: s.dropBoxWrapper,
}}
open={props.isActive}
anchorEl={anchorEl.current}
>
{sortingButtons}
</Popover>
</div>
);
});
function mapStateToProps(state: AppState) {
return {
query: state.articleSearch.searchInput,
sorting: state.searchFilterState.sorting,
isActive: state.searchFilterState.activeButton === FILTER_BUTTON_TYPE.SORTING,
};
}
export default withRouter(connect(mapStateToProps)(withStyles<typeof SortingDropdown>(s)(SortingDropdown))); | import { Dispatch } from 'redux';
import { connect } from 'react-redux';
import { RouteComponentProps, withRouter } from 'react-router-dom';
import Popover from '@material-ui/core/Popover';
import { Button } from '@pluto_network/pluto-design-elements'; | random_line_split |
index.tsx | import React from 'react';
import { parse } from 'qs';
import { Dispatch } from 'redux';
import { connect } from 'react-redux';
import { RouteComponentProps, withRouter } from 'react-router-dom';
import Popover from '@material-ui/core/Popover';
import { Button } from '@pluto_network/pluto-design-elements';
import { withStyles } from '../../helpers/withStylesHelper';
import { setActiveFilterButton } from '../../actions/searchFilter';
import { SearchActions } from '../../actions/actionTypes';
import FilterButton, { FILTER_BUTTON_TYPE } from '../filterButton';
import { AppState } from '../../reducers';
import SearchQueryManager from '../../helpers/searchQueryManager';
import ActionTicketManager from '../../helpers/actionTicketManager';
const s = require('./sortingDropdown.scss');
const SORTING_TYPES: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS[] = ['RELEVANCE', 'NEWEST_FIRST', 'MOST_CITATIONS'];
interface SortingDropdownProps {
dispatch: Dispatch<SearchActions>;
}
function trackSorting(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
ActionTicketManager.trackTicket({
pageType: 'searchResult',
actionType: 'fire',
actionArea: 'sortBar',
actionTag: 'paperSorting',
actionLabel: sortOption,
});
}
function getSortText(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
switch (sortOption) {
case 'RELEVANCE': {
return 'Relevance';
}
case 'NEWEST_FIRST': {
return 'Newest';
}
case 'MOST_CITATIONS': {
return 'Most Citations';
}
default:
return 'Relevance';
}
}
const SortingDropdown: React.FC<
SortingDropdownProps & ReturnType<typeof mapStateToProps> & RouteComponentProps
> = React.memo(props => {
const anchorEl = React.useRef(null);
const queryParams = parse(location.search, { ignoreQueryPrefix: true });
const filter = SearchQueryManager.objectifyPaperFilter(queryParams.filter);
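  // Changing the sort keeps the current query and filter, but resets pagination back to page 1.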
function | (sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
return {
pathname: '/search',
search: SearchQueryManager.stringifyPapersQuery({
query: props.query,
page: 1,
sort: sortOption,
filter,
}),
};
}
const sortingButtons = SORTING_TYPES.map(types => {
return (
<Button
key={types}
elementType="link"
to={getNextLocation(types)}
variant="text"
color="black"
onClick={() => {
trackSorting(types);
props.dispatch(setActiveFilterButton(null));
}}
fullWidth
>
<span style={{ textAlign: 'left' }}>{getSortText(types)}</span>
</Button>
);
});
return (
<div ref={anchorEl}>
<FilterButton
onClick={() => {
if (props.isActive) {
props.dispatch(setActiveFilterButton(null));
} else {
props.dispatch(setActiveFilterButton(FILTER_BUTTON_TYPE.SORTING));
}
}}
content={getSortText(props.sorting)}
isActive={props.isActive}
selected={false}
/>
<Popover
onClose={() => {
if (props.isActive) {
props.dispatch(setActiveFilterButton(null));
}
}}
anchorOrigin={{
vertical: 'bottom',
horizontal: 'left',
}}
transformOrigin={{
vertical: 'top',
horizontal: 'left',
}}
elevation={0}
transitionDuration={150}
classes={{
paper: s.dropBoxWrapper,
}}
open={props.isActive}
anchorEl={anchorEl.current}
>
{sortingButtons}
</Popover>
</div>
);
});
function mapStateToProps(state: AppState) {
return {
query: state.articleSearch.searchInput,
sorting: state.searchFilterState.sorting,
isActive: state.searchFilterState.activeButton === FILTER_BUTTON_TYPE.SORTING,
};
}
export default withRouter(connect(mapStateToProps)(withStyles<typeof SortingDropdown>(s)(SortingDropdown)));
| getNextLocation | identifier_name |
index.tsx | import React from 'react';
import { parse } from 'qs';
import { Dispatch } from 'redux';
import { connect } from 'react-redux';
import { RouteComponentProps, withRouter } from 'react-router-dom';
import Popover from '@material-ui/core/Popover';
import { Button } from '@pluto_network/pluto-design-elements';
import { withStyles } from '../../helpers/withStylesHelper';
import { setActiveFilterButton } from '../../actions/searchFilter';
import { SearchActions } from '../../actions/actionTypes';
import FilterButton, { FILTER_BUTTON_TYPE } from '../filterButton';
import { AppState } from '../../reducers';
import SearchQueryManager from '../../helpers/searchQueryManager';
import ActionTicketManager from '../../helpers/actionTicketManager';
const s = require('./sortingDropdown.scss');
const SORTING_TYPES: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS[] = ['RELEVANCE', 'NEWEST_FIRST', 'MOST_CITATIONS'];
interface SortingDropdownProps {
dispatch: Dispatch<SearchActions>;
}
function trackSorting(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
ActionTicketManager.trackTicket({
pageType: 'searchResult',
actionType: 'fire',
actionArea: 'sortBar',
actionTag: 'paperSorting',
actionLabel: sortOption,
});
}
function getSortText(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) |
const SortingDropdown: React.FC<
SortingDropdownProps & ReturnType<typeof mapStateToProps> & RouteComponentProps
> = React.memo(props => {
const anchorEl = React.useRef(null);
const queryParams = parse(location.search, { ignoreQueryPrefix: true });
const filter = SearchQueryManager.objectifyPaperFilter(queryParams.filter);
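  // Changing the sort keeps the current query and filter, but resets pagination back to page 1.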
function getNextLocation(sortOption: Scinapse.ArticleSearch.SEARCH_SORT_OPTIONS) {
return {
pathname: '/search',
search: SearchQueryManager.stringifyPapersQuery({
query: props.query,
page: 1,
sort: sortOption,
filter,
}),
};
}
const sortingButtons = SORTING_TYPES.map(types => {
return (
<Button
key={types}
elementType="link"
to={getNextLocation(types)}
variant="text"
color="black"
onClick={() => {
trackSorting(types);
props.dispatch(setActiveFilterButton(null));
}}
fullWidth
>
<span style={{ textAlign: 'left' }}>{getSortText(types)}</span>
</Button>
);
});
return (
<div ref={anchorEl}>
<FilterButton
onClick={() => {
if (props.isActive) {
props.dispatch(setActiveFilterButton(null));
} else {
props.dispatch(setActiveFilterButton(FILTER_BUTTON_TYPE.SORTING));
}
}}
content={getSortText(props.sorting)}
isActive={props.isActive}
selected={false}
/>
<Popover
onClose={() => {
if (props.isActive) {
props.dispatch(setActiveFilterButton(null));
}
}}
anchorOrigin={{
vertical: 'bottom',
horizontal: 'left',
}}
transformOrigin={{
vertical: 'top',
horizontal: 'left',
}}
elevation={0}
transitionDuration={150}
classes={{
paper: s.dropBoxWrapper,
}}
open={props.isActive}
anchorEl={anchorEl.current}
>
{sortingButtons}
</Popover>
</div>
);
});
function mapStateToProps(state: AppState) {
return {
query: state.articleSearch.searchInput,
sorting: state.searchFilterState.sorting,
isActive: state.searchFilterState.activeButton === FILTER_BUTTON_TYPE.SORTING,
};
}
export default withRouter(connect(mapStateToProps)(withStyles<typeof SortingDropdown>(s)(SortingDropdown)));
| {
switch (sortOption) {
case 'RELEVANCE': {
return 'Relevance';
}
case 'NEWEST_FIRST': {
return 'Newest';
}
case 'MOST_CITATIONS': {
return 'Most Citations';
}
default:
return 'Relevance';
}
} | identifier_body |
roman_numerals.py | # https://www.codeeval.com/open_challenges/106/
import sys
from collections import namedtuple
test_cases = open(sys.argv[1], 'r')
# test_cases = open('roman_numerals.txt', 'r')
test_lines = (line.rstrip() for line in test_cases)
def num_to_components(num):
num_comp = namedtuple('NumComponents', ('thousands', 'hundreds', 'tens', 'singles'))
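    # e.g. 1987 -> thousands=1, hundreds=9, tens=8, singles=7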
thousands = int(num/1000)
hundreds = int((num - 1000*thousands)/100)
tens = int((num - 1000*thousands - 100*hundreds)/10)
singles = int(num - 1000*thousands - 100*hundreds - 10*tens)
return num_comp(thousands=thousands, hundreds=hundreds, tens=tens, singles=singles)
def to_roman(num_components):
|
if __name__ == '__main__':
for test in test_lines:
components = num_to_components(int(test))
to_roman(components)
test_cases.close()
| r_thousands = 'M'*num_components.thousands
r_hundreds = ''
r_tens = ''
r_singles = ''
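    # Each digit uses subtractive notation: 4 -> IV/XL/CD, 9 -> IX/XC/CM; 6-8 -> V/L/D plus repeated units.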
# for hundreds
if num_components.hundreds == 4:
r_hundreds = 'CD'
elif num_components.hundreds == 9:
r_hundreds = 'CM'
elif num_components.hundreds == 5:
r_hundreds = 'D'
elif num_components.hundreds <= 3:
r_hundreds = 'C'*num_components.hundreds
elif num_components.hundreds in range(6, 9):
r_hundreds = 'D' + 'C' * (num_components.hundreds - 5)
    # for tens
if num_components.tens == 4:
r_tens = 'XL'
elif num_components.tens == 9:
r_tens = 'XC'
elif num_components.tens == 5:
r_tens = 'L'
elif num_components.tens <= 3:
r_tens = 'X'*num_components.tens
elif num_components.tens in range(6, 9):
r_tens = 'L' + 'X' * (num_components.tens - 5)
# for singles
if num_components.singles == 4:
r_singles = 'IV'
elif num_components.singles == 9:
r_singles = 'IX'
elif num_components.singles == 5:
r_singles = 'V'
elif num_components.singles <= 3:
r_singles = 'I'*num_components.singles
elif num_components.singles in range(6, 9):
r_singles = 'V' + 'I' * (num_components.singles - 5)
roman_num = r_thousands + r_hundreds + r_tens + r_singles
print(roman_num) | identifier_body |
roman_numerals.py | # https://www.codeeval.com/open_challenges/106/
import sys
from collections import namedtuple
test_cases = open(sys.argv[1], 'r')
# test_cases = open('roman_numerals.txt', 'r')
test_lines = (line.rstrip() for line in test_cases)
def num_to_components(num):
num_comp = namedtuple('NumComponents', ('thousands', 'hundreds', 'tens', 'singles'))
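    # e.g. 1987 -> thousands=1, hundreds=9, tens=8, singles=7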
thousands = int(num/1000)
hundreds = int((num - 1000*thousands)/100)
tens = int((num - 1000*thousands - 100*hundreds)/10)
singles = int(num - 1000*thousands - 100*hundreds - 10*tens)
return num_comp(thousands=thousands, hundreds=hundreds, tens=tens, singles=singles)
def to_roman(num_components):
r_thousands = 'M'*num_components.thousands
r_hundreds = ''
r_tens = ''
r_singles = ''
# for hundreds
if num_components.hundreds == 4:
r_hundreds = 'CD'
elif num_components.hundreds == 9:
r_hundreds = 'CM'
elif num_components.hundreds == 5:
r_hundreds = 'D'
elif num_components.hundreds <= 3:
r_hundreds = 'C'*num_components.hundreds
elif num_components.hundreds in range(6, 9):
r_hundreds = 'D' + 'C' * (num_components.hundreds - 5)
    # for tens
if num_components.tens == 4:
r_tens = 'XL'
elif num_components.tens == 9: | elif num_components.tens in range(6, 9):
r_tens = 'L' + 'X' * (num_components.tens - 5)
# for singles
if num_components.singles == 4:
r_singles = 'IV'
elif num_components.singles == 9:
r_singles = 'IX'
elif num_components.singles == 5:
r_singles = 'V'
elif num_components.singles <= 3:
r_singles = 'I'*num_components.singles
elif num_components.singles in range(6, 9):
r_singles = 'V' + 'I' * (num_components.singles - 5)
roman_num = r_thousands + r_hundreds + r_tens + r_singles
print(roman_num)
if __name__ == '__main__':
for test in test_lines:
components = num_to_components(int(test))
to_roman(components)
test_cases.close() | r_tens = 'XC'
elif num_components.tens == 5:
r_tens = 'L'
elif num_components.tens <= 3:
r_tens = 'X'*num_components.tens | random_line_split |
roman_numerals.py | # https://www.codeeval.com/open_challenges/106/
import sys
from collections import namedtuple
test_cases = open(sys.argv[1], 'r')
# test_cases = open('roman_numerals.txt', 'r')
test_lines = (line.rstrip() for line in test_cases)
def num_to_components(num):
num_comp = namedtuple('NumComponents', ('thousands', 'hundreds', 'tens', 'singles'))
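    # e.g. 1987 -> thousands=1, hundreds=9, tens=8, singles=7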
thousands = int(num/1000)
hundreds = int((num - 1000*thousands)/100)
tens = int((num - 1000*thousands - 100*hundreds)/10)
singles = int(num - 1000*thousands - 100*hundreds - 10*tens)
return num_comp(thousands=thousands, hundreds=hundreds, tens=tens, singles=singles)
def | (num_components):
r_thousands = 'M'*num_components.thousands
r_hundreds = ''
r_tens = ''
r_singles = ''
# for hundreds
if num_components.hundreds == 4:
r_hundreds = 'CD'
elif num_components.hundreds == 9:
r_hundreds = 'CM'
elif num_components.hundreds == 5:
r_hundreds = 'D'
elif num_components.hundreds <= 3:
r_hundreds = 'C'*num_components.hundreds
elif num_components.hundreds in range(6, 9):
r_hundreds = 'D' + 'C' * (num_components.hundreds - 5)
    # for tens
if num_components.tens == 4:
r_tens = 'XL'
elif num_components.tens == 9:
r_tens = 'XC'
elif num_components.tens == 5:
r_tens = 'L'
elif num_components.tens <= 3:
r_tens = 'X'*num_components.tens
elif num_components.tens in range(6, 9):
r_tens = 'L' + 'X' * (num_components.tens - 5)
# for singles
if num_components.singles == 4:
r_singles = 'IV'
elif num_components.singles == 9:
r_singles = 'IX'
elif num_components.singles == 5:
r_singles = 'V'
elif num_components.singles <= 3:
r_singles = 'I'*num_components.singles
elif num_components.singles in range(6, 9):
r_singles = 'V' + 'I' * (num_components.singles - 5)
roman_num = r_thousands + r_hundreds + r_tens + r_singles
print(roman_num)
if __name__ == '__main__':
for test in test_lines:
components = num_to_components(int(test))
to_roman(components)
test_cases.close()
| to_roman | identifier_name |
roman_numerals.py | # https://www.codeeval.com/open_challenges/106/
import sys
from collections import namedtuple
test_cases = open(sys.argv[1], 'r')
# test_cases = open('roman_numerals.txt', 'r')
test_lines = (line.rstrip() for line in test_cases)
def num_to_components(num):
num_comp = namedtuple('NumComponents', ('thousands', 'hundreds', 'tens', 'singles'))
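    # e.g. 1987 -> thousands=1, hundreds=9, tens=8, singles=7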
thousands = int(num/1000)
hundreds = int((num - 1000*thousands)/100)
tens = int((num - 1000*thousands - 100*hundreds)/10)
singles = int(num - 1000*thousands - 100*hundreds - 10*tens)
return num_comp(thousands=thousands, hundreds=hundreds, tens=tens, singles=singles)
def to_roman(num_components):
r_thousands = 'M'*num_components.thousands
r_hundreds = ''
r_tens = ''
r_singles = ''
# for hundreds
if num_components.hundreds == 4:
r_hundreds = 'CD'
elif num_components.hundreds == 9:
r_hundreds = 'CM'
elif num_components.hundreds == 5:
r_hundreds = 'D'
elif num_components.hundreds <= 3:
r_hundreds = 'C'*num_components.hundreds
elif num_components.hundreds in range(6, 9):
r_hundreds = 'D' + 'C' * (num_components.hundreds - 5)
    # for tens
if num_components.tens == 4:
r_tens = 'XL'
elif num_components.tens == 9:
r_tens = 'XC'
elif num_components.tens == 5:
r_tens = 'L'
elif num_components.tens <= 3:
r_tens = 'X'*num_components.tens
elif num_components.tens in range(6, 9):
r_tens = 'L' + 'X' * (num_components.tens - 5)
# for singles
if num_components.singles == 4:
r_singles = 'IV'
elif num_components.singles == 9:
r_singles = 'IX'
elif num_components.singles == 5:
r_singles = 'V'
elif num_components.singles <= 3:
r_singles = 'I'*num_components.singles
elif num_components.singles in range(6, 9):
r_singles = 'V' + 'I' * (num_components.singles - 5)
roman_num = r_thousands + r_hundreds + r_tens + r_singles
print(roman_num)
if __name__ == '__main__':
for test in test_lines:
|
test_cases.close()
| components = num_to_components(int(test))
to_roman(components) | conditional_block |
KIconDialog.py | # encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KIconDialog(__PyKDE4_kdeui.KDialog):
# no doc
def getIcon(self, *args, **kwargs): # real signature unknown
pass
def iconSize(self, *args, **kwargs): # real signature unknown
pass
def newIconName(self, *args, **kwargs): # real signature unknown
pass
def openDialog(self, *args, **kwargs): # real signature unknown
pass
def setCustomLocation(self, *args, **kwargs): # real signature unknown
pass
def setIconSize(self, *args, **kwargs): # real signature unknown
pass
def setStrictIconSize(self, *args, **kwargs): # real signature unknown
pass
def setup(self, *args, **kwargs): # real signature unknown
pass
def showDialog(self, *args, **kwargs): # real signature unknown
pass
def slotOk(self, *args, **kwargs): # real signature unknown
|
def strictIconSize(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
| pass | identifier_body |
KIconDialog.py | # encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class | (__PyKDE4_kdeui.KDialog):
# no doc
def getIcon(self, *args, **kwargs): # real signature unknown
pass
def iconSize(self, *args, **kwargs): # real signature unknown
pass
def newIconName(self, *args, **kwargs): # real signature unknown
pass
def openDialog(self, *args, **kwargs): # real signature unknown
pass
def setCustomLocation(self, *args, **kwargs): # real signature unknown
pass
def setIconSize(self, *args, **kwargs): # real signature unknown
pass
def setStrictIconSize(self, *args, **kwargs): # real signature unknown
pass
def setup(self, *args, **kwargs): # real signature unknown
pass
def showDialog(self, *args, **kwargs): # real signature unknown
pass
def slotOk(self, *args, **kwargs): # real signature unknown
pass
def strictIconSize(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
| KIconDialog | identifier_name |
KIconDialog.py | # encoding: utf-8
# module PyKDE4.kio | # by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KIconDialog(__PyKDE4_kdeui.KDialog):
# no doc
def getIcon(self, *args, **kwargs): # real signature unknown
pass
def iconSize(self, *args, **kwargs): # real signature unknown
pass
def newIconName(self, *args, **kwargs): # real signature unknown
pass
def openDialog(self, *args, **kwargs): # real signature unknown
pass
def setCustomLocation(self, *args, **kwargs): # real signature unknown
pass
def setIconSize(self, *args, **kwargs): # real signature unknown
pass
def setStrictIconSize(self, *args, **kwargs): # real signature unknown
pass
def setup(self, *args, **kwargs): # real signature unknown
pass
def showDialog(self, *args, **kwargs): # real signature unknown
pass
def slotOk(self, *args, **kwargs): # real signature unknown
pass
def strictIconSize(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass | # from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so | random_line_split |
karma.conf.js | // Karma configuration
// Generated on Thu Jul 03 2014 13:23:26 GMT+0530 (India Standard Time)
module.exports = function(config) {
config.set({
// base path that will be used to resolve all patterns (eg. files, exclude)
basePath: '',
// frameworks to use
// available frameworks: https://npmjs.org/browse/keyword/karma-adapter
frameworks: ['jasmine'],
// list of files / patterns to load in the browser
files: [
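      // Load the built library first, then the spec files that exercise it.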
'dist/gainda.js',
'test/spec/*.js'
],
// list of files to exclude
exclude: [
],
// preprocess matching files before serving them to the browser
// available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
preprocessors: {
},
// test results reporter to use
// possible values: 'dots', 'progress'
// available reporters: https://npmjs.org/browse/keyword/karma-reporter
reporters: ['progress'],
// web server port
port: 9876,
// enable / disable colors in the output (reporters and logs)
colors: true,
// level of logging
// possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
logLevel: config.LOG_INFO,
// enable / disable watching file and executing tests whenever any file changes
autoWatch: true, |
// Continuous Integration mode
// if true, Karma captures browsers, runs the tests and exits
singleRun: false
});
}; |
// start these browsers
// available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
browsers: ['Chrome'], | random_line_split |
TypeTextAtCefTest.ts | import { Keys } from '@ephox/agar';
import { describe, it } from '@ephox/bedrock-client';
import { TinyAssertions, TinyContentActions, TinyHooks, TinySelections } from '@ephox/mcagar';
import Editor from 'tinymce/core/api/Editor';
import Theme from 'tinymce/themes/silver/Theme';
describe('browser.tinymce.core.keyboard.TypeTextAtCef', () => {
const hook = TinyHooks.bddSetupLight<Editor>({
add_unload_trigger: false,
base_url: '/project/tinymce/js/tinymce'
}, [ Theme ], true);
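  // Each case places the caret beside a contenteditable="false" (cef) element, types,
  // and then asserts the resulting caret position and content.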
it('Type text before cef inline element', () => {
const editor = hook.editor();
editor.setContent('<p><span contenteditable="false">a</span></p>');
TinySelections.select(editor, 'p', [ 1 ]);
TinyContentActions.keystroke(editor, Keys.left());
TinyContentActions.type(editor, 'bc'); | it('Type after cef inline element', () => {
const editor = hook.editor();
editor.setContent('<p><span contenteditable="false">a</span></p>');
TinySelections.select(editor, 'p', [ 1 ]);
TinyContentActions.keystroke(editor, Keys.right());
TinyContentActions.type(editor, 'bc');
TinyAssertions.assertCursor(editor, [ 0, 1 ], 3);
TinyAssertions.assertContent(editor, '<p><span contenteditable="false">a</span>bc</p>');
});
it('Type between cef inline elements', () => {
const editor = hook.editor();
editor.setContent('<p><span contenteditable="false">a</span> <span contenteditable="false">b</span></p>');
TinySelections.select(editor, 'p', [ 3 ]);
TinyContentActions.keystroke(editor, Keys.left());
TinyContentActions.keystroke(editor, Keys.left());
TinyContentActions.type(editor, 'bc');
TinyAssertions.assertSelection(editor, [ 0, 1 ], 3, [ 0, 1 ], 3);
TinyAssertions.assertContent(editor, '<p><span contenteditable="false">a</span>bc <span contenteditable="false">b</span></p>');
});
}); | TinyAssertions.assertCursor(editor, [ 0, 0 ], 2);
TinyAssertions.assertContent(editor, '<p>bc<span contenteditable="false">a</span></p>');
});
| random_line_split |
process.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // this module itself is essentially deprecated
use prelude::v1::*;
use self::Req::*;
use collections::HashMap;
use ffi::CString;
use hash::Hash;
use old_io::process::{ProcessExit, ExitStatus, ExitSignal};
use old_io::{IoResult, EndOfFile};
use libc::{self, pid_t, c_void, c_int};
use mem;
use os;
use old_path::BytesContainer;
use ptr;
use sync::mpsc::{channel, Sender, Receiver};
use sys::fs::FileDesc;
use sys::{self, retry, c, wouldblock, set_nonblocking, ms_to_timeval};
use sys_common::helper_thread::Helper;
use sys_common::{AsInner, mkerr_libc, timeout};
pub use sys_common::ProcessConfig;
helper_init! { static HELPER: Helper<Req> }
/// The unique id of the process (this should never be negative).
pub struct Process {
pub pid: pid_t
}
enum Req {
NewChild(libc::pid_t, Sender<ProcessExit>, u64),
}
const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX";
impl Process {
pub fn id(&self) -> pid_t {
self.pid
}
pub unsafe fn kill(&self, signal: int) -> IoResult<()> {
Process::killpid(self.pid, signal)
}
pub unsafe fn killpid(pid: pid_t, signal: int) -> IoResult<()> {
let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
mkerr_libc(r)
}
pub fn spawn<K, V, C, P>(cfg: &C, in_fd: Option<P>,
out_fd: Option<P>, err_fd: Option<P>)
-> IoResult<Process>
where C: ProcessConfig<K, V>, P: AsInner<FileDesc>,
K: BytesContainer + Eq + Hash, V: BytesContainer
{
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
mod rustrt {
extern {
pub fn rust_unset_sigprocmask();
}
}
unsafe fn set_cloexec(fd: c_int) {
let ret = c::ioctl(fd, c::FIOCLEX);
assert_eq!(ret, 0);
}
#[cfg(all(target_os = "android", target_arch = "aarch64"))]
unsafe fn getdtablesize() -> c_int {
libc::sysconf(libc::consts::os::sysconf::_SC_OPEN_MAX) as c_int
}
#[cfg(not(all(target_os = "android", target_arch = "aarch64")))]
unsafe fn getdtablesize() -> c_int {
libc::funcs::bsd44::getdtablesize()
}
let dirp = cfg.cwd().map(|c| c.as_ptr()).unwrap_or(ptr::null());
// temporary until unboxed closures land
let cfg = unsafe {
mem::transmute::<&ProcessConfig<K,V>,&'static ProcessConfig<K,V>>(cfg)
};
with_envp(cfg.env(), move|envp: *const c_void| {
with_argv(cfg.program(), cfg.args(), move|argv: *const *const libc::c_char| unsafe {
let (input, mut output) = try!(sys::os::pipe());
// We may use this in the child, so perform allocations before the
// fork
let devnull = b"/dev/null\0";
set_cloexec(output.fd());
let pid = fork();
if pid < 0 {
return Err(super::last_error())
} else if pid > 0 {
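                // Reassemble four big-endian bytes into an i32; used to decode the
                // errno and footer the child writes on exec failure.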
#[inline]
fn | (arr: &[u8]) -> i32 {
let a = arr[0] as u32;
let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
            // threads do *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc of free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) -> ! {
let errno = sys::os::errno() as u32;
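                // Encode errno as four big-endian bytes followed by the CLOEXEC footer
                // so the parent can validate and decode it.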
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
                    // child now and all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src != -1 && retry(|| dup2(src, dst)) != -1
};
if !setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if !setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if !setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd != output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => {
if libc::setgid(u as libc::gid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if !dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if !envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
                        // still have active waiters for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
break 'outer;
}
Err(TryRecvError::Empty) => break,
}
}
}
// If a child exited (somehow received SIGCHLD), then poll all
// children to see if any of them exited.
//
// We also attempt to be responsible netizens when dealing with
// SIGCHLD by invoking any previous SIGCHLD handler instead of just
// ignoring any previous SIGCHLD handler. Note that we don't provide
// a 1:1 mapping of our handler invocations to the previous handler
// invocations because we drain the `read_fd` entirely. This is
// probably OK because the kernel is already allowed to coalesce
// simultaneous signals, we're just doing some extra coalescing.
//
// Another point of note is that this likely runs the signal handler
// on a different thread than the one that received the signal. I
// *think* this is ok at this time.
//
// The main reason for doing this is to allow stdtest to run native
// tests as well. Both libgreen and libnative are running around
// with process timeouts, but libgreen should get there first
// (currently libuv doesn't handle old signal handlers).
if drain(read_fd) {
let i: uint = unsafe { mem::transmute(old.sa_handler) };
if i != 0 {
assert!(old.sa_flags & c::SA_SIGINFO == 0);
(old.sa_handler)(c::SIGCHLD);
}
// FIXME: sure would be nice to not have to scan the entire
// array...
active.retain(|&(pid, ref tx, _)| {
let pr = Process { pid: pid };
match pr.try_wait() {
Some(msg) => { tx.send(msg).unwrap(); false }
None => true,
}
});
}
}
// Once this helper thread is done, we re-register the old sigchld
// handler and close our intermediate file descriptors.
unsafe {
assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::null_mut()), 0);
let _ = libc::close(read_fd);
let _ = libc::close(WRITE_FD);
WRITE_FD = -1;
}
}
// Drain all pending data from the file descriptor, returning if any data
// could be drained. This requires that the file descriptor is in
// nonblocking mode.
fn drain(fd: libc::c_int) -> bool {
let mut ret = false;
loop {
let mut buf = [0u8; 1];
match unsafe {
libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t)
} {
n if n > 0 => { ret = true; }
0 => return true,
-1 if wouldblock() => return ret,
n => panic!("bad read {:?} ({:?})", os::last_os_error(), n),
}
}
}
// Signal handler for SIGCHLD signals, must be async-signal-safe!
//
// This function will write to the writing half of the "self pipe" to wake
// up the helper thread if it's waiting. Note that this write must be
// nonblocking because if it blocks and the reader is the thread we
// interrupted, then we'll deadlock.
//
// When writing, if the write returns EWOULDBLOCK then we choose to ignore
// it. At that point we're guaranteed that there's something in the pipe
// which will wake up the other end at some point, so we just allow this
// signal to be coalesced with the pending signals on the pipe.
extern fn sigchld_handler(_signum: libc::c_int) {
let msg = 1;
match unsafe {
libc::write(WRITE_FD, &msg as *const _ as *const libc::c_void, 1)
} {
1 => {}
-1 if wouldblock() => {} // see above comments
n => panic!("bad error on write fd: {:?} {:?}", n, os::errno()),
}
}
}
pub fn try_wait(&self) -> Option<ProcessExit> {
let mut status = 0 as c_int;
match retry(|| unsafe {
c::waitpid(self.pid, &mut status, c::WNOHANG)
}) {
n if n == self.pid => Some(translate_status(status)),
0 => None,
n => panic!("unknown waitpid error `{:?}`: {:?}", n,
super::last_error()),
}
}
}
fn with_argv<T,F>(prog: &CString, args: &[CString],
cb: F)
-> T
where F : FnOnce(*const *const libc::c_char) -> T
{
let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1);
// Convert the CStrings into an array of pointers. Note: the
// lifetime of the various CStrings involved is guaranteed to be
// larger than the lifetime of our invocation of cb, but this is
// technically unsafe as the callback could leak these pointers
// out of our scope.
ptrs.push(prog.as_ptr());
ptrs.extend(args.iter().map(|tmp| tmp.as_ptr()));
// Add a terminating null pointer (required by libc).
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
fn with_envp<K,V,T,F>(env: Option<&HashMap<K, V>>,
cb: F)
-> T
where F : FnOnce(*const c_void) -> T,
K : BytesContainer + Eq + Hash,
V : BytesContainer
{
// On posixy systems we can pass a char** for envp, which is a
// null-terminated array of "k=v\0" strings. Since we must create
// these strings locally, yet expose a raw pointer to them, we
// create a temporary vector to own the CStrings that outlives the
// call to cb.
match env {
Some(env) => {
let mut tmps = Vec::with_capacity(env.len());
for pair in env {
let mut kv = Vec::new();
kv.push_all(pair.0.container_as_bytes());
kv.push('=' as u8);
kv.push_all(pair.1.container_as_bytes());
kv.push(0); // terminating null
tmps.push(kv);
}
// As with `with_argv`, this is unsafe, since cb could leak the pointers.
let mut ptrs: Vec<*const libc::c_char> =
tmps.iter()
.map(|tmp| tmp.as_ptr() as *const libc::c_char)
.collect();
ptrs.push(ptr::null());
cb(ptrs.as_ptr() as *const c_void)
}
_ => cb(ptr::null())
}
}
fn translate_status(status: c_int) -> ProcessExit {
#![allow(non_snake_case)]
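    // Hand-rolled equivalents of the C WIFEXITED/WEXITSTATUS/WTERMSIG macros,
    // whose bit layouts differ slightly between platforms.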
#[cfg(any(target_os = "linux", target_os = "android"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff }
pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f }
}
#[cfg(any(target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 }
pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 }
}
if imp::WIFEXITED(status) {
ExitStatus(imp::WEXITSTATUS(status) as int)
} else {
ExitSignal(imp::WTERMSIG(status) as int)
}
}
| combine | identifier_name |
process.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // this module itself is essentially deprecated
use prelude::v1::*;
use self::Req::*;
use collections::HashMap;
use ffi::CString;
use hash::Hash;
use old_io::process::{ProcessExit, ExitStatus, ExitSignal};
use old_io::{IoResult, EndOfFile};
use libc::{self, pid_t, c_void, c_int};
use mem;
use os;
use old_path::BytesContainer;
use ptr;
use sync::mpsc::{channel, Sender, Receiver};
use sys::fs::FileDesc;
use sys::{self, retry, c, wouldblock, set_nonblocking, ms_to_timeval};
use sys_common::helper_thread::Helper;
use sys_common::{AsInner, mkerr_libc, timeout};
pub use sys_common::ProcessConfig;
helper_init! { static HELPER: Helper<Req> }
/// The unique id of the process (this should never be negative).
pub struct Process {
pub pid: pid_t
}
enum Req {
NewChild(libc::pid_t, Sender<ProcessExit>, u64),
}
const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX";
impl Process {
pub fn id(&self) -> pid_t {
self.pid
}
pub unsafe fn kill(&self, signal: int) -> IoResult<()> {
Process::killpid(self.pid, signal)
}
pub unsafe fn killpid(pid: pid_t, signal: int) -> IoResult<()> |
pub fn spawn<K, V, C, P>(cfg: &C, in_fd: Option<P>,
out_fd: Option<P>, err_fd: Option<P>)
-> IoResult<Process>
where C: ProcessConfig<K, V>, P: AsInner<FileDesc>,
K: BytesContainer + Eq + Hash, V: BytesContainer
{
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
mod rustrt {
extern {
pub fn rust_unset_sigprocmask();
}
}
unsafe fn set_cloexec(fd: c_int) {
let ret = c::ioctl(fd, c::FIOCLEX);
assert_eq!(ret, 0);
}
#[cfg(all(target_os = "android", target_arch = "aarch64"))]
unsafe fn getdtablesize() -> c_int {
libc::sysconf(libc::consts::os::sysconf::_SC_OPEN_MAX) as c_int
}
#[cfg(not(all(target_os = "android", target_arch = "aarch64")))]
unsafe fn getdtablesize() -> c_int {
libc::funcs::bsd44::getdtablesize()
}
let dirp = cfg.cwd().map(|c| c.as_ptr()).unwrap_or(ptr::null());
// temporary until unboxed closures land
let cfg = unsafe {
mem::transmute::<&ProcessConfig<K,V>,&'static ProcessConfig<K,V>>(cfg)
};
with_envp(cfg.env(), move|envp: *const c_void| {
with_argv(cfg.program(), cfg.args(), move|argv: *const *const libc::c_char| unsafe {
let (input, mut output) = try!(sys::os::pipe());
// We may use this in the child, so perform allocations before the
// fork
let devnull = b"/dev/null\0";
set_cloexec(output.fd());
let pid = fork();
if pid < 0 {
return Err(super::last_error())
} else if pid > 0 {
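                // Reassemble four big-endian bytes into an i32; used to decode the
                // errno and footer the child writes on exec failure.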
#[inline]
fn combine(arr: &[u8]) -> i32 {
let a = arr[0] as u32;
let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
            // threads do *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc of free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) -> ! {
let errno = sys::os::errno() as u32;
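                // Encode errno as four big-endian bytes followed by the CLOEXEC footer
                // so the parent can validate and decode it.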
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
                    // child now and all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src != -1 && retry(|| dup2(src, dst)) != -1
};
if !setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if !setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if !setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd != output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => {
if libc::setgid(u as libc::gid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if !dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if !envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
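// Both ends are nonblocking on purpose: the signal handler must never
// block in write(), and the helper drains the read end until the read
// would block.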
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
// still have active waiters for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
break 'outer;
}
Err(TryRecvError::Empty) => break,
}
}
}
// If a child exited (somehow received SIGCHLD), then poll all
// children to see if any of them exited.
//
// We also attempt to be responsible netizens when dealing with
// SIGCHLD by invoking any previous SIGCHLD handler instead of just
// ignoring any previous SIGCHLD handler. Note that we don't provide
// a 1:1 mapping of our handler invocations to the previous handler
// invocations because we drain the `read_fd` entirely. This is
// probably OK because the kernel is already allowed to coalesce
// simultaneous signals, we're just doing some extra coalescing.
//
// Another point of note is that this likely runs the signal handler
// on a different thread than the one that received the signal. I
// *think* this is ok at this time.
//
// The main reason for doing this is to allow stdtest to run native
// tests as well. Both libgreen and libnative are running around
// with process timeouts, but libgreen should get there first
// (currently libuv doesn't handle old signal handlers).
if drain(read_fd) {
let i: uint = unsafe { mem::transmute(old.sa_handler) };
if i != 0 {
assert!(old.sa_flags & c::SA_SIGINFO == 0);
(old.sa_handler)(c::SIGCHLD);
}
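// The old handler is transmuted to an integer so that SIG_DFL (a null
// pointer) is never called as a function; a real handler is only
// forwarded when it was installed without SA_SIGINFO.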
// FIXME: sure would be nice to not have to scan the entire
// array...
active.retain(|&(pid, ref tx, _)| {
let pr = Process { pid: pid };
match pr.try_wait() {
Some(msg) => { tx.send(msg).unwrap(); false }
None => true,
}
});
}
}
// Once this helper thread is done, we re-register the old sigchld
// handler and close our intermediate file descriptors.
unsafe {
assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::null_mut()), 0);
let _ = libc::close(read_fd);
let _ = libc::close(WRITE_FD);
WRITE_FD = -1;
}
}
// Drain all pending data from the file descriptor, returning if any data
// could be drained. This requires that the file descriptor is in
// nonblocking mode.
fn drain(fd: libc::c_int) -> bool {
let mut ret = false;
loop {
let mut buf = [0u8; 1];
match unsafe {
libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t)
} {
n if n > 0 => { ret = true; }
0 => return true,
-1 if wouldblock() => return ret,
n => panic!("bad read {:?} ({:?})", os::last_os_error(), n),
}
}
}
// Signal handler for SIGCHLD signals, must be async-signal-safe!
//
// This function will write to the writing half of the "self pipe" to wake
// up the helper thread if it's waiting. Note that this write must be
// nonblocking because if it blocks and the reader is the thread we
// interrupted, then we'll deadlock.
//
// When writing, if the write returns EWOULDBLOCK then we choose to ignore
// it. At that point we're guaranteed that there's something in the pipe
// which will wake up the other end at some point, so we just allow this
// signal to be coalesced with the pending signals on the pipe.
extern fn sigchld_handler(_signum: libc::c_int) {
let msg = 1;
match unsafe {
libc::write(WRITE_FD, &msg as *const _ as *const libc::c_void, 1)
} {
1 => {}
-1 if wouldblock() => {} // see above comments
n => panic!("bad error on write fd: {:?} {:?}", n, os::errno()),
}
}
}
pub fn try_wait(&self) -> Option<ProcessExit> {
let mut status = 0 as c_int;
match retry(|| unsafe {
c::waitpid(self.pid, &mut status, c::WNOHANG)
}) {
n if n == self.pid => Some(translate_status(status)),
0 => None,
n => panic!("unknown waitpid error `{:?}`: {:?}", n,
super::last_error()),
}
}
}
fn with_argv<T,F>(prog: &CString, args: &[CString],
cb: F)
-> T
where F : FnOnce(*const *const libc::c_char) -> T
{
let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1);
// Convert the CStrings into an array of pointers. Note: the
// lifetime of the various CStrings involved is guaranteed to be
// larger than the lifetime of our invocation of cb, but this is
// technically unsafe as the callback could leak these pointers
// out of our scope.
ptrs.push(prog.as_ptr());
ptrs.extend(args.iter().map(|tmp| tmp.as_ptr()));
// Add a terminating null pointer (required by libc).
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
fn with_envp<K,V,T,F>(env: Option<&HashMap<K, V>>,
cb: F)
-> T
where F : FnOnce(*const c_void) -> T,
K : BytesContainer + Eq + Hash,
V : BytesContainer
{
// On posixy systems we can pass a char** for envp, which is a
// null-terminated array of "k=v\0" strings. Since we must create
// these strings locally, yet expose a raw pointer to them, we
// create a temporary vector to own the CStrings that outlives the
// call to cb.
match env {
Some(env) => {
let mut tmps = Vec::with_capacity(env.len());
for pair in env {
let mut kv = Vec::new();
kv.push_all(pair.0.container_as_bytes());
kv.push('=' as u8);
kv.push_all(pair.1.container_as_bytes());
kv.push(0); // terminating null
tmps.push(kv);
}
// As with `with_argv`, this is unsafe, since cb could leak the pointers.
let mut ptrs: Vec<*const libc::c_char> =
tmps.iter()
.map(|tmp| tmp.as_ptr() as *const libc::c_char)
.collect();
ptrs.push(ptr::null());
cb(ptrs.as_ptr() as *const c_void)
}
_ => cb(ptr::null())
}
}
fn translate_status(status: c_int) -> ProcessExit {
#![allow(non_snake_case)]
#[cfg(any(target_os = "linux", target_os = "android"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff }
pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f }
}
#[cfg(any(target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 }
pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 }
}
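// In both encodings the low bits hold the terminating signal (zero for a
// normal exit) while the exit code sits in the next byte; the helpers
// above just pick those fields apart.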
if imp::WIFEXITED(status) {
ExitStatus(imp::WEXITSTATUS(status) as int)
} else {
ExitSignal(imp::WTERMSIG(status) as int)
}
}
| {
let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
mkerr_libc(r)
} | identifier_body |
process.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // this module itself is essentially deprecated
use prelude::v1::*;
use self::Req::*;
use collections::HashMap;
use ffi::CString;
use hash::Hash;
use old_io::process::{ProcessExit, ExitStatus, ExitSignal};
use old_io::{IoResult, EndOfFile};
use libc::{self, pid_t, c_void, c_int};
use mem;
use os;
use old_path::BytesContainer;
use ptr;
use sync::mpsc::{channel, Sender, Receiver};
use sys::fs::FileDesc;
use sys::{self, retry, c, wouldblock, set_nonblocking, ms_to_timeval};
use sys_common::helper_thread::Helper;
use sys_common::{AsInner, mkerr_libc, timeout};
pub use sys_common::ProcessConfig;
helper_init! { static HELPER: Helper<Req> }
/// The unique id of the process (this should never be negative).
pub struct Process {
pub pid: pid_t
}
enum Req {
NewChild(libc::pid_t, Sender<ProcessExit>, u64),
}
const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX";
impl Process {
pub fn id(&self) -> pid_t {
self.pid
}
pub unsafe fn kill(&self, signal: int) -> IoResult<()> {
Process::killpid(self.pid, signal)
}
pub unsafe fn killpid(pid: pid_t, signal: int) -> IoResult<()> {
let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
mkerr_libc(r)
}
pub fn spawn<K, V, C, P>(cfg: &C, in_fd: Option<P>,
out_fd: Option<P>, err_fd: Option<P>)
-> IoResult<Process>
where C: ProcessConfig<K, V>, P: AsInner<FileDesc>,
K: BytesContainer + Eq + Hash, V: BytesContainer
{
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
mod rustrt {
extern {
pub fn rust_unset_sigprocmask();
}
}
unsafe fn set_cloexec(fd: c_int) {
let ret = c::ioctl(fd, c::FIOCLEX);
assert_eq!(ret, 0);
}
#[cfg(all(target_os = "android", target_arch = "aarch64"))]
unsafe fn getdtablesize() -> c_int {
libc::sysconf(libc::consts::os::sysconf::_SC_OPEN_MAX) as c_int
}
#[cfg(not(all(target_os = "android", target_arch = "aarch64")))]
unsafe fn getdtablesize() -> c_int {
libc::funcs::bsd44::getdtablesize()
}
let dirp = cfg.cwd().map(|c| c.as_ptr()).unwrap_or(ptr::null());
// temporary until unboxed closures land
let cfg = unsafe {
mem::transmute::<&ProcessConfig<K,V>,&'static ProcessConfig<K,V>>(cfg)
};
with_envp(cfg.env(), move|envp: *const c_void| {
with_argv(cfg.program(), cfg.args(), move|argv: *const *const libc::c_char| unsafe {
let (input, mut output) = try!(sys::os::pipe());
// We may use this in the child, so perform allocations before the
// fork
let devnull = b"/dev/null\0";
set_cloexec(output.fd());
let pid = fork();
if pid < 0 {
return Err(super::last_error())
} else if pid > 0 {
#[inline]
fn combine(arr: &[u8]) -> i32 {
let a = arr[0] as u32;
let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
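// The child writes 4 errno bytes plus the 4-byte "NOEX" footer before
// calling _exit(1); reading exactly 8 bytes therefore means the exec
// failed, while EOF means the CLOEXEC pipe was closed by a successful
// exec.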
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads do *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc or free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) -> ! {
let errno = sys::os::errno() as u32;
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
// child now an all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src != -1 && retry(|| dup2(src, dst)) != -1
};
if !setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if !setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if !setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd != output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => {
if libc::setgid(u as libc::gid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if !dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if !envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
// still have active waiters for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
break 'outer;
}
Err(TryRecvError::Empty) => break,
}
}
}
// If a child exited (somehow received SIGCHLD), then poll all
// children to see if any of them exited.
//
// We also attempt to be responsible netizens when dealing with
// SIGCHLD by invoking any previous SIGCHLD handler instead of just
// ignoring any previous SIGCHLD handler. Note that we don't provide
// a 1:1 mapping of our handler invocations to the previous handler
// invocations because we drain the `read_fd` entirely. This is
// probably OK because the kernel is already allowed to coalesce
// simultaneous signals, we're just doing some extra coalescing.
//
// Another point of note is that this likely runs the signal handler
// on a different thread than the one that received the signal. I
// *think* this is ok at this time.
//
// The main reason for doing this is to allow stdtest to run native
// tests as well. Both libgreen and libnative are running around
// with process timeouts, but libgreen should get there first
// (currently libuv doesn't handle old signal handlers).
if drain(read_fd) {
let i: uint = unsafe { mem::transmute(old.sa_handler) };
if i != 0 {
assert!(old.sa_flags & c::SA_SIGINFO == 0);
(old.sa_handler)(c::SIGCHLD);
}
| // FIXME: sure would be nice to not have to scan the entire
// array...
active.retain(|&(pid, ref tx, _)| {
let pr = Process { pid: pid };
match pr.try_wait() {
Some(msg) => { tx.send(msg).unwrap(); false }
None => true,
}
});
}
}
// Once this helper thread is done, we re-register the old sigchld
// handler and close our intermediate file descriptors.
unsafe {
assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::null_mut()), 0);
let _ = libc::close(read_fd);
let _ = libc::close(WRITE_FD);
WRITE_FD = -1;
}
}
// Drain all pending data from the file descriptor, returning if any data
// could be drained. This requires that the file descriptor is in
// nonblocking mode.
fn drain(fd: libc::c_int) -> bool {
let mut ret = false;
loop {
let mut buf = [0u8; 1];
match unsafe {
libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t)
} {
n if n > 0 => { ret = true; }
0 => return true,
-1 if wouldblock() => return ret,
n => panic!("bad read {:?} ({:?})", os::last_os_error(), n),
}
}
}
// Signal handler for SIGCHLD signals, must be async-signal-safe!
//
// This function will write to the writing half of the "self pipe" to wake
// up the helper thread if it's waiting. Note that this write must be
// nonblocking because if it blocks and the reader is the thread we
// interrupted, then we'll deadlock.
//
// When writing, if the write returns EWOULDBLOCK then we choose to ignore
// it. At that point we're guaranteed that there's something in the pipe
// which will wake up the other end at some point, so we just allow this
// signal to be coalesced with the pending signals on the pipe.
extern fn sigchld_handler(_signum: libc::c_int) {
let msg = 1;
match unsafe {
libc::write(WRITE_FD, &msg as *const _ as *const libc::c_void, 1)
} {
1 => {}
-1 if wouldblock() => {} // see above comments
n => panic!("bad error on write fd: {:?} {:?}", n, os::errno()),
}
}
}
pub fn try_wait(&self) -> Option<ProcessExit> {
let mut status = 0 as c_int;
match retry(|| unsafe {
c::waitpid(self.pid, &mut status, c::WNOHANG)
}) {
n if n == self.pid => Some(translate_status(status)),
0 => None,
n => panic!("unknown waitpid error `{:?}`: {:?}", n,
super::last_error()),
}
}
}
fn with_argv<T,F>(prog: &CString, args: &[CString],
cb: F)
-> T
where F : FnOnce(*const *const libc::c_char) -> T
{
let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1);
// Convert the CStrings into an array of pointers. Note: the
// lifetime of the various CStrings involved is guaranteed to be
// larger than the lifetime of our invocation of cb, but this is
// technically unsafe as the callback could leak these pointers
// out of our scope.
ptrs.push(prog.as_ptr());
ptrs.extend(args.iter().map(|tmp| tmp.as_ptr()));
// Add a terminating null pointer (required by libc).
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
fn with_envp<K,V,T,F>(env: Option<&HashMap<K, V>>,
cb: F)
-> T
where F : FnOnce(*const c_void) -> T,
K : BytesContainer + Eq + Hash,
V : BytesContainer
{
// On posixy systems we can pass a char** for envp, which is a
// null-terminated array of "k=v\0" strings. Since we must create
// these strings locally, yet expose a raw pointer to them, we
// create a temporary vector to own the CStrings that outlives the
// call to cb.
match env {
Some(env) => {
let mut tmps = Vec::with_capacity(env.len());
for pair in env {
let mut kv = Vec::new();
kv.push_all(pair.0.container_as_bytes());
kv.push('=' as u8);
kv.push_all(pair.1.container_as_bytes());
kv.push(0); // terminating null
tmps.push(kv);
}
// As with `with_argv`, this is unsafe, since cb could leak the pointers.
let mut ptrs: Vec<*const libc::c_char> =
tmps.iter()
.map(|tmp| tmp.as_ptr() as *const libc::c_char)
.collect();
ptrs.push(ptr::null());
cb(ptrs.as_ptr() as *const c_void)
}
_ => cb(ptr::null())
}
}
fn translate_status(status: c_int) -> ProcessExit {
#![allow(non_snake_case)]
#[cfg(any(target_os = "linux", target_os = "android"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff }
pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f }
}
#[cfg(any(target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 }
pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 }
}
if imp::WIFEXITED(status) {
ExitStatus(imp::WEXITSTATUS(status) as int)
} else {
ExitSignal(imp::WTERMSIG(status) as int)
}
} | random_line_split |
|
process.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(deprecated)] // this module itself is essentially deprecated
use prelude::v1::*;
use self::Req::*;
use collections::HashMap;
use ffi::CString;
use hash::Hash;
use old_io::process::{ProcessExit, ExitStatus, ExitSignal};
use old_io::{IoResult, EndOfFile};
use libc::{self, pid_t, c_void, c_int};
use mem;
use os;
use old_path::BytesContainer;
use ptr;
use sync::mpsc::{channel, Sender, Receiver};
use sys::fs::FileDesc;
use sys::{self, retry, c, wouldblock, set_nonblocking, ms_to_timeval};
use sys_common::helper_thread::Helper;
use sys_common::{AsInner, mkerr_libc, timeout};
pub use sys_common::ProcessConfig;
helper_init! { static HELPER: Helper<Req> }
/// The unique id of the process (this should never be negative).
pub struct Process {
pub pid: pid_t
}
enum Req {
NewChild(libc::pid_t, Sender<ProcessExit>, u64),
}
const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX";
impl Process {
pub fn id(&self) -> pid_t {
self.pid
}
pub unsafe fn kill(&self, signal: int) -> IoResult<()> {
Process::killpid(self.pid, signal)
}
pub unsafe fn killpid(pid: pid_t, signal: int) -> IoResult<()> {
let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
mkerr_libc(r)
}
pub fn spawn<K, V, C, P>(cfg: &C, in_fd: Option<P>,
out_fd: Option<P>, err_fd: Option<P>)
-> IoResult<Process>
where C: ProcessConfig<K, V>, P: AsInner<FileDesc>,
K: BytesContainer + Eq + Hash, V: BytesContainer
{
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
mod rustrt {
extern {
pub fn rust_unset_sigprocmask();
}
}
unsafe fn set_cloexec(fd: c_int) {
let ret = c::ioctl(fd, c::FIOCLEX);
assert_eq!(ret, 0);
}
#[cfg(all(target_os = "android", target_arch = "aarch64"))]
unsafe fn getdtablesize() -> c_int {
libc::sysconf(libc::consts::os::sysconf::_SC_OPEN_MAX) as c_int
}
#[cfg(not(all(target_os = "android", target_arch = "aarch64")))]
unsafe fn getdtablesize() -> c_int {
libc::funcs::bsd44::getdtablesize()
}
let dirp = cfg.cwd().map(|c| c.as_ptr()).unwrap_or(ptr::null());
// temporary until unboxed closures land
let cfg = unsafe {
mem::transmute::<&ProcessConfig<K,V>,&'static ProcessConfig<K,V>>(cfg)
};
with_envp(cfg.env(), move|envp: *const c_void| {
with_argv(cfg.program(), cfg.args(), move|argv: *const *const libc::c_char| unsafe {
let (input, mut output) = try!(sys::os::pipe());
// We may use this in the child, so perform allocations before the
// fork
let devnull = b"/dev/null\0";
set_cloexec(output.fd());
let pid = fork();
if pid < 0 {
return Err(super::last_error())
} else if pid > 0 {
#[inline]
fn combine(arr: &[u8]) -> i32 {
let a = arr[0] as u32;
let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads do *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc or free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) -> ! {
let errno = sys::os::errno() as u32;
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
// child now an all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src != -1 && retry(|| dup2(src, dst)) != -1
};
if !setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if !setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if !setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd != output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => |
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if !dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if !envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
// still have active waiters for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
break 'outer;
}
Err(TryRecvError::Empty) => break,
}
}
}
// If a child exited (somehow received SIGCHLD), then poll all
// children to see if any of them exited.
//
// We also attempt to be responsible netizens when dealing with
// SIGCHLD by invoking any previous SIGCHLD handler instead of just
// ignoring any previous SIGCHLD handler. Note that we don't provide
// a 1:1 mapping of our handler invocations to the previous handler
// invocations because we drain the `read_fd` entirely. This is
// probably OK because the kernel is already allowed to coalesce
// simultaneous signals, we're just doing some extra coalescing.
//
// Another point of note is that this likely runs the signal handler
// on a different thread than the one that received the signal. I
// *think* this is ok at this time.
//
// The main reason for doing this is to allow stdtest to run native
// tests as well. Both libgreen and libnative are running around
// with process timeouts, but libgreen should get there first
// (currently libuv doesn't handle old signal handlers).
if drain(read_fd) {
let i: uint = unsafe { mem::transmute(old.sa_handler) };
if i != 0 {
assert!(old.sa_flags & c::SA_SIGINFO == 0);
(old.sa_handler)(c::SIGCHLD);
}
// FIXME: sure would be nice to not have to scan the entire
// array...
active.retain(|&(pid, ref tx, _)| {
let pr = Process { pid: pid };
match pr.try_wait() {
Some(msg) => { tx.send(msg).unwrap(); false }
None => true,
}
});
}
}
// Once this helper thread is done, we re-register the old sigchld
// handler and close our intermediate file descriptors.
unsafe {
assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::null_mut()), 0);
let _ = libc::close(read_fd);
let _ = libc::close(WRITE_FD);
WRITE_FD = -1;
}
}
// Drain all pending data from the file descriptor, returning if any data
// could be drained. This requires that the file descriptor is in
// nonblocking mode.
fn drain(fd: libc::c_int) -> bool {
let mut ret = false;
loop {
let mut buf = [0u8; 1];
match unsafe {
libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t)
} {
n if n > 0 => { ret = true; }
0 => return true,
-1 if wouldblock() => return ret,
n => panic!("bad read {:?} ({:?})", os::last_os_error(), n),
}
}
}
// Signal handler for SIGCHLD signals, must be async-signal-safe!
//
// This function will write to the writing half of the "self pipe" to wake
// up the helper thread if it's waiting. Note that this write must be
// nonblocking because if it blocks and the reader is the thread we
// interrupted, then we'll deadlock.
//
// When writing, if the write returns EWOULDBLOCK then we choose to ignore
// it. At that point we're guaranteed that there's something in the pipe
// which will wake up the other end at some point, so we just allow this
// signal to be coalesced with the pending signals on the pipe.
extern fn sigchld_handler(_signum: libc::c_int) {
let msg = 1;
match unsafe {
libc::write(WRITE_FD, &msg as *const _ as *const libc::c_void, 1)
} {
1 => {}
-1 if wouldblock() => {} // see above comments
n => panic!("bad error on write fd: {:?} {:?}", n, os::errno()),
}
}
}
pub fn try_wait(&self) -> Option<ProcessExit> {
let mut status = 0 as c_int;
match retry(|| unsafe {
c::waitpid(self.pid, &mut status, c::WNOHANG)
}) {
n if n == self.pid => Some(translate_status(status)),
0 => None,
n => panic!("unknown waitpid error `{:?}`: {:?}", n,
super::last_error()),
}
}
}
fn with_argv<T,F>(prog: &CString, args: &[CString],
cb: F)
-> T
where F : FnOnce(*const *const libc::c_char) -> T
{
let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1);
// Convert the CStrings into an array of pointers. Note: the
// lifetime of the various CStrings involved is guaranteed to be
// larger than the lifetime of our invocation of cb, but this is
// technically unsafe as the callback could leak these pointers
// out of our scope.
ptrs.push(prog.as_ptr());
ptrs.extend(args.iter().map(|tmp| tmp.as_ptr()));
// Add a terminating null pointer (required by libc).
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
fn with_envp<K,V,T,F>(env: Option<&HashMap<K, V>>,
cb: F)
-> T
where F : FnOnce(*const c_void) -> T,
K : BytesContainer + Eq + Hash,
V : BytesContainer
{
// On posixy systems we can pass a char** for envp, which is a
// null-terminated array of "k=v\0" strings. Since we must create
// these strings locally, yet expose a raw pointer to them, we
// create a temporary vector to own the CStrings that outlives the
// call to cb.
match env {
Some(env) => {
let mut tmps = Vec::with_capacity(env.len());
for pair in env {
let mut kv = Vec::new();
kv.push_all(pair.0.container_as_bytes());
kv.push('=' as u8);
kv.push_all(pair.1.container_as_bytes());
kv.push(0); // terminating null
tmps.push(kv);
}
// As with `with_argv`, this is unsafe, since cb could leak the pointers.
let mut ptrs: Vec<*const libc::c_char> =
tmps.iter()
.map(|tmp| tmp.as_ptr() as *const libc::c_char)
.collect();
ptrs.push(ptr::null());
cb(ptrs.as_ptr() as *const c_void)
}
_ => cb(ptr::null())
}
}
fn translate_status(status: c_int) -> ProcessExit {
#![allow(non_snake_case)]
#[cfg(any(target_os = "linux", target_os = "android"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff }
pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f }
}
#[cfg(any(target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 }
pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 }
}
if imp::WIFEXITED(status) {
ExitStatus(imp::WEXITSTATUS(status) as int)
} else {
ExitSignal(imp::WTERMSIG(status) as int)
}
}
| {
if libc::setgid(u as libc::gid_t) != 0 {
fail(&mut output);
}
} | conditional_block |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
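// The ext2 superblock stores log2(block size) - 10, so a stored value of
// 0 means 1024-byte blocks, 1 means 2048, and so on.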
pub fn | (&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
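// `(a + b - 1) / b` is integer ceiling division, so a trailing,
// partially-filled block group is still counted.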
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos {
try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() {
try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
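// The superblock `state` field appears to follow the ext2 convention: 1
// marks a cleanly unmounted filesystem, anything else (2 here) marks it
// dirty, so a crash while mounted can be detected on the next mount. The
// block is only rewritten when something actually changed.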
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state != state;
fs.superblock.state = state;
if fs.superblock_dirty {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
}
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
}
| group_count | identifier_name |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn group_count(&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos {
try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() {
try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state != state;
fs.superblock.state = state;
if fs.superblock_dirty |
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
}
| {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
} | conditional_block |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn group_count(&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> |
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos {
try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() {
try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state != state;
fs.superblock.state = state;
if fs.superblock_dirty {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
}
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
}
| {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
} | identifier_body |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn group_count(&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos { | try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state != state;
fs.superblock.state = state;
if fs.superblock_dirty {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
}
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
} | try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() { | random_line_split |
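Two details of the fs.rs rows above are easy to miss: group_count rounds up with plain integer arithmetic, and flush_superblock records the mount state as 1 for a cleanly flushed volume and 2 for a dirty one, rewriting the superblock when it is already dirty or the state changes. The same calculations in a short Python sketch with invented numbers:

# (a + b - 1) / b is integer ceiling division, as in group_count().
blocks_count = 20000          # made-up superblock values
blocks_per_group = 8192
group_count = (blocks_count + blocks_per_group - 1) // blocks_per_group
assert group_count == 3       # 20000 / 8192, rounded up

# flush_superblock(fs, clean): 1 marks the volume clean, 2 marks it dirty.
def superblock_state(clean):
    return 1 if clean else 2

assert superblock_state(True) == 1
assert superblock_state(False) == 2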
enrollmentTermsApi.js | //
// Copyright (C) 2016 - present Instructure, Inc.
//
// This file is part of Canvas.
//
// Canvas is free software: you can redistribute it and/or modify it under
// the terms of the GNU Affero General Public License as published by the Free
// Software Foundation, version 3 of the License.
//
// Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
// A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
// details.
//
// You should have received a copy of the GNU Affero General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
import _ from 'underscore'
import Depaginate from 'jsx/shared/CheatDepaginator'
const listUrl = () => ENV.ENROLLMENT_TERMS_URL
const deserializeTerms = termGroups =>
_.flatten(
_.map(termGroups, group =>
_.map(group.enrollment_terms, (term) => {
const groupID = term.grading_period_group_id
const newGroupID = _.isNumber(groupID) ? groupID.toString() : groupID
return {
id: term.id.toString(),
name: term.name,
startAt: term.start_at ? new Date(term.start_at) : null,
endAt: term.end_at ? new Date(term.end_at) : null,
createdAt: term.created_at ? new Date(term.created_at) : null,
gradingPeriodGroupId: newGroupID,
}
})
)
)
export default {
list (terms) |
}
| {
return new Promise((resolve, reject) => {
Depaginate(listUrl())
.then(response => resolve(deserializeTerms(response)))
.fail(error => reject(error))
})
} | identifier_body |
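The enrollmentTermsApi rows revolve around deserializeTerms, which flattens grouped enrollment terms into plain objects with string ids and Date fields. A rough Python rendering of the same reshaping, using an invented payload (the real data comes from the paginated ENV.ENROLLMENT_TERMS_URL endpoint):

from datetime import datetime

def deserialize_terms(term_groups):
    # Mirrors the underscore flatten/map chain: one flat list of terms,
    # ids stringified, ISO date strings parsed, missing dates kept as None.
    # endAt and createdAt are handled exactly like startAt and omitted here.
    terms = []
    for group in term_groups:
        for term in group["enrollment_terms"]:
            gid = term.get("grading_period_group_id")
            terms.append({
                "id": str(term["id"]),
                "name": term["name"],
                "startAt": datetime.fromisoformat(term["start_at"]) if term.get("start_at") else None,
                "gradingPeriodGroupId": str(gid) if isinstance(gid, int) else gid,
            })
    return terms

sample = [{"enrollment_terms": [
    {"id": 7, "name": "Fall 2016", "start_at": "2016-08-29T00:00:00", "grading_period_group_id": 3}
]}]
print(deserialize_terms(sample))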
enrollmentTermsApi.js | //
// Copyright (C) 2016 - present Instructure, Inc.
//
// This file is part of Canvas.
//
// Canvas is free software: you can redistribute it and/or modify it under
// the terms of the GNU Affero General Public License as published by the Free
// Software Foundation, version 3 of the License.
//
// Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
// A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
// details.
//
// You should have received a copy of the GNU Affero General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
import _ from 'underscore'
import Depaginate from 'jsx/shared/CheatDepaginator'
const listUrl = () => ENV.ENROLLMENT_TERMS_URL
const deserializeTerms = termGroups =>
_.flatten(
_.map(termGroups, group =>
_.map(group.enrollment_terms, (term) => {
const groupID = term.grading_period_group_id
const newGroupID = _.isNumber(groupID) ? groupID.toString() : groupID
return {
id: term.id.toString(),
name: term.name,
startAt: term.start_at ? new Date(term.start_at) : null,
endAt: term.end_at ? new Date(term.end_at) : null,
createdAt: term.created_at ? new Date(term.created_at) : null,
gradingPeriodGroupId: newGroupID,
}
})
)
)
export default {
| (terms) {
return new Promise((resolve, reject) => {
Depaginate(listUrl())
.then(response => resolve(deserializeTerms(response)))
.fail(error => reject(error))
})
}
}
| list | identifier_name |
enrollmentTermsApi.js | //
// Copyright (C) 2016 - present Instructure, Inc.
//
// This file is part of Canvas.
//
// Canvas is free software: you can redistribute it and/or modify it under
// the terms of the GNU Affero General Public License as published by the Free
// Software Foundation, version 3 of the License.
//
// Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
// A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
// details.
//
// You should have received a copy of the GNU Affero General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
import _ from 'underscore'
import Depaginate from 'jsx/shared/CheatDepaginator'
const listUrl = () => ENV.ENROLLMENT_TERMS_URL
const deserializeTerms = termGroups =>
_.flatten(
_.map(termGroups, group =>
_.map(group.enrollment_terms, (term) => {
const groupID = term.grading_period_group_id
const newGroupID = _.isNumber(groupID) ? groupID.toString() : groupID
return {
id: term.id.toString(),
name: term.name,
startAt: term.start_at ? new Date(term.start_at) : null,
endAt: term.end_at ? new Date(term.end_at) : null,
createdAt: term.created_at ? new Date(term.created_at) : null,
gradingPeriodGroupId: newGroupID,
}
})
)
)
| return new Promise((resolve, reject) => {
Depaginate(listUrl())
.then(response => resolve(deserializeTerms(response)))
.fail(error => reject(error))
})
}
} | export default {
list (terms) { | random_line_split |
yaml_generator.py |
YAML_OUTPUT = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s
table: %s"""
YAML_OUTPUT_NO_TABLE = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s"""
class YamlGenerator(object):
"""docstring for yaml_generator"""
def __init__(self, grammar):
self.grammar = grammar
def print_yaml(self, ll1_table = None):
def convert_list_str(a_list):
return "[%s]" % (", ".join(a_list))
def convert_dict_str(a_dict):
return "{%s}" % ", ".join(["%s: %s" % (key, value)
for key, value in a_dict.items()])
def convert_dict_dict_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_str(value))
for key, value in a_dict.items()]))
def convert_dict_list_str(a_dict):
return "{%s}" % (", \n ".join(["%s: %s" % (key, convert_list_str(value))
for key, value in a_dict.items()]))
def convert_dict_dict_list_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_list_str(value))
for key, value in a_dict.items()]))
if ll1_table:
return YAML_OUTPUT % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()),
convert_dict_dict_str(ll1_table))
else:
return YAML_OUTPUT_NO_TABLE % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()))
def convert_production(self):
return {idx : {production.left_hand.lexeme : [item.lexeme for item in production.right_hand if item.lexeme is not EPSILON]} for idx, production in enumerate(self.grammar.production)} | from ll1_symbols import * | random_line_split |
|
yaml_generator.py | from ll1_symbols import *
YAML_OUTPUT = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s
table: %s"""
YAML_OUTPUT_NO_TABLE = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s"""
class YamlGenerator(object):
"""docstring for yaml_generator"""
def __init__(self, grammar):
self.grammar = grammar
def print_yaml(self, ll1_table = None):
def convert_list_str(a_list):
return "[%s]" % (", ".join(a_list))
def convert_dict_str(a_dict):
|
def convert_dict_dict_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_str(value))
for key, value in a_dict.items()]))
def convert_dict_list_str(a_dict):
return "{%s}" % (", \n ".join(["%s: %s" % (key, convert_list_str(value))
for key, value in a_dict.items()]))
def convert_dict_dict_list_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_list_str(value))
for key, value in a_dict.items()]))
if ll1_table:
return YAML_OUTPUT % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()),
convert_dict_dict_str(ll1_table))
else:
return YAML_OUTPUT_NO_TABLE % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()))
def convert_production(self):
return {idx : {production.left_hand.lexeme : [item.lexeme for item in production.right_hand if item.lexeme is not EPSILON]} for idx, production in enumerate(self.grammar.production)}
| return "{%s}" % ", ".join(["%s: %s" % (key, value)
for key, value in a_dict.items()]) | identifier_body |
yaml_generator.py | from ll1_symbols import *
YAML_OUTPUT = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s
table: %s"""
YAML_OUTPUT_NO_TABLE = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s"""
class YamlGenerator(object):
"""docstring for yaml_generator"""
def __init__(self, grammar):
self.grammar = grammar
def print_yaml(self, ll1_table = None):
def convert_list_str(a_list):
return "[%s]" % (", ".join(a_list))
def convert_dict_str(a_dict):
return "{%s}" % ", ".join(["%s: %s" % (key, value)
for key, value in a_dict.items()])
def convert_dict_dict_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_str(value))
for key, value in a_dict.items()]))
def convert_dict_list_str(a_dict):
return "{%s}" % (", \n ".join(["%s: %s" % (key, convert_list_str(value))
for key, value in a_dict.items()]))
def convert_dict_dict_list_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_list_str(value))
for key, value in a_dict.items()]))
if ll1_table:
|
else:
return YAML_OUTPUT_NO_TABLE % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()))
def convert_production(self):
return {idx : {production.left_hand.lexeme : [item.lexeme for item in production.right_hand if item.lexeme is not EPSILON]} for idx, production in enumerate(self.grammar.production)}
| return YAML_OUTPUT % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()),
convert_dict_dict_str(ll1_table)) | conditional_block |
yaml_generator.py | from ll1_symbols import *
YAML_OUTPUT = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s
table: %s"""
YAML_OUTPUT_NO_TABLE = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s"""
class YamlGenerator(object):
"""docstring for yaml_generator"""
def __init__(self, grammar):
self.grammar = grammar
def print_yaml(self, ll1_table = None):
def | (a_list):
return "[%s]" % (", ".join(a_list))
def convert_dict_str(a_dict):
return "{%s}" % ", ".join(["%s: %s" % (key, value)
for key, value in a_dict.items()])
def convert_dict_dict_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_str(value))
for key, value in a_dict.items()]))
def convert_dict_list_str(a_dict):
return "{%s}" % (", \n ".join(["%s: %s" % (key, convert_list_str(value))
for key, value in a_dict.items()]))
def convert_dict_dict_list_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_list_str(value))
for key, value in a_dict.items()]))
if ll1_table:
return YAML_OUTPUT % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()),
convert_dict_dict_str(ll1_table))
else:
return YAML_OUTPUT_NO_TABLE % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()))
def convert_production(self):
return {idx : {production.left_hand.lexeme : [item.lexeme for item in production.right_hand if item.lexeme is not EPSILON]} for idx, production in enumerate(self.grammar.production)}
| convert_list_str | identifier_name |
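print_yaml only touches a few attributes of the grammar object (term, non_term, goal, production, and each production's left_hand/right_hand tokens with a lexeme field) plus EOF, ERROR_MARKER and EPSILON from ll1_symbols, so a small stub is enough to exercise the class; the stub below is invented for illustration:

# Hypothetical stand-ins for the grammar and token classes YamlGenerator
# expects; the real ones belong to this project and are not shown here.
class Tok(object):
    def __init__(self, lexeme):
        self.lexeme = lexeme

class Prod(object):
    def __init__(self, left, right):
        self.left_hand = Tok(left)
        self.right_hand = [Tok(r) for r in right]

class StubGrammar(object):
    term = ["id", "plus"]
    non_term = ["Expr", "ExprTail"]
    goal = "Expr"
    production = [Prod("Expr", ["id", "ExprTail"]),
                  Prod("ExprTail", ["plus", "id", "ExprTail"]),
                  Prod("ExprTail", [])]

# With ll1_symbols importable, the call is simply:
# print(YamlGenerator(StubGrammar()).print_yaml())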
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else { "-" };
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn raw_package_info() -> (&'static str, &'static str, &'static str) | {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
} | identifier_body |
|
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else | ;
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn raw_package_info() -> (&'static str, &'static str, &'static str) {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
}
| { "-" } | conditional_block |
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful, |
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else { "-" };
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn raw_package_info() -> (&'static str, &'static str, &'static str) {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
} | // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details. | random_line_split |
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else { "-" };
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn | () -> (&'static str, &'static str, &'static str) {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
}
| raw_package_info | identifier_name |
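version() in the misc.rs rows assembles the crate version, release track, short commit hash, commit date, platform triple and rustc version into one identifier, and version_data() additionally packs major/minor/patch into a single u32 as (major << 16) + (minor << 8) + patch. Roughly, with made-up values:

# Made-up values; the real ones come from build-time env!/version macros.
crate_version, track = "1.7.0", "nightly"
sha, date = "e8f4b40", "20170509"
platform, rustc = "x86_64-linux-gnu", "1.17.0"

version = "Parity/v%s-%s-%s-%s/%s/rustc%s" % (crate_version, track, sha, date, platform, rustc)
print(version)   # Parity/v1.7.0-nightly-e8f4b40-20170509/x86_64-linux-gnu/rustc1.17.0

major, minor, patch = (int(x) for x in crate_version.split("."))
packed = (major << 16) + (minor << 8) + patch
assert packed == 67328        # 0x010700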
bp.py | import math
import net
SIGMOID = 0
TANH = 1
class bp:
def __init__(self, net, learning_rate, momentum):
self.type = net.getType()
self.net = net
self.lr = learning_rate
self.m = momentum
self.layer = net.getLayer()
self.lc = [[[0]*max(self.layer)]*max(self.layer)]*len(self.layer)
def _dfunc(self, y):
if self.type==SIGMOID:
return y * (1.0 - y)
else:
return 1.0 - y**2
def setLearningRate(self,x):
|
def setMomentum(self, x):
self.m = x
def backPropagate(self, input, target):
if len(target)!=self.layer[-1]:
print len(target)
print self.layer[-1]
raise ValueError('Wrong number of target values')
self.net.process(input)
nlayer = len(self.layer)
delta = []
for i in range(0, nlayer):
delta.append([0.0] * self.layer[i])
for i in range(0,self.layer[nlayer-1]):
node = self.net.getNode(nlayer-1, i)
error = target[i] - node
delta[nlayer-1][i] = self._dfunc(node) * error
for l in range(nlayer-2, 0, -1):
for i in range(0, self.layer[l]):
error = 0.0
for j in range(0, self.layer[l+1]):
error = error + delta[l+1][j] * self.net.getWeight(l+1, i, j)
delta[l][i] = self._dfunc(self.net.getNode(l,i)) * error
for l in range(nlayer-2, -1, -1):
for i in range(0, self.layer[l]):
for j in range(0, self.layer[l+1]):
change = delta[l+1][j] * self.net.getNode(l, i)
w = self.net.getWeight(l+1, i, j) + self.lr * change + self.m * self.lc[l+1][i][j]
self.net.setWeight(l+1, i, j, w)
self.lc[l+1][i][j] = change
for i in range(0, self.layer[l+1]):
b = self.net.getBias(l+1, i) + delta[l+1][i]
self.net.setBias(l+1, i, b)
error = 0.0
for i in range(0, len(target)):
error = error + 0.5 * (target[i] - self.net.getNode(nlayer-1, i))**2
return error
| self.lr = x | identifier_body |
bp.py | import math
import net
SIGMOID = 0
TANH = 1
class | :
def __init__(self, net, learning_rate, momentum):
self.type = net.getType()
self.net = net
self.lr = learning_rate
self.m = momentum
self.layer = net.getLayer()
self.lc = [[[0]*max(self.layer)]*max(self.layer)]*len(self.layer)
def _dfunc(self, y):
if self.type==SIGMOID:
return y * (1.0 - y)
else:
return 1.0 - y**2
def setLearningRate(self,x):
self.lr = x
def setMomentum(self, x):
self.m = x
def backPropagate(self, input, target):
if len(target)!=self.layer[-1]:
print len(target)
print self.layer[-1]
raise ValueError('Wrong number of target values')
self.net.process(input)
nlayer = len(self.layer)
delta = []
for i in range(0, nlayer):
delta.append([0.0] * self.layer[i])
for i in range(0,self.layer[nlayer-1]):
node = self.net.getNode(nlayer-1, i)
error = target[i] - node
delta[nlayer-1][i] = self._dfunc(node) * error
for l in range(nlayer-2, 0, -1):
for i in range(0, self.layer[l]):
error = 0.0
for j in range(0, self.layer[l+1]):
error = error + delta[l+1][j] * self.net.getWeight(l+1, i, j)
delta[l][i] = self._dfunc(self.net.getNode(l,i)) * error
for l in range(nlayer-2, -1, -1):
for i in range(0, self.layer[l]):
for j in range(0, self.layer[l+1]):
change = delta[l+1][j] * self.net.getNode(l, i)
w = self.net.getWeight(l+1, i, j) + self.lr * change + self.m * self.lc[l+1][i][j]
self.net.setWeight(l+1, i, j, w)
self.lc[l+1][i][j] = change
for i in range(0, self.layer[l+1]):
b = self.net.getBias(l+1, i) + delta[l+1][i]
self.net.setBias(l+1, i, b)
error = 0.0
for i in range(0, len(target)):
error = error + 0.5 * (target[i] - self.net.getNode(nlayer-1, i))**2
return error
| bp | identifier_name |
bp.py | import math
import net
SIGMOID = 0
TANH = 1
class bp:
def __init__(self, net, learning_rate, momentum):
self.type = net.getType()
self.net = net | self.lc = [[[0]*max(self.layer)]*max(self.layer)]*len(self.layer)
def _dfunc(self, y):
if self.type==SIGMOID:
return y * (1.0 - y)
else:
return 1.0 - y**2
def setLearningRate(self,x):
self.lr = x
def setMomentum(self, x):
self.m = x
def backPropagate(self, input, target):
if len(target)!=self.layer[-1]:
print len(target)
print self.layer[-1]
raise ValueError('Wrong number of target values')
self.net.process(input)
nlayer = len(self.layer)
delta = []
for i in range(0, nlayer):
delta.append([0.0] * self.layer[i])
for i in range(0,self.layer[nlayer-1]):
node = self.net.getNode(nlayer-1, i)
error = target[i] - node
delta[nlayer-1][i] = self._dfunc(node) * error
for l in range(nlayer-2, 0, -1):
for i in range(0, self.layer[l]):
error = 0.0
for j in range(0, self.layer[l+1]):
error = error + delta[l+1][j] * self.net.getWeight(l+1, i, j)
delta[l][i] = self._dfunc(self.net.getNode(l,i)) * error
for l in range(nlayer-2, -1, -1):
for i in range(0, self.layer[l]):
for j in range(0, self.layer[l+1]):
change = delta[l+1][j] * self.net.getNode(l, i)
w = self.net.getWeight(l+1, i, j) + self.lr * change + self.m * self.lc[l+1][i][j]
self.net.setWeight(l+1, i, j, w)
self.lc[l+1][i][j] = change
for i in range(0, self.layer[l+1]):
b = self.net.getBias(l+1, i) + delta[l+1][i]
self.net.setBias(l+1, i, b)
error = 0.0
for i in range(0, len(target)):
error = error + 0.5 * (target[i] - self.net.getNode(nlayer-1, i))**2
return error | self.lr = learning_rate
self.m = momentum
self.layer = net.getLayer() | random_line_split |
bp.py | import math
import net
SIGMOID = 0
TANH = 1
class bp:
def __init__(self, net, learning_rate, momentum):
self.type = net.getType()
self.net = net
self.lr = learning_rate
self.m = momentum
self.layer = net.getLayer()
self.lc = [[[0]*max(self.layer)]*max(self.layer)]*len(self.layer)
def _dfunc(self, y):
if self.type==SIGMOID:
return y * (1.0 - y)
else:
return 1.0 - y**2
def setLearningRate(self,x):
self.lr = x
def setMomentum(self, x):
self.m = x
def backPropagate(self, input, target):
if len(target)!=self.layer[-1]:
print len(target)
print self.layer[-1]
raise ValueError('Wrong number of target values')
self.net.process(input)
nlayer = len(self.layer)
delta = []
for i in range(0, nlayer):
delta.append([0.0] * self.layer[i])
for i in range(0,self.layer[nlayer-1]):
node = self.net.getNode(nlayer-1, i)
error = target[i] - node
delta[nlayer-1][i] = self._dfunc(node) * error
for l in range(nlayer-2, 0, -1):
for i in range(0, self.layer[l]):
error = 0.0
for j in range(0, self.layer[l+1]):
error = error + delta[l+1][j] * self.net.getWeight(l+1, i, j)
delta[l][i] = self._dfunc(self.net.getNode(l,i)) * error
for l in range(nlayer-2, -1, -1):
|
error = 0.0
for i in range(0, len(target)):
error = error + 0.5 * (target[i] - self.net.getNode(nlayer-1, i))**2
return error
| for i in range(0, self.layer[l]):
for j in range(0, self.layer[l+1]):
change = delta[l+1][j] * self.net.getNode(l, i)
w = self.net.getWeight(l+1, i, j) + self.lr * change + self.m * self.lc[l+1][i][j]
self.net.setWeight(l+1, i, j, w)
self.lc[l+1][i][j] = change
for i in range(0, self.layer[l+1]):
b = self.net.getBias(l+1, i) + delta[l+1][i]
self.net.setBias(l+1, i, b) | conditional_block |
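The bp trainer above drives a separate net object through a small duck-typed interface (getType, getLayer, process, getNode, getWeight/setWeight, getBias/setBias); the net module is not part of this dump, so the loop below only sketches the intended call pattern and guesses the constructor signature:

# Hypothetical training loop for the bp class; net.net([2, 2, 1], SIGMOID)
# is an assumed constructor, since only bp.py appears above.
import net

xor_patterns = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]

network = net.net([2, 2, 1], SIGMOID)   # assumed signature
trainer = bp(network, 0.5, 0.1)         # learning rate, momentum

for epoch in range(1000):
    total_error = 0.0
    for inputs, target in xor_patterns:
        total_error += trainer.backPropagate(inputs, target)
    if epoch % 100 == 0:
        print(epoch, total_error)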
conf.py | # -*- coding: utf-8 -*-
#
# Total Open Station documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 28 23:03:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../totalopenstation'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Total Open Station'
copyright = '2015-2020, Stefano Costa, Damien Gaignon and Luca Bianconi'
author = 'Stefano Costa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build',
'global.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use. | # A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
rst_prolog = """
.. include:: /global.rst
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'totalopenstation',
'github_repo': 'totalopenstation',
'github_type': 'star',
'github_count': 'true',
'github_button': True,
'description': 'Download and export field survey data from your total station'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "tops.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TotalOpenStationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TotalOpenStation.tex', 'Total Open Station Documentation',
'Stefano Costa, Damien Gaignon, Luca Bianconi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('totalopenstation-cli-parser',
'totalopenstation-cli-parser',
'Total Open Station command line converter',
['Stefano Costa, Luca Bianconi'],
1),
('totalopenstation-cli-connector',
'totalopenstation-cli-connector',
'Total Open Station command line downloader',
['Stefano Costa, Luca Bianconi'],
1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TotalOpenStation', 'Total Open Station Documentation',
'Stefano Costa, Damien Gaignon, Luca Bianconi', 'TotalOpenStation', 'Total Open Station downloads data from your total station into common formats',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_use_ivar = True
napoleon_use_param = False | pygments_style = 'sphinx'
| random_line_split |
gdocsbackend.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import string
import urllib
import duplicity.backend
from duplicity.errors import BackendException
class GDocsBackend(duplicity.backend.Backend):
"""Connect to remote store using Google Google Documents List API"""
ROOT_FOLDER_ID = 'folder%3Aroot'
BACKUP_DOCUMENT_TYPE = 'application/binary'
def __init__(self, parsed_url):
|
def _put(self, source_path, remote_filename):
self._delete(remote_filename)
# Set uploader instance. Note that resumable uploads are required in order to
# enable uploads for all file types.
# (see http://googleappsdeveloper.blogspot.com/2011/05/upload-all-file-types-to-any-google.html)
file = source_path.open()
uploader = gdata.client.ResumableUploader(
self.client, file,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
os.path.getsize(file.name),
chunk_size=gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE,
desired_class=gdata.docs.data.Resource)
if uploader:
# Chunked upload.
entry = gdata.docs.data.Resource(title=atom.data.Title(text=remote_filename))
uri = self.folder.get_resumable_create_media_link().href + '?convert=false'
entry = uploader.UploadFile(uri, entry=entry)
if not entry:
raise BackendException("Failed to upload file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
else:
raise BackendException("Failed to initialize upload of file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
assert not file.close()
def _get(self, remote_filename, local_path):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
if len(entries) == 1:
entry = entries[0]
self.client.DownloadResource(entry, local_path.name)
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.folder.title.text))
def _list(self):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE)
return [entry.title.text for entry in entries]
def _delete(self, filename):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
filename)
for entry in entries:
self.client.delete(entry.get_edit_link().href + '?delete=true', force=True)
def _authorize(self, email, password, captcha_token=None, captcha_response=None):
try:
self.client.client_login(email,
password,
source='duplicity $version',
service='writely',
captcha_token=captcha_token,
captcha_response=captcha_response)
except gdata.client.CaptchaChallenge as challenge:
print('A captcha challenge is required. Please visit ' + challenge.captcha_url)
answer = None
while not answer:
answer = raw_input('Answer to the challenge? ')
self._authorize(email, password, challenge.captcha_token, answer)
except gdata.client.BadAuthentication:
raise BackendException(
'Invalid user credentials given. Be aware that accounts '
'that use 2-step verification require creating an application specific '
'access code for using this Duplicity backend. Follow the instruction in '
'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
'and create your application-specific password to run duplicity backups.')
def _fetch_entries(self, folder_id, type, title=None):
# Build URI.
uri = '/feeds/default/private/full/%s/contents' % folder_id
if type == 'folder':
uri += '/-/folder?showfolders=true'
elif type == GDocsBackend.BACKUP_DOCUMENT_TYPE:
uri += '?showfolders=false'
else:
uri += '?showfolders=true'
if title:
uri += '&title=' + urllib.quote(title) + '&title-exact=true'
# Fetch entries.
entries = self.client.get_all_resources(uri=uri)
# When filtering by entry title, API is returning (don't know why) documents in other
# folders (apart from folder_id) matching the title, so some extra filtering is required.
if title:
result = []
for entry in entries:
resource_type = entry.get_resource_type()
if (not type) \
or (type == 'folder' and resource_type == 'folder') \
or (type == GDocsBackend.BACKUP_DOCUMENT_TYPE and resource_type != 'folder'):
if folder_id != GDocsBackend.ROOT_FOLDER_ID:
for link in entry.in_collections():
folder_entry = self.client.get_entry(link.href, None, None,
desired_class=gdata.docs.data.Resource)
if folder_entry and (folder_entry.resource_id.text == folder_id):
result.append(entry)
elif len(entry.in_collections()) == 0:
result.append(entry)
else:
result = entries
# Done!
return result
""" gdata is an alternate way to access gdocs, currently 05/2015 lacking OAuth support """
duplicity.backend.register_backend('gdata+gdocs', GDocsBackend)
duplicity.backend.uses_netloc.extend(['gdata+gdocs'])
| duplicity.backend.Backend.__init__(self, parsed_url)
# Import Google Data APIs libraries.
try:
global atom
global gdata
import atom.data
import gdata.client
import gdata.docs.client
import gdata.docs.data
except ImportError as e:
raise BackendException("""\
Google Docs backend requires Google Data APIs Python Client Library (see http://code.google.com/p/gdata-python-client/).
Exception: %s""" % str(e))
# Setup client instance.
self.client = gdata.docs.client.DocsClient(source='duplicity $version')
self.client.ssl = True
self.client.http_client.debug = False
self._authorize(parsed_url.username + '@' + parsed_url.hostname, self.get_password())
# Fetch destination folder entry (and create hierarchy if required).
folder_names = string.split(parsed_url.path[1:], '/')
parent_folder = None
parent_folder_id = GDocsBackend.ROOT_FOLDER_ID
for folder_name in folder_names:
entries = self._fetch_entries(parent_folder_id, 'folder', folder_name)
if entries is not None:
if len(entries) == 1:
parent_folder = entries[0]
elif len(entries) == 0:
folder = gdata.docs.data.Resource(type='folder', title=folder_name)
parent_folder = self.client.create_resource(folder, collection=parent_folder)
else:
parent_folder = None
if parent_folder:
parent_folder_id = parent_folder.resource_id.text
else:
raise BackendException("Error while creating destination folder '%s'." % folder_name)
else:
raise BackendException("Error while fetching destination folder '%s'." % folder_name)
self.folder = parent_folder | identifier_body |
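The backend registers itself for gdata+gdocs URLs; the login e-mail is rebuilt from the URL's username and hostname, and each path segment names a folder that is looked up or created in turn. A sketch of that mapping with an invented target URL (urllib.parse stands in for duplicity's own URL handling):

from urllib.parse import urlparse

# Invented duplicity-style target; any gdata+gdocs:// URL has this shape.
target = urlparse("gdata+gdocs://backup.user@gmail.com/duplicity/laptop")

email = target.username + "@" + target.hostname   # passed to client_login
folder_names = target.path[1:].split("/")         # walked/created one by one
print(email)          # backup.user@gmail.com
print(folder_names)   # ['duplicity', 'laptop']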
gdocsbackend.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import string
import urllib
import duplicity.backend
from duplicity.errors import BackendException
class GDocsBackend(duplicity.backend.Backend):
"""Connect to remote store using Google Google Documents List API"""
ROOT_FOLDER_ID = 'folder%3Aroot'
BACKUP_DOCUMENT_TYPE = 'application/binary'
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# Import Google Data APIs libraries.
try:
global atom
global gdata
import atom.data
import gdata.client
import gdata.docs.client
import gdata.docs.data
except ImportError as e:
raise BackendException("""\
Google Docs backend requires Google Data APIs Python Client Library (see http://code.google.com/p/gdata-python-client/).
Exception: %s""" % str(e))
# Setup client instance.
self.client = gdata.docs.client.DocsClient(source='duplicity $version')
self.client.ssl = True
self.client.http_client.debug = False
self._authorize(parsed_url.username + '@' + parsed_url.hostname, self.get_password())
# Fetch destination folder entry (and create hierarchy if required).
folder_names = string.split(parsed_url.path[1:], '/')
parent_folder = None
parent_folder_id = GDocsBackend.ROOT_FOLDER_ID
for folder_name in folder_names:
entries = self._fetch_entries(parent_folder_id, 'folder', folder_name)
if entries is not None:
if len(entries) == 1:
parent_folder = entries[0]
elif len(entries) == 0:
folder = gdata.docs.data.Resource(type='folder', title=folder_name)
parent_folder = self.client.create_resource(folder, collection=parent_folder)
else:
parent_folder = None
if parent_folder:
parent_folder_id = parent_folder.resource_id.text
else:
raise BackendException("Error while creating destination folder '%s'." % folder_name)
else:
raise BackendException("Error while fetching destination folder '%s'." % folder_name)
self.folder = parent_folder
def _put(self, source_path, remote_filename):
self._delete(remote_filename)
# Set uploader instance. Note that resumable uploads are required in order to
# enable uploads for all file types.
# (see http://googleappsdeveloper.blogspot.com/2011/05/upload-all-file-types-to-any-google.html)
file = source_path.open()
uploader = gdata.client.ResumableUploader(
self.client, file,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
os.path.getsize(file.name),
chunk_size=gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE,
desired_class=gdata.docs.data.Resource)
if uploader:
# Chunked upload.
entry = gdata.docs.data.Resource(title=atom.data.Title(text=remote_filename))
uri = self.folder.get_resumable_create_media_link().href + '?convert=false'
entry = uploader.UploadFile(uri, entry=entry)
if not entry:
raise BackendException("Failed to upload file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
else:
raise BackendException("Failed to initialize upload of file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
assert not file.close()
def | (self, remote_filename, local_path):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
if len(entries) == 1:
entry = entries[0]
self.client.DownloadResource(entry, local_path.name)
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.folder.title.text))
def _list(self):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE)
return [entry.title.text for entry in entries]
def _delete(self, filename):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
filename)
for entry in entries:
self.client.delete(entry.get_edit_link().href + '?delete=true', force=True)
def _authorize(self, email, password, captcha_token=None, captcha_response=None):
try:
self.client.client_login(email,
password,
source='duplicity $version',
service='writely',
captcha_token=captcha_token,
captcha_response=captcha_response)
except gdata.client.CaptchaChallenge as challenge:
print('A captcha challenge is required. Please visit ' + challenge.captcha_url)
answer = None
while not answer:
answer = raw_input('Answer to the challenge? ')
self._authorize(email, password, challenge.captcha_token, answer)
except gdata.client.BadAuthentication:
raise BackendException(
'Invalid user credentials given. Be aware that accounts '
'that use 2-step verification require creating an application specific '
'access code for using this Duplicity backend. Follow the instruction in '
'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
'and create your application-specific password to run duplicity backups.')
def _fetch_entries(self, folder_id, type, title=None):
# Build URI.
uri = '/feeds/default/private/full/%s/contents' % folder_id
if type == 'folder':
uri += '/-/folder?showfolders=true'
elif type == GDocsBackend.BACKUP_DOCUMENT_TYPE:
uri += '?showfolders=false'
else:
uri += '?showfolders=true'
if title:
uri += '&title=' + urllib.quote(title) + '&title-exact=true'
# Fetch entries.
entries = self.client.get_all_resources(uri=uri)
# When filtering by entry title, API is returning (don't know why) documents in other
# folders (apart from folder_id) matching the title, so some extra filtering is required.
if title:
result = []
for entry in entries:
resource_type = entry.get_resource_type()
if (not type) \
or (type == 'folder' and resource_type == 'folder') \
or (type == GDocsBackend.BACKUP_DOCUMENT_TYPE and resource_type != 'folder'):
if folder_id != GDocsBackend.ROOT_FOLDER_ID:
for link in entry.in_collections():
folder_entry = self.client.get_entry(link.href, None, None,
desired_class=gdata.docs.data.Resource)
if folder_entry and (folder_entry.resource_id.text == folder_id):
result.append(entry)
elif len(entry.in_collections()) == 0:
result.append(entry)
else:
result = entries
# Done!
return result
""" gdata is an alternate way to access gdocs, currently 05/2015 lacking OAuth support """
duplicity.backend.register_backend('gdata+gdocs', GDocsBackend)
duplicity.backend.uses_netloc.extend(['gdata+gdocs'])
| _get | identifier_name |
gdocsbackend.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import string
import urllib
import duplicity.backend
from duplicity.errors import BackendException
class GDocsBackend(duplicity.backend.Backend):
"""Connect to remote store using Google Google Documents List API"""
ROOT_FOLDER_ID = 'folder%3Aroot'
BACKUP_DOCUMENT_TYPE = 'application/binary'
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# Import Google Data APIs libraries.
try:
global atom
global gdata
import atom.data
import gdata.client
import gdata.docs.client
import gdata.docs.data
except ImportError as e:
raise BackendException("""\
Google Docs backend requires Google Data APIs Python Client Library (see http://code.google.com/p/gdata-python-client/).
Exception: %s""" % str(e))
# Setup client instance.
self.client = gdata.docs.client.DocsClient(source='duplicity $version')
self.client.ssl = True
self.client.http_client.debug = False
self._authorize(parsed_url.username + '@' + parsed_url.hostname, self.get_password())
# Fetch destination folder entry (and create hierarchy if required).
folder_names = string.split(parsed_url.path[1:], '/')
parent_folder = None
parent_folder_id = GDocsBackend.ROOT_FOLDER_ID
for folder_name in folder_names:
entries = self._fetch_entries(parent_folder_id, 'folder', folder_name)
if entries is not None:
if len(entries) == 1:
parent_folder = entries[0]
elif len(entries) == 0:
folder = gdata.docs.data.Resource(type='folder', title=folder_name)
parent_folder = self.client.create_resource(folder, collection=parent_folder)
else:
parent_folder = None
if parent_folder:
parent_folder_id = parent_folder.resource_id.text
else:
raise BackendException("Error while creating destination folder '%s'." % folder_name)
else:
raise BackendException("Error while fetching destination folder '%s'." % folder_name)
self.folder = parent_folder
def _put(self, source_path, remote_filename):
self._delete(remote_filename)
# Set uploader instance. Note that resumable uploads are required in order to
# enable uploads for all file types.
# (see http://googleappsdeveloper.blogspot.com/2011/05/upload-all-file-types-to-any-google.html)
file = source_path.open()
uploader = gdata.client.ResumableUploader(
self.client, file,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
os.path.getsize(file.name),
chunk_size=gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE,
desired_class=gdata.docs.data.Resource)
if uploader:
# Chunked upload.
entry = gdata.docs.data.Resource(title=atom.data.Title(text=remote_filename))
uri = self.folder.get_resumable_create_media_link().href + '?convert=false'
entry = uploader.UploadFile(uri, entry=entry)
if not entry:
raise BackendException("Failed to upload file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
else:
raise BackendException("Failed to initialize upload of file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
assert not file.close()
def _get(self, remote_filename, local_path):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
if len(entries) == 1:
entry = entries[0]
self.client.DownloadResource(entry, local_path.name)
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.folder.title.text))
def _list(self):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE)
return [entry.title.text for entry in entries]
def _delete(self, filename):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
filename)
for entry in entries:
self.client.delete(entry.get_edit_link().href + '?delete=true', force=True)
def _authorize(self, email, password, captcha_token=None, captcha_response=None):
try:
self.client.client_login(email,
password,
source='duplicity $version',
service='writely',
captcha_token=captcha_token,
captcha_response=captcha_response)
except gdata.client.CaptchaChallenge as challenge:
print('A captcha challenge is required. Please visit ' + challenge.captcha_url)
answer = None
while not answer:
answer = raw_input('Answer to the challenge? ')
self._authorize(email, password, challenge.captcha_token, answer)
except gdata.client.BadAuthentication:
raise BackendException(
'Invalid user credentials given. Be aware that accounts '
'that use 2-step verification require creating an application specific '
'access code for using this Duplicity backend. Follow the instruction in '
'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
'and create your application-specific password to run duplicity backups.')
def _fetch_entries(self, folder_id, type, title=None):
# Build URI.
uri = '/feeds/default/private/full/%s/contents' % folder_id
if type == 'folder':
uri += '/-/folder?showfolders=true'
elif type == GDocsBackend.BACKUP_DOCUMENT_TYPE:
uri += '?showfolders=false'
else:
uri += '?showfolders=true'
if title:
uri += '&title=' + urllib.quote(title) + '&title-exact=true'
# Fetch entries.
entries = self.client.get_all_resources(uri=uri)
# When filtering by entry title, the API is returning (don't know why) documents in other
# folders (apart from folder_id) matching the title, so some extra filtering is required.
if title:
result = []
for entry in entries:
resource_type = entry.get_resource_type()
if (not type) \
or (type == 'folder' and resource_type == 'folder') \
or (type == GDocsBackend.BACKUP_DOCUMENT_TYPE and resource_type != 'folder'):
if folder_id != GDocsBackend.ROOT_FOLDER_ID:
|
elif len(entry.in_collections()) == 0:
result.append(entry)
else:
result = entries
# Done!
return result
""" gdata is an alternate way to access gdocs, currently 05/2015 lacking OAuth support """
duplicity.backend.register_backend('gdata+gdocs', GDocsBackend)
duplicity.backend.uses_netloc.extend(['gdata+gdocs'])
| for link in entry.in_collections():
folder_entry = self.client.get_entry(link.href, None, None,
desired_class=gdata.docs.data.Resource)
if folder_entry and (folder_entry.resource_id.text == folder_id):
result.append(entry) | conditional_block |
gdocsbackend.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import string
import urllib
import duplicity.backend
from duplicity.errors import BackendException
class GDocsBackend(duplicity.backend.Backend):
"""Connect to remote store using Google Google Documents List API"""
ROOT_FOLDER_ID = 'folder%3Aroot'
BACKUP_DOCUMENT_TYPE = 'application/binary'
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# Import Google Data APIs libraries.
try:
global atom
global gdata
import atom.data
import gdata.client
import gdata.docs.client
import gdata.docs.data
except ImportError as e:
raise BackendException("""\
Google Docs backend requires Google Data APIs Python Client Library (see http://code.google.com/p/gdata-python-client/).
Exception: %s""" % str(e))
# Setup client instance.
self.client = gdata.docs.client.DocsClient(source='duplicity $version')
self.client.ssl = True
self.client.http_client.debug = False
self._authorize(parsed_url.username + '@' + parsed_url.hostname, self.get_password())
# Fetch destination folder entry (and create hierarchy if required).
folder_names = string.split(parsed_url.path[1:], '/')
parent_folder = None
parent_folder_id = GDocsBackend.ROOT_FOLDER_ID
for folder_name in folder_names:
entries = self._fetch_entries(parent_folder_id, 'folder', folder_name)
if entries is not None:
if len(entries) == 1:
parent_folder = entries[0]
elif len(entries) == 0: | folder = gdata.docs.data.Resource(type='folder', title=folder_name)
parent_folder = self.client.create_resource(folder, collection=parent_folder)
else:
parent_folder = None
if parent_folder:
parent_folder_id = parent_folder.resource_id.text
else:
raise BackendException("Error while creating destination folder '%s'." % folder_name)
else:
raise BackendException("Error while fetching destination folder '%s'." % folder_name)
self.folder = parent_folder
def _put(self, source_path, remote_filename):
self._delete(remote_filename)
# Set uploader instance. Note that resumable uploads are required in order to
# enable uploads for all file types.
# (see http://googleappsdeveloper.blogspot.com/2011/05/upload-all-file-types-to-any-google.html)
file = source_path.open()
uploader = gdata.client.ResumableUploader(
self.client, file,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
os.path.getsize(file.name),
chunk_size=gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE,
desired_class=gdata.docs.data.Resource)
if uploader:
# Chunked upload.
entry = gdata.docs.data.Resource(title=atom.data.Title(text=remote_filename))
uri = self.folder.get_resumable_create_media_link().href + '?convert=false'
entry = uploader.UploadFile(uri, entry=entry)
if not entry:
raise BackendException("Failed to upload file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
else:
raise BackendException("Failed to initialize upload of file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text))
assert not file.close()
def _get(self, remote_filename, local_path):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
if len(entries) == 1:
entry = entries[0]
self.client.DownloadResource(entry, local_path.name)
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.folder.title.text))
def _list(self):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE)
return [entry.title.text for entry in entries]
def _delete(self, filename):
entries = self._fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
filename)
for entry in entries:
self.client.delete(entry.get_edit_link().href + '?delete=true', force=True)
def _authorize(self, email, password, captcha_token=None, captcha_response=None):
try:
self.client.client_login(email,
password,
source='duplicity $version',
service='writely',
captcha_token=captcha_token,
captcha_response=captcha_response)
except gdata.client.CaptchaChallenge as challenge:
print('A captcha challenge is required. Please visit ' + challenge.captcha_url)
answer = None
while not answer:
answer = raw_input('Answer to the challenge? ')
self._authorize(email, password, challenge.captcha_token, answer)
except gdata.client.BadAuthentication:
raise BackendException(
'Invalid user credentials given. Be aware that accounts '
'that use 2-step verification require creating an application specific '
'access code for using this Duplicity backend. Follow the instruction in '
'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
'and create your application-specific password to run duplicity backups.')
def _fetch_entries(self, folder_id, type, title=None):
# Build URI.
uri = '/feeds/default/private/full/%s/contents' % folder_id
if type == 'folder':
uri += '/-/folder?showfolders=true'
elif type == GDocsBackend.BACKUP_DOCUMENT_TYPE:
uri += '?showfolders=false'
else:
uri += '?showfolders=true'
if title:
uri += '&title=' + urllib.quote(title) + '&title-exact=true'
# Fetch entries.
entries = self.client.get_all_resources(uri=uri)
# When filtering by entry title, the API is returning (don't know why) documents in other
# folders (apart from folder_id) matching the title, so some extra filtering is required.
if title:
result = []
for entry in entries:
resource_type = entry.get_resource_type()
if (not type) \
or (type == 'folder' and resource_type == 'folder') \
or (type == GDocsBackend.BACKUP_DOCUMENT_TYPE and resource_type != 'folder'):
if folder_id != GDocsBackend.ROOT_FOLDER_ID:
for link in entry.in_collections():
folder_entry = self.client.get_entry(link.href, None, None,
desired_class=gdata.docs.data.Resource)
if folder_entry and (folder_entry.resource_id.text == folder_id):
result.append(entry)
elif len(entry.in_collections()) == 0:
result.append(entry)
else:
result = entries
# Done!
return result
""" gdata is an alternate way to access gdocs, currently 05/2015 lacking OAuth support """
duplicity.backend.register_backend('gdata+gdocs', GDocsBackend)
duplicity.backend.uses_netloc.extend(['gdata+gdocs']) | random_line_split |
|
color.js | // We can't use goog.color or goog.color.alpha because they internally use a hex
// string representation that encodes each channel in a single byte. This
// causes occasional loss of precision and rounding errors, especially in the
// alpha channel.
goog.provide('ol.Color');
goog.provide('ol.color');
goog.require('goog.asserts');
goog.require('goog.color');
goog.require('goog.color.names');
goog.require('goog.vec.Mat4');
goog.require('ol');
goog.require('ol.math');
/**
* A color represented as a short array [red, green, blue, alpha].
* red, green, and blue should be integers in the range 0..255 inclusive.
* alpha should be a float in the range 0..1 inclusive.
* @typedef {Array.<number>}
* @api
*/
ol.Color;
/**
* This RegExp matches # followed by 3 or 6 hex digits.
* @const
* @type {RegExp}
* @private
*/
ol.color.hexColorRe_ = /^#(?:[0-9a-f]{3}){1,2}$/i;
/**
* @see goog.color.rgbColorRe_
* @const
* @type {RegExp}
* @private
*/
ol.color.rgbColorRe_ =
/^(?:rgb)?\((0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2})\)$/i;
/**
* @see goog.color.alpha.rgbaColorRe_
* @const
* @type {RegExp}
* @private
*/
ol.color.rgbaColorRe_ =
/^(?:rgba)?\((0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2}),\s?(0|1|0\.\d{0,10})\)$/i;
/**
* @param {ol.Color} dst Destination.
* @param {ol.Color} src Source.
* @param {ol.Color=} opt_color Color.
* @return {ol.Color} Color.
*/
ol.color.blend = function(dst, src, opt_color) {
// http://en.wikipedia.org/wiki/Alpha_compositing
// FIXME do we need to scale by 255?
var out = opt_color ? opt_color : [];
var dstA = dst[3];
var srcA = src[3];
if (dstA == 1) {
out[0] = (src[0] * srcA + dst[0] * (1 - srcA) + 0.5) | 0;
out[1] = (src[1] * srcA + dst[1] * (1 - srcA) + 0.5) | 0;
out[2] = (src[2] * srcA + dst[2] * (1 - srcA) + 0.5) | 0;
out[3] = 1;
} else if (srcA === 0) {
out[0] = dst[0];
out[1] = dst[1];
out[2] = dst[2];
out[3] = dstA;
} else {
var outA = srcA + dstA * (1 - srcA);
if (outA === 0) {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
} else {
out[0] = ((src[0] * srcA + dst[0] * dstA * (1 - srcA)) / outA + 0.5) | 0;
out[1] = ((src[1] * srcA + dst[1] * dstA * (1 - srcA)) / outA + 0.5) | 0;
out[2] = ((src[2] * srcA + dst[2] * dstA * (1 - srcA)) / outA + 0.5) | 0;
out[3] = outA;
}
}
goog.asserts.assert(ol.color.isValid(out),
'Output color of blend should be a valid color');
return out;
};
/**
* Return the color as an array. This function maintains a cache of calculated
* arrays which means the result should not be modified.
* @param {ol.Color|string} color Color.
* @return {ol.Color} Color.
* @api
*/
ol.color.asArray = function(color) {
if (goog.isArray(color)) {
return color;
} else {
goog.asserts.assert(goog.isString(color), 'Color should be a string');
return ol.color.fromString(color);
}
};
/**
* Return the color as an rgba string.
* @param {ol.Color|string} color Color.
* @return {string} Rgba string.
* @api
*/
ol.color.asString = function(color) {
if (goog.isString(color)) {
return color;
} else {
goog.asserts.assert(goog.isArray(color), 'Color should be an array');
return ol.color.toString(color);
}
};
/**
* @param {ol.Color} color1 Color1.
* @param {ol.Color} color2 Color2.
* @return {boolean} Equals.
*/
ol.color.equals = function(color1, color2) {
return color1 === color2 || (
color1[0] == color2[0] && color1[1] == color2[1] &&
color1[2] == color2[2] && color1[3] == color2[3]);
};
/**
* @param {string} s String.
* @return {ol.Color} Color.
*/
ol.color.fromString = (
/**
* @return {function(string): ol.Color}
*/
function() {
// We maintain a small cache of parsed strings. To provide cheap LRU-like
// semantics, whenever the cache grows too large we simply delete an
// arbitrary 25% of the entries.
/**
* @const
* @type {number}
*/
var MAX_CACHE_SIZE = 1024;
/**
* @type {Object.<string, ol.Color>}
*/
var cache = {};
/**
* @type {number}
*/
var cacheSize = 0;
return (
/**
* @param {string} s String.
* @return {ol.Color} Color.
*/
function(s) {
var color;
if (cache.hasOwnProperty(s)) {
color = cache[s];
} else {
if (cacheSize >= MAX_CACHE_SIZE) {
var i = 0;
var key;
for (key in cache) {
if ((i++ & 3) === 0) {
delete cache[key];
--cacheSize;
}
}
}
color = ol.color.fromStringInternal_(s);
cache[s] = color;
++cacheSize;
}
return color;
});
})();
/**
* @param {string} s String.
* @private
* @return {ol.Color} Color.
*/
ol.color.fromStringInternal_ = function(s) {
var isHex = false;
if (ol.ENABLE_NAMED_COLORS && goog.color.names.hasOwnProperty(s)) {
// goog.color.names does not have a type declaration, so add a typecast
s = /** @type {string} */ (goog.color.names[s]);
isHex = true;
}
var r, g, b, a, color, match;
if (isHex || (match = ol.color.hexColorRe_.exec(s))) | else if ((match = ol.color.rgbaColorRe_.exec(s))) { // rgba()
r = Number(match[1]);
g = Number(match[2]);
b = Number(match[3]);
a = Number(match[4]);
color = [r, g, b, a];
return ol.color.normalize(color, color);
} else if ((match = ol.color.rgbColorRe_.exec(s))) { // rgb()
r = Number(match[1]);
g = Number(match[2]);
b = Number(match[3]);
color = [r, g, b, 1];
return ol.color.normalize(color, color);
} else {
goog.asserts.fail(s + ' is not a valid color');
}
};
/**
* @param {ol.Color} color Color.
* @return {boolean} Is valid.
*/
ol.color.isValid = function(color) {
return 0 <= color[0] && color[0] < 256 &&
0 <= color[1] && color[1] < 256 &&
0 <= color[2] && color[2] < 256 &&
0 <= color[3] && color[3] <= 1;
};
/**
* @param {ol.Color} color Color.
* @param {ol.Color=} opt_color Color.
* @return {ol.Color} Clamped color.
*/
ol.color.normalize = function(color, opt_color) {
var result = opt_color || [];
result[0] = ol.math.clamp((color[0] + 0.5) | 0, 0, 255);
result[1] = ol.math.clamp((color[1] + 0.5) | 0, 0, 255);
result[2] = ol.math.clamp((color[2] + 0.5) | 0, 0, 255);
result[3] = ol.math.clamp(color[3], 0, 1);
return result;
};
/**
* @param {ol.Color} color Color.
* @return {string} String.
*/
ol.color.toString = function(color) {
var r = color[0];
if (r != (r | 0)) {
r = (r + 0.5) | 0;
}
var g = color[1];
if (g != (g | 0)) {
g = (g + 0.5) | 0;
}
var b = color[2];
if (b != (b | 0)) {
b = (b + 0.5) | 0;
}
var a = color[3];
return 'rgba(' + r + ',' + g + ',' + b + ',' + a + ')';
};
/**
* @param {!ol.Color} color Color.
* @param {goog.vec.Mat4.Number} transform Transform.
* @param {!ol.Color=} opt_color Color.
* @return {ol.Color} Transformed color.
*/
ol.color.transform = function(color, transform, opt_color) {
var result = opt_color ? opt_color : [];
result = goog.vec.Mat4.multVec3(transform, color, result);
goog.asserts.assert(goog.isArray(result), 'result should be an array');
result[3] = color[3];
return ol.color.normalize(result, result);
};
/**
* @param {ol.Color|string} color1 Color1.
* @param {ol.Color|string} color2 Color2.
* @return {boolean} Equals.
*/
ol.color.stringOrColorEquals = function(color1, color2) {
if (color1 === color2 || color1 == color2) {
return true;
}
if (goog.isString(color1)) {
color1 = ol.color.fromString(color1);
}
if (goog.isString(color2)) {
color2 = ol.color.fromString(color2);
}
return ol.color.equals(color1, color2);
};
| { // hex
var n = s.length - 1; // number of hex digits
goog.asserts.assert(n == 3 || n == 6,
'Color string length should be 3 or 6');
var d = n == 3 ? 1 : 2; // number of digits per channel
r = parseInt(s.substr(1 + 0 * d, d), 16);
g = parseInt(s.substr(1 + 1 * d, d), 16);
b = parseInt(s.substr(1 + 2 * d, d), 16);
if (d == 1) {
r = (r << 4) + r;
g = (g << 4) + g;
b = (b << 4) + b;
}
a = 1;
color = [r, g, b, a];
goog.asserts.assert(ol.color.isValid(color),
'Color should be a valid color');
return color;
} | conditional_block |
color.js | // We can't use goog.color or goog.color.alpha because they internally use a hex
// string representation that encodes each channel in a single byte. This
// causes occasional loss of precision and rounding errors, especially in the
// alpha channel.
goog.provide('ol.Color');
goog.provide('ol.color');
goog.require('goog.asserts');
goog.require('goog.color');
goog.require('goog.color.names');
goog.require('goog.vec.Mat4');
goog.require('ol');
goog.require('ol.math');
/**
* A color represented as a short array [red, green, blue, alpha].
* red, green, and blue should be integers in the range 0..255 inclusive.
* alpha should be a float in the range 0..1 inclusive.
* @typedef {Array.<number>}
* @api
*/
ol.Color;
/**
* This RegExp matches # followed by 3 or 6 hex digits.
* @const
* @type {RegExp}
* @private
*/
ol.color.hexColorRe_ = /^#(?:[0-9a-f]{3}){1,2}$/i;
/**
* @see goog.color.rgbColorRe_
* @const
* @type {RegExp}
* @private
*/
ol.color.rgbColorRe_ =
/^(?:rgb)?\((0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2})\)$/i;
/**
* @see goog.color.alpha.rgbaColorRe_
* @const
* @type {RegExp}
* @private
*/
ol.color.rgbaColorRe_ =
/^(?:rgba)?\((0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2}),\s?(0|[1-9]\d{0,2}),\s?(0|1|0\.\d{0,10})\)$/i;
/**
* @param {ol.Color} dst Destination.
* @param {ol.Color} src Source.
* @param {ol.Color=} opt_color Color.
* @return {ol.Color} Color.
*/
ol.color.blend = function(dst, src, opt_color) {
// http://en.wikipedia.org/wiki/Alpha_compositing
// FIXME do we need to scale by 255?
var out = opt_color ? opt_color : [];
var dstA = dst[3];
var srcA = src[3];
if (dstA == 1) {
out[0] = (src[0] * srcA + dst[0] * (1 - srcA) + 0.5) | 0;
out[1] = (src[1] * srcA + dst[1] * (1 - srcA) + 0.5) | 0;
out[2] = (src[2] * srcA + dst[2] * (1 - srcA) + 0.5) | 0;
out[3] = 1;
} else if (srcA === 0) {
out[0] = dst[0];
out[1] = dst[1];
out[2] = dst[2];
out[3] = dstA;
} else {
var outA = srcA + dstA * (1 - srcA);
if (outA === 0) {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
} else {
out[0] = ((src[0] * srcA + dst[0] * dstA * (1 - srcA)) / outA + 0.5) | 0;
out[1] = ((src[1] * srcA + dst[1] * dstA * (1 - srcA)) / outA + 0.5) | 0;
out[2] = ((src[2] * srcA + dst[2] * dstA * (1 - srcA)) / outA + 0.5) | 0;
out[3] = outA;
}
}
goog.asserts.assert(ol.color.isValid(out),
'Output color of blend should be a valid color');
return out;
};
/**
* Return the color as an array. This function maintains a cache of calculated
* arrays which means the result should not be modified.
* @param {ol.Color|string} color Color.
* @return {ol.Color} Color.
* @api
*/
ol.color.asArray = function(color) {
if (goog.isArray(color)) {
return color;
} else {
goog.asserts.assert(goog.isString(color), 'Color should be a string');
return ol.color.fromString(color);
}
};
/**
* Return the color as an rgba string.
* @param {ol.Color|string} color Color.
* @return {string} Rgba string.
* @api
*/
ol.color.asString = function(color) {
if (goog.isString(color)) {
return color;
} else {
goog.asserts.assert(goog.isArray(color), 'Color should be an array');
return ol.color.toString(color);
}
};
/**
* @param {ol.Color} color1 Color1.
* @param {ol.Color} color2 Color2.
* @return {boolean} Equals.
*/
ol.color.equals = function(color1, color2) {
return color1 === color2 || (
color1[0] == color2[0] && color1[1] == color2[1] &&
color1[2] == color2[2] && color1[3] == color2[3]);
};
/**
* @param {string} s String.
* @return {ol.Color} Color.
*/
ol.color.fromString = (
/**
* @return {function(string): ol.Color}
*/
function() {
// We maintain a small cache of parsed strings. To provide cheap LRU-like
// semantics, whenever the cache grows too large we simply delete an
// arbitrary 25% of the entries.
/**
* @const
* @type {number}
*/
var MAX_CACHE_SIZE = 1024;
/**
* @type {Object.<string, ol.Color>}
*/
var cache = {};
/**
* @type {number}
*/
var cacheSize = 0;
return (
/**
* @param {string} s String.
* @return {ol.Color} Color.
*/
function(s) {
var color;
if (cache.hasOwnProperty(s)) {
color = cache[s];
} else {
if (cacheSize >= MAX_CACHE_SIZE) {
var i = 0;
var key;
for (key in cache) {
if ((i++ & 3) === 0) {
delete cache[key];
--cacheSize;
}
} | cache[s] = color;
++cacheSize;
}
return color;
});
})();
/**
* @param {string} s String.
* @private
* @return {ol.Color} Color.
*/
ol.color.fromStringInternal_ = function(s) {
var isHex = false;
if (ol.ENABLE_NAMED_COLORS && goog.color.names.hasOwnProperty(s)) {
// goog.color.names does not have a type declaration, so add a typecast
s = /** @type {string} */ (goog.color.names[s]);
isHex = true;
}
var r, g, b, a, color, match;
if (isHex || (match = ol.color.hexColorRe_.exec(s))) { // hex
var n = s.length - 1; // number of hex digits
goog.asserts.assert(n == 3 || n == 6,
'Color string length should be 3 or 6');
var d = n == 3 ? 1 : 2; // number of digits per channel
r = parseInt(s.substr(1 + 0 * d, d), 16);
g = parseInt(s.substr(1 + 1 * d, d), 16);
b = parseInt(s.substr(1 + 2 * d, d), 16);
if (d == 1) {
r = (r << 4) + r;
g = (g << 4) + g;
b = (b << 4) + b;
}
a = 1;
color = [r, g, b, a];
goog.asserts.assert(ol.color.isValid(color),
'Color should be a valid color');
return color;
} else if ((match = ol.color.rgbaColorRe_.exec(s))) { // rgba()
r = Number(match[1]);
g = Number(match[2]);
b = Number(match[3]);
a = Number(match[4]);
color = [r, g, b, a];
return ol.color.normalize(color, color);
} else if ((match = ol.color.rgbColorRe_.exec(s))) { // rgb()
r = Number(match[1]);
g = Number(match[2]);
b = Number(match[3]);
color = [r, g, b, 1];
return ol.color.normalize(color, color);
} else {
goog.asserts.fail(s + ' is not a valid color');
}
};
/**
* @param {ol.Color} color Color.
* @return {boolean} Is valid.
*/
ol.color.isValid = function(color) {
return 0 <= color[0] && color[0] < 256 &&
0 <= color[1] && color[1] < 256 &&
0 <= color[2] && color[2] < 256 &&
0 <= color[3] && color[3] <= 1;
};
/**
* @param {ol.Color} color Color.
* @param {ol.Color=} opt_color Color.
* @return {ol.Color} Clamped color.
*/
ol.color.normalize = function(color, opt_color) {
var result = opt_color || [];
result[0] = ol.math.clamp((color[0] + 0.5) | 0, 0, 255);
result[1] = ol.math.clamp((color[1] + 0.5) | 0, 0, 255);
result[2] = ol.math.clamp((color[2] + 0.5) | 0, 0, 255);
result[3] = ol.math.clamp(color[3], 0, 1);
return result;
};
/**
* @param {ol.Color} color Color.
* @return {string} String.
*/
ol.color.toString = function(color) {
var r = color[0];
if (r != (r | 0)) {
r = (r + 0.5) | 0;
}
var g = color[1];
if (g != (g | 0)) {
g = (g + 0.5) | 0;
}
var b = color[2];
if (b != (b | 0)) {
b = (b + 0.5) | 0;
}
var a = color[3];
return 'rgba(' + r + ',' + g + ',' + b + ',' + a + ')';
};
/**
* @param {!ol.Color} color Color.
* @param {goog.vec.Mat4.Number} transform Transform.
* @param {!ol.Color=} opt_color Color.
* @return {ol.Color} Transformed color.
*/
ol.color.transform = function(color, transform, opt_color) {
var result = opt_color ? opt_color : [];
result = goog.vec.Mat4.multVec3(transform, color, result);
goog.asserts.assert(goog.isArray(result), 'result should be an array');
result[3] = color[3];
return ol.color.normalize(result, result);
};
/**
* @param {ol.Color|string} color1 Color1.
* @param {ol.Color|string} color2 Color2.
* @return {boolean} Equals.
*/
ol.color.stringOrColorEquals = function(color1, color2) {
if (color1 === color2 || color1 == color2) {
return true;
}
if (goog.isString(color1)) {
color1 = ol.color.fromString(color1);
}
if (goog.isString(color2)) {
color2 = ol.color.fromString(color2);
}
return ol.color.equals(color1, color2);
}; | }
color = ol.color.fromStringInternal_(s); | random_line_split |
server.js | // NPM Modules
var low = require("lowdb");
var url = require("url");
var async = require("async");
var fs = require("fs");
var _ = require("underscore");
var _s = require("underscore.string");
var request = require('request');
var twitter = require('twitter');
// Custom Modules
var yans = require("./node_modules-custom/yans");
var twinglish = require("./node_modules-custom/twinglish.js");
var twetrics = require("./node_modules-custom/twetrics.js");
var csv = require("./node_modules-custom/csv.js");
var dataer = require("./node_modules-custom/dataer.js");
var twitterClient;
var server;
async.waterfall([
function(next) {
// SERVER
server = new yans({
"directory": __dirname,
"viewPath": "pages",
"logging": true,
"loggingFormat": ":method :url -> HTTP :status; :response-time ms",
"staticDirectories": ["css", "fonts", "js", "static"]
});
server.dbInit = function() {
var _self = this;
_self.db = low(
"db.json",
{
"autosave": false
}
);
}
server.resetDatabase = function(callback) {
var _self = this;
var blankData = fs.readFileSync("db-blank.json");
fs.writeFileSync("db.json", blankData);
_self.dbInit();
callback();
};
server.jsonError = function(text, res) {
res.send({
"error": text
});
};
server.jsonSuccess = function(text, res) {
res.send({
"ok": text
});
};
next();
},
function(next) {
// DATABASE
if (!fs.existsSync("db.json")) {
server.resetDatabase(next);
} else {
server.dbInit();
next();
}
},
function(next) {
// TWITTER
fs.readFile("twitter-credentials.json", function(error, data) {
if (error) {
console.error("twitter-credentials.json is missing or couldn't be read. I need this.");
process.exit();
return;
}
var twitterDetails = JSON.parse(data);
twitterClient = new twitter(twitterDetails);
next();
});
},
function(next) {
//
// FRONTEND (UI) ROUTES
//
server.app.get("/", function(req, res) {
res.render("index");
});
server.app.get("/twitter/", function(req, res) {
var twitter = "jadaradix";
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/twitter/*/", function(req, res) {
var twitter = req.params[0];
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/process/", function(req, res) {
if (!req.query.hasOwnProperty("name") || !req.query.hasOwnProperty("topics")) {
res.redirect(302, "/");
return;
}
var name = _s.humanize(req.query.name) + " Museum";
var topics = req.query.topics.split(",");
var museums = server.db("museums");
//insert
function doesIdExist(id) {
var r = museums.find({ id: id });
return (r.value() ? true : false);
}
function generateId() {
var id = _.times(16, function(n) {
return _.random(0, 10);
}).join("");
return id;
}
var id = generateId();
while (doesIdExist(id)) {
var id = generateId();
}
var museum = {
"id": id,
"name": name,
"isSpontaneous": true,
"topics": topics
}
museums.push(museum);
server.db.save();
res.redirect(302, "/museum/" + id);
});
server.app.get("/process/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"process",
{ "twitter": id }
);
});
server.app.get("/museum/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue || (!(("topics" in rValue)) || rValue.topics.length == 0)) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"museum",
{
"title": rValue.name,
"id": id
}
);
});
server.app.get("/favicon.ico", function(req, res) {
res.redirect(301, "/static/favicon.ico");
});
//
// BACKEND (API) ROUTES
//
server.app.get("/api/reset", function(req, res) {
async.waterfall([
function(next) {
server.resetDatabase(next);
},
function(next) {
server.jsonSuccess("The database was reset.", res);
}
]);
});
server.app.get("/api/twitter/*", function(req, res) {
var screenName = req.params[0];
if (!screenName.length) {
server.jsonError("The Twitter screen name was empty.", res);
return;
}
async.waterfall([
function(next) {
// Account
twitterClient.get(
'users/show',
{
"screen_name": screenName,
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 404:
errorText = "That Twitter account doesn't exist.";
break;
default:
errorText = "The twitter account couldn't be accessed; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var newData = {
"id": screenName,
"name": data.name + "'" + (!_s.endsWith(data.name.toLowerCase(), "s") ? "s" : "") + " Museum",
"isSpontaneous": false,
"twitter": {
"account": {
"screenName": data.screen_name,
"name": data.name,
"location": data.location,
"description": data.description,
"language": data.lang,
"picture": data.profile_image_url
}
}
};
next(null, newData);
}
);
},
function(passedData, next) {
twitterClient.get(
'statuses/user_timeline',
{
"screen_name": screenName,
"trim_user": true,
"count": 200,
"include_rts": true
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 401:
errorText = "That Twitter account is probably using ‘protected tweets’.";
break;
default:
errorText = "The tweets couldn't be retrieved; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var tweets = _.map(data, twinglish.cleanTweet);
// var tweets = data;
//some tweets may have been removed (annulled)
tweets = _.filter(tweets, function(tweet) {
return (!(tweet == null || tweet == undefined)); //sorry
});
passedData["twitter"]["tweets"] = tweets;
var tweetTexts = _.map(tweets, function(tweet) {
return tweet.text;
});
var textStuff = tweetTexts.join(" ");
// from http://codereview.stackexchange.com/questions/63947/count-frequency-of-words-in-a-string-returning-list-of-unique-words-sorted-by-fr
function getFrequency2(string, cutOff) {
var cleanString = string.replace(/[\.,-\/#!$%\^&\*;:{}=\-_`~()]/g,""),
words = cleanString.split(' '),
frequencies = {},
word, frequency, i;
for( i=0; i<words.length; i++ ) {
word = words[i];
frequencies[word] = frequencies[word] || 0;
frequencies[word]++;
}
words = Object.keys( frequencies );
return words.sort(function (a,b) { return frequencies[b] -frequencies[a];}).slice(0,cutOff);
}
var stuff = getFrequency2(textStuff, 100);
var stuff2 = stuff.slice(95, 100);
console.log(stuff2);
passedData["topics"] = stuff2;
next(null, passedData);
}
);
},
function(passedData, next) {
if ("metrics" in req.query) {
passedData["twitter"]["metrics"] = _.map(twetrics.metrics, function(metric) {
return metric.method(passedData["twitter"]["tweets"]);
});
}
next(null, passedData);
},
function(passedData, next) {
var format = (("format" in req.query) ? req.query.format : "json");
var data;
switch(format) {
case "store":
var museums = server.db("museums");
var r = museums.remove({
id: screenName
});
// if (r["__wrapped__"].length == 0) {
// //did not exist in the first place
// return;
// }
museums.push(passedData);
server.db.save();
server.jsonSuccess("The Twitter data was stored.", res);
return;
break;
case "json":
data = passedData;
break;
case "prettyjson":
data = JSON.stringify(passedData, undefined, 2);
break;
case "csv":
//return only tweets
data = csv.fromObjects(
passedData["twitter"]["tweets"],
["when", "text", "isRetweet"],
true
);
break;
default:
data = [];
break;
}
res.send(data);
}
]);
});
server.app.get("/api/process/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
res.send(r);
});
server.app.get("/api/museum/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) |
var topicFunctions = _.map(r.topics, function(topic) {
return function(callback) {
dataer.getData(topic, function(data) {
callback(null, data);
})
};
})
async.parallel(
topicFunctions,
function(err, topics) {
res.send({
"isSpontaneous": (r.isSpontaneous ? true : false),
"topics": topics
});
}
);
});
//
// ERROR CODE ROUTES
//
server.app.get("*", function(req, res) {
res.status(404);
res.render("404");
});
}
]); | {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
} | conditional_block |
server.js | // NPM Modules
var low = require("lowdb");
var url = require("url");
var async = require("async");
var fs = require("fs");
var _ = require("underscore");
var _s = require("underscore.string");
var request = require('request');
var twitter = require('twitter');
// Custom Modules
var yans = require("./node_modules-custom/yans");
var twinglish = require("./node_modules-custom/twinglish.js");
var twetrics = require("./node_modules-custom/twetrics.js");
var csv = require("./node_modules-custom/csv.js");
var dataer = require("./node_modules-custom/dataer.js");
var twitterClient;
var server;
async.waterfall([
function(next) {
// SERVER
server = new yans({
"directory": __dirname,
"viewPath": "pages",
"logging": true,
"loggingFormat": ":method :url -> HTTP :status; :response-time ms",
"staticDirectories": ["css", "fonts", "js", "static"]
});
server.dbInit = function() {
var _self = this;
_self.db = low(
"db.json",
{
"autosave": false
}
);
}
server.resetDatabase = function(callback) {
var _self = this;
var blankData = fs.readFileSync("db-blank.json");
fs.writeFileSync("db.json", blankData);
_self.dbInit();
callback();
};
server.jsonError = function(text, res) {
res.send({
"error": text
});
};
server.jsonSuccess = function(text, res) {
res.send({
"ok": text
});
};
next();
},
function(next) {
// DATABASE
if (!fs.existsSync("db.json")) {
server.resetDatabase(next);
} else {
server.dbInit();
next();
}
},
function(next) {
// TWITTER
fs.readFile("twitter-credentials.json", function(error, data) {
if (error) {
console.error("twitter-credentials.json is missing or couldn't be read. I need this.");
process.exit();
return;
}
var twitterDetails = JSON.parse(data);
twitterClient = new twitter(twitterDetails);
next();
});
},
function(next) {
//
// FRONTEND (UI) ROUTES
//
server.app.get("/", function(req, res) {
res.render("index");
});
server.app.get("/twitter/", function(req, res) {
var twitter = "jadaradix";
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/twitter/*/", function(req, res) {
var twitter = req.params[0];
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/process/", function(req, res) {
if (!req.query.hasOwnProperty("name") || !req.query.hasOwnProperty("topics")) {
res.redirect(302, "/");
return;
}
var name = _s.humanize(req.query.name) + " Museum";
var topics = req.query.topics.split(",");
var museums = server.db("museums");
//insert
function doesIdExist(id) {
var r = museums.find({ id: id });
return (r.value() ? true : false);
}
function generateId() {
var id = _.times(16, function(n) {
return _.random(0, 10);
}).join("");
return id;
}
var id = generateId();
while (doesIdExist(id)) {
var id = generateId();
}
var museum = {
"id": id,
"name": name,
"isSpontaneous": true,
"topics": topics
}
museums.push(museum);
server.db.save(); | var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"process",
{ "twitter": id }
);
});
server.app.get("/museum/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue || (!(("topics" in rValue)) || rValue.topics.length == 0)) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"museum",
{
"title": rValue.name,
"id": id
}
);
});
server.app.get("/favicon.ico", function(req, res) {
res.redirect(301, "/static/favicon.ico");
});
//
// BACKEND (API) ROUTES
//
server.app.get("/api/reset", function(req, res) {
async.waterfall([
function(next) {
server.resetDatabase(next);
},
function(next) {
server.jsonSuccess("The database was reset.", res);
}
]);
});
server.app.get("/api/twitter/*", function(req, res) {
var screenName = req.params[0];
if (!screenName.length) {
server.jsonError("The Twitter screen name was empty.", res);
return;
}
async.waterfall([
function(next) {
// Account
twitterClient.get(
'users/show',
{
"screen_name": screenName,
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 404:
errorText = "That Twitter account doesn't exist.";
break;
default:
errorText = "The twitter account couldn't be accessed; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var newData = {
"id": screenName,
"name": data.name + "'" + (!_s.endsWith(data.name.toLowerCase(), "s") ? "s" : "") + " Museum",
"isSpontaneous": false,
"twitter": {
"account": {
"screenName": data.screen_name,
"name": data.name,
"location": data.location,
"description": data.description,
"language": data.lang,
"picture": data.profile_image_url
}
}
};
next(null, newData);
}
);
},
function(passedData, next) {
twitterClient.get(
'statuses/user_timeline',
{
"screen_name": screenName,
"trim_user": true,
"count": 200,
"include_rts": true
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 401:
errorText = "That Twitter account is probably using ‘protected tweets’.";
break;
default:
errorText = "The tweets couldn't be retrieved; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var tweets = _.map(data, twinglish.cleanTweet);
// var tweets = data;
//some tweets may have been removed (annulled)
tweets = _.filter(tweets, function(tweet) {
return (!(tweet == null || tweet == undefined)); //sorry
});
passedData["twitter"]["tweets"] = tweets;
var tweetTexts = _.map(tweets, function(tweet) {
return tweet.text;
});
var textStuff = tweetTexts.join(" ");
// from http://codereview.stackexchange.com/questions/63947/count-frequency-of-words-in-a-string-returning-list-of-unique-words-sorted-by-fr
function getFrequency2(string, cutOff) {
var cleanString = string.replace(/[\.,-\/#!$%\^&\*;:{}=\-_`~()]/g,""),
words = cleanString.split(' '),
frequencies = {},
word, frequency, i;
for( i=0; i<words.length; i++ ) {
word = words[i];
frequencies[word] = frequencies[word] || 0;
frequencies[word]++;
}
words = Object.keys( frequencies );
return words.sort(function (a,b) { return frequencies[b] -frequencies[a];}).slice(0,cutOff);
}
var stuff = getFrequency2(textStuff, 100);
var stuff2 = stuff.slice(95, 100);
console.log(stuff2);
passedData["topics"] = stuff2;
next(null, passedData);
}
);
},
function(passedData, next) {
if ("metrics" in req.query) {
passedData["twitter"]["metrics"] = _.map(twetrics.metrics, function(metric) {
return metric.method(passedData["twitter"]["tweets"]);
});
}
next(null, passedData);
},
function(passedData, next) {
var format = (("format" in req.query) ? req.query.format : "json");
var data;
switch(format) {
case "store":
var museums = server.db("museums");
var r = museums.remove({
id: screenName
});
// if (r["__wrapped__"].length == 0) {
// //did not exist in the first place
// return;
// }
museums.push(passedData);
server.db.save();
server.jsonSuccess("The Twitter data was stored.", res);
return;
break;
case "json":
data = passedData;
break;
case "prettyjson":
data = JSON.stringify(passedData, undefined, 2);
break;
case "csv":
//return only tweets
data = csv.fromObjects(
passedData["twitter"]["tweets"],
["when", "text", "isRetweet"],
true
);
break;
default:
data = [];
break;
}
res.send(data);
}
]);
});
server.app.get("/api/process/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
res.send(r);
});
server.app.get("/api/museum/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
var topicFunctions = _.map(r.topics, function(topic) {
return function(callback) {
dataer.getData(topic, function(data) {
callback(null, data);
})
};
})
async.parallel(
topicFunctions,
function(err, topics) {
res.send({
"isSpontaneous": (r.isSpontaneous ? true : false),
"topics": topics
});
}
);
});
//
// ERROR CODE ROUTES
//
server.app.get("*", function(req, res) {
res.status(404);
res.render("404");
});
}
]); | res.redirect(302, "/museum/" + id);
});
server.app.get("/process/*/", function(req, res) {
var id = req.params[0]; | random_line_split |
server.js | // NPM Modules
var low = require("lowdb");
var url = require("url");
var async = require("async");
var fs = require("fs");
var _ = require("underscore");
var _s = require("underscore.string");
var request = require('request');
var twitter = require('twitter');
// Custom Modules
var yans = require("./node_modules-custom/yans");
var twinglish = require("./node_modules-custom/twinglish.js");
var twetrics = require("./node_modules-custom/twetrics.js");
var csv = require("./node_modules-custom/csv.js");
var dataer = require("./node_modules-custom/dataer.js");
var twitterClient;
var server;
async.waterfall([
function(next) {
// SERVER
server = new yans({
"directory": __dirname,
"viewPath": "pages",
"logging": true,
"loggingFormat": ":method :url -> HTTP :status; :response-time ms",
"staticDirectories": ["css", "fonts", "js", "static"]
});
server.dbInit = function() {
var _self = this;
_self.db = low(
"db.json",
{
"autosave": false
}
);
}
server.resetDatabase = function(callback) {
var _self = this;
var blankData = fs.readFileSync("db-blank.json");
fs.writeFileSync("db.json", blankData);
_self.dbInit();
callback();
};
server.jsonError = function(text, res) {
res.send({
"error": text
});
};
server.jsonSuccess = function(text, res) {
res.send({
"ok": text
});
};
next();
},
function(next) {
// DATABASE
if (!fs.existsSync("db.json")) {
server.resetDatabase(next);
} else {
server.dbInit();
next();
}
},
function(next) {
// TWITTER
fs.readFile("twitter-credentials.json", function(error, data) {
if (error) {
console.error("twitter-credentials.json is missing or couldn't be read. I need this.");
process.exit();
return;
}
var twitterDetails = JSON.parse(data);
twitterClient = new twitter(twitterDetails);
next();
});
},
function(next) {
//
// FRONTEND (UI) ROUTES
//
server.app.get("/", function(req, res) {
res.render("index");
});
server.app.get("/twitter/", function(req, res) {
var twitter = "jadaradix";
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/twitter/*/", function(req, res) {
var twitter = req.params[0];
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/process/", function(req, res) {
if (!req.query.hasOwnProperty("name") || !req.query.hasOwnProperty("topics")) {
res.redirect(302, "/");
return;
}
var name = _s.humanize(req.query.name) + " Museum";
var topics = req.query.topics.split(",");
var museums = server.db("museums");
//insert
function doesIdExist(id) |
function generateId() {
var id = _.times(16, function(n) {
return _.random(0, 10);
}).join("");
return id;
}
var id = generateId();
while (doesIdExist(id)) {
var id = generateId();
}
var museum = {
"id": id,
"name": name,
"isSpontaneous": true,
"topics": topics
}
museums.push(museum);
server.db.save();
res.redirect(302, "/museum/" + id);
});
server.app.get("/process/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"process",
{ "twitter": id }
);
});
server.app.get("/museum/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue || (!(("topics" in rValue)) || rValue.topics.length == 0)) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"museum",
{
"title": rValue.name,
"id": id
}
);
});
server.app.get("/favicon.ico", function(req, res) {
res.redirect(301, "/static/favicon.ico");
});
//
// BACKEND (API) ROUTES
//
server.app.get("/api/reset", function(req, res) {
async.waterfall([
function(next) {
server.resetDatabase(next);
},
function(next) {
server.jsonSuccess("The database was reset.", res);
}
]);
});
server.app.get("/api/twitter/*", function(req, res) {
var screenName = req.params[0];
if (!screenName.length) {
server.jsonError("The Twitter screen name was empty.", res);
return;
}
async.waterfall([
function(next) {
// Account
twitterClient.get(
'users/show',
{
"screen_name": screenName,
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 404:
errorText = "That Twitter account doesn't exist.";
break;
default:
errorText = "The twitter account couldn't be accessed; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var newData = {
"id": screenName,
"name": data.name + "'" + (!_s.endsWith(data.name.toLowerCase(), "s") ? "s" : "") + " Museum",
"isSpontaneous": false,
"twitter": {
"account": {
"screenName": data.screen_name,
"name": data.name,
"location": data.location,
"description": data.description,
"language": data.lang,
"picture": data.profile_image_url
}
}
};
next(null, newData);
}
);
},
function(passedData, next) {
twitterClient.get(
'statuses/user_timeline',
{
"screen_name": screenName,
"trim_user": true,
"count": 200,
"include_rts": true
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 401:
errorText = "That Twitter account is probably using ‘protected tweets’.";
break;
default:
errorText = "The tweets couldn't be retrieved; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var tweets = _.map(data, twinglish.cleanTweet);
// var tweets = data;
//some tweets may have been removed (annulled)
tweets = _.filter(tweets, function(tweet) {
return (!(tweet == null || tweet == undefined)); //sorry
});
passedData["twitter"]["tweets"] = tweets;
var tweetTexts = _.map(tweets, function(tweet) {
return tweet.text;
});
var textStuff = tweetTexts.join(" ");
// from http://codereview.stackexchange.com/questions/63947/count-frequency-of-words-in-a-string-returning-list-of-unique-words-sorted-by-fr
function getFrequency2(string, cutOff) {
var cleanString = string.replace(/[\.,-\/#!$%\^&\*;:{}=\-_`~()]/g,""),
words = cleanString.split(' '),
frequencies = {},
word, frequency, i;
for( i=0; i<words.length; i++ ) {
word = words[i];
frequencies[word] = frequencies[word] || 0;
frequencies[word]++;
}
words = Object.keys( frequencies );
return words.sort(function (a,b) { return frequencies[b] -frequencies[a];}).slice(0,cutOff);
}
var stuff = getFrequency2(textStuff, 100);
var stuff2 = stuff.slice(95, 100);
console.log(stuff2);
passedData["topics"] = stuff2;
next(null, passedData);
}
);
},
function(passedData, next) {
if ("metrics" in req.query) {
passedData["twitter"]["metrics"] = _.map(twetrics.metrics, function(metric) {
return metric.method(passedData["twitter"]["tweets"]);
});
}
next(null, passedData);
},
function(passedData, next) {
var format = (("format" in req.query) ? req.query.format : "json");
var data;
switch(format) {
case "store":
var museums = server.db("museums");
var r = museums.remove({
id: screenName
});
// if (r["__wrapped__"].length == 0) {
// //did not exist in the first place
// return;
// }
museums.push(passedData);
server.db.save();
server.jsonSuccess("The Twitter data was stored.", res);
return;
break;
case "json":
data = passedData;
break;
case "prettyjson":
data = JSON.stringify(passedData, undefined, 2);
break;
case "csv":
//return only tweets
data = csv.fromObjects(
passedData["twitter"]["tweets"],
["when", "text", "isRetweet"],
true
);
break;
default:
data = [];
break;
}
res.send(data);
}
]);
});
server.app.get("/api/process/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
res.send(r);
});
server.app.get("/api/museum/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
var topicFunctions = _.map(r.topics, function(topic) {
return function(callback) {
dataer.getData(topic, function(data) {
callback(null, data);
})
};
})
async.parallel(
topicFunctions,
function(err, topics) {
res.send({
"isSpontaneous": (r.isSpontaneous ? true : false),
"topics": topics
});
}
);
});
//
// ERROR CODE ROUTES
//
server.app.get("*", function(req, res) {
res.status(404);
res.render("404");
});
}
]); | {
var r = museums.find({ id: id });
return (r.value() ? true : false);
} | identifier_body |
server.js | // NPM Modules
var low = require("lowdb");
var url = require("url");
var async = require("async");
var fs = require("fs");
var _ = require("underscore");
var _s = require("underscore.string");
var request = require('request');
var twitter = require('twitter');
// Custom Modules
var yans = require("./node_modules-custom/yans");
var twinglish = require("./node_modules-custom/twinglish.js");
var twetrics = require("./node_modules-custom/twetrics.js");
var csv = require("./node_modules-custom/csv.js");
var dataer = require("./node_modules-custom/dataer.js");
var twitterClient;
var server;
async.waterfall([
function(next) {
// SERVER
server = new yans({
"directory": __dirname,
"viewPath": "pages",
"logging": true,
"loggingFormat": ":method :url -> HTTP :status; :response-time ms",
"staticDirectories": ["css", "fonts", "js", "static"]
});
server.dbInit = function() {
var _self = this;
_self.db = low(
"db.json",
{
"autosave": false
}
);
}
server.resetDatabase = function(callback) {
var _self = this;
var blankData = fs.readFileSync("db-blank.json");
fs.writeFileSync("db.json", blankData);
_self.dbInit();
callback();
};
server.jsonError = function(text, res) {
res.send({
"error": text
});
};
server.jsonSuccess = function(text, res) {
res.send({
"ok": text
});
};
next();
},
function(next) {
// DATABASE
if (!fs.existsSync("db.json")) {
server.resetDatabase(next);
} else {
server.dbInit();
next();
}
},
function(next) {
// TWITTER
fs.readFile("twitter-credentials.json", function(error, data) {
if (error) {
console.error("twitter-credentials.json is missing or couldn't be read. I need this.");
process.exit();
return;
}
var twitterDetails = JSON.parse(data);
twitterClient = new twitter(twitterDetails);
next();
});
},
function(next) {
//
// FRONTEND (UI) ROUTES
//
server.app.get("/", function(req, res) {
res.render("index");
});
server.app.get("/twitter/", function(req, res) {
var twitter = "jadaradix";
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/twitter/*/", function(req, res) {
var twitter = req.params[0];
res.render(
"twitter",
{ "twitter": twitter }
);
});
server.app.get("/process/", function(req, res) {
if (!req.query.hasOwnProperty("name") || !req.query.hasOwnProperty("topics")) {
res.redirect(302, "/");
return;
}
var name = _s.humanize(req.query.name) + " Museum";
var topics = req.query.topics.split(",");
var museums = server.db("museums");
//insert
function doesIdExist(id) {
var r = museums.find({ id: id });
return (r.value() ? true : false);
}
function generateId() {
var id = _.times(16, function(n) {
return _.random(0, 10);
}).join("");
return id;
}
var id = generateId();
while (doesIdExist(id)) {
var id = generateId();
}
var museum = {
"id": id,
"name": name,
"isSpontaneous": true,
"topics": topics
}
museums.push(museum);
server.db.save();
res.redirect(302, "/museum/" + id);
});
server.app.get("/process/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"process",
{ "twitter": id }
);
});
server.app.get("/museum/*/", function(req, res) {
var id = req.params[0];
var museums = server.db("museums");
var r = museums.find({
id: id
});
var rValue = r.value();
if (!rValue || (!(("topics" in rValue)) || rValue.topics.length == 0)) {
res.redirect(302, "/twitter/" + id);
return;
}
res.render(
"museum",
{
"title": rValue.name,
"id": id
}
);
});
server.app.get("/favicon.ico", function(req, res) {
res.redirect(301, "/static/favicon.ico");
});
//
// BACKEND (API) ROUTES
//
server.app.get("/api/reset", function(req, res) {
async.waterfall([
function(next) {
server.resetDatabase(next);
},
function(next) {
server.jsonSuccess("The database was reset.", res);
}
]);
});
server.app.get("/api/twitter/*", function(req, res) {
var screenName = req.params[0];
if (!screenName.length) {
server.jsonError("The Twitter screen name was empty.", res);
return;
}
async.waterfall([
function(next) {
// Account
twitterClient.get(
'users/show',
{
"screen_name": screenName,
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 404:
errorText = "That Twitter account doesn't exist.";
break;
default:
errorText = "The twitter account couldn't be accessed; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var newData = {
"id": screenName,
"name": data.name + "'" + (!_s.endsWith(data.name.toLowerCase(), "s") ? "s" : "") + " Museum",
"isSpontaneous": false,
"twitter": {
"account": {
"screenName": data.screen_name,
"name": data.name,
"location": data.location,
"description": data.description,
"language": data.lang,
"picture": data.profile_image_url
}
}
};
next(null, newData);
}
);
},
function(passedData, next) {
twitterClient.get(
'statuses/user_timeline',
{
"screen_name": screenName,
"trim_user": true,
"count": 200,
"include_rts": true
},
function(error, data, raw) {
if (error) {
var errorText;
switch(error.statusCode) {
case 401:
errorText = "That Twitter account is probably using ‘protected tweets’.";
break;
default:
errorText = "The tweets couldn't be retrieved; that's all.";
break;
}
server.jsonError(errorText, res);
return;
}
var tweets = _.map(data, twinglish.cleanTweet);
// var tweets = data;
//some tweets may have been removed (annulled)
tweets = _.filter(tweets, function(tweet) {
return tweet != null; // drop tweets that were annulled (null or undefined)
});
passedData["twitter"]["tweets"] = tweets;
var tweetTexts = _.map(tweets, function(tweet) {
return tweet.text;
});
var textStuff = tweetTexts.join(" ");
// from http://codereview.stackexchange.com/questions/63947/count-frequency-of-words-in-a-string-returning-list-of-unique-words-sorted-by-fr
function getFrequency2(string, cutOff) {
var cleanString = string.replace(/[\.,-\/#!$%\^&\*;:{}=\-_`~()]/g,""),
words = cleanString.split(' '),
frequencies = {},
word, frequency, i;
for( i=0; i<words.length; i++ ) {
word = words[i];
frequencies[word] = frequencies[word] || 0;
frequencies[word]++;
}
words = Object.keys( frequencies );
return words.sort(function (a,b) { return frequencies[b] -frequencies[a];}).slice(0,cutOff);
}
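// Illustrative note (added for clarity, not in the original source):
// getFrequency2 returns the `cutOff` most frequent whitespace-separated
// words, most frequent first, e.g.
//   getFrequency2("to be or not to be", 2)  // -> ["to", "be"]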
// Keep only ranks 96-100 of the 100 most frequent words, skipping the most
// common (stop-word-like) ones, and use those as the museum topics.
var frequentWords = getFrequency2(textStuff, 100);
var topics = frequentWords.slice(95, 100);
passedData["topics"] = topics;
next(null, passedData);
}
);
},
function(passedData, next) {
if ("metrics" in req.query) {
passedData["twitter"]["metrics"] = _.map(twetrics.metrics, function(metric) {
return metric.method(passedData["twitter"]["tweets"]);
});
}
next(null, passedData);
},
function(passedData, next) {
var format = (("format" in req.query) ? req.query.format : "json");
var data;
switch(format) {
case "store":
var museums = server.db("museums");
var r = museums.remove({
id: screenName
});
// if (r["__wrapped__"].length == 0) {
// //did not exist in the first place
// return;
// }
museums.push(passedData);
server.db.save();
server.jsonSuccess("The Twitter data was stored.", res);
return;
break;
case "json":
data = passedData;
break;
case "prettyjson":
data = JSON.stringify(passedData, undefined, 2);
break;
case "csv":
//return only tweets
data = csv.fromObjects(
passedData["twitter"]["tweets"],
["when", "text", "isRetweet"],
true
);
break;
default:
data = [];
break;
}
res.send(data);
}
]);
});
server.app.get("/api/process/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
res.send(r);
});
server.app.get("/api/museum/*", function(req, res) {
var museums = server.db("museums");
var r = museums.find({
id: req.params[0]
}).value();
if (!r) {
server.jsonError("There's no data for this screen name. Stop hacking.", res);
return;
}
var topicFunctions = _.map(r.topics, function(topic) {
return function(callback) {
dataer.getData(topic, function(data) {
callback(null, data);
})
};
})
async.parallel(
topicFunctions,
function(err, topics) {
res.send({
"isSpontaneous": (r.isSpontaneous ? true : false),
"topics": topics
});
}
);
});
//
// ERROR CODE ROUTES
//
server.app.get("*", function(req, res) {
res.status(404);
res.render("404");
});
}
]); | getFrequency2 | identifier_name |
datasource-view.spec.tsx | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file | * to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { shallow } from 'enzyme';
import React from 'react';
import { Capabilities } from '../../utils';
import { DatasourcesView } from './datasource-view';
describe('DatasourcesView', () => {
it('matches snapshot', () => {
const dataSourceView = shallow(
<DatasourcesView
goToQuery={() => {}}
goToTask={() => null}
goToSegments={() => {}}
capabilities={Capabilities.FULL}
/>,
);
expect(dataSourceView).toMatchSnapshot();
});
});
|
windowactivatable.py | # -*- coding: UTF-8 -*-
# Gedit External Tools plugin
# Copyright (C) 2005-2006 Steve Frécinaux <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__all__ = ('ExternalToolsPlugin', 'Manager', 'OutputPanel', 'Capture', 'UniqueById')
from gi.repository import GLib, Gio, GObject, Gtk, Gedit, PeasGtk
from .manager import Manager
from .library import ToolLibrary
from .outputpanel import OutputPanel
from .capture import Capture
from .functions import *
class ToolMenu(object):
def __init__(self, library, window, panel, menu):
super(ToolMenu, self).__init__()
self._library = library
self._window = window
self._panel = panel
self._menu = menu
self._action_tools = {}
self.update()
def deactivate(self):
self.remove()
def remove(self):
self._menu.remove_all()
for name, tool in self._action_tools.items():
self._window.remove_action(name)
if tool.shortcut:
app = Gio.Application.get_default()
app.remove_accelerator(tool.shortcut)
self._action_tools = {}
def _insert_directory(self, directory, menu):
for d in sorted(directory.subdirs, key=lambda x: x.name.lower()):
submenu = Gio.Menu()
menu.append_submenu(d.name.replace('_', '__'), submenu)
section = Gio.Menu()
submenu.append_section(None, section)
self._insert_directory(d, section)
for tool in sorted(directory.tools, key=lambda x: x.name.lower()):
action_name = 'external-tool_%X_%X' % (id(tool), id(tool.name))
self._action_tools[action_name] = tool
action = Gio.SimpleAction(name=action_name)
action.connect('activate', capture_menu_action, self._window, self._panel, tool)
self._window.add_action(action)
item = Gio.MenuItem.new(tool.name.replace('_', '__'), "win.%s" % action_name)
item.set_attribute_value("hidden-when", GLib.Variant.new_string("action-disabled"))
menu.append_item(item)
if tool.shortcut:
app = Gio.Application.get_default()
app.add_accelerator(tool.shortcut, "win.%s" % action_name, None)
def update(self):
self.remove()
self._insert_directory(self._library.tree, self._menu)
self.filter(self._window.get_active_document())
def filter_language(self, language, item):
if not item.languages:
return True
if not language and 'plain' in item.languages:
return True
if language and (language.get_id() in item.languages):
return True
else:
return False
def filter(self, document):
if document is None:
titled = False
remote = False
language = None
else:
titled = document.get_location() is not None
remote = not document.is_local()
language = document.get_language()
states = {
'always': True,
'all' : document is not None,
'local': titled and not remote,
'remote': titled and remote,
'titled': titled,
'untitled': not titled,
}
for name, tool in self._action_tools.items():
action = self._window.lookup_action(name)
if action:
action.set_enabled(states[tool.applicability] and
self.filter_language(language, tool))
# FIXME: restore the launch of the manager on configure using PeasGtk.Configurable
class WindowActivatable(GObject.Object, Gedit.WindowActivatable):
__gtype_name__ = "ExternalToolsWindowActivatable"
window = GObject.property(type=Gedit.Window)
def __init__(self):
GObject.Object.__init__(self)
self._manager = None
self._manager_default_size = None
self.menu = None
def do_activate(self):
# Ugly hack... we need to get access to the activatable to update the menuitems
self.window._external_tools_window_activatable = self
self._library = ToolLibrary()
action = Gio.SimpleAction(name="manage_tools")
action.connect("activate", lambda action, parameter: self.open_dialog())
self.window.add_action(action)
self.gear_menu = self.extend_gear_menu("ext9")
item = Gio.MenuItem.new(_("Manage _External Tools..."), "win.manage_tools")
self.gear_menu.append_menu_item(item)
external_tools_submenu = Gio.Menu()
item = Gio.MenuItem.new_submenu(_("External _Tools"), external_tools_submenu)
self.gear_menu.append_menu_item(item)
external_tools_submenu_section = Gio.Menu()
external_tools_submenu.append_section(None, external_tools_submenu_section)
# Create output console
self._output_buffer = OutputPanel(self.plugin_info.get_data_dir(), self.window)
self.menu = ToolMenu(self._library, self.window, self._output_buffer, external_tools_submenu_section)
bottom = self.window.get_bottom_panel()
bottom.add_titled(self._output_buffer.panel, "GeditExternalToolsShellOutput", _("Tool Output"))
def do_update_state(self):
if self.menu is not None:
self.menu.filter(self.window.get_active_document())
def do_deactivate(self):
self.window._external_tools_window_activatable = None
self.menu.deactivate()
self.window.remove_action("manage_tools")
bottom = self.window.get_bottom_panel()
bottom.remove(self._output_buffer.panel)
def open_dialog(self):
if not self._manager:
self._manager = Manager(self.plugin_info.get_data_dir())
if self._manager_default_size:
self._manager.dialog.set_default_size(*self._manager_default_size)
self._manager.dialog.connect('destroy', self.on_manager_destroy)
self._manager.connect('tools-updated', self.on_manager_tools_updated)
window = Gio.Application.get_default().get_active_window()
self._manager.run(window)
return self._manager.dialog
def update_manager(self, tool):
if self._manager:
self._manager.tool_changed(tool, True)
def on_manager_destroy(self, dialog):
self._manager_default_size = self._manager.get_final_size()
self._manager = None
def on_manager_tools_updated(self, manager):
for window in Gio.Application.get_default().get_windows():
window._external_tools_window_activatable.menu.update()
# ex:ts=4:et:
macro_parser.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is an Earley-like parser, without support for in-grammar nonterminals,
//! only by calling out to the main rust parser for named nonterminals (which it
//! commits to fully when it hits one in a grammar). This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on the a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
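// Illustrative usage sketch (added for clarity; not part of the upstream
// source): the entry points below are `parse` and `parse_or_else`. A macro
// expander builds a `TtReader` over the invocation's tokens and hands it the
// matcher's token trees, e.g.
//
//     let named = parse_or_else(sess, cfg, rdr, matcher_tts);
//     // `named` maps each `$name` binder in the matcher to its `NamedMatch`
//     // tree: `MatchedNonterminal` leaves, wrapped in one `MatchedSeq` per
//     // enclosing `$(...)` repetition.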
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
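// Illustrative example (added; not upstream code): for a matcher such as
// `$e:expr $( $i:ident ),*`, `count_names` returns 2 (one slot for `$e`, one
// for the `$i` captured inside the sequence), and `initial_matcher_pos` below
// allocates one initially empty match vector per slot.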
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
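// Illustrative sketch (added; not upstream code): matching the tokens `a, b`
// against the matcher `$( $x:ident ),*` binds `x` to
//     MatchedSeq([MatchedNonterminal(ident a), MatchedNonterminal(ident b)], span)
// that is, one MatchedSeq layer per nested `$(...)` in the matcher, with
// MatchedNonterminal leaves produced by the Rust parser (see `parse_nt`).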
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() && !next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
_ => panic!()
} }).collect::<Vec<String>>().join(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
built-in NTs {} or {} other options.",
nts, next_eis.len()).to_string());
} else if bb_eis.is_empty() && next_eis.is_empty() {
return Failure(sp, format!("no rules expected the token `{}`",
pprust::token_to_string(&tok)).to_string());
} else if !next_eis.is_empty() {
/* Now process the next token */
while !next_eis.is_empty() {
cur_eis.push(next_eis.pop().unwrap());
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));
let mut ei = bb_eis.pop().unwrap();
match ei.top_elts.get_tt(ei.idx) {
TtToken(span, MatchNt(_, ident, _, _)) => {
let match_cur = ei.match_cur;
(&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
}
_ => panic!()
}
cur_eis.push(ei);
for _ in 0..rust_parser.tokens_consumed {
let _ = rdr.next_token();
}
}
}
assert!(!cur_eis.is_empty());
}
}
pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal {
match name {
"tt" => {
p.quote_depth += 1; //but in theory, non-quoted tts might be useful
let res = token::NtTT(P(panictry!(p.parse_token_tree())));
p.quote_depth -= 1;
return res;
}
_ => {}
}
// check at the beginning and the parser checks after each bump
panictry!(p.check_unknown_macro_variable());
match name {
"item" => match p.parse_item() {
Some(i) => token::NtItem(i),
None => panic!(p.fatal("expected an item keyword"))
},
"block" => token::NtBlock(panictry!(p.parse_block())),
"stmt" => match p.parse_stmt() {
Some(s) => token::NtStmt(s),
None => panic!(p.fatal("expected a statement"))
},
"pat" => token::NtPat(p.parse_pat()),
"expr" => token::NtExpr(p.parse_expr()),
"ty" => token::NtTy(p.parse_ty()),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(sn),b) }
_ => {
let token_str = pprust::token_to_string(&p.token);
panic!(p.fatal(&format!("expected ident, found {}",
&token_str[..])))
}
},
"path" => {
token::NtPath(Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons))))
}
"meta" => token::NtMeta(p.parse_meta_item()),
_ => {
panic!(p.span_fatal_help(sp,
&format!("invalid fragment specifier `{}`", name),
"valid fragment specifiers are `ident`, `block`, \
`stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
and `item`"))
}
}
}
macro_parser.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is an Earley-like parser, without support for in-grammar nonterminals,
//! only by calling out to the main rust parser for named nonterminals (which it
//! commits to fully when it hits one in a grammar). This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on the a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
| HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() && !next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
_ => panic!()
} }).collect::<Vec<String>>().join(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
built-in NTs {} or {} other options.",
nts, next_eis.len()).to_string());
} else if bb_eis.is_empty() && next_eis.is_empty() {
return Failure(sp, format!("no rules expected the token `{}`",
pprust::token_to_string(&tok)).to_string());
} else if !next_eis.is_empty() {
/* Now process the next token */
while !next_eis.is_empty() {
cur_eis.push(next_eis.pop().unwrap());
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));
let mut ei = bb_eis.pop().unwrap();
match ei.top_elts.get_tt(ei.idx) {
TtToken(span, MatchNt(_, ident, _, _)) => {
let match_cur = ei.match_cur;
(&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
}
_ => panic!()
}
cur_eis.push(ei);
for _ in 0..rust_parser.tokens_consumed {
let _ = rdr.next_token();
}
}
}
assert!(!cur_eis.is_empty());
}
}
pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal {
match name {
"tt" => {
p.quote_depth += 1; //but in theory, non-quoted tts might be useful
let res = token::NtTT(P(panictry!(p.parse_token_tree())));
p.quote_depth -= 1;
return res;
}
_ => {}
}
// check at the beginning and the parser checks after each bump
panictry!(p.check_unknown_macro_variable());
match name {
"item" => match p.parse_item() {
Some(i) => token::NtItem(i),
None => panic!(p.fatal("expected an item keyword"))
},
"block" => token::NtBlock(panictry!(p.parse_block())),
"stmt" => match p.parse_stmt() {
Some(s) => token::NtStmt(s),
None => panic!(p.fatal("expected a statement"))
},
"pat" => token::NtPat(p.parse_pat()),
"expr" => token::NtExpr(p.parse_expr()),
"ty" => token::NtTy(p.parse_ty()),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(sn),b) }
_ => {
let token_str = pprust::token_to_string(&p.token);
panic!(p.fatal(&format!("expected ident, found {}",
&token_str[..])))
}
},
"path" => {
token::NtPath(Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons))))
}
"meta" => token::NtMeta(p.parse_meta_item()),
_ => {
panic!(p.span_fatal_help(sp,
&format!("invalid fragment specifier `{}`", name),
"valid fragment specifiers are `ident`, `block`, \
`stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
and `item`"))
}
}
}
| &TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
    let mut ret_val = HashMap::new();
macro_parser.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is an Earley-like parser, without support for in-grammar nonterminals,
//! only by calling out to the main rust parser for named nonterminals (which it
//! commits to fully when it hits one in a grammar). This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
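//!
//! As a point of reference only (this example is not part of the original
//! module), a macro whose left-hand side has exactly the shape used in the
//! walkthrough above would look like the following hypothetical definition:
//!
//! ```ignore
//! macro_rules! seq_demo {
//!     (a $( a )* a b) => {};
//! }
//!
//! seq_demo!(a a a a b); // matching this call walks the item sets shown above
//! ```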
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
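// Illustrative sketch only (not part of the original source): for a matcher
// such as `$( $e:expr ),*` applied to the tokens `1, 2`, the binding for `e`
// would have roughly this shape, with one `MatchedSeq` level per `$(...)`
// repetition and `MatchedNonterminal` leaves:
//
//     MatchedSeq(vec![
//         Rc::new(MatchedNonterminal(NtExpr(/* 1 */))),
//         Rc::new(MatchedNonterminal(NtExpr(/* 2 */))),
//     ], span_of_the_repetition)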
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() && !next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
_ => panic!()
} }).collect::<Vec<String>>().join(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
built-in NTs {} or {} other options.",
nts, next_eis.len()).to_string());
} else if bb_eis.is_empty() && next_eis.is_empty() {
return Failure(sp, format!("no rules expected the token `{}`",
pprust::token_to_string(&tok)).to_string());
} else if !next_eis.is_empty() {
/* Now process the next token */
while !next_eis.is_empty() {
cur_eis.push(next_eis.pop().unwrap());
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));
let mut ei = bb_eis.pop().unwrap();
match ei.top_elts.get_tt(ei.idx) {
TtToken(span, MatchNt(_, ident, _, _)) => {
let match_cur = ei.match_cur;
(&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
}
_ => panic!()
}
cur_eis.push(ei);
for _ in 0..rust_parser.tokens_consumed {
let _ = rdr.next_token();
}
}
}
assert!(!cur_eis.is_empty());
}
}
pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal {
match name {
"tt" => {
p.quote_depth += 1; //but in theory, non-quoted tts might be useful
let res = token::NtTT(P(panictry!(p.parse_token_tree())));
p.quote_depth -= 1;
return res;
}
_ => {}
}
// check at the beginning and the parser checks after each bump
panictry!(p.check_unknown_macro_variable());
match name {
"item" => match p.parse_item() {
Some(i) => token::NtItem(i),
None => panic!(p.fatal("expected an item keyword"))
},
"block" => token::NtBlock(panictry!(p.parse_block())),
"stmt" => match p.parse_stmt() {
Some(s) => token::NtStmt(s),
None => panic!(p.fatal("expected a statement"))
},
"pat" => token::NtPat(p.parse_pat()),
"expr" => token::NtExpr(p.parse_expr()),
"ty" => token::NtTy(p.parse_ty()),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(sn),b) }
_ => {
let token_str = pprust::token_to_string(&p.token);
panic!(p.fatal(&format!("expected ident, found {}",
&token_str[..])))
}
},
"path" => {
            token::NtPath(Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons))))
        }
        "meta" => token::NtMeta(p.parse_meta_item()),
        _ => {
            panic!(p.span_fatal_help(sp,
                                     &format!("invalid fragment specifier `{}`", name),
                                     "valid fragment specifiers are `ident`, `block`, \
                                      `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
                                      and `item`"))
        }
    }
}
macro_parser.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is an Earley-like parser, without support for in-grammar nonterminals,
//! only by calling out to the main rust parser for named nonterminals (which it
//! commits to fully when it hits one in a grammar). This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
} | TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() && !next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
_ => panic!()
} }).collect::<Vec<String>>().join(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
built-in NTs {} or {} other options.",
nts, next_eis.len()).to_string());
} else if bb_eis.is_empty() && next_eis.is_empty() {
return Failure(sp, format!("no rules expected the token `{}`",
pprust::token_to_string(&tok)).to_string());
} else if !next_eis.is_empty() {
/* Now process the next token */
while !next_eis.is_empty() {
cur_eis.push(next_eis.pop().unwrap());
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));
let mut ei = bb_eis.pop().unwrap();
match ei.top_elts.get_tt(ei.idx) {
TtToken(span, MatchNt(_, ident, _, _)) => {
let match_cur = ei.match_cur;
(&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
}
_ => panic!()
}
cur_eis.push(ei);
for _ in 0..rust_parser.tokens_consumed {
let _ = rdr.next_token();
}
}
}
assert!(!cur_eis.is_empty());
}
}
pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal {
match name {
"tt" => {
p.quote_depth += 1; //but in theory, non-quoted tts might be useful
let res = token::NtTT(P(panictry!(p.parse_token_tree())));
p.quote_depth -= 1;
return res;
}
_ => {}
}
// check at the beginning and the parser checks after each bump
panictry!(p.check_unknown_macro_variable());
match name {
"item" => match p.parse_item() {
Some(i) => token::NtItem(i),
None => panic!(p.fatal("expected an item keyword"))
},
"block" => token::NtBlock(panictry!(p.parse_block())),
"stmt" => match p.parse_stmt() {
Some(s) => token::NtStmt(s),
None => panic!(p.fatal("expected a statement"))
},
"pat" => token::NtPat(p.parse_pat()),
"expr" => token::NtExpr(p.parse_expr()),
"ty" => token::NtTy(p.parse_ty()),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(sn),b) }
_ => {
let token_str = pprust::token_to_string(&p.token);
panic!(p.fatal(&format!("expected ident, found {}",
&token_str[..])))
}
},
"path" => {
token::NtPath(Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons))))
}
"meta" => token::NtMeta(p.parse_meta_item()),
_ => {
panic!(p.span_fatal_help(sp,
&format!("invalid fragment specifier `{}`", name),
"valid fragment specifiers are `ident`, `block`, \
`stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
and `item`"))
}
}
} | random_line_split |
|
flo2_pid.py | #!/usr/bin/env python
#--------------------------------------------------------------------------
# flo2_pid.py
# Rick Kauffman a.k.a. Chewie
#
# Hewlett Packard Company Revision: 1.0
# ~~~~~~~~~ WookieWare ~~~~~~~~~~~~~
# Change history....09/03/2014
#
#
##--------------------------------------------------------------------------
# Initial release - Pulls VARS from webform.
# build a database of all dpids not in glarn
# Calls glarn chooser deletes dpids
#
#
#------Might not need this but please they are handy------------------------
#
# Do the imports!!!!
#----------------------If you dont have it use "apt-get install (name)"
import sys
import subprocess
import cgi
import cgitb; cgitb.enable()
import hpsdnclient as hp
import sqlite3
import requests
from requests.auth import HTTPDigestAuth
import xml.etree.ElementTree as xml
# import pdb; pdb.set_trace()
#-------------------------------------------------------------------------
# Get the field VARS from the calling HTML form
#-------------------------------------------------------------------------
form = cgi.FieldStorage()
server = form.getvalue('server')
user = form.getvalue('user')
passw = form.getvalue('passw')
imc_server = form.getvalue('imc_server')
imc_user = form.getvalue('imc_user')
imc_passw = form.getvalue('imc_passw')
pid_list = form.getvalue('list_o_pids')
imc = form.getvalue('imc')
if pid_list == None:
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
print "<HR> "
print "<h1> No items selected</h1>"
print "<FORM method='post' ACTION=\"./pid_main.py\">"
print "<h3> List is empty </h3>"
print "<p> Click button below to go back to the system chooser</p>"
print "<hr>"
print "<input type=\"submit\" style=\"font-face: 'Arial'; font-size: larger; color: black; background-color: #0066FF; border: 3pt ridge lightgrey\" value=\" Main Menu\">"
print "<input type=\"hidden\" name=\"server\" value=%s>" % (server)
print "<input type=\"hidden\" name=\"user\" value=%s>" % (user)
print "<input type=\"hidden\" name=\"passw\" value=%s>" % (passw)
print "<input type=\"hidden\" name=\"imc_server\" value=%s>" % (imc_server)
print "<input type=\"hidden\" name=\"imc_user\" value=%s>" % (imc_user)
print "<input type=\"hidden\" name=\"imc_passw\" value=%s>" % (imc_passw)
print "<input type=\"hidden\" name=\"imc\" value=%s>" % (imc)
print "<p>For more information on how to use this application <a href=\"/faq.html\">User Guide</a></p>"
print "<center><font face=\"Arial\" size=\"1\">SDN Solutions From WookieWare 2014</font></center>"
print "</body>"
print "</html>"
#glarn.close()
sys.exit()
x = len(pid_list) # Keep track of how many items we need to process
# Check to see if anything was chosen. If x is zero, go to the Nothing Selected page and exit
j = 0
#Create authorization Token for the SDN controller
auth = hp.XAuthToken(user=user,password=passw,server=server)
api=hp.Api(controller=server,auth=auth)
#--------------------------------------------------------------------------
# dpid factory: Break up dpids and match to vendor to determine MAC address
#---------------------------------------------------------------------------
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
print "<HR> "
print "<h3> Dpid flows display</h3>"
print "<p>List of current flows by dpid"
print "<FORM method='post' ACTION=\"./pid_main.py\">"
# Delete records in database
if x == 23:#only one entry (dpids are 23 chars long)
try:
flows = api.get_flows(pid_list)
print "<h1>Flows for dpid %s:</h1>" % (pid_list)
print "<table border=\"1\" cellpadding=\"3\" class=\"TFtable\">"
print "<tr> <td> Host MAC address </td> <td> Destination MAC address </td> <td> Output Port </td> </tr>"
for f in flows:
eth_src = f.match.eth_src
eth_dst = f.match.eth_dst
action = f.actions.output
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action)
print "</table>"
except:
print "<h1>Error getting dpid information %s</h1>" % (pid_list)
elif j == 0:
for i in pid_list:
flows = api.get_flows(i)
print "<h1>Flows for dpid %s:</h1>" % (i)
print "<table border=\"1\" cellpadding=\"3\" class=\"TFtable\">"
print "<tr> <td> Host MAC address </td> <td> Destination MAC address </td> <td> Output Port </td> </tr>"
for f in flows:
eth_src = f.match.eth_src
eth_dst = f.match.eth_dst
action = f.actions.output
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action)
print "</table>"
#--------------------------------------------------------------------------
# Finish manual or go home
#---------------------------------------------------------------------------
#print "Content-type:text/html\r\n\r\n"
#print "<!DOCTYPE html>"
#print "<html>"
#print "<head>"
#print "<title> Wookieware.com</title>"
#print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
#print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
#print "</head>"
print "<HR>"
print "<input type=\"submit\" style=\"font-face: 'Arial'; font-size: larger; color: black; background-color: #0066FF; border: 3pt ridge lightgrey\" value=\" Main Menu\">"
print "<input type=\"hidden\" name=\"server\" value=%s>" % (server)
print "<input type=\"hidden\" name=\"user\" value=%s>" % (user)
print "<input type=\"hidden\" name=\"passw\" value=%s>" % (passw)
print "<input type=\"hidden\" name=\"imc_server\" value=%s>" % (imc_server)
print "<input type=\"hidden\" name=\"imc_user\" value=%s>" % (imc_user)
print "<input type=\"hidden\" name=\"imc_passw\" value=%s>" % (imc_passw)
print "<input type=\"hidden\" name=\"imc\" value=%s>" % (imc)
print "</form>"
print "<footer>"
print "<p>For more information on how to use this application <a href=\"/faq.html\">User Guide</a></p>"
print "<a href=\"/index.html\">BACK</a>"
print "<center><font face=\"Arial\" size=\"1\">SDN Solutions From WookieWare 2014</font></center>"
print "</footer>"
print "</body>"
print "</html>"
sys.exit()
flo2_pid.py | #!/usr/bin/env python
#--------------------------------------------------------------------------
# flo2_pid.py
# Rick Kauffman a.k.a. Chewie
#
# Hewlett Packard Company Revision: 1.0
# ~~~~~~~~~ WookieWare ~~~~~~~~~~~~~
# Change history....09/03/2014
#
#
##--------------------------------------------------------------------------
# Initial release - Pulls VARS from webform.
# build a database of all dpids not in glarn
# Calls glarn chooser deletes dpids
#
#
#------Might not need this but please they are handy------------------------
#
# Do the imports!!!!
#----------------------If you dont have it use "apt-get install (name)"
import sys
import subprocess
import cgi
import cgitb; cgitb.enable()
import hpsdnclient as hp
import sqlite3
import requests
from requests.auth import HTTPDigestAuth
import xml.etree.ElementTree as xml
# import pdb; pdb.set_trace()
#-------------------------------------------------------------------------
# Get the field VARS from the calling HTML form
#-------------------------------------------------------------------------
form = cgi.FieldStorage()
server = form.getvalue('server')
user = form.getvalue('user')
passw = form.getvalue('passw')
imc_server = form.getvalue('imc_server')
imc_user = form.getvalue('imc_user')
imc_passw = form.getvalue('imc_passw')
pid_list = form.getvalue('list_o_pids')
imc = form.getvalue('imc')
if pid_list == None:
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
print "<HR> "
print "<h1> No items selected</h1>"
print "<FORM method='post' ACTION=\"./pid_main.py\">"
print "<h3> List is empty </h3>"
print "<p> Click button below to go back to the system chooser</p>"
print "<hr>"
print "<input type=\"submit\" style=\"font-face: 'Arial'; font-size: larger; color: black; background-color: #0066FF; border: 3pt ridge lightgrey\" value=\" Main Menu\">"
print "<input type=\"hidden\" name=\"server\" value=%s>" % (server)
print "<input type=\"hidden\" name=\"user\" value=%s>" % (user)
print "<input type=\"hidden\" name=\"passw\" value=%s>" % (passw)
print "<input type=\"hidden\" name=\"imc_server\" value=%s>" % (imc_server)
print "<input type=\"hidden\" name=\"imc_user\" value=%s>" % (imc_user)
print "<input type=\"hidden\" name=\"imc_passw\" value=%s>" % (imc_passw)
print "<input type=\"hidden\" name=\"imc\" value=%s>" % (imc)
print "<p>For more information on how to use this application <a href=\"/faq.html\">User Guide</a></p>"
print "<center><font face=\"Arial\" size=\"1\">SDN Solutions From WookieWare 2014</font></center>"
print "</body>"
print "</html>"
#glarn.close()
sys.exit()
x = len(pid_list) # Keep track of how many items we need to process
# Check to see if anything was chosen. If x is zero, go to the Nothing Selected page and exit
j = 0
#Create authorization Token for the SDN controller
auth = hp.XAuthToken(user=user,password=passw,server=server)
api=hp.Api(controller=server,auth=auth)
#--------------------------------------------------------------------------
# dpid factory: Break up dpids and match to vendor to determine MAC address
#---------------------------------------------------------------------------
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
print "<HR> "
print "<h3> Dpid flows display</h3>"
print "<p>List of current flows by dpid"
print "<FORM method='post' ACTION=\"./pid_main.py\">"
# Delete records in database
if x == 23:#only one entry (dpids are 23 chars long)
try:
flows = api.get_flows(pid_list)
print "<h1>Flows for dpid %s:</h1>" % (pid_list)
print "<table border=\"1\" cellpadding=\"3\" class=\"TFtable\">"
print "<tr> <td> Host MAC address </td> <td> Destination MAC address </td> <td> Output Port </td> </tr>"
for f in flows:
eth_src = f.match.eth_src
eth_dst = f.match.eth_dst
action = f.actions.output
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action)
print "</table>"
except:
print "<h1>Error getting dpid information %s</h1>" % (pid_list)
elif j == 0:
for i in pid_list:
flows = api.get_flows(i)
print "<h1>Flows for dpid %s:</h1>" % (i)
print "<table border=\"1\" cellpadding=\"3\" class=\"TFtable\">"
print "<tr> <td> Host MAC address </td> <td> Destination MAC address </td> <td> Output Port </td> </tr>"
for f in flows:
eth_src = f.match.eth_src
eth_dst = f.match.eth_dst
action = f.actions.output
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action)
print "</table>"
#--------------------------------------------------------------------------
# Finish manual or go home
#---------------------------------------------------------------------------
#print "Content-type:text/html\r\n\r\n"
#print "<!DOCTYPE html>"
#print "<html>"
#print "<head>"
#print "<title> Wookieware.com</title>"
#print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
#print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
#print "</head>"
print "<HR>"
print "<input type=\"submit\" style=\"font-face: 'Arial'; font-size: larger; color: black; background-color: #0066FF; border: 3pt ridge lightgrey\" value=\" Main Menu\">"
print "<input type=\"hidden\" name=\"server\" value=%s>" % (server)
print "<input type=\"hidden\" name=\"user\" value=%s>" % (user)
print "<input type=\"hidden\" name=\"passw\" value=%s>" % (passw)
print "<input type=\"hidden\" name=\"imc_server\" value=%s>" % (imc_server)
print "<input type=\"hidden\" name=\"imc_user\" value=%s>" % (imc_user)
print "<input type=\"hidden\" name=\"imc_passw\" value=%s>" % (imc_passw)
print "<input type=\"hidden\" name=\"imc\" value=%s>" % (imc)
print "</form>"
print "<footer>"
print "<p>For more information on how to use this application <a href=\"/faq.html\">User Guide</a></p>"
print "<a href=\"/index.html\">BACK</a>"
print "<center><font face=\"Arial\" size=\"1\">SDN Solutions From WookieWare 2014</font></center>"
print "</footer>"
print "</body>"
print "</html>"
sys.exit()
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action) | conditional_block |
stamplay-js-sdk-tests.ts | Stamplay.init('sample');
const userFn = Stamplay.User();
const user = new userFn.Model();
const colTags = Stamplay.Cobject('tag');
const tags = new colTags.Collection();
// Signing up
const registrationData = {
email : '[email protected]',
password: 'mySecret'
};
user.signup(registrationData).then(() => {
user.set('phoneNumber', '020 123 4567');
return user.save();
}).then(() => {
const number = user.get('phoneNumber');
console.log(number); // number value is 020 123 4567
});
// Action
const colFoo = Stamplay.Cobject('foo'); | () => {
// success callback
}, (err: any) => {
// error callback
}
); | const fooMod = new colFoo.Model();
fooMod.fetch(5)
.then(() => fooMod.upVote())
.then( | random_line_split |
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn spawn_piped(args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
        .collect::<BTreeSet<String>>();
    let mut b_tree_branches = BTreeSet::new();
    for branch in branches.vec.clone() {
        b_tree_branches.insert(branch);
    }
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") {
output.push(s.to_owned());
}
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
}
} |
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn spawn_piped(args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>();
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch);
}
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
        } else if s.contains(" - [deleted]") {
            output.push(s.to_owned());
        }
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
}
}
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn spawn_piped(args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>();
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch);
}
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") {
output.push(s.to_owned());
}
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() |
}
| {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
} | identifier_body |
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn | (args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>();
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch);
}
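// Only attempt to delete branches that actually exist on the remote: intersect the selected branches with the remote's refs.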
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") {
output.push(s.to_owned());
}
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
}
}
| spawn_piped | identifier_name |
kanban-value-attribute-config.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {ChangeDetectionStrategy, Component, EventEmitter, Input, Output} from '@angular/core';
import {SelectItemModel} from '../../../../../../shared/select/select-item/select-item.model';
import {DataAggregationType} from '../../../../../../shared/utils/data/data-aggregation';
import {I18n} from '@ngx-translate/i18n-polyfill';
import {KanbanAttribute, KanbanValueAttribute, KanbanValueType} from '../../../../../../core/store/kanbans/kanban';
@Component({
selector: 'kanban-value-attribute-config',
templateUrl: './kanban-value-attribute-config.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class KanbanValueAttributeConfigComponent {
@Input()
public kanbanAttribute: KanbanValueAttribute;
@Input()
public availableAttributes: SelectItemModel[];
@Output()
public attributeSelect = new EventEmitter<KanbanValueAttribute>();
@Output()
public attributeChange = new EventEmitter<KanbanValueAttribute>();
@Output()
public attributeRemove = new EventEmitter();
public readonly buttonClasses = 'flex-grow-1 text-truncate';
public readonly aggregationPlaceholder: string;
public readonly aggregations = Object.values(DataAggregationType);
public readonly valueTypes = Object.values(KanbanValueType);
public readonly valueType = KanbanValueType;
constructor(private i18n: I18n) {
this.aggregationPlaceholder = i18n({id: 'aggregation', value: 'Aggregation'});
}
public onAggregationSelect(aggregation: DataAggregationType) {
const newAttribute = {...this.kanbanAttribute, aggregation};
this.attributeChange.emit(newAttribute);
}
public onAttributeSelected(attribute: KanbanAttribute) {
const valueAttribute: KanbanValueAttribute = {
...attribute,
aggregation: DataAggregationType.Sum,
valueType: KanbanValueType.Default,
};
this.attributeSelect.emit(valueAttribute);
}
public onAttributeRemoved() {
this.attributeRemove.emit();
} | } |
public onValueTypeSelected(valueType: KanbanValueType) {
const valueAttribute: KanbanValueAttribute = {...this.kanbanAttribute, valueType};
this.attributeChange.emit(valueAttribute);
} | random_line_split |
kanban-value-attribute-config.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {ChangeDetectionStrategy, Component, EventEmitter, Input, Output} from '@angular/core';
import {SelectItemModel} from '../../../../../../shared/select/select-item/select-item.model';
import {DataAggregationType} from '../../../../../../shared/utils/data/data-aggregation';
import {I18n} from '@ngx-translate/i18n-polyfill';
import {KanbanAttribute, KanbanValueAttribute, KanbanValueType} from '../../../../../../core/store/kanbans/kanban';
@Component({
selector: 'kanban-value-attribute-config',
templateUrl: './kanban-value-attribute-config.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class KanbanValueAttributeConfigComponent {
@Input()
public kanbanAttribute: KanbanValueAttribute;
@Input()
public availableAttributes: SelectItemModel[];
@Output()
public attributeSelect = new EventEmitter<KanbanValueAttribute>();
@Output()
public attributeChange = new EventEmitter<KanbanValueAttribute>();
@Output()
public attributeRemove = new EventEmitter();
public readonly buttonClasses = 'flex-grow-1 text-truncate';
public readonly aggregationPlaceholder: string;
public readonly aggregations = Object.values(DataAggregationType);
public readonly valueTypes = Object.values(KanbanValueType);
public readonly valueType = KanbanValueType;
| (private i18n: I18n) {
this.aggregationPlaceholder = i18n({id: 'aggregation', value: 'Aggregation'});
}
public onAggregationSelect(aggregation: DataAggregationType) {
const newAttribute = {...this.kanbanAttribute, aggregation};
this.attributeChange.emit(newAttribute);
}
public onAttributeSelected(attribute: KanbanAttribute) {
const valueAttribute: KanbanValueAttribute = {
...attribute,
aggregation: DataAggregationType.Sum,
valueType: KanbanValueType.Default,
};
this.attributeSelect.emit(valueAttribute);
}
public onAttributeRemoved() {
this.attributeRemove.emit();
}
public onValueTypeSelected(valueType: KanbanValueType) {
const valueAttribute: KanbanValueAttribute = {...this.kanbanAttribute, valueType};
this.attributeChange.emit(valueAttribute);
}
}
| constructor | identifier_name |
kanban-value-attribute-config.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {ChangeDetectionStrategy, Component, EventEmitter, Input, Output} from '@angular/core';
import {SelectItemModel} from '../../../../../../shared/select/select-item/select-item.model';
import {DataAggregationType} from '../../../../../../shared/utils/data/data-aggregation';
import {I18n} from '@ngx-translate/i18n-polyfill';
import {KanbanAttribute, KanbanValueAttribute, KanbanValueType} from '../../../../../../core/store/kanbans/kanban';
@Component({
selector: 'kanban-value-attribute-config',
templateUrl: './kanban-value-attribute-config.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class KanbanValueAttributeConfigComponent {
@Input()
public kanbanAttribute: KanbanValueAttribute;
@Input()
public availableAttributes: SelectItemModel[];
@Output()
public attributeSelect = new EventEmitter<KanbanValueAttribute>();
@Output()
public attributeChange = new EventEmitter<KanbanValueAttribute>();
@Output()
public attributeRemove = new EventEmitter();
public readonly buttonClasses = 'flex-grow-1 text-truncate';
public readonly aggregationPlaceholder: string;
public readonly aggregations = Object.values(DataAggregationType);
public readonly valueTypes = Object.values(KanbanValueType);
public readonly valueType = KanbanValueType;
constructor(private i18n: I18n) {
this.aggregationPlaceholder = i18n({id: 'aggregation', value: 'Aggregation'});
}
public onAggregationSelect(aggregation: DataAggregationType) {
const newAttribute = {...this.kanbanAttribute, aggregation};
this.attributeChange.emit(newAttribute);
}
public onAttributeSelected(attribute: KanbanAttribute) {
const valueAttribute: KanbanValueAttribute = {
...attribute,
aggregation: DataAggregationType.Sum,
valueType: KanbanValueType.Default,
};
this.attributeSelect.emit(valueAttribute);
}
public onAttributeRemoved() {
this.attributeRemove.emit();
}
public onValueTypeSelected(valueType: KanbanValueType) |
}
| {
const valueAttribute: KanbanValueAttribute = {...this.kanbanAttribute, valueType};
this.attributeChange.emit(valueAttribute);
} | identifier_body |
javax.swing.plaf.multi.MultiLabelUI.d.ts | declare namespace javax {
namespace swing {
namespace plaf {
namespace multi {
class | extends javax.swing.plaf.LabelUI {
protected uis: java.util.Vector<javax.swing.plaf.ComponentUI>
public constructor()
public getUIs(): javax.swing.plaf.ComponentUI[]
public contains(arg0: javax.swing.JComponent, arg1: number | java.lang.Integer, arg2: number | java.lang.Integer): boolean
public update(arg0: java.awt.Graphics, arg1: javax.swing.JComponent): void
public static createUI(arg0: javax.swing.JComponent): javax.swing.plaf.ComponentUI
public installUI(arg0: javax.swing.JComponent): void
public uninstallUI(arg0: javax.swing.JComponent): void
public paint(arg0: java.awt.Graphics, arg1: javax.swing.JComponent): void
public getPreferredSize(arg0: javax.swing.JComponent): java.awt.Dimension
public getMinimumSize(arg0: javax.swing.JComponent): java.awt.Dimension
public getMaximumSize(arg0: javax.swing.JComponent): java.awt.Dimension
public getAccessibleChildrenCount(arg0: javax.swing.JComponent): number
public getAccessibleChild(arg0: javax.swing.JComponent, arg1: number | java.lang.Integer): javax.accessibility.Accessible
}
}
}
}
}
| MultiLabelUI | identifier_name |
javax.swing.plaf.multi.MultiLabelUI.d.ts | declare namespace javax {
namespace swing {
namespace plaf {
namespace multi {
class MultiLabelUI extends javax.swing.plaf.LabelUI {
protected uis: java.util.Vector<javax.swing.plaf.ComponentUI>
public constructor()
public getUIs(): javax.swing.plaf.ComponentUI[]
public contains(arg0: javax.swing.JComponent, arg1: number | java.lang.Integer, arg2: number | java.lang.Integer): boolean
public update(arg0: java.awt.Graphics, arg1: javax.swing.JComponent): void
public static createUI(arg0: javax.swing.JComponent): javax.swing.plaf.ComponentUI
public installUI(arg0: javax.swing.JComponent): void
public uninstallUI(arg0: javax.swing.JComponent): void
public paint(arg0: java.awt.Graphics, arg1: javax.swing.JComponent): void
public getPreferredSize(arg0: javax.swing.JComponent): java.awt.Dimension
public getMinimumSize(arg0: javax.swing.JComponent): java.awt.Dimension
public getMaximumSize(arg0: javax.swing.JComponent): java.awt.Dimension
public getAccessibleChildrenCount(arg0: javax.swing.JComponent): number | }
} | public getAccessibleChild(arg0: javax.swing.JComponent, arg1: number | java.lang.Integer): javax.accessibility.Accessible
}
}
} | random_line_split |
snapshot.js | 'use strict';
var fs = require('fs'),
_ = require('lodash'),
colors = require('cli-color'),
utils = require('./utils.js');
function doSnapShot(roadMapPath, urlPrefix) {
var target,
payload,
response,
url;
var roadMap = utils.getRoadMapFromPath(roadMapPath);
console.log('Processing road map file "' + roadMapPath + '":');
var bootInfo = utils.getBootInfo();
var souvenirPath = utils.getSouvenirPathForRoadMapPath(roadMapPath);
var fixTestcases = utils.getTestcases();
if (_.isEmpty(fixTestcases)) {
utils.mkEmptyDirSync(souvenirPath);
}
var no = 0,
skipped = 0,
failed = 0,
fail = [],
bad = [];
for (target in roadMap) {
no++;
if (!_.isEmpty(fixTestcases) && !_.contains(fixTestcases, no)) {
skipped++;
continue;
}
console.log('');
console.log( 'Request #' + no + ' ==================');
payload = roadMap[target];
url = urlPrefix + target;
response = utils.getHttpResponse(url, payload, bootInfo.getHeaders());
if (_.isNull(response)) |
var targetFilePath = utils.getSouvenirPathForTarget(souvenirPath, target);
fs.writeFileSync(targetFilePath, JSON.stringify(response));
let code = response.statusCode;
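// Classify the response: 4xx counts as "bad", 5xx as "failed", anything else as success.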
if ((code >= 400) && (code < 500)) {
code = colors.magenta(code);
bad.push(no);
} else if (code >= 500) {
code = colors.red(code);
failed++;
fail.push(no);
} else {
code = colors.green(code);
}
console.log(' --> ' + code + ': Stored ' + response.body.length + ' bytes as souvenir');
}
console.log('');
console.log('==========================');
let status = [];
let succ = no - skipped - failed - bad.length;
if (skipped > 0) {
status.push(colors.blue(skipped + ' skipped'));
}
if (bad.length > 0) {
status.push(colors.magenta(bad.length + ' bad'));
}
if (failed > 0) {
status.push(colors.red(failed + ' failed'))
}
if (succ > 0) {
status.push(colors.green(succ + ' successful'))
}
console.log(' Summary: ' + no + ' requests found [ ' + status.join(', ') + ' ]');
if (fail.length > 0) {
console.log(colors.red(' Failed requests: ' + fail.join(',')));
}
if (bad.length > 0) {
console.log(colors.magenta(' Bad requests: ' + bad.join(',')));
}
}
var args = utils.getArguments();
if (args.hasOwnProperty('args') || args.length === 1) {
var roadMapPath = utils.getRoadMapPath();
var baseUrl = utils.getBaseUrlFromArguments();
doSnapShot(roadMapPath, baseUrl);
} else {
args.printHelp();
} | {
failed++;
fail.push(no);
console.log('ERROR! Request timed out!');
continue;
} | conditional_block |
snapshot.js | 'use strict';
var fs = require('fs'),
_ = require('lodash'),
colors = require('cli-color'),
utils = require('./utils.js');
function | (roadMapPath, urlPrefix) {
var target,
payload,
response,
url;
var roadMap = utils.getRoadMapFromPath(roadMapPath);
console.log('Processing road map file "' + roadMapPath + '":');
var bootInfo = utils.getBootInfo();
var souvenirPath = utils.getSouvenirPathForRoadMapPath(roadMapPath);
var fixTestcases = utils.getTestcases();
if (_.isEmpty(fixTestcases)) {
utils.mkEmptyDirSync(souvenirPath);
}
var no = 0,
skipped = 0,
failed = 0,
fail = [],
bad = [];
for (target in roadMap) {
no++;
if (!_.isEmpty(fixTestcases) && !_.contains(fixTestcases, no)) {
skipped++;
continue;
}
console.log('');
console.log( 'Request #' + no + ' ==================');
payload = roadMap[target];
url = urlPrefix + target;
response = utils.getHttpResponse(url, payload, bootInfo.getHeaders());
if (_.isNull(response)) {
failed++;
fail.push(no);
console.log('ERROR! Request timed out!');
continue;
}
var targetFilePath = utils.getSouvenirPathForTarget(souvenirPath, target);
fs.writeFileSync(targetFilePath, JSON.stringify(response));
let code = response.statusCode;
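// Classify the response: 4xx counts as "bad", 5xx as "failed", anything else as success.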
if ((code >= 400) && (code < 500)) {
code = colors.magenta(code);
bad.push(no);
} else if (code >= 500) {
code = colors.red(code);
failed++;
fail.push(no);
} else {
code = colors.green(code);
}
console.log(' --> ' + code + ': Stored ' + response.body.length + ' bytes as souvenir');
}
console.log('');
console.log('==========================');
let status = [];
let succ = no - skipped - failed - bad.length;
if (skipped > 0) {
status.push(colors.blue(skipped + ' skipped'));
}
if (bad.length > 0) {
status.push(colors.magenta(bad.length + ' bad'));
}
if (failed > 0) {
status.push(colors.red(failed + ' failed'))
}
if (succ > 0) {
status.push(colors.green(succ + ' successful'))
}
console.log(' Summary: ' + no + ' requests found [ ' + status.join(', ') + ' ]');
if (fail.length > 0) {
console.log(colors.red(' Failed requests: ' + fail.join(',')));
}
if (bad.length > 0) {
console.log(colors.magenta(' Bad requests: ' + bad.join(',')));
}
}
var args = utils.getArguments();
if (args.hasOwnProperty('args') || args.length === 1) {
var roadMapPath = utils.getRoadMapPath();
var baseUrl = utils.getBaseUrlFromArguments();
doSnapShot(roadMapPath, baseUrl);
} else {
args.printHelp();
} | doSnapShot | identifier_name |
snapshot.js | 'use strict';
var fs = require('fs'),
_ = require('lodash'),
colors = require('cli-color'),
utils = require('./utils.js');
function doSnapShot(roadMapPath, urlPrefix) |
var args = utils.getArguments();
if (args.hasOwnProperty('args') || args.length === 1) {
var roadMapPath = utils.getRoadMapPath();
var baseUrl = utils.getBaseUrlFromArguments();
doSnapShot(roadMapPath, baseUrl);
} else {
args.printHelp();
} | {
var target,
payload,
response,
url;
var roadMap = utils.getRoadMapFromPath(roadMapPath);
console.log('Processing road map file "' + roadMapPath + '":');
var bootInfo = utils.getBootInfo();
var souvenirPath = utils.getSouvenirPathForRoadMapPath(roadMapPath);
var fixTestcases = utils.getTestcases();
if (_.isEmpty(fixTestcases)) {
utils.mkEmptyDirSync(souvenirPath);
}
var no = 0,
skipped = 0,
failed = 0,
fail = [],
bad = [];
for (target in roadMap) {
no++;
if (!_.isEmpty(fixTestcases) && !_.contains(fixTestcases, no)) {
skipped++;
continue;
}
console.log('');
console.log( 'Request #' + no + ' ==================');
payload = roadMap[target];
url = urlPrefix + target;
response = utils.getHttpResponse(url, payload, bootInfo.getHeaders());
if (_.isNull(response)) {
failed++;
fail.push(no);
console.log('ERROR! Request timed out!');
continue;
}
var targetFilePath = utils.getSouvenirPathForTarget(souvenirPath, target);
fs.writeFileSync(targetFilePath, JSON.stringify(response));
let code = response.statusCode;
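// Classify the response: 4xx counts as "bad", 5xx as "failed", anything else as success.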
if ((code >= 400) && (code < 500)) {
code = colors.magenta(code);
bad.push(no);
} else if (code >= 500) {
code = colors.red(code);
failed++;
fail.push(no);
} else {
code = colors.green(code);
}
console.log(' --> ' + code + ': Stored ' + response.body.length + ' bytes as souvenir');
}
console.log('');
console.log('==========================');
let status = [];
let succ = no - skipped - failed - bad.length;
if (skipped > 0) {
status.push(colors.blue(skipped + ' skipped'));
}
if (bad.length > 0) {
status.push(colors.magenta(bad.length + ' bad'));
}
if (failed > 0) {
status.push(colors.red(failed + ' failed'))
}
if (succ > 0) {
status.push(colors.green(succ + ' successful'))
}
console.log(' Summary: ' + no + ' requests found [ ' + status.join(', ') + ' ]');
if (fail.length > 0) {
console.log(colors.red(' Failed requests: ' + fail.join(',')));
}
if (bad.length > 0) {
console.log(colors.magenta(' Bad requests: ' + bad.join(',')));
}
} | identifier_body |
snapshot.js | 'use strict';
var fs = require('fs'),
_ = require('lodash'),
colors = require('cli-color'),
utils = require('./utils.js');
function doSnapShot(roadMapPath, urlPrefix) {
var target,
payload,
response,
url;
var roadMap = utils.getRoadMapFromPath(roadMapPath);
console.log('Processing road map file "' + roadMapPath + '":');
var bootInfo = utils.getBootInfo();
var souvenirPath = utils.getSouvenirPathForRoadMapPath(roadMapPath);
var fixTestcases = utils.getTestcases();
if (_.isEmpty(fixTestcases)) {
utils.mkEmptyDirSync(souvenirPath);
}
var no = 0,
skipped = 0,
failed = 0,
fail = [],
bad = [];
for (target in roadMap) {
no++;
if (!_.isEmpty(fixTestcases) && !_.contains(fixTestcases, no)) {
skipped++;
continue;
}
console.log('');
console.log( 'Request #' + no + ' ==================');
payload = roadMap[target];
url = urlPrefix + target;
response = utils.getHttpResponse(url, payload, bootInfo.getHeaders());
if (_.isNull(response)) {
failed++;
fail.push(no);
console.log('ERROR! Request timed out!');
continue;
}
var targetFilePath = utils.getSouvenirPathForTarget(souvenirPath, target);
fs.writeFileSync(targetFilePath, JSON.stringify(response));
let code = response.statusCode;
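// Classify the response: 4xx counts as "bad", 5xx as "failed", anything else as success.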
if ((code >= 400) && (code < 500)) {
code = colors.magenta(code);
bad.push(no);
} else if (code >= 500) {
code = colors.red(code);
failed++;
fail.push(no);
} else {
code = colors.green(code);
}
console.log(' --> ' + code + ': Stored ' + response.body.length + ' bytes as souvenir');
}
console.log('');
console.log('==========================');
let status = [];
let succ = no - skipped - failed - bad.length;
if (skipped > 0) {
status.push(colors.blue(skipped + ' skipped'));
}
if (bad.length > 0) {
status.push(colors.magenta(bad.length + ' bad'));
}
if (failed > 0) {
status.push(colors.red(failed + ' failed'))
}
if (succ > 0) {
status.push(colors.green(succ + ' successful'))
}
console.log(' Summary: ' + no + ' requests found [ ' + status.join(', ') + ' ]');
if (fail.length > 0) {
console.log(colors.red(' Failed requests: ' + fail.join(',')));
}
if (bad.length > 0) { | console.log(colors.magenta(' Bad requests: ' + bad.join(',')));
}
}
var args = utils.getArguments();
if (args.hasOwnProperty('args') || args.length === 1) {
var roadMapPath = utils.getRoadMapPath();
var baseUrl = utils.getBaseUrlFromArguments();
doSnapShot(roadMapPath, baseUrl);
} else {
args.printHelp();
} | random_line_split |
|
Cleaner.ts | import { Arr } from '@ephox/katamari';
type Task = () => void;
export interface Cleaner {
readonly add: (task: Task) => void;
readonly run: () => void;
readonly wrap: <T extends any[], U>(fn: (...a: T) => U) => (...args: T) => U;
}
export const Cleaner = (): Cleaner => {
let tasks: Task[] = [];
const add = (task: Task) => {
tasks.push(task);
};
const run = () => {
Arr.each(tasks, (task) => {
try {
task();
} catch (e) {
// eslint-disable-next-line no-console
console.log(e);
}
});
tasks = [];
};
const wrap = <T extends any[], U> (fn: (...a: T) => U) => (...args: T): U => {
run();
return fn.apply(null, args);
};
return {
add, | wrap
};
}; | run, | random_line_split |
JsxProcessor.ts | import {omit} from './util';
import {getCurrentLine} from './util-stacktrace';
import {attributesWithoutListener, registerListenerAttributes} from './Listeners';
import {toValueString} from './Console';
import Color from './Color';
import Font from './Font';
import * as symbols from './symbols';
import checkType from './checkType';
type AttrConverters = {[attr: string]: (value: any) => any};
interface ElementFn {
new(...args: any[]): any;
(...args: any[]): any;
[symbols.originalComponent]?: ElementFn;
[symbols.jsxType]?: boolean;
}
const COMMON_ATTR: AttrConverters = Object.freeze({
textColor: (value: any) => Color.from(value).toString(),
font: (value: any) => Font.from(value).toString(),
children: (value: any) => {
if (!(value instanceof Array)) {
throw new Error('Not an array: ' + toValueString(value));
}
return value;
}
});
const MARKUP: {[el: string]: AttrConverters} = Object.freeze({
br: {},
b: COMMON_ATTR,
span: COMMON_ATTR,
big: COMMON_ATTR,
i: COMMON_ATTR,
small: COMMON_ATTR,
strong: COMMON_ATTR,
ins: COMMON_ATTR,
del: COMMON_ATTR,
a: Object.assign({
href: (value: any) => {
if (typeof value !== 'string') {
throw new Error('Not a string: ' + toValueString(value));
}
return value;
}
}, COMMON_ATTR)
});
export function createJsxProcessor() {
return new JsxProcessor();
}
export default class JsxProcessor {
createElement(Type: ElementFn | string, attributes?: any, ...children: any[]) {
if (!(Type instanceof Function) && typeof Type !== 'string') {
throw new Error(`JSX: Unsupported type ${toValueString(Type)}`);
}
const typeName = Type instanceof Function ? Type.name : Type;
if (attributes?.children && children && children.length) {
throw new Error(`JSX: Children for type ${typeName} given twice.`);
}
// Children may be part of attributes or given as varargs or both.
// For JSX factories/functional components they should always be part of attributes
const rawChildren = children.length ? children : attributes?.children || [];
const {finalChildren, additionalAttributes} = parseChildren(rawChildren, Type);
const finalAttributes = {...attributes};
joinAttributes(finalAttributes, additionalAttributes, Type);
if (finalChildren) {
finalAttributes.children = finalChildren;
}
if (typeof Type === 'string') {
return this.createIntrinsicElement(Type, finalAttributes);
} else if (Type.prototype && Type.prototype[JSX.jsxFactory]) {
return this.createCustomComponent(Type, finalAttributes);
} else {
return this.createFunctionalComponent(Type, finalAttributes);
}
}
createCustomComponent(Type: ElementFn, attributes: any) {
return Type.prototype[JSX.jsxFactory].call(this, Type, attributes);
}
createFunctionalComponent(Type: ElementFn, attributes: any) {
try {
const result = Type.call(this, attributes);
Type[symbols.jsxType] = true;
if (result instanceof Object) {
result[symbols.jsxType] = Type;
}
return result;
} catch (ex) {
throw new Error(`JSX: "${ex.message}" ${getCurrentLine(ex)}`);
}
}
createIntrinsicElement(el: string, attributes: any) {
if (el in MARKUP) {
const encoded: any = {};
Object.keys(attributes || {}).forEach(attribute => {
const encoder = MARKUP[el][attribute];
if (!encoder) {
if (attribute === 'children') {
throw new Error(`Element "${el}" can not have children`); | } else {
throw new Error(`Element "${el}" does not support attribute "${attribute}"`);
}
}
try {
encoded[attribute] = encoder(attributes[attribute]);
} catch(ex) {
throw new Error(`Element "${el}" attribute "${attribute}" can not be set: ${ex.message}`);
}
});
const text = joinTextContent(encoded.children, true);
const tagOpen = [el].concat(Object.keys(encoded || {}).filter(attr => attr !== 'children').map(
attribute => `${attribute}='${encoded[attribute]}'`
)).join(' ');
if (text) {
return `<${tagOpen}>${text}</${el}>`;
}
return `<${tagOpen}/>`;
}
throw new Error(`JSX: Unsupported type ${el}`);
}
createNativeObject(Type: any, attributes: any) {
if (attributes && 'children' in attributes) {
throw new Error(`JSX: ${Type.name} can not have children`);
}
const {data, ...properties} = attributesWithoutListener(attributes || {});
const result = new Type(properties);
registerListenerAttributes(result, attributes);
if (data) {
result.data = data;
}
return result;
}
getChildren(attributes: any) {
if (!attributes || !('children' in attributes)) {
return null;
}
return flattenChildren(attributes.children);
}
withoutChildren(attributes: any) {
return omit(attributes, ['children']);
}
withContentText(attributes: any, content: any[], property: string, markupEnabled: boolean) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const text = attributes && attributes[property]
? attributes[property].toString()
: joinTextContent(content || [], markupEnabled);
return Object.assign(attributes || {}, text ? {[property]: text} : {});
}
withContentChildren(attributes: any, content: any[], property: string) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const children = attributes && attributes[property] ? attributes[property] : (content || []);
return Object.assign(attributes || {}, children ? {[property]: children} : {});
}
withShorthands(
attributes: object,
shorthandsMapping: {[attr: string]: string},
merge: ((value1: any, value2: string) => any)
): object {
const shorthandsKeys = Object.keys(shorthandsMapping);
const shorthands = shorthandsKeys.filter(value => value in attributes);
if (!shorthands.length) {
return attributes;
}
const attrCopy: any = omit(attributes, shorthandsKeys);
shorthands.forEach(shorthand => {
const prop = shorthandsMapping[shorthand];
if (prop in attrCopy) {
attrCopy[prop] = merge(attrCopy[prop], shorthand);
} else {
attrCopy[prop] = shorthand;
}
});
return attrCopy;
}
makeFactories(dic: {[key: string]: ElementFn}) {
const result: {[key: string]: ElementFn} = {};
Object.keys(dic).forEach(key => {
result[key] = this.makeFactory(dic[key]) as ElementFn;
});
return result;
}
makeFactory(constructor: ElementFn): ElementFn {
if (arguments.length !== 1) {
throw new Error(`Expected exactly one argument, got ${arguments.length}`);
}
checkType(constructor, Function, 'first parameter');
if (!constructor.prototype || !constructor.prototype[JSX.jsxFactory]) {
throw new Error(`Function ${constructor.name} is not a valid constructor`);
}
if (constructor[symbols.originalComponent]) {
return this.makeFactory(constructor[symbols.originalComponent] as ElementFn);
}
return createFactoryProxy(this, constructor);
}
}
function createFactoryProxy(processor: JsxProcessor, constructor: ElementFn): ElementFn {
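// Wrap the constructor in a Proxy whose apply trap routes plain calls (no `new`) through processor.createElement, while the original component stays reachable via symbols.originalComponent.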
const handler: ProxyHandler<ElementFn> = {
apply(target, _thisArg, args) {
const [attributes, functionalComponent] = args;
if (args.length > 1) {
if (!(functionalComponent instanceof Function)) {
throw new TypeError('Second parameter must be a function');
}
if (functionalComponent.prototype && functionalComponent.prototype[JSX.jsxFactory]) {
throw new TypeError('Second parameter must be a factory');
}
}
const result = processor.createElement(proxy, attributes);
if (args.length > 1 && result instanceof Object) {
functionalComponent[JSX.jsxType] = true;
result[JSX.jsxType] = functionalComponent;
}
return result;
},
get(target, property, receiver) {
if (receiver === proxy) {
if (property === symbols.originalComponent) {
return constructor;
}
if (property === symbols.proxyHandler) {
return handler;
}
}
return Reflect.get(target, property, receiver);
}
};
/** @type {Factory} */
const proxy = new Proxy(constructor, handler);
return proxy;
}
/**
* Converts any value to a flat array.
*/
export function flattenChildren(children: unknown) {
if (children instanceof Array) {
let result: any[] = [];
for (const child of children) {
if (child && child.toArray) {
result = result.concat(flattenChildren(child.toArray()));
} else if (child instanceof Array) {
result = result.concat(flattenChildren(child));
} else {
result.push(child);
}
}
return result;
}
return [children];
}
export function joinTextContent(textArray: string[], markupEnabled: boolean) {
if (!textArray) {
return null;
}
if (markupEnabled) {
return textArray
.map(str => str + '')
.join('')
.replace(/\s+/g, ' ')
.replace(/\s*<br\s*\/>\s*/g, '<br/>');
}
return textArray.join('');
}
export const JSX = {
processor: null as JsxProcessor | null,
jsxFactory: symbols.jsxFactory,
jsxType: symbols.jsxType,
install(jsxProcessor: JsxProcessor) {
this.processor = jsxProcessor;
},
createElement() {
return this.processor!.createElement.apply(this.processor, arguments as any);
}
};
function parseChildren(rawChildren: any[], type: ElementFn | string) {
const children = rawChildren?.filter(value => !isAttributesObject(value));
const attributes = rawChildren
?.filter(isAttributesObject)
.reduce((prev, current) => joinAttributes(prev, current, type), {});
return {
finalChildren: children.length ? children : null,
additionalAttributes: omit(attributes, [symbols.jsxType, symbols.setterTargetType])
};
}
function isAttributesObject(value: any): value is {[symbols.setterTargetType]: Function} {
return value instanceof Object && value[symbols.setterTargetType] instanceof Function;
}
function joinAttributes(target: any, source: any, actualTargetType: ElementFn | string): any {
const expectedTargetType = source[symbols.setterTargetType];
const actualTargetTypeFn = typeof actualTargetType === 'string' ? String : actualTargetType;
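// Attribute objects record the widget type they target; reject them when set on an incompatible type.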
if (expectedTargetType
&& actualTargetTypeFn.prototype[JSX.jsxFactory] // for SFC we can't know the future instance type
&& (actualTargetTypeFn.prototype !== expectedTargetType.prototype)
&& !(actualTargetTypeFn.prototype instanceof expectedTargetType)
) {
const firstKey = Object.keys(source)[0];
const typeName = actualTargetType instanceof Function
? actualTargetType.name
: actualTargetType;
throw new TypeError(
`Attribute "${firstKey}" is targeting ${expectedTargetType.name}, but is set on ${typeName}`
);
}
Object.keys(source).forEach(key => {
if (key in target) {
if (Array.isArray(target[key]) && Array.isArray(source[key])) {
target[key] = target[key].concat(source[key]);
} else {
throw new Error(`Attribute "${key}" is set multiple times`);
}
} else {
target[key] = source[key];
}
});
return target;
} | random_line_split |
|
JsxProcessor.ts | import {omit} from './util';
import {getCurrentLine} from './util-stacktrace';
import {attributesWithoutListener, registerListenerAttributes} from './Listeners';
import {toValueString} from './Console';
import Color from './Color';
import Font from './Font';
import * as symbols from './symbols';
import checkType from './checkType';
type AttrConverters = {[attr: string]: (value: any) => any};
interface ElementFn {
new(...args: any[]): any;
(...args: any[]): any;
[symbols.originalComponent]?: ElementFn;
[symbols.jsxType]?: boolean;
}
const COMMON_ATTR: AttrConverters = Object.freeze({
textColor: (value: any) => Color.from(value).toString(),
font: (value: any) => Font.from(value).toString(),
children: (value: any) => {
if (!(value instanceof Array)) {
throw new Error('Not an array: ' + toValueString(value));
}
return value;
}
});
const MARKUP: {[el: string]: AttrConverters} = Object.freeze({
br: {},
b: COMMON_ATTR,
span: COMMON_ATTR,
big: COMMON_ATTR,
i: COMMON_ATTR,
small: COMMON_ATTR,
strong: COMMON_ATTR,
ins: COMMON_ATTR,
del: COMMON_ATTR,
a: Object.assign({
href: (value: any) => {
if (typeof value !== 'string') {
throw new Error('Not a string: ' + toValueString(value));
}
return value;
}
}, COMMON_ATTR)
});
export function createJsxProcessor() |
export default class JsxProcessor {
createElement(Type: ElementFn | string, attributes?: any, ...children: any[]) {
if (!(Type instanceof Function) && typeof Type !== 'string') {
throw new Error(`JSX: Unsupported type ${toValueString(Type)}`);
}
const typeName = Type instanceof Function ? Type.name : Type;
if (attributes?.children && children && children.length) {
throw new Error(`JSX: Children for type ${typeName} given twice.`);
}
// Children may be part of attributes or given as varargs or both.
// For JSX factories/functional components they should always be part of attributes
const rawChildren = children.length ? children : attributes?.children || [];
const {finalChildren, additionalAttributes} = parseChildren(rawChildren, Type);
const finalAttributes = {...attributes};
joinAttributes(finalAttributes, additionalAttributes, Type);
if (finalChildren) {
finalAttributes.children = finalChildren;
}
if (typeof Type === 'string') {
return this.createIntrinsicElement(Type, finalAttributes);
} else if (Type.prototype && Type.prototype[JSX.jsxFactory]) {
return this.createCustomComponent(Type, finalAttributes);
} else {
return this.createFunctionalComponent(Type, finalAttributes);
}
}
createCustomComponent(Type: ElementFn, attributes: any) {
return Type.prototype[JSX.jsxFactory].call(this, Type, attributes);
}
createFunctionalComponent(Type: ElementFn, attributes: any) {
try {
const result = Type.call(this, attributes);
Type[symbols.jsxType] = true;
if (result instanceof Object) {
result[symbols.jsxType] = Type;
}
return result;
} catch (ex) {
throw new Error(`JSX: "${ex.message}" ${getCurrentLine(ex)}`);
}
}
createIntrinsicElement(el: string, attributes: any) {
if (el in MARKUP) {
const encoded: any = {};
Object.keys(attributes || {}).forEach(attribute => {
const encoder = MARKUP[el][attribute];
if (!encoder) {
if (attribute === 'children') {
throw new Error(`Element "${el}" can not have children`);
} else {
throw new Error(`Element "${el}" does not support attribute "${attribute}"`);
}
}
try {
encoded[attribute] = encoder(attributes[attribute]);
} catch(ex) {
throw new Error(`Element "${el}" attribute "${attribute}" can not be set: ${ex.message}`);
}
});
const text = joinTextContent(encoded.children, true);
const tagOpen = [el].concat(Object.keys(encoded || {}).filter(attr => attr !== 'children').map(
attribute => `${attribute}='${encoded[attribute]}'`
)).join(' ');
if (text) {
return `<${tagOpen}>${text}</${el}>`;
}
return `<${tagOpen}/>`;
}
throw new Error(`JSX: Unsupported type ${el}`);
}
createNativeObject(Type: any, attributes: any) {
if (attributes && 'children' in attributes) {
throw new Error(`JSX: ${Type.name} can not have children`);
}
const {data, ...properties} = attributesWithoutListener(attributes || {});
const result = new Type(properties);
registerListenerAttributes(result, attributes);
if (data) {
result.data = data;
}
return result;
}
getChildren(attributes: any) {
if (!attributes || !('children' in attributes)) {
return null;
}
return flattenChildren(attributes.children);
}
withoutChildren(attributes: any) {
return omit(attributes, ['children']);
}
withContentText(attributes: any, content: any[], property: string, markupEnabled: boolean) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const text = attributes && attributes[property]
? attributes[property].toString()
: joinTextContent(content || [], markupEnabled);
return Object.assign(attributes || {}, text ? {[property]: text} : {});
}
withContentChildren(attributes: any, content: any[], property: string) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const children = attributes && attributes[property] ? attributes[property] : (content || []);
return Object.assign(attributes || {}, children ? {[property]: children} : {});
}
withShorthands(
attributes: object,
shorthandsMapping: {[attr: string]: string},
merge: ((value1: any, value2: string) => any)
): object {
const shorthandsKeys = Object.keys(shorthandsMapping);
const shorthands = shorthandsKeys.filter(value => value in attributes);
if (!shorthands.length) {
return attributes;
}
const attrCopy: any = omit(attributes, shorthandsKeys);
shorthands.forEach(shorthand => {
const prop = shorthandsMapping[shorthand];
if (prop in attrCopy) {
attrCopy[prop] = merge(attrCopy[prop], shorthand);
} else {
attrCopy[prop] = shorthand;
}
});
return attrCopy;
}
makeFactories(dic: {[key: string]: ElementFn}) {
const result: {[key: string]: ElementFn} = {};
Object.keys(dic).forEach(key => {
result[key] = this.makeFactory(dic[key]) as ElementFn;
});
return result;
}
makeFactory(constructor: ElementFn): ElementFn {
if (arguments.length !== 1) {
throw new Error(`Expected exactly one argument, got ${arguments.length}`);
}
checkType(constructor, Function, 'first parameter');
if (!constructor.prototype || !constructor.prototype[JSX.jsxFactory]) {
throw new Error(`Function ${constructor.name} is not a valid constructor`);
}
if (constructor[symbols.originalComponent]) {
return this.makeFactory(constructor[symbols.originalComponent] as ElementFn);
}
return createFactoryProxy(this, constructor);
}
}
function createFactoryProxy(processor: JsxProcessor, constructor: ElementFn): ElementFn {
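// Wrap the constructor in a Proxy whose apply trap routes plain calls (no `new`) through processor.createElement, while the original component stays reachable via symbols.originalComponent.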
const handler: ProxyHandler<ElementFn> = {
apply(target, _thisArg, args) {
const [attributes, functionalComponent] = args;
if (args.length > 1) {
if (!(functionalComponent instanceof Function)) {
throw new TypeError('Second parameter must be a function');
}
if (functionalComponent.prototype && functionalComponent.prototype[JSX.jsxFactory]) {
throw new TypeError('Second parameter must be a factory');
}
}
const result = processor.createElement(proxy, attributes);
if (args.length > 1 && result instanceof Object) {
functionalComponent[JSX.jsxType] = true;
result[JSX.jsxType] = functionalComponent;
}
return result;
},
get(target, property, receiver) {
if (receiver === proxy) {
if (property === symbols.originalComponent) {
return constructor;
}
if (property === symbols.proxyHandler) {
return handler;
}
}
return Reflect.get(target, property, receiver);
}
};
/** @type {Factory} */
const proxy = new Proxy(constructor, handler);
return proxy;
}
/**
* Converts any value to a flat array.
*/
export function flattenChildren(children: unknown) {
if (children instanceof Array) {
let result: any[] = [];
for (const child of children) {
if (child && child.toArray) {
result = result.concat(flattenChildren(child.toArray()));
} else if (child instanceof Array) {
result = result.concat(flattenChildren(child));
} else {
result.push(child);
}
}
return result;
}
return [children];
}
export function joinTextContent(textArray: string[], markupEnabled: boolean) {
if (!textArray) {
return null;
}
if (markupEnabled) {
return textArray
.map(str => str + '')
.join('')
.replace(/\s+/g, ' ')
.replace(/\s*<br\s*\/>\s*/g, '<br/>');
}
return textArray.join('');
}
export const JSX = {
processor: null as JsxProcessor | null,
jsxFactory: symbols.jsxFactory,
jsxType: symbols.jsxType,
install(jsxProcessor: JsxProcessor) {
this.processor = jsxProcessor;
},
createElement() {
return this.processor!.createElement.apply(this.processor, arguments as any);
}
};
function parseChildren(rawChildren: any[], type: ElementFn | string) {
const children = rawChildren?.filter(value => !isAttributesObject(value));
const attributes = rawChildren
?.filter(isAttributesObject)
.reduce((prev, current) => joinAttributes(prev, current, type), {});
return {
finalChildren: children.length ? children : null,
additionalAttributes: omit(attributes, [symbols.jsxType, symbols.setterTargetType])
};
}
function isAttributesObject(value: any): value is {[symbols.setterTargetType]: Function} {
return value instanceof Object && value[symbols.setterTargetType] instanceof Function;
}
function joinAttributes(target: any, source: any, actualTargetType: ElementFn | string): any {
const expectedTargetType = source[symbols.setterTargetType];
const actualTargetTypeFn = typeof actualTargetType === 'string' ? String : actualTargetType;
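// Attribute objects record the widget type they target; reject them when set on an incompatible type.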
if (expectedTargetType
&& actualTargetTypeFn.prototype[JSX.jsxFactory] // for SFC we can't know the future instance type
&& (actualTargetTypeFn.prototype !== expectedTargetType.prototype)
&& !(actualTargetTypeFn.prototype instanceof expectedTargetType)
) {
const firstKey = Object.keys(source)[0];
const typeName = actualTargetType instanceof Function
? actualTargetType.name
: actualTargetType;
throw new TypeError(
`Attribute "${firstKey}" is targeting ${expectedTargetType.name}, but is set on ${typeName}`
);
}
Object.keys(source).forEach(key => {
if (key in target) {
if (Array.isArray(target[key]) && Array.isArray(source[key])) {
target[key] = target[key].concat(source[key]);
} else {
throw new Error(`Attribute "${key}" is set multiple times`);
}
} else {
target[key] = source[key];
}
});
return target;
}
| {
return new JsxProcessor();
} | identifier_body |
JsxProcessor.ts | import {omit} from './util';
import {getCurrentLine} from './util-stacktrace';
import {attributesWithoutListener, registerListenerAttributes} from './Listeners';
import {toValueString} from './Console';
import Color from './Color';
import Font from './Font';
import * as symbols from './symbols';
import checkType from './checkType';
type AttrConverters = {[attr: string]: (value: any) => any};
interface ElementFn {
new(...args: any[]): any;
(...args: any[]): any;
[symbols.originalComponent]?: ElementFn;
[symbols.jsxType]?: boolean;
}
const COMMON_ATTR: AttrConverters = Object.freeze({
textColor: (value: any) => Color.from(value).toString(),
font: (value: any) => Font.from(value).toString(),
children: (value: any) => {
if (!(value instanceof Array)) {
throw new Error('Not an array: ' + toValueString(value));
}
return value;
}
});
const MARKUP: {[el: string]: AttrConverters} = Object.freeze({
br: {},
b: COMMON_ATTR,
span: COMMON_ATTR,
big: COMMON_ATTR,
i: COMMON_ATTR,
small: COMMON_ATTR,
strong: COMMON_ATTR,
ins: COMMON_ATTR,
del: COMMON_ATTR,
a: Object.assign({
href: (value: any) => {
if (typeof value !== 'string') {
throw new Error('Not a string: ' + toValueString(value));
}
return value;
}
}, COMMON_ATTR)
});
export function createJsxProcessor() {
return new JsxProcessor();
}
export default class JsxProcessor {
createElement(Type: ElementFn | string, attributes?: any, ...children: any[]) {
if (!(Type instanceof Function) && typeof Type !== 'string') {
throw new Error(`JSX: Unsupported type ${toValueString(Type)}`);
}
const typeName = Type instanceof Function ? Type.name : Type;
if (attributes?.children && children && children.length) {
throw new Error(`JSX: Children for type ${typeName} given twice.`);
}
// Children may be part of attributes or given as varargs or both.
// For JSX factories/functional components they should always be part of attributes
const rawChildren = children.length ? children : attributes?.children || [];
const {finalChildren, additionalAttributes} = parseChildren(rawChildren, Type);
const finalAttributes = {...attributes};
joinAttributes(finalAttributes, additionalAttributes, Type);
if (finalChildren) {
finalAttributes.children = finalChildren;
}
if (typeof Type === 'string') {
return this.createIntrinsicElement(Type, finalAttributes);
} else if (Type.prototype && Type.prototype[JSX.jsxFactory]) | else {
return this.createFunctionalComponent(Type, finalAttributes);
}
}
createCustomComponent(Type: ElementFn, attributes: any) {
return Type.prototype[JSX.jsxFactory].call(this, Type, attributes);
}
createFunctionalComponent(Type: ElementFn, attributes: any) {
try {
const result = Type.call(this, attributes);
Type[symbols.jsxType] = true;
if (result instanceof Object) {
result[symbols.jsxType] = Type;
}
return result;
} catch (ex) {
throw new Error(`JSX: "${ex.message}" ${getCurrentLine(ex)}`);
}
}
createIntrinsicElement(el: string, attributes: any) {
if (el in MARKUP) {
const encoded: any = {};
Object.keys(attributes || {}).forEach(attribute => {
const encoder = MARKUP[el][attribute];
if (!encoder) {
if (attribute === 'children') {
throw new Error(`Element "${el}" can not have children`);
} else {
throw new Error(`Element "${el}" does not support attribute "${attribute}"`);
}
}
try {
encoded[attribute] = encoder(attributes[attribute]);
} catch(ex) {
throw new Error(`Element "${el}" attribute "${attribute}" can not be set: ${ex.message}`);
}
});
const text = joinTextContent(encoded.children, true);
const tagOpen = [el].concat(Object.keys(encoded || {}).filter(attr => attr !== 'children').map(
attribute => `${attribute}='${encoded[attribute]}'`
)).join(' ');
if (text) {
return `<${tagOpen}>${text}</${el}>`;
}
return `<${tagOpen}/>`;
}
throw new Error(`JSX: Unsupported type ${el}`);
}
createNativeObject(Type: any, attributes: any) {
if (attributes && 'children' in attributes) {
throw new Error(`JSX: ${Type.name} can not have children`);
}
const {data, ...properties} = attributesWithoutListener(attributes || {});
const result = new Type(properties);
registerListenerAttributes(result, attributes);
if (data) {
result.data = data;
}
return result;
}
getChildren(attributes: any) {
if (!attributes || !('children' in attributes)) {
return null;
}
return flattenChildren(attributes.children);
}
withoutChildren(attributes: any) {
return omit(attributes, ['children']);
}
withContentText(attributes: any, content: any[], property: string, markupEnabled: boolean) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const text = attributes && attributes[property]
? attributes[property].toString()
: joinTextContent(content || [], markupEnabled);
return Object.assign(attributes || {}, text ? {[property]: text} : {});
}
withContentChildren(attributes: any, content: any[], property: string) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const children = attributes && attributes[property] ? attributes[property] : (content || []);
return Object.assign(attributes || {}, children ? {[property]: children} : {});
}
withShorthands(
attributes: object,
shorthandsMapping: {[attr: string]: string},
merge: ((value1: any, value2: string) => any)
): object {
const shorthandsKeys = Object.keys(shorthandsMapping);
const shorthands = shorthandsKeys.filter(value => value in attributes);
if (!shorthands.length) {
return attributes;
}
const attrCopy: any = omit(attributes, shorthandsKeys);
shorthands.forEach(shorthand => {
const prop = shorthandsMapping[shorthand];
if (prop in attrCopy) {
attrCopy[prop] = merge(attrCopy[prop], shorthand);
} else {
attrCopy[prop] = shorthand;
}
});
return attrCopy;
}
makeFactories(dic: {[key: string]: ElementFn}) {
const result: {[key: string]: ElementFn} = {};
Object.keys(dic).forEach(key => {
result[key] = this.makeFactory(dic[key]) as ElementFn;
});
return result;
}
makeFactory(constructor: ElementFn): ElementFn {
if (arguments.length !== 1) {
throw new Error(`Expected exactly one argument, got ${arguments.length}`);
}
checkType(constructor, Function, 'first parameter');
if (!constructor.prototype || !constructor.prototype[JSX.jsxFactory]) {
throw new Error(`Function ${constructor.name} is not a valid constructor`);
}
if (constructor[symbols.originalComponent]) {
return this.makeFactory(constructor[symbols.originalComponent] as ElementFn);
}
return createFactoryProxy(this, constructor);
}
}
function createFactoryProxy(processor: JsxProcessor, constructor: ElementFn): ElementFn {
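// Wrap the constructor in a Proxy whose apply trap routes plain calls (no `new`) through processor.createElement, while the original component stays reachable via symbols.originalComponent.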
const handler: ProxyHandler<ElementFn> = {
apply(target, _thisArg, args) {
const [attributes, functionalComponent] = args;
if (args.length > 1) {
if (!(functionalComponent instanceof Function)) {
throw new TypeError('Second parameter must be a function');
}
if (functionalComponent.prototype && functionalComponent.prototype[JSX.jsxFactory]) {
throw new TypeError('Second parameter must be a factory');
}
}
const result = processor.createElement(proxy, attributes);
if (args.length > 1 && result instanceof Object) {
functionalComponent[JSX.jsxType] = true;
result[JSX.jsxType] = functionalComponent;
}
return result;
},
get(target, property, receiver) {
if (receiver === proxy) {
if (property === symbols.originalComponent) {
return constructor;
}
if (property === symbols.proxyHandler) {
return handler;
}
}
return Reflect.get(target, property, receiver);
}
};
/** @type {Factory} */
const proxy = new Proxy(constructor, handler);
return proxy;
}
/**
* Converts any value to a flat array.
*/
export function flattenChildren(children: unknown) {
if (children instanceof Array) {
let result: any[] = [];
for (const child of children) {
if (child && child.toArray) {
result = result.concat(flattenChildren(child.toArray()));
} else if (child instanceof Array) {
result = result.concat(flattenChildren(child));
} else {
result.push(child);
}
}
return result;
}
return [children];
}
export function joinTextContent(textArray: string[], markupEnabled: boolean) {
if (!textArray) {
return null;
}
if (markupEnabled) {
return textArray
.map(str => str + '')
.join('')
.replace(/\s+/g, ' ')
.replace(/\s*<br\s*\/>\s*/g, '<br/>');
}
return textArray.join('');
}
export const JSX = {
processor: null as JsxProcessor | null,
jsxFactory: symbols.jsxFactory,
jsxType: symbols.jsxType,
install(jsxProcessor: JsxProcessor) {
this.processor = jsxProcessor;
},
createElement() {
return this.processor!.createElement.apply(this.processor, arguments as any);
}
};
function parseChildren(rawChildren: any[], type: ElementFn | string) {
const children = rawChildren?.filter(value => !isAttributesObject(value));
const attributes = rawChildren
?.filter(isAttributesObject)
.reduce((prev, current) => joinAttributes(prev, current, type), {});
return {
finalChildren: children.length ? children : null,
additionalAttributes: omit(attributes, [symbols.jsxType, symbols.setterTargetType])
};
}
function isAttributesObject(value: any): value is {[symbols.setterTargetType]: Function} {
return value instanceof Object && value[symbols.setterTargetType] instanceof Function;
}
function joinAttributes(target: any, source: any, actualTargetType: ElementFn | string): any {
const expectedTargetType = source[symbols.setterTargetType];
const actualTargetTypeFn = typeof actualTargetType === 'string' ? String : actualTargetType;
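// Attribute objects record the widget type they target; reject them when set on an incompatible type.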
if (expectedTargetType
&& actualTargetTypeFn.prototype[JSX.jsxFactory] // for SFC we can't know the future instance type
&& (actualTargetTypeFn.prototype !== expectedTargetType.prototype)
&& !(actualTargetTypeFn.prototype instanceof expectedTargetType)
) {
const firstKey = Object.keys(source)[0];
const typeName = actualTargetType instanceof Function
? actualTargetType.name
: actualTargetType;
throw new TypeError(
`Attribute "${firstKey}" is targeting ${expectedTargetType.name}, but is set on ${typeName}`
);
}
Object.keys(source).forEach(key => {
if (key in target) {
if (Array.isArray(target[key]) && Array.isArray(source[key])) {
target[key] = target[key].concat(source[key]);
} else {
throw new Error(`Attribute "${key}" is set multiple times`);
}
} else {
target[key] = source[key];
}
});
return target;
}
| {
return this.createCustomComponent(Type, finalAttributes);
} | conditional_block |
JsxProcessor.ts | import {omit} from './util';
import {getCurrentLine} from './util-stacktrace';
import {attributesWithoutListener, registerListenerAttributes} from './Listeners';
import {toValueString} from './Console';
import Color from './Color';
import Font from './Font';
import * as symbols from './symbols';
import checkType from './checkType';
type AttrConverters = {[attr: string]: (value: any) => any};
interface ElementFn {
new(...args: any[]): any;
(...args: any[]): any;
[symbols.originalComponent]?: ElementFn;
[symbols.jsxType]?: boolean;
}
const COMMON_ATTR: AttrConverters = Object.freeze({
textColor: (value: any) => Color.from(value).toString(),
font: (value: any) => Font.from(value).toString(),
children: (value: any) => {
if (!(value instanceof Array)) {
throw new Error('Not an array: ' + toValueString(value));
}
return value;
}
});
const MARKUP: {[el: string]: AttrConverters} = Object.freeze({
br: {},
b: COMMON_ATTR,
span: COMMON_ATTR,
big: COMMON_ATTR,
i: COMMON_ATTR,
small: COMMON_ATTR,
strong: COMMON_ATTR,
ins: COMMON_ATTR,
del: COMMON_ATTR,
a: Object.assign({
href: (value: any) => {
if (typeof value !== 'string') {
throw new Error('Not a string: ' + toValueString(value));
}
return value;
}
}, COMMON_ATTR)
});
export function createJsxProcessor() {
return new JsxProcessor();
}
export default class JsxProcessor {
createElement(Type: ElementFn | string, attributes?: any, ...children: any[]) {
if (!(Type instanceof Function) && typeof Type !== 'string') {
throw new Error(`JSX: Unsupported type ${toValueString(Type)}`);
}
const typeName = Type instanceof Function ? Type.name : Type;
if (attributes?.children && children && children.length) {
throw new Error(`JSX: Children for type ${typeName} given twice.`);
}
// Children may be part of attributes or given as varargs or both.
// For JSX factories/functional components they should always be part of attributes
const rawChildren = children.length ? children : attributes?.children || [];
const {finalChildren, additionalAttributes} = parseChildren(rawChildren, Type);
const finalAttributes = {...attributes};
joinAttributes(finalAttributes, additionalAttributes, Type);
if (finalChildren) {
finalAttributes.children = finalChildren;
}
if (typeof Type === 'string') {
return this.createIntrinsicElement(Type, finalAttributes);
} else if (Type.prototype && Type.prototype[JSX.jsxFactory]) {
return this.createCustomComponent(Type, finalAttributes);
} else {
return this.createFunctionalComponent(Type, finalAttributes);
}
}
| (Type: ElementFn, attributes: any) {
return Type.prototype[JSX.jsxFactory].call(this, Type, attributes);
}
createFunctionalComponent(Type: ElementFn, attributes: any) {
try {
const result = Type.call(this, attributes);
Type[symbols.jsxType] = true;
if (result instanceof Object) {
result[symbols.jsxType] = Type;
}
return result;
} catch (ex) {
throw new Error(`JSX: "${ex.message}" ${getCurrentLine(ex)}`);
}
}
createIntrinsicElement(el: string, attributes: any) {
if (el in MARKUP) {
const encoded: any = {};
Object.keys(attributes || {}).forEach(attribute => {
const encoder = MARKUP[el][attribute];
if (!encoder) {
if (attribute === 'children') {
throw new Error(`Element "${el}" can not have children`);
} else {
throw new Error(`Element "${el}" does not support attribute "${attribute}"`);
}
}
try {
encoded[attribute] = encoder(attributes[attribute]);
} catch(ex) {
          throw new Error(`Element "${el}" attribute "${attribute}" can not be set: ${ex.message}`);
}
});
const text = joinTextContent(encoded.children, true);
const tagOpen = [el].concat(Object.keys(encoded || {}).filter(attr => attr !== 'children').map(
attribute => `${attribute}='${encoded[attribute]}'`
)).join(' ');
if (text) {
return `<${tagOpen}>${text}</${el}>`;
}
return `<${tagOpen}/>`;
}
throw new Error(`JSX: Unsupported type ${el}`);
}
createNativeObject(Type: any, attributes: any) {
if (attributes && 'children' in attributes) {
throw new Error(`JSX: ${Type.name} can not have children`);
}
const {data, ...properties} = attributesWithoutListener(attributes || {});
const result = new Type(properties);
registerListenerAttributes(result, attributes);
if (data) {
result.data = data;
}
return result;
}
getChildren(attributes: any) {
if (!attributes || !('children' in attributes)) {
return null;
}
return flattenChildren(attributes.children);
}
withoutChildren(attributes: any) {
return omit(attributes, ['children']);
}
withContentText(attributes: any, content: any[], property: string, markupEnabled: boolean) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const text = attributes && attributes[property]
? attributes[property].toString()
: joinTextContent(content || [], markupEnabled);
return Object.assign(attributes || {}, text ? {[property]: text} : {});
}
withContentChildren(attributes: any, content: any[], property: string) {
if (attributes && attributes[property] && content && content.length) {
throw new Error(`JSX: ${property} given twice`);
}
const children = attributes && attributes[property] ? attributes[property] : (content || []);
return Object.assign(attributes || {}, children ? {[property]: children} : {});
}
withShorthands(
attributes: object,
shorthandsMapping: {[attr: string]: string},
merge: ((value1: any, value2: string) => any)
): object {
const shorthandsKeys = Object.keys(shorthandsMapping);
const shorthands = shorthandsKeys.filter(value => value in attributes);
if (!shorthands.length) {
return attributes;
}
const attrCopy: any = omit(attributes, shorthandsKeys);
shorthands.forEach(shorthand => {
const prop = shorthandsMapping[shorthand];
if (prop in attrCopy) {
attrCopy[prop] = merge(attrCopy[prop], shorthand);
} else {
attrCopy[prop] = shorthand;
}
});
return attrCopy;
}
makeFactories(dic: {[key: string]: ElementFn}) {
const result: {[key: string]: ElementFn} = {};
Object.keys(dic).forEach(key => {
result[key] = this.makeFactory(dic[key]) as ElementFn;
});
return result;
}
makeFactory(constructor: ElementFn): ElementFn {
if (arguments.length !== 1) {
throw new Error(`Expected exactly one argument, got ${arguments.length}`);
}
checkType(constructor, Function, 'first parameter');
if (!constructor.prototype || !constructor.prototype[JSX.jsxFactory]) {
throw new Error(`Function ${constructor.name} is not a valid constructor`);
}
if (constructor[symbols.originalComponent]) {
return this.makeFactory(constructor[symbols.originalComponent] as ElementFn);
}
return createFactoryProxy(this, constructor);
}
}
function createFactoryProxy(processor: JsxProcessor, constructor: ElementFn): ElementFn {
const handler: ProxyHandler<ElementFn> = {
apply(target, _thisArg, args) {
const [attributes, functionalComponent] = args;
if (args.length > 1) {
if (!(functionalComponent instanceof Function)) {
throw new TypeError('Second parameter must be a function');
}
if (functionalComponent.prototype && functionalComponent.prototype[JSX.jsxFactory]) {
throw new TypeError('Second parameter must be a factory');
}
}
const result = processor.createElement(proxy, attributes);
if (args.length > 1 && result instanceof Object) {
functionalComponent[JSX.jsxType] = true;
result[JSX.jsxType] = functionalComponent;
}
return result;
},
get(target, property, receiver) {
if (receiver === proxy) {
if (property === symbols.originalComponent) {
return constructor;
}
if (property === symbols.proxyHandler) {
return handler;
}
}
return Reflect.get(target, property, receiver);
}
};
/** @type {Factory} */
const proxy = new Proxy(constructor, handler);
return proxy;
}
/**
* Converts any value to a flat array.
*/
export function flattenChildren(children: unknown) {
if (children instanceof Array) {
let result: any[] = [];
for (const child of children) {
if (child && child.toArray) {
result = result.concat(flattenChildren(child.toArray()));
} else if (child instanceof Array) {
result = result.concat(flattenChildren(child));
} else {
result.push(child);
}
}
return result;
}
return [children];
}
export function joinTextContent(textArray: string[], markupEnabled: boolean) {
if (!textArray) {
return null;
}
if (markupEnabled) {
return textArray
.map(str => str + '')
.join('')
.replace(/\s+/g, ' ')
.replace(/\s*<br\s*\/>\s*/g, '<br/>');
}
return textArray.join('');
}
export const JSX = {
processor: null as JsxProcessor | null,
jsxFactory: symbols.jsxFactory,
jsxType: symbols.jsxType,
install(jsxProcessor: JsxProcessor) {
this.processor = jsxProcessor;
},
createElement() {
return this.processor!.createElement.apply(this.processor, arguments as any);
}
};
function parseChildren(rawChildren: any[], type: ElementFn | string) {
const children = rawChildren?.filter(value => !isAttributesObject(value));
const attributes = rawChildren
?.filter(isAttributesObject)
.reduce((prev, current) => joinAttributes(prev, current, type), {});
return {
finalChildren: children.length ? children : null,
additionalAttributes: omit(attributes, [symbols.jsxType, symbols.setterTargetType])
};
}
function isAttributesObject(value: any): value is {[symbols.setterTargetType]: Function} {
return value instanceof Object && value[symbols.setterTargetType] instanceof Function;
}
function joinAttributes(target: any, source: any, actualTargetType: ElementFn | string): any {
const expectedTargetType = source[symbols.setterTargetType];
const actualTargetTypeFn = typeof actualTargetType === 'string' ? String : actualTargetType;
if (expectedTargetType
&& actualTargetTypeFn.prototype[JSX.jsxFactory] // for SFC we can't know the future instance type
&& (actualTargetTypeFn.prototype !== expectedTargetType.prototype)
&& !(actualTargetTypeFn.prototype instanceof expectedTargetType)
) {
const firstKey = Object.keys(source)[0];
const typeName = actualTargetType instanceof Function
? actualTargetType.name
: actualTargetType;
throw new TypeError(
`Attribute "${firstKey}" is targeting ${expectedTargetType.name}, but is set on ${typeName}`
);
}
Object.keys(source).forEach(key => {
if (key in target) {
if (Array.isArray(target[key]) && Array.isArray(source[key])) {
target[key] = target[key].concat(source[key]);
} else {
throw new Error(`Attribute "${key}" is set multiple times`);
}
} else {
target[key] = source[key];
}
});
return target;
}
| createCustomComponent | identifier_name |
java.io.PipedInputStream.d.ts | declare namespace java {
namespace io {
class PipedInputStream extends java.io.InputStream {
closedByWriter: boolean
closedByReader: boolean
connected: boolean
readSide: java.lang.Thread
writeSide: java.lang.Thread
protected static readonly PIPE_SIZE: int
protected buffer: byte[]
protected in: int
protected out: int
static readonly $assertionsDisabled: boolean
public constructor(arg0: java.io.PipedOutputStream)
public constructor(arg0: java.io.PipedOutputStream, arg1: number | java.lang.Integer)
public constructor()
public constructor(arg0: number | java.lang.Integer)
public connect(arg0: java.io.PipedOutputStream): void
protected receive(arg0: number | java.lang.Integer): void
receive(arg0: number[] | java.lang.Byte[], arg1: number | java.lang.Integer, arg2: number | java.lang.Integer): void
receivedLast(): void
public read(): number
public read(arg0: number[] | java.lang.Byte[], arg1: number | java.lang.Integer, arg2: number | java.lang.Integer): number
public available(): number | }
}
} | public close(): void | random_line_split |
java.io.PipedInputStream.d.ts | declare namespace java {
namespace io {
class | extends java.io.InputStream {
closedByWriter: boolean
closedByReader: boolean
connected: boolean
readSide: java.lang.Thread
writeSide: java.lang.Thread
protected static readonly PIPE_SIZE: int
protected buffer: byte[]
protected in: int
protected out: int
static readonly $assertionsDisabled: boolean
public constructor(arg0: java.io.PipedOutputStream)
public constructor(arg0: java.io.PipedOutputStream, arg1: number | java.lang.Integer)
public constructor()
public constructor(arg0: number | java.lang.Integer)
public connect(arg0: java.io.PipedOutputStream): void
protected receive(arg0: number | java.lang.Integer): void
receive(arg0: number[] | java.lang.Byte[], arg1: number | java.lang.Integer, arg2: number | java.lang.Integer): void
receivedLast(): void
public read(): number
public read(arg0: number[] | java.lang.Byte[], arg1: number | java.lang.Integer, arg2: number | java.lang.Integer): number
public available(): number
public close(): void
}
}
}
| PipedInputStream | identifier_name |
controllers.py | from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])
@login_manager.user_loader
def load_user(user_id):
user_entries = StaffUserEntry.objects(id = user_id)
if user_entries.count() != 1:
return None
currUser = user_entries[0]
user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
return user
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def get_user(email):
entries = StaffUserEntry.objects(email = email)
if entries.count() == 1:
return entries[0]
return None
def verify_user(email, password):
currUser = get_user(email)
if currUser is None:
return None
hashed = currUser.hashed
if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
return load_user(currUser.id)
else:
return None
def login(email):
user = load_user(get_user(email).id)
if user != None:
login_user(user)
identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid))
else:
raise UserDoesNotExistError
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())
def tokenize_email(email):
return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])
def detokenize_email(token):
return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)
def send_recovery_email(email):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
token = tokenize_email(email)
message = sendgrid.Mail()
message.add_to(email)
message.set_from("[email protected]")
message.set_subject("hackBCA III - Account Recovery")
message.set_html("<p></p>")
message.add_filter("templates", "enable", "1")
message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
message.add_substitution("prefix", "staff")
message.add_substitution("token", token)
status, msg = sg.send(message)
def change_name(email, firstname, lastname):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
account.firstname = firstname | account.lastname = lastname
account.save()
login(email) #To update navbar
def change_password(email, password):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
account.hashed = hashed
account.save()
def get_user_attr(email, attr):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
return getattr(user, attr)
def set_user_attr(email, attr, value):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
setattr(user, attr, value)
user.save() | random_line_split |
|
controllers.py | from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])
@login_manager.user_loader
def load_user(user_id):
user_entries = StaffUserEntry.objects(id = user_id)
if user_entries.count() != 1:
return None
currUser = user_entries[0]
user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
return user
@identity_loaded.connect_via(app)
def | (sender, identity):
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def get_user(email):
entries = StaffUserEntry.objects(email = email)
if entries.count() == 1:
return entries[0]
return None
def verify_user(email, password):
currUser = get_user(email)
if currUser is None:
return None
hashed = currUser.hashed
if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
return load_user(currUser.id)
else:
return None
def login(email):
user = load_user(get_user(email).id)
if user != None:
login_user(user)
identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid))
else:
raise UserDoesNotExistError
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())
def tokenize_email(email):
return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])
def detokenize_email(token):
return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)
def send_recovery_email(email):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
token = tokenize_email(email)
message = sendgrid.Mail()
message.add_to(email)
message.set_from("[email protected]")
message.set_subject("hackBCA III - Account Recovery")
message.set_html("<p></p>")
message.add_filter("templates", "enable", "1")
message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
message.add_substitution("prefix", "staff")
message.add_substitution("token", token)
status, msg = sg.send(message)
def change_name(email, firstname, lastname):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
account.firstname = firstname
account.lastname = lastname
account.save()
login(email) #To update navbar
def change_password(email, password):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
account.hashed = hashed
account.save()
def get_user_attr(email, attr):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
return getattr(user, attr)
def set_user_attr(email, attr, value):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
setattr(user, attr, value)
user.save()
| on_identity_loaded | identifier_name |
controllers.py | from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])
@login_manager.user_loader
def load_user(user_id):
user_entries = StaffUserEntry.objects(id = user_id)
if user_entries.count() != 1:
return None
currUser = user_entries[0]
user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
return user
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def get_user(email):
entries = StaffUserEntry.objects(email = email)
if entries.count() == 1:
return entries[0]
return None
def verify_user(email, password):
currUser = get_user(email)
if currUser is None:
return None
hashed = currUser.hashed
if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
return load_user(currUser.id)
else:
return None
def login(email):
user = load_user(get_user(email).id)
if user != None:
|
else:
raise UserDoesNotExistError
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())
def tokenize_email(email):
return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])
def detokenize_email(token):
return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)
def send_recovery_email(email):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
token = tokenize_email(email)
message = sendgrid.Mail()
message.add_to(email)
message.set_from("[email protected]")
message.set_subject("hackBCA III - Account Recovery")
message.set_html("<p></p>")
message.add_filter("templates", "enable", "1")
message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
message.add_substitution("prefix", "staff")
message.add_substitution("token", token)
status, msg = sg.send(message)
def change_name(email, firstname, lastname):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
account.firstname = firstname
account.lastname = lastname
account.save()
login(email) #To update navbar
def change_password(email, password):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
account.hashed = hashed
account.save()
def get_user_attr(email, attr):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
return getattr(user, attr)
def set_user_attr(email, attr, value):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
setattr(user, attr, value)
user.save()
| login_user(user)
identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid)) | conditional_block |
controllers.py | from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])
@login_manager.user_loader
def load_user(user_id):
user_entries = StaffUserEntry.objects(id = user_id)
if user_entries.count() != 1:
return None
currUser = user_entries[0]
user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
return user
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def get_user(email):
entries = StaffUserEntry.objects(email = email)
if entries.count() == 1:
return entries[0]
return None
def verify_user(email, password):
|
def login(email):
user = load_user(get_user(email).id)
if user != None:
login_user(user)
identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid))
else:
raise UserDoesNotExistError
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())
def tokenize_email(email):
return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])
def detokenize_email(token):
return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)
def send_recovery_email(email):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
token = tokenize_email(email)
message = sendgrid.Mail()
message.add_to(email)
message.set_from("[email protected]")
message.set_subject("hackBCA III - Account Recovery")
message.set_html("<p></p>")
message.add_filter("templates", "enable", "1")
message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
message.add_substitution("prefix", "staff")
message.add_substitution("token", token)
status, msg = sg.send(message)
def change_name(email, firstname, lastname):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
account.firstname = firstname
account.lastname = lastname
account.save()
login(email) #To update navbar
def change_password(email, password):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
account.hashed = hashed
account.save()
def get_user_attr(email, attr):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
return getattr(user, attr)
def set_user_attr(email, attr, value):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
setattr(user, attr, value)
user.save()
| currUser = get_user(email)
if currUser is None:
return None
hashed = currUser.hashed
if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
return load_user(currUser.id)
else:
return None | identifier_body |
router.js | import Ember from 'ember';
import config from './config/environment';
var Router = Ember.Router.extend({
location: config.locationType
});
Router.map(function() {
// Auth
this.route('activation', { path: 'activation/' }, function() {
this.route('activate', { path: ':user_id/:token/' }); | });
// Options
this.route('options', { path: 'options/' }, function() {
this.route('forum', { path: 'forum-options/' });
this.route('signature', { path: 'edit-signature/' });
this.route('username', { path: 'change-username/' });
this.route('password', { path: 'change-password/' }, function() {
this.route('confirm', { path: ':token/' });
});
this.route('email', { path: 'change-email/' }, function() {
this.route('confirm', { path: ':token/' });
});
});
// Legal
this.route('terms-of-service', { path: 'terms-of-service/' });
this.route('privacy-policy', { path: 'privacy-policy/' });
// Error
this.route('error-0', { path: 'error-0/' });
this.route('error-403', { path: 'error-403/:reason/' });
this.route('error-404', { path: 'error-404/' });
this.route('error-banned', { path: 'banned/:reason/' });
this.route('not-found', { path: '*path' });
});
export default Router; | });
this.route('forgotten-password', { path: 'forgotten-password/' }, function() {
this.route('change-form', { path: ':user_id/:token/' }); | random_line_split |
validate-lab.ts | import isEmpty from 'lodash/isEmpty'
import Lab from '../../shared/model/Lab'
export class LabError extends Error {
message: string
result?: string
patient?: string
type?: string
constructor(message: string, result: string, patient: string, type: string) {
super(message)
this.message = message
this.result = result
this.patient = patient
this.type = type
}
} | const labError = {} as LabError
if (!lab.patient) {
labError.patient = 'labs.requests.error.patientRequired'
}
if (!lab.type) {
labError.type = 'labs.requests.error.typeRequired'
}
if (!isEmpty(labError)) {
labError.message = 'labs.requests.error.unableToRequest'
}
return labError
}
export function validateLabComplete(lab: Partial<Lab>): LabError {
const labError = {} as LabError
if (!lab.result) {
labError.result = 'labs.requests.error.resultRequiredToComplete'
labError.message = 'labs.requests.error.unableToComplete'
}
return labError
} |
export function validateLabRequest(lab: Partial<Lab>): LabError { | random_line_split |
validate-lab.ts | import isEmpty from 'lodash/isEmpty'
import Lab from '../../shared/model/Lab'
export class LabError extends Error {
message: string
result?: string
patient?: string
type?: string
| (message: string, result: string, patient: string, type: string) {
super(message)
this.message = message
this.result = result
this.patient = patient
this.type = type
}
}
export function validateLabRequest(lab: Partial<Lab>): LabError {
const labError = {} as LabError
if (!lab.patient) {
labError.patient = 'labs.requests.error.patientRequired'
}
if (!lab.type) {
labError.type = 'labs.requests.error.typeRequired'
}
if (!isEmpty(labError)) {
labError.message = 'labs.requests.error.unableToRequest'
}
return labError
}
export function validateLabComplete(lab: Partial<Lab>): LabError {
const labError = {} as LabError
if (!lab.result) {
labError.result = 'labs.requests.error.resultRequiredToComplete'
labError.message = 'labs.requests.error.unableToComplete'
}
return labError
}
| constructor | identifier_name |
validate-lab.ts | import isEmpty from 'lodash/isEmpty'
import Lab from '../../shared/model/Lab'
export class LabError extends Error {
message: string
result?: string
patient?: string
type?: string
constructor(message: string, result: string, patient: string, type: string) {
super(message)
this.message = message
this.result = result
this.patient = patient
this.type = type
}
}
export function validateLabRequest(lab: Partial<Lab>): LabError {
const labError = {} as LabError
if (!lab.patient) {
labError.patient = 'labs.requests.error.patientRequired'
}
if (!lab.type) {
labError.type = 'labs.requests.error.typeRequired'
}
if (!isEmpty(labError)) {
labError.message = 'labs.requests.error.unableToRequest'
}
return labError
}
export function validateLabComplete(lab: Partial<Lab>): LabError | {
const labError = {} as LabError
if (!lab.result) {
labError.result = 'labs.requests.error.resultRequiredToComplete'
labError.message = 'labs.requests.error.unableToComplete'
}
return labError
} | identifier_body |
|
validate-lab.ts | import isEmpty from 'lodash/isEmpty'
import Lab from '../../shared/model/Lab'
export class LabError extends Error {
message: string
result?: string
patient?: string
type?: string
constructor(message: string, result: string, patient: string, type: string) {
super(message)
this.message = message
this.result = result
this.patient = patient
this.type = type
}
}
export function validateLabRequest(lab: Partial<Lab>): LabError {
const labError = {} as LabError
if (!lab.patient) |
if (!lab.type) {
labError.type = 'labs.requests.error.typeRequired'
}
if (!isEmpty(labError)) {
labError.message = 'labs.requests.error.unableToRequest'
}
return labError
}
export function validateLabComplete(lab: Partial<Lab>): LabError {
const labError = {} as LabError
if (!lab.result) {
labError.result = 'labs.requests.error.resultRequiredToComplete'
labError.message = 'labs.requests.error.unableToComplete'
}
return labError
}
| {
labError.patient = 'labs.requests.error.patientRequired'
} | conditional_block |
test.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => |
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if !*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main" != attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized()
}
_ => false
}
}
if has_test_attr && !has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate ...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn path_node(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
}
| {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
} | conditional_block |
test.rs | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if !*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main" != attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized()
}
_ => false
}
}
if has_test_attr && !has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate ...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn path_node(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
} | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | random_line_split |
|
test.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if !*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main" != attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized()
}
_ => false
}
}
if has_test_attr && !has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool |
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate ...
];
}
*/
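// Illustrative sketch: for a hypothetical test function reachable at ::mod_a::my_test,
// mk_test_desc_and_fn_rec (defined below) produces roughly this entry in that list:
//
//     self::extra::test::TestDescAndFn {
//         desc: self::extra::test::TestDesc {
//             name: self::extra::test::StaticTestName("mod_a::my_test"),
//             ignore: false,
//             should_fail: false
//         },
//         testfn: self::extra::test::StaticTestFn(::mod_a::my_test),
//     }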
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn path_node(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
}
| {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
} | identifier_body |
test.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if !*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main" != attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized()
}
_ => false
}
}
if has_test_attr && !has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate ...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn | (ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
}
| path_node | identifier_name |
roulette.component.js | "use strict";
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@angular/core");
var app_service_1 = require("./app.service");
require("./rxjs-extensions");
var RouletteComponent = (function () {
function RouletteComponent(services) {
this.services = services;
this.a = 3000 / (Math.pow(20 * this.services.timba.players.length, 35));
this.totalRounds = 20 * this.services.timba.players.length;
this.initialRounds = 10 * this.services.timba.players.length;
this.accRounds = 15 * this.services.timba.players.length;
}
RouletteComponent.prototype.ngOnInit = function () {
var _this = this;
$("#welcome").css("opacity", "1");
setTimeout(function () {
$("#welcome").css("opacity", "0");
setTimeout(function () {
_this.addPlayerRoulette(0);
_this.addPlayerRouletteFade(0);
setTimeout(function () {
_this.showAndHide("three");
setTimeout(function () {
_this.showAndHide("two");
setTimeout(function () {
_this.showAndHide("one");
setTimeout(function () {
_this.rotate(_this.services.timba.winnerIndex);
setTimeout(function () {
_this.services.playing = false;
_this.services.nav = 'winner';
}, 24000);
}, 2000);
}, 2000);
}, 2000);
}, 500 * _this.services.timba.players.length);
}, 1000);
}, 4000);
};
RouletteComponent.prototype.showAndHide = function (n) {
$("#" + n).css("opacity", "1");
setTimeout(function () {
$("#" + n).css("opacity", "0");
}, 1000);
};
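// Builds the wheel: appends one .roulette-cell per player, rotated i * 360/N degrees
// and pushed 200px out from the center so the player emails form a ring.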
RouletteComponent.prototype.addPlayerRoulette = function (i) {
if (i < this.services.timba.players.length) {
$("#roulette").append("<div id=\"roulette" + i + "\" class=\"roulette-cell\" style=\"transition:opacity 0.5s ease-in-out;opacity:0;transform: rotate(" + i * 360 / this.services.timba.players.length + "deg) translateX(200px);\">" + this.services.timba.players[i].email + "</div>");
this.addPlayerRoulette(++i);
}
};
RouletteComponent.prototype.addPlayerRouletteFade = function (i) {
var _this = this;
setTimeout(function () {
if (i < _this.services.timba.players.length) {
$("#roulette" + i).css("opacity", "1");
if (_this.services.timba.players[i].email == _this.services.user.email) |
_this.addPlayerRouletteFade(++i);
}
}, 500);
};
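// Spins the wheel onto the winner: 4320deg is twelve full turns minus the winner's
// slice angle, animated by the 20s cubic-bezier transition applied just below.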
RouletteComponent.prototype.rotate = function (i) {
$("#roulette").css("transition", "transform 20s cubic-bezier(0.2, 0, 0.000000000000000000000000000000000000000001, 1)");
$("#roulette").css("transform", "rotate(" + (4320 - Math.floor(i * 360 / this.services.timba.players.length)) + "deg)");
};
return RouletteComponent;
}());
RouletteComponent = __decorate([
core_1.Component({
selector: 'roulette',
templateUrl: 'app/roulette.component.html',
styleUrls: ['app/roulette.component.css']
}),
__metadata("design:paramtypes", [app_service_1.AppService])
], RouletteComponent);
exports.RouletteComponent = RouletteComponent;
//# sourceMappingURL=roulette.component.js.map | {
$("#roulette" + i).css("text-shadow", "0 0 10px #fff");
$("#roulette" + i).css("font-weight", "bold");
} | conditional_block |