file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
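Each row below is one fill-in-the-middle (FIM) example: a source file split into a `prefix`, a held-out `middle`, and a `suffix`, plus a `fim_type` label for the kind of span that was held out (the classes appearing in these rows are `conditional_block`, `random_line_split`, `identifier_name`, and `identifier_body`; the bare `|` separators inside each row appear to mark the column boundaries). As a minimal sketch of how a row round-trips — assuming only the column layout from the header above; the function name and the dict-based row representation are illustrative, not part of the dataset:

```python
def reassemble(row: dict) -> str:
    """Rebuild the original source file from one FIM row.

    Assumes `row` maps the column names in the header above to strings;
    the held-out `middle` is spliced back between `prefix` and `suffix`.
    """
    return row["prefix"] + row["middle"] + row["suffix"]


# Illustrative usage with a tiny made-up row:
row = {"prefix": "while msg:\n", "middle": "    msg = recv()\n", "suffix": "print(msg)\n"}
assert reassemble(row) == "while msg:\n    msg = recv()\nprint(msg)\n"
```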
client.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "Marco Sirabella, Owen Davies"
__copyright__ = ""
__credits__ = "Marco Sirabella, Owen Davies"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "Marco Sirabella, Owen Davies"
__email__ = "[email protected], [email protected]"
__status__ = "Prototype"
__module__ = ""
address = ('localhost', 5350)
lguid = '0'
def connect():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.send((hex(uuid.getnode()) + '\n').encode() + bytes(False)) # ik this is such BAD CODE
print("sent")
sock.send(lguid.encode())
print('sent latest guid: {}'.format(lguid))
# contents = "latest guid +5: {}".format(lguid + '5')
msg = True
fullmsg = ''
while msg:
|
print('received message: {}'.format(fullmsg))
sock.close()
connect()
| msg = sock.recv(16).decode() # low byte count for whatever reason
#print('mes rec: {}'.format(msg))
fullmsg += msg | conditional_block |
updater.py | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import signal
import sys
import time
from random import random
from eventlet import patcher, Timeout, TimeoutError
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle
from swift.common.daemon import Daemon
from swift.obj.server import ASYNCDIR
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.successes = 0
self.failures = 0
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.logger.debug(
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in os.listdir(self.devices):
if self.mount_check and not \
os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.successes = 0
self.failures = 0
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once"""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
for device in os.listdir(self.devices):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
async_pending = os.path.join(device, ASYNCDIR)
if not os.path.isdir(async_pending):
return
for prefix in os.listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update in sorted(os.listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.logger.error(
_('ERROR async pending file with unexpected name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
os.unlink(update_path)
else:
self.process_object_update(update_path, device)
last_obj_hash = obj_hash
time.sleep(self.slowdown)
try:
os.rmdir(prefix_path)
except OSError:
pass
def process_object_update(self, update_path, device):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
renamer(update_path, os.path.join(device,
'quarantined', 'objects', os.path.basename(update_path)))
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
success = True
for node in nodes:
if node['id'] not in successes:
status = self.object_update(node, part, update['op'], obj,
update['headers'])
if not (200 <= status < 300) and status != 404:
success = False
else:
successes.append(node['id'])
if success:
self.successes += 1
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
os.unlink(update_path)
else:
self.failures += 1
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path}) | Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE')
:param obj: object name being updated
:param headers: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
return resp.status
except (Exception, TimeoutError):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return 500 | update['successes'] = successes
write_pickle(update, update_path, os.path.join(device, 'tmp'))
def object_update(self, node, part, op, obj, headers):
""" | random_line_split |
updater.py | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import signal
import sys
import time
from random import random
from eventlet import patcher, Timeout, TimeoutError
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle
from swift.common.daemon import Daemon
from swift.obj.server import ASYNCDIR
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.successes = 0
self.failures = 0
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.logger.debug(
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in os.listdir(self.devices):
if self.mount_check and not \
os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.successes = 0
self.failures = 0
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once"""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
for device in os.listdir(self.devices):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
async_pending = os.path.join(device, ASYNCDIR)
if not os.path.isdir(async_pending):
return
for prefix in os.listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update in sorted(os.listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.logger.error(
_('ERROR async pending file with unexpected name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
os.unlink(update_path)
else:
self.process_object_update(update_path, device)
last_obj_hash = obj_hash
time.sleep(self.slowdown)
try:
os.rmdir(prefix_path)
except OSError:
pass
def process_object_update(self, update_path, device):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
renamer(update_path, os.path.join(device,
'quarantined', 'objects', os.path.basename(update_path)))
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
success = True
for node in nodes:
if node['id'] not in successes:
status = self.object_update(node, part, update['op'], obj,
update['headers'])
if not (200 <= status < 300) and status != 404:
success = False
else:
successes.append(node['id'])
if success:
|
else:
self.failures += 1
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
update['successes'] = successes
write_pickle(update, update_path, os.path.join(device, 'tmp'))
def object_update(self, node, part, op, obj, headers):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE')
:param obj: object name being updated
:param headers: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
return resp.status
except (Exception, TimeoutError):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return 500
| self.successes += 1
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
os.unlink(update_path) | conditional_block |
updater.py | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import signal
import sys
import time
from random import random
from eventlet import patcher, Timeout, TimeoutError
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle
from swift.common.daemon import Daemon
from swift.obj.server import ASYNCDIR
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.successes = 0
self.failures = 0
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.logger.debug(
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in os.listdir(self.devices):
if self.mount_check and not \
os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.successes = 0
self.failures = 0
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once"""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
for device in os.listdir(self.devices):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
async_pending = os.path.join(device, ASYNCDIR)
if not os.path.isdir(async_pending):
return
for prefix in os.listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update in sorted(os.listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.logger.error(
_('ERROR async pending file with unexpected name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
os.unlink(update_path)
else:
self.process_object_update(update_path, device)
last_obj_hash = obj_hash
time.sleep(self.slowdown)
try:
os.rmdir(prefix_path)
except OSError:
pass
def | (self, update_path, device):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
renamer(update_path, os.path.join(device,
'quarantined', 'objects', os.path.basename(update_path)))
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
success = True
for node in nodes:
if node['id'] not in successes:
status = self.object_update(node, part, update['op'], obj,
update['headers'])
if not (200 <= status < 300) and status != 404:
success = False
else:
successes.append(node['id'])
if success:
self.successes += 1
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
os.unlink(update_path)
else:
self.failures += 1
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
update['successes'] = successes
write_pickle(update, update_path, os.path.join(device, 'tmp'))
def object_update(self, node, part, op, obj, headers):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE')
:param obj: object name being updated
:param headers: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
return resp.status
except (Exception, TimeoutError):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return 500
| process_object_update | identifier_name |
updater.py | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import signal
import sys
import time
from random import random
from eventlet import patcher, Timeout, TimeoutError
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle
from swift.common.daemon import Daemon
from swift.obj.server import ASYNCDIR
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.successes = 0
self.failures = 0
def get_container_ring(self):
|
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in os.listdir(self.devices):
if self.mount_check and not \
os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.successes = 0
self.failures = 0
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once"""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
for device in os.listdir(self.devices):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
async_pending = os.path.join(device, ASYNCDIR)
if not os.path.isdir(async_pending):
return
for prefix in os.listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update in sorted(os.listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.logger.error(
_('ERROR async pending file with unexpected name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
os.unlink(update_path)
else:
self.process_object_update(update_path, device)
last_obj_hash = obj_hash
time.sleep(self.slowdown)
try:
os.rmdir(prefix_path)
except OSError:
pass
def process_object_update(self, update_path, device):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
renamer(update_path, os.path.join(device,
'quarantined', 'objects', os.path.basename(update_path)))
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
success = True
for node in nodes:
if node['id'] not in successes:
status = self.object_update(node, part, update['op'], obj,
update['headers'])
if not (200 <= status < 300) and status != 404:
success = False
else:
successes.append(node['id'])
if success:
self.successes += 1
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
os.unlink(update_path)
else:
self.failures += 1
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
update['successes'] = successes
write_pickle(update, update_path, os.path.join(device, 'tmp'))
def object_update(self, node, part, op, obj, headers):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE')
:param obj: object name being updated
:param headers: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
return resp.status
except (Exception, TimeoutError):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return 500
| """Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.logger.debug(
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring | identifier_body |
regress-382509.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is JavaScript Engine testing utilities.
*
* The Initial Developer of the Original Code is
* Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//-----------------------------------------------------------------------------
var BUGNUMBER = 382509;
var summary = 'Disallow non-global indirect eval';
var actual = '';
var expect = '';
var global = typeof window == 'undefined' ? this : window;
var object = {};
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
if (options().match(/strict/))
|
if (options().match(/werror/))
{
options('werror');
}
global.foo = eval;
global.a = 'global';
expect = 'global indirect';
actual = global.foo('a+" indirect"');
reportCompare(expect, actual, summary + ': global indirect');
object.foo = eval;
object.a = 'local';
expect = 'EvalError: function eval must be called directly, and not by way of a function of another name';
try
{
actual = object.foo('a+" indirect"');
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': local indirect');
options('strict');
options('werror');
try
{
var foo = eval;
print("foo(1+1)" + foo('1+1'));
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': strict, rename warning');
options('strict');
options('werror');
expect = 'No Error';
try
{
var foo = eval;
foo('1+1');
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': not strict, no rename warning');
exitFunc ('test');
}
| {
options('strict');
} | conditional_block |
regress-382509.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is JavaScript Engine testing utilities.
*
* The Initial Developer of the Original Code is
* Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//-----------------------------------------------------------------------------
var BUGNUMBER = 382509;
var summary = 'Disallow non-global indirect eval';
var actual = '';
var expect = '';
var global = typeof window == 'undefined' ? this : window;
var object = {};
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function | ()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
if (options().match(/strict/))
{
options('strict');
}
if (options().match(/werror/))
{
options('werror');
}
global.foo = eval;
global.a = 'global';
expect = 'global indirect';
actual = global.foo('a+" indirect"');
reportCompare(expect, actual, summary + ': global indirect');
object.foo = eval;
object.a = 'local';
expect = 'EvalError: function eval must be called directly, and not by way of a function of another name';
try
{
actual = object.foo('a+" indirect"');
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': local indirect');
options('strict');
options('werror');
try
{
var foo = eval;
print("foo(1+1)" + foo('1+1'));
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': strict, rename warning');
options('strict');
options('werror');
expect = 'No Error';
try
{
var foo = eval;
foo('1+1');
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': not strict, no rename warning');
exitFunc ('test');
}
| test | identifier_name |
regress-382509.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is JavaScript Engine testing utilities.
*
* The Initial Developer of the Original Code is
* Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//-----------------------------------------------------------------------------
var BUGNUMBER = 382509;
var summary = 'Disallow non-global indirect eval';
var actual = '';
var expect = '';
var global = typeof window == 'undefined' ? this : window;
var object = {};
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
if (options().match(/strict/))
{
options('strict');
}
if (options().match(/werror/))
{
options('werror');
}
global.foo = eval;
global.a = 'global';
expect = 'global indirect';
actual = global.foo('a+" indirect"');
reportCompare(expect, actual, summary + ': global indirect');
object.foo = eval;
object.a = 'local';
expect = 'EvalError: function eval must be called directly, and not by way of a function of another name';
try
{
actual = object.foo('a+" indirect"');
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': local indirect');
options('strict');
options('werror');
try
{
var foo = eval;
print("foo(1+1)" + foo('1+1'));
actual = 'No Error';
}
catch(ex) | reportCompare(expect, actual, summary + ': strict, rename warning');
options('strict');
options('werror');
expect = 'No Error';
try
{
var foo = eval;
foo('1+1');
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': not strict, no rename warning');
exitFunc ('test');
} | {
actual = ex + '';
} | random_line_split |
regress-382509.js | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is JavaScript Engine testing utilities.
*
* The Initial Developer of the Original Code is
* Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//-----------------------------------------------------------------------------
var BUGNUMBER = 382509;
var summary = 'Disallow non-global indirect eval';
var actual = '';
var expect = '';
var global = typeof window == 'undefined' ? this : window;
var object = {};
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
| {
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
if (options().match(/strict/))
{
options('strict');
}
if (options().match(/werror/))
{
options('werror');
}
global.foo = eval;
global.a = 'global';
expect = 'global indirect';
actual = global.foo('a+" indirect"');
reportCompare(expect, actual, summary + ': global indirect');
object.foo = eval;
object.a = 'local';
expect = 'EvalError: function eval must be called directly, and not by way of a function of another name';
try
{
actual = object.foo('a+" indirect"');
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': local indirect');
options('strict');
options('werror');
try
{
var foo = eval;
print("foo(1+1)" + foo('1+1'));
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': strict, rename warning');
options('strict');
options('werror');
expect = 'No Error';
try
{
var foo = eval;
foo('1+1');
actual = 'No Error';
}
catch(ex)
{
actual = ex + '';
}
reportCompare(expect, actual, summary + ': not strict, no rename warning');
exitFunc ('test');
} | identifier_body |
|
glue.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//!
//
// Code relating to drop glue.
use back::abi;
use back::link::*;
use llvm;
use llvm::{ValueRef, get_param};
use metadata::csearch;
use middle::lang_items::ExchangeFreeFnLangItem;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::ty::{self, Ty};
use trans::adt;
use trans::adt::GetDtorType; // for tcx.dtor_type()
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::foreign;
use trans::inline;
use trans::machine::*;
use trans::monomorphize;
use trans::type_of::{type_of, type_of_dtor, sizing_type_of, align_of};
use trans::type_::Type;
use arena::TypedArena;
use libc::c_uint;
use syntax::ast;
pub fn | <'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx,
langcall(cx, None, "", ExchangeFreeFnLangItem),
&[PointerCast(cx, v, Type::i8p(ccx)), size, align],
Some(expr::Ignore),
debug_loc).bcx
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: u64,
align: u32,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
trans_exchange_free_dyn(cx,
v,
C_uint(cx.ccx(), size),
C_uint(cx.ccx(), align),
debug_loc)
}
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ptr: ValueRef,
content_ty: Ty<'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
} else {
bcx
}
}
pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
let tcx = ccx.tcx();
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !type_is_sized(tcx, t) {
return t
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
if !type_needs_drop(tcx, t) {
return tcx.types.i8;
}
match t.sty {
ty::TyBox(typ) if !type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
// `Box<ZeroSizeType>` does not allocate.
if llsize_of_alloc(ccx, llty) == 0 {
tcx.types.i8
} else {
t
}
}
_ => t
}
}
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false, None)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool,
drop_hint: Option<cleanup::DropHintValue>)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
let _icx = push_ctxt("drop_ty");
let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx, t);
let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
} else {
v
};
match drop_hint {
Some(drop_hint) => {
let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
let moved_val =
C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], None, debug_loc);
cx
})
}
None => {
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], None, debug_loc);
}
}
}
bcx
}
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for Newtype
/// itself will be skipped, while the Drop impl for S, if any,
/// will be invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {:?}", g);
let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
debug!("drop glue type {:?}", g);
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
_ => { }
}
let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
};
let llfnty = Type::glue_fn(ccx, llty);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
update_linkage(ccx, llfn, None, OriginalTranslation);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn
}
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
struct_data: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
let repr = adt::represent_type(bcx.ccx(), t);
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, struct_data, dtor_did, class_did, substs)
})
}
pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: ast::DefId,
parent_id: ast::DefId,
substs: &Substs<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
let did = inline::maybe_instantiate_inline(ccx, did);
if !substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone()));
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
get_item_val(ccx, did.node)
} else {
let tcx = ccx.tcx();
let name = csearch::get_symbol(&ccx.sess().cstore, did);
let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs);
let llty = type_of_dtor(ccx, class_ty);
foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv,
llty, ccx.tcx().mk_nil())
}
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx>
{
debug!("trans_struct_drop t: {}", t);
// Find and call the actual destructor
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs);
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 });
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil());
let (_, bcx) = if type_is_sized(bcx.tcx(), t) {
invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None)
} else {
let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_len(bcx, v0))];
invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None)
};
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
t, bcx.val_to_string(info));
if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
t, bcx.val_to_string(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
return (size, align);
}
match t.sty {
ty::TyStruct(def, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true);
debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix());
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
let sized_align = C_uint(ccx, sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let dbloc = DebugLoc::None;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let mut size = Add(bcx, sized_size, unsized_size, dbloc);
// Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any
// padding because drop flags do not have any alignment
// constraints.)
if sizing_type.needs_drop_flag() {
size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
}
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = Select(bcx,
ICmp(bcx,
llvm::IntUGT,
sized_align,
unsized_align,
dbloc),
sized_align,
unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `(size + (align - 1)) / align * align`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align - 1)) & -align`
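//
// (Worked example, for illustration: size = 5, align = 4 gives
// (5 + 3) & -4 = 8, the next multiple of 4; an already aligned
// size is left unchanged.)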
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
let size = And(
bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
(size, align)
}
ty::TyTrait(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
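// (Vtable layout assumed by this file: slot [0] holds the drop
// glue pointer -- see make_drop_glue's TyTrait arm below -- and
// slots [1] and [2] hold the object's size and alignment.)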
let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
let size_ptr = GEPi(bcx, info, &[1]);
let align_ptr = GEPi(bcx, info, &[2]);
(Load(bcx, size_ptr), Load(bcx, align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
C_uint(bcx.ccx(), unit_align))
}
_ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t))
}
}
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> {
let t = g.ty();
let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let inttype = Type::int(bcx.ccx());
let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false);
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx,
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
})
}
}
ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
let tcx = bcx.tcx();
match (tcx.ty_dtor(def.did), skip_dtor) {
(ty::TraitDtor(dtor, true), false) => {
// FIXME(16758) Since the struct is unsized, it is hard to
// find the drop flag (which is at the end of the struct).
// Let's just ignore the flag and pretend everything will be
// OK.
if type_is_sized(bcx.tcx(), t) {
trans_struct_drop_flag(bcx, t, v0, dtor, def.did, substs)
} else {
// Give the user a heads up that we are doing something
// stupid and dangerous.
bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
because the struct is unsized. See issue \
#16758", t));
trans_struct_drop(bcx, t, v0, dtor, def.did, substs)
}
}
(ty::TraitDtor(dtor, false), false) => {
trans_struct_drop(bcx, t, v0, dtor, def.did, substs)
}
(ty::NoDtor, _) | (_, true) => {
// No dtor? Just the default case
iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
}
}
}
ty::TyTrait(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let data_ptr = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let vtable_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,
&[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
None,
DebugLoc::None);
bcx
}
_ => {
if bcx.fcx.type_needs_drop(t) {
iter_structural_ty(bcx,
v0,
t,
|bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
} else {
bcx
}
}
}
}
| trans_exchange_free_dyn | identifier_name |
glue.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//!
//
// Code relating to drop glue.
use back::abi;
use back::link::*;
use llvm;
use llvm::{ValueRef, get_param};
use metadata::csearch;
use middle::lang_items::ExchangeFreeFnLangItem;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::ty::{self, Ty};
use trans::adt;
use trans::adt::GetDtorType; // for tcx.dtor_type()
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::foreign;
use trans::inline;
use trans::machine::*;
use trans::monomorphize;
use trans::type_of::{type_of, type_of_dtor, sizing_type_of, align_of};
use trans::type_::Type;
use arena::TypedArena;
use libc::c_uint;
use syntax::ast;
pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx,
langcall(cx, None, "", ExchangeFreeFnLangItem),
&[PointerCast(cx, v, Type::i8p(ccx)), size, align],
Some(expr::Ignore),
debug_loc).bcx
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: u64,
align: u32,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
trans_exchange_free_dyn(cx,
v,
C_uint(cx.ccx(), size),
C_uint(cx.ccx(), align),
debug_loc)
}
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ptr: ValueRef,
content_ty: Ty<'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
} else {
bcx
}
}
pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
let tcx = ccx.tcx();
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !type_is_sized(tcx, t) {
return t
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
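// Mapping every remaining drop-free type onto i8 lets them all
// share a single trivial drop glue rather than one glue per type.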
if !type_needs_drop(tcx, t) {
return tcx.types.i8;
}
match t.sty {
ty::TyBox(typ) if !type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
// `Box<ZeroSizeType>` does not allocate.
if llsize_of_alloc(ccx, llty) == 0 {
tcx.types.i8
} else {
t
}
}
_ => t
}
}
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false, None)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool,
drop_hint: Option<cleanup::DropHintValue>)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
let _icx = push_ctxt("drop_ty");
let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx, t);
let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
} else {
v
};
match drop_hint {
Some(drop_hint) => {
let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
let moved_val =
C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], None, debug_loc);
cx
})
}
None => {
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], None, debug_loc);
}
}
}
bcx
}
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for Newtype
/// itself will be skipped, while the Drop impl for S, if any,
/// will be invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {:?}", g);
let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
debug!("drop glue type {:?}", g);
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
_ => { }
}
let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
};
let llfnty = Type::glue_fn(ccx, llty);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
update_linkage(ccx, llfn, None, OriginalTranslation);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected to be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn
}
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
struct_data: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
let repr = adt::represent_type(bcx.ccx(), t);
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, struct_data, dtor_did, class_did, substs)
})
}
pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: ast::DefId,
parent_id: ast::DefId,
substs: &Substs<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
let did = inline::maybe_instantiate_inline(ccx, did);
if !substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone()));
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
get_item_val(ccx, did.node)
} else {
let tcx = ccx.tcx();
let name = csearch::get_symbol(&ccx.sess().cstore, did);
let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs);
let llty = type_of_dtor(ccx, class_ty);
foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv,
llty, ccx.tcx().mk_nil())
}
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx>
{
debug!("trans_struct_drop t: {}", t);
// Find and call the actual destructor
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs);
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 });
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil());
let (_, bcx) = if type_is_sized(bcx.tcx(), t) {
invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None)
} else {
let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_len(bcx, v0))];
invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None)
};
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
t, bcx.val_to_string(info));
if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
t, bcx.val_to_string(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
return (size, align);
}
match t.sty {
ty::TyStruct(def, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true);
debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix());
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
let sized_align = C_uint(ccx, sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let dbloc = DebugLoc::None;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let mut size = Add(bcx, sized_size, unsized_size, dbloc);
// Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any
// padding because drop flags do not have any alignment
// constraints.)
if sizing_type.needs_drop_flag() {
size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
}
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = Select(bcx,
ICmp(bcx,
llvm::IntUGT,
sized_align,
unsized_align,
dbloc),
sized_align,
unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `(size + (align - 1)) / align * align`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align - 1)) & -align`
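//
// (Worked example, for illustration: size = 5, align = 4 gives
// (5 + 3) & -4 = 8, the next multiple of 4; an already aligned
// size is left unchanged.)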
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
let size = And(
bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
(size, align)
}
ty::TyTrait(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
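// (Vtable layout assumed by this file: slot [0] holds the drop
// glue pointer -- see make_drop_glue's TyTrait arm below -- and
// slots [1] and [2] hold the object's size and alignment.)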
let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
let size_ptr = GEPi(bcx, info, &[1]);
let align_ptr = GEPi(bcx, info, &[2]);
(Load(bcx, size_ptr), Load(bcx, align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); | }
_ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t))
}
}
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> {
let t = g.ty();
let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let inttype = Type::int(bcx.ccx());
let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false);
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx,
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
})
}
}
ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
let tcx = bcx.tcx();
match (tcx.ty_dtor(def.did), skip_dtor) {
(ty::TraitDtor(dtor, true), false) => {
// FIXME(16758) Since the struct is unsized, it is hard to
// find the drop flag (which is at the end of the struct).
// Let's just ignore the flag and pretend everything will be
// OK.
if type_is_sized(bcx.tcx(), t) {
trans_struct_drop_flag(bcx, t, v0, dtor, def.did, substs)
} else {
// Give the user a heads up that we are doing something
// stupid and dangerous.
bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
because the struct is unsized. See issue \
#16758", t));
trans_struct_drop(bcx, t, v0, dtor, def.did, substs)
}
}
(ty::TraitDtor(dtor, false), false) => {
trans_struct_drop(bcx, t, v0, dtor, def.did, substs)
}
(ty::NoDtor, _) | (_, true) => {
// No dtor? Just the default case
iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
}
}
}
ty::TyTrait(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let data_ptr = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let vtable_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,
&[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
None,
DebugLoc::None);
bcx
}
_ => {
if bcx.fcx.type_needs_drop(t) {
iter_structural_ty(bcx,
v0,
t,
|bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
} else {
bcx
}
}
}
} | let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
C_uint(bcx.ccx(), unit_align)) | random_line_split |
glue.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//!
//
// Code relating to drop glue.
use back::abi;
use back::link::*;
use llvm;
use llvm::{ValueRef, get_param};
use metadata::csearch;
use middle::lang_items::ExchangeFreeFnLangItem;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::ty::{self, Ty};
use trans::adt;
use trans::adt::GetDtorType; // for tcx.dtor_type()
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::foreign;
use trans::inline;
use trans::machine::*;
use trans::monomorphize;
use trans::type_of::{type_of, type_of_dtor, sizing_type_of, align_of};
use trans::type_::Type;
use arena::TypedArena;
use libc::c_uint;
use syntax::ast;
pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: ValueRef,
align: ValueRef,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx,
langcall(cx, None, "", ExchangeFreeFnLangItem),
&[PointerCast(cx, v, Type::i8p(ccx)), size, align],
Some(expr::Ignore),
debug_loc).bcx
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
size: u64,
align: u32,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
trans_exchange_free_dyn(cx,
v,
C_uint(cx.ccx(), size),
C_uint(cx.ccx(), align),
debug_loc)
}
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ptr: ValueRef,
content_ty: Ty<'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
} else {
bcx
}
}
pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
let tcx = ccx.tcx();
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !type_is_sized(tcx, t) {
return t
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
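// Mapping every remaining drop-free type onto i8 lets them all
// share a single trivial drop glue rather than one glue per type.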
if !type_needs_drop(tcx, t) {
return tcx.types.i8;
}
match t.sty {
ty::TyBox(typ) if !type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
// `Box<ZeroSizeType>` does not allocate.
if llsize_of_alloc(ccx, llty) == 0 {
tcx.types.i8
} else {
t
}
}
_ => t
}
}
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false, None)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool,
drop_hint: Option<cleanup::DropHintValue>)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
let _icx = push_ctxt("drop_ty");
let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx, t);
let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
} else {
v
};
match drop_hint {
Some(drop_hint) => {
let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
let moved_val =
C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], None, debug_loc);
cx
})
}
None => {
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], None, debug_loc);
}
}
}
bcx
}
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef |
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for Newtype
/// itself will be skipped, while the Drop impl for S, if any,
/// will be invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {:?}", g);
let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
debug!("drop glue type {:?}", g);
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
_ => { }
}
let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
};
let llfnty = Type::glue_fn(ccx, llty);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
update_linkage(ccx, llfn, None, OriginalTranslation);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected to be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn
}
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
struct_data: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
let repr = adt::represent_type(bcx.ccx(), t);
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, struct_data, dtor_did, class_did, substs)
})
}
pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: ast::DefId,
parent_id: ast::DefId,
substs: &Substs<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
let did = inline::maybe_instantiate_inline(ccx, did);
if !substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone()));
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
get_item_val(ccx, did.node)
} else {
let tcx = ccx.tcx();
let name = csearch::get_symbol(&ccx.sess().cstore, did);
let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs);
let llty = type_of_dtor(ccx, class_ty);
foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv,
llty, ccx.tcx().mk_nil())
}
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx>
{
debug!("trans_struct_drop t: {}", t);
// Find and call the actual destructor
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs);
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 });
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil());
let (_, bcx) = if type_is_sized(bcx.tcx(), t) {
invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None)
} else {
let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_len(bcx, v0))];
invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None)
};
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
t, bcx.val_to_string(info));
if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
t, bcx.val_to_string(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
return (size, align);
}
match t.sty {
ty::TyStruct(def, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true);
debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix());
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
let sized_align = C_uint(ccx, sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let dbloc = DebugLoc::None;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let mut size = Add(bcx, sized_size, unsized_size, dbloc);
// Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any
// padding because drop flags do not have any alignment
// constraints.)
if sizing_type.needs_drop_flag() {
size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
}
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = Select(bcx,
ICmp(bcx,
llvm::IntUGT,
sized_align,
unsized_align,
dbloc),
sized_align,
unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `(size + (align - 1)) / align * align`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align - 1)) & -align`
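//
// (Worked example, for illustration: size = 5, align = 4 gives
// (5 + 3) & -4 = 8, the next multiple of 4; an already aligned
// size is left unchanged.)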
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
let size = And(
bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
(size, align)
}
ty::TyTrait(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
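// (Vtable layout assumed by this file: slot [0] holds the drop
// glue pointer -- see make_drop_glue's TyTrait arm below -- and
// slots [1] and [2] hold the object's size and alignment.)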
let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
let size_ptr = GEPi(bcx, info, &[1]);
let align_ptr = GEPi(bcx, info, &[2]);
(Load(bcx, size_ptr), Load(bcx, align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
C_uint(bcx.ccx(), unit_align))
}
_ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t))
}
}
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> {
let t = g.ty();
let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let inttype = Type::int(bcx.ccx());
let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false);
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx,
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
})
}
}
ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
let tcx = bcx.tcx();
match (tcx.ty_dtor(def.did), skip_dtor) {
(ty::TraitDtor(dtor, true), false) => {
// FIXME(16758) Since the struct is unsized, it is hard to
// find the drop flag (which is at the end of the struct).
// Let's just ignore the flag and pretend everything will be
// OK.
if type_is_sized(bcx.tcx(), t) {
trans_struct_drop_flag(bcx, t, v0, dtor, def.did, substs)
} else {
// Give the user a heads up that we are doing something
// stupid and dangerous.
bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
because the struct is unsized. See issue \
#16758", t));
trans_struct_drop(bcx, t, v0, dtor, def.did, substs)
}
}
(ty::TraitDtor(dtor, false), false) => {
trans_struct_drop(bcx, t, v0, dtor, def.did, substs)
}
(ty::NoDtor, _) | (_, true) => {
// No dtor? Just the default case
iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
}
}
}
ty::TyTrait(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let data_ptr = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let vtable_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,
&[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
None,
DebugLoc::None);
bcx
}
_ => {
if bcx.fcx.type_needs_drop(t) {
iter_structural_ty(bcx,
v0,
t,
|bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
} else {
bcx
}
}
}
}
| {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
} | identifier_body |
plugins.js | import core from 'core-js';
import * as LogManager from 'aurelia-logging';
import {Metadata} from 'aurelia-metadata';
var logger = LogManager.getLogger('aurelia');
function | (aurelia, loader, info){
logger.debug(`Loading plugin ${info.moduleId}.`);
aurelia.currentPluginId = info.moduleId;
return loader.loadModule(info.moduleId).then(m => {
if('configure' in m){
return Promise.resolve(m.configure(aurelia, info.config || {})).then(() => {
aurelia.currentPluginId = null;
logger.debug(`Configured plugin ${info.moduleId}.`);
});
}else{
aurelia.currentPluginId = null;
logger.debug(`Loaded plugin ${info.moduleId}.`);
}
});
}
/**
* Manages loading and configuring plugins.
*
* @class Plugins
* @constructor
* @param {Aurelia} aurelia An instance of Aurelia.
*/
export class Plugins {
constructor(aurelia){
this.aurelia = aurelia;
this.info = [];
this.processed = false;
}
/**
* Configures a plugin before Aurelia starts.
*
* @method plugin
* @param {string} moduleId The ID of the module to configure.
* @param {Object} config The configuration for the specified module.
* @return {Plugins} Returns the current Plugins instance.
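* @example
* // Illustrative only; the module id and option are hypothetical:
* // aurelia.use.plugin('my-plugin', { debug: true });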
*/
plugin(moduleId, config){
var plugin = {moduleId:moduleId, config:config || {}};
if(this.processed){
loadPlugin(this.aurelia, this.aurelia.loader, plugin);
}else{
this.info.push(plugin);
}
return this;
}
_process(){
var aurelia = this.aurelia,
loader = aurelia.loader,
info = this.info,
current;
if(this.processed){
return;
}
var next = () => {
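// Assignment inside the condition below is intentional: dequeue
// plugins one at a time and stop once the list is empty.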
if(current = info.shift()){
return loadPlugin(aurelia, loader, current).then(next);
}
this.processed = true;
return Promise.resolve();
};
return next();
}
}
| loadPlugin | identifier_name |
plugins.js | import core from 'core-js';
import * as LogManager from 'aurelia-logging';
import {Metadata} from 'aurelia-metadata';
var logger = LogManager.getLogger('aurelia');
function loadPlugin(aurelia, loader, info){
logger.debug(`Loading plugin ${info.moduleId}.`);
aurelia.currentPluginId = info.moduleId;
return loader.loadModule(info.moduleId).then(m => {
if('configure' in m){
return Promise.resolve(m.configure(aurelia, info.config || {})).then(() => {
aurelia.currentPluginId = null;
logger.debug(`Configured plugin ${info.moduleId}.`);
});
}else |
});
}
/**
* Manages loading and configuring plugins.
*
* @class Plugins
* @constructor
* @param {Aurelia} aurelia An instance of Aurelia.
*/
export class Plugins {
constructor(aurelia){
this.aurelia = aurelia;
this.info = [];
this.processed = false;
}
/**
* Configures a plugin before Aurelia starts.
*
* @method plugin
* @param {string} moduleId The ID of the module to configure.
* @param {Object} config The configuration for the specified module.
* @return {Plugins} Returns the current Plugins instance.
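* @example
* // Illustrative only; the module id and option are hypothetical:
* // aurelia.use.plugin('my-plugin', { debug: true });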
*/
plugin(moduleId, config){
var plugin = {moduleId:moduleId, config:config || {}};
if(this.processed){
loadPlugin(this.aurelia, this.aurelia.loader, plugin);
}else{
this.info.push(plugin);
}
return this;
}
_process(){
var aurelia = this.aurelia,
loader = aurelia.loader,
info = this.info,
current;
if(this.processed){
return;
}
var next = () => {
if(current = info.shift()){
return loadPlugin(aurelia, loader, current).then(next);
}
this.processed = true;
return Promise.resolve();
};
return next();
}
}
| {
aurelia.currentPluginId = null;
logger.debug(`Loaded plugin ${info.moduleId}.`);
} | conditional_block |
plugins.js | import core from 'core-js';
import * as LogManager from 'aurelia-logging';
import {Metadata} from 'aurelia-metadata';
var logger = LogManager.getLogger('aurelia');
function loadPlugin(aurelia, loader, info){
logger.debug(`Loading plugin ${info.moduleId}.`);
aurelia.currentPluginId = info.moduleId;
return loader.loadModule(info.moduleId).then(m => {
if('configure' in m){
return Promise.resolve(m.configure(aurelia, info.config || {})).then(() => {
aurelia.currentPluginId = null;
logger.debug(`Configured plugin ${info.moduleId}.`);
});
}else{
aurelia.currentPluginId = null;
logger.debug(`Loaded plugin ${info.moduleId}.`);
}
});
}
/**
* Manages loading and configuring plugins.
*
* @class Plugins
* @constructor
* @param {Aurelia} aurelia An instance of Aurelia.
*/
export class Plugins {
constructor(aurelia){
this.aurelia = aurelia;
this.info = [];
this.processed = false;
}
| * @param {config} config The configuration for the specified module.
* @return {Plugins} Returns the current Plugins instance.
*/
plugin(moduleId, config){
var plugin = {moduleId:moduleId, config:config || {}};
if(this.processed){
loadPlugin(this.aurelia, this.aurelia.loader, plugin);
}else{
this.info.push(plugin);
}
return this;
}
_process(){
var aurelia = this.aurelia,
loader = aurelia.loader,
info = this.info,
current;
if(this.processed){
return;
}
var next = () => {
if(current = info.shift()){
return loadPlugin(aurelia, loader, current).then(next);
}
this.processed = true;
return Promise.resolve();
};
return next();
}
} | /**
* Configures a plugin before Aurelia starts.
*
* @method plugin
* @param {moduleId} moduleId The ID of the module to configure. | random_line_split |
plugins.js | import core from 'core-js';
import * as LogManager from 'aurelia-logging';
import {Metadata} from 'aurelia-metadata';
var logger = LogManager.getLogger('aurelia');
function loadPlugin(aurelia, loader, info){
logger.debug(`Loading plugin ${info.moduleId}.`);
aurelia.currentPluginId = info.moduleId;
return loader.loadModule(info.moduleId).then(m => {
if('configure' in m){
return Promise.resolve(m.configure(aurelia, info.config || {})).then(() => {
aurelia.currentPluginId = null;
logger.debug(`Configured plugin ${info.moduleId}.`);
});
}else{
aurelia.currentPluginId = null;
logger.debug(`Loaded plugin ${info.moduleId}.`);
}
});
}
/**
* Manages loading and configuring plugins.
*
* @class Plugins
* @constructor
* @param {Aurelia} aurelia An instance of Aurelia.
*/
export class Plugins {
constructor(aurelia){
this.aurelia = aurelia;
this.info = [];
this.processed = false;
}
/**
* Configures a plugin before Aurelia starts.
*
* @method plugin
* @param {string} moduleId The ID of the module to configure.
* @param {Object} config The configuration for the specified module.
* @return {Plugins} Returns the current Plugins instance.
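* @example
* // Illustrative only; the module id and option are hypothetical:
* // aurelia.use.plugin('my-plugin', { debug: true });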
*/
plugin(moduleId, config) |
_process(){
var aurelia = this.aurelia,
loader = aurelia.loader,
info = this.info,
current;
if(this.processed){
return;
}
var next = () => {
if(current = info.shift()){
return loadPlugin(aurelia, loader, current).then(next);
}
this.processed = true;
return Promise.resolve();
};
return next();
}
}
| {
var plugin = {moduleId:moduleId, config:config || {}};
if(this.processed){
loadPlugin(this.aurelia, this.aurelia.loader, plugin);
}else{
this.info.push(plugin);
}
return this;
} | identifier_body |
canvas.js | var DOM = require('../dom');
var utils = require('../utils');
module.exports = (function() {
var canvas = DOM.newEl('canvas');
var ctx = null;
return function(sceneGraph) {
if (ctx == null) {
ctx = canvas.getContext('2d');
}
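// canvasRatio() is assumed to return the device pixel ratio; sizing
// the backing store by it keeps output sharp on high-DPI displays.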
var dpr = utils.canvasRatio();
var root = sceneGraph.root;
canvas.width = dpr * root.properties.width;
canvas.height = dpr * root.properties.height;
ctx.textBaseline = 'middle';
var bg = root.children.holderBg;
var bgWidth = dpr * bg.width;
var bgHeight = dpr * bg.height;
//todo: parametrize outline width (e.g. in scene object)
var outlineWidth = 2;
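// Offsetting strokes by half the line width keeps the outline
// centered on pixel boundaries so it renders crisply.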
var outlineOffsetWidth = outlineWidth / 2;
ctx.fillStyle = bg.properties.fill;
ctx.fillRect(0, 0, bgWidth, bgHeight);
if (bg.properties.outline) |
var textGroup = root.children.holderTextGroup;
ctx.font = textGroup.properties.font.weight + ' ' + (dpr * textGroup.properties.font.size) + textGroup.properties.font.units + ' ' + textGroup.properties.font.family + ', monospace';
ctx.fillStyle = textGroup.properties.fill;
for (var lineKey in textGroup.children) {
var line = textGroup.children[lineKey];
for (var wordKey in line.children) {
var word = line.children[wordKey];
var x = dpr * (textGroup.x + line.x + word.x);
var y = dpr * (textGroup.y + line.y + word.y + (textGroup.properties.leading / 2));
ctx.fillText(word.properties.text, x, y);
}
}
return canvas.toDataURL('image/png');
};
})(); | {
//todo: abstract this into a method
ctx.strokeStyle = bg.properties.outline.fill;
ctx.lineWidth = bg.properties.outline.width;
ctx.moveTo(outlineOffsetWidth, outlineOffsetWidth);
// TL, TR, BR, BL
ctx.lineTo(bgWidth - outlineOffsetWidth, outlineOffsetWidth);
ctx.lineTo(bgWidth - outlineOffsetWidth, bgHeight - outlineOffsetWidth);
ctx.lineTo(outlineOffsetWidth, bgHeight - outlineOffsetWidth);
ctx.lineTo(outlineOffsetWidth, outlineOffsetWidth);
// Diagonals
ctx.moveTo(0, outlineOffsetWidth);
ctx.lineTo(bgWidth, bgHeight - outlineOffsetWidth);
ctx.moveTo(0, bgHeight - outlineOffsetWidth);
ctx.lineTo(bgWidth, outlineOffsetWidth);
ctx.stroke();
} | conditional_block |
canvas.js | var DOM = require('../dom');
var utils = require('../utils');
module.exports = (function() {
var canvas = DOM.newEl('canvas');
var ctx = null;
return function(sceneGraph) {
if (ctx == null) {
ctx = canvas.getContext('2d');
}
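// canvasRatio() is assumed to return the device pixel ratio; sizing
// the backing store by it keeps output sharp on high-DPI displays.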
var dpr = utils.canvasRatio();
var root = sceneGraph.root;
canvas.width = dpr * root.properties.width;
canvas.height = dpr * root.properties.height ; | ctx.textBaseline = 'middle';
var bg = root.children.holderBg;
var bgWidth = dpr * bg.width;
var bgHeight = dpr * bg.height;
//todo: parametrize outline width (e.g. in scene object)
var outlineWidth = 2;
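// Offsetting strokes by half the line width keeps the outline
// centered on pixel boundaries so it renders crisply.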
var outlineOffsetWidth = outlineWidth / 2;
ctx.fillStyle = bg.properties.fill;
ctx.fillRect(0, 0, bgWidth, bgHeight);
if (bg.properties.outline) {
//todo: abstract this into a method
ctx.strokeStyle = bg.properties.outline.fill;
ctx.lineWidth = bg.properties.outline.width;
ctx.moveTo(outlineOffsetWidth, outlineOffsetWidth);
// TL, TR, BR, BL
ctx.lineTo(bgWidth - outlineOffsetWidth, outlineOffsetWidth);
ctx.lineTo(bgWidth - outlineOffsetWidth, bgHeight - outlineOffsetWidth);
ctx.lineTo(outlineOffsetWidth, bgHeight - outlineOffsetWidth);
ctx.lineTo(outlineOffsetWidth, outlineOffsetWidth);
// Diagonals
ctx.moveTo(0, outlineOffsetWidth);
ctx.lineTo(bgWidth, bgHeight - outlineOffsetWidth);
ctx.moveTo(0, bgHeight - outlineOffsetWidth);
ctx.lineTo(bgWidth, outlineOffsetWidth);
ctx.stroke();
}
var textGroup = root.children.holderTextGroup;
ctx.font = textGroup.properties.font.weight + ' ' + (dpr * textGroup.properties.font.size) + textGroup.properties.font.units + ' ' + textGroup.properties.font.family + ', monospace';
ctx.fillStyle = textGroup.properties.fill;
for (var lineKey in textGroup.children) {
var line = textGroup.children[lineKey];
for (var wordKey in line.children) {
var word = line.children[wordKey];
var x = dpr * (textGroup.x + line.x + word.x);
var y = dpr * (textGroup.y + line.y + word.y + (textGroup.properties.leading / 2));
ctx.fillText(word.properties.text, x, y);
}
}
return canvas.toDataURL('image/png');
};
})(); | random_line_split |
|
IntervalObservable.d.ts | import { Subscriber } from '../Subscriber';
import { IScheduler } from '../Scheduler';
import { Observable } from '../Observable';
/**
* We need this JSDoc comment for affecting ESDoc.
* @extends {Ignored}
* @hide true
*/
export declare class IntervalObservable extends Observable<number> {
private period;
private scheduler;
/**
* Creates an Observable that emits sequential numbers every specified
* interval of time, on a specified IScheduler.
*
* <span class="informal">Emits incremental numbers periodically in time.
* </span>
*
* <img src="./img/interval.png" width="100%">
*
* `interval` returns an Observable that emits an infinite sequence of
* ascending integers, with a constant interval of time of your choosing
* between those emissions. The first emission is not sent immediately, but
* only after the first period has passed. By default, this operator uses the
* `async` IScheduler to provide a notion of time, but you may pass any
* IScheduler to it.
*
* @example <caption>Emits ascending numbers, one every second (1000ms)</caption>
* var numbers = Rx.Observable.interval(1000);
* numbers.subscribe(x => console.log(x));
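*
* @example <caption>Same stream on an explicitly passed scheduler (illustrative)</caption>
* var ticks = Rx.Observable.interval(500, Rx.Scheduler.async);
* ticks.subscribe(x => console.log(x));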
*
* @see {@link timer}
* @see {@link delay}
*
* @param {number} [period=0] The interval size in milliseconds (by default)
* or the time unit determined by the scheduler's clock. | * @static true
* @name interval
* @owner Observable
*/
static create(period?: number, scheduler?: IScheduler): Observable<number>;
static dispatch(state: any): void;
constructor(period?: number, scheduler?: IScheduler);
protected _subscribe(subscriber: Subscriber<number>): void;
} | * @param {Scheduler} [scheduler=async] The IScheduler to use for scheduling
* the emission of values, and providing a notion of "time".
* @return {Observable} An Observable that emits a sequential number each time
* interval. | random_line_split |
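
The JSDoc above pins down interval's contract: nothing is emitted until the first period elapses, then sequential integers follow forever. A small Python generator with the same timing contract, sketched purely as an illustration (the real operator schedules work on an IScheduler instead of blocking):

import itertools
import time

def interval(period_s):
    """Yield 0, 1, 2, ... with the first value only after the first period."""
    for i in itertools.count():
        time.sleep(period_s)  # wait first, emit after -- matches the documented behavior
        yield i

# Emits 0, 1, 2 one second apart, like Rx.Observable.interval(1000).
for n in interval(1.0):
    print(n)
    if n >= 2:
        break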
IntervalObservable.d.ts | import { Subscriber } from '../Subscriber';
import { IScheduler } from '../Scheduler';
import { Observable } from '../Observable';
/**
* We need this JSDoc comment for affecting ESDoc.
* @extends {Ignored}
* @hide true
*/
export declare class | extends Observable<number> {
private period;
private scheduler;
/**
* Creates an Observable that emits sequential numbers every specified
* interval of time, on a specified IScheduler.
*
* <span class="informal">Emits incremental numbers periodically in time.
* </span>
*
* <img src="./img/interval.png" width="100%">
*
* `interval` returns an Observable that emits an infinite sequence of
* ascending integers, with a constant interval of time of your choosing
* between those emissions. The first emission is not sent immediately, but
* only after the first period has passed. By default, this operator uses the
* `async` IScheduler to provide a notion of time, but you may pass any
* IScheduler to it.
*
* @example <caption>Emits ascending numbers, one every second (1000ms)</caption>
* var numbers = Rx.Observable.interval(1000);
* numbers.subscribe(x => console.log(x));
*
* @see {@link timer}
* @see {@link delay}
*
* @param {number} [period=0] The interval size in milliseconds (by default)
* or the time unit determined by the scheduler's clock.
* @param {Scheduler} [scheduler=async] The IScheduler to use for scheduling
* the emission of values, and providing a notion of "time".
* @return {Observable} An Observable that emits a sequential number each time
* interval.
* @static true
* @name interval
* @owner Observable
*/
static create(period?: number, scheduler?: IScheduler): Observable<number>;
static dispatch(state: any): void;
constructor(period?: number, scheduler?: IScheduler);
protected _subscribe(subscriber: Subscriber<number>): void;
}
| IntervalObservable | identifier_name |
lib.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generalization of a state machine for a consensus engine.
//! This will define traits for the header, block, and state of a blockchain.
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
use bigint::hash::H256;
use bigint::prelude::U256;
use util::Address;
/// A header. This contains important metadata about the block, as well as a
/// "seal" that indicates validity to a consensus engine.
pub trait Header {
/// Cryptographic hash of the header, excluding the seal.
fn bare_hash(&self) -> H256;
/// Cryptographic hash of the header, including the seal.
fn hash(&self) -> H256;
/// Get a reference to the seal fields.
fn seal(&self) -> &[Vec<u8>];
/// The author of the header.
fn author(&self) -> &Address;
/// The number of the header.
fn number(&self) -> u64;
}
/// a header with an associated score (difficulty in PoW terms)
pub trait ScoredHeader: Header {
/// Get the score of this header.
fn score(&self) -> &U256;
/// Set the score of this header.
fn set_score(&mut self, score: U256);
}
/// A "live" block is one which is in the process of the transition.
/// The state of this block can be mutated by arbitrary rules of the
/// state transition function.
pub trait LiveBlock: 'static {
/// The block header type.
type Header: Header;
/// Get a reference to the header.
fn header(&self) -> &Self::Header;
/// Get a reference to the uncle headers. If the block type doesn't
/// support uncles, return the empty slice.
fn uncles(&self) -> &[Self::Header];
}
/// Trait for blocks which have a transaction type.
pub trait Transactions: LiveBlock {
/// The transaction type.
type Transaction;
/// Get a reference to the transactions in this block.
fn transactions(&self) -> &[Self::Transaction];
}
/// Generalization of types surrounding blockchain-suitable state machines.
pub trait Machine: for<'a> LocalizedMachine<'a> {
/// The block header type.
type Header: Header;
/// The live block type.
type LiveBlock: LiveBlock<Header=Self::Header>;
/// A handle to a blockchain client for this machine.
type EngineClient: ?Sized;
/// A description of needed auxiliary data.
type AuxiliaryRequest;
/// Errors which can occur when querying or interacting with the machine.
type Error;
}
/// Machine-related types localized to a specific lifetime.
// TODO: this is a workaround for a lack of associated type constructors in the language.
pub trait LocalizedMachine<'a>: Sync + Send {
/// Definition of auxiliary data associated to a specific block.
type AuxiliaryData: 'a;
/// A context providing access to the state in a controlled capacity.
/// Generally also provides verifiable proofs.
type StateContext: ?Sized + 'a;
}
/// A state machine that uses balances.
pub trait WithBalances: Machine {
/// Get the balance, in base units, associated with an account.
/// Extracts data from the live block.
fn balance(&self, live: &Self::LiveBlock, address: &Address) -> Result<U256, Self::Error>;
/// Increment the balance of an account in the state of the live block.
fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
fn note_rewards( | _direct: &[(Address, U256)],
_indirect: &[(Address, U256)],
) -> Result<(), Self::Error> { Ok(()) }
} | &self,
_live: &mut Self::LiveBlock, | random_line_split |
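
The trait layering in lib.rs (Header at the bottom, ScoredHeader and LiveBlock above it, Machine and WithBalances on top) is an ordinary interface hierarchy. A rough Python mirror using abc, with hypothetical names kept close to the Rust ones purely for illustration:

from abc import ABC, abstractmethod

class Header(ABC):
    """Mirror of the Rust Header trait (names kept for illustration)."""
    @abstractmethod
    def bare_hash(self) -> bytes: ...   # hash excluding the seal
    @abstractmethod
    def hash(self) -> bytes: ...        # hash including the seal
    @abstractmethod
    def seal(self) -> list: ...
    @abstractmethod
    def author(self) -> bytes: ...
    @abstractmethod
    def number(self) -> int: ...

class ScoredHeader(Header):
    """Header plus a score -- difficulty, in proof-of-work terms."""
    @abstractmethod
    def score(self) -> int: ...
    @abstractmethod
    def set_score(self, score: int) -> None: ...

class LiveBlock(ABC):
    """A block mid-transition: it exposes its header and (possibly empty) uncles."""
    @abstractmethod
    def header(self) -> Header: ...
    def uncles(self) -> list:
        return []   # blocks without uncle support return the empty slice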
lib.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generalization of a state machine for a consensus engine.
//! This will define traits for the header, block, and state of a blockchain.
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
use bigint::hash::H256;
use bigint::prelude::U256;
use util::Address;
/// A header. This contains important metadata about the block, as well as a
/// "seal" that indicates validity to a consensus engine.
pub trait Header {
/// Cryptographic hash of the header, excluding the seal.
fn bare_hash(&self) -> H256;
/// Cryptographic hash of the header, including the seal.
fn hash(&self) -> H256;
/// Get a reference to the seal fields.
fn seal(&self) -> &[Vec<u8>];
/// The author of the header.
fn author(&self) -> &Address;
/// The number of the header.
fn number(&self) -> u64;
}
/// a header with an associated score (difficulty in PoW terms)
pub trait ScoredHeader: Header {
/// Get the score of this header.
fn score(&self) -> &U256;
/// Set the score of this header.
fn set_score(&mut self, score: U256);
}
/// A "live" block is one which is in the process of the transition.
/// The state of this block can be mutated by arbitrary rules of the
/// state transition function.
pub trait LiveBlock: 'static {
/// The block header type.
type Header: Header;
/// Get a reference to the header.
fn header(&self) -> &Self::Header;
/// Get a reference to the uncle headers. If the block type doesn't
/// support uncles, return the empty slice.
fn uncles(&self) -> &[Self::Header];
}
/// Trait for blocks which have a transaction type.
pub trait Transactions: LiveBlock {
/// The transaction type.
type Transaction;
/// Get a reference to the transactions in this block.
fn transactions(&self) -> &[Self::Transaction];
}
/// Generalization of types surrounding blockchain-suitable state machines.
pub trait Machine: for<'a> LocalizedMachine<'a> {
/// The block header type.
type Header: Header;
/// The live block type.
type LiveBlock: LiveBlock<Header=Self::Header>;
/// A handle to a blockchain client for this machine.
type EngineClient: ?Sized;
/// A description of needed auxiliary data.
type AuxiliaryRequest;
/// Errors which can occur when querying or interacting with the machine.
type Error;
}
/// Machine-related types localized to a specific lifetime.
// TODO: this is a workaround for a lack of associated type constructors in the language.
pub trait LocalizedMachine<'a>: Sync + Send {
/// Definition of auxiliary data associated to a specific block.
type AuxiliaryData: 'a;
/// A context providing access to the state in a controlled capacity.
/// Generally also provides verifiable proofs.
type StateContext: ?Sized + 'a;
}
/// A state machine that uses balances.
pub trait WithBalances: Machine {
/// Get the balance, in base units, associated with an account.
/// Extracts data from the live block.
fn balance(&self, live: &Self::LiveBlock, address: &Address) -> Result<U256, Self::Error>;
/// Increment the balance of an account in the state of the live block.
fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
fn | (
&self,
_live: &mut Self::LiveBlock,
_direct: &[(Address, U256)],
_indirect: &[(Address, U256)],
) -> Result<(), Self::Error> { Ok(()) }
}
| note_rewards | identifier_name |
lib.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generalization of a state machine for a consensus engine.
//! This will define traits for the header, block, and state of a blockchain.
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
use bigint::hash::H256;
use bigint::prelude::U256;
use util::Address;
/// A header. This contains important metadata about the block, as well as a
/// "seal" that indicates validity to a consensus engine.
pub trait Header {
/// Cryptographic hash of the header, excluding the seal.
fn bare_hash(&self) -> H256;
/// Cryptographic hash of the header, including the seal.
fn hash(&self) -> H256;
/// Get a reference to the seal fields.
fn seal(&self) -> &[Vec<u8>];
/// The author of the header.
fn author(&self) -> &Address;
/// The number of the header.
fn number(&self) -> u64;
}
/// a header with an associated score (difficulty in PoW terms)
pub trait ScoredHeader: Header {
/// Get the score of this header.
fn score(&self) -> &U256;
/// Set the score of this header.
fn set_score(&mut self, score: U256);
}
/// A "live" block is one which is in the process of the transition.
/// The state of this block can be mutated by arbitrary rules of the
/// state transition function.
pub trait LiveBlock: 'static {
/// The block header type.
type Header: Header;
/// Get a reference to the header.
fn header(&self) -> &Self::Header;
/// Get a reference to the uncle headers. If the block type doesn't
/// support uncles, return the empty slice.
fn uncles(&self) -> &[Self::Header];
}
/// Trait for blocks which have a transaction type.
pub trait Transactions: LiveBlock {
/// The transaction type.
type Transaction;
/// Get a reference to the transactions in this block.
fn transactions(&self) -> &[Self::Transaction];
}
/// Generalization of types surrounding blockchain-suitable state machines.
pub trait Machine: for<'a> LocalizedMachine<'a> {
/// The block header type.
type Header: Header;
/// The live block type.
type LiveBlock: LiveBlock<Header=Self::Header>;
/// A handle to a blockchain client for this machine.
type EngineClient: ?Sized;
/// A description of needed auxiliary data.
type AuxiliaryRequest;
/// Errors which can occur when querying or interacting with the machine.
type Error;
}
/// Machine-related types localized to a specific lifetime.
// TODO: this is a workaround for a lack of associated type constructors in the language.
pub trait LocalizedMachine<'a>: Sync + Send {
/// Definition of auxiliary data associated to a specific block.
type AuxiliaryData: 'a;
/// A context providing access to the state in a controlled capacity.
/// Generally also provides verifiable proofs.
type StateContext: ?Sized + 'a;
}
/// A state machine that uses balances.
pub trait WithBalances: Machine {
/// Get the balance, in base units, associated with an account.
/// Extracts data from the live block.
fn balance(&self, live: &Self::LiveBlock, address: &Address) -> Result<U256, Self::Error>;
/// Increment the balance of an account in the state of the live block.
fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
fn note_rewards(
&self,
_live: &mut Self::LiveBlock,
_direct: &[(Address, U256)],
_indirect: &[(Address, U256)],
) -> Result<(), Self::Error> |
}
| { Ok(()) } | identifier_body |
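
note_rewards is the only method here with a default body ({ Ok(()) }), so machines that don't track rewards can simply ignore it. A dict-backed Python sketch of the same shape — the class, the dict "live block", and the sample addresses are illustrative assumptions:

class ToyBalances:
    """Dict-backed stand-in for the WithBalances machine (illustrative)."""
    def balance(self, live, address):
        return live.get(address, 0)
    def add_balance(self, live, address, amount):
        live[address] = live.get(address, 0) + amount
    def note_rewards(self, live, direct, indirect):
        # The trait's default body is a no-op; this machine opts in and credits
        # block authors ("direct") and e.g. uncles ("indirect").
        for address, amount in direct + indirect:
            self.add_balance(live, address, amount)

state = {}
ToyBalances().note_rewards(state, direct=[('authorA', 5)], indirect=[('uncleB', 1)])
print(state)  # {'authorA': 5, 'uncleB': 1}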
test_order.py | # coding: utf-8
"""
DeliveryHub
DeliveryHub API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.order import Order
class TestOrder(unittest.TestCase):
""" Order unit test stubs """
|
def tearDown(self):
pass
def testOrder(self):
"""
Test Order
"""
model = swagger_client.models.order.Order()
if __name__ == '__main__':
unittest.main() | def setUp(self):
pass | random_line_split |
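
The generated stub above only constructs the model; a filled-in testOrder would typically build the object with attributes and assert on them. A hedged sketch — the Order constructor arguments are assumptions about the generated model, not taken from this file:

import unittest
import swagger_client

class TestOrderFilledIn(unittest.TestCase):
    def test_order_roundtrip(self):
        # Assumed keyword arguments -- swagger-codegen models usually accept
        # their properties as keywords, but the real names live in order.py.
        model = swagger_client.models.order.Order(id=1, status='placed')
        self.assertEqual(model.id, 1)
        self.assertEqual(model.status, 'placed')

if __name__ == '__main__':
    unittest.main()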
test_order.py | # coding: utf-8
"""
DeliveryHub
DeliveryHub API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.order import Order
class TestOrder(unittest.TestCase):
""" Order unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testOrder(self):
"""
Test Order
"""
model = swagger_client.models.order.Order()
if __name__ == '__main__':
| unittest.main() | conditional_block |
|
test_order.py | # coding: utf-8
"""
DeliveryHub
DeliveryHub API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.order import Order
class TestOrder(unittest.TestCase):
""" Order unit test stubs """
def setUp(self):
pass
def tearDown(self):
|
def testOrder(self):
"""
Test Order
"""
model = swagger_client.models.order.Order()
if __name__ == '__main__':
unittest.main()
| pass | identifier_body |
test_order.py | # coding: utf-8
"""
DeliveryHub
DeliveryHub API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.order import Order
class TestOrder(unittest.TestCase):
""" Order unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def | (self):
"""
Test Order
"""
model = swagger_client.models.order.Order()
if __name__ == '__main__':
unittest.main()
| testOrder | identifier_name |
gcp_spanner_database_facts.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database_facts
description:
- Gather facts for GCP Database
short_description: Gather facts for GCP Database
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The instance to create the database on.
- 'This field represents a link to an Instance resource in GCP. It can be specified
in two ways. First, you can place the name of the resource here as a string.
Alternatively, you can add `register: name-of-resource` to a gcp_spanner_instance
task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a database facts
gcp_spanner_database_facts:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement,
the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def | ():
module = GcpModule(argument_spec=dict(instance=dict(required=True)))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('databases'):
items = items.get('databases')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| main | identifier_name |
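
main() above is a thin pipeline: build the collection URL, GET it with an authenticated session, unwrap the 'databases' key, and exit with the items. The same flow outside Ansible, sketched with requests — the token and the project/instance values are placeholders, while the URL shape comes from collection() above:

import requests

def list_spanner_databases(project, instance, token):
    url = ('https://spanner.googleapis.com/v1/projects/%s/instances/%s/databases'
           % (project, instance))
    response = requests.get(url, headers={'Authorization': 'Bearer %s' % token})
    response.raise_for_status()
    return response.json().get('databases', [])

# databases = list_spanner_databases('test_project', 'my-instance', token='...')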
gcp_spanner_database_facts.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database_facts
description:
- Gather facts for GCP Database
short_description: Gather facts for GCP Database
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The instance to create the database on.
- 'This field represents a link to an Instance resource in GCP. It can be specified
in two ways. First, you can place the name of the resource here as a string.
Alternatively, you can add `register: name-of-resource` to a gcp_spanner_instance
task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a database facts
gcp_spanner_database_facts:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement,
the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(instance=dict(required=True)))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('databases'):
items = items.get('databases')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
|
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res) | identifier_body |
gcp_spanner_database_facts.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
| description:
- Gather facts for GCP Database
short_description: Gather facts for GCP Database
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The instance to create the database on.
- 'This field represents a link to an Instance resource in GCP. It can be specified
in two ways. First, you can place the name of the resource here as a string.
Alternatively, you can add `register: name-of-resource` to a gcp_spanner_instance
task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a database facts
gcp_spanner_database_facts:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement,
the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(instance=dict(required=True)))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('databases'):
items = items.get('databases')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main() | ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database_facts | random_line_split |
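
return_if_object above encodes a defensive pattern worth naming: 404 and 204 mean "no object", transport errors raise, and even a successful body is parsed and checked for an embedded error payload. The same ladder in plain Python — requests-style response assumed, and navigate_hash replaced by a plain dict lookup for illustration:

import json

def return_if_object(response):
    if response.status_code in (404, 204):     # not found / no content -> no object
        return None
    response.raise_for_status()                # transport-level errors raise
    try:
        result = response.json()
    except (json.JSONDecodeError, ValueError) as exc:
        raise RuntimeError('Invalid JSON response: %s' % exc)
    if result.get('error', {}).get('errors'):  # API-level errors ride in the body
        raise RuntimeError(result['error']['errors'])
    return result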
gcp_spanner_database_facts.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database_facts
description:
- Gather facts for GCP Database
short_description: Gather facts for GCP Database
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The instance to create the database on.
- 'This field represents a link to an Instance resource in GCP. It can be specified
in two ways. First, you can place the name of the resource here as a string.
Alternatively, you can add `register: name-of-resource` to a gcp_spanner_instance
task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a database facts
gcp_spanner_database_facts:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement,
the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(instance=dict(required=True)))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('databases'):
items = items.get('databases')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
|
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| return None | conditional_block |
config_installer.py | import os
from conans.tools import unzip
import shutil
from conans.util.files import rmdir, mkdir
from conans.client.remote_registry import RemoteRegistry
from conans import tools
from conans.errors import ConanException
def | (registry_path, remote_file, output):
registry = RemoteRegistry(registry_path, output)
new_registry = RemoteRegistry(remote_file, output)
registry.define_remotes(new_registry.remotes)
def _handle_profiles(source_folder, target_folder, output):
mkdir(target_folder)
for root, _, files in os.walk(source_folder):
relative_path = os.path.relpath(root, source_folder)
if relative_path == ".":
relative_path = ""
for f in files:
profile = os.path.join(relative_path, f)
output.info(" Installing profile %s" % profile)
shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))
def _process_git_repo(repo_url, client_cache, output, runner, tmp_folder):
output.info("Trying to clone repo %s" % repo_url)
with tools.chdir(tmp_folder):
runner('git clone "%s" config' % repo_url, output=output)
tmp_folder = os.path.join(tmp_folder, "config")
_process_folder(tmp_folder, client_cache, output)
def _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):
unzip(zippath, tmp_folder)
if remove:
os.unlink(zippath)
_process_folder(tmp_folder, client_cache, output)
def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
current_conan_conf.read(new_conan_conf_path)
with open(current_conan_conf.filename, "w") as f:
current_conan_conf.write(f)
def _process_folder(folder, client_cache, output):
for root, dirs, files in os.walk(folder):
for f in files:
if f == "settings.yml":
output.info("Installing settings.yml")
settings_path = client_cache.settings_path
shutil.copy(os.path.join(root, f), settings_path)
elif f == "conan.conf":
output.info("Processing conan.conf")
conan_conf = client_cache.conan_config
_handle_conan_conf(conan_conf, os.path.join(root, f))
elif f == "remotes.txt":
output.info("Defining remotes")
registry_path = client_cache.registry
_handle_remotes(registry_path, os.path.join(root, f), output)
else:
output.info("Copying file %s to %s" % (f, client_cache.conan_folder))
shutil.copy(os.path.join(root, f), client_cache.conan_folder)
for d in dirs:
if d == "profiles":
output.info("Installing profiles")
profiles_path = client_cache.profiles_path
_handle_profiles(os.path.join(root, d), profiles_path, output)
break
dirs[:] = [d for d in dirs if d not in ("profiles", ".git")]
def _process_download(item, client_cache, output, tmp_folder):
output.info("Trying to download %s" % item)
zippath = os.path.join(tmp_folder, "config.zip")
tools.download(item, zippath, out=output)
_process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)
def configuration_install(item, client_cache, output, runner):
tmp_folder = os.path.join(client_cache.conan_folder, "tmp_config_install")
# necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/
tmp_folder = os.path.realpath(tmp_folder)
mkdir(tmp_folder)
try:
if item is None:
try:
item = client_cache.conan_config.get_item("general.config_install")
except ConanException:
raise ConanException("Called config install without arguments and "
"'general.config_install' not defined in conan.conf")
if item.endswith(".git"):
_process_git_repo(item, client_cache, output, runner, tmp_folder)
elif os.path.exists(item):
# is a local file
_process_zip_file(item, client_cache, output, tmp_folder)
elif item.startswith("http"):
_process_download(item, client_cache, output, tmp_folder)
else:
raise ConanException("I don't know how to process %s" % item)
finally:
if item:
client_cache.conan_config.set_item("general.config_install", item)
rmdir(tmp_folder)
| _handle_remotes | identifier_name |
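
_handle_remotes loads a remotes file through RemoteRegistry and overwrites the local registry with it. The real parsing lives in conans.client.remote_registry; the sketch below only illustrates the shape of reading such a file, and the two-column "name url" line format is an assumption:

def parse_remotes(path):
    """Parse a remotes file into (name, url) pairs (illustrative format)."""
    remotes = []
    with open(path) as handle:
        for line in handle:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            parts = line.split()
            if len(parts) < 2:
                continue
            remotes.append((parts[0], parts[1]))
    return remotes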
config_installer.py | import os
from conans.tools import unzip
import shutil
from conans.util.files import rmdir, mkdir
from conans.client.remote_registry import RemoteRegistry
from conans import tools
from conans.errors import ConanException
def _handle_remotes(registry_path, remote_file, output):
registry = RemoteRegistry(registry_path, output)
new_registry = RemoteRegistry(remote_file, output)
registry.define_remotes(new_registry.remotes)
def _handle_profiles(source_folder, target_folder, output):
mkdir(target_folder)
for root, _, files in os.walk(source_folder):
relative_path = os.path.relpath(root, source_folder)
if relative_path == ".":
relative_path = ""
for f in files:
profile = os.path.join(relative_path, f)
output.info(" Installing profile %s" % profile)
shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))
def _process_git_repo(repo_url, client_cache, output, runner, tmp_folder):
output.info("Trying to clone repo %s" % repo_url)
with tools.chdir(tmp_folder):
runner('git clone "%s" config' % repo_url, output=output)
tmp_folder = os.path.join(tmp_folder, "config")
_process_folder(tmp_folder, client_cache, output)
def _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):
unzip(zippath, tmp_folder)
if remove:
os.unlink(zippath)
_process_folder(tmp_folder, client_cache, output)
def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
current_conan_conf.read(new_conan_conf_path)
with open(current_conan_conf.filename, "w") as f:
current_conan_conf.write(f)
def _process_folder(folder, client_cache, output):
for root, dirs, files in os.walk(folder):
for f in files:
if f == "settings.yml":
output.info("Installing settings.yml")
settings_path = client_cache.settings_path
shutil.copy(os.path.join(root, f), settings_path)
elif f == "conan.conf":
output.info("Processing conan.conf")
conan_conf = client_cache.conan_config
_handle_conan_conf(conan_conf, os.path.join(root, f))
elif f == "remotes.txt":
output.info("Defining remotes")
registry_path = client_cache.registry
_handle_remotes(registry_path, os.path.join(root, f), output)
else:
output.info("Copying file %s to %s" % (f, client_cache.conan_folder))
shutil.copy(os.path.join(root, f), client_cache.conan_folder)
for d in dirs:
if d == "profiles":
output.info("Installing profiles")
profiles_path = client_cache.profiles_path
_handle_profiles(os.path.join(root, d), profiles_path, output)
break
dirs[:] = [d for d in dirs if d not in ("profiles", ".git")]
def _process_download(item, client_cache, output, tmp_folder):
output.info("Trying to download %s" % item)
zippath = os.path.join(tmp_folder, "config.zip")
tools.download(item, zippath, out=output)
_process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)
def configuration_install(item, client_cache, output, runner):
| tmp_folder = os.path.join(client_cache.conan_folder, "tmp_config_install")
# necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/
tmp_folder = os.path.realpath(tmp_folder)
mkdir(tmp_folder)
try:
if item is None:
try:
item = client_cache.conan_config.get_item("general.config_install")
except ConanException:
raise ConanException("Called config install without arguments and "
"'general.config_install' not defined in conan.conf")
if item.endswith(".git"):
_process_git_repo(item, client_cache, output, runner, tmp_folder)
elif os.path.exists(item):
# is a local file
_process_zip_file(item, client_cache, output, tmp_folder)
elif item.startswith("http"):
_process_download(item, client_cache, output, tmp_folder)
else:
raise ConanException("I don't know how to process %s" % item)
finally:
if item:
client_cache.conan_config.set_item("general.config_install", item)
rmdir(tmp_folder) | identifier_body |
|
config_installer.py | import os
from conans.tools import unzip
import shutil
from conans.util.files import rmdir, mkdir
from conans.client.remote_registry import RemoteRegistry
from conans import tools
from conans.errors import ConanException
def _handle_remotes(registry_path, remote_file, output):
registry = RemoteRegistry(registry_path, output)
new_registry = RemoteRegistry(remote_file, output)
registry.define_remotes(new_registry.remotes)
def _handle_profiles(source_folder, target_folder, output):
mkdir(target_folder)
for root, _, files in os.walk(source_folder):
relative_path = os.path.relpath(root, source_folder)
if relative_path == ".":
relative_path = ""
for f in files:
profile = os.path.join(relative_path, f)
output.info(" Installing profile %s" % profile)
shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))
def _process_git_repo(repo_url, client_cache, output, runner, tmp_folder):
output.info("Trying to clone repo %s" % repo_url)
with tools.chdir(tmp_folder):
runner('git clone "%s" config' % repo_url, output=output)
tmp_folder = os.path.join(tmp_folder, "config")
_process_folder(tmp_folder, client_cache, output)
def _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):
unzip(zippath, tmp_folder)
if remove:
os.unlink(zippath)
_process_folder(tmp_folder, client_cache, output)
def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
current_conan_conf.read(new_conan_conf_path)
with open(current_conan_conf.filename, "w") as f:
current_conan_conf.write(f)
def _process_folder(folder, client_cache, output):
for root, dirs, files in os.walk(folder):
for f in files:
if f == "settings.yml":
output.info("Installing settings.yml")
settings_path = client_cache.settings_path
shutil.copy(os.path.join(root, f), settings_path)
elif f == "conan.conf":
output.info("Processing conan.conf")
conan_conf = client_cache.conan_config
_handle_conan_conf(conan_conf, os.path.join(root, f))
elif f == "remotes.txt":
output.info("Defining remotes")
registry_path = client_cache.registry
_handle_remotes(registry_path, os.path.join(root, f), output)
else:
output.info("Copying file %s to %s" % (f, client_cache.conan_folder))
shutil.copy(os.path.join(root, f), client_cache.conan_folder)
for d in dirs:
if d == "profiles":
output.info("Installing profiles")
profiles_path = client_cache.profiles_path
_handle_profiles(os.path.join(root, d), profiles_path, output)
break
dirs[:] = [d for d in dirs if d not in ("profiles", ".git")]
def _process_download(item, client_cache, output, tmp_folder):
output.info("Trying to download %s" % item)
zippath = os.path.join(tmp_folder, "config.zip")
tools.download(item, zippath, out=output)
_process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)
def configuration_install(item, client_cache, output, runner):
tmp_folder = os.path.join(client_cache.conan_folder, "tmp_config_install")
# necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/
tmp_folder = os.path.realpath(tmp_folder)
mkdir(tmp_folder)
try:
if item is None:
try:
item = client_cache.conan_config.get_item("general.config_install")
except ConanException:
raise ConanException("Called config install without arguments and "
"'general.config_install' not defined in conan.conf")
if item.endswith(".git"):
_process_git_repo(item, client_cache, output, runner, tmp_folder)
elif os.path.exists(item):
# is a local file
|
elif item.startswith("http"):
_process_download(item, client_cache, output, tmp_folder)
else:
raise ConanException("I don't know how to process %s" % item)
finally:
if item:
client_cache.conan_config.set_item("general.config_install", item)
rmdir(tmp_folder)
| _process_zip_file(item, client_cache, output, tmp_folder) | conditional_block |
config_installer.py | import os
from conans.tools import unzip
import shutil
from conans.util.files import rmdir, mkdir
from conans.client.remote_registry import RemoteRegistry
from conans import tools
from conans.errors import ConanException
def _handle_remotes(registry_path, remote_file, output):
registry = RemoteRegistry(registry_path, output)
new_registry = RemoteRegistry(remote_file, output)
registry.define_remotes(new_registry.remotes)
def _handle_profiles(source_folder, target_folder, output):
mkdir(target_folder)
for root, _, files in os.walk(source_folder):
relative_path = os.path.relpath(root, source_folder)
if relative_path == ".":
relative_path = ""
for f in files:
profile = os.path.join(relative_path, f)
output.info(" Installing profile %s" % profile)
shutil.copy(os.path.join(root, f), os.path.join(target_folder, profile))
def _process_git_repo(repo_url, client_cache, output, runner, tmp_folder):
output.info("Trying to clone repo %s" % repo_url)
with tools.chdir(tmp_folder):
runner('git clone "%s" config' % repo_url, output=output)
tmp_folder = os.path.join(tmp_folder, "config")
_process_folder(tmp_folder, client_cache, output)
def _process_zip_file(zippath, client_cache, output, tmp_folder, remove=False):
unzip(zippath, tmp_folder)
if remove:
os.unlink(zippath)
_process_folder(tmp_folder, client_cache, output)
def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
current_conan_conf.read(new_conan_conf_path)
with open(current_conan_conf.filename, "w") as f:
current_conan_conf.write(f)
def _process_folder(folder, client_cache, output):
for root, dirs, files in os.walk(folder): | settings_path = client_cache.settings_path
shutil.copy(os.path.join(root, f), settings_path)
elif f == "conan.conf":
output.info("Processing conan.conf")
conan_conf = client_cache.conan_config
_handle_conan_conf(conan_conf, os.path.join(root, f))
elif f == "remotes.txt":
output.info("Defining remotes")
registry_path = client_cache.registry
_handle_remotes(registry_path, os.path.join(root, f), output)
else:
output.info("Copying file %s to %s" % (f, client_cache.conan_folder))
shutil.copy(os.path.join(root, f), client_cache.conan_folder)
for d in dirs:
if d == "profiles":
output.info("Installing profiles")
profiles_path = client_cache.profiles_path
_handle_profiles(os.path.join(root, d), profiles_path, output)
break
dirs[:] = [d for d in dirs if d not in ("profiles", ".git")]
def _process_download(item, client_cache, output, tmp_folder):
output.info("Trying to download %s" % item)
zippath = os.path.join(tmp_folder, "config.zip")
tools.download(item, zippath, out=output)
_process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)
def configuration_install(item, client_cache, output, runner):
tmp_folder = os.path.join(client_cache.conan_folder, "tmp_config_install")
# necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/
tmp_folder = os.path.realpath(tmp_folder)
mkdir(tmp_folder)
try:
if item is None:
try:
item = client_cache.conan_config.get_item("general.config_install")
except ConanException:
raise ConanException("Called config install without arguments and "
"'general.config_install' not defined in conan.conf")
if item.endswith(".git"):
_process_git_repo(item, client_cache, output, runner, tmp_folder)
elif os.path.exists(item):
# is a local file
_process_zip_file(item, client_cache, output, tmp_folder)
elif item.startswith("http"):
_process_download(item, client_cache, output, tmp_folder)
else:
raise ConanException("I don't know how to process %s" % item)
finally:
if item:
client_cache.conan_config.set_item("general.config_install", item)
rmdir(tmp_folder) | for f in files:
if f == "settings.yml":
output.info("Installing settings.yml") | random_line_split |
setup_test.ts | /**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {setTestEnvs} from '@tensorflow/tfjs-core/dist/jasmine_util';
setTestEnvs([{name: 'test-wasm', backendName: 'wasm', isDataSync: true}]);
const env = jasmine.getEnv();
/** Tests that have these substrings in their name will be included. */
const INCLUDE_LIST: string[] = ['add '];
/** Tests that have these substrings in their name will be excluded. */
const EXCLUDE_LIST: string[] = [
'complex', // Complex numbers not yet implemented.
'gradient', // Gradient is missing.
'broadcast inner dim', // Broadcast inner dim not yet supported.
'broadcast each with 1 dim', // Same as above.
'broadcasting same rank Tensors different shape', // Same as above.
'upcasts when dtypes dont match', // Uses the 'complex' dtype.
];
/**
* Filter method that returns a boolean indicating whether a given test should run
* or be ignored based on its name. The exclude list has priority over the include
* list. Thus, if a test matches both the exclude and the include list, it
* will be excluded.
*/
env.specFilter = spec => {
const name = spec.getFullName();
// Return false (skip the test) if the test is in the exclude list.
for (let i = 0; i < EXCLUDE_LIST.length; ++i) {
if (name.indexOf(EXCLUDE_LIST[i]) > -1) {
return false;
}
}
// Include all regular describe() tests.
if (name.indexOf('test-wasm') < 0) {
return true;
}
// Include all of the wasm specific tests.
if (name.startsWith('wasm')) {
return true;
}
// Include a describeWithFlags() test from tfjs-core only if the test is in
// the include list.
for (let i = 0; i < INCLUDE_LIST.length; ++i) {
if (name.indexOf(INCLUDE_LIST[i]) > -1) |
}
// Otherwise ignore the test.
return false;
};
// Import and run all the tests from core.
import '@tensorflow/tfjs-core/dist/tests';
| {
return true;
} | conditional_block |
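
The spec filter gives the exclude list priority, then admits plain describe() tests, wasm-prefixed suites, and finally include-list matches. The same precedence in Python, easy to unit-test on its own (the list contents are trimmed for brevity):

INCLUDE_LIST = ['add ']
EXCLUDE_LIST = ['complex', 'gradient']

def should_run(name):
    if any(token in name for token in EXCLUDE_LIST):
        return False                      # exclude wins over everything
    if 'test-wasm' not in name:
        return True                       # regular describe() tests
    if name.startswith('wasm'):
        return True                       # wasm-specific suites
    return any(token in name for token in INCLUDE_LIST)

assert should_run('add test-wasm something')      # include-list match
assert not should_run('complex test-wasm add ')   # exclude has priority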
setup_test.ts | /**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {setTestEnvs} from '@tensorflow/tfjs-core/dist/jasmine_util';
setTestEnvs([{name: 'test-wasm', backendName: 'wasm', isDataSync: true}]);
const env = jasmine.getEnv(); | const INCLUDE_LIST: string[] = ['add '];
/** Tests that have these substrings in their name will be excluded. */
const EXCLUDE_LIST: string[] = [
'complex', // Complex numbers not yet implemented.
'gradient', // Gradient is missing.
'broadcast inner dim', // Broadcast inner dim not yet supported.
'broadcast each with 1 dim', // Same as above.
'broadcasting same rank Tensors different shape', // Same as above.
'upcasts when dtypes dont match', // Uses the 'complex' dtype.
];
/**
* Filter method that returns a boolean indicating whether a given test should run
* or be ignored based on its name. The exclude list has priority over the include
* list. Thus, if a test matches both the exclude and the include list, it
* will be excluded.
*/
env.specFilter = spec => {
const name = spec.getFullName();
// Return false (skip the test) if the test is in the exclude list.
for (let i = 0; i < EXCLUDE_LIST.length; ++i) {
if (name.indexOf(EXCLUDE_LIST[i]) > -1) {
return false;
}
}
// Include all regular describe() tests.
if (name.indexOf('test-wasm') < 0) {
return true;
}
// Include all of the wasm specific tests.
if (name.startsWith('wasm')) {
return true;
}
// Include a describeWithFlags() test from tfjs-core only if the test is in
// the include list.
for (let i = 0; i < INCLUDE_LIST.length; ++i) {
if (name.indexOf(INCLUDE_LIST[i]) > -1) {
return true;
}
}
// Otherwise ignore the test.
return false;
};
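// Editor's note -- a worked example of the filter above (test names are
// illustrative): 'test-wasm add scalar' is kept because it contains 'add '
// from INCLUDE_LIST and nothing from EXCLUDE_LIST, while 'test-wasm gradient
// of add' is dropped because EXCLUDE_LIST ('gradient') is checked first and
// takes priority over the include list.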
// Import and run all the tests from core.
import '@tensorflow/tfjs-core/dist/tests'; |
/** Tests that have these substrings in their name will be included. */ | random_line_split |
creatureDB.py | '''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object): | def __init__(self, name='creature.db', use_nominal_cr=False):
self.min_cr = 0.0
self.max_cr = float('inf')
# set flags
self.using_nominal_cr = use_nominal_cr
# initialize database
self.connection = sqlite3.connect(name)
self.connection.text_factory = str
self._create_table()
def _construct_table_columns(self):
'''Constructs a tuple that defines the columns in
the "creatures" table
:returns tuple that defines the columns in "creatures" table
'''
columns = ('id integer primary key autoincrement',
'name varchar(45)')
# set type of CR column depending on flag
if self.using_nominal_cr:
columns = columns + ('CR varchar(10)',)
else:
columns = columns + ('CR real',)
# add the remaining database fields to column tuple
main_entry_columns = (
'hp integer', 'HD integer',
'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
'Fort integer', 'Ref integer', 'Will integer',
'Str integer', 'Dex integer', 'Con integer',
'Int integer', 'Wis integer', 'Cha integer',
'BAB integer', 'CMB integer', 'CMD integer'
)
columns = columns + main_entry_columns
return columns
def _construct_tuple_insert_values(self, creature):
'''Constructs a tuple of Creature values for insertion into
the "creatures" table
:returns tuple of values for insertion into "creatures" table
'''
values = (creature.name,)
# set value of CR column depending on flag
if self.using_nominal_cr:
values = values + ('CR ' + creature.cr,)
else:
values = values + (creature.cr,)
# add the remaining database fields to values tuple
main_entry_values = (
creature.hp,
creature.hd,
creature.ac['AC'],
creature.ac['touch'],
creature.ac['flat-footed'],
creature.saves['Fort'],
creature.saves['Ref'],
creature.saves['Will'],
creature.ability_scores['Str'],
creature.ability_scores['Dex'],
creature.ability_scores['Con'],
creature.ability_scores['Int'],
creature.ability_scores['Wis'],
creature.ability_scores['Cha'],
creature.bab,
creature.cmb,
creature.cmd
)
values = values + main_entry_values
return values
def _create_table(self):
'''Creates the "creatures" SQLite table for storing
Creature objects if it does not already exist
'''
# create table
columns = self._construct_table_columns()
query = '''create table if not exists creatures
(
%s,%s,
%s,%s,
%s,%s,%s,
%s,%s,%s,
%s,%s,%s,%s,%s,%s,%s,
%s, %s, %s
)''' % columns
self.connection.execute(query)
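# Editor's note on the query above: the column definitions are spliced in
# with %-formatting because SQLite's `?` placeholders can only bind values,
# not identifiers or DDL fragments; by contrast, add_creature() below binds
# its row values with `?` parameters.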
def add_creature(self, creature):
'''Adds a Creature object as a row in the appropriate table
of the SQLite database
:param creature: a Creature object to be added to the database
'''
# check that creature CR is within desired range
creature_cr = float(creature.cr)
if creature_cr < self.min_cr or creature_cr > self.max_cr:
return
# ignore duplicate creatures
if self.is_creature_in_db(creature):
return
# insert creature into database
values = self._construct_tuple_insert_values(creature)
query = '''insert into creatures
(
name,CR,
hp,HD,
ac,touch_ac,flatfooted_ac,
Fort, Ref, Will,
Str,Dex,Con,Int,Wis,Cha,
BAB,CMB,CMD
)
values
(
?,?,
?,?,
?,?,?,
?,?,?,
?,?,?,?,?,?,
?,?,?
)'''
self.connection.execute(query, values)
def commit_and_close(self):
'''Commits any uncommitted changes to the SQLite database and
closes the connection
'''
self.connection.commit()
self.connection.close()
def export_as_csv(self, file_name='creature.csv'):
'''Exports the data in this object as a .csv file.
:param file_name: the name of the output csv file
'''
cursor = self.connection.cursor()
data = cursor.execute('select * from creatures')
# write data to output file
csv_file = open(file_name, 'w')
writer = csv.writer(csv_file)
writer.writerow([
'id',
'name', 'CR',
'hp', 'HD',
'ac', 'touch_ac', 'flatfooted_ac',
'Fort', 'Ref', 'Will',
'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
'BAB', 'CMB', 'CMD'
])
writer.writerows(data)
csv_file.close()
def is_creature_in_db(self, creature):
''' Determines whether or not a database entry exists for a
given creature
:returns True if entry exists, False otherwise
'''
# set value of CR column depending on flag
creature_cr = creature.cr
if self.using_nominal_cr:
creature_cr = 'CR ' + creature.cr
# query database for creature
values = (creature.name, creature_cr)
query = '''select * from creatures where name=? and cr=?'''
cursor = self.connection.cursor()
cursor.execute(query, values)
return cursor.fetchone() is not None | '''Class for storing Creature objects in a SQLite database.'''
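# --- Editor's note: hypothetical usage sketch, not part of the original
# --- module. `_StubCreature` stands in for the real Creature model (defined
# --- elsewhere) and carries only the attributes CreatureDB actually reads.
class _StubCreature(object):
    def __init__(self):
        self.name = 'Goblin'
        self.cr = '1.0'  # add_creature() calls float() on this value
        self.hp = 6
        self.hd = 1
        self.ac = {'AC': 16, 'touch': 13, 'flat-footed': 14}
        self.saves = {'Fort': 2, 'Ref': 4, 'Will': -1}
        self.ability_scores = {'Str': 11, 'Dex': 15, 'Con': 12,
                               'Int': 10, 'Wis': 9, 'Cha': 6}
        self.bab = 1
        self.cmb = 0
        self.cmd = 12

if __name__ == '__main__':
    db = CreatureDB()
    db.add_creature(_StubCreature())  # duplicates are silently ignored
    db.export_as_csv('creature.csv')
    db.commit_and_close()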
| random_line_split |
creatureDB.py | '''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object):
'''Class for storing Creature objects in a SQLite database.'''
def __init__(self, name='creature.db', use_nominal_cr=False):
self.min_cr = 0.0
self.max_cr = float('inf')
# set flags
self.using_nominal_cr = use_nominal_cr
# initialize database
self.connection = sqlite3.connect(name)
self.connection.text_factory = str
self._create_table()
def _construct_table_columns(self):
'''Constructs a tuple that defines the columns in
the "creatures" table
:returns tuple that defines the columns in "creatures" table
'''
columns = ('id integer primary key autoincrement',
'name varchar(45)')
# set type of CR column depending on flag
if self.using_nominal_cr:
columns = columns + ('CR varchar(10)',)
else:
columns = columns + ('CR real',)
# add the remaining database fields to column tuple
main_entry_columns = (
'hp integer', 'HD integer',
'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
'Fort integer', 'Ref integer', 'Will integer',
'Str integer', 'Dex integer', 'Con integer',
'Int integer', 'Wis integer', 'Cha integer',
'BAB integer', 'CMB integer', 'CMD integer'
)
columns = columns + main_entry_columns
return columns
def _construct_tuple_insert_values(self, creature):
'''Constructs a tuple of Creature values for insertion into
the "creatures" table
:returns tuple of values for insertion into "creatures" table
'''
values = (creature.name,)
# set value of CR column depending on flag
if self.using_nominal_cr:
values = values + ('CR ' + creature.cr,)
else:
values = values + (creature.cr,)
# add the remaining database fields to values tuple
main_entry_values = (
creature.hp,
creature.hd,
creature.ac['AC'],
creature.ac['touch'],
creature.ac['flat-footed'],
creature.saves['Fort'],
creature.saves['Ref'],
creature.saves['Will'],
creature.ability_scores['Str'],
creature.ability_scores['Dex'],
creature.ability_scores['Con'],
creature.ability_scores['Int'],
creature.ability_scores['Wis'],
creature.ability_scores['Cha'],
creature.bab,
creature.cmb,
creature.cmd
)
values = values + main_entry_values
return values
def _create_table(self):
'''Creates the "creatures" SQLite table for storing
Creature objects if it does not already exist
'''
# create table
columns = self._construct_table_columns()
query = '''create table if not exists creatures
(
%s,%s,
%s,%s,
%s,%s,%s,
%s,%s,%s,
%s,%s,%s,%s,%s,%s,%s,
%s, %s, %s
)''' % columns
self.connection.execute(query)
def add_creature(self, creature):
'''Adds a Creature object as a row in the appropriate table
of the SQLite database
:param creature: a Creature object to be added to the database
'''
# check that creature CR is within desired range
creature_cr = float(creature.cr)
if creature_cr < self.min_cr or creature_cr > self.max_cr:
return
# ignore duplicate creatures
if self.is_creature_in_db(creature):
|
# insert creature into database
values = self._construct_tuple_insert_values(creature)
query = '''insert into creatures
(
name,CR,
hp,HD,
ac,touch_ac,flatfooted_ac,
Fort, Ref, Will,
Str,Dex,Con,Int,Wis,Cha,
BAB,CMB,CMD
)
values
(
?,?,
?,?,
?,?,?,
?,?,?,
?,?,?,?,?,?,
?,?,?
)'''
self.connection.execute(query, values)
def commit_and_close(self):
'''Commits any uncommitted changes to the SQLite database and
closes the connection
'''
self.connection.commit()
self.connection.close()
def export_as_csv(self, file_name='creature.csv'):
'''Exports the data in this object as a .csv file.
:param file_name: the name of the output csv file
'''
cursor = self.connection.cursor()
data = cursor.execute('select * from creatures')
# write data to output file
csv_file = open(file_name, 'w')
writer = csv.writer(csv_file)
writer.writerow([
'id',
'name', 'CR',
'hp', 'HD',
'ac', 'touch_ac', 'flatfooted_ac',
'Fort', 'Ref', 'Will',
'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
'BAB', 'CMB', 'CMD'
])
writer.writerows(data)
csv_file.close()
def is_creature_in_db(self, creature):
''' Determines whether or not a database entry exists for a
given creature
:returns True if entry exists, False otherwise
'''
# set value of CR column depending on flag
creature_cr = creature.cr
if self.using_nominal_cr:
creature_cr = 'CR ' + creature.cr
# query database for creature
values = (creature.name, creature_cr)
query = '''select * from creatures where name=? and cr=?'''
cursor = self.connection.cursor()
cursor.execute(query, values)
return cursor.fetchone() is not None
| return | conditional_block |
creatureDB.py | '''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object):
'''Class for storing Creature objects in a SQLite database.'''
def __init__(self, name='creature.db', use_nominal_cr=False):
self.min_cr = 0.0
self.max_cr = float('inf')
# set flags
self.using_nominal_cr = use_nominal_cr
# initialize database
self.connection = sqlite3.connect(name)
self.connection.text_factory = str
self._create_table()
def _construct_table_columns(self):
'''Constructs a tuple that defines the columns in
the "creatures" table
:returns tuple that defines the columns in "creatures" table
'''
columns = ('id integer primary key autoincrement',
'name varchar(45)')
# set type of CR column depending on flag
if self.using_nominal_cr:
columns = columns + ('CR varchar(10)',)
else:
columns = columns + ('CR real',)
# add the remaining database fields to column tuple
main_entry_columns = (
'hp integer', 'HD integer',
'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
'Fort integer', 'Ref integer', 'Will integer',
'Str integer', 'Dex integer', 'Con integer',
'Int integer', 'Wis integer', 'Cha integer',
'BAB integer', 'CMB integer', 'CMD integer'
)
columns = columns + main_entry_columns
return columns
def _construct_tuple_insert_values(self, creature):
'''Constructs a tuple of Creature values for insertion into
the "creatures" table
:returns tuple of values for insertion into "creatures" table
'''
values = (creature.name,)
# set value of CR column depending on flag
if self.using_nominal_cr:
values = values + ('CR ' + creature.cr,)
else:
values = values + (creature.cr,)
# add the remaining database fields to values tuple
main_entry_values = (
creature.hp,
creature.hd,
creature.ac['AC'],
creature.ac['touch'],
creature.ac['flat-footed'],
creature.saves['Fort'],
creature.saves['Ref'],
creature.saves['Will'],
creature.ability_scores['Str'],
creature.ability_scores['Dex'],
creature.ability_scores['Con'],
creature.ability_scores['Int'],
creature.ability_scores['Wis'],
creature.ability_scores['Cha'],
creature.bab,
creature.cmb,
creature.cmd
)
values = values + main_entry_values
return values
def _create_table(self):
'''Creates the "creatures" SQLite table for storing
Creature objects if it does not already exist
'''
# create table
columns = self._construct_table_columns()
query = '''create table if not exists creatures
(
%s,%s,
%s,%s,
%s,%s,%s,
%s,%s,%s,
%s,%s,%s,%s,%s,%s,%s,
%s, %s, %s
)''' % columns
self.connection.execute(query)
def add_creature(self, creature):
'''Adds a Creature object as a row in the appropriate table
of the SQLite database
:param creature: a Creature object to be added to the database
'''
# check that creature CR is within desired range
creature_cr = float(creature.cr)
if creature_cr < self.min_cr or creature_cr > self.max_cr:
return
# ignore duplicate creatures
if self.is_creature_in_db(creature):
return
# insert creature into database
values = self._construct_tuple_insert_values(creature)
query = '''insert into creatures
(
name,CR,
hp,HD,
ac,touch_ac,flatfooted_ac,
Fort, Ref, Will,
Str,Dex,Con,Int,Wis,Cha,
BAB,CMB,CMD
)
values
(
?,?,
?,?,
?,?,?,
?,?,?,
?,?,?,?,?,?,
?,?,?
)'''
self.connection.execute(query, values)
def commit_and_close(self):
'''Commits any uncommitted changes to the SQLite database and
closes the connection
'''
self.connection.commit()
self.connection.close()
def | (self, file_name='creature.csv'):
'''Exports the data in this object as a .csv file.
:param file_name: the name of the output csv file
'''
cursor = self.connection.cursor()
data = cursor.execute('select * from creatures')
# write data to output file
csv_file = open(file_name, 'w')
writer = csv.writer(csv_file)
writer.writerow([
'id',
'name', 'CR',
'hp', 'HD',
'ac', 'touch_ac', 'flatfooted_ac',
'Fort', 'Ref', 'Will',
'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
'BAB', 'CMB', 'CMD'
])
writer.writerows(data)
csv_file.close()
def is_creature_in_db(self, creature):
''' Determines whether or not a database entry exists for a
given creature
:returns True if entry exists, False otherwise
'''
# set value of CR column depending on flag
creature_cr = creature.cr
if self.using_nominal_cr:
creature_cr = 'CR ' + creature.cr
# query database for creature
values = (creature.name, creature_cr)
query = '''select * from creatures where name=? and cr=?'''
cursor = self.connection.cursor()
cursor.execute(query, values)
return cursor.fetchone() is not None
| export_as_csv | identifier_name |
creatureDB.py | '''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object):
'''Class for storing Creature objects in a SQLite database.'''
def __init__(self, name='creature.db', use_nominal_cr=False):
self.min_cr = 0.0
self.max_cr = float('inf')
# set flags
self.using_nominal_cr = use_nominal_cr
# initialize database
self.connection = sqlite3.connect(name)
self.connection.text_factory = str
self._create_table()
def _construct_table_columns(self):
'''Constructs a tuple that defines the columns in
the "creatures" table
:returns tuple that defines the columns in "creatures" table
'''
columns = ('id integer primary key autoincrement',
'name varchar(45)')
# set type of CR column depending on flag
if self.using_nominal_cr:
columns = columns + ('CR varchar(10)',)
else:
columns = columns + ('CR real',)
# add the remaining database fields to column tuple
main_entry_columns = (
'hp integer', 'HD integer',
'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
'Fort integer', 'Ref integer', 'Will integer',
'Str integer', 'Dex integer', 'Con integer',
'Int integer', 'Wis integer', 'Cha integer',
'BAB integer', 'CMB integer', 'CMD integer'
)
columns = columns + main_entry_columns
return columns
def _construct_tuple_insert_values(self, creature):
'''Constructs a tuple of Creature values for insertion into
the "creatures" table
:returns tuple of values for insertion into "creatures" table
'''
values = (creature.name,)
# set value of CR column depending on flag
if self.using_nominal_cr:
values = values + ('CR ' + creature.cr,)
else:
values = values + (creature.cr,)
# add the remaining database fields to values tuple
main_entry_values = (
creature.hp,
creature.hd,
creature.ac['AC'],
creature.ac['touch'],
creature.ac['flat-footed'],
creature.saves['Fort'],
creature.saves['Ref'],
creature.saves['Will'],
creature.ability_scores['Str'],
creature.ability_scores['Dex'],
creature.ability_scores['Con'],
creature.ability_scores['Int'],
creature.ability_scores['Wis'],
creature.ability_scores['Cha'],
creature.bab,
creature.cmb,
creature.cmd
)
values = values + main_entry_values
return values
def _create_table(self):
'''Creates the "creatures" SQLite table for storing
Creature objects if it does not already exist
'''
# create table
columns = self._construct_table_columns()
query = '''create table if not exists creatures
(
%s,%s,
%s,%s,
%s,%s,%s,
%s,%s,%s,
%s,%s,%s,%s,%s,%s,%s,
%s, %s, %s
)''' % columns
self.connection.execute(query)
def add_creature(self, creature):
|
def commit_and_close(self):
'''Commits any uncommitted changes to the SQLite database and
closes the connection
'''
self.connection.commit()
self.connection.close()
def export_as_csv(self, file_name='creature.csv'):
'''Exports the data in this object as a .csv file.
:param file_name: the name of the output csv file
'''
cursor = self.connection.cursor()
data = cursor.execute('select * from creatures')
# write data to output file
csv_file = open(file_name, 'w')
writer = csv.writer(csv_file)
writer.writerow([
'id',
'name', 'CR',
'hp', 'HD',
'ac', 'touch_ac', 'flatfooted_ac',
'Fort', 'Ref', 'Will',
'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
'BAB', 'CMB', 'CMD'
])
writer.writerows(data)
csv_file.close()
def is_creature_in_db(self, creature):
''' Determines whether or not a database entry exists for a
given creature
:returns True if entry exists, False otherwise
'''
# set value of CR column depending on flag
creature_cr = creature.cr
if self.using_nominal_cr:
creature_cr = 'CR ' + creature.cr
# query database for creature
values = (creature.name, creature_cr)
query = '''select * from creatures where name=? and cr=?'''
cursor = self.connection.cursor()
cursor.execute(query, values)
return cursor.fetchone() is not None
| '''Adds a Creature object as a row in the appropriate table
of the SQLite database
:param creature: a Creature object to be added to the database
'''
# check that creature CR is within desired range
creature_cr = float(creature.cr)
if creature_cr < self.min_cr or creature_cr > self.max_cr:
return
# ignore duplicate creatures
if self.is_creature_in_db(creature):
return
# insert creature into database
values = self._construct_tuple_insert_values(creature)
query = '''insert into creatures
(
name,CR,
hp,HD,
ac,touch_ac,flatfooted_ac,
Fort, Ref, Will,
Str,Dex,Con,Int,Wis,Cha,
BAB,CMB,CMD
)
values
(
?,?,
?,?,
?,?,?,
?,?,?,
?,?,?,?,?,?,
?,?,?
)'''
self.connection.execute(query, values) | identifier_body |
ErrorObservable.d.ts | import { Scheduler } from '../Scheduler';
import { Observable } from '../Observable';
import { TeardownLogic } from '../Subscription';
export interface DispatchArg {
error: any;
subscriber: any;
}
/**
* We need this JSDoc comment for affecting ESDoc.
* @extends {Ignored}
* @hide true
*/
export declare class ErrorObservable<T> extends Observable<any> {
error: T;
private scheduler;
/**
* Creates an Observable that emits no items to the Observer and immediately
* emits an error notification.
*
* <span class="informal">Just emits 'error', and nothing else.
* </span>
*
* <img src="./img/throw.png" width="100%">
*
* This static operator is useful for creating a simple Observable that only
* emits the error notification. It can be used for composing with other
* Observables, such as in a {@link mergeMap}.
*
* @example <caption>Emit the number 7, then emit an error.</caption>
* var result = Rx.Observable.throw(new Error('oops!')).startWith(7); | * var result = interval.mergeMap(x =>
* x === 13 ?
* Rx.Observable.throw('Thirteens are bad') :
* Rx.Observable.of('a', 'b', 'c')
* );
* result.subscribe(x => console.log(x), e => console.error(e));
*
* @see {@link create}
* @see {@link empty}
* @see {@link never}
* @see {@link of}
*
* @param {any} error The particular Error to pass to the error notification.
* @param {Scheduler} [scheduler] A {@link Scheduler} to use for scheduling
* the emission of the error notification.
* @return {Observable} An error Observable: emits only the error notification
* using the given error argument.
* @static true
* @name throw
* @owner Observable
*/
static create<T>(error: T, scheduler?: Scheduler): ErrorObservable<T>;
static dispatch(arg: DispatchArg): void;
constructor(error: T, scheduler?: Scheduler);
protected _subscribe(subscriber: any): TeardownLogic;
} | * result.subscribe(x => console.log(x), e => console.error(e));
*
 * @example <caption>Maps and flattens numbers to the sequence 'a', 'b', 'c', but throws an error for 13</caption>
* var interval = Rx.Observable.interval(1000); | random_line_split |
ErrorObservable.d.ts | import { Scheduler } from '../Scheduler';
import { Observable } from '../Observable';
import { TeardownLogic } from '../Subscription';
export interface DispatchArg {
error: any;
subscriber: any;
}
/**
* We need this JSDoc comment for affecting ESDoc.
* @extends {Ignored}
* @hide true
*/
export declare class | <T> extends Observable<any> {
error: T;
private scheduler;
/**
* Creates an Observable that emits no items to the Observer and immediately
* emits an error notification.
*
* <span class="informal">Just emits 'error', and nothing else.
* </span>
*
* <img src="./img/throw.png" width="100%">
*
* This static operator is useful for creating a simple Observable that only
* emits the error notification. It can be used for composing with other
* Observables, such as in a {@link mergeMap}.
*
* @example <caption>Emit the number 7, then emit an error.</caption>
* var result = Rx.Observable.throw(new Error('oops!')).startWith(7);
* result.subscribe(x => console.log(x), e => console.error(e));
*
 * @example <caption>Maps and flattens numbers to the sequence 'a', 'b', 'c', but throws an error for 13</caption>
* var interval = Rx.Observable.interval(1000);
* var result = interval.mergeMap(x =>
* x === 13 ?
* Rx.Observable.throw('Thirteens are bad') :
* Rx.Observable.of('a', 'b', 'c')
* );
* result.subscribe(x => console.log(x), e => console.error(e));
*
* @see {@link create}
* @see {@link empty}
* @see {@link never}
* @see {@link of}
*
* @param {any} error The particular Error to pass to the error notification.
* @param {Scheduler} [scheduler] A {@link Scheduler} to use for scheduling
* the emission of the error notification.
* @return {Observable} An error Observable: emits only the error notification
* using the given error argument.
* @static true
* @name throw
* @owner Observable
*/
static create<T>(error: T, scheduler?: Scheduler): ErrorObservable<T>;
static dispatch(arg: DispatchArg): void;
constructor(error: T, scheduler?: Scheduler);
protected _subscribe(subscriber: any): TeardownLogic;
}
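// Editor's note: because `throw` is a reserved word in JavaScript, call sites
// typically use the static form shown in the JSDoc above
// (`Rx.Observable.throw(...)`); in module form, RxJS 5 exposes the same
// creation function under the name `_throw` (an assumption worth verifying
// against your RxJS version).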
| ErrorObservable | identifier_name |
response.py | """
VerseBot for Reddit
By Matthieu Grieger
Continued By Team VerseBot
response.py
Copyright (c) 2015 Matthieu Grieger (MIT License)
"""
MAXIMUM_MESSAGE_LENGTH = 4000
class Response:
""" Class that holds the properties and methods of a comment
response. """
def __init__(self, message, parser, link=None):
""" Initializes a Response object. """
self.verse_list = list()
self.message = message
self.parser = parser
self.response = ""
if link is not None:
self.link = link
else:
self.link = ''
def add_verse(self, verse):
""" Adds a verse to the verse list.
:param verse: Verse to add to the list of verses
"""
self.verse_list.append(verse)
def is_duplicate_verse(self, verse):
""" Checks the incoming verse against the verse list to make sure
it is not a duplicate.
:param verse: Verse to check duplicates for
"""
for v in self.verse_list:
if (v.book == verse.book and
v.chapter == verse.chapter and
v.verse == verse.verse and
v.translation == verse.translation):
return True
return False
def construct_message(self):
""" Constructs a message response. """
for verse in self.verse_list:
verse.get_contents()
if verse.contents is not None:
if verse.verse is not None:
self.response += ("[**%s %d:%s | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.verse, verse.translation_title,
verse.permalink))
else:
|
self.response += verse.contents
self.response += "\n\n"
if self.response == "":
return None
else:
if self.exceeds_max_length():
self.response = self.generate_overflow_response()
# self.response += self.get_comment_footer()
return self.response
def exceeds_max_length(self):
""" Returns true if the current response exceeds the maximum comment
length, returns false otherwise. """
return len(self.response) > MAXIMUM_MESSAGE_LENGTH
def generate_overflow_response(self):
""" Constructs and generates an overflow comment whenever the comment
exceeds the character limit set by MAXIMUM_MESSAGE_LENGTH. Instead of
posting the contents of the verse(s) in the comment, it links to
webpages that contain the contents of the verse(s). """
comment = ("The contents of the verse(s) you quoted exceed the %d "
"character limit. Instead, here are links to the "
"verse(s)!\n\n" % MAXIMUM_MESSAGE_LENGTH)
for verse in self.verse_list:
if verse.translation == "JPS":
overflow_link = verse.permalink
else:
if verse.verse is not None:
overflow_link = ("https://www.biblegateway.com/passage/"
"?search=%s+%s:%s&version=%s"
% (verse.book, verse.chapter, verse.verse,
verse.translation))
else:
overflow_link = verse.permalink
if verse.verse is not None:
comment += ("- [%s %d:%s (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.verse,
verse.translation, overflow_link))
else:
comment += ("- [%s %d (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.translation,
overflow_link))
return comment
'''
def get_comment_footer(self):
""" Returns the footer for the comment. """
return ("\n***\n[^Code](https://github.com/Team-VerseBot/versebot) ^|"
" ^/r/VerseBot ^| [^Contact ^Devs](https://github.com/"
"Team-VerseBot/versebot/issues) ^|"
" [^Usage](https://github.com/Team-VerseBot/versebot/blob/"
"master/README.md) ^|"
" [^Changelog](https://github.com/Team-VerseBot/versebot/blob/"
"master/CHANGELOG.md) ^|"
" [^Stats](http://adamgrieger.com/versebot/) ^|"
" [^Set ^a ^Default ^Translation](http://adamgrieger.com/"
"versebot#defaults) \n\n"
"^All ^texts ^provided ^by [^BibleGateway]"
"(http://biblegateway.com) ^and [^Bible ^Hub]"
"(http://biblehub.com)^. \n\n"
" ^Mistake? ^%(user)s ^can [^edit](/message/compose/"
"?to=%(bot)s&subject=edit+request&message={%(link)s} "
"Please+enter+your+revised+verse+quotations+below+in+the+usual"
"+bracketed+syntax.)"
" ^or [^delete](/message/compose/?to=%(bot)s&subject=delete"
"+request&message={%(link)s} "
"This+action+cannot+be+reversed!) ^this ^comment."
% {"user": self.message.author, "bot": REDDIT_USERNAME,
"link": self.link})
'''
| self.response += ("[**%s %d | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.translation_title,
verse.permalink)) | conditional_block |
response.py | """
VerseBot for Reddit
By Matthieu Grieger
Continued By Team VerseBot
response.py
Copyright (c) 2015 Matthieu Grieger (MIT License)
"""
MAXIMUM_MESSAGE_LENGTH = 4000
class Response:
""" Class that holds the properties and methods of a comment
response. """
def __init__(self, message, parser, link=None):
""" Initializes a Response object. """
self.verse_list = list()
self.message = message
self.parser = parser
self.response = ""
if link is not None:
self.link = link
else:
self.link = ''
def add_verse(self, verse):
""" Adds a verse to the verse list.
:param verse: Verse to add to the list of verses
"""
self.verse_list.append(verse)
def is_duplicate_verse(self, verse):
""" Checks the incoming verse against the verse list to make sure
it is not a duplicate.
:param verse: Verse to check duplicates for
"""
for v in self.verse_list:
if (v.book == verse.book and
v.chapter == verse.chapter and
v.verse == verse.verse and
v.translation == verse.translation):
return True
return False
def construct_message(self):
""" Constructs a message response. """
for verse in self.verse_list:
verse.get_contents()
if verse.contents is not None:
if verse.verse is not None:
self.response += ("[**%s %d:%s | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.verse, verse.translation_title,
verse.permalink))
else:
self.response += ("[**%s %d | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.translation_title,
verse.permalink))
self.response += verse.contents
self.response += "\n\n"
if self.response == "":
return None
else:
if self.exceeds_max_length():
self.response = self.generate_overflow_response()
# self.response += self.get_comment_footer()
return self.response
def exceeds_max_length(self):
|
def generate_overflow_response(self):
""" Constructs and generates an overflow comment whenever the comment
exceeds the character limit set by MAXIMUM_MESSAGE_LENGTH. Instead of
posting the contents of the verse(s) in the comment, it links to
webpages that contain the contents of the verse(s). """
comment = ("The contents of the verse(s) you quoted exceed the %d "
"character limit. Instead, here are links to the "
"verse(s)!\n\n" % MAXIMUM_MESSAGE_LENGTH)
for verse in self.verse_list:
if verse.translation == "JPS":
overflow_link = verse.permalink
else:
if verse.verse is not None:
overflow_link = ("https://www.biblegateway.com/passage/"
"?search=%s+%s:%s&version=%s"
% (verse.book, verse.chapter, verse.verse,
verse.translation))
else:
overflow_link = verse.permalink
if verse.verse is not None:
comment += ("- [%s %d:%s (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.verse,
verse.translation, overflow_link))
else:
comment += ("- [%s %d (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.translation,
overflow_link))
return comment
'''
def get_comment_footer(self):
""" Returns the footer for the comment. """
return ("\n***\n[^Code](https://github.com/Team-VerseBot/versebot) ^|"
" ^/r/VerseBot ^| [^Contact ^Devs](https://github.com/"
"Team-VerseBot/versebot/issues) ^|"
" [^Usage](https://github.com/Team-VerseBot/versebot/blob/"
"master/README.md) ^|"
" [^Changelog](https://github.com/Team-VerseBot/versebot/blob/"
"master/CHANGELOG.md) ^|"
" [^Stats](http://adamgrieger.com/versebot/) ^|"
" [^Set ^a ^Default ^Translation](http://adamgrieger.com/"
"versebot#defaults) \n\n"
"^All ^texts ^provided ^by [^BibleGateway]"
"(http://biblegateway.com) ^and [^Bible ^Hub]"
"(http://biblehub.com)^. \n\n"
" ^Mistake? ^%(user)s ^can [^edit](/message/compose/"
"?to=%(bot)s&subject=edit+request&message={%(link)s} "
"Please+enter+your+revised+verse+quotations+below+in+the+usual"
"+bracketed+syntax.)"
" ^or [^delete](/message/compose/?to=%(bot)s&subject=delete"
"+request&message={%(link)s} "
"This+action+cannot+be+reversed!) ^this ^comment."
% {"user": self.message.author, "bot": REDDIT_USERNAME,
"link": self.link})
'''
| """ Returns true if the current response exceeds the maximum comment
length, returns false otherwise. """
return len(self.response) > MAXIMUM_MESSAGE_LENGTH | identifier_body |
response.py | """
VerseBot for Reddit
By Matthieu Grieger
Continued By Team VerseBot
response.py
Copyright (c) 2015 Matthieu Grieger (MIT License)
"""
MAXIMUM_MESSAGE_LENGTH = 4000
class Response:
""" Class that holds the properties and methods of a comment
response. """
def __init__(self, message, parser, link=None):
""" Initializes a Response object. """
self.verse_list = list()
self.message = message
self.parser = parser
self.response = ""
if link is not None:
self.link = link
else:
self.link = ''
def add_verse(self, verse):
""" Adds a verse to the verse list.
:param verse: Verse to add to the list of verses
"""
self.verse_list.append(verse)
def is_duplicate_verse(self, verse):
""" Checks the incoming verse against the verse list to make sure
it is not a duplicate.
:param verse: Verse to check duplicates for
"""
for v in self.verse_list:
if (v.book == verse.book and
v.chapter == verse.chapter and
v.verse == verse.verse and
v.translation == verse.translation):
return True
return False
def construct_message(self):
""" Constructs a message response. """
for verse in self.verse_list:
verse.get_contents()
if verse.contents is not None:
if verse.verse is not None:
self.response += ("[**%s %d:%s | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.verse, verse.translation_title,
verse.permalink))
else:
self.response += ("[**%s %d | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.translation_title,
verse.permalink))
self.response += verse.contents
self.response += "\n\n"
if self.response == "":
return None
else:
if self.exceeds_max_length():
self.response = self.generate_overflow_response()
# self.response += self.get_comment_footer()
return self.response | return len(self.response) > MAXIMUM_MESSAGE_LENGTH
def generate_overflow_response(self):
""" Constructs and generates an overflow comment whenever the comment
exceeds the character limit set by MAXIMUM_MESSAGE_LENGTH. Instead of
posting the contents of the verse(s) in the comment, it links to
webpages that contain the contents of the verse(s). """
comment = ("The contents of the verse(s) you quoted exceed the %d "
"character limit. Instead, here are links to the "
"verse(s)!\n\n" % MAXIMUM_MESSAGE_LENGTH)
for verse in self.verse_list:
if verse.translation == "JPS":
overflow_link = verse.permalink
else:
if verse.verse is not None:
overflow_link = ("https://www.biblegateway.com/passage/"
"?search=%s+%s:%s&version=%s"
% (verse.book, verse.chapter, verse.verse,
verse.translation))
else:
overflow_link = verse.permalink
if verse.verse is not None:
comment += ("- [%s %d:%s (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.verse,
verse.translation, overflow_link))
else:
comment += ("- [%s %d (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.translation,
overflow_link))
return comment
'''
def get_comment_footer(self):
""" Returns the footer for the comment. """
return ("\n***\n[^Code](https://github.com/Team-VerseBot/versebot) ^|"
" ^/r/VerseBot ^| [^Contact ^Devs](https://github.com/"
"Team-VerseBot/versebot/issues) ^|"
" [^Usage](https://github.com/Team-VerseBot/versebot/blob/"
"master/README.md) ^|"
" [^Changelog](https://github.com/Team-VerseBot/versebot/blob/"
"master/CHANGELOG.md) ^|"
" [^Stats](http://adamgrieger.com/versebot/) ^|"
" [^Set ^a ^Default ^Translation](http://adamgrieger.com/"
"versebot#defaults) \n\n"
"^All ^texts ^provided ^by [^BibleGateway]"
"(http://biblegateway.com) ^and [^Bible ^Hub]"
"(http://biblehub.com)^. \n\n"
" ^Mistake? ^%(user)s ^can [^edit](/message/compose/"
"?to=%(bot)s&subject=edit+request&message={%(link)s} "
"Please+enter+your+revised+verse+quotations+below+in+the+usual"
"+bracketed+syntax.)"
" ^or [^delete](/message/compose/?to=%(bot)s&subject=delete"
"+request&message={%(link)s} "
"This+action+cannot+be+reversed!) ^this ^comment."
% {"user": self.message.author, "bot": REDDIT_USERNAME,
"link": self.link})
''' |
def exceeds_max_length(self):
""" Returns true if the current response exceeds the maximum comment
length, returns false otherwise. """
| random_line_split |
response.py | """
VerseBot for Reddit
By Matthieu Grieger
Continued By Team VerseBot
response.py
Copyright (c) 2015 Matthieu Grieger (MIT License)
"""
MAXIMUM_MESSAGE_LENGTH = 4000
class Response:
""" Class that holds the properties and methods of a comment
response. """
def __init__(self, message, parser, link=None):
""" Initializes a Response object. """
self.verse_list = list()
self.message = message
self.parser = parser
self.response = ""
if link is not None:
self.link = link
else:
self.link = ''
def | (self, verse):
""" Adds a verse to the verse list.
:param verse: Verse to add to the list of verses
"""
self.verse_list.append(verse)
def is_duplicate_verse(self, verse):
""" Checks the incoming verse against the verse list to make sure
it is not a duplicate.
:param verse: Verse to check duplicates for
"""
for v in self.verse_list:
if (v.book == verse.book and
v.chapter == verse.chapter and
v.verse == verse.verse and
v.translation == verse.translation):
return True
return False
def construct_message(self):
""" Constructs a message response. """
for verse in self.verse_list:
verse.get_contents()
if verse.contents is not None:
if verse.verse is not None:
self.response += ("[**%s %d:%s | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.verse, verse.translation_title,
verse.permalink))
else:
self.response += ("[**%s %d | %s**](%s)\n\n>"
% (verse.book, verse.chapter,
verse.translation_title,
verse.permalink))
self.response += verse.contents
self.response += "\n\n"
if self.response == "":
return None
else:
if self.exceeds_max_length():
self.response = self.generate_overflow_response()
# self.response += self.get_comment_footer()
return self.response
def exceeds_max_length(self):
""" Returns true if the current response exceeds the maximum comment
length, returns false otherwise. """
return len(self.response) > MAXIMUM_MESSAGE_LENGTH
def generate_overflow_response(self):
""" Constructs and generates an overflow comment whenever the comment
exceeds the character limit set by MAXIMUM_MESSAGE_LENGTH. Instead of
posting the contents of the verse(s) in the comment, it links to
webpages that contain the contents of the verse(s). """
comment = ("The contents of the verse(s) you quoted exceed the %d "
"character limit. Instead, here are links to the "
"verse(s)!\n\n" % MAXIMUM_MESSAGE_LENGTH)
for verse in self.verse_list:
if verse.translation == "JPS":
overflow_link = verse.permalink
else:
if verse.verse is not None:
overflow_link = ("https://www.biblegateway.com/passage/"
"?search=%s+%s:%s&version=%s"
% (verse.book, verse.chapter, verse.verse,
verse.translation))
else:
overflow_link = verse.permalink
if verse.verse is not None:
comment += ("- [%s %d:%s (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.verse,
verse.translation, overflow_link))
else:
comment += ("- [%s %d (%s)](%s)\n\n"
% (verse.book, verse.chapter, verse.translation,
overflow_link))
return comment
'''
def get_comment_footer(self):
""" Returns the footer for the comment. """
return ("\n***\n[^Code](https://github.com/Team-VerseBot/versebot) ^|"
" ^/r/VerseBot ^| [^Contact ^Devs](https://github.com/"
"Team-VerseBot/versebot/issues) ^|"
" [^Usage](https://github.com/Team-VerseBot/versebot/blob/"
"master/README.md) ^|"
" [^Changelog](https://github.com/Team-VerseBot/versebot/blob/"
"master/CHANGELOG.md) ^|"
" [^Stats](http://adamgrieger.com/versebot/) ^|"
" [^Set ^a ^Default ^Translation](http://adamgrieger.com/"
"versebot#defaults) \n\n"
"^All ^texts ^provided ^by [^BibleGateway]"
"(http://biblegateway.com) ^and [^Bible ^Hub]"
"(http://biblehub.com)^. \n\n"
" ^Mistake? ^%(user)s ^can [^edit](/message/compose/"
"?to=%(bot)s&subject=edit+request&message={%(link)s} "
"Please+enter+your+revised+verse+quotations+below+in+the+usual"
"+bracketed+syntax.)"
" ^or [^delete](/message/compose/?to=%(bot)s&subject=delete"
"+request&message={%(link)s} "
"This+action+cannot+be+reversed!) ^this ^comment."
% {"user": self.message.author, "bot": REDDIT_USERNAME,
"link": self.link})
'''
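# --- Editor's note: hypothetical usage sketch, not part of the original
# --- module. `_StubVerse` stands in for the real Verse model and provides
# --- only the attributes Response touches; message/parser are unused by the
# --- methods exercised here, so None is passed for both.
class _StubVerse(object):
    def __init__(self):
        self.book, self.chapter, self.verse = 'John', 3, '16'
        self.translation = 'ESV'
        self.translation_title = 'English Standard Version'
        self.permalink = 'https://example.com/john-3-16'  # placeholder URL
        self.contents = None

    def get_contents(self):
        self.contents = 'For God so loved the world...'

if __name__ == '__main__':
    response = Response(None, None)
    verse = _StubVerse()
    if not response.is_duplicate_verse(verse):
        response.add_verse(verse)
    print(response.construct_message())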
| add_verse | identifier_name |
index.d.ts | /*
* @license Apache-2.0
*
* Copyright (c) 2019 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TypeScript Version: 2.0
/**
* Returns the expected value of an exponential distribution.
*
* ## Notes
*
* - If provided a negative value for `λ`, the function returns `NaN`.
*
* @param lambda - rate parameter
* @returns expected value
*
* @example
* var v = mean( 9.0 );
* // returns ~0.111
*
* @example
* var v = mean( 1.0 );
* // returns 1.0
* | * @example
* var v = mean( -0.2 );
* // returns NaN
*
* @example
* var v = mean( NaN );
* // returns NaN
*/
declare function mean( lambda: number ): number;
// EXPORTS //
export = mean; | random_line_split |
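// Editor's note (illustrative): for an exponentially distributed X with rate
// parameter λ > 0, the expected value is E[X] = 1/λ, so `mean( 9.0 )` returns
// 1/9 ≈ 0.111, matching the example in the JSDoc above.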
|
index.js | require('dotenv').config({ silent: true });
var Express = require('express');
var path = require('path');
var fs = require('fs');
var merge = require('lodash/merge');
var proxy = require('proxy-middleware');
var ejs = require('ejs');
var config = require('./config');
var server = new Express();
server.set('port', config.PORT);
server.engine('html', require('ejs').renderFile);
server.set('view engine', 'ejs');
server.set('views', path.resolve(__dirname, '../www'));
server.locals.CONFIG = escape(JSON.stringify(config));
server.use(config.API_PROXY_PATH, proxy(config.API_ENDPOINT));
server.get('/', function (req, res) {
res.render('index.html');
});
server.use(Express.static(path.resolve(__dirname, '../www')));
server.get('/404', function (req, res) {
res.render('404.html');
});
server.listen(server.get('port'), function (err) {
if (err) {
console.log('error while starting server', err);
}
console.log('Gandalf is started to listen at localhost:' + server.get('port')); | }); | random_line_split |
|
index.js | require('dotenv').config({ silent: true });
var Express = require('express');
var path = require('path');
var fs = require('fs');
var merge = require('lodash/merge');
var proxy = require('proxy-middleware');
var ejs = require('ejs');
var config = require('./config');
var server = new Express();
server.set('port', config.PORT);
server.engine('html', require('ejs').renderFile);
server.set('view engine', 'ejs');
server.set('views', path.resolve(__dirname, '../www'));
server.locals.CONFIG = escape(JSON.stringify(config));
server.use(config.API_PROXY_PATH, proxy(config.API_ENDPOINT));
server.get('/', function (req, res) {
res.render('index.html');
});
server.use(Express.static(path.resolve(__dirname, '../www')));
server.get('/404', function (req, res) {
res.render('404.html');
});
server.listen(server.get('port'), function (err) {
if (err) |
console.log('Gandalf has started listening at localhost:' + server.get('port'));
});
| {
console.log('error while starting server', err);
} | conditional_block |
index.js | var features = require('./features');
var Rework = require('rework');
/**
* Expose `myth`.
*/
module.exports = exports = myth;
/**
* Expose `features`.
*/ | *
* @param {String} string (optional)
* @param {Object} options (optional)
* @property {String} source
* @property {Array} browsers
* @property {Boolean} compress
* @property {Object} features
* @return {String}
*/
function myth(string, options){
if ('object' == typeof string) options = string, string = null;
options = options || {};
if ('string' != typeof string) return plugin(options);
return Rework(string, options)
.use(plugin(options))
.toString(options);
}
/**
* Generate a Myth rework plugin with `options`.
*
* @param {Object} options
* @return {Function}
*/
function plugin(options){
return function(stylesheet, rework){
var enabled = options.features || {};
exports.features.forEach(function(key){
if (enabled[key] === false) return;
var plugin = features[key](options);
rework.use(plugin);
});
};
} |
exports.features = Object.keys(features);
/**
* Rework a CSS `string`, or return the Myth rework plugin. | random_line_split |
index.js |
var features = require('./features');
var Rework = require('rework');
/**
* Expose `myth`.
*/
module.exports = exports = myth;
/**
* Expose `features`.
*/
exports.features = Object.keys(features);
/**
* Rework a CSS `string`, or return the Myth rework plugin.
*
* @param {String} string (optional)
* @param {Object} options (optional)
* @property {String} source
* @property {Array} browsers
* @property {Boolean} compress
* @property {Object} features
* @return {String}
*/
function | (string, options){
if ('object' == typeof string) options = string, string = null;
options = options || {};
if ('string' != typeof string) return plugin(options);
return Rework(string, options)
.use(plugin(options))
.toString(options);
}
/**
* Generate a Myth rework plugin with `options`.
*
* @param {Object} options
* @return {Function}
*/
function plugin(options){
return function(stylesheet, rework){
var enabled = options.features || {};
exports.features.forEach(function(key){
if (enabled[key] === false) return;
var plugin = features[key](options);
rework.use(plugin);
});
};
}
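// --- Editor's note: usage sketch, not part of the original module; the
// --- require name and CSS input are illustrative. ---
//
//   var myth = require('myth');
//   var css = myth(':root { --link: #e30; } a { color: var(--link); }');
//   // ...or obtain a rework plugin instead of a string:
//   var plugin = myth({ compress: true });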
| myth | identifier_name |
index.js |
var features = require('./features');
var Rework = require('rework');
/**
* Expose `myth`.
*/
module.exports = exports = myth;
/**
* Expose `features`.
*/
exports.features = Object.keys(features);
/**
* Rework a CSS `string`, or return the Myth rework plugin.
*
* @param {String} string (optional)
* @param {Object} options (optional)
* @property {String} source
* @property {Array} browsers
* @property {Boolean} compress
* @property {Object} features
* @return {String}
*/
function myth(string, options) |
/**
* Generate a Myth rework plugin with `options`.
*
* @param {Object} options
* @return {Function}
*/
function plugin(options){
return function(stylesheet, rework){
var enabled = options.features || {};
exports.features.forEach(function(key){
if (enabled[key] === false) return;
var plugin = features[key](options);
rework.use(plugin);
});
};
}
| {
if ('object' == typeof string) options = string, string = null;
options = options || {};
if ('string' != typeof string) return plugin(options);
return Rework(string, options)
.use(plugin(options))
.toString(options);
} | identifier_body |
FetchSimpleFeaturesByTags.ts | import { doRequest, Method } from 'lib/rest/RestRequests';
import FetchSimpleFeaturesByTagsTypes from './generated/FetchSimpleFeaturesByTagsTypes';
import FetchSimpleFeaturesTypes from './generated/FetchSimpleFeaturesTypes';
import Tag from 'models/Tag';
import { createSimpleFeatures, SimpleFeatureResponseData } from './FetchSimpleFeatures';
import { getEncodedURI } from 'lib/rest/URIHelper';
interface ResponseDataElement {
tag: string;
features: SimpleFeatureResponseData[]; | }
type ResponseData = ResponseDataElement[];
const createTag = (data: ResponseDataElement): Tag => {
return {
name: data.tag,
features: createSimpleFeatures(data.features),
};
};
const createTags = (data: ResponseData): Tag[] => {
return data.map(item => createTag(item));
};
const fetchSimpleFeaturesByTags = async (product: string, version: string, build: string): Promise<Tag[] | void> => {
const url = `/rest/tagview/featureTagIndex/${getEncodedURI(product, version, build)}`;
return doRequest(
Method.GET,
url,
'rest.error.featuresByTag',
null,
[FetchSimpleFeaturesTypes, FetchSimpleFeaturesByTagsTypes],
createTags
);
};
export default fetchSimpleFeaturesByTags; | random_line_split |
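// --- Editor's note: hypothetical call site, not part of the original module;
// --- the product/version/build arguments are illustrative. ---
//
//   const tags = await fetchSimpleFeaturesByTags('myProduct', '1.0.0', '42');
//   tags?.forEach(tag => console.log(tag.name, tag.features.length));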
|
terrain.js | (function(){
function render (context, points) {
console.log ('render called');
var angle = 0,
center = new Point3D (400,400,400);
return function () {
context.clearRect(0,0,800,600);
if (points.length < 1000) {
points.push (randomPoint());
}
if (angle > 360) {angle = 0;}
points.map (
function (pt) {
return pt.subtract (center);
}
).map (
function (pt) {
return y_rotate(pt, angle);
}
)/*.map (
function (pt) {
return x_rotate(pt, angle);
}
)//.map (
function (pt) {
return z_rotate(pt,angle);
}
)/**/.map (
function (pt) {
return project (pt,700);
}
).map (
function (pt) {
return {
x: pt['x'] + center.x,
y: pt['y'] + center.y,
scale: pt['scale']
}
}
).forEach (
function (pt) {
if (pt.scale < 0) {return;}
context.fillStyle = 'rgba(255,255,255,' + pt.scale + ')';
context.beginPath();
context.arc(pt.x, pt.y, 4*pt.scale, 0, Math.PI * 2, true);
context.closePath();
context.fill();
}
);
angle = angle + 1;
}
}
function randomInt (min, max) {
return Math.floor(Math.random() * (max - min + 1)) + min;
}
function randomPoint () |
function init() {
console.log ('inited');
var viewport = document.getElementById('viewport'),
context = viewport.getContext ('2d'),
points = [];
context.strokeStyle = '#aaa';
context.lineWidth = 1;
setInterval (render (context, points), 50);
}
document.body.onload = init;
}()); | {
return new Point3D (
randomInt (-100,900),
randomInt (-100,900),
randomInt (-100,900),
1,
randomInt (1,10)
);
} | identifier_body |
terrain.js | (function(){
function render (context, points) {
console.log ('render called');
var angle = 0,
center = new Point3D (400,400,400);
return function () {
context.clearRect(0,0,800,600);
if (points.length < 1000) {
points.push (randomPoint());
}
if (angle > 360) {angle = 0;}
points.map (
function (pt) {
return pt.subtract (center);
}
).map (
function (pt) {
return y_rotate(pt, angle);
}
)/*.map (
function (pt) {
return x_rotate(pt, angle);
}
)//.map (
function (pt) {
return z_rotate(pt,angle);
}
)/**/.map (
function (pt) {
return project (pt,700);
}
).map (
function (pt) {
return {
x: pt['x'] + center.x,
y: pt['y'] + center.y,
scale: pt['scale']
}
}
).forEach (
function (pt) {
if (pt.scale < 0) {return;}
context.fillStyle = 'rgba(255,255,255,' + pt.scale + ')';
context.beginPath();
context.arc(pt.x, pt.y, 4*pt.scale, 0, Math.PI * 2, true);
context.closePath();
context.fill();
}
);
angle = angle + 1;
}
}
function randomInt (min, max) {
return Math.floor(Math.random() * (max - min + 1)) + min;
}
function | () {
return new Point3D (
randomInt (-100,900),
randomInt (-100,900),
randomInt (-100,900),
1,
randomInt (1,10)
);
}
function init() {
console.log ('inited');
var viewport = document.getElementById('viewport'),
context = viewport.getContext ('2d'),
points = [];
context.strokeStyle = '#aaa';
context.lineWidth = 1;
setInterval (render (context, points), 50);
}
document.body.onload = init;
}()); | randomPoint | identifier_name |
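// --- Editor's note (illustrative): the loop above is a standard point-cloud
// --- pipeline: translate to the origin, rotate about the y-axis, perspective
// --- project, translate back, then draw each dot with size/alpha scaled by
// --- depth. The `project` helper is not defined in this file; a typical
// --- implementation consistent with its call sites (returns x, y and a
// --- depth `scale`, negative behind the camera) would be:
//
//   function project(pt, d) {
//     var scale = d / (d + pt.z); // perspective divide
//     return { x: pt.x * scale, y: pt.y * scale, scale: scale };
//   }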
terrain.js | (function(){
function render (context, points) {
console.log ('render called');
var angle = 0,
center = new Point3D (400,400,400);
return function () {
context.clearRect(0,0,800,600);
if (points.length < 1000) {
points.push (randomPoint());
}
if (angle > 360) {angle = 0;}
points.map (
function (pt) {
return pt.subtract (center);
}
).map (
function (pt) {
return y_rotate(pt, angle);
}
)/*.map (
function (pt) {
return x_rotate(pt, angle);
}
)//.map (
function (pt) {
return z_rotate(pt,angle);
}
)/**/.map (
function (pt) {
return project (pt,700);
}
).map (
function (pt) {
return {
x: pt['x'] + center.x,
y: pt['y'] + center.y,
scale: pt['scale']
}
}
).forEach (
function (pt) {
if (pt.scale < 0) {return;}
context.fillStyle = 'rgba(255,255,255,' + pt.scale + ')';
context.beginPath();
context.arc(pt.x, pt.y, 4*pt.scale, 0, Math.PI * 2, true);
context.closePath(); | );
angle = angle + 1;
}
}
function randomInt (min, max) {
return Math.floor(Math.random() * (max - min + 1)) + min;
}
function randomPoint () {
return new Point3D (
randomInt (-100,900),
randomInt (-100,900),
randomInt (-100,900),
1,
randomInt (1,10)
);
}
function init() {
console.log ('inited');
var viewport = document.getElementById('viewport'),
context = viewport.getContext ('2d'),
points = [];
context.strokeStyle = '#aaa';
context.lineWidth = 1;
setInterval (render (context, points), 50);
}
document.body.onload = init;
}()); | context.fill();
} | random_line_split |
terrain.js | (function(){
function render (context, points) {
console.log ('render called');
var angle = 0,
center = new Point3D (400,400,400);
return function () {
context.clearRect(0,0,800,600);
if (points.length < 1000) {
points.push (randomPoint());
}
if (angle > 360) |
points.map (
function (pt) {
return pt.subtract (center);
}
).map (
function (pt) {
return y_rotate(pt, angle);
}
)/*.map (
function (pt) {
return x_rotate(pt, angle);
}
)//.map (
function (pt) {
return z_rotate(pt,angle);
}
)/**/.map (
function (pt) {
return project (pt,700);
}
).map (
function (pt) {
return {
x: pt['x'] + center.x,
y: pt['y'] + center.y,
scale: pt['scale']
}
}
).forEach (
function (pt) {
if (pt.scale < 0) {return;}
context.fillStyle = 'rgba(255,255,255,' + pt.scale + ')';
context.beginPath();
context.arc(pt.x, pt.y, 4*pt.scale, 0, Math.PI * 2, true);
context.closePath();
context.fill();
}
);
angle = angle + 1;
}
}
function randomInt (min, max) {
return Math.floor(Math.random() * (max - min + 1)) + min;
}
function randomPoint () {
return new Point3D (
randomInt (-100,900),
randomInt (-100,900),
randomInt (-100,900),
1,
randomInt (1,10)
);
}
function init() {
console.log ('inited');
var viewport = document.getElementById('viewport'),
context = viewport.getContext ('2d'),
points = [];
context.strokeStyle = '#aaa';
context.lineWidth = 1;
setInterval (render (context, points), 50);
}
document.body.onload = init;
}()); | {angle = 0;} | conditional_block |
mod.rs | //! Traits and datastructures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target field.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
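// Editor's note -- a worked example of accumulation (illustrative): given the
// updates ("k", "v", 1, +1) and ("k", "v", 3, -1), the pair ("k", "v") has
// count 1 at times 1 and 2 and count 0 from time 3 onward, because only
// updates whose time is less or equal to the query time contribute.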
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collections contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) |
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisons should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
fn read_upper(&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
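// Illustrative sketch (not part of the original API): draining a `TraceReader`
// through its cursor. The helper name and the counting logic are assumptions
// for the example; the point is the nested key/value walk that the `Cursor`
// contract implies.
fn count_updates<Tr: TraceReader>(trace: &mut Tr) -> usize {
    // `cursor` panics if the trace cannot produce a complete cursor.
    let (mut cursor, storage) = trace.cursor();
    let mut count = 0;
    while cursor.key_valid(&storage) {
        while cursor.val_valid(&storage) {
            // Visit every `(time, diff)` pair for the current `(key, val)`.
            cursor.map_times(&storage, |_time, _diff| count += 1);
            cursor.step_val(&storage);
        }
        cursor.step_key(&storage);
    }
    count
}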
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereferenced to the types above.
///
/// The trace must be constructible from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] { self.description().lower() }
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
/// Builds an empty batch spanning `lower` to `upper`, as of `since`.
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
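// Illustrative sketch (not part of the original API): assembling a batch from
// an already-sorted run of updates with a `Builder`. The helper name and the
// choice of `since == lower` are assumptions for the example.
fn batch_from_sorted<K, V, T, R, B: Batch<K, V, T, R>>(
    sorted: Vec<(K, V, T, R)>,
    lower: &[T],
    upper: &[T],
) -> B {
    let mut builder = <B::Builder as Builder<K, V, T, R, B>>::with_capacity(sorted.len());
    // `extend` just pushes each element; the input must already be ordered.
    builder.extend(sorted.into_iter());
    // A `since` equal to `lower` indicates that no times have been advanced.
    builder.done(lower, upper, lower)
}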
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
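// Illustrative sketch (not part of the original API): driving a `Merger` to
// completion with a bounded per-step budget. The helper name and the fuel
// constant are assumptions; the termination test mirrors the `work` contract,
// in that left-over fuel means the merge ran out of work.
fn complete_merge<K, V, T, R, B: Batch<K, V, T, R>>(source1: &B, source2: &B) -> B {
    let mut merger = source1.begin_merge(source2);
    loop {
        let mut fuel = 1_000_000;
        merger.work(source1, source2, &None, &mut fuel);
        if fuel > 0 {
            // `done` may only be called once `work` leaves fuel unspent.
            return merger.done();
        }
    }
}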
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for abomonated batches.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching abomonated batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building abomonated batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging abomonated batches.
pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
}
| {
cursor
} | conditional_block |
mod.rs | //! Traits and data structures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target time.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates.
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collection's contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) {
cursor
}
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisons should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
fn read_upper(&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereferenced to the types above.
///
/// The trace must be constructible from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
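// Illustrative sketch (not part of the original API): appending batches to a
// trace and concluding it. Batches are expected in contiguous intervals; per
// the documentation of `insert`, a gap makes the trace pad with an empty
// batch, and repopulating that region is an error.
fn append_all<Tr>(trace: &mut Tr, batches: Vec<Tr::Batch>)
where
    Tr: Trace,
    Tr::Batch: Batch<Tr::Key, Tr::Val, Tr::Time, Tr::R>,
{
    for batch in batches {
        trace.insert(batch);
    }
    // Logically inserts an empty batch whose upper frontier is empty.
    trace.close();
}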
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] { self.description().lower() }
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
/// Builds an empty batch spanning `lower` to `upper`, as of `since`.
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
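// Illustrative sketch (not part of the original API): collecting unordered
// updates in a `Batcher` and sealing a batch at `upper`. The helper name is
// an assumption for the example.
fn seal_at<K, V, T, R, B: Batch<K, V, T, R>>(
    mut updates: Vec<((K, V), T, R)>,
    upper: &[T],
) -> B {
    let mut batcher = <B::Batcher as Batcher<K, V, T, R, B>>::new();
    batcher.push_batch(&mut updates);
    // Updates at times greater or equal to an element of `upper` stay behind
    // in the batcher for a later `seal`.
    batcher.seal(upper)
}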
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
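// Illustrative sketch (not part of the original API): because `Rc<B>` is a
// `Batch` whenever `B` is, a sealed batch can be handed to several readers
// without duplicating its contents. The helper name is an assumption.
fn share_batch<K, V, T, R, B: Batch<K, V, T, R>>(
    batch: B,
) -> (::std::rc::Rc<B>, ::std::rc::Rc<B>) {
    let shared = ::std::rc::Rc::new(batch);
    (shared.clone(), shared)
}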
/// Blanket implementations for abomonated batches.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching abomonated batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building abomonated batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging abomonated batches. | pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
} | random_line_split |
|
mod.rs | //! Traits and data structures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target time.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
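// Illustrative sketch of the accumulation rule described above, over a plain
// slice of updates rather than a real trace: the contents at `query` are the
// sum of diffs whose times are less or equal to `query`. The helper name and
// the `isize` diff type are assumptions for the example.
fn accumulate_at<K: Eq, V: Eq, T: PartialOrd>(
    updates: &[(K, V, T, isize)],
    key: &K,
    val: &V,
    query: &T,
) -> isize {
    updates
        .iter()
        .filter(|(k, v, t, _)| k == key && v == val && t <= query)
        .map(|(_, _, _, diff)| *diff)
        .sum()
}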
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates.
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collection's contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) {
cursor
}
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisons should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
fn | (&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
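// Illustrative sketch (not part of the original API): combining `read_upper`
// and `advance_upper` to report the trace's committed frontier, advanced
// across any trailing empty batches. The helper name is an assumption.
fn committed_upper<Tr>(trace: &mut Tr) -> Antichain<Tr::Time>
where
    Tr: TraceReader,
    Tr::Time: Timestamp,
{
    let mut upper = Antichain::new();
    trace.read_upper(&mut upper);
    trace.advance_upper(&mut upper);
    upper
}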
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereferenced to the types above.
///
/// The trace must be constructible from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] { self.description().lower() }
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
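// Illustrative sketch (not part of the original API): enumerating the distinct
// keys of a single batch. Because `Cursor::Storage = Self` here, the batch
// itself is passed as the cursor's storage. The helper name is an assumption.
fn for_each_key<K, V, T, R, B, F>(batch: &B, mut action: F)
where
    B: BatchReader<K, V, T, R>,
    F: FnMut(&K),
{
    let mut cursor = batch.cursor();
    while cursor.key_valid(batch) {
        action(cursor.key(batch));
        cursor.step_key(batch);
    }
}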
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
/// Builds an empty batch spanning `lower` to `upper`, as of `since`.
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for batches wrapped in `Abomonated` byte buffers.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging reference counted batches.
pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
}
| read_upper | identifier_name |
mod.rs | //! Traits and data structures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less than or equal to the target time.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collections contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) {
cursor
}
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisons should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
///
fn read_upper(&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
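// A minimal sketch (assumption: illustrative only, not part of the trait) of
// draining a reader through its cursor/storage pair, using only the methods
// declared above and on `Cursor`.
//
// fn count_updates<Tr: TraceReader>(trace: &mut Tr) -> usize {
//     let (mut cursor, storage) = trace.cursor();
//     let mut count = 0;
//     while cursor.key_valid(&storage) {
//         while cursor.val_valid(&storage) {
//             cursor.map_times(&storage, |_time, _diff| count += 1);
//             cursor.step_val(&storage);
//         }
//         cursor.step_key(&storage);
//     }
//     count
// }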
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereferenced to the types above.
///
/// The trace must be constructable from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
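// Sketch of the insertion contract described above (frontiers illustrative):
//
// trace.insert(batch_a);   // description: lower = [0], upper = [5]
// trace.insert(batch_b);   // its lower [5] must equal batch_a's upper
// trace.close();           // logically an empty batch whose upper is []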
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] |
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
/// Produces an empty batch described by the `lower`, `upper`, and `since` frontiers.
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
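// Sketch (hypothetical names): a `Builder` consumes updates that are already
// ordered, then stamps the result with its three describing frontiers.
//
// let mut builder = <B as Batch<K, V, T, R>>::Builder::with_capacity(updates.len());
// builder.extend(updates.into_iter());               // ordered (k, v, t, r) tuples
// let batch: B = builder.done(&lower, &upper, &since);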
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for batches wrapped in `Abomonated` byte buffers.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging reference counted batches.
pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
}
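// The `seal`/`done` implementations above share one pattern worth calling out:
// size the buffer with `measure`, `encode` the freshly built batch into it,
// then re-view the bytes as the batch type without copying. Sketch only;
// `MyBatch` is a stand-in name.
//
// let mut bytes = Vec::with_capacity(measure(&batch));           // exact size
// unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };   // typed bytes
// let owned: Abomonated<MyBatch, Vec<u8>> = unsafe { Abomonated::new(bytes).unwrap() };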
| { self.description().lower() } | identifier_body |
dirname.rs | #![crate_name = "uu_dirname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
use std::path::Path;
static NAME: &'static str = "dirname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn | (args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("z", "zero", "separate output with NUL rather than newline");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
let msg = format!("{0} {1} - strip last component from file name
Usage:
{0} [OPTION] NAME...
Output each NAME with its last non-slash component and trailing slashes
removed; if NAME contains no /'s, output '.' (meaning the current
directory).", NAME, VERSION);
print!("{}", opts.usage(&msg));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let separator = if matches.opt_present("zero") {"\0"} else {"\n"};
if !matches.free.is_empty() {
for path in &matches.free {
let p = Path::new(path);
match p.parent() {
Some(d) => {
if d.components().next() == None {
print!(".")
} else {
print!("{}", d.to_string_lossy());
}
}
None => {
if p.is_absolute() {
print!("/");
} else {
print!(".");
}
}
}
print!("{}", separator);
}
} else {
println!("{0}: missing operand", NAME);
println!("Try '{0} --help' for more information.", NAME);
return 1;
}
0
}
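// Behaviour sketch (mirrors GNU dirname; outputs shown after "->"):
// dirname /usr/bin/sort -> /usr/bin
// dirname stdio.h       -> .   (no slash: the current directory)
// dirname /             -> /   (the root has no parent, so it prints "/")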
| uumain | identifier_name |
dirname.rs | #![crate_name = "uu_dirname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
use std::path::Path;
static NAME: &'static str = "dirname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("z", "zero", "separate output with NUL rather than newline");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
let msg = format!("{0} {1} - strip last component from file name
Usage:
{0} [OPTION] NAME...
Output each NAME with its last non-slash component and trailing slashes
removed; if NAME contains no /'s, output '.' (meaning the current
directory).", NAME, VERSION);
print!("{}", opts.usage(&msg));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let separator = if matches.opt_present("zero") {"\0"} else {"\n"};
if !matches.free.is_empty() {
for path in &matches.free {
let p = Path::new(path);
match p.parent() {
Some(d) => {
if d.components().next() == None {
print!(".")
} else {
print!("{}", d.to_string_lossy());
}
}
None => {
if p.is_absolute() {
print!("/");
} else {
print!(".");
}
}
}
print!("{}", separator); | println!("{0}: missing operand", NAME);
println!("Try '{0} --help' for more information.", NAME);
return 1;
}
0
} | }
} else { | random_line_split |
dirname.rs | #![crate_name = "uu_dirname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
use std::path::Path;
static NAME: &'static str = "dirname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 | {
let mut opts = getopts::Options::new();
opts.optflag("z", "zero", "separate output with NUL rather than newline");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
let msg = format!("{0} {1} - strip last component from file name
Usage:
{0} [OPTION] NAME...
Output each NAME with its last non-slash component and trailing slashes
removed; if NAME contains no /'s, output '.' (meaning the current
directory).", NAME, VERSION);
print!("{}", opts.usage(&msg));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let separator = if matches.opt_present("zero") {"\0"} else {"\n"};
if !matches.free.is_empty() {
for path in &matches.free {
let p = Path::new(path);
match p.parent() {
Some(d) => {
if d.components().next() == None {
print!(".")
} else {
print!("{}", d.to_string_lossy());
}
}
None => {
if p.is_absolute() {
print!("/");
} else {
print!(".");
}
}
}
print!("{}", separator);
}
} else {
println!("{0}: missing operand", NAME);
println!("Try '{0} --help' for more information.", NAME);
return 1;
}
0
} | identifier_body |
|
models.py | from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
import misaka
from groups.models import Group
# Create your models here.
# POSTS MODELS.PY
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model):
user = models.ForeignKey(User, related_name='posts')
created_at = models.DateTimeField(auto_now_add=True)  # set once at creation; auto_now would update it on every save
message = models.TextField()
message_html = models.TextField(editable=False)
group = models.ForeignKey(Group, related_name='posts', null=True, blank=True)
def | (self):
return self.message
def save(self, *args, **kwargs):
self.message_html = misaka.html(self.message)
super().save(*args, **kwargs)
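# Illustrative round-trip of the save() override above (hypothetical shell
# session; misaka renders the Markdown once per save into the cached
# message_html field):
# >>> p = Post(user=someone, message="**hi**")
# >>> p.save()
# >>> p.message_html
# '<p><strong>hi</strong></p>\n'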
def get_absolute_url(self):
return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk})
class Meta:
ordering = ['-created_at']
unique_together = ['user', 'message'] | __str__ | identifier_name |
models.py | from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
import misaka
from groups.models import Group
# Create your models here.
# POSTS MODELS.PY
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model):
user = models.ForeignKey(User, related_name='posts')
created_at = models.DateTimeField(auto_now_add=True)  # set once at creation; auto_now would update it on every save
message = models.TextField()
message_html = models.TextField(editable=False)
group = models.ForeignKey(Group, related_name='posts', null=True, blank=True)
def __str__(self):
return self.message
def save(self, *args, **kwargs):
self.message_html = misaka.html(self.message)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk})
class Meta:
| ordering = ['-created_at']
unique_together = ['user', 'message'] | identifier_body |
|
models.py | from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
import misaka
from groups.models import Group
# Create your models here.
# POSTS MODELS.PY
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model): |
def __str__(self):
return self.message
def save(self, *args, **kwargs):
self.message_html = misaka.html(self.message)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk})
class Meta:
ordering = ['-created_at']
unique_together = ['user', 'message'] | user = models.ForeignKey(User, related_name='posts')
created_at = models.DateTimeField(auto_now_add=True)  # set once at creation; auto_now would update it on every save
message = models.TextField()
message_html = models.TextField(editable=False)
group = models.ForeignKey(Group, related_name='posts', null=True, blank=True) | random_line_split |
error.rs | use common::document::ContainerDocument;
use snafu::Snafu;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Failed to parse {} document: {}", target_type, source))]
Deserialization {
target_type: &'static str,
source: serde_json::Error,
},
#[snafu(display("Document Retrieval Error: {}", source))]
DocumentRetrievalError { source: Box<dyn std::error::Error> },
#[snafu(display("Index Creation Error: {}", source))]
IndexCreation { source: Box<dyn std::error::Error> },
#[snafu(display("Index Publication Error: {}", source))]
IndexPublication { source: Box<dyn std::error::Error> },
#[snafu(display("Index Optimization Error: {}", source))]
IndexOptimization { source: Box<dyn std::error::Error> },
#[snafu(display("Storage Connection Error: {}", source))]
StorageConnection { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Insertion Error: {}", source))]
DocumentStreamInsertion { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Update Error: {}", source))]
DocumentStreamUpdate { source: Box<dyn std::error::Error> },
#[snafu(display("Expected Index: {}", index))]
ExpectedIndex { index: String },
#[snafu(display("Configuration Error: {}", source))]
Configuration { source: config::ConfigError },
#[snafu(display("Status Error: {}", source))]
Status { source: Box<dyn std::error::Error> },
#[snafu(display("Backend Configuration Error: {}", source))]
BackendConfiguration { source: Box<dyn std::error::Error> },
}
impl Error {
pub fn | <T: ContainerDocument>(err: serde_json::Error) -> Self {
Self::Deserialization {
target_type: T::static_doc_type(),
source: err,
}
}
}
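// Hypothetical caller sketch: mapping a serde_json failure into this error
// type via the generic helper above (`MyDoc` stands in for some type
// implementing `ContainerDocument`).
//
// let parsed: Result<MyDoc, Error> = serde_json::from_str(raw)
//     .map_err(Error::from_deserialization::<MyDoc>);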
| from_deserialization | identifier_name |
error.rs | use common::document::ContainerDocument;
use snafu::Snafu;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Failed to parse {} document: {}", target_type, source))]
Deserialization {
target_type: &'static str,
source: serde_json::Error,
},
#[snafu(display("Document Retrieval Error: {}", source))]
DocumentRetrievalError { source: Box<dyn std::error::Error> },
#[snafu(display("Index Creation Error: {}", source))]
IndexCreation { source: Box<dyn std::error::Error> },
#[snafu(display("Index Publication Error: {}", source))]
IndexPublication { source: Box<dyn std::error::Error> },
#[snafu(display("Index Optimization Error: {}", source))]
IndexOptimization { source: Box<dyn std::error::Error> },
#[snafu(display("Storage Connection Error: {}", source))]
StorageConnection { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Insertion Error: {}", source))]
DocumentStreamInsertion { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Update Error: {}", source))]
DocumentStreamUpdate { source: Box<dyn std::error::Error> },
#[snafu(display("Expected Index: {}", index))]
ExpectedIndex { index: String }, | #[snafu(display("Status Error: {}", source))]
Status { source: Box<dyn std::error::Error> },
#[snafu(display("Backend Configuration Error: {}", source))]
BackendConfiguration { source: Box<dyn std::error::Error> },
}
impl Error {
pub fn from_deserialization<T: ContainerDocument>(err: serde_json::Error) -> Self {
Self::Deserialization {
target_type: T::static_doc_type(),
source: err,
}
}
} |
#[snafu(display("Configuration Error: {}", source))]
Configuration { source: config::ConfigError },
| random_line_split |
main.js | (function() {
var lastTime = 0;
var vendors = ['ms', 'moz', 'webkit', 'o'];
for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) {
window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame'];
window.cancelAnimationFrame =
window[vendors[x]+'CancelAnimationFrame'] || window[vendors[x]+'CancelRequestAnimationFrame'];
}
if (!window.requestAnimationFrame)
window.requestAnimationFrame = function(callback, element) {
var currTime = new Date().getTime();
var timeToCall = Math.max(0, 16 - (currTime - lastTime));
var id = window.setTimeout(function() { callback(currTime + timeToCall); },
timeToCall);
lastTime = currTime + timeToCall;
return id;
};
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = function(id) {
clearTimeout(id);
};
}());
(function() {
Event.observe(window, 'load', main);
var WIDTH = 400;
var HEIGHT = 200;
var canvas = null;
var context = null;
var groundCanvas = null;
var groundContext = null;
var groundImage = null;
var scaleCanvas = null;
var scaleContext = null;
// Physics Variables
var lastTime = null;
var dt = null;
var av = 0;
var lv = 0;
var px = 953;
var py = 792;
var pa = 0;
// Viewport Settings
var omega = 120 * Math.PI/180;
var theta = 60 * Math.PI/180;
var alpha = 30 * Math.PI/180;
var h = 15;
var H = 180;
var LH = 1;
var modes = [];
// Input Settings
var codeOffset = 37;
var LEFT = 0;
var UP = 1;
var RIGHT = 2;
var DOWN = 3;
var downKeys = [false, false, false, false];
var MAX_VELOCITY = 100/1000; // px per millisecond
var ANGULAR_VELOCITY = Math.PI/(4 * 1000); // radians per millisecond
var images = {
'img/mariocircuit.png': null
};
function main()
{
canvas = new Element('canvas', { 'width': WIDTH, 'height': HEIGHT});
context = canvas.getContext("2d");
$$('body')[0].insert(canvas);
Event.observe(window, 'keydown', handleKeydown);
Event.observe(window, 'keyup', handleKeyup);
canvas.observe('mousedown', handleMousedown);
canvas.observe('mouseup', handleMouseup);
canvas.observe('mouseover', function() {
Event.observe(document, 'touchmove', function(e){ e.preventDefault(); });
});
loadImages();
}
function loadImages()
{
for(var key in images) {
console.log(key);
if(images[key] == null) {
var img = new Image();
img.addEventListener("load", handleLoadImageSuccess.bindAsEventListener(this, key), false);
img.addEventListener("error", handleLoadImageFailure.bindAsEventListener(this), false);
img.src = key;
return;
}
}
handleLoadComplete();
}
function handleLoadImageSuccess(event, key)
{
images[key] = event.target;
loadImages();
}
function handleLoadImageFailure(event)
{
loadImages();
}
function handleLoadComplete()
{
window.requestAnimationFrame(update.bind(this));
groundImage = images['img/mariocircuit.png'];
var max = Math.max(groundImage.width, groundImage.height);
groundCanvas = new Element('canvas', { 'width': max, 'height': max, 'style': 'width:' + max/2 + 'px;height:' + max/2 + 'px'});
groundContext = groundCanvas.getContext("2d");
//$$('body')[0].insert(groundCanvas);
/*
scaleCanvas = new Element('canvas', { 'width': max, 'height': max, 'style': 'width:' + max/2 + 'px;height:' + max/2 + 'px'});
scaleContext = scaleCanvas.getContext("2d");
$$('body')[0].insert(scaleCanvas);
*/
// MODES
var sx, sy, sw, sh, dx, dw;
var w = 0,
w1 = 0,
w2 = 0,
d1 = 0,
d2 = 0;
for(var L = 1; L <= H; L++) {
modes[L] = null;
w1 = 2 * h * Math.tan( (Math.PI - theta)/2 + alpha*(L - 1)/H ) / Math.tan(omega/2);
w2 = 2 * h * Math.tan( (Math.PI - theta)/2 + alpha*L/H ) / Math.tan(omega/2);
d1 = h * Math.tan( (Math.PI - theta)/2 + alpha*(L - 1)/H );
d2 = h * Math.tan( (Math.PI - theta)/2 + alpha*L/H );
//w = w1 + (w2-w1)/2;
w = w1;
//if(d2 > groundCanvas.height) continue;
sx = (groundCanvas.width - w)/2;
sy = groundCanvas.height - d1;
sw = w;
sh = d2 - d1;
dw = WIDTH;
dx = 0;
if(w > groundCanvas.width) {
sx = 0;
sw = groundCanvas.width;
dw = WIDTH * (sw/w);
dx = (WIDTH - dw) / 2;
}
/*
context.drawImage(
groundCanvas,
sx,
sy,
sw,
sh,
dx,
HEIGHT - L,
dw,
1
);
*/
modes[L] = {
'sx': sx,
'sy': sy,
'sw': sw,
'sh': sh,
'dx': dx,
'dw': dw
};
}
}
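// Worked example of the per-scanline maths above (values approximate, for
// illustration): with h = 15, theta = 60°, alpha = 30° and H = 180, scanline
// L = 1 samples the ground at d1 = 15 * tan(60°) ≈ 25.98 px from the camera,
// and its strip width w1 = 2 * 15 * tan(60°) / tan(omega/2) = 30 px gets
// stretched across the 400 px canvas — near rows are magnified, which is the
// classic "Mode 7" perspective effect.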
function update(t)
{
window.requestAnimationFrame(update.bind(this));
if(lastTime == null) lastTime = t;
dt = t - lastTime;
lastTime = t;
lv = 0;
av = 0;
//if(lv < 0.05 * MAX_VELOCITY) lv = 0;
//if(av < 0.05 * ANGULAR_VELOCITY) av = 0;
if(downKeys[LEFT] || downKeys[RIGHT] || downKeys[UP] || downKeys[DOWN]) {
lv = MAX_VELOCITY * ((0+downKeys[DOWN]) + (0+downKeys[UP])*-1);
if (lv < 0) lv *= 0.5; // halve speed in the negative (up-arrow) direction; the original `lv == -1` could never match after scaling by MAX_VELOCITY
av = ANGULAR_VELOCITY * ((0+downKeys[LEFT]) + (0+downKeys[RIGHT])*-1);
}
pa += (dt * av);
px += (dt * lv) * Math.sin(pa);
py += (dt * lv) * Math.cos(pa);
// Clear the canvas
groundCanvas.width = groundCanvas.width;
//scaleCanvas.width = scaleCanvas.width;
canvas.width = canvas.width;
var dx = (groundCanvas.width/2 - px);
var dy = (groundCanvas.height - py);
groundContext.save();
groundContext.translate(dx + px, dy + py);
groundContext.rotate(pa);
groundContext.translate((dx + px)*-1, (dy + py)*-1);
groundContext.drawImage(groundImage, dx, dy);
groundContext.restore();
for(var L = 1; L <= H; L++) {
var val = modes[L];
if(val == undefined) continue;
context.drawImage(
groundCanvas,
val.sx,
val.sy,
val.sw,
val.sh,
val.dx,
HEIGHT - (L*LH),
val.dw,
LH
);
/*
scaleContext.drawImage(
groundCanvas,
val.sx,
val.sy,
val.sw,
val.sh,
val.dx,
val.sy,
val.dw,
val.sh
);
*/
}
}
function handleKeydown(event)
{
var code = event.keyCode - codeOffset;
//console.log('keydown: ' + code);
switch(code) {
case UP:
case DOWN:
case LEFT:
case RIGHT:
downKeys[code] = true;
break;
}
}
function handleKeyup(event)
{
var code = event.keyCode - codeOffset;
//console.log('keyup: ' + code);
switch(code) {
case UP:
case DOWN:
case LEFT:
case RIGHT:
downKeys[code] = false;
break;
}
}
function handleMousedown(event)
|
function handleMouseup(event)
{
downKeys[UP] = false;
downKeys[DOWN] = false;
downKeys[LEFT] = false;
downKeys[RIGHT] = false;
}
}());
| {
if(event.layerY < HEIGHT / 3) {
downKeys[UP] = true;
} else if(event.layerY < HEIGHT * 2 / 3) {
if(event.layerX < WIDTH/2) {
downKeys[LEFT] = true;
} else {
downKeys[RIGHT] = true;
}
} else {
downKeys[DOWN] = true;
}
} | identifier_body |
main.js | (function() {
var lastTime = 0;
var vendors = ['ms', 'moz', 'webkit', 'o'];
for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) {
window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame'];
window.cancelAnimationFrame =
window[vendors[x]+'CancelAnimationFrame'] || window[vendors[x]+'CancelRequestAnimationFrame'];
}
if (!window.requestAnimationFrame)
window.requestAnimationFrame = function(callback, element) {
var currTime = new Date().getTime();
var timeToCall = Math.max(0, 16 - (currTime - lastTime));
var id = window.setTimeout(function() { callback(currTime + timeToCall); },
timeToCall);
lastTime = currTime + timeToCall;
return id;
};
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = function(id) {
clearTimeout(id);
};
}());
(function() {
Event.observe(window, 'load', main);
var WIDTH = 400;
var HEIGHT = 200;
var canvas = null;
var context = null;
var groundCanvas = null;
var groundContext = null;
var groundImage = null;
var scaleCanvas = null;
var scaleContext = null;
// Physics Variables
var lastTime = null;
var dt = null;
var av = 0;
var lv = 0;
var px = 953;
var py = 792;
var pa = 0;
// Viewport Settings
var omega = 120 * Math.PI/180;
var theta = 60 * Math.PI/180;
var alpha = 30 * Math.PI/180;
var h = 15;
var H = 180;
var LH = 1;
var modes = [];
// Input Settings
var codeOffset = 37;
var LEFT = 0;
var UP = 1;
var RIGHT = 2;
var DOWN = 3;
var downKeys = [false, false, false, false];
var MAX_VELOCITY = 100/1000; // px per millisecond
var ANGULAR_VELOCITY = Math.PI/(4 * 1000); // radians per millisecond
var images = {
'img/mariocircuit.png': null
};
function main()
{
canvas = new Element('canvas', { 'width': WIDTH, 'height': HEIGHT});
context = canvas.getContext("2d");
$$('body')[0].insert(canvas);
Event.observe(window, 'keydown', handleKeydown);
Event.observe(window, 'keyup', handleKeyup);
canvas.observe('mousedown', handleMousedown);
canvas.observe('mouseup', handleMouseup);
canvas.observe('mouseover', function() {
Event.observe(document, 'touchmove', function(e){ e.preventDefault(); });
});
loadImages();
}
function loadImages()
{
for(var key in images) {
console.log(key);
if(images[key] == null) {
var img = new Image();
img.addEventListener("load", handleLoadImageSuccess.bindAsEventListener(this, key), false);
img.addEventListener("error", handleLoadImageFailure.bindAsEventListener(this), false);
img.src = key;
return;
}
}
handleLoadComplete();
}
function handleLoadImageSuccess(event, key)
{
images[key] = event.target;
loadImages();
}
function handleLoadImageFailure(event)
{
loadImages();
}
function handleLoadComplete()
{
window.requestAnimationFrame(update.bind(this));
groundImage = images['img/mariocircuit.png'];
var max = Math.max(groundImage.width, groundImage.height);
groundCanvas = new Element('canvas', { 'width': max, 'height': max, 'style': 'width:' + max/2 + 'px;height:' + max/2 + 'px'});
groundContext = groundCanvas.getContext("2d");
//$$('body')[0].insert(groundCanvas);
/*
scaleCanvas = new Element('canvas', { 'width': max, 'height': max, 'style': 'width:' + max/2 + 'px;height:' + max/2 + 'px'});
scaleContext = scaleCanvas.getContext("2d");
$$('body')[0].insert(scaleCanvas);
*/
// MODES
var sx, sy, sw, sh, dx, dw;
var w = 0,
w1 = 0,
w2 = 0,
d1 = 0,
d2 = 0;
for(var L = 1; L <= H; L++) {
modes[L] = null;
w1 = 2 * h * Math.tan( (Math.PI - theta)/2 + alpha*(L - 1)/H ) / Math.tan(omega/2);
w2 = 2 * h * Math.tan( (Math.PI - theta)/2 + alpha*L/H ) / Math.tan(omega/2);
d1 = h * Math.tan( (Math.PI - theta)/2 + alpha*(L - 1)/H );
d2 = h * Math.tan( (Math.PI - theta)/2 + alpha*L/H );
//w = w1 + (w2-w1)/2;
w = w1;
//if(d2 > groundCanvas.height) continue;
sx = (groundCanvas.width - w)/2;
sy = groundCanvas.height - d1;
sw = w;
sh = d2 - d1;
dw = WIDTH;
dx = 0;
if(w > groundCanvas.width) {
sx = 0;
sw = groundCanvas.width;
dw = WIDTH * (sw/w);
dx = (WIDTH - dw) / 2;
}
/*
context.drawImage(
groundCanvas,
sx,
sy,
sw,
sh,
dx,
HEIGHT - L,
dw,
1
);
*/
modes[L] = {
'sx': sx,
'sy': sy,
'sw': sw,
'sh': sh,
'dx': dx,
'dw': dw
};
}
}
function update(t)
{
window.requestAnimationFrame(update.bind(this));
if(lastTime == null) lastTime = t;
dt = t - lastTime;
lastTime = t;
lv = 0;
av = 0;
//if(lv < 0.05 * MAX_VELOCITY) lv = 0;
//if(av < 0.05 * ANGULAR_VELOCITY) av = 0;
if(downKeys[LEFT] || downKeys[RIGHT] || downKeys[UP] || downKeys[DOWN]) {
lv = MAX_VELOCITY * ((0+downKeys[DOWN]) + (0+downKeys[UP])*-1);
if (lv < 0) lv *= 0.5; // halve speed in the negative (up-arrow) direction; the original `lv == -1` could never match after scaling by MAX_VELOCITY
av = ANGULAR_VELOCITY * ((0+downKeys[LEFT]) + (0+downKeys[RIGHT])*-1);
}
pa += (dt * av);
px += (dt * lv) * Math.sin(pa);
py += (dt * lv) * Math.cos(pa);
// Clear the canvas
groundCanvas.width = groundCanvas.width;
//scaleCanvas.width = scaleCanvas.width;
canvas.width = canvas.width;
var dx = (groundCanvas.width/2 - px);
var dy = (groundCanvas.height - py);
groundContext.save();
groundContext.translate(dx + px, dy + py);
groundContext.rotate(pa);
groundContext.translate((dx + px)*-1, (dy + py)*-1);
groundContext.drawImage(groundImage, dx, dy);
groundContext.restore();
for(var L = 1; L <= H; L++) {
var val = modes[L];
if(val == undefined) continue;
context.drawImage(
groundCanvas,
val.sx,
val.sy,
val.sw,
val.sh,
val.dx,
HEIGHT - (L*LH),
val.dw,
LH
);
/*
scaleContext.drawImage(
groundCanvas,
val.sx,
val.sy,
val.sw,
val.sh,
val.dx,
val.sy,
val.dw,
val.sh
);
*/
}
}
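// Motion model sketch for update() above: explicit Euler integration of a
// unicycle — heading first, then position along the rotated forward vector:
//   pa += av * dt;  px += lv * dt * sin(pa);  py += lv * dt * cos(pa);
// with lv in px/ms and av in rad/ms, a 16 ms frame at MAX_VELOCITY
// (100/1000 px/ms) moves the kart 1.6 px.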
function handleKeydown(event)
{
var code = event.keyCode - codeOffset;
//console.log('keydown: ' + code);
switch(code) {
case UP:
case DOWN:
case LEFT:
case RIGHT:
downKeys[code] = true;
break;
}
}
function handleKeyup(event)
{
var code = event.keyCode - codeOffset;
//console.log('keyup: ' + code);
switch(code) {
case UP:
case DOWN:
case LEFT:
case RIGHT:
downKeys[code] = false;
break;
}
}
function handleMousedown(event)
{
if(event.layerY < HEIGHT / 3) {
downKeys[UP] = true;
} else if(event.layerY < HEIGHT * 2 / 3) {
if(event.layerX < WIDTH/2) {
downKeys[LEFT] = true;
} else {
downKeys[RIGHT] = true;
}
} else {
downKeys[DOWN] = true;
}
}
function | (event)
{
downKeys[UP] = false;
downKeys[DOWN] = false;
downKeys[LEFT] = false;
downKeys[RIGHT] = false;
}
}());
| handleMouseup | identifier_name |
main.js | (function() {
var lastTime = 0;
var vendors = ['ms', 'moz', 'webkit', 'o'];
for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) {
window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame'];
window.cancelAnimationFrame =
window[vendors[x]+'CancelAnimationFrame'] || window[vendors[x]+'CancelRequestAnimationFrame'];
}
if (!window.requestAnimationFrame)
window.requestAnimationFrame = function(callback, element) {
var currTime = new Date().getTime();
var timeToCall = Math.max(0, 16 - (currTime - lastTime));
var id = window.setTimeout(function() { callback(currTime + timeToCall); },
timeToCall);
lastTime = currTime + timeToCall;
return id;
};
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = function(id) {
clearTimeout(id);
};
}());
(function() {
Event.observe(window, 'load', main);
var WIDTH = 400;
var HEIGHT = 200;
var canvas = null;
var context = null;
var groundCanvas = null;
var groundContext = null;
var groundImage = null;
var scaleCanvas = null;
var scaleContext = null;
// Physics Variables
var lastTime = null;
var dt = null;
var av = 0;
var lv = 0;
var px = 953;
var py = 792;
var pa = 0;
// Viewport Settings
var omega = 120 * Math.PI/180;
var theta = 60 * Math.PI/180;
var alpha = 30 * Math.PI/180;
var h = 15;
var H = 180;
var LH = 1;
var modes = [];
// Input Settings
var codeOffset = 37;
var LEFT = 0;
var UP = 1;
var RIGHT = 2;
var DOWN = 3;
var downKeys = [false, false, false, false];
var MAX_VELOCITY = 100/1000; // px per millisecond
var ANGULAR_VELOCITY = Math.PI/(4 * 1000); // radians per millisecond
var images = {
'img/mariocircuit.png': null
};
function main()
{
canvas = new Element('canvas', { 'width': WIDTH, 'height': HEIGHT});
context = canvas.getContext("2d");
$$('body')[0].insert(canvas);
Event.observe(window, 'keydown', handleKeydown);
Event.observe(window, 'keyup', handleKeyup);
canvas.observe('mousedown', handleMousedown);
canvas.observe('mouseup', handleMouseup);
canvas.observe('mouseover', function() {
Event.observe(document, 'touchmove', function(e){ e.preventDefault(); });
});
loadImages();
}
function loadImages()
{
for(var key in images) {
console.log(key);
if(images[key] == null) {
var img = new Image();
img.addEventListener("load", handleLoadImageSuccess.bindAsEventListener(this, key), false);
img.addEventListener("error", handleLoadImageFailure.bindAsEventListener(this), false);
img.src = key;
return;
}
}
handleLoadComplete();
}
function handleLoadImageSuccess(event, key)
{
images[key] = event.target;
loadImages();
}
function handleLoadImageFailure(event)
{
loadImages();
}
function handleLoadComplete()
{
window.requestAnimationFrame(update.bind(this));
groundImage = images['img/mariocircuit.png'];
var max = Math.max(groundImage.width, groundImage.height);
groundCanvas = new Element('canvas', { 'width': max, 'height': max, 'style': 'width:' + max/2 + 'px;height:' + max/2 + 'px'});
groundContext = groundCanvas.getContext("2d");
//$$('body')[0].insert(groundCanvas);
/*
scaleCanvas = new Element('canvas', { 'width': max, 'height': max, 'style': 'width:' + max/2 + 'px;height:' + max/2 + 'px'});
scaleContext = scaleCanvas.getContext("2d");
$$('body')[0].insert(scaleCanvas);
*/
// MODES
var sx, sy, sw, sh, dx, dw;
var w = 0,
w1 = 0,
w2 = 0,
d1 = 0,
d2 = 0;
for(var L = 1; L <= H; L++) {
modes[L] = null;
w1 = 2 * h * Math.tan( (Math.PI - theta)/2 + alpha*(L - 1)/H ) / Math.tan(omega/2);
w2 = 2 * h * Math.tan( (Math.PI - theta)/2 + alpha*L/H ) / Math.tan(omega/2);
d1 = h * Math.tan( (Math.PI - theta)/2 + alpha*(L - 1)/H );
d2 = h * Math.tan( (Math.PI - theta)/2 + alpha*L/H );
//w = w1 + (w2-w1)/2;
w = w1;
//if(d2 > groundCanvas.height) continue;
sx = (groundCanvas.width - w)/2;
sy = groundCanvas.height - d1;
sw = w;
sh = d2 - d1;
dw = WIDTH;
dx = 0;
if(w > groundCanvas.width) {
sx = 0;
sw = groundCanvas.width;
dw = WIDTH * (sw/w);
dx = (WIDTH - dw) / 2;
}
/*
context.drawImage(
groundCanvas,
sx,
sy,
sw,
sh,
dx,
HEIGHT - L,
dw,
1
);
*/
modes[L] = {
'sx': sx,
'sy': sy,
'sw': sw,
'sh': sh,
'dx': dx,
'dw': dw
};
}
}
function update(t)
{
window.requestAnimationFrame(update.bind(this));
if(lastTime == null) lastTime = t;
dt = t - lastTime;
lastTime = t;
lv = 0;
av = 0;
//if(lv < 0.05 * MAX_VELOCITY) lv = 0;
//if(av < 0.05 * ANGULAR_VELOCITY) av = 0;
if(downKeys[LEFT] || downKeys[RIGHT] || downKeys[UP] || downKeys[DOWN]) {
            var dir = (0 + downKeys[DOWN]) + (0 + downKeys[UP]) * -1;
            lv = MAX_VELOCITY * dir;
            if(dir == -1) lv *= 0.5; // half speed when only UP is held
            av = ANGULAR_VELOCITY * ((0 + downKeys[LEFT]) + (0 + downKeys[RIGHT]) * -1);
}
pa += (dt * av);
px += (dt * lv) * Math.sin(pa);
py += (dt * lv) * Math.cos(pa);
// Clear the canvas
groundCanvas.width = groundCanvas.width;
//scaleCanvas.width = scaleCanvas.width;
canvas.width = canvas.width;
var dx = (groundCanvas.width/2 - px);
var dy = (groundCanvas.height - py);
groundContext.save();
groundContext.translate(dx + px, dy + py);
groundContext.rotate(pa);
groundContext.translate((dx + px)*-1, (dy + py)*-1);
groundContext.drawImage(groundImage, dx, dy);
groundContext.restore();
for(var L = 1; L <= H; L++) {
var val = modes[L];
if(val == undefined) continue;
context.drawImage(
groundCanvas,
val.sx,
val.sy,
val.sw,
val.sh,
val.dx,
HEIGHT - (L*LH),
val.dw,
LH
);
/*
scaleContext.drawImage( | val.sx,
val.sy,
val.sw,
val.sh,
val.dx,
val.sy,
val.dw,
val.sh
);
*/
}
}
function handleKeydown(event)
{
var code = event.keyCode - codeOffset;
//console.log('keydown: ' + code);
switch(code) {
case UP:
case DOWN:
case LEFT:
case RIGHT:
downKeys[code] = true;
break;
}
}
function handleKeyup(event)
{
var code = event.keyCode - codeOffset;
//console.log('keyup: ' + code);
switch(code) {
case UP:
case DOWN:
case LEFT:
case RIGHT:
downKeys[code] = false;
break;
}
}
function handleMousedown(event)
{
if(event.layerY < HEIGHT / 3) {
downKeys[UP] = true;
} else if(event.layerY < HEIGHT * 2 / 3) {
if(event.layerX < WIDTH/2) {
downKeys[LEFT] = true;
} else {
downKeys[RIGHT] = true;
}
} else {
downKeys[DOWN] = true;
}
}
function handleMouseup(event)
{
downKeys[UP] = false;
downKeys[DOWN] = false;
downKeys[LEFT] = false;
downKeys[RIGHT] = false;
}
}()); | groundCanvas, | random_line_split |
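A quick numeric check of the strip geometry computed in handleLoadComplete() above: scanline L samples a ground strip of width w = 2h·tan((π−θ)/2 + αL/H)/tan(ω/2) at distance d = h·tan((π−θ)/2 + αL/H). The sketch below is standalone Python (not part of main.js) using the same constants; as L approaches H the angle approaches 90°, the strip runs off toward the horizon, and that is why the drawing loop clamps sw to the ground canvas width.

import math

h, H = 15, 180
theta = math.radians(60)   # var theta = 60 * Math.PI/180
alpha = math.radians(30)   # var alpha = 30 * Math.PI/180
omega = math.radians(120)  # var omega = 120 * Math.PI/180

def strip(L):
    # Width and distance of the ground strip sampled for scanline L.
    ang = (math.pi - theta) / 2 + alpha * L / H
    return 2 * h * math.tan(ang) / math.tan(omega / 2), h * math.tan(ang)

for L in (1, 90, 179):
    w, d = strip(L)
    print(f"L={L:3d}  width={w:8.1f}  distance={d:8.1f}")
# L=  1  width=    30.2  distance=    26.2
# L= 90  width=    64.6  distance=    56.0
# L=179  width≈ 5954     distance≈ 5157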
race.ts | import { tee } from "./tee";
import { pump } from "./pump";
import { createDuplex } from "./duplex";
import { wrapAsyncIterableIterator } from "./wrap-async-iterable-iterator";
/**
* Calls all target functions in parallel, and returns the yielded values of the _fastest_ one.
*
* @example
*
* const ping = race(
*
* );
*/
export const race = (...fns: Function[]) => (...args): AsyncIterableIterator<any> => {
return createDuplex((input, output) => {
let primaryInput = input as AsyncIterableIterator<any>;
let wonFn;
fns.forEach((fn, i) => {
let spareInput;
[spareInput, primaryInput] = tee(primaryInput);
const iter = wrapAsyncIterableIterator(fn(...args));
pump(spareInput, (value) => {
return iter.next(value).then(({ value, done }) => {
if (wonFn && wonFn !== fn) {
return;
}
wonFn = fn;
if (done) {
| lse {
output.unshift(value);
}
});
}).then(() => {
if (wonFn === fn) {
output.return();
}
});
});
});
}; | output.return();
} e | conditional_block |
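The doc comment's @example is left blank in the source; as a stand-in, here is the same first-finisher-wins behaviour sketched with Python asyncio. This is a simplified analogue, not the implementation above — it races plain coroutines and cancels the losers, where race() above keeps consuming iterators and merely ignores them once wonFn is set — and the ping coroutines are made-up placeholders.

import asyncio

async def ping(host, delay):
    # Hypothetical probe; the delay stands in for network latency.
    await asyncio.sleep(delay)
    return f"{host} answered"

async def race(*coros):
    # Start every candidate, keep the first result, cancel the rest.
    tasks = [asyncio.ensure_future(c) for c in coros]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    return next(iter(done)).result()

print(asyncio.run(race(ping("a.example", 0.2), ping("b.example", 0.05))))
# -> b.example answered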
race.ts | import { tee } from "./tee"; | import { wrapAsyncIterableIterator } from "./wrap-async-iterable-iterator";
/**
* Calls all target functions in parallel, and returns the yielded values of the _fastest_ one.
*
* @example
*
* const ping = race(
*
* );
*/
export const race = (...fns: Function[]) => (...args): AsyncIterableIterator<any> => {
return createDuplex((input, output) => {
let primaryInput = input as AsyncIterableIterator<any>;
let wonFn;
fns.forEach((fn, i) => {
let spareInput;
[spareInput, primaryInput] = tee(primaryInput);
const iter = wrapAsyncIterableIterator(fn(...args));
pump(spareInput, (value) => {
return iter.next(value).then(({ value, done }) => {
if (wonFn && wonFn !== fn) {
return;
}
wonFn = fn;
if (done) {
output.return();
} else {
output.unshift(value);
}
});
}).then(() => {
if (wonFn === fn) {
output.return();
}
});
});
});
}; | import { pump } from "./pump";
import { createDuplex } from "./duplex"; | random_line_split |
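For reading these rows: each one renders as file_name | prefix, a | where the middle was cut, then suffix | middle | fim_type at the end, and prefix + middle + suffix must reproduce the original file. A minimal sketch of that invariant in Python, using this row's random_line_split middle (the two import lines); the suffix is truncated to its first line for brevity.

row = {
    "file_name": "race.ts",
    "fim_type": "random_line_split",
    "prefix": 'import { tee } from "./tee";\n',
    "middle": 'import { pump } from "./pump";\n'
              'import { createDuplex } from "./duplex";\n',
    "suffix": 'import { wrapAsyncIterableIterator } from "./wrap-async-iterable-iterator";\n',
}

def reassemble(row):
    # A model completion is judged against row["middle"]; gluing the
    # three fields back together must yield the original source text.
    return row["prefix"] + row["middle"] + row["suffix"]

print(reassemble(row))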
telescope.py | '''Tutorial to demonstrate theia.'''
import numpy as np
from theia.running import simulation as sim
from theia.optics import beam as gbeam
from theia.optics import mirror as mir
from theia.helpers import settings
from theia.helpers.units import *
# initialize globals (necessary to use theia in library form)
dic = {'info': False, 'warning': True, 'text': True, 'cad': True,
'fname': 'telescope', 'fclib' : '/usr/lib/freecad/lib',
'antiClip': True, 'short': False}
settings.init(dic)
# Mirrors of telescope
alpha = np.pi/2. - np.arctan(.1)
beta = np.pi/2. + np.arctan(.1)
mirror1 = mir.Mirror(Thickness = 1*cm, Diameter = 5*cm, X = 1*m,
Theta = alpha, Phi = 180.*deg, HRK = 0.90, ARK = 0, Wedge = 0, HRr = 0.90,
HRt = 0.10, ARr = 0.10, ARt = .90, Ref = 'M1',
N = 1.5)
mirror2 = mir.Mirror(Thickness = 1* cm, Diameter = 10*cm,
Y = 20*cm, Theta = beta, Phi = 0., HRK = 0.90,
ARK = 0, Wedge = 0, HRr = 0.90, HRt = 0.1, ARr = 0.1, ARt = 0.90,
Ref = 'M2', N = 1.5)
# Input beam
beam1 = gbeam.userGaussianBeam(Wx = .5*cm, Wy = .5*cm, WDistx = 0, WDisty = 0,
Ref = 'ORI')
# parameters
threshold = -1*W
order = 1
inBeams = [beam1]
optList = [mirror1, mirror2]
# Create simulation object:
simu = sim.Simulation(FName = 'telescope')
simu.OptList = optList
simu.InBeams = inBeams
simu.Order = order |
# Run simulation. Output of this simulation is intended to be compared
# to the output of the telescope.f90 optocad simulation
simu.run()
if __name__ == "__main__":
print(simu)
print(simu.BeamTreeList[0].beamList()) | simu.Threshold = threshold | random_line_split |
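As a sanity check on the geometry: the two Theta values sit symmetrically about 90°, each mirror tilted arctan(0.1) ≈ 5.71° off a right angle. A standalone check, separate from the tutorial script:

import numpy as np

tilt = np.arctan(.1)
alpha = np.pi/2. - tilt
beta = np.pi/2. + tilt
print(np.degrees([tilt, alpha, beta]))  # [ 5.71059314 84.28940686 95.71059314]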
telescope.py | '''Tutorial to demonstrate theia.'''
import numpy as np
from theia.running import simulation as sim
from theia.optics import beam as gbeam
from theia.optics import mirror as mir
from theia.helpers import settings
from theia.helpers.units import *
# initialize globals (necessary to use theia in library form)
dic = {'info': False, 'warning': True, 'text': True, 'cad': True,
'fname': 'telescope', 'fclib' : '/usr/lib/freecad/lib',
'antiClip': True, 'short': False}
settings.init(dic)
# Mirrors of telescope
alpha = np.pi/2. - np.arctan(.1)
beta = np.pi/2. + np.arctan(.1)
mirror1 = mir.Mirror(Thickness = 1*cm, Diameter = 5*cm, X = 1*m,
Theta = alpha, Phi = 180.*deg, HRK = 0.90, ARK = 0, Wedge = 0, HRr = 0.90,
HRt = 0.10, ARr = 0.10, ARt = .90, Ref = 'M1',
N = 1.5)
mirror2 = mir.Mirror(Thickness = 1* cm, Diameter = 10*cm,
Y = 20*cm, Theta = beta, Phi = 0., HRK = 0.90,
ARK = 0, Wedge = 0, HRr = 0.90, HRt = 0.1, ARr = 0.1, ARt = 0.90,
Ref = 'M2', N = 1.5)
# Input beam
beam1 = gbeam.userGaussianBeam(Wx = .5*cm, Wy = .5*cm, WDistx = 0, WDisty = 0,
Ref = 'ORI')
# parameters
threshold = -1*W
order = 1
inBeams = [beam1]
optList = [mirror1, mirror2]
# Create simulation object:
simu = sim.Simulation(FName = 'telescope')
simu.OptList = optList
simu.InBeams = inBeams
simu.Order = order
simu.Threshold = threshold
# Run simulation. Output of this simulation is intended to be compared
# to the output of the telescope.f90 optocad simulation
simu.run()
if __name__ == "__main__":
| print(simu)
print(simu.BeamTreeList[0].beamList()) | conditional_block |
|
module_disk_select_test.py | #
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vendula Poncova <[email protected]>
#
import unittest
from blivet.devices import DiskDevice
from blivet.formats import get_format
from blivet.size import Size
from pyanaconda.modules.common.constants.objects import DISK_SELECTION
from pyanaconda.modules.common.errors.storage import UnavailableStorageError
from pyanaconda.modules.common.structures.validation import ValidationReport
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.disk_selection.selection_interface import DiskSelectionInterface
from pyanaconda.storage.initialization import create_storage
from tests.nosetests.pyanaconda_tests import check_dbus_property
class DiskSelectionInterfaceTestCase(unittest.TestCase):
"""Test DBus interface of the disk selection module."""
def setUp(self):
"""Set up the module."""
self.disk_selection_module = DiskSelectionModule()
self.disk_selection_interface = DiskSelectionInterface(self.disk_selection_module)
def _test_dbus_property(self, *args, **kwargs):
check_dbus_property(
self,
DISK_SELECTION,
self.disk_selection_interface,
*args, **kwargs
)
def selected_disks_property_test(self):
"""Test the selected disks property."""
self._test_dbus_property(
"SelectedDisks",
["sda", "sdb"]
)
def validate_selected_disks_test(self):
"""Test ValidateSelectedDisks."""
storage = create_storage()
self.disk_selection_module.on_storage_changed(storage)
dev1 = DiskDevice(
"dev1",
exists=False,
size=Size("15 GiB"),
fmt=get_format("disklabel")
)
dev2 = DiskDevice(
"dev2",
exists=False,
parents=[dev1],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
dev3 = DiskDevice(
"dev3",
exists=False,
parents=[dev2],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
storage.devicetree._add_device(dev1)
storage.devicetree._add_device(dev2)
storage.devicetree._add_device(dev3)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks([])
)
self.assertEqual(report.is_valid(), True)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also use "
"unselected disks dev2, dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set.",
"You selected disk dev2, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2", "dev3"])
)
self.assertEqual(report.is_valid(), True)
def exclusive_disks_property_test(self):
|
def ignored_disks_property_test(self):
"""Test the ignored disks property."""
self._test_dbus_property(
"IgnoredDisks",
["sda", "sdb"]
)
def protected_disks_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"ProtectedDevices",
["sda", "sdb"]
)
def disk_images_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"DiskImages",
{
"image_1": "/path/1",
"image_2": "/path/2"
}
)
def get_usable_disks_test(self):
"""Test the GetUsableDisks method."""
with self.assertRaises(UnavailableStorageError):
self.disk_selection_interface.GetUsableDisks()
self.disk_selection_module.on_storage_changed(create_storage())
self.assertEqual(self.disk_selection_interface.GetUsableDisks(), [])
| """Test the exclusive disks property."""
self._test_dbus_property(
"ExclusiveDisks",
["sda", "sdb"]
) | identifier_body |
module_disk_select_test.py | #
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vendula Poncova <[email protected]>
#
import unittest
from blivet.devices import DiskDevice
from blivet.formats import get_format
from blivet.size import Size
from pyanaconda.modules.common.constants.objects import DISK_SELECTION
from pyanaconda.modules.common.errors.storage import UnavailableStorageError
from pyanaconda.modules.common.structures.validation import ValidationReport
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.disk_selection.selection_interface import DiskSelectionInterface
from pyanaconda.storage.initialization import create_storage
from tests.nosetests.pyanaconda_tests import check_dbus_property
class DiskSelectionInterfaceTestCase(unittest.TestCase):
"""Test DBus interface of the disk selection module."""
def setUp(self):
"""Set up the module."""
self.disk_selection_module = DiskSelectionModule()
self.disk_selection_interface = DiskSelectionInterface(self.disk_selection_module)
def _test_dbus_property(self, *args, **kwargs):
check_dbus_property(
self,
DISK_SELECTION,
self.disk_selection_interface,
*args, **kwargs
)
def selected_disks_property_test(self):
"""Test the selected disks property."""
self._test_dbus_property(
"SelectedDisks",
["sda", "sdb"]
)
def validate_selected_disks_test(self):
"""Test ValidateSelectedDisks."""
storage = create_storage()
self.disk_selection_module.on_storage_changed(storage)
dev1 = DiskDevice(
"dev1",
exists=False,
size=Size("15 GiB"),
fmt=get_format("disklabel")
)
dev2 = DiskDevice(
"dev2",
exists=False,
parents=[dev1],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
dev3 = DiskDevice(
"dev3",
exists=False,
parents=[dev2],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
storage.devicetree._add_device(dev1)
storage.devicetree._add_device(dev2)
storage.devicetree._add_device(dev3)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks([])
)
self.assertEqual(report.is_valid(), True)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also use "
"unselected disks dev2, dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set.",
"You selected disk dev2, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2", "dev3"])
)
self.assertEqual(report.is_valid(), True)
def exclusive_disks_property_test(self):
"""Test the exclusive disks property."""
self._test_dbus_property(
"ExclusiveDisks",
["sda", "sdb"]
)
def ignored_disks_property_test(self):
"""Test the ignored disks property."""
self._test_dbus_property(
"IgnoredDisks",
["sda", "sdb"]
)
def protected_disks_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"ProtectedDevices",
["sda", "sdb"]
)
def disk_images_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"DiskImages",
{
"image_1": "/path/1",
"image_2": "/path/2"
}
)
def | (self):
"""Test the GetUsableDisks method."""
with self.assertRaises(UnavailableStorageError):
self.disk_selection_interface.GetUsableDisks()
self.disk_selection_module.on_storage_changed(create_storage())
self.assertEqual(self.disk_selection_interface.GetUsableDisks(), [])
| get_usable_disks_test | identifier_name |
module_disk_select_test.py | #
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version. | # ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vendula Poncova <[email protected]>
#
import unittest
from blivet.devices import DiskDevice
from blivet.formats import get_format
from blivet.size import Size
from pyanaconda.modules.common.constants.objects import DISK_SELECTION
from pyanaconda.modules.common.errors.storage import UnavailableStorageError
from pyanaconda.modules.common.structures.validation import ValidationReport
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.disk_selection.selection_interface import DiskSelectionInterface
from pyanaconda.storage.initialization import create_storage
from tests.nosetests.pyanaconda_tests import check_dbus_property
class DiskSelectionInterfaceTestCase(unittest.TestCase):
"""Test DBus interface of the disk selection module."""
def setUp(self):
"""Set up the module."""
self.disk_selection_module = DiskSelectionModule()
self.disk_selection_interface = DiskSelectionInterface(self.disk_selection_module)
def _test_dbus_property(self, *args, **kwargs):
check_dbus_property(
self,
DISK_SELECTION,
self.disk_selection_interface,
*args, **kwargs
)
def selected_disks_property_test(self):
"""Test the selected disks property."""
self._test_dbus_property(
"SelectedDisks",
["sda", "sdb"]
)
def validate_selected_disks_test(self):
"""Test ValidateSelectedDisks."""
storage = create_storage()
self.disk_selection_module.on_storage_changed(storage)
dev1 = DiskDevice(
"dev1",
exists=False,
size=Size("15 GiB"),
fmt=get_format("disklabel")
)
dev2 = DiskDevice(
"dev2",
exists=False,
parents=[dev1],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
dev3 = DiskDevice(
"dev3",
exists=False,
parents=[dev2],
size=Size("6 GiB"),
fmt=get_format("disklabel")
)
storage.devicetree._add_device(dev1)
storage.devicetree._add_device(dev2)
storage.devicetree._add_device(dev3)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks([])
)
self.assertEqual(report.is_valid(), True)
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also use "
"unselected disks dev2, dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2"])
)
self.assertEqual(report.is_valid(), False)
self.assertEqual(report.error_messages, [
"You selected disk dev1, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set.",
"You selected disk dev2, which contains devices that also "
"use unselected disk dev3. You must select or de-select "
"these disks as a set."
])
self.assertEqual(report.warning_messages, [])
report = ValidationReport.from_structure(
self.disk_selection_interface.ValidateSelectedDisks(["dev1", "dev2", "dev3"])
)
self.assertEqual(report.is_valid(), True)
def exclusive_disks_property_test(self):
"""Test the exclusive disks property."""
self._test_dbus_property(
"ExclusiveDisks",
["sda", "sdb"]
)
def ignored_disks_property_test(self):
"""Test the ignored disks property."""
self._test_dbus_property(
"IgnoredDisks",
["sda", "sdb"]
)
def protected_disks_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"ProtectedDevices",
["sda", "sdb"]
)
def disk_images_property_test(self):
"""Test the protected disks property."""
self._test_dbus_property(
"DiskImages",
{
"image_1": "/path/1",
"image_2": "/path/2"
}
)
def get_usable_disks_test(self):
"""Test the GetUsableDisks method."""
with self.assertRaises(UnavailableStorageError):
self.disk_selection_interface.GetUsableDisks()
self.disk_selection_module.on_storage_changed(create_storage())
self.assertEqual(self.disk_selection_interface.GetUsableDisks(), []) | # This program is distributed in the hope that it will be useful, but WITHOUT | random_line_split |
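The invariant these cases pin down — every disk in a device stack is selected or de-selected as a set — fits in a few lines. Below is an illustrative re-implementation of just that rule (a hypothetical helper, not Anaconda's actual validation code):

def missing_dependencies(selected, uses):
    # `uses` maps a disk to the other disks its devices also live on,
    # e.g. the dev1 -> dev2 -> dev3 stack built in the tests above.
    chosen = set(selected)
    problems = {}
    for disk in sorted(chosen):
        missing = sorted(set(uses.get(disk, ())) - chosen)
        if missing:
            problems[disk] = missing
    return problems

uses = {"dev1": ["dev2", "dev3"], "dev2": ["dev3"]}
print(missing_dependencies(["dev1"], uses))                  # {'dev1': ['dev2', 'dev3']}
print(missing_dependencies(["dev1", "dev2"], uses))          # {'dev1': ['dev3'], 'dev2': ['dev3']}
print(missing_dependencies(["dev1", "dev2", "dev3"], uses))  # {}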
client.py | # - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2007-2009 Toms Baugis <[email protected]>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import datetime as dt
from calendar import timegm
import dbus, dbus.mainloop.glib
from gi.repository import GObject as gobject
from hamster.lib import Fact
from hamster.lib import trophies
def from_dbus_fact(fact):
"""unpack the struct into a proper dict"""
return Fact(fact[4],
start_time = dt.datetime.utcfromtimestamp(fact[1]),
end_time = dt.datetime.utcfromtimestamp(fact[2]) if fact[2] else None,
description = fact[3],
activity_id = fact[5],
category = fact[6],
tags = fact[7],
date = dt.datetime.utcfromtimestamp(fact[8]).date(),
delta = dt.timedelta(days = fact[9] // (24 * 60 * 60),
seconds = fact[9] % (24 * 60 * 60)),
id = fact[0]
)
class Storage(gobject.GObject):
"""Hamster client class, communicating to hamster storage daemon via d-bus.
Subscribe to the `tags-changed`, `facts-changed` and `activities-changed`
signals to be notified when an appropriate factoid of interest has been
changed.
    In storage a distinction is made between the classifier of
    activities and the events in the tracking log.
    When talking about an event we use the term 'fact'. For the classifier
    we use the term 'activity'.
    The relationship is: one activity can be used in several facts.
The rest is hopefully obvious. But if not, please file bug reports!
"""
__gsignals__ = {
"tags-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"facts-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"activities-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"toggle-called": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self):
gobject.GObject.__init__(self)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.bus = dbus.SessionBus()
self._connection = None # will be initiated on demand
self.bus.add_signal_receiver(self._on_tags_changed, 'TagsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_facts_changed, 'FactsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_activities_changed, 'ActivitiesChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_toggle_called, 'ToggleCalled', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_dbus_connection_change, 'NameOwnerChanged',
'org.freedesktop.DBus', arg0='org.gnome.Hamster')
@staticmethod
def _to_dict(columns, result_list):
return [dict(zip(columns, row)) for row in result_list]
@property
def conn(self):
if not self._connection:
self._connection = dbus.Interface(self.bus.get_object('org.gnome.Hamster',
'/org/gnome/Hamster'),
dbus_interface='org.gnome.Hamster')
return self._connection
def _on_dbus_connection_change(self, name, old, new):
self._connection = None
def _on_tags_changed(self):
self.emit("tags-changed")
def _on_facts_changed(self):
self.emit("facts-changed")
def _on_activities_changed(self):
self.emit("activities-changed")
def _on_toggle_called(self):
self.emit("toggle-called")
def toggle(self):
"""toggle visibility of the main application window if any"""
self.conn.Toggle()
def get_todays_facts(self):
"""returns facts of the current date, respecting hamster midnight
hamster midnight is stored in gconf, and presented in minutes
"""
return [from_dbus_fact(fact) for fact in self.conn.GetTodaysFacts()]
def get_facts(self, date, end_date = None, search_terms = ""):
"""Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
"""
date = timegm(date.timetuple())
end_date = end_date or 0
if end_date:
end_date = timegm(end_date.timetuple())
return [from_dbus_fact(fact) for fact in self.conn.GetFacts(date,
end_date,
search_terms)]
def get_activities(self, search = ""):
"""returns list of activities name matching search criteria.
results are sorted by most recent usage.
search is case insensitive
"""
return self._to_dict(('name', 'category'), self.conn.GetActivities(search))
def get_categories(self):
"""returns list of categories"""
return self._to_dict(('id', 'name'), self.conn.GetCategories())
def get_tags(self, only_autocomplete = False):
"""returns list of all tags. by default only those that have been set for autocomplete"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTags(only_autocomplete))
def get_tag_ids(self, tags):
"""find tag IDs by name. tags should be a list of labels
if a requested tag had been removed from the autocomplete list, it
        will be resurrected. if a tag with such a label does not exist, it will
be created.
on database changes the `tags-changed` signal is emitted.
"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTagIds(tags))
def update_autocomplete_tags(self, tags):
"""update list of tags that should autocomplete. this list replaces
anything that is currently set"""
self.conn.SetTagsAutocomplete(tags)
def get_fact(self, id):
"""returns fact by it's ID"""
return from_dbus_fact(self.conn.GetFact(id))
def add_fact(self, fact, temporary_activity = False):
"""Add fact. activity name can use the
`[-]start_time[-end_time] activity@category, description #tag1 #tag2`
syntax, or params can be stated explicitly.
Params will take precedence over the derived values.
start_time defaults to current moment.
"""
if not fact.activity:
return None
serialized = fact.serialized_name()
start_timestamp = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_timestamp = fact.end_time or 0
if end_timestamp:
end_timestamp = timegm(end_timestamp.timetuple())
new_id = self.conn.AddFact(serialized,
start_timestamp,
end_timestamp,
temporary_activity)
# TODO - the parsing should happen just once and preferably here
# we should feed (serialized_activity, start_time, end_time) into AddFact and others
if new_id:
trophies.checker.check_fact_based(fact)
return new_id
def stop_tracking(self, end_time = None):
"""Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment"""
end_time = timegm((end_time or dt.datetime.now()).timetuple())
return self.conn.StopTracking(end_time)
def remove_fact(self, fact_id):
"delete fact from database"
self.conn.RemoveFact(fact_id)
def update_fact(self, fact_id, fact, temporary_activity = False):
"""Update fact values. See add_fact for rules.
Update is performed via remove/insert, so the
fact_id after update should not be used anymore. Instead use the ID
from the fact dict that is returned by this function"""
start_time = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_time = fact.end_time or 0
if end_time:
end_time = timegm(end_time.timetuple())
new_id = self.conn.UpdateFact(fact_id,
fact.serialized_name(),
start_time,
end_time,
temporary_activity)
trophies.checker.check_update_based(fact_id, new_id, fact)
return new_id
def get_category_activities(self, category_id = None):
"""Return activities for category. If category is not specified, will
return activities that have no category"""
category_id = category_id or -1
return self._to_dict(('id', 'name', 'category_id', 'category'), self.conn.GetCategoryActivities(category_id))
| def get_activity_by_name(self, activity, category_id = None, resurrect = True):
"""returns activity dict by name and optionally filtering by category.
if activity is found but is marked as deleted, it will be resurrected
unless told otherwise in the resurrect param
"""
category_id = category_id or 0
return self.conn.GetActivityByName(activity, category_id, resurrect)
# category and activity manipulations (normally just via preferences)
def remove_activity(self, id):
self.conn.RemoveActivity(id)
def remove_category(self, id):
self.conn.RemoveCategory(id)
def change_category(self, id, category_id):
return self.conn.ChangeCategory(id, category_id)
def update_activity(self, id, name, category_id):
return self.conn.UpdateActivity(id, name, category_id)
def add_activity(self, name, category_id = -1):
return self.conn.AddActivity(name, category_id)
def update_category(self, id, name):
return self.conn.UpdateCategory(id, name)
def add_category(self, name):
return self.conn.AddCategory(name) | def get_category_id(self, category_name):
"""returns category id by name"""
return self.conn.GetCategoryId(category_name)
| random_line_split |
client.py | # - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2007-2009 Toms Baugis <[email protected]>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import datetime as dt
from calendar import timegm
import dbus, dbus.mainloop.glib
from gi.repository import GObject as gobject
from hamster.lib import Fact
from hamster.lib import trophies
def from_dbus_fact(fact):
"""unpack the struct into a proper dict"""
return Fact(fact[4],
start_time = dt.datetime.utcfromtimestamp(fact[1]),
end_time = dt.datetime.utcfromtimestamp(fact[2]) if fact[2] else None,
description = fact[3],
activity_id = fact[5],
category = fact[6],
tags = fact[7],
date = dt.datetime.utcfromtimestamp(fact[8]).date(),
delta = dt.timedelta(days = fact[9] // (24 * 60 * 60),
seconds = fact[9] % (24 * 60 * 60)),
id = fact[0]
)
class Storage(gobject.GObject):
"""Hamster client class, communicating to hamster storage daemon via d-bus.
Subscribe to the `tags-changed`, `facts-changed` and `activities-changed`
signals to be notified when an appropriate factoid of interest has been
changed.
    In storage a distinction is made between the classifier of
    activities and the events in the tracking log.
    When talking about an event we use the term 'fact'. For the classifier
    we use the term 'activity'.
    The relationship is: one activity can be used in several facts.
The rest is hopefully obvious. But if not, please file bug reports!
"""
__gsignals__ = {
"tags-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"facts-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"activities-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"toggle-called": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self):
gobject.GObject.__init__(self)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.bus = dbus.SessionBus()
self._connection = None # will be initiated on demand
self.bus.add_signal_receiver(self._on_tags_changed, 'TagsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_facts_changed, 'FactsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_activities_changed, 'ActivitiesChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_toggle_called, 'ToggleCalled', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_dbus_connection_change, 'NameOwnerChanged',
'org.freedesktop.DBus', arg0='org.gnome.Hamster')
@staticmethod
def _to_dict(columns, result_list):
return [dict(zip(columns, row)) for row in result_list]
@property
def conn(self):
if not self._connection:
self._connection = dbus.Interface(self.bus.get_object('org.gnome.Hamster',
'/org/gnome/Hamster'),
dbus_interface='org.gnome.Hamster')
return self._connection
def _on_dbus_connection_change(self, name, old, new):
self._connection = None
def _on_tags_changed(self):
self.emit("tags-changed")
def _on_facts_changed(self):
self.emit("facts-changed")
def _on_activities_changed(self):
self.emit("activities-changed")
def _on_toggle_called(self):
self.emit("toggle-called")
def toggle(self):
"""toggle visibility of the main application window if any"""
self.conn.Toggle()
def get_todays_facts(self):
"""returns facts of the current date, respecting hamster midnight
hamster midnight is stored in gconf, and presented in minutes
"""
return [from_dbus_fact(fact) for fact in self.conn.GetTodaysFacts()]
def get_facts(self, date, end_date = None, search_terms = ""):
"""Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
"""
date = timegm(date.timetuple())
end_date = end_date or 0
if end_date:
end_date = timegm(end_date.timetuple())
return [from_dbus_fact(fact) for fact in self.conn.GetFacts(date,
end_date,
search_terms)]
def get_activities(self, search = ""):
"""returns list of activities name matching search criteria.
results are sorted by most recent usage.
search is case insensitive
"""
return self._to_dict(('name', 'category'), self.conn.GetActivities(search))
def get_categories(self):
"""returns list of categories"""
return self._to_dict(('id', 'name'), self.conn.GetCategories())
def get_tags(self, only_autocomplete = False):
"""returns list of all tags. by default only those that have been set for autocomplete"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTags(only_autocomplete))
def get_tag_ids(self, tags):
"""find tag IDs by name. tags should be a list of labels
if a requested tag had been removed from the autocomplete list, it
        will be resurrected. if a tag with such a label does not exist, it will
be created.
on database changes the `tags-changed` signal is emitted.
"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTagIds(tags))
def update_autocomplete_tags(self, tags):
"""update list of tags that should autocomplete. this list replaces
anything that is currently set"""
self.conn.SetTagsAutocomplete(tags)
def get_fact(self, id):
"""returns fact by it's ID"""
return from_dbus_fact(self.conn.GetFact(id))
def add_fact(self, fact, temporary_activity = False):
"""Add fact. activity name can use the
`[-]start_time[-end_time] activity@category, description #tag1 #tag2`
syntax, or params can be stated explicitly.
Params will take precedence over the derived values.
start_time defaults to current moment.
"""
if not fact.activity:
return None
serialized = fact.serialized_name()
start_timestamp = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_timestamp = fact.end_time or 0
if end_timestamp:
|
new_id = self.conn.AddFact(serialized,
start_timestamp,
end_timestamp,
temporary_activity)
# TODO - the parsing should happen just once and preferably here
# we should feed (serialized_activity, start_time, end_time) into AddFact and others
if new_id:
trophies.checker.check_fact_based(fact)
return new_id
def stop_tracking(self, end_time = None):
"""Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment"""
end_time = timegm((end_time or dt.datetime.now()).timetuple())
return self.conn.StopTracking(end_time)
def remove_fact(self, fact_id):
"delete fact from database"
self.conn.RemoveFact(fact_id)
def update_fact(self, fact_id, fact, temporary_activity = False):
"""Update fact values. See add_fact for rules.
Update is performed via remove/insert, so the
fact_id after update should not be used anymore. Instead use the ID
from the fact dict that is returned by this function"""
start_time = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_time = fact.end_time or 0
if end_time:
end_time = timegm(end_time.timetuple())
new_id = self.conn.UpdateFact(fact_id,
fact.serialized_name(),
start_time,
end_time,
temporary_activity)
trophies.checker.check_update_based(fact_id, new_id, fact)
return new_id
def get_category_activities(self, category_id = None):
"""Return activities for category. If category is not specified, will
return activities that have no category"""
category_id = category_id or -1
return self._to_dict(('id', 'name', 'category_id', 'category'), self.conn.GetCategoryActivities(category_id))
def get_category_id(self, category_name):
"""returns category id by name"""
return self.conn.GetCategoryId(category_name)
def get_activity_by_name(self, activity, category_id = None, resurrect = True):
"""returns activity dict by name and optionally filtering by category.
if activity is found but is marked as deleted, it will be resurrected
unless told otherwise in the resurrect param
"""
category_id = category_id or 0
return self.conn.GetActivityByName(activity, category_id, resurrect)
# category and activity manipulations (normally just via preferences)
def remove_activity(self, id):
self.conn.RemoveActivity(id)
def remove_category(self, id):
self.conn.RemoveCategory(id)
def change_category(self, id, category_id):
return self.conn.ChangeCategory(id, category_id)
def update_activity(self, id, name, category_id):
return self.conn.UpdateActivity(id, name, category_id)
def add_activity(self, name, category_id = -1):
return self.conn.AddActivity(name, category_id)
def update_category(self, id, name):
return self.conn.UpdateCategory(id, name)
def add_category(self, name):
return self.conn.AddCategory(name)
| end_timestamp = timegm(end_timestamp.timetuple()) | conditional_block |
client.py | # - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2007-2009 Toms Baugis <[email protected]>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import datetime as dt
from calendar import timegm
import dbus, dbus.mainloop.glib
from gi.repository import GObject as gobject
from hamster.lib import Fact
from hamster.lib import trophies
def from_dbus_fact(fact):
"""unpack the struct into a proper dict"""
return Fact(fact[4],
start_time = dt.datetime.utcfromtimestamp(fact[1]),
end_time = dt.datetime.utcfromtimestamp(fact[2]) if fact[2] else None,
description = fact[3],
activity_id = fact[5],
category = fact[6],
tags = fact[7],
date = dt.datetime.utcfromtimestamp(fact[8]).date(),
delta = dt.timedelta(days = fact[9] // (24 * 60 * 60),
seconds = fact[9] % (24 * 60 * 60)),
id = fact[0]
)
class Storage(gobject.GObject):
"""Hamster client class, communicating to hamster storage daemon via d-bus.
Subscribe to the `tags-changed`, `facts-changed` and `activities-changed`
signals to be notified when an appropriate factoid of interest has been
changed.
    In storage a distinction is made between the classifier of
    activities and the events in the tracking log.
    When talking about an event we use the term 'fact'. For the classifier
    we use the term 'activity'.
    The relationship is: one activity can be used in several facts.
The rest is hopefully obvious. But if not, please file bug reports!
"""
__gsignals__ = {
"tags-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"facts-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"activities-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"toggle-called": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self):
gobject.GObject.__init__(self)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.bus = dbus.SessionBus()
self._connection = None # will be initiated on demand
self.bus.add_signal_receiver(self._on_tags_changed, 'TagsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_facts_changed, 'FactsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_activities_changed, 'ActivitiesChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_toggle_called, 'ToggleCalled', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_dbus_connection_change, 'NameOwnerChanged',
'org.freedesktop.DBus', arg0='org.gnome.Hamster')
@staticmethod
def _to_dict(columns, result_list):
return [dict(zip(columns, row)) for row in result_list]
@property
def conn(self):
if not self._connection:
self._connection = dbus.Interface(self.bus.get_object('org.gnome.Hamster',
'/org/gnome/Hamster'),
dbus_interface='org.gnome.Hamster')
return self._connection
def _on_dbus_connection_change(self, name, old, new):
self._connection = None
def _on_tags_changed(self):
self.emit("tags-changed")
def _on_facts_changed(self):
self.emit("facts-changed")
def _on_activities_changed(self):
self.emit("activities-changed")
def _on_toggle_called(self):
self.emit("toggle-called")
def toggle(self):
"""toggle visibility of the main application window if any"""
self.conn.Toggle()
def get_todays_facts(self):
|
def get_facts(self, date, end_date = None, search_terms = ""):
"""Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
"""
date = timegm(date.timetuple())
end_date = end_date or 0
if end_date:
end_date = timegm(end_date.timetuple())
return [from_dbus_fact(fact) for fact in self.conn.GetFacts(date,
end_date,
search_terms)]
def get_activities(self, search = ""):
"""returns list of activities name matching search criteria.
results are sorted by most recent usage.
search is case insensitive
"""
return self._to_dict(('name', 'category'), self.conn.GetActivities(search))
def get_categories(self):
"""returns list of categories"""
return self._to_dict(('id', 'name'), self.conn.GetCategories())
def get_tags(self, only_autocomplete = False):
"""returns list of all tags. by default only those that have been set for autocomplete"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTags(only_autocomplete))
def get_tag_ids(self, tags):
"""find tag IDs by name. tags should be a list of labels
if a requested tag had been removed from the autocomplete list, it
        will be resurrected. if a tag with such a label does not exist, it will
be created.
on database changes the `tags-changed` signal is emitted.
"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTagIds(tags))
def update_autocomplete_tags(self, tags):
"""update list of tags that should autocomplete. this list replaces
anything that is currently set"""
self.conn.SetTagsAutocomplete(tags)
def get_fact(self, id):
"""returns fact by it's ID"""
return from_dbus_fact(self.conn.GetFact(id))
def add_fact(self, fact, temporary_activity = False):
"""Add fact. activity name can use the
`[-]start_time[-end_time] activity@category, description #tag1 #tag2`
syntax, or params can be stated explicitly.
Params will take precedence over the derived values.
start_time defaults to current moment.
"""
if not fact.activity:
return None
serialized = fact.serialized_name()
start_timestamp = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_timestamp = fact.end_time or 0
if end_timestamp:
end_timestamp = timegm(end_timestamp.timetuple())
new_id = self.conn.AddFact(serialized,
start_timestamp,
end_timestamp,
temporary_activity)
# TODO - the parsing should happen just once and preferably here
# we should feed (serialized_activity, start_time, end_time) into AddFact and others
if new_id:
trophies.checker.check_fact_based(fact)
return new_id
def stop_tracking(self, end_time = None):
"""Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment"""
end_time = timegm((end_time or dt.datetime.now()).timetuple())
return self.conn.StopTracking(end_time)
def remove_fact(self, fact_id):
"delete fact from database"
self.conn.RemoveFact(fact_id)
def update_fact(self, fact_id, fact, temporary_activity = False):
"""Update fact values. See add_fact for rules.
Update is performed via remove/insert, so the
fact_id after update should not be used anymore. Instead use the ID
from the fact dict that is returned by this function"""
start_time = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_time = fact.end_time or 0
if end_time:
end_time = timegm(end_time.timetuple())
new_id = self.conn.UpdateFact(fact_id,
fact.serialized_name(),
start_time,
end_time,
temporary_activity)
trophies.checker.check_update_based(fact_id, new_id, fact)
return new_id
def get_category_activities(self, category_id = None):
"""Return activities for category. If category is not specified, will
return activities that have no category"""
category_id = category_id or -1
return self._to_dict(('id', 'name', 'category_id', 'category'), self.conn.GetCategoryActivities(category_id))
def get_category_id(self, category_name):
"""returns category id by name"""
return self.conn.GetCategoryId(category_name)
def get_activity_by_name(self, activity, category_id = None, resurrect = True):
"""returns activity dict by name and optionally filtering by category.
if activity is found but is marked as deleted, it will be resurrected
unless told otherwise in the resurrect param
"""
category_id = category_id or 0
return self.conn.GetActivityByName(activity, category_id, resurrect)
# category and activity manipulations (normally just via preferences)
def remove_activity(self, id):
self.conn.RemoveActivity(id)
def remove_category(self, id):
self.conn.RemoveCategory(id)
def change_category(self, id, category_id):
return self.conn.ChangeCategory(id, category_id)
def update_activity(self, id, name, category_id):
return self.conn.UpdateActivity(id, name, category_id)
def add_activity(self, name, category_id = -1):
return self.conn.AddActivity(name, category_id)
def update_category(self, id, name):
return self.conn.UpdateCategory(id, name)
def add_category(self, name):
return self.conn.AddCategory(name)
| """returns facts of the current date, respecting hamster midnight
hamster midnight is stored in gconf, and presented in minutes
"""
return [from_dbus_fact(fact) for fact in self.conn.GetTodaysFacts()] | identifier_body |
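The timestamp convention running through this client is easy to miss: naive datetimes are packed with calendar.timegm(value.timetuple()) on the way out and unpacked with datetime.utcfromtimestamp() on the way back, so the pair round-trips the wall-clock value with no timezone shift. A small standalone check:

import datetime as dt
from calendar import timegm

start = dt.datetime(2010, 5, 1, 12, 30)          # naive, like Fact times
packed = timegm(start.timetuple())               # what add_fact() sends
unpacked = dt.datetime.utcfromtimestamp(packed)  # what from_dbus_fact() does
assert unpacked == start
print(packed, unpacked)  # 1272717000 2010-05-01 12:30:00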
client.py | # - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2007-2009 Toms Baugis <[email protected]>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import datetime as dt
from calendar import timegm
import dbus, dbus.mainloop.glib
from gi.repository import GObject as gobject
from hamster.lib import Fact
from hamster.lib import trophies
def from_dbus_fact(fact):
"""unpack the struct into a proper dict"""
return Fact(fact[4],
start_time = dt.datetime.utcfromtimestamp(fact[1]),
end_time = dt.datetime.utcfromtimestamp(fact[2]) if fact[2] else None,
description = fact[3],
activity_id = fact[5],
category = fact[6],
tags = fact[7],
date = dt.datetime.utcfromtimestamp(fact[8]).date(),
delta = dt.timedelta(days = fact[9] // (24 * 60 * 60),
seconds = fact[9] % (24 * 60 * 60)),
id = fact[0]
)
class Storage(gobject.GObject):
"""Hamster client class, communicating to hamster storage daemon via d-bus.
Subscribe to the `tags-changed`, `facts-changed` and `activities-changed`
signals to be notified when an appropriate factoid of interest has been
changed.
    In storage a distinction is made between the classifier of
    activities and the events in the tracking log.
    When talking about an event we use the term 'fact'. For the classifier
    we use the term 'activity'.
    The relationship is: one activity can be used in several facts.
The rest is hopefully obvious. But if not, please file bug reports!
"""
__gsignals__ = {
"tags-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"facts-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"activities-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"toggle-called": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self):
gobject.GObject.__init__(self)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.bus = dbus.SessionBus()
self._connection = None # will be initiated on demand
self.bus.add_signal_receiver(self._on_tags_changed, 'TagsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_facts_changed, 'FactsChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_activities_changed, 'ActivitiesChanged', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_toggle_called, 'ToggleCalled', 'org.gnome.Hamster')
self.bus.add_signal_receiver(self._on_dbus_connection_change, 'NameOwnerChanged',
'org.freedesktop.DBus', arg0='org.gnome.Hamster')
@staticmethod
def _to_dict(columns, result_list):
return [dict(zip(columns, row)) for row in result_list]
@property
def conn(self):
if not self._connection:
self._connection = dbus.Interface(self.bus.get_object('org.gnome.Hamster',
'/org/gnome/Hamster'),
dbus_interface='org.gnome.Hamster')
return self._connection
def _on_dbus_connection_change(self, name, old, new):
self._connection = None
def _on_tags_changed(self):
self.emit("tags-changed")
def _on_facts_changed(self):
self.emit("facts-changed")
def _on_activities_changed(self):
self.emit("activities-changed")
def _on_toggle_called(self):
self.emit("toggle-called")
def toggle(self):
"""toggle visibility of the main application window if any"""
self.conn.Toggle()
def get_todays_facts(self):
"""returns facts of the current date, respecting hamster midnight
hamster midnight is stored in gconf, and presented in minutes
"""
return [from_dbus_fact(fact) for fact in self.conn.GetTodaysFacts()]
def get_facts(self, date, end_date = None, search_terms = ""):
"""Returns facts for the time span matching the optional filter criteria.
In search terms comma (",") translates to boolean OR and space (" ")
to boolean AND.
Filter is applied to tags, categories, activity names and description
"""
date = timegm(date.timetuple())
end_date = end_date or 0
if end_date:
end_date = timegm(end_date.timetuple())
return [from_dbus_fact(fact) for fact in self.conn.GetFacts(date,
end_date,
search_terms)]
def get_activities(self, search = ""):
"""returns list of activities name matching search criteria.
results are sorted by most recent usage.
search is case insensitive
"""
return self._to_dict(('name', 'category'), self.conn.GetActivities(search))
def get_categories(self):
"""returns list of categories"""
return self._to_dict(('id', 'name'), self.conn.GetCategories())
def get_tags(self, only_autocomplete = False):
"""returns list of all tags. by default only those that have been set for autocomplete"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTags(only_autocomplete))
def get_tag_ids(self, tags):
"""find tag IDs by name. tags should be a list of labels
if a requested tag had been removed from the autocomplete list, it
        will be resurrected. if a tag with such a label does not exist, it will
be created.
on database changes the `tags-changed` signal is emitted.
"""
return self._to_dict(('id', 'name', 'autocomplete'), self.conn.GetTagIds(tags))
def update_autocomplete_tags(self, tags):
"""update list of tags that should autocomplete. this list replaces
anything that is currently set"""
self.conn.SetTagsAutocomplete(tags)
def get_fact(self, id):
"""returns fact by it's ID"""
return from_dbus_fact(self.conn.GetFact(id))
def add_fact(self, fact, temporary_activity = False):
"""Add fact. activity name can use the
`[-]start_time[-end_time] activity@category, description #tag1 #tag2`
syntax, or params can be stated explicitly.
Params will take precedence over the derived values.
start_time defaults to current moment.
"""
if not fact.activity:
return None
serialized = fact.serialized_name()
start_timestamp = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_timestamp = fact.end_time or 0
if end_timestamp:
end_timestamp = timegm(end_timestamp.timetuple())
new_id = self.conn.AddFact(serialized,
start_timestamp,
end_timestamp,
temporary_activity)
# TODO - the parsing should happen just once and preferably here
# we should feed (serialized_activity, start_time, end_time) into AddFact and others
if new_id:
trophies.checker.check_fact_based(fact)
return new_id
def stop_tracking(self, end_time = None):
"""Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment"""
end_time = timegm((end_time or dt.datetime.now()).timetuple())
return self.conn.StopTracking(end_time)
def remove_fact(self, fact_id):
"delete fact from database"
self.conn.RemoveFact(fact_id)
def update_fact(self, fact_id, fact, temporary_activity = False):
"""Update fact values. See add_fact for rules.
Update is performed via remove/insert, so the
fact_id after update should not be used anymore. Instead use the
new ID returned by this function"""
start_time = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_time = fact.end_time or 0
if end_time:
end_time = timegm(end_time.timetuple())
new_id = self.conn.UpdateFact(fact_id,
fact.serialized_name(),
start_time,
end_time,
temporary_activity)
trophies.checker.check_update_based(fact_id, new_id, fact)
return new_id
def get_category_activities(self, category_id = None):
"""Return activities for category. If category is not specified, will
return activities that have no category"""
category_id = category_id or -1
return self._to_dict(('id', 'name', 'category_id', 'category'), self.conn.GetCategoryActivities(category_id))
def get_category_id(self, category_name):
"""returns category id by name"""
return self.conn.GetCategoryId(category_name)
def get_activity_by_name(self, activity, category_id = None, resurrect = True):
"""returns activity dict by name and optionally filtering by category.
if activity is found but is marked as deleted, it will be resurrected
unless told otherwise in the resurrect param
"""
category_id = category_id or 0
return self.conn.GetActivityByName(activity, category_id, resurrect)
# category and activity manipulations (normally just via preferences)
def remove_activity(self, id):
self.conn.RemoveActivity(id)
def remove_category(self, id):
self.conn.RemoveCategory(id)
def change_category(self, id, category_id):
return self.conn.ChangeCategory(id, category_id)
def update_activity(self, id, name, category_id):
return self.conn.UpdateActivity(id, name, category_id)
def add_activity(self, name, category_id = -1):
return self.conn.AddActivity(name, category_id)
def | (self, id, name):
return self.conn.UpdateCategory(id, name)
def add_category(self, name):
return self.conn.AddCategory(name)
| update_category | identifier_name |
is_integer.rs | use integer::Integer;
use malachite_base::num::conversion::traits::IsInteger;
impl<'a> IsInteger for &'a Integer {
/// Determines whether an `Integer` is an integer. It always returns `true`.
///
/// $f(x) = \textrm{true}$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
/// | /// extern crate malachite_base;
/// extern crate malachite_nz;
///
/// use malachite_base::num::basic::traits::{NegativeOne, One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_nz::integer::Integer;
///
/// assert_eq!(Integer::ZERO.is_integer(), true);
/// assert_eq!(Integer::ONE.is_integer(), true);
/// assert_eq!(Integer::from(100).is_integer(), true);
/// assert_eq!(Integer::NEGATIVE_ONE.is_integer(), true);
/// assert_eq!(Integer::from(-100).is_integer(), true);
/// ```
#[inline]
fn is_integer(self) -> bool {
true
}
} | /// # Examples
/// ``` | random_line_split |
is_integer.rs | use integer::Integer;
use malachite_base::num::conversion::traits::IsInteger;
impl<'a> IsInteger for &'a Integer {
/// Determines whether an `Integer` is an integer. It always returns `true`.
///
/// $f(x) = \textrm{true}$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// ```
/// extern crate malachite_base;
/// extern crate malachite_nz;
///
/// use malachite_base::num::basic::traits::{NegativeOne, One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_nz::integer::Integer;
///
/// assert_eq!(Integer::ZERO.is_integer(), true);
/// assert_eq!(Integer::ONE.is_integer(), true);
/// assert_eq!(Integer::from(100).is_integer(), true);
/// assert_eq!(Integer::NEGATIVE_ONE.is_integer(), true);
/// assert_eq!(Integer::from(-100).is_integer(), true);
/// ```
#[inline]
fn | (self) -> bool {
true
}
}
| is_integer | identifier_name |
is_integer.rs | use integer::Integer;
use malachite_base::num::conversion::traits::IsInteger;
impl<'a> IsInteger for &'a Integer {
/// Determines whether an `Integer` is an integer. It always returns `true`.
///
/// $f(x) = \textrm{true}$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// ```
/// extern crate malachite_base;
/// extern crate malachite_nz;
///
/// use malachite_base::num::basic::traits::{NegativeOne, One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_nz::integer::Integer;
///
/// assert_eq!(Integer::ZERO.is_integer(), true);
/// assert_eq!(Integer::ONE.is_integer(), true);
/// assert_eq!(Integer::from(100).is_integer(), true);
/// assert_eq!(Integer::NEGATIVE_ONE.is_integer(), true);
/// assert_eq!(Integer::from(-100).is_integer(), true);
/// ```
#[inline]
fn is_integer(self) -> bool |
}
| {
true
} | identifier_body |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
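// Both constants count ClockTick events: a tile removal animates for
// REMOVE_DELAY ticks, and the success/failure sound fires once the
// countdown reaches REMOVE_SOUND_AT, partway through the flip.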
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if !state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> |
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_next = value != 0;
} else if kind == 1 {
if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= //
| {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if !action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
} | identifier_body |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
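// Both constants count ClockTick events: a tile removal animates for
// REMOVE_DELAY ticks, and the success/failure sound fires once the
// countdown reaches REMOVE_SOUND_AT, partway through the flip.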
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if !state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
|
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if !action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_next = value != 0;
} else if kind == 1 {
if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= //
| {
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
} | conditional_block |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
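// Both constants count ClockTick events: a tile removal animates for
// REMOVE_DELAY ticks, and the success/failure sound fires once the
// countdown reaches REMOVE_SOUND_AT, partway through the flip.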
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if !state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if !action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn | (&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_next = value != 0;
} else if kind == 1 {
if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= //
| undo | identifier_name |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
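// Both constants count ClockTick events: a tile removal animates for
// REMOVE_DELAY ticks, and the success/failure sound fires once the
// countdown reaches REMOVE_SOUND_AT, partway through the flip.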
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if !state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if !action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() { | if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= // | if kind == 0 {
self.show_next = value != 0;
} else if kind == 1 { | random_line_split |
html_generator.py | def generate_concept_HTML(concept_title, concept_description):
html_text_1 = '''
<div class="concept">
<div class="concept-title">
''' + concept_title
html_text_2 = '''
</div>
<div class="concept-description">
''' + concept_description
html_text_3 = '''
</div>
</div>'''
full_html_text = html_text_1 + html_text_2 + html_text_3
return full_html_text
def get_title(concept):
start_location = concept.find('TITLE:')
end_location = concept.find('DESCRIPTION:')
title = concept[start_location+7 : end_location-1]
return title
def get_description(concept):
start_location = concept.find('DESCRIPTION:')
description = concept[start_location+13 :]
return description
def get_concept_by_number(text, concept_number):
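# scans forward one 'TITLE:' block per call; once no 'TITLE:' remains,
# find() returns -1 and the slice yields '', which ends the caller's loop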
counter = 0
while counter < concept_number:
counter = counter + 1
next_concept_start = text.find('TITLE:')
next_concept_end = text.find('TITLE:', next_concept_start + 1)
concept = text[next_concept_start:next_concept_end]
text = text[next_concept_end:]
return concept
TEST_TEXT = """TITLE: Programming Language
DESCRIPTION: Programming languages are used by programmers to tell a computer what to do. Python is one example of a programming language.
TITLE: Python
DESCRIPTION: When you write Python code and "Run" the code, a Python Interpreter converts the written code into a set of instructions that the computer can understand and execute.
TITLE: Python Expressions
DESCRIPTION: In Python an "expression" is a legal Python statement. For example: print 2 + 2 is a valid expression, but print 2 + (without a number at the end) is not.
TITLE: What is a variable in Python?
DESCRIPTION: Variables give programmers a way to give names to values. If my_variable is a variable with a value of 2, then the following code would print out 0:
print my_variable - my_variable """
def generate_all_html(text):
|
print generate_all_html(TEST_TEXT) | current_concept_number = 1
concept = get_concept_by_number(text, current_concept_number)
all_html = ''
while concept != '':
title = get_title(concept)
description = get_description(concept)
concept_html = generate_concept_HTML(title, description)
all_html = all_html + concept_html
current_concept_number = current_concept_number + 1
concept = get_concept_by_number(text, current_concept_number)
return all_html | identifier_body |
html_generator.py | def generate_concept_HTML(concept_title, concept_description):
html_text_1 = '''
<div class="concept">
<div class="concept-title">
''' + concept_title
html_text_2 = '''
</div>
<div class="concept-description">
''' + concept_description
html_text_3 = '''
</div>
</div>'''
full_html_text = html_text_1 + html_text_2 + html_text_3
return full_html_text
def get_title(concept):
start_location = concept.find('TITLE:')
end_location = concept.find('DESCRIPTION:')
title = concept[start_location+7 : end_location-1]
return title
def get_description(concept):
start_location = concept.find('DESCRIPTION:')
description = concept[start_location+13 :]
return description
def get_concept_by_number(text, concept_number):
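# scans forward one 'TITLE:' block per call; once no 'TITLE:' remains,
# find() returns -1 and the slice yields '', which ends the caller's loop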
counter = 0
while counter < concept_number:
counter = counter + 1
next_concept_start = text.find('TITLE:')
next_concept_end = text.find('TITLE:', next_concept_start + 1)
concept = text[next_concept_start:next_concept_end]
text = text[next_concept_end:]
return concept
TEST_TEXT = """TITLE: Programming Language
| TITLE: What is a variable in Python?
DESCRIPTION: Variables give programmers a way to give names to values. If my_variable is a variable with a value of 2, then the following code would print out 0:
print my_variable - my_variable """
def generate_all_html(text):
current_concept_number = 1
concept = get_concept_by_number(text, current_concept_number)
all_html = ''
while concept != '':
title = get_title(concept)
description = get_description(concept)
concept_html = generate_concept_HTML(title, description)
all_html = all_html + concept_html
current_concept_number = current_concept_number + 1
concept = get_concept_by_number(text, current_concept_number)
return all_html
print generate_all_html(TEST_TEXT) | DESCRIPTION: Programming languages are used by programmers to tell a computer what to do. Python is one example of a programming language.
TITLE: Python
DESCRIPTION: When you write Python code and "Run" the code, a Python Interpreter converts the written code into a set of instructions that the computer can understand and execute.
TITLE: Python Expressions
DESCRIPTION: In Python an "expression" is a legal Python statement. For example: print 2 + 2 is a valid expression, but print 2 + (without a number at the end) is not.
| random_line_split |
html_generator.py | def generate_concept_HTML(concept_title, concept_description):
html_text_1 = '''
<div class="concept">
<div class="concept-title">
''' + concept_title
html_text_2 = '''
</div>
<div class="concept-description">
''' + concept_description
html_text_3 = '''
</div>
</div>'''
full_html_text = html_text_1 + html_text_2 + html_text_3
return full_html_text
def get_title(concept):
start_location = concept.find('TITLE:')
end_location = concept.find('DESCRIPTION:')
title = concept[start_location+7 : end_location-1]
return title
def get_description(concept):
start_location = concept.find('DESCRIPTION:')
description = concept[start_location+13 :]
return description
def get_concept_by_number(text, concept_number):
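# scans forward one 'TITLE:' block per call; once no 'TITLE:' remains,
# find() returns -1 and the slice yields '', which ends the caller's loop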
counter = 0
while counter < concept_number:
counter = counter + 1
next_concept_start = text.find('TITLE:')
next_concept_end = text.find('TITLE:', next_concept_start + 1)
concept = text[next_concept_start:next_concept_end]
text = text[next_concept_end:]
return concept
TEST_TEXT = """TITLE: Programming Language
DESCRIPTION: Programming languages are used by programmers to tell a computer what to do. Python is one example of a programming language.
TITLE: Python
DESCRIPTION: When you write Python code and "Run" the code, a Python Interpreter converts the written code into a set of instructions that the computer can understand and execute.
TITLE: Python Expressions
DESCRIPTION: In Python an "expression" is a legal Python statement. For example: print 2 + 2 is a valid expression, but print 2 + (without a number at the end) is not.
TITLE: What is a variable in Python?
DESCRIPTION: Variables give programmers a way to give names to values. If my_variable is a variable with a value of 2, then the following code would print out 0:
print my_variable - my_variable """
def generate_all_html(text):
current_concept_number = 1
concept = get_concept_by_number(text, current_concept_number)
all_html = ''
while concept != '':
|
return all_html
print generate_all_html(TEST_TEXT) | title = get_title(concept)
description = get_description(concept)
concept_html = generate_concept_HTML(title, description)
all_html = all_html + concept_html
current_concept_number = current_concept_number + 1
concept = get_concept_by_number(text, current_concept_number) | conditional_block |
html_generator.py | def | (concept_title, concept_description):
html_text_1 = '''
<div class="concept">
<div class="concept-title">
''' + concept_title
html_text_2 = '''
</div>
<div class="concept-description">
''' + concept_description
html_text_3 = '''
</div>
</div>'''
full_html_text = html_text_1 + html_text_2 + html_text_3
return full_html_text
def get_title(concept):
start_location = concept.find('TITLE:')
end_location = concept.find('DESCRIPTION:')
title = concept[start_location+7 : end_location-1]
return title
def get_description(concept):
start_location = concept.find('DESCRIPTION:')
description = concept[start_location+13 :]
return description
def get_concept_by_number(text, concept_number):
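# scans forward one 'TITLE:' block per call; once no 'TITLE:' remains,
# find() returns -1 and the slice yields '', which ends the caller's loop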
counter = 0
while counter < concept_number:
counter = counter + 1
next_concept_start = text.find('TITLE:')
next_concept_end = text.find('TITLE:', next_concept_start + 1)
concept = text[next_concept_start:next_concept_end]
text = text[next_concept_end:]
return concept
TEST_TEXT = """TITLE: Programming Language
DESCRIPTION: Programming languages are used by programmers to tell a computer what to do. Python is one example of a programming language.
TITLE: Python
DESCRIPTION: When you write Python code and "Run" the code, a Python Interpreter converts the written code into a set of instructions that the computer can understand and execute.
TITLE: Python Expressions
DESCRIPTION: In Python an "expression" is a legal Python statement. For example: print 2 + 2 is a valid expression, but print 2 + (without a number at the end) is not.
TITLE: What is a variable in Python?
DESCRIPTION: Variables give programmers a way to give names to values. If my_variable is a variable with a value of 2, then the following code would print out 0:
print my_variable - my_variable """
def generate_all_html(text):
current_concept_number = 1
concept = get_concept_by_number(text, current_concept_number)
all_html = ''
while concept != '':
title = get_title(concept)
description = get_description(concept)
concept_html = generate_concept_HTML(title, description)
all_html = all_html + concept_html
current_concept_number = current_concept_number + 1
concept = get_concept_by_number(text, current_concept_number)
return all_html
print generate_all_html(TEST_TEXT) | generate_concept_HTML | identifier_name |
connect.py | import requests
import os
def post_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function posts data to ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
value: Value to be sent
timestamp: Optional, for custom timestamp
context: Optional, for custom context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
data = {ubi_var:{"value": value}}
if (timestamp!=None):
data[ubi_var]["timestamp"]=timestamp
if (context!=None):
data[ubi_var]["context"]=context
r = requests.post(url=url, headers=headers, json= data)
except Exception as e:
return e
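# usage sketch (token/device/variable names below are made up):
# post_var("BBFF-my-token", "weather-station", "temperature", 21.5)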
def get_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
| '''
This function gets data from ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
Return:
Returns, in this order: value, timestamp, context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/" + ubi_var + "/values?page_size=1"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
r = requests.get(url=url, headers=headers)
return r.json()[0]['value'], r.json()[0]['timestamp'], r.json()[0]['context']
except Exception as e:
return e | identifier_body |
|
connect.py | import requests
import os
def | (token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function posts data to ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
value: Value to be sent
timestamp: Optional, for custom timestamp
context: Optional, for custom context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
data = {ubi_var:{"value": value}}
if (timestamp!=None):
data[ubi_var]["timestamp"]=timestamp
if (context!=None):
data[ubi_var]["context"]=context
r = requests.post(url=url, headers=headers, json= data)
except Exception as e:
return e
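# usage sketch (token/device/variable names below are made up):
# post_var("BBFF-my-token", "weather-station", "temperature", 21.5)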
def get_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function gets data from ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
Return:
Returns, in this order: value, timestamp, context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/" + ubi_var + "/values?page_size=1"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
r = requests.get(url=url, headers=headers)
return r.json()[0]['value'], r.json()[0]['timestamp'], r.json()[0]['context']
except Exception as e:
return e
| post_var | identifier_name |
connect.py | import requests
import os
def post_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function posts data to ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
value: Value to be sent
timestamp: Optional, for custom timestamp
context: Optional, for custom context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
data = {ubi_var:{"value": value}}
if (timestamp!=None):
data[ubi_var]["timestamp"]=timestamp
if (context!=None):
|
r = requests.post(url=url, headers=headers, json= data)
except Exception as e:
return e
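# usage sketch (token/device/variable names below are made up):
# post_var("BBFF-my-token", "weather-station", "temperature", 21.5)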
def get_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function gets data from ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
Return:
Returns, in this order: value, timestamp, context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/" + ubi_var + "/values?page_size=1"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
r = requests.get(url=url, headers=headers)
return r.json()[0]['value'], r.json()[0]['timestamp'], r.json()[0]['context']
except Exception as e:
return e
| data[ubi_var]["context"]=context | conditional_block |
connect.py | import requests
import os
def post_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function posts data to ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
value: Value to be sent
timestamp: Optional, for custom timestamp
context: Optional, for custom context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
data = {ubi_var:{"value": value}}
if (timestamp!=None):
data[ubi_var]["timestamp"]=timestamp
if (context!=None):
data[ubi_var]["context"]=context
r = requests.post(url=url, headers=headers, json= data)
except Exception as e:
return e
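# usage sketch (token/device/variable names below are made up):
# post_var("BBFF-my-token", "weather-station", "temperature", 21.5)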
def get_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function gets data from ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
Return: |
Returns in this order the next parameters: value, timestamp, context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/" + ubi_var + "/values?page_size=1"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
r = requests.get(url=url, headers=headers)
return r.json()[0]['value'], r.json()[0]['timestamp'], r.json()[0]['context']
except Exception as e:
return e | random_line_split |
|
migrations.js | // File describing all migrations and their upward/downward changes
// for API Usage Information see https://github.com/percolatestudio/meteor-migrations
import {Meteor} from 'meteor/meteor';
import {ImageResources} from '../imports/modules/constants';
/* eslint-disable lodash/prefer-lodash-method */
if(Meteor.isServer) {
Migrations.config({
log: true,
logIfLatest: false
});
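// pending migrations are then applied with e.g. Migrations.migrateTo('latest') from a server startup hook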
Migrations.add({
version: 1,
name: 'Adds profileImage field to every user if not already there. Uses the id `Default/default_<gender-name>`.',
up: () => {
const users = Meteor.users.find().fetch(); //eslint-disable-line
_(users)
.filter((u) => _.isNil(u.profile.profileImage))
.forEach((user) => {
const gender = user.profile.gender;
Meteor.users.update({_id: user._id}, {
$set: {
'profile.profileImage': ImageResources.profile.defaultProfileImageUrl(gender)
}
});
});
},
down: () => Meteor.users.update({}, {$unset: {'profile.profileImage': ""}}, {validate: false, multi: true})
});
Migrations.add({
version: 2,
name: 'Move api details from root user object to a stressApi sub-document',
up: () => {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {  // per-user transform; thru() would hand the callback the whole array at once
user.stressApi = {
apiKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
};
delete user.apiAuthKey;
delete user.apiAuthType;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
apiAuthKey: "",
apiAuthType: ""
},
$set: {
"stressApi.apiKey": user.stressApi.apiKey,
"stressApi.apiAuthType": user.stressApi.apiKey
}
}, {validate: false, multi: true}));
},
down() |
});
}
/* eslint-enable */
| {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {  // per-user transform; thru() would hand the callback the whole array at once
user.apiAuthKey = user.stressApi.apiKey;
user.apiAuthType = user.stressApi.apiAuthType;
delete user.stressApi;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
stressApi: ""
},
$set: {
apiAuthKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
}
}, {validate: false, multi: true}));
} | identifier_body |