def cors_validation(func):
"""
Decorator to check if the request is a CORS request and if so, if it's
valid.
:param func: function to check
"""
@functools.wraps(func)
def wrapped(*a, **kw):
controller = a[0]
req = a[1]
# The logic here was interpreted from
# http://www.w3.org/TR/cors/#resource-requests
# Is this a CORS request?
req_origin = req.headers.get('Origin', None)
if req_origin:
# Yes, this is a CORS request so test if the origin is allowed
container_info = \
controller.container_info(controller.account_name,
controller.container_name, req)
cors_info = container_info.get('cors', {})
# Call through to the decorated method
resp = func(*a, **kw)
if controller.app.strict_cors_mode and \
not controller.is_origin_allowed(cors_info, req_origin):
return resp
# Expose,
# - simple response headers,
# http://www.w3.org/TR/cors/#simple-response-header
# - swift specific: etag, x-timestamp, x-trans-id
# - headers provided by the operator in cors_expose_headers
# - user metadata headers
# - headers provided by the user in
# x-container-meta-access-control-expose-headers
if 'Access-Control-Expose-Headers' not in resp.headers:
expose_headers = set([
'cache-control', 'content-language', 'content-type',
'expires', 'last-modified', 'pragma', 'etag',
'x-timestamp', 'x-trans-id', 'x-openstack-request-id'])
expose_headers.update(controller.app.cors_expose_headers)
for header in resp.headers:
if header.startswith('X-Container-Meta') or \
header.startswith('X-Object-Meta'):
expose_headers.add(header.lower())
if cors_info.get('expose_headers'):
expose_headers = expose_headers.union(
[header_line.strip().lower()
for header_line in
cors_info['expose_headers'].split(' ')
if header_line.strip()])
resp.headers['Access-Control-Expose-Headers'] = \
', '.join(expose_headers)
# The user agent won't process the response if the Allow-Origin
# header isn't included
if 'Access-Control-Allow-Origin' not in resp.headers:
if cors_info['allow_origin'] and \
cors_info['allow_origin'].strip() == '*':
resp.headers['Access-Control-Allow-Origin'] = '*'
else:
resp.headers['Access-Control-Allow-Origin'] = req_origin
if 'Vary' in resp.headers:
resp.headers['Vary'] += ', Origin'
else:
resp.headers['Vary'] = 'Origin'
return resp
else:
# Not a CORS request so make the call as normal
return func(*a, **kw)
return wrapped
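A minimal usage sketch: the decorator is meant to wrap a proxy controller verb method, so CORS headers are added to whatever response that method returns. The Controller base class and GETorHEAD helper below are illustrative stand-ins, not taken from this section.

class ObjectController(Controller):

    @cors_validation
    def GET(self, req):
        # Handle the request as usual; the decorator post-processes the
        # response only when an Origin header is present.
        return self.GETorHEAD(req)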

def get_object_info(env, app, path=None, swift_source=None):
"""
Get the info structure for an object, based on env and app.
This is useful to middlewares.
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the object.
"""
(version, account, container, obj) = \
split_path(path or env['PATH_INFO'], 4, 4, True)
info = _get_object_info(app, env, account, container, obj,
swift_source=swift_source)
if info:
info = deepcopy(info)
else:
info = headers_to_object_info({}, 0)
for field in ('length',):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
return info
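A usage sketch from a hypothetical middleware: the returned dict always carries an integer 'length' key (normalized above), so callers can read it directly. The 'MYMW' source tag is made up.

info = get_object_info(req.environ, self.app, swift_source='MYMW')
obj_size = info['length']  # always an int; 0 when the object is unknown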

def _record_ac_info_cache_metrics(
app, cache_state, container=None, resp=None):
"""
Record a single cache operation by account or container lookup into its
corresponding metrics.
:param app: the application object
:param cache_state: the state of this cache operation, includes
infocache_hit, memcache hit, miss, error, skip, force_skip
and disabled.
:param container: the container name
:param resp: the response from either backend or cache hit.
"""
try:
proxy_app = app._pipeline_final_app
except AttributeError:
logger = None
else:
logger = proxy_app.logger
server_type = 'container' if container else 'account'
if logger:
record_cache_op_metrics(logger, server_type, 'info', cache_state, resp)

def get_container_info(env, app, swift_source=None, cache_only=False):
"""
Get the info structure for a container, based on env and app.
This is useful to middlewares.
:param env: the environment used by the current request
:param app: the application object
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:param cache_only: If true, indicates that the caller doesn't want to HEAD
the backend container on a cache miss.
:returns: the container info
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the container.
"""
(version, wsgi_account, wsgi_container, unused) = \
split_path(env['PATH_INFO'], 3, 4, True)
if not constraints.valid_api_version(version):
# Not a valid Swift request; return 0 like we do
# if there's an account failure
return headers_to_container_info({}, 0)
account = wsgi_to_str(wsgi_account)
container = wsgi_to_str(wsgi_container)
# Try to cut through all the layers to the proxy app
# (while also preserving logging)
try:
logged_app = app._pipeline_request_logging_app
proxy_app = app._pipeline_final_app
except AttributeError:
logged_app = proxy_app = app
# Check in environment cache and in memcache (in that order)
info, cache_state = _get_info_from_caches(
proxy_app, env, account, container)
resp = None
if not info and not cache_only:
# Cache miss; go HEAD the container and populate the caches
env.setdefault('swift.infocache', {})
# Before checking the container, make sure the account exists.
#
# If it is an autocreateable account, just assume it exists; don't
# HEAD the account, as a GET or HEAD response for an autocreateable
# account is successful whether the account actually has .db files
# on disk or not.
is_autocreate_account = account.startswith(
constraints.AUTO_CREATE_ACCOUNT_PREFIX)
if not is_autocreate_account:
account_info = get_account_info(env, logged_app, swift_source)
if not account_info or not is_success(account_info['status']):
_record_ac_info_cache_metrics(
logged_app, cache_state, container)
return headers_to_container_info({}, 0)
req = _prepare_pre_auth_info_request(
env, ("/%s/%s/%s" % (version, wsgi_account, wsgi_container)),
(swift_source or 'GET_CONTAINER_INFO'))
# *Always* allow reserved names for get-info requests -- it's on the
# caller to keep the result private-ish
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
resp = req.get_response(logged_app)
drain_and_close(resp)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
# See similar comment in get_account_info() for justification.
info = _get_info_from_infocache(env, account, container)
if info is None:
info = set_info_cache(env, account, container, resp)
if info:
info = deepcopy(info) # avoid mutating what's in swift.infocache
else:
status_int = 0 if cache_only else 503
info = headers_to_container_info({}, status_int)
# Old data format in memcache immediately after a Swift upgrade; clean
# it up so consumers of get_container_info() aren't exposed to it.
if 'object_count' not in info and 'container_size' in info:
info['object_count'] = info.pop('container_size')
for field in ('storage_policy', 'bytes', 'object_count'):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
if info.get('sharding_state') is None:
info['sharding_state'] = 'unsharded'
versions_cont = info.get('sysmeta', {}).get('versions-container', '')
if versions_cont:
versions_cont = wsgi_unquote(str_to_wsgi(
versions_cont)).split('/')[0]
versions_req = _prepare_pre_auth_info_request(
env, ("/%s/%s/%s" % (version, wsgi_account, versions_cont)),
(swift_source or 'GET_CONTAINER_INFO'))
versions_req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
versions_info = get_container_info(versions_req.environ, app)
info['bytes'] = info['bytes'] + versions_info['bytes']
_record_ac_info_cache_metrics(logged_app, cache_state, container, resp)
return info
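A sketch of how middleware might consult the result; the four fields read below are exactly the ones get_container_info() normalizes before returning. The 'MYMW' source tag is made up.

container_info = get_container_info(req.environ, self.app,
                                     swift_source='MYMW')
used_bytes = container_info['bytes']               # int; includes bytes of a linked versions container, if any
object_count = container_info['object_count']      # int, 0 when unknown
policy_index = container_info['storage_policy']    # int, 0 when unknown
sharding_state = container_info['sharding_state']  # defaults to 'unsharded'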

def get_account_info(env, app, swift_source=None):
"""
Get the info structure for an account, based on env and app.
This is useful to middlewares.
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the account.
:raises ValueError: when path doesn't contain an account
"""
(version, wsgi_account, _junk) = split_path(env['PATH_INFO'], 2, 3, True)
if not constraints.valid_api_version(version):
return headers_to_account_info({}, 0)
account = wsgi_to_str(wsgi_account)
# Try to cut through all the layers to the proxy app
# (while also preserving logging)
try:
app = app._pipeline_request_logging_app
except AttributeError:
pass
# Check in environment cache and in memcache (in that order)
info, cache_state = _get_info_from_caches(app, env, account)
# Cache miss; go HEAD the account and populate the caches
if info:
resp = None
else:
env.setdefault('swift.infocache', {})
req = _prepare_pre_auth_info_request(
env, "/%s/%s" % (version, wsgi_account),
(swift_source or 'GET_ACCOUNT_INFO'))
# *Always* allow reserved names for get-info requests -- it's on the
# caller to keep the result private-ish
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
resp = req.get_response(app)
drain_and_close(resp)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
# The point of this is to avoid setting the value in memcached
# twice. Otherwise, we're needlessly sending requests across the
# network.
#
# If the info didn't make it into the cache, we'll compute it from
# the response and populate the cache ourselves.
#
# Note that this is taking "exists in infocache" to imply "exists in
# memcache". That's because we're trying to avoid superfluous
# network traffic, and checking in memcache prior to setting in
# memcache would defeat the purpose.
info = _get_info_from_infocache(env, account)
if info is None:
info = set_info_cache(env, account, None, resp)
if info:
info = info.copy() # avoid mutating what's in swift.infocache
else:
info = headers_to_account_info({}, 503)
for field in ('container_count', 'bytes', 'total_object_count'):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
_record_ac_info_cache_metrics(app, cache_state, container=None, resp=resp)
return info

def get_cache_key(account, container=None, obj=None, shard=None):
"""
Get the keys for both memcache and env['swift.infocache'] (cache_key)
where info about accounts, containers, and objects is cached
:param account: The name of the account
:param container: The name of the container (or None if account)
:param obj: The name of the object (or None if account or container)
:param shard: Sharding state for the container query; typically 'updating'
or 'listing' (Requires account and container; cannot use
with obj)
:returns: a (native) string cache_key
"""
if six.PY2:
def to_native(s):
if s is None or isinstance(s, str):
return s
return s.encode('utf8')
else:
def to_native(s):
if s is None or isinstance(s, str):
return s
return s.decode('utf8', 'surrogateescape')
account = to_native(account)
container = to_native(container)
obj = to_native(obj)
if shard:
if not (account and container):
raise ValueError('Shard cache key requires account and container')
if obj:
raise ValueError('Shard cache key cannot have obj')
cache_key = 'shard-%s-v2/%s/%s' % (shard, account, container)
elif obj:
if not (account and container):
raise ValueError('Object cache key requires account and container')
cache_key = 'object/%s/%s/%s' % (account, container, obj)
elif container:
if not account:
raise ValueError('Container cache key requires account')
cache_key = 'container/%s/%s' % (account, container)
else:
cache_key = 'account/%s' % account
# Use a unique environment cache key per account and per container.
# This allows caching both account and container and ensures that when we
# copy this env to form a new request, it won't accidentally reuse the
# old container or account info
return cache_key
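For illustration, the key layouts produced by the branches above (the account and container names are made up):

get_cache_key('AUTH_test')                           # 'account/AUTH_test'
get_cache_key('AUTH_test', 'cont')                   # 'container/AUTH_test/cont'
get_cache_key('AUTH_test', 'cont', 'obj')            # 'object/AUTH_test/cont/obj'
get_cache_key('AUTH_test', 'cont', shard='listing')  # 'shard-listing-v2/AUTH_test/cont'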

def set_info_cache(env, account, container, resp):
"""
Cache info in both memcache and env.
:param env: the WSGI request environment
:param account: the unquoted account name
:param container: the unquoted container name or None
:param resp: the response received or None if info cache should be cleared
:returns: the info that was placed into the cache, or None if the
request status was not in (404, 410, 2xx).
"""
cache_key = get_cache_key(account, container)
infocache = env.setdefault('swift.infocache', {})
memcache = cache_from_env(env, True)
if resp is None:
clear_info_cache(env, account, container)
return
if container:
cache_time = int(resp.headers.get(
'X-Backend-Recheck-Container-Existence',
DEFAULT_RECHECK_CONTAINER_EXISTENCE))
else:
cache_time = int(resp.headers.get(
'X-Backend-Recheck-Account-Existence',
DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE):
cache_time *= 0.1
elif not is_success(resp.status_int):
# If we got a response, it was unsuccessful, and it wasn't an
# "authoritative" failure, bail without touching caches.
return
if container:
info = headers_to_container_info(resp.headers, resp.status_int)
else:
info = headers_to_account_info(resp.headers, resp.status_int)
if memcache:
memcache.set(cache_key, info, time=cache_time)
infocache[cache_key] = info
return info

def set_object_info_cache(app, env, account, container, obj, resp):
"""
Cache object info in the WSGI environment, but not in memcache. Caching
in memcache would lead to cache pressure and mass evictions due to the
large number of objects in a typical Swift cluster. This is a
per-request cache only.
:param app: the application object
:param env: the environment used by the current request
:param account: the unquoted account name
:param container: the unquoted container name
:param obj: the unquoted object name
:param resp: a GET or HEAD response received from an object server, or
None if info cache should be cleared
:returns: the object info
"""
cache_key = get_cache_key(account, container, obj)
if 'swift.infocache' in env and not resp:
env['swift.infocache'].pop(cache_key, None)
return
info = headers_to_object_info(resp.headers, resp.status_int)
env.setdefault('swift.infocache', {})[cache_key] = info
return info

def clear_info_cache(env, account, container=None, shard=None):
"""
Clear the cached info in both memcache and env
:param env: the WSGI request environment
:param account: the account name
:param container: the container name if clearing info for containers, or
None
:param shard: the sharding state if clearing info for container shard
ranges, or None
"""
cache_key = get_cache_key(account, container, shard=shard)
infocache = env.setdefault('swift.infocache', {})
memcache = cache_from_env(env, True)
infocache.pop(cache_key, None)
if memcache:
memcache.delete(cache_key)

def _get_info_from_infocache(env, account, container=None):
"""
Get cached account or container information from request-environment
cache (swift.infocache).
:param env: the environment used by the current request
:param account: the account name
:param container: the container name
:returns: a dictionary of cached info on cache hit, None on miss
"""
cache_key = get_cache_key(account, container)
if 'swift.infocache' in env and cache_key in env['swift.infocache']:
return env['swift.infocache'][cache_key]
return None

def record_cache_op_metrics(
logger, server_type, op_type, cache_state, resp=None):
"""
Record a single cache operation into its corresponding metrics.
:param logger: the metrics logger
:param server_type: 'account' or 'container'
:param op_type: the name of the operation type, e.g. 'shard_listing',
'shard_updating', etc.
:param cache_state: the state of this cache operation. When it's
'infocache_hit' or a memcache 'hit', the lookup succeeded and 'resp' will
be None; for all other cases, such as memcache 'miss' or 'skip', which
result in a request to the backend, a valid 'resp' is expected.
:param resp: the response from backend for all cases except cache hits.
"""
server_type = server_type.lower()
if cache_state == 'infocache_hit':
logger.increment('%s.%s.infocache.hit' % (server_type, op_type))
elif cache_state == 'hit':
# memcache hits.
logger.increment('%s.%s.cache.hit' % (server_type, op_type))
else:
# the remaining cache_state values are memcache miss, error, skip,
# force_skip or disabled.
if resp:
logger.increment('%s.%s.cache.%s.%d' % (
server_type, op_type, cache_state, resp.status_int))
else:
# In some situations, we choose not to look up the backend after a
# cache miss.
logger.increment('%s.%s.cache.%s' % (
server_type, op_type, cache_state))
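A small sketch of the metric names the branches above produce, using a stand-in logger that simply records increment() calls; the op_type values are illustrative.

class FakeStatsLogger(object):
    def __init__(self):
        self.metrics = []

    def increment(self, metric):
        self.metrics.append(metric)

logger = FakeStatsLogger()
record_cache_op_metrics(logger, 'container', 'info', 'infocache_hit')
record_cache_op_metrics(logger, 'container', 'shard_listing', 'hit')
record_cache_op_metrics(logger, 'account', 'info', 'skip')
# logger.metrics == ['container.info.infocache.hit',
#                    'container.shard_listing.cache.hit',
#                    'account.info.cache.skip']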

def _get_info_from_memcache(app, env, account, container=None):
"""
Get cached account or container information from memcache
:param app: the application object
:param env: the environment used by the current request
:param account: the account name
:param container: the container name
:returns: a tuple of two values, the first is a dictionary of cached info
on cache hit, None on miss or if memcache is not in use; the second is
cache state.
"""
memcache = cache_from_env(env, True)
if not memcache:
return None, 'disabled'
try:
proxy_app = app._pipeline_final_app
except AttributeError:
# Only the middleware entry-points get a reference to the
# proxy-server app; if a middleware composes itself as multiple
# filters, we'll just have to choose a reasonable default
skip_chance = 0.0
else:
if container:
skip_chance = proxy_app.container_existence_skip_cache
else:
skip_chance = proxy_app.account_existence_skip_cache
cache_key = get_cache_key(account, container)
if skip_chance and random.random() < skip_chance:
info = None
cache_state = 'skip'
else:
info = memcache.get(cache_key)
cache_state = 'hit' if info else 'miss'
if info and six.PY2:
# Get back to native strings
new_info = {}
for key in info:
new_key = key.encode("utf-8") if isinstance(
key, six.text_type) else key
if isinstance(info[key], six.text_type):
new_info[new_key] = info[key].encode("utf-8")
elif isinstance(info[key], dict):
new_info[new_key] = {}
for subkey, value in info[key].items():
new_subkey = subkey.encode("utf-8") if isinstance(
subkey, six.text_type) else subkey
if isinstance(value, six.text_type):
new_info[new_key][new_subkey] = \
value.encode("utf-8")
else:
new_info[new_key][new_subkey] = value
else:
new_info[new_key] = info[key]
info = new_info
if info:
env.setdefault('swift.infocache', {})[cache_key] = info
return info, cache_state

def _get_info_from_caches(app, env, account, container=None):
"""
Get the cached info from env or memcache (if used) in that order.
Used for both account and container info.
:param app: the application object
:param env: the environment used by the current request
:returns: a tuple of (the cached info or None if not cached, cache state)
"""
info = _get_info_from_infocache(env, account, container)
if info:
cache_state = 'infocache_hit'
else:
info, cache_state = _get_info_from_memcache(
app, env, account, container)
return info, cache_state

def get_namespaces_from_cache(req, cache_key, skip_chance):
"""
Get cached namespaces from infocache or memcache.
:param req: a :class:`swift.common.swob.Request` object.
:param cache_key: the cache key for both infocache and memcache.
:param skip_chance: the probability of skipping the memcache look-up.
:return: a tuple of (value, cache state). Value is an instance of
:class:`swift.common.utils.NamespaceBoundList` if a non-empty list is
found in memcache. Otherwise value is ``None``, for example if memcache
look-up was skipped, or no value was found, or an empty list was found.
"""
# try to get namespaces from infocache first
infocache = req.environ.setdefault('swift.infocache', {})
ns_bound_list = infocache.get(cache_key)
if ns_bound_list:
return ns_bound_list, 'infocache_hit'
# then try to get them from memcache
memcache = cache_from_env(req.environ, True)
if not memcache:
return None, 'disabled'
if skip_chance and random.random() < skip_chance:
return None, 'skip'
try:
bounds = memcache.get(cache_key, raise_on_error=True)
cache_state = 'hit' if bounds else 'miss'
except MemcacheConnectionError:
bounds = None
cache_state = 'error'
if bounds:
if six.PY2:
# json.loads() in memcache.get will convert json 'string' to
# 'unicode' with python2, here we cast 'unicode' back to 'str'
bounds = [
[lower.encode('utf-8'), name.encode('utf-8')]
for lower, name in bounds]
ns_bound_list = NamespaceBoundList(bounds)
infocache[cache_key] = ns_bound_list
else:
ns_bound_list = None
return ns_bound_list, cache_state

def set_namespaces_in_cache(req, cache_key, ns_bound_list, time):
"""
Set a list of namespace bounds in infocache and memcache.
:param req: a :class:`swift.common.swob.Request` object.
:param cache_key: the cache key for both infocache and memcache.
:param ns_bound_list: a :class:`swift.common.utils.NamespaceBoundList`.
:param time: how long the namespaces should remain in memcache.
:return: the cache_state.
"""
infocache = req.environ.setdefault('swift.infocache', {})
infocache[cache_key] = ns_bound_list
memcache = cache_from_env(req.environ, True)
if memcache and ns_bound_list:
try:
memcache.set(cache_key, ns_bound_list.bounds, time=time,
raise_on_error=True)
except MemcacheConnectionError:
cache_state = 'set_error'
else:
cache_state = 'set'
else:
cache_state = 'disabled'
return cache_state

def _prepare_pre_auth_info_request(env, path, swift_source):
"""
Prepares a pre authed request to obtain info using a HEAD.
:param env: the environment used by the current request
:param path: The unquoted, WSGI-str request path
:param swift_source: value for swift.source in WSGI environment
:returns: the pre authed request
"""
# Set the env for the pre_authed call without a query string
newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
query_string='', swift_source=swift_source)
# This is a sub-request for container metadata - drop the Origin header
# from the request so that it is not treated as a CORS request.
newenv.pop('HTTP_ORIGIN', None)
# ACLs are only shown to account owners, so let's make sure this request
# looks like it came from the account owner.
newenv['swift_owner'] = True
# Note that Request.blank expects quoted path
return Request.blank(wsgi_quote(path), environ=newenv)

def get_info(app, env, account, container=None, swift_source=None):
"""
Get info about accounts or containers
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container (or None if account)
:param swift_source: swift source logged for any subrequests made while
retrieving the account or container info
:returns: information about the specified entity in a dictionary. See
get_account_info and get_container_info for details on what's in the
dictionary.
"""
env.setdefault('swift.infocache', {})
if container:
path = '/v1/%s/%s' % (account, container)
path_env = env.copy()
path_env['PATH_INFO'] = path
return get_container_info(path_env, app, swift_source=swift_source)
else:
# account info
path = '/v1/%s' % (account,)
path_env = env.copy()
path_env['PATH_INFO'] = path
return get_account_info(path_env, app, swift_source=swift_source)

def _get_object_info(app, env, account, container, obj, swift_source=None):
"""
Get the info about object
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted, WSGI-str name of the account
:param container: The unquoted, WSGI-str name of the container
:param obj: The unquoted, WSGI-str name of the object
:returns: the cached info or None if cannot be retrieved
"""
cache_key = get_cache_key(account, container, obj)
info = env.get('swift.infocache', {}).get(cache_key)
if info:
return info
# Not in cache, let's try the object servers
path = '/v1/%s/%s/%s' % (account, container, obj)
req = _prepare_pre_auth_info_request(env, path, swift_source)
# *Always* allow reserved names for get-info requests -- it's on the
# caller to keep the result private-ish
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
resp = req.get_response(app)
# Unlike get_account_info() and get_container_info(), we don't save
# things in memcache, so we can store the info without network traffic,
# *and* the proxy doesn't cache object info for us, so there's no chance
# that the object info would be in the environment. Thus, we just
# compute the object info based on the response and stash it in
# swift.infocache.
info = set_object_info_cache(app, env, account, container, obj, resp)
return info

def close_swift_conn(src):
"""
Force close the http connection to the backend.
:param src: the response from the backend
"""
try:
# Since the backends set "Connection: close" in their response
# headers, the response object (src) is solely responsible for the
# socket. The connection object (src.swift_conn) has no references
# to the socket, so calling its close() method does nothing, and
# therefore we don't do it.
#
# Also, since calling the response's close() method might not
# close the underlying socket but only decrement some
# reference-counter, we have a special method here that really,
# really kills the underlying socket with a close() syscall.
src.nuke_from_orbit() # it's the only way to be sure
except Exception:
pass

def bytes_to_skip(record_size, range_start):
"""
Assume an object is composed of N records, where the first N-1 are all
the same size and the last is at most that large, but may be smaller.
When a range request is made, it might start with a partial record. This
must be discarded, lest the consumer get bad data. This is particularly
true of suffix-byte-range requests, e.g. "Range: bytes=-12345" where the
size of the object is unknown at the time the request is made.
This function computes the number of bytes that must be discarded to
ensure only whole records are yielded. Erasure-code decoding needs this.
This function could have been inlined, but it took enough tries to get
right that some targeted unit tests were desirable, hence its extraction.
"""
return (record_size - (range_start % record_size)) % record_size
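A few worked examples, assuming a 4096-byte record size:

assert bytes_to_skip(4096, 0) == 0        # range starts on a record boundary
assert bytes_to_skip(4096, 4096) == 0     # starts exactly one whole record in
assert bytes_to_skip(4096, 7000) == 1192  # 7000 % 4096 == 2904; 4096 - 2904 == 1192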

def is_good_source(status, server_type):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
:param status: the response status code from the backend.
:param server_type: the type of server: 'Account', 'Container' or 'Object'.
:returns: True if the response status code is acceptable, False if not.
"""
if (server_type == 'Object' and
status == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE):
return True
return is_success(status) or is_redirection(status)

def num_container_updates(container_replicas, container_quorum,
object_replicas, object_quorum):
"""
We need to send container updates via enough object servers such
that, if the object PUT succeeds, then the container update is
durable (either it's synchronously updated or written to async
pendings).
Define:
Qc = the quorum size for the container ring
Qo = the quorum size for the object ring
Rc = the replica count for the container ring
Ro = the replica count (or EC N+K) for the object ring
A durable container update is one that's made it to at least Qc
nodes. To always be durable, we have to send enough container
updates so that, if only Qo object PUTs succeed, and all the
failed object PUTs had container updates, at least Qc updates
remain. Since (Ro - Qo) object PUTs may fail, we must have at
least Qc + Ro - Qo container updates to ensure that Qc of them
remain.
Also, each container replica is named in at least one object PUT
request so that, when all requests succeed, no work is generated
for the container replicator. Thus, at least Rc updates are
necessary.
:param container_replicas: replica count for the container ring (Rc)
:param container_quorum: quorum size for the container ring (Qc)
:param object_replicas: replica count for the object ring (Ro)
:param object_quorum: quorum size for the object ring (Qo)
"""
return max(
# Qc + Ro - Qo
container_quorum + object_replicas - object_quorum,
# Rc
container_replicas)
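Two worked examples; the replica and quorum figures are illustrative:

# 3-replica object ring (Qo=2) and 3-replica container ring (Qc=2):
# max(2 + 3 - 2, 3) == 3, so every container replica is named.
assert num_container_updates(3, 2, 3, 2) == 3
# 10+4 EC object ring (Ro=14, assumed Qo=11) and 3-replica container ring:
# max(2 + 14 - 11, 3) == 5 container updates are required.
assert num_container_updates(3, 2, 14, 11) == 5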

def client_range_to_segment_range(client_start, client_end, segment_size):
"""
Takes a byterange from the client and converts it into a byterange
spanning the necessary segments.
Handles prefix, suffix, and fully-specified byte ranges.
Examples:
client_range_to_segment_range(100, 700, 512) = (0, 1023)
client_range_to_segment_range(100, 700, 256) = (0, 767)
client_range_to_segment_range(300, None, 256) = (256, None)
:param client_start: first byte of the range requested by the client
:param client_end: last byte of the range requested by the client
:param segment_size: size of an EC segment, in bytes
:returns: a 2-tuple (seg_start, seg_end) where
* seg_start is the first byte of the first segment, or None if this is
a suffix byte range
* seg_end is the last byte of the last segment, or None if this is a
prefix byte range
"""
# the index of the first byte of the first segment
segment_start = (
int(client_start // segment_size)
* segment_size) if client_start is not None else None
# the index of the last byte of the last segment
segment_end = (
# bytes M-
None if client_end is None else
# bytes M-N
(((int(client_end // segment_size) + 1)
* segment_size) - 1) if client_start is not None else
# bytes -N: we get some extra bytes to make sure we
# have all we need.
#
# To see why, imagine a 100-byte segment size, a
# 340-byte object, and a request for the last 50
# bytes. Naively requesting the last 100 bytes would
# result in a truncated first segment and hence a
# truncated download. (Of course, the actual
# obj-server requests are for fragments, not
# segments, but that doesn't change the
# calculation.)
#
# This does mean that we fetch an extra segment if
# the object size is an exact multiple of the
# segment size. It's a little wasteful, but it's
# better to be a little wasteful than to get some
# range requests completely wrong.
(int(math.ceil((
float(client_end) / segment_size) + 1)) # nsegs
* segment_size))
return (segment_start, segment_end)
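The docstring examples restated as assertions, plus one suffix-range case matching the 100-byte-segment scenario described in the comment above:

assert client_range_to_segment_range(100, 700, 512) == (0, 1023)
assert client_range_to_segment_range(100, 700, 256) == (0, 767)
assert client_range_to_segment_range(300, None, 256) == (256, None)
# "bytes=-50" with 100-byte segments: fetch the last
# ceil(50/100 + 1) * 100 == 200 bytes' worth of segments, to be safe.
assert client_range_to_segment_range(None, 50, 100) == (None, 200)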

def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
fragment_size):
"""
Takes a byterange spanning some segments and converts that into a
byterange spanning the corresponding fragments within their fragment
archives.
Handles prefix, suffix, and fully-specified byte ranges.
:param segment_start: first byte of the first segment
:param segment_end: last byte of the last segment
:param segment_size: size of an EC segment, in bytes
:param fragment_size: size of an EC fragment, in bytes
:returns: a 2-tuple (frag_start, frag_end) where
* frag_start is the first byte of the first fragment, or None if this
is a suffix byte range
* frag_end is the last byte of the last fragment, or None if this is a
prefix byte range
"""
# Note: segment_start and (segment_end + 1) are
# multiples of segment_size, so we don't have to worry
# about integer math giving us rounding troubles.
#
# There's a whole bunch of +1 and -1 in here; that's because HTTP wants
# byteranges to be inclusive of the start and end, so e.g. bytes 200-300
# is a range containing 101 bytes. Python has half-inclusive ranges, of
# course, so we have to convert back and forth. We try to keep things in
# HTTP-style byteranges for consistency.
# the index of the first byte of the first fragment
fragment_start = ((
segment_start // segment_size * fragment_size)
if segment_start is not None else None)
# the index of the last byte of the last fragment
fragment_end = (
# range unbounded on the right
None if segment_end is None else
# range unbounded on the left; no -1 since we're
# asking for the last N bytes, not to have a
# particular byte be the last one
((segment_end + 1) // segment_size
* fragment_size) if segment_start is None else
# range bounded on both sides; the -1 is because the
# rest of the expression computes the length of the
# fragment, and a range of N bytes starts at index M
# and ends at M + N - 1.
((segment_end + 1) // segment_size * fragment_size) - 1)
return (fragment_start, fragment_end)
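A worked example under assumed sizes (segment_size=4096, fragment_size=1024):

# Two whole 4096-byte segments map to two whole 1024-byte fragments in each
# fragment archive, i.e. fragment bytes 0..2047.
assert segment_range_to_fragment_range(0, 8191, 4096, 1024) == (0, 2047)
# Suffix range (unbounded on the left) covering the last segment only:
assert segment_range_to_fragment_range(None, 4095, 4096, 1024) == (None, 1024)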

def chunk_transformer(policy):
"""
A generator to transform a source chunk to erasure coded chunks for each
`send` call. The number of erasure coded chunks is given by
policy.ec_n_unique_fragments.
"""
segment_size = policy.ec_segment_size
buf = collections.deque()
total_buf_len = 0
chunk = yield
while chunk:
buf.append(chunk)
total_buf_len += len(chunk)
if total_buf_len >= segment_size:
chunks_to_encode = []
# extract as many chunks as we can from the input buffer
while total_buf_len >= segment_size:
to_take = segment_size
pieces = []
while to_take > 0:
piece = buf.popleft()
if len(piece) > to_take:
buf.appendleft(piece[to_take:])
piece = piece[:to_take]
pieces.append(piece)
to_take -= len(piece)
total_buf_len -= len(piece)
chunks_to_encode.append(b''.join(pieces))
frags_by_byte_order = []
for chunk_to_encode in chunks_to_encode:
frags_by_byte_order.append(
policy.pyeclib_driver.encode(chunk_to_encode))
# Sequential calls to encode() have given us a list that
# looks like this:
#
# [[frag_A1, frag_B1, frag_C1, ...],
# [frag_A2, frag_B2, frag_C2, ...], ...]
#
# What we need is a list like this:
#
# [(frag_A1 + frag_A2 + ...), # destined for node A
# (frag_B1 + frag_B2 + ...), # destined for node B
# (frag_C1 + frag_C2 + ...), # destined for node C
# ...]
obj_data = [b''.join(frags)
for frags in zip(*frags_by_byte_order)]
chunk = yield obj_data
else:
# didn't have enough data to encode
chunk = yield None
# Now we've gotten an empty chunk, which indicates end-of-input.
# Take any leftover bytes and encode them.
last_bytes = b''.join(buf)
if last_bytes:
last_frags = policy.pyeclib_driver.encode(last_bytes)
yield last_frags
else:
yield [b''] * policy.ec_n_unique_fragments
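A usage sketch: the generator must be primed with next() before the first send(); it returns None until a whole segment has been buffered, and sending an empty chunk flushes whatever is left. The source and send_to_object_servers names are hypothetical stand-ins.

transformer = chunk_transformer(policy)
next(transformer)                       # prime: run to the first bare yield
for chunk in iter(lambda: source.read(65536), b''):
    frags = transformer.send(chunk)     # None until a whole segment is buffered
    if frags:
        send_to_object_servers(frags)   # one fragment string per node
last_frags = transformer.send(b'')      # empty chunk triggers the final flush
send_to_object_servers(last_frags)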

def debug_logger(name='test'):
"""get a named adapted debug logger"""
return DebugLogAdapter(DebugLogger(), name)

def capture_logger(conf, *args, **kwargs):
"""
Yields an adapted system logger based on the conf options. The log adapter
captures logs in order to support the pattern of tests calling the log
accessor methods (e.g. get_lines_for_level) directly on the logger
instance.
"""
with mock.patch('swift.common.utils.logs.LogAdapter', CaptureLogAdapter):
log_adapter = utils.logs.get_logger(conf, *args, **kwargs)
log_adapter.start_capture()
try:
yield log_adapter
finally:
log_adapter.stop_capture()

def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config = {}
if defaults is not None:
config.update(defaults)
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except IOError:
if not os.path.exists(config_file):
print('Unable to read test config %s - file not found'
% config_file, file=sys.stderr)
elif not os.access(config_file, os.R_OK):
print('Unable to read test config %s - permission denied'
% config_file, file=sys.stderr)
except ValueError as e:
print(e)
return config

def listen_zero():
"""
eventlet.listen() always sets SO_REUSEPORT, so when called with
("localhost",0), instead of returning unique ports it can return the
same port twice. That causes our tests to fail, so open-code it here
without SO_REUSEPORT.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
sock.listen(50)
return sock

def annotate_failure(msg):
"""
Catch AssertionError and annotate it with a message. Useful when making
assertions in a loop where the message can indicate the loop index or
richer context about the failure.
:param msg: A message to be prefixed to the AssertionError message.
"""
try:
yield
except AssertionError as err:
err_typ, err_val, err_tb = sys.exc_info()
if err_val.args:
msg = '%s Failed with %s' % (msg, err_val.args[0])
err_val.args = (msg, ) + err_val.args[1:]
else:
# workaround for some IDE's raising custom AssertionErrors
err_val = '%s Failed with %s' % (msg, err)
err_typ = AssertionError
reraise(err_typ, err_val, err_tb)
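A usage sketch, assuming annotate_failure is wrapped as a context manager (the bare yield above suggests contextlib.contextmanager): the message pins down which loop iteration tripped the assertion.

for i, resp in enumerate(responses):
    with annotate_failure('response %d:' % i):
        self.assertEqual(200, resp.status_int)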

def offsetify(*offsets):
"""Sum line offsets matched by offset_re_fragment and convert them to strings
like @+3 or @-2."""
offset = sum([int(o) for o in offsets if o is not None])
if offset < 0:
return u"@-" + str(-offset)
elif offset > 0:
return u"@+" + str(offset)
else:
return u""

def adjust_comments(offset, inserted_attr, comment_str):
"""Replace expected-errors with expected-remarks, and make other adjustments
to diagnostics so that they reflect access notes."""
prefix = u"{{ignored access note: "
suffix = u"; did not implicitly add '" + inserted_attr + "' to this }}"
adjusted = expected_other_diag_re.sub(lambda m: u"expected-" + m.group(1) +
offsetify(offset, m.group(2)),
comment_str)
adjusted = expected_error_re.sub(lambda m: u"expected-remark" +
offsetify(offset, m.group(1)) + " " +
m.group(2) + prefix + m.group(3) +
suffix,
adjusted)
adjusted = marked_objc_re.sub(u"marked @objc by an access note", adjusted)
adjusted = fixit_re.sub(u"{{none}}", adjusted)
return u"// [expectations adjusted] " + adjusted

def move_at_objc_to_access_note(access_notes_file, arg, maybe_bad, offset,
access_note_name):
"""Write an @objc attribute into an access notes file, then return the
string that will replace the attribute and trailing comment."""
is_bad = (maybe_bad == "bad-")
access_notes_file.write(u"""
- Name: '{}'
ObjC: true""".format(access_note_name))
if arg:
access_notes_file.write(u"""
ObjCName: '{}'""".format(arg))
# Default to shifting expected diagnostics down 1 line.
if offset is None:
offset = 1
inserted_attr = u"@objc"
if arg:
inserted_attr += u"(" + arg + u")"
replacement = u"// access-note-adjust" + offsetify(offset) + \
u"{{" + inserted_attr + "}} [attr moved] "
if not is_bad:
replacement += u"expected-remark{{implicitly added '" + inserted_attr + \
u"' to this }} expected-note{{add '" + inserted_attr + \
u"' explicitly to silence this warning}}"
return replacement

def replacer(fn, *args):
"""Returns a lambda which calls fn with args, followed by the groups from
the match passed to the lambda."""
return lambda m: fn(*(args + m.groups()))
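An illustrative re.sub() call: the returned lambda receives the match object and calls fn with the bound args followed by the match's groups. The _wrap helper is made up.

import re

def _wrap(tag, word):
    return u"<%s>%s</%s>" % (tag, word, tag)

re.sub(r"\*(\w+)\*", replacer(_wrap, u"em"), u"make *this* stand out")
# -> u"make <em>this</em> stand out"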

def putrequest(self, method, url, skip_host=False, skip_accept_encoding=False):
'''Send a request to the server.
This is mostly a regurgitation of CPython's HTTPConnection.putrequest,
but fixed up so we can still send arbitrary bytes in the request line
on py3. See also: https://bugs.python.org/issue36274
To use, swap out a HTTP(S)Connection's putrequest with something like::
conn.putrequest = putrequest.__get__(conn)
:param method: specifies an HTTP request method, e.g. 'GET'.
:param url: specifies the object being requested, e.g. '/index.html'.
:param skip_host: if True does not add automatically a 'Host:' header
:param skip_accept_encoding: if True does not add automatically an
'Accept-Encoding:' header
'''
# (Mostly) inline the HTTPConnection implementation; just fix it
# so we can send non-ascii request lines. For comparison, see
# https://github.com/python/cpython/blob/v2.7.16/Lib/httplib.py#L888-L1003
# and https://github.com/python/cpython/blob/v3.7.2/
# Lib/http/client.py#L1061-L1183
if self._HTTPConnection__response \
and self._HTTPConnection__response.isclosed():
self._HTTPConnection__response = None
if self._HTTPConnection__state == http_client._CS_IDLE:
self._HTTPConnection__state = http_client._CS_REQ_STARTED
else:
raise http_client.CannotSendRequest(self._HTTPConnection__state)
self._method = method
if not url:
url = '/'
self._path = url
request = '%s %s %s' % (method, url, self._http_vsn_str)
if not isinstance(request, bytes):
# This choice of encoding is the whole reason we copy/paste from
# cpython. When making backend requests, it should never be
# necessary; however, we have some functional tests that want
# to send non-ascii bytes.
# TODO: when https://bugs.python.org/issue36274 is resolved, make
# sure we fix up our API to match whatever upstream chooses to do
self._output(request.encode('latin1'))
else:
self._output(request)
if self._http_vsn == 11:
if not skip_host:
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urllib.parse.urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')

def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
"""
Look for a file first in conf_src_dir, if it exists; otherwise, optionally
look in the source tree's sample 'etc' dir.
:param conf_src_dir: Directory in which to search first for conf file. May
be None
:param conf_file_name: Name of conf file
:param use_sample: If True and the conf_file_name is not found, then return
any sample conf file found in the source tree sample
'etc' dir by appending '-sample' to conf_file_name
:returns: Path to conf file
:raises InProcessException: If no conf file is found
"""
dflt_src_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir, os.pardir, os.pardir,
'etc'))
conf_src_dir = dflt_src_dir if conf_src_dir is None else conf_src_dir
conf_file_path = os.path.join(conf_src_dir, conf_file_name)
if os.path.exists(conf_file_path):
return conf_file_path
if use_sample:
# fall back to using the corresponding sample conf file
conf_file_name += '-sample'
conf_file_path = os.path.join(dflt_src_dir, conf_file_name)
if os.path.exists(conf_file_path):
return conf_file_path
msg = 'Failed to find config file %s' % conf_file_name
raise InProcessException(msg)

def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
"""
If SWIFT_TEST_POLICY is set:
- look in swift.conf file for specified policy
- move this to be policy-0 but preserving its options
- copy its ring file to test dir, changing its devices to suit
in process testing, and renaming it to suit policy-0
Otherwise, create a default ring file.
"""
conf = ConfigParser()
conf.read(swift_conf)
sp_prefix = 'storage-policy:'
try:
# policy index 0 will be created if no policy exists in conf
policies = parse_storage_policies(conf)
except PolicyError as e:
raise InProcessException(e)
# clear all policies from test swift.conf before adding test policy back
for policy in policies:
conf.remove_section(sp_prefix + str(policy.idx))
if policy_specified:
policy_to_test = policies.get_by_name(policy_specified)
if policy_to_test is None:
raise InProcessException('Failed to find policy name "%s"'
% policy_specified)
_info('Using specified policy %s' % policy_to_test.name)
else:
policy_to_test = policies.default
_info('Defaulting to policy %s' % policy_to_test.name)
# make policy_to_test be policy index 0 and default for the test config
sp_zero_section = sp_prefix + '0'
conf.add_section(sp_zero_section)
for (k, v) in policy_to_test.get_info(config=True).items():
conf.set(sp_zero_section, k, str(v))
conf.set(sp_zero_section, 'default', 'True')
with open(swift_conf, 'w') as fp:
conf.write(fp)
# look for a source ring file
ring_file_src = ring_file_test = 'object.ring.gz'
if policy_to_test.idx:
ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
try:
ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
use_sample=False)
except InProcessException:
if policy_specified:
raise InProcessException('Failed to find ring file %s'
% ring_file_src)
ring_file_src = None
ring_file_test = os.path.join(testdir, ring_file_test)
if ring_file_src:
# copy source ring file to a policy-0 test ring file, re-homing servers
_info('Using source ring file %s' % ring_file_src)
ring_data = ring.RingData.load(ring_file_src)
obj_sockets = []
for dev in ring_data.devs:
device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
utils.mkdirs(os.path.join(_testdir, 'sda1'))
utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
obj_socket = listen_zero()
obj_sockets.append(obj_socket)
dev['port'] = obj_socket.getsockname()[1]
dev['ip'] = '127.0.0.1'
dev['device'] = device
dev['replication_port'] = dev['port']
dev['replication_ip'] = dev['ip']
ring_data.save(ring_file_test)
else:
# make default test ring, 3 replicas, 4 partitions, 3 devices
# which will work for a replication policy or a 2+1 EC policy
_info('No source object ring file, creating 3rep/4part/3dev ring')
obj_sockets = [listen_zero() for _ in (0, 1, 2)]
replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj_sockets[0].getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj_sockets[1].getsockname()[1]},
{'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
'port': obj_sockets[2].getsockname()[1]}]
ring_data = ring.RingData(replica2part2dev_id, devs, 30)
with closing(GzipFile(ring_file_test, 'wb')) as f:
pickle.dump(ring_data, f)
for dev in ring_data.devs:
_debug('Ring file dev: %s' % dev)
return obj_sockets |
Load encryption configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid | def _load_encryption(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load encryption configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for encryption')
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
# DEFAULTS options) then it prevents pipeline being loaded into the local
# conf during wsgi load_app.
# Therefore we must modify the [pipeline:main] section.
conf = ConfigParser()
conf.read(proxy_conf_file)
try:
section = 'pipeline:main'
pipeline = conf.get(section, 'pipeline')
pipeline = pipeline.replace(
"proxy-logging proxy-server",
"keymaster encryption proxy-logging proxy-server")
pipeline = pipeline.replace(
"cache listing_formats",
"cache etag-quoter listing_formats")
conf.set(section, 'pipeline', pipeline)
root_secret = base64.b64encode(os.urandom(32))
if not six.PY2:
root_secret = root_secret.decode('ascii')
conf.set('filter:keymaster', 'encryption_root_secret', root_secret)
conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
conf.set('filter:etag-quoter', 'enable_by_default', 'true')
except NoSectionError as err:
        msg = 'Problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
raise InProcessException(msg)
test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file, swift_conf_file |
Override swift.conf [storage-policy:0] section to use a 2+1 EC policy.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use | def _load_ec_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
"""
Override swift.conf [storage-policy:0] section to use a 2+1 EC policy.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
"""
_debug('Setting configuration for default EC policy')
conf = ConfigParser()
conf.read(swift_conf_file)
# remove existing policy sections that came with swift.conf-sample
for section in list(conf.sections()):
if section.startswith('storage-policy'):
conf.remove_section(section)
# add new policy 0 section for an EC policy
conf.add_section('storage-policy:0')
ec_policy_spec = {
'name': 'ec-test',
'policy_type': 'erasure_coding',
'ec_type': 'liberasurecode_rs_vand',
'ec_num_data_fragments': 2,
'ec_num_parity_fragments': 1,
'ec_object_segment_size': 1048576,
'default': True
}
for k, v in ec_policy_spec.items():
conf.set('storage-policy:0', k, str(v))
with open(swift_conf_file, 'w') as fp:
conf.write(fp)
return proxy_conf_file, swift_conf_file |
Load domain_remap and staticweb into proxy server pipeline.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid | def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load domain_remap and staticweb into proxy server pipeline.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for domain_remap')
# add a domain_remap storage_domain to the test configuration
storage_domain = 'example.net'
global config
config['storage_domain'] = storage_domain
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
# DEFAULTS options) then it prevents pipeline being loaded into the local
# conf during wsgi load_app.
# Therefore we must modify the [pipeline:main] section.
conf = ConfigParser()
conf.read(proxy_conf_file)
try:
section = 'pipeline:main'
old_pipeline = conf.get(section, 'pipeline')
pipeline = old_pipeline.replace(
" tempauth ",
" tempauth staticweb ")
pipeline = pipeline.replace(
" listing_formats ",
" domain_remap listing_formats ")
if pipeline == old_pipeline:
raise InProcessException(
"Failed to insert domain_remap and staticweb into pipeline: %s"
% old_pipeline)
conf.set(section, 'pipeline', pipeline)
# set storage_domain in domain_remap middleware to match test config
section = 'filter:domain_remap'
conf.set(section, 'storage_domain', storage_domain)
except NoSectionError as err:
        msg = 'Problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
raise InProcessException(msg)
test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file, swift_conf_file |
Load s3api configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid | def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load s3api configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for s3api')
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
# DEFAULTS options) then it prevents pipeline being loaded into the local
# conf during wsgi load_app.
# Therefore we must modify the [pipeline:main] section.
conf = ConfigParser()
conf.read(proxy_conf_file)
try:
section = 'pipeline:main'
pipeline = conf.get(section, 'pipeline')
pipeline = pipeline.replace(
"tempauth",
"s3api tempauth")
conf.set(section, 'pipeline', pipeline)
conf.set('filter:s3api', 's3_acl', 'true')
conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
except NoSectionError as err:
        msg = 'Problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
raise InProcessException(msg)
test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file, swift_conf_file |
You can use the kwargs to override:
'retries' (default: 5)
'use_account' (default: 1) - which user's token to pass
'url_account' (default: matches 'use_account') - which user's storage URL
'resource' (default: url[url_account]) - URL to connect to; retry()
will interpolate the variable :storage_url: if present
'service_user' - add a service token from this user (1 indexed) | def retry(func, *args, **kwargs):
"""
You can use the kwargs to override:
'retries' (default: 5)
'use_account' (default: 1) - which user's token to pass
'url_account' (default: matches 'use_account') - which user's storage URL
    'resource' (default: url[url_account]) - URL to connect to; retry()
will interpolate the variable :storage_url: if present
'service_user' - add a service token from this user (1 indexed)
"""
global url, token, service_token, parsed, conn
retries = kwargs.get('retries', 5)
attempts, backoff = 0, 1
# use account #1 by default; turn user's 1-indexed account into 0-indexed
use_account = kwargs.pop('use_account', 1) - 1
service_user = kwargs.pop('service_user', None)
if service_user:
service_user -= 1 # 0-index
# access our own account by default
url_account = kwargs.pop('url_account', use_account + 1) - 1
os_options = {'user_domain_name': swift_test_domain[use_account],
'project_domain_name': swift_test_domain[use_account]}
while attempts <= retries:
auth_failure = False
attempts += 1
try:
if not url[use_account] or not token[use_account]:
url[use_account], token[use_account] = get_url_token(
use_account, os_options)
parsed[use_account] = conn[use_account] = None
if not parsed[use_account] or not conn[use_account]:
parsed[use_account], conn[use_account] = \
connection(url[use_account])
# default resource is the account url[url_account]
resource = kwargs.pop('resource', '%(storage_url)s')
template_vars = {'storage_url': url[url_account]}
parsed_result = urlparse(resource % template_vars)
if isinstance(service_user, int):
if not service_token[service_user]:
dummy, service_token[service_user] = get_url_token(
service_user, os_options)
kwargs['service_token'] = service_token[service_user]
return func(url[url_account], token[use_account],
parsed_result, conn[url_account],
*args, **kwargs)
except (socket.error, HTTPException):
if attempts > retries:
raise
parsed[use_account] = conn[use_account] = None
if service_user:
service_token[service_user] = None
except AuthError:
auth_failure = True
url[use_account] = token[use_account] = None
if service_user:
service_token[service_user] = None
except InternalServerError:
pass
if attempts <= retries:
if not auth_failure:
sleep(backoff)
backoff *= 2
raise Exception('No result after %s retries.' % retries) |
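A typical call from a functional test passes a small request function, which retry() then invokes with the resolved storage URL, token, parsed URL and connection; a minimal sketch (hypothetical head_account function) might look like:

def head_account(url, token, parsed, conn):
    conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
    return conn.getresponse()

resp = retry(head_account, use_account=1)
resp.read()
assert resp.status // 100 == 2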
Reset the swift environment to a clean state. After calling this
method, we can assume the backend swift keeps no containers and no
objects on this connection's account. | def tear_down_s3(conn):
"""
    Reset the swift environment to a clean state. After calling this
    method, we can assume the backend swift keeps no containers and no
objects on this connection's account.
"""
exceptions = []
for i in range(RETRY_COUNT):
try:
resp = conn.list_buckets()
buckets = [bucket['Name'] for bucket in resp.get('Buckets', [])]
for bucket in buckets:
try:
resp = conn.list_multipart_uploads(Bucket=bucket)
for upload in resp.get('Uploads', []):
conn.abort_multipart_upload(
Bucket=bucket,
Key=upload['Key'],
UploadId=upload['UploadId'])
resp = conn.list_objects(Bucket=bucket)
for obj in resp.get('Contents', []):
conn.delete_object(Bucket=bucket, Key=obj['Key'])
try:
conn.delete_bucket(Bucket=bucket)
except ClientError as e:
err_code = e.response.get('Error', {}).get('Code')
if err_code != 'BucketNotEmpty':
raise
# else, listing consistency issue; try again
except ClientError as e:
# 404 means NoSuchBucket, NoSuchKey, or NoSuchUpload
if e.response['ResponseMetadata']['HTTPStatusCode'] != 404:
raise
except Exception:
exceptions.append(''.join(
traceback.format_exception(*sys.exc_info())))
if exceptions:
exceptions.insert(0, 'Too many errors to continue:')
raise Exception('\n========\n'.join(exceptions)) |
Return a tester connection that behaves as:
user_test_admin = admin .admin | def get_admin_connection():
"""
    Return a tester connection that behaves as:
user_test_admin = admin .admin
"""
aws_access_key = tf.config['s3_access_key']
aws_secret_key = tf.config['s3_secret_key']
user_id = tf.config['s3_access_key']
return Connection(aws_access_key, aws_secret_key, user_id) |
Look for attrs with a truthy attribute __command__ and add them to an
attribute __commands__ on the type that maps names to decorated methods.
The decorated methods' doc strings also get mapped in __docs__.
Also adds a method run(command_name, *args, **kwargs) that will
execute the method mapped to the name in __commands__. | def meta_command(name, bases, attrs):
"""
Look for attrs with a truthy attribute __command__ and add them to an
attribute __commands__ on the type that maps names to decorated methods.
The decorated methods' doc strings also get mapped in __docs__.
Also adds a method run(command_name, *args, **kwargs) that will
execute the method mapped to the name in __commands__.
"""
commands = {}
docs = {}
for attr, value in attrs.items():
if getattr(value, '__command__', False):
commands[attr] = value
# methods always have a __doc__ attribute, sometimes empty
docs[attr] = (getattr(value, '__doc__', None) or
'perform the %s command' % attr).strip()
attrs['__commands__'] = commands
attrs['__docs__'] = docs
def run(self, command, *args, **kwargs):
return self.__commands__[command](self, *args, **kwargs)
attrs.setdefault('run', run)
return type(name, bases, attrs) |
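For illustration, meta_command can be called directly as a metaclass factory; a minimal sketch (hypothetical Tool class and ping command) might look like:

def ping(self):
    """send a ping"""
    return 'pong'
ping.__command__ = True

Tool = meta_command('Tool', (object,), {'ping': ping})
assert Tool().run('ping') == 'pong'
assert Tool.__docs__['ping'] == 'send a ping'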
Iterate over ``nodes`` yielding only those not in ``excludes``.
The index key of the node dicts is ignored when matching nodes against the
``excludes`` nodes. Index is not a fundamental property of a node but a
variable annotation added by the Ring depending upon the partition for
which the nodes were generated.
:param nodes: an iterable of node dicts.
:param *excludes: one or more node dicts that should not be yielded.
:return: yields node dicts. | def exclude_nodes(nodes, *excludes):
"""
Iterate over ``nodes`` yielding only those not in ``excludes``.
The index key of the node dicts is ignored when matching nodes against the
``excludes`` nodes. Index is not a fundamental property of a node but a
variable annotation added by the Ring depending upon the partition for
which the nodes were generated.
:param nodes: an iterable of node dicts.
:param *excludes: one or more node dicts that should not be yielded.
:return: yields node dicts.
"""
for node in nodes:
match_node = {k: mock.ANY if k == 'index' else v
for k, v in node.items()}
if any(exclude == match_node for exclude in excludes):
continue
yield node |
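For example (hypothetical two-key node dicts), only the non-index keys have to match for a node to be excluded:

nodes = [{'id': 0, 'index': 0}, {'id': 1, 'index': 1}]
# the excluded node's 'index' value is irrelevant; the other keys decide
remaining = list(exclude_nodes(nodes, {'id': 0, 'index': 7}))
assert remaining == [{'id': 1, 'index': 1}]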
Helper to ensure swiftclient sends a chunked request. | def chunker(body):
'''Helper to ensure swiftclient sends a chunked request.'''
yield body |
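Because swiftclient cannot determine a content length for a generator, wrapping the body in chunker() forces a chunked-encoded PUT; a usage sketch (assuming a swiftclient Connection instance named client) might look like:

client.put_object('test-container', 'test-obj',
                  contents=chunker(b'some test data'))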
Recursively collect dirs and files under each directory in path_list.
:param path_list: list of start directories for collecting
:return: (files_list, dir_list) tuple of included
         files and directories | def collect_info(path_list):
"""
    Recursively collect dirs and files under each directory in path_list.
    :param path_list: list of start directories for collecting
    :return: (files_list, dir_list) tuple of included
             files and directories
"""
files_list = []
dir_list = []
for path in path_list:
temp_files_list = []
temp_dir_list = []
for root, dirs, files in os.walk(path):
files = [f for f in files if not EXCLUDE_FILES.match(f)]
temp_files_list += files
temp_dir_list += dirs
files_list.append(temp_files_list)
dir_list.append(temp_dir_list)
return files_list, dir_list |
Find the node with maximum occupancy.
:param dir_list: list of directories for each node.
:return: index of the node in dir_list with the most directories | def find_max_occupancy_node(dir_list):
"""
    Find the node with maximum occupancy.
    :param dir_list: list of directories for each node.
    :return: index of the node in dir_list with the most directories
"""
count = 0
number = 0
length = 0
for dirs in dir_list:
if length < len(dirs):
length = len(dirs)
number = count
count += 1
return number |
Read user credentials from an AWS CLI style credentials file and translate
to a swift test config. Currently only supports a single user.
:param conf_file: path to AWS credentials file | def load_aws_config(conf_file):
"""
Read user credentials from an AWS CLI style credentials file and translate
to a swift test config. Currently only supports a single user.
:param conf_file: path to AWS credentials file
"""
conf = readconf(conf_file, 'default')
global _CONFIG
_CONFIG = {
'endpoint': 'https://s3.amazonaws.com',
'region': 'us-east-1',
'access_key1': conf.get('aws_access_key_id'),
'secret_key1': conf.get('aws_secret_access_key'),
'session_token1': conf.get('aws_session_token')
} |
Get a boto3 client to talk to an S3 endpoint.
:param user: user number to use. Should be one of:
1 -- primary user
2 -- secondary user
3 -- unprivileged user
:param signature_version: S3 signing method. Should be one of:
s3 -- v2 signatures; produces Authorization headers like
``AWS access_key:signature``
s3-query -- v2 pre-signed URLs; produces query strings like
``?AWSAccessKeyId=access_key&Signature=signature``
s3v4 -- v4 signatures; produces Authorization headers like
``AWS4-HMAC-SHA256
Credential=access_key/date/region/s3/aws4_request,
Signature=signature``
s3v4-query -- v4 pre-signed URLs; produces query strings like
``?X-Amz-Algorithm=AWS4-HMAC-SHA256&
X-Amz-Credential=access_key/date/region/s3/aws4_request&
X-Amz-Signature=signature``
:param addressing_style: One of:
path -- produces URLs like ``http(s)://host.domain/bucket/key``
virtual -- produces URLs like ``http(s)://bucket.host.domain/key`` | def get_s3_client(user=1, signature_version='s3v4', addressing_style='path'):
'''
Get a boto3 client to talk to an S3 endpoint.
:param user: user number to use. Should be one of:
1 -- primary user
2 -- secondary user
3 -- unprivileged user
:param signature_version: S3 signing method. Should be one of:
s3 -- v2 signatures; produces Authorization headers like
``AWS access_key:signature``
s3-query -- v2 pre-signed URLs; produces query strings like
``?AWSAccessKeyId=access_key&Signature=signature``
s3v4 -- v4 signatures; produces Authorization headers like
``AWS4-HMAC-SHA256
Credential=access_key/date/region/s3/aws4_request,
Signature=signature``
s3v4-query -- v4 pre-signed URLs; produces query strings like
``?X-Amz-Algorithm=AWS4-HMAC-SHA256&
X-Amz-Credential=access_key/date/region/s3/aws4_request&
X-Amz-Signature=signature``
:param addressing_style: One of:
path -- produces URLs like ``http(s)://host.domain/bucket/key``
virtual -- produces URLs like ``http(s)://bucket.host.domain/key``
'''
endpoint = get_opt('endpoint', None)
if endpoint:
scheme = urllib.parse.urlsplit(endpoint).scheme
if scheme not in ('http', 'https'):
raise ConfigError('unexpected scheme in endpoint: %r; '
'expected http or https' % scheme)
else:
scheme = None
region = get_opt('region', 'us-east-1')
access_key = get_opt_or_error('access_key%d' % user)
secret_key = get_opt_or_error('secret_key%d' % user)
session_token = get_opt('session_token%d' % user)
ca_cert = get_opt('ca_cert')
if ca_cert is not None:
try:
# do a quick check now; it's more expensive to have boto check
os.stat(ca_cert)
except OSError as e:
raise ConfigError(str(e))
return boto3.client(
's3',
endpoint_url=endpoint,
region_name=region,
use_ssl=(scheme == 'https'),
verify=ca_cert,
config=boto3.session.Config(s3={
'signature_version': signature_version,
'addressing_style': addressing_style,
}),
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token
) |
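A usage sketch from a functional test: list buckets as the primary user with v4 signing and virtual-host style addressing.

client = get_s3_client(user=1, signature_version='s3v4',
                       addressing_style='virtual')
resp = client.list_buckets()
bucket_names = [b['Name'] for b in resp.get('Buckets', [])]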
Setup proxy, account, container and object servers using a set of fake
rings and policies.
:param the_object_server: The object server module to use (optional,
defaults to swift.obj.server)
:param extra_conf: A dict of config options that will update the basic
config passed to all server instances.
:returns: A dict containing the following entries:
orig_POLICIES: the value of storage_policy.POLICIES prior to
it being patched with fake policies
orig_SysLogHandler: the value of utils.logs.SysLogHandler
prior to it being patched
testdir: root directory used for test files
test_POLICIES: a StoragePolicyCollection of fake policies
test_servers: a tuple of test server instances
test_sockets: a tuple of sockets used by test servers
test_coros: a tuple of greenthreads in which test servers are
running | def setup_servers(the_object_server=object_server, extra_conf=None):
"""
Setup proxy, account, container and object servers using a set of fake
rings and policies.
:param the_object_server: The object server module to use (optional,
defaults to swift.obj.server)
:param extra_conf: A dict of config options that will update the basic
config passed to all server instances.
:returns: A dict containing the following entries:
orig_POLICIES: the value of storage_policy.POLICIES prior to
it being patched with fake policies
orig_SysLogHandler: the value of utils.logs.SysLogHandler
prior to it being patched
testdir: root directory used for test files
test_POLICIES: a StoragePolicyCollection of fake policies
test_servers: a tuple of test server instances
test_sockets: a tuple of sockets used by test servers
test_coros: a tuple of greenthreads in which test servers are
running
"""
context = {
"orig_POLICIES": storage_policy._POLICIES,
"orig_SysLogHandler": utils.logs.SysLogHandler}
utils.HASH_PATH_SUFFIX = b'endcap'
utils.logs.SysLogHandler = mock.MagicMock()
# Since we're starting up a lot here, we're going to test more than
# just chunked puts; we're also going to test parts of
# proxy_server.Application we couldn't get to easily otherwise.
context["testdir"] = _testdir = \
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
'sdf1', 'sdg1', 'sdh1', 'sdi1', 'sdj1',
'sdk1', 'sdl1'):
mkdirs(os.path.join(_testdir, drive, 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
'allow_versions': 't', 'node_timeout': 20}
if extra_conf:
conf.update(extra_conf)
context['conf'] = conf
prolis = listen_zero()
acc1lis = listen_zero()
acc2lis = listen_zero()
con1lis = listen_zero()
con2lis = listen_zero()
obj1lis = listen_zero()
obj2lis = listen_zero()
obj3lis = listen_zero()
obj4lis = listen_zero()
obj5lis = listen_zero()
obj6lis = listen_zero()
objsocks = [obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis]
context["test_sockets"] = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis,
obj4lis, obj5lis, obj6lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
{'port': acc2lis.getsockname()[1]},
]
write_fake_ring(account_ring_path, *account_devs)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
container_devs = [
{'port': con1lis.getsockname()[1]},
{'port': con2lis.getsockname()[1]},
]
write_fake_ring(container_ring_path, *container_devs)
storage_policy._POLICIES = storage_policy.StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False),
ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
ECStoragePolicy(4, 'ec-dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, ec_segment_size=4096,
ec_duplication_factor=2)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
# sdg1, sdh1, sdi1 taken by policy 3 (see below)
}
for policy_index, devices in obj_rings.items():
policy = storage_policy.POLICIES[policy_index]
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
{'port': objsock.getsockname()[1], 'device': dev}
for objsock, dev in zip(objsocks, devices)]
write_fake_ring(obj_ring_path, *obj_devs)
# write_fake_ring can't handle a 3-element ring, and the EC policy needs
# at least 6 devs to work with (ec_k=2, ec_m=1, duplication_factor=2),
# so we do it manually
devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
{'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
'port': obj3lis.getsockname()[1]},
{'id': 3, 'zone': 0, 'device': 'sdj1', 'ip': '127.0.0.1',
'port': obj4lis.getsockname()[1]},
{'id': 4, 'zone': 0, 'device': 'sdk1', 'ip': '127.0.0.1',
'port': obj5lis.getsockname()[1]},
{'id': 5, 'zone': 0, 'device': 'sdl1', 'ip': '127.0.0.1',
'port': obj6lis.getsockname()[1]}]
pol3_replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
pol4_replica2part2dev_id = [[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 0],
[4, 5, 0, 1],
[5, 0, 1, 2]]
obj3_ring_path = os.path.join(
_testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz')
part_shift = 30
with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
obj4_ring_path = os.path.join(
_testdir, storage_policy.POLICIES[4].ring_name + '.ring.gz')
part_shift = 30
with closing(GzipFile(obj4_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol4_replica2part2dev_id, devs, part_shift), fh)
prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
for policy in storage_policy.POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
# don't lose this one!
context["test_POLICIES"] = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
conf, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
conf, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
conf, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
obj3srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj3'))
obj4srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj4'))
obj5srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj5'))
obj6srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj6'))
context["test_servers"] = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv,
obj4srv, obj5srv, obj6srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(
listing_formats.ListingFilter(prosrv, {}, logger=prosrv.logger),
conf, logger=prosrv.logger)
# Yes, eventlet, we know -- we have to support bad clients, though
warnings.filterwarnings(
'ignore', module='eventlet',
message='capitalize_response_headers is disabled')
prospa = spawn(wsgi.server, prolis, logging_prosv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
context["test_coros"] = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa,
obj4spa, obj5spa, obj6spa)
# Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT', '/a',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert resp.status == 201
# Create another account
# used for account-to-account tests
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a1')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT',
'/a1',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert resp.status == 201
# Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
# Create container in other account
# used for account-to-account tests
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(
b'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected %r, encountered %r" % (exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(
b'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
return context |
Pretty much just a two node, two replica, 2 part power ring... | def write_fake_ring(path, *devs):
"""
Pretty much just a two node, two replica, 2 part power ring...
"""
dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6200}
dev2 = {'id': 1, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6200}
dev1_updates, dev2_updates = devs or ({}, {})
dev1.update(dev1_updates)
dev2.update(dev2_updates)
replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
devs = [dev1, dev2]
part_shift = 30
with closing(GzipFile(path, 'wb')) as f:
pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f) |
Pretty much just a three node, three replica, 8 part power builder...
:param tmpdir: a place to write the builder, be sure to clean it up!
:param region: an integer, fills in region and ip
:param name: the name of the builder (i.e. <name>.builder) | def write_stub_builder(tmpdir, region=1, name=''):
"""
Pretty much just a three node, three replica, 8 part power builder...
:param tmpdir: a place to write the builder, be sure to clean it up!
:param region: an integer, fills in region and ip
:param name: the name of the builder (i.e. <name>.builder)
"""
name = name or str(region)
replicas = 3
builder = RingBuilder(8, replicas, 1)
for i in range(replicas):
dev = {'weight': 100,
'region': '%d' % region,
'zone': '1',
'ip': '10.0.0.%d' % region,
'port': '3600',
'device': 'sdb%d' % i}
builder.add_dev(dev)
builder.rebalance()
builder_file = os.path.join(tmpdir, '%s.builder' % name)
builder.save(builder_file)
return builder, builder_file |
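A usage sketch with a throwaway directory (mkdtemp and rmtree are the same helpers used by with_tempdir below):

tmpdir = mkdtemp()
try:
    builder, builder_file = write_stub_builder(tmpdir, region=2, name='foo')
    # three devices, one per replica, all placed in region 2
    assert len(builder.devs) == 3
    assert builder_file.endswith('foo.builder')
finally:
    rmtree(tmpdir)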
Decorator to give a single test a tempdir as argument to test method. | def with_tempdir(f):
"""
Decorator to give a single test a tempdir as argument to test method.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
tempdir = mkdtemp()
args = list(args)
args.append(tempdir)
try:
return f(*args, **kwargs)
finally:
rmtree(tempdir)
return wrapped |
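A minimal usage sketch (hypothetical test case, assuming the usual os and unittest imports):

class TestSomething(unittest.TestCase):
    @with_tempdir
    def test_writes_file(self, tempdir):
        # the decorator appends the temp dir path as the last positional arg
        path = os.path.join(tempdir, 'data')
        with open(path, 'w') as f:
            f.write('x')
        self.assertTrue(os.path.exists(path))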
All device/drive/mount checking should be done through the constraints
module. If we keep the mocking consistently within that module, we can
keep our tests robust to further rework on that interface.
Replace the constraint module's underlying os calls with mocks.
:param isdir: return value of constraints isdir calls, default False
:param ismount: return value of constraints ismount calls, default False
:returns: a dict of constraint module mocks | def mock_check_drive(isdir=False, ismount=False):
"""
All device/drive/mount checking should be done through the constraints
module. If we keep the mocking consistently within that module, we can
keep our tests robust to further rework on that interface.
    Replace the constraint module's underlying os calls with mocks.
:param isdir: return value of constraints isdir calls, default False
:param ismount: return value of constraints ismount calls, default False
:returns: a dict of constraint module mocks
"""
mock_base = 'swift.common.constraints.'
with mocklib.patch(mock_base + 'isdir') as mock_isdir, \
mocklib.patch(mock_base + 'utils.ismount') as mock_ismount:
mock_isdir.return_value = isdir
mock_ismount.return_value = ismount
yield {
'isdir': mock_isdir,
'ismount': mock_ismount,
} |
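A usage sketch, assuming the generator is wrapped as a context manager (as its yield implies), around code under test that consults the constraints module:

with mock_check_drive(ismount=True) as mocks:
    # the code under test now sees every device as a mounted directory
    run_code_that_checks_drives()  # hypothetical call under test
    # the individual mocks remain available for assertions
    assert mocks['ismount'].called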
Given a stub body produce a list of complete frag_archive bodies as
strings in frag_index order.
:param policy: a StoragePolicy instance, with policy_type EC_POLICY
:param body: a string, the body to encode into frag archives
:returns: list of strings, the complete frag_archive bodies for the given
plaintext | def encode_frag_archive_bodies(policy, body):
"""
Given a stub body produce a list of complete frag_archive bodies as
strings in frag_index order.
:param policy: a StoragePolicy instance, with policy_type EC_POLICY
:param body: a string, the body to encode into frag archives
:returns: list of strings, the complete frag_archive bodies for the given
plaintext
"""
segment_size = policy.ec_segment_size
# split up the body into buffers
chunks = [body[x:x + segment_size]
for x in range(0, len(body), segment_size)]
# encode the buffers into fragment payloads
fragment_payloads = []
for chunk in chunks:
fragments = policy.pyeclib_driver.encode(chunk) \
* policy.ec_duplication_factor
if not fragments:
break
fragment_payloads.append(fragments)
# join up the fragment payloads per node
ec_archive_bodies = [b''.join(frags)
for frags in zip(*fragment_payloads)]
return ec_archive_bodies |
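For example, given an EC policy instance (here a hypothetical ec_policy, such as one of those created by setup_servers() above), the helper returns one complete archive per fragment index:

test_body = b'test' * 100
frag_archives = encode_frag_archive_bodies(ec_policy, test_body)
# e.g. 3 archives for a 2+1 policy, or 6 when ec_duplication_factor == 2
assert len(frag_archives) == (ec_policy.ec_ndata + ec_policy.ec_nparity) * \
    ec_policy.ec_duplication_factor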
Given a list of entries for each node in ring order, where the entries
are a dict (or list of dicts) which describes the fragment (or
fragments) that are on the node; create a function suitable for use
with capture_http_requests that will accept a req object and return a
response that will suitably fake the behavior of an object server that
had the given fragments on disk at the time.
:param node_frags: a list. Each item in the list describes the
fragments that are on a node; each item is a dict or list of dicts,
each dict describing a single fragment; where the item is a list,
repeated calls to get_response will return fragments in the order
of the list; each dict has keys:
- obj: an object stub, as generated by _make_ec_object_stub,
that defines all of the fragments that compose an object
at a specific timestamp.
- frag: the index of a fragment to be selected from the object
stub
- durable (optional): True if the selected fragment is durable
:param policy: storage policy to return | def fake_ec_node_response(node_frags, policy):
"""
Given a list of entries for each node in ring order, where the entries
are a dict (or list of dicts) which describes the fragment (or
fragments) that are on the node; create a function suitable for use
with capture_http_requests that will accept a req object and return a
    response that will suitably fake the behavior of an object server that
had the given fragments on disk at the time.
:param node_frags: a list. Each item in the list describes the
fragments that are on a node; each item is a dict or list of dicts,
each dict describing a single fragment; where the item is a list,
repeated calls to get_response will return fragments in the order
of the list; each dict has keys:
- obj: an object stub, as generated by _make_ec_object_stub,
that defines all of the fragments that compose an object
at a specific timestamp.
- frag: the index of a fragment to be selected from the object
stub
- durable (optional): True if the selected fragment is durable
:param policy: storage policy to return
"""
node_map = {} # maps node ip and port to node index
all_nodes = []
call_count = {} # maps node index to get_response call count for node
def _build_node_map(req, policy):
part = utils.split_path(req['path'], 5, 5, True)[1]
all_nodes.extend(policy.object_ring.get_part_nodes(part))
all_nodes.extend(policy.object_ring.get_more_nodes(part))
for i, node in enumerate(all_nodes):
node_map[(node['ip'], node['port'])] = i
call_count[i] = 0
# normalize node_frags to a list of fragments for each node even
# if there's only one fragment in the dataset provided.
for i, frags in enumerate(node_frags):
if isinstance(frags, dict):
node_frags[i] = [frags]
def get_response(req):
requested_policy = int(
req['headers']['X-Backend-Storage-Policy-Index'])
if int(policy) != requested_policy:
            raise AssertionError(
                "Requested policy doesn't match the fake response policy")
if not node_map:
_build_node_map(req, policy)
try:
node_index = node_map[(req['ip'], req['port'])]
except KeyError:
raise Exception("Couldn't find node %s:%s in %r" % (
req['ip'], req['port'], all_nodes))
try:
frags = node_frags[node_index]
except IndexError:
raise Exception('Found node %r:%r at index %s - '
'but only got %s stub response nodes' % (
req['ip'], req['port'], node_index,
len(node_frags)))
if not frags:
return StubResponse(404)
# determine response fragment (if any) for this call
resp_frag = frags[call_count[node_index]]
call_count[node_index] += 1
frag_prefs = req['headers'].get('X-Backend-Fragment-Preferences')
if not (frag_prefs or resp_frag.get('durable', True)):
return StubResponse(404)
# prepare durable timestamp and backend frags header for this node
obj_stub = resp_frag['obj']
ts2frags = defaultdict(list)
durable_timestamp = None
for frag in frags:
ts_frag = frag['obj']['timestamp']
if frag.get('durable', True):
durable_timestamp = ts_frag.internal
ts2frags[ts_frag].append(frag['frag'])
try:
body = obj_stub['frags'][resp_frag['frag']]
except IndexError as err:
raise Exception(
'Frag index %s not defined: node index %s, frags %r\n%s' %
(resp_frag['frag'], node_index, [f['frag'] for f in frags],
err))
headers = {
'X-Object-Sysmeta-Ec-Content-Length': len(obj_stub['body']),
'X-Object-Sysmeta-Ec-Etag': obj_stub['etag'],
'X-Object-Sysmeta-Ec-Frag-Index':
policy.get_backend_index(resp_frag['frag']),
'X-Backend-Timestamp': obj_stub['timestamp'].internal,
'X-Timestamp': obj_stub['timestamp'].normal,
'X-Backend-Data-Timestamp': obj_stub['timestamp'].internal,
'X-Backend-Fragments':
server._make_backend_fragments_header(ts2frags)
}
if durable_timestamp:
headers['X-Backend-Durable-Timestamp'] = durable_timestamp
return StubResponse(200, body, headers)
return get_response |
This check simply sets more than 4k of metadata on a tempfile and
returns True if it worked and False if not.
We want to use *more* than 4k of metadata in this check because
some filesystems (eg ext4) only allow one blocksize worth of
metadata. The XFS filesystem doesn't have this limit, and so this
check returns True when TMPDIR is XFS. This check will return
False under ext4 (which supports xattrs <= 4k) and tmpfs (which
doesn't support xattrs at all). | def xattr_supported_check():
"""
This check simply sets more than 4k of metadata on a tempfile and
returns True if it worked and False if not.
We want to use *more* than 4k of metadata in this check because
some filesystems (eg ext4) only allow one blocksize worth of
metadata. The XFS filesystem doesn't have this limit, and so this
check returns True when TMPDIR is XFS. This check will return
False under ext4 (which supports xattrs <= 4k) and tmpfs (which
doesn't support xattrs at all).
"""
global supports_xattr_cached_val
if supports_xattr_cached_val is not None:
return supports_xattr_cached_val
# assume the worst -- xattrs aren't supported
supports_xattr_cached_val = False
big_val = b'x' * (4096 + 1) # more than 4k of metadata
try:
fd, tmppath = mkstemp()
xattr.setxattr(fd, 'user.swift.testing_key', big_val)
except IOError as e:
if errno.errorcode.get(e.errno) in ('ENOSPC', 'ENOTSUP', 'EOPNOTSUPP',
'ERANGE'):
# filesystem does not support xattr of this size
return False
raise
else:
supports_xattr_cached_val = True
return True
finally:
# clean up the tmpfile
os.close(fd)
os.unlink(tmppath) |
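The check is typically used to guard xattr-dependent tests, e.g. (hypothetical test class, assuming unittest is imported):

@unittest.skipUnless(xattr_supported_check(),
                     'big xattrs not supported in TMPDIR')
class TestDiskFileXattrs(unittest.TestCase):
    def test_needs_big_xattrs(self):
        pass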
The AccountBroker initialize() function before we added the
policy stat table. Used by test_policy_table_creation() to
make sure that the AccountBroker will correctly add the table
for cases where the DB existed before the policy support was added.
:param conn: DB connection object
:param put_timestamp: put timestamp | def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):
"""
    The AccountBroker initialize() function before we added the
policy stat table. Used by test_policy_table_creation() to
make sure that the AccountBroker will correctly add the table
for cases where the DB existed before the policy support was added.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if not self.account:
raise ValueError(
'Attempting to create a new database with no account set')
self.create_container_table(conn)
self.create_account_stat_table(conn, put_timestamp) |
Copied from AccountBroker before the metadata column was
added; used for testing with TestAccountBrokerBeforeMetadata.
Create account_stat table which is specific to the account DB.
:param conn: DB connection object
:param put_timestamp: put timestamp | def premetadata_create_account_stat_table(self, conn, put_timestamp):
"""
Copied from AccountBroker before the metadata column was
added; used for testing with TestAccountBrokerBeforeMetadata.
Create account_stat table which is specific to the account DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
conn.executescript('''
CREATE TABLE account_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
container_count INTEGER,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO account_stat (container_count) VALUES (0);
''')
conn.execute('''
UPDATE account_stat SET account = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, Timestamp.now().internal, str(uuid4()),
put_timestamp)) |
Copied from AccountBroker before the storage_policy_index column was
added; used for testing with TestAccountBrokerBeforeSPI.
Create container table which is specific to the account DB.
:param conn: DB connection object | def prespi_create_container_table(self, conn):
"""
    Copied from AccountBroker before the storage_policy_index column was
added; used for testing with TestAccountBrokerBeforeSPI.
Create container table which is specific to the account DB.
:param conn: DB connection object
"""
conn.executescript("""
CREATE TABLE container (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
put_timestamp TEXT,
delete_timestamp TEXT,
object_count INTEGER,
bytes_used INTEGER,
deleted INTEGER DEFAULT 0
);
CREATE INDEX ix_container_deleted_name ON
container (deleted, name);
CREATE TRIGGER container_insert AFTER INSERT ON container
BEGIN
UPDATE account_stat
SET container_count = container_count + (1 - new.deleted),
object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used,
hash = chexor(hash, new.name,
new.put_timestamp || '-' ||
new.delete_timestamp || '-' ||
new.object_count || '-' || new.bytes_used);
END;
CREATE TRIGGER container_update BEFORE UPDATE ON container
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER container_delete AFTER DELETE ON container
BEGIN
UPDATE account_stat
SET container_count = container_count - (1 - old.deleted),
object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used,
hash = chexor(hash, old.name,
old.put_timestamp || '-' ||
old.delete_timestamp || '-' ||
old.object_count || '-' || old.bytes_used);
END;
""") |
Copied from AccountBroker before the container_count column was
added.
Create policy_stat table which is specific to the account DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
:param conn: DB connection object | def pre_track_containers_create_policy_stat(self, conn):
"""
Copied from AccountBroker before the container_count column was
added.
Create policy_stat table which is specific to the account DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
:param conn: DB connection object
"""
conn.executescript("""
CREATE TABLE policy_stat (
storage_policy_index INTEGER PRIMARY KEY,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0
);
INSERT OR IGNORE INTO policy_stat (
storage_policy_index, object_count, bytes_used
)
SELECT 0, object_count, bytes_used
FROM account_stat
WHERE container_count > 0;
""") |
Copied from AccountBroker before the container_count column was
added (using old stat trigger script)
Create container table which is specific to the account DB.
:param conn: DB connection object | def pre_track_containers_create_container_table(self, conn):
"""
Copied from AccountBroker before the container_count column was
added (using old stat trigger script)
Create container table which is specific to the account DB.
:param conn: DB connection object
"""
# revert to old trigger script to support one of the tests
OLD_POLICY_STAT_TRIGGER_SCRIPT = """
CREATE TRIGGER container_insert_ps AFTER INSERT ON container
BEGIN
INSERT OR IGNORE INTO policy_stat
(storage_policy_index, object_count, bytes_used)
VALUES (new.storage_policy_index, 0, 0);
UPDATE policy_stat
SET object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used
WHERE storage_policy_index = new.storage_policy_index;
END;
CREATE TRIGGER container_delete_ps AFTER DELETE ON container
BEGIN
UPDATE policy_stat
SET object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used
WHERE storage_policy_index = old.storage_policy_index;
END;
"""
conn.executescript("""
CREATE TABLE container (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
put_timestamp TEXT,
delete_timestamp TEXT,
object_count INTEGER,
bytes_used INTEGER,
deleted INTEGER DEFAULT 0,
storage_policy_index INTEGER DEFAULT 0
);
CREATE INDEX ix_container_deleted_name ON
container (deleted, name);
CREATE TRIGGER container_insert AFTER INSERT ON container
BEGIN
UPDATE account_stat
SET container_count = container_count + (1 - new.deleted),
object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used,
hash = chexor(hash, new.name,
new.put_timestamp || '-' ||
new.delete_timestamp || '-' ||
new.object_count || '-' || new.bytes_used);
END;
CREATE TRIGGER container_update BEFORE UPDATE ON container
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER container_delete AFTER DELETE ON container
BEGIN
UPDATE account_stat
SET container_count = container_count - (1 - old.deleted),
object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used,
hash = chexor(hash, old.name,
old.put_timestamp || '-' ||
old.delete_timestamp || '-' ||
old.object_count || '-' || old.bytes_used);
END;
""" + OLD_POLICY_STAT_TRIGGER_SCRIPT) |
clean up my monkey patching | def teardown_module():
"clean up my monkey patching"
reload_module(db_replicator) |
read everything out of the file from the top and clear it out
| def pop_stream(f):
"""read everything out of file from the top and clear it out
"""
f.flush()
f.seek(0)
output = f.read()
f.seek(0)
f.truncate()
return output |
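A usage sketch with a seekable binary temp file (for instance the unbuffered NamedTemporaryFile defined just below):

with NamedTemporaryFile() as f:
    f.write(b'first line\n')
    assert pop_stream(f) == b'first line\n'
    # the file has been truncated, so a second pop returns nothing
    assert pop_stream(f) == b''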
Wrapper to tempfile.NamedTemporaryFile() disabling buffering.
The wrapper is used to support Python 2 and Python 3 in the same
code base. | def NamedTemporaryFile():
    '''Wrapper to tempfile.NamedTemporaryFile() disabling buffering.
The wrapper is used to support Python 2 and Python 3 in the same
code base.
'''
if six.PY3:
return tempfile.NamedTemporaryFile(buffering=0)
else:
return tempfile.NamedTemporaryFile(bufsize=0) |
Close a file descriptor, ignoring any exceptions | def safe_close(fd):
'''Close a file descriptor, ignoring any exceptions'''
try:
os.close(fd)
except Exception:
LOGGER.exception('Error while closing FD') |
Context-manager providing 2 ends of a pipe, closing them at exit | def pipe():
'''Context-manager providing 2 ends of a pipe, closing them at exit'''
fds = os.pipe()
try:
yield fds
finally:
safe_close(fds[0])
safe_close(fds[1]) |
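A usage sketch, assuming the generator is wrapped as a context manager as its docstring says; both descriptors are closed when the block exits:

with pipe() as (read_fd, write_fd):
    os.write(write_fd, b'hello')
    assert os.read(read_fd, 5) == b'hello'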
Convenience function reduces unit test churn | def get_account_for_tenant(test_auth, tenant_id):
"""Convenience function reduces unit test churn"""
return '%s%s' % (test_auth.reseller_prefixes[0], tenant_id) |
Decorator to change the timezone when running a test.
This uses the Eastern Time Zone definition from the time module's docs.
Note that the timezone affects things like time.time() and time.mktime(). | def local_tz(func):
'''
Decorator to change the timezone when running a test.
This uses the Eastern Time Zone definition from the time module's docs.
Note that the timezone affects things like time.time() and time.mktime().
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
tz = os.environ.get('TZ', '')
try:
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
return func(*args, **kwargs)
finally:
os.environ['TZ'] = tz
time.tzset()
return wrapper |
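A usage sketch (hypothetical test method): the body runs with TZ pinned to US Eastern time and the original TZ is restored afterwards:

@local_tz
def test_timezone_sensitive_code(self):
    # with TZ=EST+05EDT the standard-time UTC offset is five hours
    self.assertEqual(5 * 3600, time.timezone)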
Decorator to change the timezone when running a test.
This uses the Eastern Time Zone definition from the time module's docs.
Note that the timezone affects things like time.time() and time.mktime(). | def local_tz(func):
'''
Decorator to change the timezone when running a test.
This uses the Eastern Time Zone definition from the time module's docs.
Note that the timezone affects things like time.time() and time.mktime().
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
tz = os.environ.get('TZ', '')
try:
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
return func(*args, **kwargs)
finally:
os.environ['TZ'] = tz
time.tzset()
return wrapper |
Add options from kwargs into args dict. | def mock_options_set_defaults_side_effect(*args, **kwargs):
'''
Add options from kwargs into args dict.
'''
args[0].update(kwargs) |
Create a JSON string from a bucket list
:param buckets: a list of tuples (or lists) consisting of elements ordered as
     name, count, bytes | def create_bucket_list_json(buckets):
"""
    Create a JSON string from a bucket list
    :param buckets: a list of tuples (or lists) consisting of elements ordered as
        name, count, bytes
"""
bucket_list = [{'name': item[0], 'count': item[1], 'bytes': item[2]}
for item in buckets]
return json.dumps(bucket_list) |
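For example:

listing = create_bucket_list_json([('bucket1', 2, 1024), ('bucket2', 0, 0)])
assert json.loads(listing)[0] == {'name': 'bucket1', 'count': 2,
                                  'bytes': 1024}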
Returns a dictionary mapping the given device key to (number of
partitions assigned to that key). | def _partition_counts(builder, key='id'):
"""
Returns a dictionary mapping the given device key to (number of
partitions assigned to that key).
"""
return Counter(builder.devs[dev_id][key]
for part2dev_id in builder._replica2part2dev
for dev_id in part2dev_id) |
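For example, the same helper can count assignments per zone rather than per device id (assuming a populated RingBuilder with an integer replica count):

zone_counts = _partition_counts(builder, key='zone')
# every (replica, partition) slot is assigned exactly once
assert sum(zone_counts.values()) == builder.parts * builder.replicas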
Copied from ContainerBroker before the metadata column was
added; used for testing with TestContainerBrokerBeforeMetadata.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp | def premetadata_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the metadata column was
added; used for testing with TestContainerBrokerBeforeMetadata.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript('''
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
''')
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp)) |
Copied from ContainerBroker before the
x_container_sync_point[12] columns were added; used for testing with
TestContainerBrokerBeforeXSync.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp | def prexsync_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the
x_container_sync_point[12] columns were added; used for testing with
TestContainerBrokerBeforeXSync.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT ''
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp)) |
Copied from ContainerBroker before the
storage_policy_index column was added; used for testing with
TestContainerBrokerBeforeSPI.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp | def prespi_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the
storage_policy_index column was added; used for testing with
TestContainerBrokerBeforeSPI.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT '',
x_container_sync_point1 INTEGER DEFAULT -1,
x_container_sync_point2 INTEGER DEFAULT -1
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp)) |
Copied from ContainerBroker before the
reported column was added; used for testing with
TestContainerBrokerBeforeShardRangeReportedColumn.
Create a shard_range table with no 'reported' column.
:param conn: DB connection object | def pre_reported_create_shard_range_table(self, conn):
"""
Copied from ContainerBroker before the
reported column was added; used for testing with
TestContainerBrokerBeforeShardRangeReportedColumn.
Create a shard_range table with no 'reported' column.
:param conn: DB connection object
"""
conn.execute("""
CREATE TABLE shard_range (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
timestamp TEXT,
lower TEXT,
upper TEXT,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
meta_timestamp TEXT,
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT
);
""")
conn.execute("""
CREATE TRIGGER shard_range_update BEFORE UPDATE ON shard_range
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""") |
Copied from ContainerBroker before the
tombstones column was added; used for testing with
TestContainerBrokerBeforeShardRangeTombstonesColumn.
Create a shard_range table with no 'tombstones' column.
:param conn: DB connection object | def pre_tombstones_create_shard_range_table(self, conn):
"""
Copied from ContainerBroker before the
tombstones column was added; used for testing with
TestContainerBrokerBeforeShardRangeTombstonesColumn.
Create a shard_range table with no 'tombstones' column.
:param conn: DB connection object
"""
# Use execute (not executescript) so we get the benefits of our
# GreenDBConnection. Creating a table requires a whole-DB lock;
# *any* in-progress cursor will otherwise trip a "database is locked"
# error.
conn.execute("""
CREATE TABLE shard_range (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
timestamp TEXT,
lower TEXT,
upper TEXT,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
meta_timestamp TEXT,
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT,
reported INTEGER DEFAULT 0
);
""")
conn.execute("""
CREATE TRIGGER shard_range_update BEFORE UPDATE ON shard_range
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""") |
Spawn and capture the result so we can later wait on it. This means we can
test code executing in a greenthread but still wait() on the result to
ensure that the method has completed. | def fake_spawn():
"""
Spawn and capture the result so we can later wait on it. This means we can
test code executing in a greenthread but still wait() on the result to
ensure that the method has completed.
"""
greenlets = []
def _inner_fake_spawn(func, *a, **kw):
gt = greenthread.spawn(func, *a, **kw)
greenlets.append(gt)
return gt
object_server.spawn = _inner_fake_spawn
with mock.patch('swift.obj.server.spawn', _inner_fake_spawn):
try:
yield
finally:
for gt in greenlets:
gt.wait() |
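This generator is evidently meant to be used as a context manager (the contextmanager decorator is not shown here); a hedged usage sketch, with the controller and request names purely illustrative:
    # Everything the object server spawns inside the block is captured, and
    # each captured greenthread is wait()ed on exit, so assertions can follow
    # immediately after the block.
    with fake_spawn():
        resp = object_controller.PUT(req)   # hypothetical controller/request
    # async container updates triggered by the PUT have completed by here
    assert resp.status_int == 201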
This will unset a TestCase level patch_policies to use the module level
policies setup for the _test_servers instead.
N.B. You should NEVER modify the _test_server policies or rings during a
test because they persist for the life of the entire module! | def unpatch_policies(f):
"""
This will unset a TestCase level patch_policies to use the module level
policies setup for the _test_servers instead.
N.B. You should NEVER modify the _test_server policies or rings during a
test because they persist for the life of the entire module!
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with patch_policies(_test_POLICIES):
return f(*args, **kwargs)
return wrapper |
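A hedged usage sketch: the decorator goes on individual test methods of a TestCase that is otherwise wrapped in a class-level patch_policies:
    @unpatch_policies
    def test_uses_module_rings(self):
        # POLICIES now reflects the module-level _test_POLICIES set up for
        # _test_servers, not the class-level patch_policies override
        ...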
Return the given string of header names sorted.
headerNames: a comma-delimited list of header names | def sortHeaderNames(headerNames):
"""
Return the given string of header names sorted.
headerNames: a comma-delimited list of header names
"""
headers = [a.strip() for a in headerNames.split(',') if a.strip()]
headers.sort()
return ', '.join(headers) |
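For illustration, the expected behaviour in doctest form (empty items are dropped and ordering is a plain string sort):
    >>> sortHeaderNames('X-Object-Meta-Color, Content-Type,  , Etag')
    'Content-Type, Etag, X-Object-Meta-Color'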
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows successful testing on 32 bit systems. | def _limit_max_file_size(f):
"""
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows successful testing on 32 bit systems.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_max_file_size = constraints.MAX_FILE_SIZE
if constraints.MAX_FILE_SIZE >= sys.maxsize:
test_max_file_size = (2 ** 30 + 2)
with mock.patch.object(constraints, 'MAX_FILE_SIZE',
test_max_file_size):
return f(*args, **kwargs)
return wrapper |
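A hedged usage sketch; the test name and body are illustrative:
    @_limit_max_file_size
    def test_oversize_put_is_rejected(self):
        # constraints.MAX_FILE_SIZE is temporarily lowered to 2**30 + 2 only
        # when the configured value would not fit in sys.maxsize (32-bit)
        ...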
Returns a http_connect function that delegates to
entity-specific http_connect methods based on request path. | def get_http_connect(account_func, container_func, object_func):
'''Returns a http_connect function that delegates to
entity-specific http_connect methods based on request path.
'''
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
a, c, o = split_path(path, 1, 3, True)
if o:
func = object_func
elif c:
func = container_func
else:
func = account_func
resp = func(ipaddr, port, device, partition, method, path,
headers=headers, query_string=query_string)
return resp
return http_connect |
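A hedged usage sketch; the stub connect functions and FakeConn are hypothetical stand-ins for whatever canned responses a test needs:
    def fake_object_connect(ipaddr, port, device, partition, method, path,
                            headers=None, query_string=None):
        return FakeConn(201)        # hypothetical canned-response object
    http_connect = get_http_connect(fake_account_connect,     # hypothetical
                                    fake_container_connect,   # hypothetical
                                    fake_object_connect)
    # '/a/c/o' has an object component, so fake_object_connect handles it
    conn = http_connect('10.0.0.1', 6200, 'sda1', 0, 'PUT', '/a/c/o')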
Register function prototypes with a sourcekitd library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library. | def register_functions(lib, ignore_errors):
"""Register function prototypes with a sourcekitd library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library.
"""
def register(item):
return register_function(lib, item, ignore_errors)
    # Register each prototype eagerly; a bare map() call would be a no-op
    # under Python 3, where map() returns a lazy iterator.
    for item in functionList:
        register(item)
Return a list containing the start index of each line in s.
The list also contains a sentinel index for the end of the string,
so there will be one more element in the list than there are lines
in the string | def get_line_starts(s):
"""Return a list containing the start index of each line in s.
The list also contains a sentinel index for the end of the string,
so there will be one more element in the list than there are lines
in the string
"""
starts = [0]
for line in s.split('\n'):
starts.append(starts[-1] + len(line) + 1)
starts[-1] -= 1
return starts |
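For illustration, a doctest-style example; with no trailing newline the sentinel equals len(s):
    >>> get_line_starts('ab\ncd')
    [0, 3, 5]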
If s ends with a newline, drop it; else return s intact | def strip_trailing_nl(s):
"""If s ends with a newline, drop it; else return s intact"""
return s[:-1] if s.endswith('\n') else s |
Split s into a list of lines, each of which has a trailing newline
If the lines are later concatenated, the result is s, possibly
with a single appended newline. | def split_lines(s):
"""Split s into a list of lines, each of which has a trailing newline
If the lines are later concatenated, the result is s, possibly
with a single appended newline.
"""
return [line + '\n' for line in s.split('\n')] |
Translate a tokenize (line, column) pair into an absolute
position in source text given the position where we started
tokenizing and a list that maps lines onto their starting
character indexes. | def token_pos_to_index(token_pos, start, line_starts):
"""Translate a tokenize (line, column) pair into an absolute
position in source text given the position where we started
tokenizing and a list that maps lines onto their starting
character indexes.
"""
relative_token_line_plus1, token_col = token_pos
# line number where we started tokenizing
start_line_num = bisect(line_starts, start) - 1
# line number of the token in the whole text
abs_token_line = relative_token_line_plus1 - 1 + start_line_num
# if found in the first line, adjust the end column to account
# for the extra text
if relative_token_line_plus1 == 1:
token_col += start - line_starts[start_line_num]
# Sometimes tokenizer errors report a line beyond the last one
if abs_token_line >= len(line_starts):
return line_starts[-1]
return line_starts[abs_token_line] + token_col |
Apply Python's tokenize to source_text starting at index start
while matching open and close curly braces. When an unmatched
close curly brace is found, return its index. If not found,
return len(source_text). If there's a tokenization error, return
the position of the error. | def tokenize_python_to_unmatched_close_curly(source_text, start, line_starts):
"""Apply Python's tokenize to source_text starting at index start
while matching open and close curly braces. When an unmatched
close curly brace is found, return its index. If not found,
return len(source_text). If there's a tokenization error, return
the position of the error.
"""
stream = StringIO(source_text)
stream.seek(start)
nesting = 0
try:
for kind, text, token_start, token_end, line_text \
in tokenize.generate_tokens(stream.readline):
if text == '{':
nesting += 1
elif text == '}':
nesting -= 1
if nesting < 0:
return token_pos_to_index(token_start, start, line_starts)
except tokenize.TokenError as error:
(message, error_pos) = error.args
return token_pos_to_index(error_pos, start, line_starts)
return len(source_text) |
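For illustration, a small doctest-style example: the unmatched close brace sits at index 6 of the text:
    >>> text = 'a + b }\n'
    >>> tokenize_python_to_unmatched_close_curly(text, 0, get_line_starts(text))
    6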
Given the text of a template, returns an iterator over
(tokenType, token, match) tuples.
**Note**: this is template syntax tokenization, not Python
tokenization.
When a non-literal token is matched, a client may call
iter.send(pos) on the iterator to reset the position in
template_text at which scanning will resume.
This function provides a base level of tokenization which is
then refined by ParseContext.token_generator.
>>> from pprint import *
>>> pprint(list((kind, text) for kind, text, _ in tokenize_template(
... '%for x in range(10):\n% print x\n%end\njuicebox')))
[('gybLines', '%for x in range(10):\n% print x'),
('gybLinesClose', '%end'),
('literal', 'juicebox')]
>>> pprint(list((kind, text) for kind, text, _ in tokenize_template(
... '''Nothing
... % if x:
... % for i in range(3):
... ${i}
... % end
... % else:
... THIS SHOULD NOT APPEAR IN THE OUTPUT
... ''')))
[('literal', 'Nothing\n'),
('gybLines', '% if x:\n% for i in range(3):'),
('substitutionOpen', '${'),
('literal', '\n'),
('gybLinesClose', '% end'),
('gybLines', '% else:'),
('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')]
>>> for kind, text, _ in tokenize_template('''
... This is $some$ literal stuff containing a ${substitution}
... followed by a %{...} block:
... %{
... # Python code
... }%
... and here $${are} some %-lines:
... % x = 1
... % y = 2
... % if z == 3:
... % print '${hello}'
... % end
... % for x in zz:
... % print x
... % # different indentation
... % twice
... and some lines that literally start with a %% token
... %% first line
... %% second line
... '''):
... print((kind, text.strip().split('\n',1)[0]))
('literal', 'This is $some$ literal stuff containing a')
('substitutionOpen', '${')
('literal', 'followed by a %{...} block:')
('gybBlockOpen', '%{')
('literal', 'and here ${are} some %-lines:')
('gybLines', '% x = 1')
('gybLinesClose', '% end')
('gybLines', '% for x in zz:')
('gybLines', '% # different indentation')
('gybLines', '% twice')
('literal', 'and some lines that literally start with a % token') | def tokenize_template(template_text):
r"""Given the text of a template, returns an iterator over
(tokenType, token, match) tuples.
**Note**: this is template syntax tokenization, not Python
tokenization.
When a non-literal token is matched, a client may call
iter.send(pos) on the iterator to reset the position in
template_text at which scanning will resume.
This function provides a base level of tokenization which is
then refined by ParseContext.token_generator.
>>> from pprint import *
>>> pprint(list((kind, text) for kind, text, _ in tokenize_template(
... '%for x in range(10):\n% print x\n%end\njuicebox')))
[('gybLines', '%for x in range(10):\n% print x'),
('gybLinesClose', '%end'),
('literal', 'juicebox')]
>>> pprint(list((kind, text) for kind, text, _ in tokenize_template(
... '''Nothing
... % if x:
... % for i in range(3):
... ${i}
... % end
... % else:
... THIS SHOULD NOT APPEAR IN THE OUTPUT
... ''')))
[('literal', 'Nothing\n'),
('gybLines', '% if x:\n% for i in range(3):'),
('substitutionOpen', '${'),
('literal', '\n'),
('gybLinesClose', '% end'),
('gybLines', '% else:'),
('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')]
>>> for kind, text, _ in tokenize_template('''
... This is $some$ literal stuff containing a ${substitution}
... followed by a %{...} block:
... %{
... # Python code
... }%
... and here $${are} some %-lines:
... % x = 1
... % y = 2
... % if z == 3:
... % print '${hello}'
... % end
... % for x in zz:
... % print x
... % # different indentation
... % twice
... and some lines that literally start with a %% token
... %% first line
... %% second line
... '''):
... print((kind, text.strip().split('\n',1)[0]))
('literal', 'This is $some$ literal stuff containing a')
('substitutionOpen', '${')
('literal', 'followed by a %{...} block:')
('gybBlockOpen', '%{')
('literal', 'and here ${are} some %-lines:')
('gybLines', '% x = 1')
('gybLinesClose', '% end')
('gybLines', '% for x in zz:')
('gybLines', '% # different indentation')
('gybLines', '% twice')
('literal', 'and some lines that literally start with a % token')
"""
pos = 0
end = len(template_text)
saved_literal = []
literal_first_match = None
while pos < end:
m = tokenize_re.match(template_text, pos, end)
# pull out the one matched key (ignoring internal patterns starting
# with _)
((kind, text), ) = (
(kind, text) for (kind, text) in m.groupdict().items()
if text is not None and kind[0] != '_')
if kind in ('literal', 'symbol'):
if len(saved_literal) == 0:
literal_first_match = m
# literals and symbols get batched together
saved_literal.append(text)
pos = None
else:
# found a non-literal. First yield any literal we've accumulated
if saved_literal != []:
yield 'literal', ''.join(saved_literal), literal_first_match
saved_literal = []
# Then yield the thing we found. If we get a reply, it's
# the place to resume tokenizing
pos = yield kind, text, m
# If we were not sent a new position by our client, resume
# tokenizing at the end of this match.
if pos is None:
pos = m.end(0)
else:
# Client is not yet ready to process next token
yield
if saved_literal != []:
yield 'literal', ''.join(saved_literal), literal_first_match |
Return a list of lines at which to split the incoming source
These positions represent the beginnings of python line groups that
will require a matching %end construct if they are to be closed.
>>> src = split_lines('''\
... if x:
... print x
... if y: # trailing comment
... print z
... if z: # another comment\
... ''')
>>> s = split_gyb_lines(src)
>>> len(s)
2
>>> src[s[0]]
' print z\n'
>>> s[1] - len(src)
0
>>> src = split_lines('''\
... if x:
... if y: print 1
... if z:
... print 2
... pass\
... ''')
>>> s = split_gyb_lines(src)
>>> len(s)
1
>>> src[s[0]]
' if y: print 1\n'
>>> src = split_lines('''\
... if x:
... if y:
... print 1
... print 2
... ''')
>>> s = split_gyb_lines(src)
>>> len(s)
2
>>> src[s[0]]
' if y:\n'
>>> src[s[1]]
' print 1\n' | def split_gyb_lines(source_lines):
r"""Return a list of lines at which to split the incoming source
These positions represent the beginnings of python line groups that
will require a matching %end construct if they are to be closed.
>>> src = split_lines('''\
... if x:
... print x
... if y: # trailing comment
... print z
... if z: # another comment\
... ''')
>>> s = split_gyb_lines(src)
>>> len(s)
2
>>> src[s[0]]
' print z\n'
>>> s[1] - len(src)
0
>>> src = split_lines('''\
... if x:
... if y: print 1
... if z:
... print 2
... pass\
... ''')
>>> s = split_gyb_lines(src)
>>> len(s)
1
>>> src[s[0]]
' if y: print 1\n'
>>> src = split_lines('''\
... if x:
... if y:
... print 1
... print 2
... ''')
>>> s = split_gyb_lines(src)
>>> len(s)
2
>>> src[s[0]]
' if y:\n'
>>> src[s[1]]
' print 1\n'
"""
last_token_text, last_token_kind = None, None
unmatched_indents = []
dedents = 0
try:
for token_kind, token_text, token_start, \
(token_end_line, token_end_col), line_text \
in tokenize.generate_tokens(lambda i=iter(source_lines):
next(i)):
if token_kind in (tokenize.COMMENT, tokenize.ENDMARKER):
continue
if token_text == '\n' and last_token_text == ':':
unmatched_indents.append(token_end_line)
# The tokenizer appends dedents at EOF; don't consider
# those as matching indentations. Instead just save them
# up...
if last_token_kind == tokenize.DEDENT:
dedents += 1
# And count them later, when we see something real.
if token_kind != tokenize.DEDENT and dedents > 0:
unmatched_indents = unmatched_indents[:-dedents]
dedents = 0
last_token_text, last_token_kind = token_text, token_kind
except tokenize.TokenError:
# Let the later compile() call report the error
return []
if last_token_text == ':':
unmatched_indents.append(len(source_lines))
return unmatched_indents |
Return True iff the incoming Python source_lines begin with "else",
"elif", "except", or "finally".
Initial comments and whitespace are ignored.
>>> code_starts_with_dedent_keyword(split_lines('if x in y: pass'))
False
>>> code_starts_with_dedent_keyword(split_lines('except ifSomethingElse:'))
True
>>> code_starts_with_dedent_keyword(
... split_lines('\n# comment\nelse: # yes'))
True | def code_starts_with_dedent_keyword(source_lines):
r"""Return True iff the incoming Python source_lines begin with "else",
"elif", "except", or "finally".
Initial comments and whitespace are ignored.
>>> code_starts_with_dedent_keyword(split_lines('if x in y: pass'))
False
>>> code_starts_with_dedent_keyword(split_lines('except ifSomethingElse:'))
True
>>> code_starts_with_dedent_keyword(
... split_lines('\n# comment\nelse: # yes'))
True
"""
token_text = None
for token_kind, token_text, _, _, _ \
in tokenize.generate_tokens(lambda i=iter(source_lines): next(i)):
if token_kind != tokenize.COMMENT and token_text.strip() != '':
break
return token_text in ('else', 'elif', 'except', 'finally') |
Return the contents of the given template file, executed with the given
local bindings.
>>> from tempfile import NamedTemporaryFile
>>> # On Windows, the name of a NamedTemporaryFile cannot be used to open
>>> # the file for a second time if delete=True. Therefore, we have to
>>> # manually handle closing and deleting this file to allow us to open
>>> # the file by its name across all platforms.
>>> f = NamedTemporaryFile(delete=False)
>>> _ = f.write(
... br'''---
... % for i in range(int(x)):
... a pox on ${i} for epoxy
... % end
... ${120 +
...
... 3}
... abc
... ${"w\nx\nX\ny"}
... z
... ''')
>>> f.flush()
>>> result = expand(
... f.name,
... line_directive='//#sourceLocation(file: "%(file)s", ' + \
... 'line: %(line)d)',
... x=2
... ).replace(
... '"%s"' % f.name.replace('\\', '/'), '"dummy.file"')
>>> print(result, end='')
//#sourceLocation(file: "dummy.file", line: 1)
---
//#sourceLocation(file: "dummy.file", line: 3)
a pox on 0 for epoxy
//#sourceLocation(file: "dummy.file", line: 3)
a pox on 1 for epoxy
//#sourceLocation(file: "dummy.file", line: 5)
123
//#sourceLocation(file: "dummy.file", line: 8)
abc
w
x
X
y
//#sourceLocation(file: "dummy.file", line: 10)
z
>>> f.close()
>>> os.remove(f.name) | def expand(filename, line_directive=_default_line_directive, **local_bindings):
r"""Return the contents of the given template file, executed with the given
local bindings.
>>> from tempfile import NamedTemporaryFile
>>> # On Windows, the name of a NamedTemporaryFile cannot be used to open
>>> # the file for a second time if delete=True. Therefore, we have to
>>> # manually handle closing and deleting this file to allow us to open
>>> # the file by its name across all platforms.
>>> f = NamedTemporaryFile(delete=False)
>>> _ = f.write(
... br'''---
... % for i in range(int(x)):
... a pox on ${i} for epoxy
... % end
... ${120 +
...
... 3}
... abc
... ${"w\nx\nX\ny"}
... z
... ''')
>>> f.flush()
>>> result = expand(
... f.name,
... line_directive='//#sourceLocation(file: "%(file)s", ' + \
... 'line: %(line)d)',
... x=2
... ).replace(
... '"%s"' % f.name.replace('\\', '/'), '"dummy.file"')
>>> print(result, end='')
//#sourceLocation(file: "dummy.file", line: 1)
---
//#sourceLocation(file: "dummy.file", line: 3)
a pox on 0 for epoxy
//#sourceLocation(file: "dummy.file", line: 3)
a pox on 1 for epoxy
//#sourceLocation(file: "dummy.file", line: 5)
123
//#sourceLocation(file: "dummy.file", line: 8)
abc
w
x
X
y
//#sourceLocation(file: "dummy.file", line: 10)
z
>>> f.close()
>>> os.remove(f.name)
"""
with io.open(filename, encoding='utf-8') as f:
t = parse_template(filename, f.read())
d = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(filename)))
try:
return execute_template(
t, line_directive=line_directive, **local_bindings)
finally:
os.chdir(d) |
Return an AST corresponding to the given template file.
If text is supplied, it is assumed to be the contents of the file,
as a string.
>>> print(parse_template('dummy.file', text=
... '''% for x in [1, 2, 3]:
... % if x == 1:
... literal1
... % elif x > 1: # add output line after this line to fix bug
... % if x == 2:
... literal2
... % end
... % end
... % end
... '''))
Block:
[
Code:
{
for x in [1, 2, 3]:
__children__[0].execute(__context__)
}
[
Block:
[
Code:
{
if x == 1:
__children__[0].execute(__context__)
elif x > 1: # add output line after this line to fix bug
__children__[1].execute(__context__)
}
[
Block:
[
Literal:
literal1
]
Block:
[
Code:
{
if x == 2:
__children__[0].execute(__context__)
}
[
Block:
[
Literal:
literal2
]
]
]
]
]
]
]
>>> print(parse_template(
... 'dummy.file',
... text='%for x in range(10):\n% print(x)\n%end\njuicebox'))
Block:
[
Code:
{
for x in range(10):
__children__[0].execute(__context__)
}
[
Block:
[
Code: {print(x)} []
]
]
Literal:
juicebox
]
>>> print(parse_template('/dummy.file', text=
... '''Nothing
... % if x:
... % for i in range(3):
... ${i}
... % end
... % else:
... THIS SHOULD NOT APPEAR IN THE OUTPUT
... '''))
Block:
[
Literal:
Nothing
Code:
{
if x:
__children__[0].execute(__context__)
else:
__children__[1].execute(__context__)
}
[
Block:
[
Code:
{
for i in range(3):
__children__[0].execute(__context__)
}
[
Block:
[
Code: {(i)} []
Literal:
<BLANKLINE>
]
]
]
Block:
[
Literal:
THIS SHOULD NOT APPEAR IN THE OUTPUT
]
]
]
>>> print(parse_template('dummy.file', text='''%
... %for x in y:
... % print(y)
... '''))
Block:
[
Code:
{
for x in y:
__children__[0].execute(__context__)
}
[
Block:
[
Code: {print(y)} []
]
]
]
>>> print(parse_template('dummy.file', text='''%
... %if x:
... % print(y)
... AAAA
... %else:
... BBBB
... '''))
Block:
[
Code:
{
if x:
__children__[0].execute(__context__)
else:
__children__[1].execute(__context__)
}
[
Block:
[
Code: {print(y)} []
Literal:
AAAA
]
Block:
[
Literal:
BBBB
]
]
]
>>> print(parse_template('dummy.file', text='''%
... %if x:
... % print(y)
... AAAA
... %# This is a comment
... %else:
... BBBB
... '''))
Block:
[
Code:
{
if x:
__children__[0].execute(__context__)
# This is a comment
else:
__children__[1].execute(__context__)
}
[
Block:
[
Code: {print(y)} []
Literal:
AAAA
]
Block:
[
Literal:
BBBB
]
]
]
>>> print(parse_template('dummy.file', text='''\
... %for x in y:
... AAAA
... %if x:
... BBBB
... %end
... CCCC
... '''))
Block:
[
Code:
{
for x in y:
__children__[0].execute(__context__)
}
[
Block:
[
Literal:
AAAA
Code:
{
if x:
__children__[0].execute(__context__)
}
[
Block:
[
Literal:
BBBB
]
]
Literal:
CCCC
]
]
] | def parse_template(filename, text=None):
r"""Return an AST corresponding to the given template file.
If text is supplied, it is assumed to be the contents of the file,
as a string.
>>> print(parse_template('dummy.file', text=
... '''% for x in [1, 2, 3]:
... % if x == 1:
... literal1
... % elif x > 1: # add output line after this line to fix bug
... % if x == 2:
... literal2
... % end
... % end
... % end
... '''))
Block:
[
Code:
{
for x in [1, 2, 3]:
__children__[0].execute(__context__)
}
[
Block:
[
Code:
{
if x == 1:
__children__[0].execute(__context__)
elif x > 1: # add output line after this line to fix bug
__children__[1].execute(__context__)
}
[
Block:
[
Literal:
literal1
]
Block:
[
Code:
{
if x == 2:
__children__[0].execute(__context__)
}
[
Block:
[
Literal:
literal2
]
]
]
]
]
]
]
>>> print(parse_template(
... 'dummy.file',
... text='%for x in range(10):\n% print(x)\n%end\njuicebox'))
Block:
[
Code:
{
for x in range(10):
__children__[0].execute(__context__)
}
[
Block:
[
Code: {print(x)} []
]
]
Literal:
juicebox
]
>>> print(parse_template('/dummy.file', text=
... '''Nothing
... % if x:
... % for i in range(3):
... ${i}
... % end
... % else:
... THIS SHOULD NOT APPEAR IN THE OUTPUT
... '''))
Block:
[
Literal:
Nothing
Code:
{
if x:
__children__[0].execute(__context__)
else:
__children__[1].execute(__context__)
}
[
Block:
[
Code:
{
for i in range(3):
__children__[0].execute(__context__)
}
[
Block:
[
Code: {(i)} []
Literal:
<BLANKLINE>
]
]
]
Block:
[
Literal:
THIS SHOULD NOT APPEAR IN THE OUTPUT
]
]
]
>>> print(parse_template('dummy.file', text='''%
... %for x in y:
... % print(y)
... '''))
Block:
[
Code:
{
for x in y:
__children__[0].execute(__context__)
}
[
Block:
[
Code: {print(y)} []
]
]
]
>>> print(parse_template('dummy.file', text='''%
... %if x:
... % print(y)
... AAAA
... %else:
... BBBB
... '''))
Block:
[
Code:
{
if x:
__children__[0].execute(__context__)
else:
__children__[1].execute(__context__)
}
[
Block:
[
Code: {print(y)} []
Literal:
AAAA
]
Block:
[
Literal:
BBBB
]
]
]
>>> print(parse_template('dummy.file', text='''%
... %if x:
... % print(y)
... AAAA
... %# This is a comment
... %else:
... BBBB
... '''))
Block:
[
Code:
{
if x:
__children__[0].execute(__context__)
# This is a comment
else:
__children__[1].execute(__context__)
}
[
Block:
[
Code: {print(y)} []
Literal:
AAAA
]
Block:
[
Literal:
BBBB
]
]
]
>>> print(parse_template('dummy.file', text='''\
... %for x in y:
... AAAA
... %if x:
... BBBB
... %end
... CCCC
... '''))
Block:
[
Code:
{
for x in y:
__children__[0].execute(__context__)
}
[
Block:
[
Literal:
AAAA
Code:
{
if x:
__children__[0].execute(__context__)
}
[
Block:
[
Literal:
BBBB
]
]
Literal:
CCCC
]
]
]
"""
return Block(ParseContext(filename, text)) |
Return the text generated by executing the given template AST.
Keyword arguments become local variable bindings in the execution context
>>> root_directory = os.path.abspath('/')
>>> file_name = (root_directory + 'dummy.file').replace('\\', '/')
>>> ast = parse_template(file_name, text=
... '''Nothing
... % if x:
... % for i in range(3):
... ${i}
... % end
... % else:
... THIS SHOULD NOT APPEAR IN THE OUTPUT
... ''')
>>> out = execute_template(ast,
... line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)',
... x=1)
>>> out = out.replace(file_name, "DUMMY-FILE")
>>> print(out, end="")
//#sourceLocation(file: "DUMMY-FILE", line: 1)
Nothing
//#sourceLocation(file: "DUMMY-FILE", line: 4)
0
//#sourceLocation(file: "DUMMY-FILE", line: 4)
1
//#sourceLocation(file: "DUMMY-FILE", line: 4)
2
>>> ast = parse_template(file_name, text=
... '''Nothing
... % a = []
... % for x in range(3):
... % a.append(x)
... % end
... ${a}
... ''')
>>> out = execute_template(ast,
... line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)',
... x=1)
>>> out = out.replace(file_name, "DUMMY-FILE")
>>> print(out, end="")
//#sourceLocation(file: "DUMMY-FILE", line: 1)
Nothing
//#sourceLocation(file: "DUMMY-FILE", line: 6)
[0, 1, 2]
>>> ast = parse_template(file_name, text=
... '''Nothing
... % a = []
... % for x in range(3):
... % a.append(x)
... % end
... ${a}
... ''')
>>> out = execute_template(ast,
... line_directive='#line %(line)d "%(file)s"', x=1)
>>> out = out.replace(file_name, "DUMMY-FILE")
>>> print(out, end="")
#line 1 "DUMMY-FILE"
Nothing
#line 6 "DUMMY-FILE"
[0, 1, 2] | def execute_template(
ast, line_directive=_default_line_directive, **local_bindings):
r"""Return the text generated by executing the given template AST.
Keyword arguments become local variable bindings in the execution context
>>> root_directory = os.path.abspath('/')
>>> file_name = (root_directory + 'dummy.file').replace('\\', '/')
>>> ast = parse_template(file_name, text=
... '''Nothing
... % if x:
... % for i in range(3):
... ${i}
... % end
... % else:
... THIS SHOULD NOT APPEAR IN THE OUTPUT
... ''')
>>> out = execute_template(ast,
... line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)',
... x=1)
>>> out = out.replace(file_name, "DUMMY-FILE")
>>> print(out, end="")
//#sourceLocation(file: "DUMMY-FILE", line: 1)
Nothing
//#sourceLocation(file: "DUMMY-FILE", line: 4)
0
//#sourceLocation(file: "DUMMY-FILE", line: 4)
1
//#sourceLocation(file: "DUMMY-FILE", line: 4)
2
>>> ast = parse_template(file_name, text=
... '''Nothing
... % a = []
... % for x in range(3):
... % a.append(x)
... % end
... ${a}
... ''')
>>> out = execute_template(ast,
... line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)',
... x=1)
>>> out = out.replace(file_name, "DUMMY-FILE")
>>> print(out, end="")
//#sourceLocation(file: "DUMMY-FILE", line: 1)
Nothing
//#sourceLocation(file: "DUMMY-FILE", line: 6)
[0, 1, 2]
>>> ast = parse_template(file_name, text=
... '''Nothing
... % a = []
... % for x in range(3):
... % a.append(x)
... % end
... ${a}
... ''')
>>> out = execute_template(ast,
... line_directive='#line %(line)d "%(file)s"', x=1)
>>> out = out.replace(file_name, "DUMMY-FILE")
>>> print(out, end="")
#line 1 "DUMMY-FILE"
Nothing
#line 6 "DUMMY-FILE"
[0, 1, 2]
"""
execution_context = ExecutionContext(
line_directive=line_directive, **local_bindings)
ast.execute(execution_context)
return ''.join(execution_context.result_text) |
Returns a list of path objects for all known Python sources in the Swift
project. | def _get_python_sources():
"""Returns a list of path objects for all known Python sources in the Swift
project.
"""
return list(_SWIFT_PATH.rglob("*.py")) + _KNOWN_SCRIPT_PATHS |
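The helper assumes two module-level constants; a hedged sketch of how they might look (the actual values in the lint script may differ, and the listed script paths are purely illustrative):
    import pathlib
    # Repository root, assuming this file lives one directory below it.
    _SWIFT_PATH = pathlib.Path(__file__).resolve().parents[1]
    # Executable scripts without a .py suffix that rglob("*.py") would miss.
    _KNOWN_SCRIPT_PATHS = [
        _SWIFT_PATH / 'utils' / 'build-script',
        _SWIFT_PATH / 'utils' / 'gyb',
    ]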
Runs the pip command to check if a package is installed.
| def _is_package_installed(name):
"""Runs the pip command to check if a package is installed.
"""
command = [
sys.executable,
"-m",
"pip",
"show",
"--quiet",
name,
]
with open(os.devnull, "w") as devnull:
status = subprocess.call(command, stderr=devnull)
return not status |
Runs the pip command to check if a package is installed.
| def _is_package_installed(name):
"""Runs the pip command to check if a package is installed.
"""
command = [
sys.executable,
'-m', 'pip',
'show', '--quiet',
name,
]
with open(os.devnull, 'w') as devnull:
status = subprocess.call(command, stderr=devnull)
return not status |
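A hedged usage sketch of the check above, e.g. to fail fast when a lint dependency is missing (the package name and message are illustrative):
    if not _is_package_installed('flake8'):
        print('flake8 is not installed; run "pip install flake8" first.',
              file=sys.stderr)
        sys.exit(1)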