Appends an item to a comma-separated string.
If the comma-separated string is empty/None, just returns item. | def csv_append(csv_string, item):
"""
Appends an item to a comma-separated string.
If the comma-separated string is empty/None, just returns item.
"""
if csv_string:
return ",".join((csv_string, item))
else:
return item |
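A minimal usage sketch of csv_append as defined above; the results follow directly from the function body:

# illustrative usage of csv_append as defined above
csv_append("a,b", "c")   # -> "a,b,c"
csv_append("", "c")      # -> "c"  (empty/None csv_string just returns item)
csv_append(None, "c")    # -> "c"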
Consume the first truthy item from an iterator, then re-chain it to the
rest of the iterator. This is useful when you want to make sure the
prologue to downstream generators has been executed before continuing.
:param iterable: an iterable object | def reiterate(iterable):
"""
Consume the first truthy item from an iterator, then re-chain it to the
rest of the iterator. This is useful when you want to make sure the
prologue to downstream generators has been executed before continuing.
:param iterable: an iterable object
"""
if isinstance(iterable, (list, tuple)):
return iterable
else:
iterator = iter(iterable)
try:
chunk = next(iterator)
while not chunk:
chunk = next(iterator)
return CloseableChain([chunk], iterator)
except StopIteration:
close_if_possible(iterable)
return iter([]) |
Test whether a path is a mount point. This will catch any
exceptions and translate them into a False return value.
Use ismount_raw to have the exceptions raised instead. | def ismount(path):
"""
Test whether a path is a mount point. This will catch any
exceptions and translate them into a False return value.
Use ismount_raw to have the exceptions raised instead.
"""
try:
return ismount_raw(path)
except OSError:
return False |
Test whether a path is a mount point. Whereas ismount will catch
any exceptions and just return False, this raw version will not
catch exceptions.
This is code hijacked from CPython 2.6.8, adapted to remove the extra
lstat() system call. | def ismount_raw(path):
"""
Test whether a path is a mount point. Whereas ismount will catch
any exceptions and just return False, this raw version will not
catch exceptions.
This is code hijacked from CPython 2.6.8, adapted to remove the extra
lstat() system call.
"""
try:
s1 = os.lstat(path)
except os.error as err:
if err.errno == errno.ENOENT:
# It doesn't exist -- so not a mount point :-)
return False
raise
if stat.S_ISLNK(s1.st_mode):
# Some environments (like vagrant-swift-all-in-one) use a symlink at
# the device level but could still provide a stubfile in the target
# to indicate that it should be treated as a mount point for swift's
# purposes.
if os.path.isfile(os.path.join(path, ".ismount")):
return True
# Otherwise, a symlink can never be a mount point
return False
s2 = os.lstat(os.path.join(path, '..'))
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
# path/.. is on a different device than path
return True
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
# path/.. is the same i-node as path
return True
# Device and inode checks are not properly working inside containerized
# environments, therefore using a workaround to check if there is a
# stubfile placed by an operator
if os.path.isfile(os.path.join(path, ".ismount")):
return True
return False |
Like contextlib.closing(), but doesn't crash if the object lacks a close()
method.
PEP 333 (WSGI) says: "If the iterable returned by the application has a
close() method, the server or gateway must call that method upon
completion of the current request[.]" This function makes that easier. | def closing_if_possible(maybe_closable):
"""
Like contextlib.closing(), but doesn't crash if the object lacks a close()
method.
PEP 333 (WSGI) says: "If the iterable returned by the application has a
close() method, the server or gateway must call that method upon
completion of the current request[.]" This function makes that easier.
"""
try:
yield maybe_closable
finally:
close_if_possible(maybe_closable) |
Drain and close a swob or WSGI response.
This ensures we don't log a 499 in the proxy just because we realized we
don't care about the body of an error. | def drain_and_close(response_or_app_iter, read_limit=None):
"""
Drain and close a swob or WSGI response.
This ensures we don't log a 499 in the proxy just because we realized we
don't care about the body of an error.
"""
app_iter = getattr(response_or_app_iter, 'app_iter', response_or_app_iter)
if app_iter is None: # for example, if we used the Response.body property
return
bytes_read = 0
with closing_if_possible(app_iter):
for chunk in app_iter:
bytes_read += len(chunk)
if read_limit is not None and bytes_read >= read_limit:
break |
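A small sketch of drain_and_close with a plain list standing in for an app_iter; a list has no close() method, which closing_if_possible tolerates:

# drain everything (no read_limit)
drain_and_close([b"error body", b"more bytes"])
# stop early once read_limit bytes have been consumed
drain_and_close([b"x" * 1024, b"y" * 1024], read_limit=1024)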
Close a swob or WSGI response and maybe drain it.
It's basically free to "read" a HEAD or HTTPException response - the bytes
are probably already in our network buffers. For a larger response we
could possibly burn a lot of CPU/network trying to drain an un-used
response. This method will read up to DEFAULT_DRAIN_LIMIT bytes to avoid
logging a 499 in the proxy when it would otherwise be easy to just throw
away the small/empty body. | def friendly_close(resp):
"""
Close a swob or WSGI response and maybe drain it.
It's basically free to "read" a HEAD or HTTPException response - the bytes
are probably already in our network buffers. For a larger response we
could possibly burn a lot of CPU/network trying to drain an un-used
response. This method will read up to DEFAULT_DRAIN_LIMIT bytes to avoid
logging a 499 in the proxy when it would otherwise be easy to just throw
away the small/empty body.
"""
return drain_and_close(resp, read_limit=DEFAULT_DRAIN_LIMIT) |
Parse a content-range header into (first_byte, last_byte, total_size).
See RFC 7233 section 4.2 for details on the header format, but it's
basically "Content-Range: bytes ${start}-${end}/${total}".
:param content_range: Content-Range header value to parse,
e.g. "bytes 100-1249/49004"
:returns: 3-tuple (start, end, total)
:raises ValueError: if malformed | def parse_content_range(content_range):
"""
Parse a content-range header into (first_byte, last_byte, total_size).
See RFC 7233 section 4.2 for details on the header format, but it's
basically "Content-Range: bytes ${start}-${end}/${total}".
:param content_range: Content-Range header value to parse,
e.g. "bytes 100-1249/49004"
:returns: 3-tuple (start, end, total)
:raises ValueError: if malformed
"""
found = re.search(_content_range_pattern, content_range)
if not found:
raise ValueError("malformed Content-Range %r" % (content_range,))
return tuple(int(x) for x in found.groups()) |
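Usage of parse_content_range as defined above, using the example value from the docstring:

parse_content_range("bytes 100-1249/49004")   # -> (100, 1249, 49004)
# anything that does not match the expected pattern raises ValueError, e.g.
# parse_content_range("bytes ten-twenty/thirty")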
Parse a content-type and its parameters into values.
RFC 2616 sec 14.17 and 3.7 are pertinent.
**Examples**::
'text/plain; charset=UTF-8' -> ('text/plain', [('charset', 'UTF-8')])
'text/plain; charset=UTF-8; level=1' ->
('text/plain', [('charset', 'UTF-8'), ('level', '1')])
:param content_type: content_type to parse
:returns: a tuple containing (content type, list of k, v parameter tuples) | def parse_content_type(content_type):
"""
Parse a content-type and its parameters into values.
RFC 2616 sec 14.17 and 3.7 are pertinent.
**Examples**::
'text/plain; charset=UTF-8' -> ('text/plain', [('charset', 'UTF-8')])
'text/plain; charset=UTF-8; level=1' ->
('text/plain', [('charset', 'UTF-8'), ('level', '1')])
:param content_type: content_type to parse
:returns: a tuple containing (content type, list of k, v parameter tuples)
"""
parm_list = []
if ';' in content_type:
content_type, parms = content_type.split(';', 1)
parms = ';' + parms
for m in _rfc_extension_pattern.findall(parms):
key = m[0].strip()
value = m[1].strip()
parm_list.append((key, value))
return content_type, parm_list |
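The docstring examples above, written out as calls to parse_content_type as defined here:

parse_content_type('text/plain; charset=UTF-8')
# -> ('text/plain', [('charset', 'UTF-8')])
parse_content_type('text/plain; charset=UTF-8; level=1')
# -> ('text/plain', [('charset', 'UTF-8'), ('level', '1')])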
Parse a content-type and return a tuple containing:
- the content_type string minus any swift_bytes param,
- the swift_bytes value or None if the param was not found
:param content_type: a content-type string
:return: a tuple of (content-type, swift_bytes or None) | def extract_swift_bytes(content_type):
"""
Parse a content-type and return a tuple containing:
- the content_type string minus any swift_bytes param,
- the swift_bytes value or None if the param was not found
:param content_type: a content-type string
:return: a tuple of (content-type, swift_bytes or None)
"""
content_type, params = parse_content_type(content_type)
swift_bytes = None
for k, v in params:
if k == 'swift_bytes':
swift_bytes = v
else:
content_type += ';%s=%s' % (k, v)
return content_type, swift_bytes |
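A short sketch of extract_swift_bytes as defined above; note that any non-swift_bytes parameters are re-appended without a space after the semicolon:

extract_swift_bytes('text/plain;swift_bytes=1024')
# -> ('text/plain', '1024')
extract_swift_bytes('text/plain; charset=UTF-8;swift_bytes=2048')
# -> ('text/plain;charset=UTF-8', '2048')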
Takes a dict from a container listing and overrides the content_type,
bytes fields if swift_bytes is set. | def override_bytes_from_content_type(listing_dict, logger=None):
"""
Takes a dict from a container listing and overrides the content_type,
bytes fields if swift_bytes is set.
"""
listing_dict['content_type'], swift_bytes = extract_swift_bytes(
listing_dict['content_type'])
if swift_bytes is not None:
try:
listing_dict['bytes'] = int(swift_bytes)
except ValueError:
if logger:
logger.exception("Invalid swift_bytes") |
Returns an expiring object container name for given X-Delete-At and
(native string) a/c/o. | def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
"""
Returns an expiring object container name for given X-Delete-At and
(native string) a/c/o.
"""
shard_int = int(hash_path(acc, cont, obj), 16) % 100
return normalize_delete_at_timestamp(
int(x_delete_at) // expirer_divisor * expirer_divisor - shard_int) |
Given a multi-part-mime-encoded input file object and boundary,
yield file-like objects for each part. Note that this does not
split each part into headers and body; the caller is responsible
for doing that if necessary.
:param wsgi_input: The file-like object to read from.
:param boundary: The mime boundary to separate new file-like objects on.
:returns: A generator of file-like objects for each part.
:raises MimeInvalid: if the document is malformed | def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096):
"""
Given a multi-part-mime-encoded input file object and boundary,
yield file-like objects for each part. Note that this does not
split each part into headers and body; the caller is responsible
for doing that if necessary.
:param wsgi_input: The file-like object to read from.
:param boundary: The mime boundary to separate new file-like objects on.
:returns: A generator of file-like objects for each part.
:raises MimeInvalid: if the document is malformed
"""
boundary = b'--' + boundary
blen = len(boundary) + 2 # \r\n
try:
got = wsgi_input.readline(blen)
while got == b'\r\n':
got = wsgi_input.readline(blen)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
if got.strip() != boundary:
raise swift.common.exceptions.MimeInvalid(
'invalid starting boundary: wanted %r, got %r' % (boundary, got))
boundary = b'\r\n' + boundary
input_buffer = b''
done = False
while not done:
it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer,
read_chunk_size)
yield it
done = it.no_more_files
input_buffer = it.input_buffer |
Takes a file-like object containing a MIME document and returns a
HeaderKeyDict containing the headers. The body of the message is not
consumed: the position in doc_file is left at the beginning of the body.
This function was inspired by the Python standard library's
http.client.parse_headers.
:param doc_file: binary file-like object containing a MIME document
:returns: a swift.common.swob.HeaderKeyDict containing the headers | def parse_mime_headers(doc_file):
"""
Takes a file-like object containing a MIME document and returns a
HeaderKeyDict containing the headers. The body of the message is not
consumed: the position in doc_file is left at the beginning of the body.
This function was inspired by the Python standard library's
http.client.parse_headers.
:param doc_file: binary file-like object containing a MIME document
:returns: a swift.common.swob.HeaderKeyDict containing the headers
"""
headers = []
while True:
line = doc_file.readline()
done = line in (b'\r\n', b'\n', b'')
if six.PY3:
try:
line = line.decode('utf-8')
except UnicodeDecodeError:
line = line.decode('latin1')
headers.append(line)
if done:
break
if six.PY3:
header_string = ''.join(headers)
else:
header_string = b''.join(headers)
headers = email.parser.Parser().parsestr(header_string)
return HeaderKeyDict(headers) |
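A sketch of parse_mime_headers on an in-memory document; it assumes the module-level names used by the function above (six, email, HeaderKeyDict) are in scope:

import io

doc = io.BytesIO(b"Content-Type: text/plain\r\n"
                 b"Content-Range: bytes 0-4/100\r\n"
                 b"\r\n"
                 b"hello")
headers = parse_mime_headers(doc)
# headers['Content-Type'] == 'text/plain'
# doc.read() == b'hello'   # the body is left unconsumed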
Takes a file-like object containing a multipart MIME document and
returns an iterator of (headers, body-file) tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read() | def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
"""
Takes a file-like object containing a multipart MIME document and
returns an iterator of (headers, body-file) tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
if six.PY3 and isinstance(boundary, str):
# Since the boundary is in client-supplied headers, it can contain
# garbage that trips us up, and we don't want a client-induced 500.
boundary = boundary.encode('latin-1', errors='replace')
doc_files = iter_multipart_mime_documents(input_file, boundary,
read_chunk_size)
for i, doc_file in enumerate(doc_files):
# this consumes the headers and leaves just the body in doc_file
headers = parse_mime_headers(doc_file)
yield (headers, doc_file) |
Takes an iterator that may or may not contain a multipart MIME document
as well as content type and returns an iterator of body iterators.
:param app_iter: iterator that may contain a multipart MIME document
:param content_type: content type of the app_iter, used to determine
whether it contains a multipart document and, if
so, what the boundary is between documents | def maybe_multipart_byteranges_to_document_iters(app_iter, content_type):
"""
Takes an iterator that may or may not contain a multipart MIME document
as well as content type and returns an iterator of body iterators.
:param app_iter: iterator that may contain a multipart MIME document
:param content_type: content type of the app_iter, used to determine
whether it contains a multipart document and, if
so, what the boundary is between documents
"""
content_type, params_list = parse_content_type(content_type)
if content_type != 'multipart/byteranges':
yield app_iter
return
body_file = FileLikeIter(app_iter)
boundary = dict(params_list)['boundary']
for _headers, body in mime_to_document_iters(body_file, boundary):
yield (chunk for chunk in iter(lambda: body.read(65536), b'')) |
Takes an iterator of range iters and yields a multipart/byteranges MIME
document suitable for sending as the body of a multi-range 206 response.
See document_iters_to_http_response_body for parameter descriptions. | def document_iters_to_multipart_byteranges(ranges_iter, boundary):
"""
Takes an iterator of range iters and yields a multipart/byteranges MIME
document suitable for sending as the body of a multi-range 206 response.
See document_iters_to_http_response_body for parameter descriptions.
"""
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
divider = b"--" + boundary + b"\r\n"
terminator = b"--" + boundary + b"--"
for range_spec in ranges_iter:
start_byte = range_spec["start_byte"]
end_byte = range_spec["end_byte"]
entity_length = range_spec.get("entity_length", "*")
content_type = range_spec["content_type"]
part_iter = range_spec["part_iter"]
if not isinstance(content_type, bytes):
content_type = str(content_type).encode('utf-8')
if not isinstance(entity_length, bytes):
entity_length = str(entity_length).encode('utf-8')
part_header = b''.join((
divider,
b"Content-Type: ", content_type, b"\r\n",
b"Content-Range: ", b"bytes %d-%d/%s\r\n" % (
start_byte, end_byte, entity_length),
b"\r\n"
))
yield part_header
for chunk in part_iter:
yield chunk
yield b"\r\n"
yield terminator |
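A small sketch showing the MIME framing document_iters_to_multipart_byteranges produces for a single range; the values are illustrative:

ranges = [{"start_byte": 0, "end_byte": 4, "entity_length": 10,
           "content_type": "text/plain", "part_iter": iter([b"hello"])}]
body = b"".join(document_iters_to_multipart_byteranges(iter(ranges), "bnd"))
# body == (b"--bnd\r\nContent-Type: text/plain\r\n"
#          b"Content-Range: bytes 0-4/10\r\n\r\nhello\r\n--bnd--")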
Takes an iterator of range iters and turns it into an appropriate
HTTP response body, whether that's multipart/byteranges or not.
This is almost, but not quite, the inverse of
request_helpers.http_response_to_document_iters(). This function only
yields chunks of the body, not any headers.
:param ranges_iter: an iterator of dictionaries, one per range.
Each dictionary must contain at least the following key:
"part_iter": iterator yielding the bytes in the range
Additionally, if multipart is True, then the following other keys
are required:
"start_byte": index of the first byte in the range
"end_byte": index of the last byte in the range
"content_type": value for the range's Content-Type header
Finally, there is one optional key that is used in the
multipart/byteranges case:
"entity_length": length of the requested entity (not necessarily
equal to the response length). If omitted, "*" will be used.
Each part_iter will be exhausted prior to calling next(ranges_iter).
:param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not
"--boundary").
:param multipart: True if the response should be multipart/byteranges,
False otherwise. This should be True if and only if you have 2 or
more ranges.
:param logger: a logger | def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
logger):
"""
Takes an iterator of range iters and turns it into an appropriate
HTTP response body, whether that's multipart/byteranges or not.
This is almost, but not quite, the inverse of
request_helpers.http_response_to_document_iters(). This function only
yields chunks of the body, not any headers.
:param ranges_iter: an iterator of dictionaries, one per range.
Each dictionary must contain at least the following key:
"part_iter": iterator yielding the bytes in the range
Additionally, if multipart is True, then the following other keys
are required:
"start_byte": index of the first byte in the range
"end_byte": index of the last byte in the range
"content_type": value for the range's Content-Type header
Finally, there is one optional key that is used in the
multipart/byteranges case:
"entity_length": length of the requested entity (not necessarily
equal to the response length). If omitted, "*" will be used.
Each part_iter will be exhausted prior to calling next(ranges_iter).
:param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not
"--boundary").
:param multipart: True if the response should be multipart/byteranges,
False otherwise. This should be True if and only if you have 2 or
more ranges.
:param logger: a logger
"""
if multipart:
return document_iters_to_multipart_byteranges(ranges_iter, boundary)
else:
try:
response_body_iter = next(ranges_iter)['part_iter']
except StopIteration:
return ''
# We need to make sure ranges_iter does not get garbage-collected
# before response_body_iter is exhausted. The reason is that
# ranges_iter has a finally block that calls close_swift_conn, and
# so if that finally block fires before we read response_body_iter,
# there's nothing there.
result = StringAlong(
response_body_iter, ranges_iter,
lambda: logger.warning(
"More than one part in a single-part response?"))
return result |
Takes a file-like object containing a multipart/byteranges MIME document
(see RFC 7233, Appendix A) and returns an iterator of (first-byte,
last-byte, length, document-headers, body-file) 5-tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read() | def multipart_byteranges_to_document_iters(input_file, boundary,
read_chunk_size=4096):
"""
Takes a file-like object containing a multipart/byteranges MIME document
(see RFC 7233, Appendix A) and returns an iterator of (first-byte,
last-byte, length, document-headers, body-file) 5-tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
for headers, body in mime_to_document_iters(input_file, boundary,
read_chunk_size):
first_byte, last_byte, length = parse_content_range(
headers.get('content-range'))
yield (first_byte, last_byte, length, headers.items(), body) |
Given the value of a header like:
Content-Disposition: form-data; name="somefile"; filename="test.html"
Return data like
("form-data", {"name": "somefile", "filename": "test.html"})
:param header: Value of a header (the part after the ': ').
:returns: (value name, dict) of the attribute data parsed (see above). | def parse_content_disposition(header):
"""
Given the value of a header like:
Content-Disposition: form-data; name="somefile"; filename="test.html"
Return data like
("form-data", {"name": "somefile", "filename": "test.html"})
:param header: Value of a header (the part after the ': ').
:returns: (value name, dict) of the attribute data parsed (see above).
"""
attributes = {}
attrs = ''
if ';' in header:
header, attrs = [x.strip() for x in header.split(';', 1)]
m = True
while m:
m = ATTRIBUTES_RE.match(attrs)
if m:
attrs = attrs[len(m.group(0)):]
attributes[m.group(1)] = m.group(2).strip('"')
return header, attributes |
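The docstring example above, written as a call to parse_content_disposition as defined here (it relies on the module-level ATTRIBUTES_RE pattern):

parse_content_disposition('form-data; name="somefile"; filename="test.html"')
# -> ('form-data', {'name': 'somefile', 'filename': 'test.html'})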
Find a Namespace/ShardRange in given list of ``namespaces`` whose namespace
contains ``item``.
:param item: The item for which a Namespace is to be found.
:param namespaces: a sorted list of Namespaces.
:return: the Namespace/ShardRange whose namespace contains ``item``, or
None if no suitable Namespace is found. | def find_namespace(item, namespaces):
"""
Find a Namespace/ShardRange in given list of ``namespaces`` whose namespace
contains ``item``.
:param item: The item for which a Namespace is to be found.
:param namespaces: a sorted list of Namespaces.
:return: the Namespace/ShardRange whose namespace contains ``item``, or
None if no suitable Namespace is found.
"""
index = bisect.bisect_left(namespaces, item)
if index != len(namespaces) and item in namespaces[index]:
return namespaces[index]
return None |
Filter the given Namespaces/ShardRanges to those whose namespace includes
the ``includes`` name or any part of the namespace between ``marker`` and
``end_marker``. If none of ``includes``, ``marker`` or ``end_marker`` are
specified then all Namespaces will be returned.
:param namespaces: A list of :class:`~swift.common.utils.Namespace` or
:class:`~swift.common.utils.ShardRange`.
:param includes: a string; if not empty then only the Namespace,
if any, whose namespace includes this string will be returned,
``marker`` and ``end_marker`` will be ignored.
:param marker: if specified then only shard ranges whose upper bound is
greater than this value will be returned.
:param end_marker: if specified then only shard ranges whose lower bound is
less than this value will be returned.
:return: A filtered list of :class:`~swift.common.utils.Namespace`. | def filter_namespaces(namespaces, includes, marker, end_marker):
"""
Filter the given Namespaces/ShardRanges to those whose namespace includes
the ``includes`` name or any part of the namespace between ``marker`` and
``end_marker``. If none of ``includes``, ``marker`` or ``end_marker`` are
specified then all Namespaces will be returned.
:param namespaces: A list of :class:`~swift.common.utils.Namespace` or
:class:`~swift.common.utils.ShardRange`.
:param includes: a string; if not empty then only the Namespace,
if any, whose namespace includes this string will be returned,
``marker`` and ``end_marker`` will be ignored.
:param marker: if specified then only shard ranges whose upper bound is
greater than this value will be returned.
:param end_marker: if specified then only shard ranges whose lower bound is
less than this value will be returned.
:return: A filtered list of :class:`~swift.common.utils.Namespace`.
"""
if includes:
namespace = find_namespace(includes, namespaces)
return [namespace] if namespace else []
def namespace_filter(sr):
end = start = True
if end_marker:
end = end_marker > sr.lower
if marker:
start = marker < sr.upper
return start and end
if marker or end_marker:
return list(filter(namespace_filter, namespaces))
if marker == Namespace.MAX or end_marker == Namespace.MIN:
# MIN and MAX are both Falsy so not handled by namespace_filter
return []
return namespaces |
Validate and decode Base64-encoded data.
The stdlib base64 module silently discards bad characters, but we often
want to treat them as an error.
:param value: some base64-encoded data
:param allow_line_breaks: if True, ignore carriage returns and newlines
:returns: the decoded data
:raises ValueError: if ``value`` is not a string, contains invalid
characters, or has insufficient padding | def strict_b64decode(value, allow_line_breaks=False):
'''
Validate and decode Base64-encoded data.
The stdlib base64 module silently discards bad characters, but we often
want to treat them as an error.
:param value: some base64-encoded data
:param allow_line_breaks: if True, ignore carriage returns and newlines
:returns: the decoded data
:raises ValueError: if ``value`` is not a string, contains invalid
characters, or has insufficient padding
'''
if isinstance(value, bytes):
try:
value = value.decode('ascii')
except UnicodeDecodeError:
raise ValueError
if not isinstance(value, six.text_type):
raise ValueError
# b64decode will silently discard bad characters, but we want to
# treat them as an error
valid_chars = string.digits + string.ascii_letters + '/+'
strip_chars = '='
if allow_line_breaks:
valid_chars += '\r\n'
strip_chars += '\r\n'
if any(c not in valid_chars for c in value.strip(strip_chars)):
raise ValueError
try:
return base64.b64decode(value)
except (TypeError, binascii.Error): # (py2 error, py3 error)
raise ValueError |
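A quick sketch of strict_b64decode as defined above:

strict_b64decode('dGVzdA==')                              # -> b'test'
strict_b64decode('dGVz\ndA==', allow_line_breaks=True)    # -> b'test'
# characters outside the base64 alphabet raise ValueError instead of being
# silently dropped, e.g. strict_b64decode('dGVz dA==')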
Get the MD5 checksum of a file.
:param fname: path to file
:returns: MD5 checksum, hex encoded | def md5_hash_for_file(fname):
"""
Get the MD5 checksum of a file.
:param fname: path to file
:returns: MD5 checksum, hex encoded
"""
with open(fname, 'rb') as f:
md5sum = md5(usedforsecurity=False)
for block in iter(lambda: f.read(MD5_BLOCK_READ_BYTES), b''):
md5sum.update(block)
return md5sum.hexdigest() |
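A sketch of md5_hash_for_file; it assumes the module's md5 wrapper and MD5_BLOCK_READ_BYTES constant are in scope, as used by the function above:

import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello")
    path = f.name
md5_hash_for_file(path)   # -> '5d41402abc4b2a76b9719d911017c592'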
Return partition number for given hex hash and partition power.
:param hex_hash: A hash string
:param part_power: partition power
:returns: partition number | def get_partition_for_hash(hex_hash, part_power):
"""
Return partition number for given hex hash and partition power.
:param hex_hash: A hash string
:param part_power: partition power
:returns: partition number
"""
raw_hash = binascii.unhexlify(hex_hash)
part_shift = 32 - int(part_power)
return struct.unpack_from('>I', raw_hash)[0] >> part_shift |
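The partition is simply the top part_power bits of the first four bytes of the hash, as this sketch shows:

get_partition_for_hash('ffffffff', 10)   # -> 1023  (0xffffffff >> 22)
get_partition_for_hash('00000000', 10)   # -> 0
# a full 32-character md5 hex digest works the same way; only the first
# four bytes contribute to the partition number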
:param devices: directory where devices are mounted (e.g. /srv/node)
:param path: full path to an object file or hashdir
:returns: the (integer) partition from the path | def get_partition_from_path(devices, path):
"""
:param devices: directory where devices are mounted (e.g. /srv/node)
:param path: full path to an object file or hashdir
:returns: the (integer) partition from the path
"""
offset_parts = devices.rstrip(os.sep).split(os.sep)
path_components = path.split(os.sep)
if offset_parts == path_components[:len(offset_parts)]:
offset = len(offset_parts)
else:
raise ValueError('Path %r is not under device dir %r' % (
path, devices))
return int(path_components[offset + 2]) |
Takes a path and a partition power and returns the same path, but with the
correct partition number. Most useful when increasing the partition power.
:param devices: directory where devices are mounted (e.g. /srv/node)
:param path: full path to an object file or hashdir
:param part_power: partition power to compute correct partition number
:returns: Path with re-computed partition power | def replace_partition_in_path(devices, path, part_power):
"""
Takes a path and a partition power and returns the same path, but with the
correct partition number. Most useful when increasing the partition power.
:param devices: directory where devices are mounted (e.g. /srv/node)
:param path: full path to an object file or hashdir
:param part_power: partition power to compute correct partition number
:returns: Path with re-computed partition power
"""
offset_parts = devices.rstrip(os.sep).split(os.sep)
path_components = path.split(os.sep)
if offset_parts == path_components[:len(offset_parts)]:
offset = len(offset_parts)
else:
raise ValueError('Path %r is not under device dir %r' % (
path, devices))
part = get_partition_for_hash(path_components[offset + 4], part_power)
path_components[offset + 2] = "%d" % part
return os.sep.join(path_components) |
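A sketch of get_partition_from_path and replace_partition_in_path on a hypothetical object path under /srv/node; the device, partition, suffix and hash components are made up for illustration:

path = ('/srv/node/sda1/objects/123/fff/'
        'ffffffffffffffffffffffffffffffff/1586789100.00000.data')
get_partition_from_path('/srv/node', path)        # -> 123
replace_partition_in_path('/srv/node', path, 10)
# -> same path, but with partition 1023 (the top 10 bits of the hash)
#    in place of 123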
Takes a list of iterators, yield an element from each in a round-robin
fashion until all of them are exhausted.
:param its: list of iterators | def round_robin_iter(its):
"""
Takes a list of iterators, yield an element from each in a round-robin
fashion until all of them are exhausted.
:param its: list of iterators
"""
while its:
for it in its:
try:
yield next(it)
except StopIteration:
its.remove(it) |
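A quick sketch of round_robin_iter as defined above:

list(round_robin_iter([iter([1, 4]), iter([2, 5, 6]), iter([3])]))
# -> [1, 2, 3, 4, 5, 6]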
Figure out which policies, devices, and partitions we should operate on,
based on kwargs.
If 'override_policies' is already present in kwargs, then return that
value. This happens when using multiple worker processes; the parent
process supplies override_policies=X to each child process.
Otherwise, in run-once mode, look at the 'policies' keyword argument.
This is the value of the "--policies" command-line option. In
run-forever mode or if no --policies option was provided, an empty list
will be returned.
The procedures for devices and partitions are similar.
:returns: a named tuple with fields "devices", "partitions", and
"policies". | def parse_override_options(**kwargs):
"""
Figure out which policies, devices, and partitions we should operate on,
based on kwargs.
If 'override_policies' is already present in kwargs, then return that
value. This happens when using multiple worker processes; the parent
process supplies override_policies=X to each child process.
Otherwise, in run-once mode, look at the 'policies' keyword argument.
This is the value of the "--policies" command-line option. In
run-forever mode or if no --policies option was provided, an empty list
will be returned.
The procedures for devices and partitions are similar.
:returns: a named tuple with fields "devices", "partitions", and
"policies".
"""
run_once = kwargs.get('once', False)
if 'override_policies' in kwargs:
policies = kwargs['override_policies']
elif run_once:
policies = [
int(p) for p in list_from_csv(kwargs.get('policies'))]
else:
policies = []
if 'override_devices' in kwargs:
devices = kwargs['override_devices']
elif run_once:
devices = list_from_csv(kwargs.get('devices'))
else:
devices = []
if 'override_partitions' in kwargs:
partitions = kwargs['override_partitions']
elif run_once:
partitions = [
int(p) for p in list_from_csv(kwargs.get('partitions'))]
else:
partitions = []
return OverrideOptions(devices=devices, partitions=partitions,
policies=policies) |
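A sketch of how parse_override_options is typically called; it assumes the list_from_csv helper and the OverrideOptions named tuple defined elsewhere in this module:

# run-once mode: parse the command-line style CSV values
opts = parse_override_options(once=True, devices='sda,sdb',
                              partitions='100,200', policies='0')
# opts.devices == ['sda', 'sdb']; opts.partitions == [100, 200]
# opts.policies == [0]
# child workers receive pre-parsed values and get them passed straight through
parse_override_options(override_devices=['sda'], override_partitions=[100],
                       override_policies=[0])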
Distribute items as evenly as possible into N buckets. | def distribute_evenly(items, num_buckets):
"""
Distribute items as evenly as possible into N buckets.
"""
out = [[] for _ in range(num_buckets)]
for index, item in enumerate(items):
out[index % num_buckets].append(item)
return out |
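Two quick examples of distribute_evenly as defined above:

distribute_evenly(range(7), 3)    # -> [[0, 3, 6], [1, 4], [2, 5]]
distribute_evenly(['a', 'b'], 4)  # -> [['a'], ['b'], [], []]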
Extract a redirect location from a response's headers.
:param response: a response
:return: a tuple of (path, Timestamp) if a Location header is found,
otherwise None
:raises ValueError: if the Location header is found but a
X-Backend-Redirect-Timestamp is not found, or if there is a problem
with the format of either header | def get_redirect_data(response):
"""
Extract a redirect location from a response's headers.
:param response: a response
:return: a tuple of (path, Timestamp) if a Location header is found,
otherwise None
:raises ValueError: if the Location header is found but a
X-Backend-Redirect-Timestamp is not found, or if there is a problem
with the format of either header
"""
headers = HeaderKeyDict(response.getheaders())
if 'Location' not in headers:
return None
location = urlparse(headers['Location']).path
if config_true_value(headers.get('X-Backend-Location-Is-Quoted',
'false')):
location = unquote(location)
account, container, _junk = split_path(location, 2, 3, True)
timestamp_val = headers.get('X-Backend-Redirect-Timestamp')
try:
timestamp = Timestamp(timestamp_val)
except (TypeError, ValueError):
raise ValueError('Invalid timestamp value: %s' % timestamp_val)
return '%s/%s' % (account, container), timestamp |
Splits a db filename into three parts: the hash, the epoch, and the
extension.
>>> parse_db_filename("ab2134.db")
('ab2134', None, '.db')
>>> parse_db_filename("ab2134_1234567890.12345.db")
('ab2134', '1234567890.12345', '.db')
:param filename: A db file basename or path to a db file.
:return: A tuple of (hash, epoch, extension). ``epoch`` may be None.
:raises ValueError: if ``filename`` is not a path to a file. | def parse_db_filename(filename):
"""
Splits a db filename into three parts: the hash, the epoch, and the
extension.
>>> parse_db_filename("ab2134.db")
('ab2134', None, '.db')
>>> parse_db_filename("ab2134_1234567890.12345.db")
('ab2134', '1234567890.12345', '.db')
:param filename: A db file basename or path to a db file.
:return: A tuple of (hash, epoch, extension). ``epoch`` may be None.
:raises ValueError: if ``filename`` is not a path to a file.
"""
filename = os.path.basename(filename)
if not filename:
raise ValueError('Path to a file required.')
name, ext = os.path.splitext(filename)
parts = name.split('_')
hash_ = parts.pop(0)
epoch = parts[0] if parts else None
return hash_, epoch, ext |
Given a path to a db file, return a modified path whose filename part has
the given epoch.
A db filename takes the form ``<hash>[_<epoch>].db``; this method replaces
the ``<epoch>`` part of the given ``db_path`` with the given ``epoch``
value, or drops the epoch part if the given ``epoch`` is ``None``.
:param db_path: Path to a db file that does not necessarily exist.
:param epoch: A string (or ``None``) that will be used as the epoch
in the new path's filename; non-``None`` values will be
normalized to the normal string representation of a
:class:`~swift.common.utils.Timestamp`.
:return: A modified path to a db file.
:raises ValueError: if the ``epoch`` is not valid for constructing a
:class:`~swift.common.utils.Timestamp`. | def make_db_file_path(db_path, epoch):
"""
Given a path to a db file, return a modified path whose filename part has
the given epoch.
A db filename takes the form ``<hash>[_<epoch>].db``; this method replaces
the ``<epoch>`` part of the given ``db_path`` with the given ``epoch``
value, or drops the epoch part if the given ``epoch`` is ``None``.
:param db_path: Path to a db file that does not necessarily exist.
:param epoch: A string (or ``None``) that will be used as the epoch
in the new path's filename; non-``None`` values will be
normalized to the normal string representation of a
:class:`~swift.common.utils.Timestamp`.
:return: A modified path to a db file.
:raises ValueError: if the ``epoch`` is not valid for constructing a
:class:`~swift.common.utils.Timestamp`.
"""
hash_, _, ext = parse_db_filename(db_path)
db_dir = os.path.dirname(db_path)
if epoch is None:
return os.path.join(db_dir, hash_ + ext)
epoch = Timestamp(epoch).normal
return os.path.join(db_dir, '%s_%s%s' % (hash_, epoch, ext)) |
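A sketch of make_db_file_path; the epoch normalization relies on the Timestamp class used by the function above:

make_db_file_path('/path/to/ab2134.db', '1234567890.12345')
# -> '/path/to/ab2134_1234567890.12345.db'
make_db_file_path('/path/to/ab2134_1234567890.12345.db', None)
# -> '/path/to/ab2134.db'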
Given the path to a db file, return a sorted list of all valid db files
that actually exist in that path's dir. A valid db filename has the form::
<hash>[_<epoch>].db
where <hash> matches the <hash> part of the given db_path as would be
parsed by :meth:`~swift.common.utils.parse_db_filename`.
:param db_path: Path to a db file that does not necessarily exist.
:return: List of valid db files that do exist in the dir of the
``db_path``. This list may be empty. | def get_db_files(db_path):
"""
Given the path to a db file, return a sorted list of all valid db files
that actually exist in that path's dir. A valid db filename has the form::
<hash>[_<epoch>].db
where <hash> matches the <hash> part of the given db_path as would be
parsed by :meth:`~swift.common.utils.parse_db_filename`.
:param db_path: Path to a db file that does not necessarily exist.
:return: List of valid db files that do exist in the dir of the
``db_path``. This list may be empty.
"""
db_dir, db_file = os.path.split(db_path)
try:
files = os.listdir(db_dir)
except OSError as err:
if err.errno == errno.ENOENT:
return []
raise
if not files:
return []
match_hash, epoch, ext = parse_db_filename(db_file)
results = []
for f in files:
hash_, epoch, ext = parse_db_filename(f)
if ext != '.db':
continue
if hash_ != match_hash:
continue
results.append(os.path.join(db_dir, f))
return sorted(results) |
Send systemd-compatible notifications.
Notify the service manager that started this process, if it has set the
NOTIFY_SOCKET environment variable. For example, systemd will set this
when the unit has ``Type=notify``. More information can be found in
systemd documentation:
https://www.freedesktop.org/software/systemd/man/sd_notify.html
Common messages include::
READY=1
RELOADING=1
STOPPING=1
STATUS=<some string>
:param logger: a logger object
:param msg: the message to send | def systemd_notify(logger=None, msg=b"READY=1"):
"""
Send systemd-compatible notifications.
Notify the service manager that started this process, if it has set the
NOTIFY_SOCKET environment variable. For example, systemd will set this
when the unit has ``Type=notify``. More information can be found in
systemd documentation:
https://www.freedesktop.org/software/systemd/man/sd_notify.html
Common messages include::
READY=1
RELOADING=1
STOPPING=1
STATUS=<some string>
:param logger: a logger object
:param msg: the message to send
"""
if not isinstance(msg, bytes):
msg = msg.encode('utf8')
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
if notify_socket.startswith('@'):
# abstract namespace socket
notify_socket = '\0%s' % notify_socket[1:]
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
with closing(sock):
try:
sock.connect(notify_socket)
sock.sendall(msg)
except EnvironmentError:
if logger:
logger.debug("Systemd notification failed", exc_info=True) |
Compare the data and meta related timestamps of a new object item with
the timestamps of an existing object record, and update the new item
with data and/or meta related attributes from the existing record if
their timestamps are newer.
The multiple timestamps are encoded into a single string for storing
in the 'created_at' column of the objects db table.
:param new_item: A dict of object update attributes
:param existing: A dict of existing object attributes
:return: True if any attributes of the new item dict were found to be
newer than the existing and therefore not updated, otherwise
False implying that the updated item is equal to the existing. | def update_new_item_from_existing(new_item, existing):
"""
Compare the data and meta related timestamps of a new object item with
the timestamps of an existing object record, and update the new item
with data and/or meta related attributes from the existing record if
their timestamps are newer.
The multiple timestamps are encoded into a single string for storing
in the 'created_at' column of the objects db table.
:param new_item: A dict of object update attributes
:param existing: A dict of existing object attributes
:return: True if any attributes of the new item dict were found to be
newer than the existing and therefore not updated, otherwise
False implying that the updated item is equal to the existing.
"""
# item[created_at] may be updated so keep a copy of the original
# value in case we process this item again
new_item.setdefault('data_timestamp', new_item['created_at'])
# content-type and metadata timestamps may be encoded in
# item[created_at], or may be set explicitly.
item_ts_data, item_ts_ctype, item_ts_meta = decode_timestamps(
new_item['data_timestamp'])
if new_item.get('ctype_timestamp'):
item_ts_ctype = Timestamp(new_item.get('ctype_timestamp'))
item_ts_meta = item_ts_ctype
if new_item.get('meta_timestamp'):
item_ts_meta = Timestamp(new_item.get('meta_timestamp'))
if not existing:
# encode new_item timestamps into one string for db record
new_item['created_at'] = encode_timestamps(
item_ts_data, item_ts_ctype, item_ts_meta)
return True
# decode existing timestamp into separate data, content-type and
# metadata timestamps
rec_ts_data, rec_ts_ctype, rec_ts_meta = decode_timestamps(
existing['created_at'])
# Extract any swift_bytes values from the content_type values. This is
# necessary because the swift_bytes value to persist should be that at the
# most recent data timestamp whereas the content-type value to persist is
# that at the most recent content-type timestamp. The two values happen to
# be stored in the same database column for historical reasons.
for item in (new_item, existing):
content_type, swift_bytes = extract_swift_bytes(item['content_type'])
item['content_type'] = content_type
item['swift_bytes'] = swift_bytes
newer_than_existing = [True, True, True]
if rec_ts_data >= item_ts_data:
# apply data attributes from existing record
new_item.update([(k, existing[k])
for k in ('size', 'etag', 'deleted', 'swift_bytes')])
item_ts_data = rec_ts_data
newer_than_existing[0] = False
if rec_ts_ctype >= item_ts_ctype:
# apply content-type attribute from existing record
new_item['content_type'] = existing['content_type']
item_ts_ctype = rec_ts_ctype
newer_than_existing[1] = False
if rec_ts_meta >= item_ts_meta:
# apply metadata timestamp from existing record
item_ts_meta = rec_ts_meta
newer_than_existing[2] = False
# encode updated timestamps into one string for db record
new_item['created_at'] = encode_timestamps(
item_ts_data, item_ts_ctype, item_ts_meta)
# append the most recent swift_bytes onto the most recent content_type in
# new_item and restore existing to its original state
for item in (new_item, existing):
if item['swift_bytes']:
item['content_type'] += ';swift_bytes=%s' % item['swift_bytes']
del item['swift_bytes']
return any(newer_than_existing) |
Compares ``shard_data`` with ``existing`` and updates ``shard_data`` with
any items of ``existing`` that take precedence over the corresponding item
in ``shard_data``.
:param shard_data: a dict representation of shard range that may be
modified by this method.
:param existing: a dict representation of shard range.
:returns: True if ``shard_data`` has any item(s) that are considered to
take precedence over the corresponding item in ``existing`` | def merge_shards(shard_data, existing):
"""
Compares ``shard_data`` with ``existing`` and updates ``shard_data`` with
any items of ``existing`` that take precedence over the corresponding item
in ``shard_data``.
:param shard_data: a dict representation of shard range that may be
modified by this method.
:param existing: a dict representation of shard range.
:returns: True if ``shard_data`` has any item(s) that are considered to
take precedence over the corresponding item in ``existing``
"""
if not existing:
return True
if existing['timestamp'] < shard_data['timestamp']:
# note that currently we do not roll forward any meta or state from
# an item that was created at an older time; newer created time trumps
shard_data['reported'] = 0 # reset the latch
return True
elif existing['timestamp'] > shard_data['timestamp']:
return False
new_content = False
# timestamp must be the same, so preserve existing range bounds and deleted
for k in ('lower', 'upper', 'deleted'):
shard_data[k] = existing[k]
# now we need to look for meta data updates
if existing['meta_timestamp'] >= shard_data['meta_timestamp']:
for k in ('object_count', 'bytes_used', 'meta_timestamp'):
shard_data[k] = existing[k]
shard_data['tombstones'] = existing.get('tombstones', -1)
else:
new_content = True
# We can latch the reported flag
if existing['reported'] and \
existing['object_count'] == shard_data['object_count'] and \
existing['bytes_used'] == shard_data['bytes_used'] and \
existing.get('tombstones', -1) == shard_data['tombstones'] and \
existing['state'] == shard_data['state'] and \
existing['epoch'] == shard_data['epoch']:
shard_data['reported'] = 1
else:
shard_data.setdefault('reported', 0)
if shard_data['reported'] and not existing['reported']:
new_content = True
if (existing['state_timestamp'] == shard_data['state_timestamp']
and shard_data['state'] > existing['state']):
new_content = True
elif existing['state_timestamp'] >= shard_data['state_timestamp']:
for k in ('state', 'state_timestamp', 'epoch'):
shard_data[k] = existing[k]
else:
new_content = True
return new_content |
Compares new and existing shard ranges, updating the new shard ranges with
any more recent state from the existing, and returns shard ranges sorted
into those that need adding because they contain new or updated state and
those that need deleting because their state has been superseded.
:param new_shard_ranges: a list of dicts, each of which represents a shard
range.
:param existing_shard_ranges: a dict mapping shard range names to dicts
representing a shard range.
:return: a tuple (to_add, to_delete); to_add is a list of dicts, each of
which represents a shard range that is to be added to the existing
shard ranges; to_delete is a set of shard range names that are to be
deleted. | def sift_shard_ranges(new_shard_ranges, existing_shard_ranges):
"""
Compares new and existing shard ranges, updating the new shard ranges with
any more recent state from the existing, and returns shard ranges sorted
into those that need adding because they contain new or updated state and
those that need deleting because their state has been superseded.
:param new_shard_ranges: a list of dicts, each of which represents a shard
range.
:param existing_shard_ranges: a dict mapping shard range names to dicts
representing a shard range.
:return: a tuple (to_add, to_delete); to_add is a list of dicts, each of
which represents a shard range that is to be added to the existing
shard ranges; to_delete is a set of shard range names that are to be
deleted.
"""
to_delete = set()
to_add = {}
for item in new_shard_ranges:
item_ident = item['name']
existing = existing_shard_ranges.get(item_ident)
if merge_shards(item, existing):
# exists with older timestamp
if item_ident in existing_shard_ranges:
to_delete.add(item_ident)
# duplicate entries in item_list
if (item_ident not in to_add or
merge_shards(item, to_add[item_ident])):
to_add[item_ident] = item
return to_add.values(), to_delete |
You have to squint to see it, but the general strategy is just:
if either has been recreated:
return the newest (of the recreated)
else
return the oldest
I tried cleaning it up for a while, but settled on just writing a bunch of
tests instead. Once you get an intuitive sense for the nuance here you
can try and see there's a better way to spell the boolean logic but it all
ends up looking sorta hairy.
:returns: -1 if info is correct, 1 if remote_info is better | def cmp_policy_info(info, remote_info):
"""
You have to squint to see it, but the general strategy is just:
if either has been recreated:
return the newest (of the recreated)
else
return the oldest
I tried cleaning it up for a while, but settled on just writing a bunch of
tests instead. Once you get an intuitive sense for the nuance here you
can try and see there's a better way to spell the boolean logic but it all
ends up looking sorta hairy.
:returns: -1 if info is correct, 1 if remote_info is better
"""
def is_deleted(info):
return (info['delete_timestamp'] > info['put_timestamp'] and
info.get('count', info.get('object_count', 0)) == 0)
def cmp(a, b):
if a < b:
return -1
elif b < a:
return 1
else:
return 0
deleted = is_deleted(info)
remote_deleted = is_deleted(remote_info)
if any([deleted, remote_deleted]):
if not deleted:
return -1
elif not remote_deleted:
return 1
return cmp(remote_info['status_changed_at'],
info['status_changed_at'])
def has_been_recreated(info):
return (info['put_timestamp'] > info['delete_timestamp'] >
Timestamp(0))
remote_recreated = has_been_recreated(remote_info)
recreated = has_been_recreated(info)
if any([remote_recreated, recreated]):
if not recreated:
return 1
elif not remote_recreated:
return -1
# both have been recreated, everything devolves to here eventually
most_recent_successful_delete = max(info['delete_timestamp'],
remote_info['delete_timestamp'])
if info['put_timestamp'] < most_recent_successful_delete:
return 1
elif remote_info['put_timestamp'] < most_recent_successful_delete:
return -1
return cmp(info['status_changed_at'], remote_info['status_changed_at']) |
Compare remote_info to info and decide if the remote storage policy index
should be used instead of ours. | def incorrect_policy_index(info, remote_info):
"""
Compare remote_info to info and decide if the remote storage policy index
should be used instead of ours.
"""
if 'storage_policy_index' not in remote_info:
return False
if remote_info['storage_policy_index'] == info['storage_policy_index']:
return False
# Only return True if remote_info has the better data;
# see the docstring for cmp_policy_info
return cmp_policy_info(info, remote_info) > 0 |
Get the name of a container into which a misplaced object should be
enqueued. The name is the object's last modified time rounded down to the
nearest hour.
:param obj_timestamp: a string representation of the object's 'created_at'
time from its container db row.
:return: a container name | def get_reconciler_container_name(obj_timestamp):
"""
Get the name of a container into which a misplaced object should be
enqueued. The name is the object's last modified time rounded down to the
nearest hour.
:param obj_timestamp: a string representation of the object's 'created_at'
time from its container db row.
:return: a container name
"""
# Use last modified time of object to determine reconciler container name
_junk, _junk, ts_meta = decode_timestamps(obj_timestamp)
return str(int(ts_meta) //
MISPLACED_OBJECTS_CONTAINER_DIVISOR *
MISPLACED_OBJECTS_CONTAINER_DIVISOR) |
Add an object to the container reconciler's queue. This will cause the
container reconciler to move it from its current storage policy index to
the correct storage policy index.
:param container_ring: container ring
:param account: the misplaced object's account
:param container: the misplaced object's container
:param obj: the misplaced object
:param obj_policy_index: the policy index where the misplaced object
currently is
:param obj_timestamp: the misplaced object's X-Timestamp. We need this to
ensure that the reconciler doesn't overwrite a newer
object with an older one.
:param op: the method of the operation (DELETE or PUT)
:param force: over-write queue entries newer than obj_timestamp
:param conn_timeout: max time to wait for connection to container server
:param response_timeout: max time to wait for response from container
server
:returns: .misplaced_object container name, False on failure. "Success"
means a majority of containers got the update. | def add_to_reconciler_queue(container_ring, account, container, obj,
obj_policy_index, obj_timestamp, op,
force=False, conn_timeout=5, response_timeout=15):
"""
Add an object to the container reconciler's queue. This will cause the
container reconciler to move it from its current storage policy index to
the correct storage policy index.
:param container_ring: container ring
:param account: the misplaced object's account
:param container: the misplaced object's container
:param obj: the misplaced object
:param obj_policy_index: the policy index where the misplaced object
currently is
:param obj_timestamp: the misplaced object's X-Timestamp. We need this to
ensure that the reconciler doesn't overwrite a newer
object with an older one.
:param op: the method of the operation (DELETE or PUT)
:param force: over-write queue entries newer than obj_timestamp
:param conn_timeout: max time to wait for connection to container server
:param response_timeout: max time to wait for response from container
server
:returns: .misplaced_object container name, False on failure. "Success"
means a majority of containers got the update.
"""
container_name = get_reconciler_container_name(obj_timestamp)
object_name = get_reconciler_obj_name(obj_policy_index, account,
container, obj)
if force:
# this allows an operator to re-enqueue an object that has
# already been popped from the queue to be reprocessed, but
# could potentially prevent out of order updates from making it
# into the queue
x_timestamp = Timestamp.now().internal
else:
x_timestamp = obj_timestamp
q_op_type = get_reconciler_content_type(op)
headers = {
'X-Size': 0,
'X-Etag': obj_timestamp,
'X-Timestamp': x_timestamp,
'X-Content-Type': q_op_type,
USE_REPLICATION_NETWORK_HEADER: 'true',
}
def _check_success(*args, **kwargs):
try:
direct_put_container_object(*args, **kwargs)
return 1
except (ClientException, Timeout, socket.error):
return 0
pile = GreenPile()
part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
container_name)
for node in nodes:
pile.spawn(_check_success, node, part, MISPLACED_OBJECTS_ACCOUNT,
container_name, object_name, headers=headers,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
successes = sum(pile)
if successes >= majority_size(len(nodes)):
return container_name
else:
return False |
Translate a reconciler container listing entry to a dictionary
containing the parts of the misplaced object queue entry.
:param obj_info: an entry in a container listing with the
required keys: name, content_type, and hash
:returns: a queue entry dict with the keys: q_policy_index, account,
container, obj, q_op, q_ts, q_record, and path | def parse_raw_obj(obj_info):
"""
Translate a reconciler container listing entry to a dictionary
containing the parts of the misplaced object queue entry.
:param obj_info: an entry in a container listing with the
required keys: name, content_type, and hash
:returns: a queue entry dict with the keys: q_policy_index, account,
container, obj, q_op, q_ts, q_record, and path
"""
if six.PY2:
raw_obj_name = obj_info['name'].encode('utf-8')
else:
raw_obj_name = obj_info['name']
policy_index, obj_name = raw_obj_name.split(':', 1)
q_policy_index = int(policy_index)
account, container, obj = split_path(obj_name, 3, 3, rest_with_last=True)
try:
q_op = {
'application/x-put': 'PUT',
'application/x-delete': 'DELETE',
}[obj_info['content_type']]
except KeyError:
raise ValueError('invalid operation type %r' %
obj_info.get('content_type', None))
return {
'q_policy_index': q_policy_index,
'account': account,
'container': container,
'obj': obj,
'q_op': q_op,
'q_ts': decode_timestamps(obj_info['hash'])[0],
'q_record': last_modified_date_to_timestamp(
obj_info['last_modified']),
'path': '/%s/%s/%s' % (account, container, obj)
} |
Talk directly to the primary container servers to figure out the storage
policy index for a given container.
:param container_ring: ring in which to look up the container locations
:param account_name: name of the container's account
:param container_name: name of the container
:returns: storage policy index, or None if it couldn't get a majority | def direct_get_container_policy_index(container_ring, account_name,
container_name):
"""
Talk directly to the primary container servers to figure out the storage
policy index for a given container.
:param container_ring: ring in which to look up the container locations
:param account_name: name of the container's account
:param container_name: name of the container
:returns: storage policy index, or None if it couldn't get a majority
"""
def _eat_client_exception(*args):
try:
return direct_head_container(*args, headers={
USE_REPLICATION_NETWORK_HEADER: 'true'})
except ClientException as err:
if err.http_status == 404:
return err.http_headers
except (Timeout, socket.error):
pass
pile = GreenPile()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pile.spawn(_eat_client_exception, node, part, account_name,
container_name)
headers = [x for x in pile if x is not None]
if len(headers) < majority_size(len(nodes)):
return
return best_policy_index(headers) |
Talk directly to the primary container servers to delete a particular
object listing. Does not talk to object servers; use this only when a
container entry does not actually have a corresponding object. | def direct_delete_container_entry(container_ring, account_name, container_name,
object_name, headers=None):
"""
Talk directly to the primary container servers to delete a particular
object listing. Does not talk to object servers; use this only when a
container entry does not actually have a corresponding object.
"""
if headers is None:
headers = {}
headers[USE_REPLICATION_NETWORK_HEADER] = 'true'
pool = GreenPool()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pool.spawn_n(direct_delete_container_object, node, part, account_name,
container_name, object_name, headers=headers)
# This either worked or it didn't; if it didn't, we'll retry on the next
# reconciler loop when we see the queue entry again.
pool.waitall() |
If broker has own_shard_range *with an epoch* then filter out an
own_shard_range *without an epoch*, and log a warning about it.
:param shards: a list of candidate ShardRanges to merge
:param broker: a ContainerBroker
:param logger: a logger
:param source: string to log as source of shards
:return: a list of ShardRanges to actually merge | def check_merge_own_shard_range(shards, broker, logger, source):
"""
If broker has own_shard_range *with an epoch* then filter out an
own_shard_range *without an epoch*, and log a warning about it.
:param shards: a list of candidate ShardRanges to merge
:param broker: a ContainerBroker
:param logger: a logger
:param source: string to log as source of shards
:return: a list of ShardRanges to actually merge
"""
# work-around for https://bugs.launchpad.net/swift/+bug/1980451
own_sr = broker.get_own_shard_range()
if own_sr.epoch is None:
return shards
to_merge = []
for shard in shards:
if shard['name'] == own_sr.name and not shard['epoch']:
shard_copy = dict(shard)
new_content = merge_shards(shard_copy, dict(own_sr))
if new_content and shard_copy['epoch'] is None:
logger.warning(
'Ignoring remote osr w/o epoch, own_sr: %r, remote_sr: %r,'
' source: %s', dict(own_sr), shard, source)
continue
to_merge.append(shard)
return to_merge |
Convert container info dict to headers. | def gen_resp_headers(info, is_deleted=False):
"""
Convert container info dict to headers.
"""
# backend headers are always included
headers = {
'X-Backend-Timestamp': Timestamp(info.get('created_at', 0)).internal,
'X-Backend-PUT-Timestamp': Timestamp(info.get(
'put_timestamp', 0)).internal,
'X-Backend-DELETE-Timestamp': Timestamp(
info.get('delete_timestamp', 0)).internal,
'X-Backend-Status-Changed-At': Timestamp(
info.get('status_changed_at', 0)).internal,
'X-Backend-Storage-Policy-Index': info.get('storage_policy_index', 0),
}
if not is_deleted:
# base container info on deleted containers is not exposed to client
headers.update({
'X-Container-Object-Count': info.get('object_count', 0),
'X-Container-Bytes-Used': info.get('bytes_used', 0),
'X-Timestamp': Timestamp(info.get('created_at', 0)).normal,
'X-PUT-Timestamp': Timestamp(
info.get('put_timestamp', 0)).normal,
'X-Backend-Sharding-State': info.get('db_state', UNSHARDED),
})
return headers |
Split and validate path for a container.
:param req: a swob request
:returns: a tuple of path parts as strings | def get_container_name_and_placement(req):
"""
Split and validate path for a container.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container = split_and_validate_path(req, 4)
validate_internal_container(account, container)
return drive, part, account, container |
Split and validate path for an object.
:param req: a swob request
:returns: a tuple of path parts as strings | def get_obj_name_and_placement(req):
"""
Split and validate path for an object.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
validate_internal_obj(account, container, obj)
return drive, part, account, container, obj |
paste.deploy app factory for creating WSGI container server apps | def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf) |
Find gaps in the shard ranges and pairs of shard range paths that lead to
and from those gaps. For each gap a single pair of adjacent paths is
selected. The concatenation of all selected paths and gaps will span the
entire namespace with no overlaps.
:param shard_ranges: a list of instances of ShardRange.
:param within_range: an optional ShardRange that constrains the search
space; the method will only return gaps within this range. The default
is the entire namespace.
:return: A list of tuples of ``(start_path, gap_range, end_path)`` where
``start_path`` is a list of ShardRanges leading to the gap,
``gap_range`` is a ShardRange synthesized to describe the namespace
gap, and ``end_path`` is a list of ShardRanges leading from the gap.
When gaps start or end at the namespace minimum or maximum bounds,
``start_path`` and ``end_path`` may be 'null' paths that contain a
single ShardRange covering either the minimum or maximum of the
namespace. | def find_paths_with_gaps(shard_ranges, within_range=None):
"""
Find gaps in the shard ranges and pairs of shard range paths that lead to
and from those gaps. For each gap a single pair of adjacent paths is
selected. The concatenation of all selected paths and gaps will span the
entire namespace with no overlaps.
:param shard_ranges: a list of instances of ShardRange.
:param within_range: an optional ShardRange that constrains the search
space; the method will only return gaps within this range. The default
is the entire namespace.
:return: A list of tuples of ``(start_path, gap_range, end_path)`` where
``start_path`` is a list of ShardRanges leading to the gap,
``gap_range`` is a ShardRange synthesized to describe the namespace
gap, and ``end_path`` is a list of ShardRanges leading from the gap.
When gaps start or end at the namespace minimum or maximum bounds,
``start_path`` and ``end_path`` may be 'null' paths that contain a
single ShardRange covering either the minimum or maximum of the
namespace.
"""
timestamp = Timestamp.now()
within_range = within_range or ShardRange('entire/namespace', timestamp)
shard_ranges = ShardRangeList(shard_ranges)
# note: find_paths results do not include shrinking ranges
paths = find_paths(shard_ranges)
# add paths covering no namespace at start and end of namespace to ensure
# that a start_path and end_path is always found even when there is a gap
# at the start or end of the namespace
null_start = ShardRange('null/start', timestamp,
lower=ShardRange.MIN,
upper=ShardRange.MIN,
state=ShardRange.FOUND)
null_end = ShardRange('null/end', timestamp,
lower=ShardRange.MAX,
upper=ShardRange.MAX,
state=ShardRange.FOUND)
paths.extend([ShardRangeList([null_start]), ShardRangeList([null_end])])
paths_with_gaps = []
start = null_start.lower
while True:
start_path, end_path = _find_discontinuity(paths, start)
if end_path is None:
# end of namespace reached
break
start = end_path.lower
if start_path.upper > end_path.lower:
# overlap
continue
gap_range = ShardRange('gap/index_%06d' % len(paths_with_gaps),
timestamp,
lower=start_path.upper,
upper=end_path.lower)
if gap_range.overlaps(within_range):
paths_with_gaps.append((start_path, gap_range, end_path))
return paths_with_gaps |
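
Example: an illustrative sketch (not part of the Swift source) of two shard
ranges that leave the namespace between 'm' and 'p' uncovered; the import
paths swift.common.utils and swift.container.sharder are assumptions.

from swift.common.utils import ShardRange, Timestamp
from swift.container.sharder import find_paths_with_gaps
ts = Timestamp.now()
ranges = [
    ShardRange('.shards_a/c-0', ts, lower='', upper='m',
               state=ShardRange.ACTIVE),
    ShardRange('.shards_a/c-1', ts, lower='p', upper='',
               state=ShardRange.ACTIVE),
]
gaps = find_paths_with_gaps(ranges)
# a single (start_path, gap_range, end_path) tuple is expected, with
# gap_range covering ('m', 'p'], the uncovered part of the namespace
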
Test if shard range ``shard_range`` is the parent or a child of another
shard range ``other`` within past time period ``time_period``. This method
is limited to work only within the scope of the same user-facing account
(with and without shard prefix).
:param shard_range: an instance of ``ShardRange``.
:param other: an instance of ``ShardRange``.
:param time_period: the specified past time period in seconds. Value of
0 means all time in the past.
:return: True if ``shard_range`` is the parent or a child of ``other``
within past time period, False otherwise, assuming that they are within
the same account. | def _is_parent_or_child(shard_range, other, time_period):
"""
Test if shard range ``shard_range`` is the parent or a child of another
shard range ``other`` within past time period ``time_period``. This method
is limited to work only within the scope of the same user-facing account
(with and without shard prefix).
:param shard_range: an instance of ``ShardRange``.
:param other: an instance of ``ShardRange``.
:param time_period: the specified past time period in seconds. Value of
0 means all time in the past.
:return: True if ``shard_range`` is the parent or a child of ``other``
within past time period, False otherwise, assuming that they are within
the same account.
"""
exclude_age = (time.time() - float(time_period)) if time_period > 0 else 0
if shard_range.is_child_of(other) and shard_range.timestamp >= exclude_age:
return True
if other.is_child_of(shard_range) and other.timestamp >= exclude_age:
return True
return False |
Find all pairs of overlapping ranges in the given list.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`
:param exclude_parent_child: If True then overlapping pairs that have a
parent-child relationship within the past time period
``time_period`` are excluded from the returned set. Default is
False.
:param time_period: the specified past time period in seconds. Value of
0 means all time in the past.
:return: a set of tuples, each tuple containing ranges that overlap with
each other. | def find_overlapping_ranges(
shard_ranges, exclude_parent_child=False, time_period=0):
"""
Find all pairs of overlapping ranges in the given list.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`
:param exclude_parent_child: If True then overlapping pairs that have a
parent-child relationship within the past time period
``time_period`` are excluded from the returned set. Default is
False.
:param time_period: the specified past time period in seconds. Value of
0 means all time in the past.
:return: a set of tuples, each tuple containing ranges that overlap with
each other.
"""
result = set()
for i, shard_range in enumerate(shard_ranges):
if exclude_parent_child:
overlapping = [
sr for sr in shard_ranges[i + 1:]
if shard_range.name != sr.name and shard_range.overlaps(sr) and
not _is_parent_or_child(shard_range, sr, time_period)]
else:
overlapping = [
sr for sr in shard_ranges[i + 1:]
if shard_range.name != sr.name and shard_range.overlaps(sr)]
if overlapping:
overlapping.append(shard_range)
overlapping.sort(key=ShardRange.sort_key)
result.add(tuple(overlapping))
return result |
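
Example: an illustrative sketch (not part of the Swift source) in which two
of three shard ranges intersect; the import paths are assumptions as above.

from swift.common.utils import ShardRange, Timestamp
from swift.container.sharder import find_overlapping_ranges
ts = Timestamp.now()
a = ShardRange('.shards_a/c-a', ts, lower='', upper='n')
b = ShardRange('.shards_a/c-b', ts, lower='k', upper='z')
c = ShardRange('.shards_a/c-c', ts, lower='z', upper='')
overlaps = find_overlapping_ranges([a, b, c])
# one group is returned, containing a and b (they share ('k', 'n']);
# c starts exactly where b ends, so it is not part of any overlap
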
Find sequences of shard ranges that could be compacted into a single
acceptor shard range.
This function does not modify shard ranges.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param shrink_threshold: the number of rows below which a shard may be
considered for shrinking into another shard
:param expansion_limit: the maximum number of rows that an acceptor shard
range should have after other shard ranges have been compacted into it
:param max_shrinking: the maximum number of shard ranges that should be
compacted into each acceptor; -1 implies unlimited.
:param max_expanding: the maximum number of acceptors to be found (i.e. the
maximum number of sequences to be returned); -1 implies unlimited.
:param include_shrinking: if True then existing compactible sequences are
included in the results; default is False.
:returns: A list of :class:`~swift.common.utils.ShardRangeList` each
containing a sequence of neighbouring shard ranges that may be
compacted; the final shard range in the list is the acceptor | def find_compactible_shard_sequences(broker,
shrink_threshold,
expansion_limit,
max_shrinking,
max_expanding,
include_shrinking=False):
"""
Find sequences of shard ranges that could be compacted into a single
acceptor shard range.
This function does not modify shard ranges.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param shrink_threshold: the number of rows below which a shard may be
considered for shrinking into another shard
:param expansion_limit: the maximum number of rows that an acceptor shard
range should have after other shard ranges have been compacted into it
:param max_shrinking: the maximum number of shard ranges that should be
compacted into each acceptor; -1 implies unlimited.
:param max_expanding: the maximum number of acceptors to be found (i.e. the
maximum number of sequences to be returned); -1 implies unlimited.
:param include_shrinking: if True then existing compactible sequences are
included in the results; default is False.
:returns: A list of :class:`~swift.common.utils.ShardRangeList` each
containing a sequence of neighbouring shard ranges that may be
compacted; the final shard range in the list is the acceptor
"""
# this should only execute on root containers that have sharded; the
# goal is to find small shard containers that could be retired by
# merging with a neighbour.
# First cut is simple: assume root container shard usage stats are good
# enough to make decision; only merge with upper neighbour so that
# upper bounds never change (shard names include upper bound).
shard_ranges = broker.get_shard_ranges()
own_shard_range = broker.get_own_shard_range()
def sequence_complete(sequence):
# a sequence is considered complete if any of the following are true:
# - the final shard range has more objects than the shrink_threshold,
# so should not be shrunk (this shard will be the acceptor)
# - the max number of shard ranges to be compacted (max_shrinking) has
# been reached
# - the total number of objects in the sequence has reached the
# expansion_limit
if (sequence and
(not is_shrinking_candidate(
sequence[-1], shrink_threshold, expansion_limit,
states=(ShardRange.ACTIVE, ShardRange.SHRINKING)) or
0 < max_shrinking < len(sequence) or
sequence.row_count >= expansion_limit)):
return True
return False
compactible_sequences = []
index = 0
expanding = 0
while ((max_expanding < 0 or expanding < max_expanding) and
index < len(shard_ranges)):
if not is_shrinking_candidate(
shard_ranges[index], shrink_threshold, expansion_limit,
states=(ShardRange.ACTIVE, ShardRange.SHRINKING)):
# this shard range cannot be the start of a new or existing
# compactible sequence, move on
index += 1
continue
# start of a *possible* sequence
sequence = ShardRangeList([shard_ranges[index]])
for shard_range in shard_ranges[index + 1:]:
# attempt to add contiguous shard ranges to the sequence
if sequence.upper < shard_range.lower:
# found a gap! break before consuming this range because it
# could become the first in the next sequence
break
if shard_range.state not in (ShardRange.ACTIVE,
ShardRange.SHRINKING):
# found? created? sharded? don't touch it
break
if shard_range.state == ShardRange.SHRINKING:
# already shrinking: add to sequence unconditionally
sequence.append(shard_range)
elif (sequence.row_count + shard_range.row_count
<= expansion_limit):
# add to sequence: could be a donor or acceptor
sequence.append(shard_range)
if sequence_complete(sequence):
break
else:
break
index += len(sequence)
if (index == len(shard_ranges) and
len(shard_ranges) == len(sequence) and
not sequence_complete(sequence) and
sequence.includes(own_shard_range)):
# special case: only one sequence has been found, which consumes
# all shard ranges, encompasses the entire namespace, has no more
# than expansion_limit records and whose shard ranges are all
# shrinkable; all the shards in the sequence can be shrunk to the
# root, so append own_shard_range to the sequence to act as an
# acceptor; note: only shrink to the root when *all* the remaining
# shard ranges can be simultaneously shrunk to the root.
sequence.append(own_shard_range)
if len(sequence) < 2 or sequence[-1].state not in (ShardRange.ACTIVE,
ShardRange.SHARDED):
# this sequence doesn't end with a suitable acceptor shard range
continue
# all valid sequences are counted against the max_expanding allowance
# even if the sequence is already shrinking
expanding += 1
if (all([sr.state != ShardRange.SHRINKING for sr in sequence]) or
include_shrinking):
compactible_sequences.append(sequence)
return compactible_sequences |
Update donor shard ranges to shrinking state and merge donors and acceptors
to broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param acceptor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be acceptors.
:param donor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be donors; these will have their state and timestamp
updated.
:param timestamp: timestamp to use when updating donor state | def finalize_shrinking(broker, acceptor_ranges, donor_ranges, timestamp):
"""
Update donor shard ranges to shrinking state and merge donors and acceptors
to broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param acceptor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be acceptors.
:param donor_ranges: A list of :class:`~swift.common.utils.ShardRange`
that are to be donors; these will have their state and timestamp
updated.
:param timestamp: timestamp to use when updating donor state
"""
for donor in donor_ranges:
if donor.update_state(ShardRange.SHRINKING):
# Set donor state to shrinking; state_timestamp defines new epoch
donor.epoch = donor.state_timestamp = timestamp
broker.merge_shard_ranges(acceptor_ranges + donor_ranges) |
Transform the given sequences of shard ranges into a list of acceptors and
a list of shrinking donors. For each given sequence the final ShardRange in
the sequence (the acceptor) is expanded to accommodate the other
ShardRanges in the sequence (the donors). The donors and acceptors are then
merged into the broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param sequences: A list of :class:`~swift.common.utils.ShardRangeList` | def process_compactible_shard_sequences(broker, sequences):
"""
Transform the given sequences of shard ranges into a list of acceptors and
a list of shrinking donors. For each given sequence the final ShardRange in
the sequence (the acceptor) is expanded to accommodate the other
ShardRanges in the sequence (the donors). The donors and acceptors are then
merged into the broker.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param sequences: A list of :class:`~swift.common.utils.ShardRangeList`
"""
timestamp = Timestamp.now()
acceptor_ranges = []
shrinking_ranges = []
for sequence in sequences:
donors = sequence[:-1]
shrinking_ranges.extend(donors)
acceptor = sequence[-1]
if acceptor.expand(donors):
# Update the acceptor container with its expanded bounds to prevent
# it treating objects cleaved from the donor as misplaced.
acceptor.timestamp = timestamp
if acceptor.update_state(ShardRange.ACTIVE):
# Ensure acceptor state is ACTIVE (when acceptor is root)
acceptor.state_timestamp = timestamp
acceptor_ranges.append(acceptor)
finalize_shrinking(broker, acceptor_ranges, shrinking_ranges, timestamp) |
Returns a list of all continuous paths through the shard ranges. An
individual path may not necessarily span the entire namespace, but it will
span a continuous namespace without gaps.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`.
:return: A list of :class:`~swift.common.utils.ShardRangeList`. | def find_paths(shard_ranges):
"""
Returns a list of all continuous paths through the shard ranges. An
individual path may not necessarily span the entire namespace, but it will
span a continuous namespace without gaps.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`.
:return: A list of :class:`~swift.common.utils.ShardRangeList`.
"""
# A node is a point in the namespace that is used as a bound of any shard
# range. Shard ranges form the edges between nodes.
# First build a dict mapping nodes to a list of edges that leave that node
# (in other words, shard ranges whose lower bound equals the node)
node_successors = collections.defaultdict(list)
for shard_range in shard_ranges:
if shard_range.state == ShardRange.SHRINKING:
# shrinking shards are not a viable edge in any path
continue
node_successors[shard_range.lower].append(shard_range)
paths = []
def clone_path(other=None):
# create a new path, possibly cloning another path, and add it to the
# list of all paths through the shards
path = ShardRangeList() if other is None else ShardRangeList(other)
paths.append(path)
return path
# we need to keep track of every path that ends at each node so that when
# we visit the node we can extend those paths, or clones of them, with the
# edges that leave the node
paths_to_node = collections.defaultdict(list)
# visit the nodes in ascending order by name...
for node, edges in sorted(node_successors.items()):
if not edges:
# this node is a dead-end, so there are no path updates to make
continue
if not paths_to_node[node]:
# this is either the first node to be visited, or it has no paths
# leading to it, so we need to start a new path here
paths_to_node[node].append(clone_path([]))
for path_to_node in paths_to_node[node]:
# extend each path that arrives at this node with all of the
# possible edges that leave the node; if more than one edge leaves the
# node then we will make clones of the path to the node and extend
# those clones, adding to the collection of all paths through the
# shards
for i, edge in enumerate(edges):
if i == len(edges) - 1:
# the last edge is used to extend the original path to the
# node; there is nothing special about the last edge, but
# doing this last means the original path to the node can
# be cloned for all other edges before being modified here
path = path_to_node
else:
# for all but one of the edges leaving the node we need to
# make a clone of the original path
path = clone_path(path_to_node)
# extend the path with the edge
path.append(edge)
# keep track of which node this path now arrives at
paths_to_node[edge.upper].append(path)
return paths |
Sorts the given list of paths such that the most preferred path is the
first item in the list.
:param paths: A list of :class:`~swift.common.utils.ShardRangeList`.
:param shard_range_to_span: An instance of
:class:`~swift.common.utils.ShardRange` that describes the namespace
that would ideally be spanned by a path. Paths that include this
namespace will be preferred over those that do not.
:return: A sorted list of :class:`~swift.common.utils.ShardRangeList`. | def rank_paths(paths, shard_range_to_span):
"""
Sorts the given list of paths such that the most preferred path is the
first item in the list.
:param paths: A list of :class:`~swift.common.utils.ShardRangeList`.
:param shard_range_to_span: An instance of
:class:`~swift.common.utils.ShardRange` that describes the namespace
that would ideally be spanned by a path. Paths that include this
namespace will be preferred over those that do not.
:return: A sorted list of :class:`~swift.common.utils.ShardRangeList`.
"""
def sort_key(path):
# defines the order of preference for paths through shards
return (
# complete path for the namespace
path.includes(shard_range_to_span),
# most cleaving progress
path.find_lower(lambda sr: sr.state not in (
ShardRange.CLEAVED, ShardRange.ACTIVE)),
# largest object count
path.object_count,
# fewest timestamps
-1 * len(path.timestamps),
# newest timestamp
sorted(path.timestamps)[-1]
)
paths.sort(key=sort_key, reverse=True)
return paths |
Combines new and existing shard ranges based on most recent state.
:param new_shard_ranges: a list of ShardRange instances.
:param existing_shard_ranges: a list of ShardRange instances.
:return: a list of ShardRange instances. | def combine_shard_ranges(new_shard_ranges, existing_shard_ranges):
"""
Combines new and existing shard ranges based on most recent state.
:param new_shard_ranges: a list of ShardRange instances.
:param existing_shard_ranges: a list of ShardRange instances.
:return: a list of ShardRange instances.
"""
new_shard_ranges = [dict(sr) for sr in new_shard_ranges]
existing_shard_ranges = [dict(sr) for sr in existing_shard_ranges]
to_add, to_delete = sift_shard_ranges(
new_shard_ranges,
dict((sr['name'], sr) for sr in existing_shard_ranges))
result = [ShardRange.from_dict(existing)
for existing in existing_shard_ranges
if existing['name'] not in to_delete]
result.extend([ShardRange.from_dict(sr) for sr in to_add])
return sorted([sr for sr in result if not sr.deleted],
key=ShardRange.sort_key) |
Update the ``own_shard_range`` with the up-to-date object stats from
the ``broker``.
Note: this method does not persist the updated ``own_shard_range``;
callers should use ``broker.merge_shard_ranges`` if the updated stats
need to be persisted.
:param broker: an instance of ``ContainerBroker``.
:param own_shard_range: an instance of ``ShardRange``.
:returns: ``own_shard_range`` with up-to-date ``object_count``
and ``bytes_used``. | def update_own_shard_range_stats(broker, own_shard_range):
"""
Update the ``own_shard_range`` with the up-to-date object stats from
the ``broker``.
Note: this method does not persist the updated ``own_shard_range``;
callers should use ``broker.merge_shard_ranges`` if the updated stats
need to be persisted.
:param broker: an instance of ``ContainerBroker``.
:param own_shard_range: an instance of ``ShardRange``.
:returns: ``own_shard_range`` with up-to-date ``object_count``
and ``bytes_used``.
"""
info = broker.get_info()
own_shard_range.update_meta(
info['object_count'], info['bytes_used'])
return own_shard_range |
Get the data dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``objects`` or ``objects-<N>`` as appropriate | def get_data_dir(policy_or_index):
'''
Get the data dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``objects`` or ``objects-<N>`` as appropriate
'''
return get_policy_string(DATADIR_BASE, policy_or_index) |
Get the async dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``async_pending`` or ``async_pending-<N>`` as appropriate | def get_async_dir(policy_or_index):
'''
Get the async dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``async_pending`` or ``async_pending-<N>`` as appropriate
'''
return get_policy_string(ASYNCDIR_BASE, policy_or_index) |
Get the temp dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``tmp`` or ``tmp-<N>`` as appropriate | def get_tmp_dir(policy_or_index):
'''
Get the temp dir for the given policy.
:param policy_or_index: ``StoragePolicy`` instance, or an index (string or
int); if None, the legacy Policy-0 is assumed.
:returns: ``tmp`` or ``tmp-<N>`` as appropriate
'''
return get_policy_string(TMP_BASE, policy_or_index) |
Helper function to get the file name from a file descriptor or filename.
:param fd: file descriptor or filename.
:returns: the filename. | def _get_filename(fd):
"""
Helper function to get the file name from a file descriptor or filename.
:param fd: file descriptor or filename.
:returns: the filename.
"""
if hasattr(fd, 'name'):
# fd object
return fd.name
# fd is a filename
return fd |
UTF8 encode any unicode keys or values in given metadata dict.
:param metadata: a dict | def _encode_metadata(metadata):
"""
UTF8 encode any unicode keys or values in given metadata dict.
:param metadata: a dict
"""
if six.PY2:
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
else:
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8', 'surrogateescape')
return item
return dict((encode_str(k), encode_str(v)) for k, v in metadata.items())
Given a metadata dict from disk, convert keys and values to native strings.
:param metadata: a dict
:param metadata_written_by_py3: True if the metadata was written by a py3 process | def _decode_metadata(metadata, metadata_written_by_py3):
"""
Given a metadata dict from disk, convert keys and values to native strings.
:param metadata: a dict
:param metadata_written_by_py3: True if the metadata was written by a py3 process
"""
if six.PY2:
def to_str(item, is_name=False):
# For years, py2 and py3 handled non-ascii metadata differently;
# see https://bugs.launchpad.net/swift/+bug/2012531
if metadata_written_by_py3 and not is_name:
# do our best to read new-style data replicated from a py3 node
item = item.decode('utf8').encode('latin1')
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
else:
def to_str(item, is_name=False):
# For years, py2 and py3 handled non-ascii metadata differently;
# see https://bugs.launchpad.net/swift/+bug/2012531
if not metadata_written_by_py3 and isinstance(item, bytes) \
and not is_name:
# do our best to read old py2 data
item = item.decode('latin1')
if isinstance(item, six.binary_type):
return item.decode('utf8', 'surrogateescape')
return item
return {to_str(k): to_str(v, k == b'name') for k, v in metadata.items()} |
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:param add_missing_checksum: if set and checksum is missing, add it
:returns: dictionary of metadata | def read_metadata(fd, add_missing_checksum=False):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:param add_missing_checksum: if set and checksum is missing, add it
:returns: dictionary of metadata
"""
metadata = b''
key = 0
try:
while True:
metadata += xattr.getxattr(
fd, METADATA_KEY + str(key or '').encode('ascii'))
key += 1
except (IOError, OSError) as e:
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
msg = "Filesystem at %s does not support xattr"
logging.exception(msg, _get_filename(fd))
raise DiskFileXattrNotSupported(e)
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
# TODO: we might want to re-raise errors that don't denote a missing
# xattr here. Seems to be ENODATA on linux and ENOATTR on BSD/OSX.
metadata_checksum = None
try:
metadata_checksum = xattr.getxattr(fd, METADATA_CHECKSUM_KEY)
except (IOError, OSError):
# All the interesting errors were handled above; the only thing left
# here is ENODATA / ENOATTR to indicate that this attribute doesn't
# exist. This is fine; it just means that this object predates the
# introduction of metadata checksums.
if add_missing_checksum:
new_checksum = (md5(metadata, usedforsecurity=False)
.hexdigest().encode('ascii'))
try:
xattr.setxattr(fd, METADATA_CHECKSUM_KEY, new_checksum)
except (IOError, OSError) as e:
logging.error("Error adding metadata: %s" % e)
if metadata_checksum:
computed_checksum = (md5(metadata, usedforsecurity=False)
.hexdigest().encode('ascii'))
if metadata_checksum != computed_checksum:
raise DiskFileBadMetadataChecksum(
"Metadata checksum mismatch for %s: "
"stored checksum='%s', computed='%s'" % (
fd, metadata_checksum, computed_checksum))
metadata_written_by_py3 = (b'_codecs\nencode' in metadata[:32])
# strings are utf-8 encoded when written, but have not always been
# (see https://bugs.launchpad.net/swift/+bug/1678018) so encode them again
# when read
if six.PY2:
metadata = pickle.loads(metadata)
else:
metadata = pickle.loads(metadata, encoding='bytes')
return _decode_metadata(metadata, metadata_written_by_py3) |
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param metadata: metadata to write | def write_metadata(fd, metadata, xattr_size=65536):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(_encode_metadata(metadata), PICKLE_PROTOCOL)
metastr_md5 = (
md5(metastr, usedforsecurity=False).hexdigest().encode('ascii'))
key = 0
try:
while metastr:
xattr.setxattr(fd, METADATA_KEY + str(key or '').encode('ascii'),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
xattr.setxattr(fd, METADATA_CHECKSUM_KEY, metastr_md5)
except IOError as e:
# errno module doesn't always have both of these, hence the ugly
# check
if errno.errorcode.get(e.errno) in ('ENOTSUP', 'EOPNOTSUPP'):
msg = "Filesystem at %s does not support xattr"
logging.exception(msg, _get_filename(fd))
raise DiskFileXattrNotSupported(e)
elif e.errno in (errno.ENOSPC, errno.EDQUOT):
msg = "No space left on device for %s" % _get_filename(fd)
logging.exception(msg)
raise DiskFileNoSpace()
raise |
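
Example: an illustrative sketch (not part of the Swift source) round-tripping
object metadata through the two helpers above. The import path
swift.obj.diskfile is an assumption, and the temporary file must live on a
filesystem with user xattr support.

import tempfile
from swift.obj.diskfile import read_metadata, write_metadata
# dir='.' assumes the working directory is on an xattr-capable filesystem
with tempfile.NamedTemporaryFile(dir='.') as fp:
    write_metadata(fp.name, {'name': '/AUTH_test/c/o',
                             'X-Timestamp': '1401811134.87364',
                             'Content-Length': '0'})
    assert read_metadata(fp.name)['name'] == '/AUTH_test/c/o'
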
Extracts the policy for an object (based on the name of the objects
directory) given the device-relative path to the object. Returns None in
the event that the path is malformed in some way.
The device-relative path is everything after the mount point; for example:
/srv/node/d42/objects-5/30/179/
485dc017205a81df3af616d917c90179/1401811134.873649.data
would have device-relative path:
objects-5/30/179/485dc017205a81df3af616d917c90179/1401811134.873649.data
:param obj_path: device-relative path of an object, or the full path
:returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None | def extract_policy(obj_path):
"""
Extracts the policy for an object (based on the name of the objects
directory) given the device-relative path to the object. Returns None in
the event that the path is malformed in some way.
The device-relative path is everything after the mount point; for example:
/srv/node/d42/objects-5/30/179/
485dc017205a81df3af616d917c90179/1401811134.873649.data
would have device-relative path:
objects-5/30/179/485dc017205a81df3af616d917c90179/1401811134.873649.data
:param obj_path: device-relative path of an object, or the full path
:returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None
"""
try:
obj_portion = obj_path[obj_path.rindex(DATADIR_BASE):]
obj_dirname = obj_portion[:obj_portion.index('/')]
except Exception:
return None
try:
base, policy = split_policy_string(obj_dirname)
except PolicyError:
return None
return policy |
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:param device_path: The path to the device the corrupted file is on.
:param corrupted_file_path: The path to the file you want quarantined.
:returns: path (str) of directory the file was moved to
:raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
exceptions from rename | def quarantine_renamer(device_path, corrupted_file_path):
"""
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:param device_path: The path to the device the corrupted file is on.
:param corrupted_file_path: The path to the file you want quarantined.
:returns: path (str) of directory the file was moved to
:raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
exceptions from rename
"""
policy = extract_policy(corrupted_file_path)
if policy is None:
# TODO: support a quarantine-unknown location
policy = POLICIES.legacy
from_dir = dirname(corrupted_file_path)
to_dir = join(device_path, 'quarantined',
get_data_dir(policy),
basename(from_dir))
if len(basename(from_dir)) == 3:
# quarantining whole suffix
invalidate_hash(from_dir)
else:
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
to_dir = "%s-%s" % (to_dir, uuid.uuid4().hex)
renamer(from_dir, to_dir, fsync=False)
return to_dir |
Read the existing hashes.pkl
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist | def read_hashes(partition_dir):
"""
Read the existing hashes.pkl
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
hashes_file = join(partition_dir, HASH_FILE)
hashes = {'valid': False}
try:
with open(hashes_file, 'rb') as hashes_fp:
pickled_hashes = hashes_fp.read()
except (IOError, OSError):
pass
else:
try:
hashes = pickle.loads(pickled_hashes)
except Exception:
# pickle.loads() can raise a wide variety of exceptions when
# given invalid input depending on the way in which the
# input is invalid.
pass
# Check for corrupted data that could break os.listdir()
if not all(valid_suffix(key) or key in ('valid', 'updated')
for key in hashes):
return {'valid': False}
# hashes.pkl w/o valid updated key is "valid" but "forever old"
hashes.setdefault('valid', True)
hashes.setdefault('updated', -1)
return hashes |
Write hashes to hashes.pkl
The updated key is added to hashes before it is written. | def write_hashes(partition_dir, hashes):
"""
Write hashes to hashes.pkl
The updated key is added to hashes before it is written.
"""
hashes_file = join(partition_dir, HASH_FILE)
# 'valid' key should always be set by the caller; however, if there's a
# bug, setting it to invalid is the safest option
hashes.setdefault('valid', False)
hashes['updated'] = time.time()
write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL) |
Take what's in hashes.pkl and hashes.invalid, combine them, write the
result back to hashes.pkl, and clear out hashes.invalid.
:param partition_dir: absolute path to partition dir containing hashes.pkl
and hashes.invalid
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist | def consolidate_hashes(partition_dir):
"""
Take what's in hashes.pkl and hashes.invalid, combine them, write the
result back to hashes.pkl, and clear out hashes.invalid.
:param partition_dir: absolute path to partition dir containing hashes.pkl
and hashes.invalid
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
hashes = read_hashes(partition_dir)
found_invalidation_entry = hashes_updated = False
try:
with open(invalidations_file, 'r') as inv_fh:
for line in inv_fh:
found_invalidation_entry = True
suffix = line.strip()
if not valid_suffix(suffix):
continue
hashes_updated = True
hashes[suffix] = None
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
if hashes_updated:
write_hashes(partition_dir, hashes)
if found_invalidation_entry:
# Now that all the invalidations are reflected in hashes.pkl, it's
# safe to clear out the invalidations file.
with open(invalidations_file, 'wb') as inv_fh:
pass
return hashes |
Invalidates the hash for a suffix_dir in the partition's hashes file.
:param suffix_dir: absolute path to suffix dir whose hash needs
invalidating | def invalidate_hash(suffix_dir):
"""
Invalidates the hash for a suffix_dir in the partition's hashes file.
:param suffix_dir: absolute path to suffix dir whose hash needs
invalidating
"""
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
if not isinstance(suffix, bytes):
suffix = suffix.encode('utf-8')
with lock_path(partition_dir), open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + b"\n") |
Hard-links a file located in ``target_path`` using the second path
``new_target_path``. Creates intermediate directories if required.
:param target_path: current absolute filename
:param new_target_path: new absolute filename for the hardlink
:param ignore_missing: if True then no exception is raised if the link
could not be made because ``target_path`` did not exist, otherwise an
OSError will be raised.
:raises OSError: if the hard link could not be created, unless the intended
hard link already exists or the ``target_path`` does not exist and
``ignore_missing`` is True.
:returns: True if the link was created by the call to this method, False
otherwise. | def relink_paths(target_path, new_target_path, ignore_missing=True):
"""
Hard-links a file located in ``target_path`` using the second path
``new_target_path``. Creates intermediate directories if required.
:param target_path: current absolute filename
:param new_target_path: new absolute filename for the hardlink
:param ignore_missing: if True then no exception is raised if the link
could not be made because ``target_path`` did not exist, otherwise an
OSError will be raised.
:raises OSError: if the hard link could not be created, unless the intended
hard link already exists or the ``target_path`` does not exist and
``ignore_missing`` is True.
:returns: True if the link was created by the call to this method, False
otherwise.
"""
link_created = False
if target_path != new_target_path:
new_target_dir = os.path.dirname(new_target_path)
try:
os.makedirs(new_target_dir)
except OSError as err:
if err.errno != errno.EEXIST:
raise
try:
os.link(target_path, new_target_path)
link_created = True
except OSError as err:
# there are some circumstances in which it may be ok that the
# attempted link failed
ok = False
if err.errno == errno.ENOENT:
# this is ok if the *target* path doesn't exist anymore
ok = not os.path.exists(target_path) and ignore_missing
if err.errno == errno.EEXIST:
# this is ok *if* the intended link has already been made
try:
orig_stat = os.stat(target_path)
except OSError as sub_err:
# this is ok: the *target* path doesn't exist anymore
ok = sub_err.errno == errno.ENOENT and ignore_missing
else:
try:
new_stat = os.stat(new_target_path)
ok = new_stat.st_ino == orig_stat.st_ino
except OSError:
# squash this exception; the original will be raised
pass
if not ok:
raise err
return link_created |
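
Example: an illustrative sketch (not part of the Swift source) of hard-linking
a .data file into a new partition directory, as the relinker does; the import
path swift.obj.diskfile and the directory layout are made up for illustration.

import os
import tempfile
from swift.obj.diskfile import relink_paths
base = tempfile.mkdtemp()
old_path = os.path.join(base, 'objects', '123', 'abc', 'hashdir', 'ts.data')
new_path = os.path.join(base, 'objects', '492', 'abc', 'hashdir', 'ts.data')
os.makedirs(os.path.dirname(old_path))
open(old_path, 'wb').close()
assert relink_paths(old_path, new_path) is True    # link created
assert os.stat(old_path).st_ino == os.stat(new_path).st_ino
assert relink_paths(old_path, new_path) is False   # already linked, no-op
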
Given the device path, policy, and partition, returns the full
path to the partition | def get_part_path(dev_path, policy, partition):
"""
Given the device path, policy, and partition, returns the full
path to the partition
"""
return os.path.join(dev_path, get_data_dir(policy), str(partition)) |
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory for the given datadir (policy),
if device_dirs isn't set. If device_dirs is set, only yield AuditLocation
for the objects under the entries in device_dirs. The AuditLocation only
knows the path to the hash directory, not to the .data file therein
(if any). This is to avoid a double listdir(hash_dir); the DiskFile object
will always do one, so we don't.
:param devices: parent directory of the devices to be audited
:param datadir: objects directory
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param device_dirs: a list of directories under devices to traverse
:param auditor_type: either ALL or ZBF | def object_audit_location_generator(devices, datadir, mount_check=True,
logger=None, device_dirs=None,
auditor_type="ALL"):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory for the given datadir (policy),
if device_dirs isn't set. If device_dirs is set, only yield AuditLocation
for the objects under the entries in device_dirs. The AuditLocation only
knows the path to the hash directory, not to the .data file therein
(if any). This is to avoid a double listdir(hash_dir); the DiskFile object
will always do one, so we don't.
:param devices: parent directory of the devices to be audited
:param datadir: objects directory
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param device_dirs: a list of directories under devices to traverse
:param auditor_type: either ALL or ZBF
"""
if not device_dirs:
device_dirs = listdir(devices)
else:
# remove bogus devices and duplicates from device_dirs
device_dirs = list(
set(listdir(devices)).intersection(set(device_dirs)))
# randomize devices in case of process restart before sweep completed
shuffle(device_dirs)
base, policy = split_policy_string(datadir)
for device in device_dirs:
try:
check_drive(devices, device, mount_check)
except ValueError as err:
if logger:
logger.debug('Skipping: %s', err)
continue
datadir_path = os.path.join(devices, device, datadir)
if not os.path.exists(datadir_path):
continue
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno not in (errno.ENOTDIR, errno.ENODATA):
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno not in (errno.ENOTDIR, errno.ENODATA):
raise
continue
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
update_auditor_status(datadir_path, logger, [], auditor_type) |
:return: a task object name in format of
"<timestamp>-<target_account>/<target_container>/<target_obj>" | def build_task_obj(timestamp, target_account, target_container,
target_obj, high_precision=False):
"""
:return: a task object name in format of
"<timestamp>-<target_account>/<target_container>/<target_obj>"
"""
timestamp = Timestamp(timestamp)
return '%s-%s/%s/%s' % (
normalize_delete_at_timestamp(timestamp, high_precision),
target_account, target_container, target_obj) |
:param task_obj: a task object name in format of
"<timestamp>-<target_account>/<target_container>" +
"/<target_obj>"
:return: a 4-tuple of (delete_at_time, target_account, target_container,
target_obj) | def parse_task_obj(task_obj):
"""
:param task_obj: a task object name in format of
"<timestamp>-<target_account>/<target_container>" +
"/<target_obj>"
:return: a 4-tuple of (delete_at_time, target_account, target_container,
target_obj)
"""
timestamp, target_path = task_obj.split('-', 1)
timestamp = Timestamp(timestamp)
target_account, target_container, target_obj = \
split_path('/' + target_path, 3, 3, True)
return timestamp, target_account, target_container, target_obj |
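
Example: an illustrative sketch (not part of the Swift source) round-tripping
an expirer task object name through the two helpers above; the import path
swift.obj.expirer is an assumption.

from swift.obj.expirer import build_task_obj, parse_task_obj
name = build_task_obj(1590000000, 'AUTH_test', 'photos', 'cat.jpg')
# name == '1590000000-AUTH_test/photos/cat.jpg'
when, account, container, obj = parse_task_obj(name)
# when.normal == '1590000000.00000', obj == 'cat.jpg'
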
paste.deploy app factory for creating WSGI object server apps | def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf) |
Returns the left, right and far partners of the node whose index is equal
to the given node_index.
:param node_index: the primary index
:param part_nodes: a list of primary nodes
:returns: [<node-to-left>, <node-to-right>, <node-opposite>] | def _get_partners(node_index, part_nodes):
"""
Returns the left, right and far partners of the node whose index is equal
to the given node_index.
:param node_index: the primary index
:param part_nodes: a list of primary nodes
:returns: [<node-to-left>, <node-to-right>, <node-opposite>]
"""
num_nodes = len(part_nodes)
return [
part_nodes[(node_index - 1) % num_nodes],
part_nodes[(node_index + 1) % num_nodes],
part_nodes[(
node_index + (num_nodes // 2)
) % num_nodes],
] |
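
Example: a small sketch (not part of the Swift source) using the helper above
with seven primary nodes; node 0 partners with its immediate neighbours and
the node roughly half-way around the partition's node list.

part_nodes = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6']
assert _get_partners(0, part_nodes) == ['n6', 'n1', 'n3']
assert _get_partners(5, part_nodes) == ['n4', 'n6', 'n1']
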
Combines the node properties, partition, relative-path and policy into a
single string representation.
:param node: a dict describing node properties
:param part: partition number
:param relative_path: path of the desired EC archive relative to partition dir
:param policy: an instance of
:class:`~swift.common.storage_policy.BaseStoragePolicy`
:return: string representation of absolute path on node plus policy index | def _full_path(node, part, relative_path, policy):
"""
Combines the node properties, partition, relative-path and policy into a
single string representation.
:param node: a dict describing node properties
:param part: partition number
:param relative_path: path of the desired EC archive relative to partition dir
:param policy: an instance of
:class:`~swift.common.storage_policy.BaseStoragePolicy`
:return: string representation of absolute path on node plus policy index
"""
if not isinstance(relative_path, six.text_type):
relative_path = relative_path.decode('utf8')
return '%(node)s/%(part)s%(path)s policy#%(policy)d' % {
'node': node_to_string(node, replication=True),
'part': part, 'path': relative_path,
'policy': policy,
} |
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time | def drain(file_like, read_size, timeout):
"""
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time
"""
while True:
with ChunkReadTimeout(timeout):
chunk = file_like.read(read_size)
if not chunk:
break |
Split and validate path for an object.
:param request: a swob request
:returns: a tuple of path parts and storage policy | def get_obj_name_and_placement(request):
"""
Split and validate path for an object.
:param request: a swob request
:returns: a tuple of path parts and storage policy
"""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
validate_internal_obj(account, container, obj)
return device, partition, account, container, obj, policy |
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
limit the number of concurrent SSYNC_REQUESTS across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc. | def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
limit the number of concurrent SSYNC_REQUESTS across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc.
"""
replication_concurrency = int(
preloaded_app_conf.get('replication_concurrency') or 4)
if replication_concurrency:
# Have to put the value in a list so it can get past paste
global_conf['replication_semaphore'] = [
multiprocessing.BoundedSemaphore(replication_concurrency)] |
paste.deploy app factory for creating WSGI object server apps | def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf) |
Parse a string of the form generated by
:py:func:`~swift.obj.ssync_sender.encode_missing` and return a dict
with keys ``object_hash``, ``ts_data``, ``ts_meta``, ``ts_ctype``,
``durable``.
The encoder for this line is
:py:func:`~swift.obj.ssync_sender.encode_missing` | def decode_missing(line):
"""
Parse a string of the form generated by
:py:func:`~swift.obj.ssync_sender.encode_missing` and return a dict
with keys ``object_hash``, ``ts_data``, ``ts_meta``, ``ts_ctype``,
``durable``.
The encoder for this line is
:py:func:`~swift.obj.ssync_sender.encode_missing`
"""
result = {}
parts = line.decode('ascii').split()
result['object_hash'] = urllib.parse.unquote(parts[0])
t_data = urllib.parse.unquote(parts[1])
result['ts_data'] = ts_data = Timestamp(t_data)
result['ts_meta'] = result['ts_ctype'] = ts_data
result['durable'] = True # default to True in case this key isn't sent
if len(parts) > 2:
# allow for a comma separated list of k:v pairs to future-proof
subparts = urllib.parse.unquote(parts[2]).split(',')
for item in [subpart for subpart in subparts if ':' in subpart]:
k, v = item.split(':')
if k == 'm':
v, _, o = v.partition('__')
# ignore ts_data offset when calculating ts_meta
result['ts_meta'] = Timestamp(ts_data.normal,
delta=int(v, 16),
offset=int(o or '0', 16))
elif k == 't':
v, _, o = v.partition('__')
# ignore ts_data offset when calculating ts_ctype
result['ts_ctype'] = Timestamp(Timestamp(ts_data).normal,
delta=int(v, 16),
offset=int(o or '0', 16))
elif k == 'durable':
result['durable'] = utils.config_true_value(v)
return result |
Compare remote and local results and generate a wanted line.
:param remote: a dict, with ts_data and ts_meta keys in the form
returned by :py:func:`decode_missing`
:param local: a dict, possibly empty, with ts_data and ts_meta keys
in the form returned by :py:meth:`Receiver._check_local`
The decoder for this line is
:py:func:`~swift.obj.ssync_sender.decode_wanted` | def encode_wanted(remote, local):
"""
Compare remote and local results and generate a wanted line.
:param remote: a dict, with ts_data and ts_meta keys in the form
returned by :py:func:`decode_missing`
:param local: a dict, possibly empty, with ts_data and ts_meta keys
in the form returned by :py:meth:`Receiver._check_local`
The decoder for this line is
:py:func:`~swift.obj.ssync_sender.decode_wanted`
"""
want = {}
if 'ts_data' in local:
# we have something, let's get just the right stuff
if remote['ts_data'] > local['ts_data']:
want['data'] = True
if 'ts_meta' in local and remote['ts_meta'] > local['ts_meta']:
want['meta'] = True
if ('ts_ctype' in local and remote['ts_ctype'] > local['ts_ctype']
and remote['ts_ctype'] > remote['ts_data']):
want['meta'] = True
else:
# we got nothing, so we'll take whatever the remote has
want['data'] = True
want['meta'] = True
if want:
# this is the inverse of _decode_wanted's key_map
key_map = dict(data='d', meta='m')
parts = ''.join(v for k, v in sorted(key_map.items()) if want.get(k))
return '%s %s' % (urllib.parse.quote(remote['object_hash']), parts)
return None |
Returns a string representing the object hash, its data file timestamp,
the delta forwards to its metafile and content-type timestamps, if
non-zero, and its durability, in the form:
``<hash> <ts_data> [m:<hex delta to ts_meta>[,t:<hex delta to ts_ctype>]
[,durable:False]]``
The decoder for this line is
:py:func:`~swift.obj.ssync_receiver.decode_missing` | def encode_missing(object_hash, ts_data, ts_meta=None, ts_ctype=None,
**kwargs):
"""
Returns a string representing the object hash, its data file timestamp,
the delta forwards to its metafile and content-type timestamps, if
non-zero, and its durability, in the form:
``<hash> <ts_data> [m:<hex delta to ts_meta>[,t:<hex delta to ts_ctype>]
[,durable:False]]``
The decoder for this line is
:py:func:`~swift.obj.ssync_receiver.decode_missing`
"""
msg = ('%s %s'
% (urllib.parse.quote(object_hash),
urllib.parse.quote(ts_data.internal)))
extra_parts = []
if ts_meta and ts_meta != ts_data:
delta = ts_meta.raw - ts_data.raw
extra_parts.append('m:%x' % delta)
if ts_meta.offset:
extra_parts[-1] += '__%x' % ts_meta.offset
if ts_ctype and ts_ctype != ts_data:
delta = ts_ctype.raw - ts_data.raw
extra_parts.append('t:%x' % delta)
if ts_ctype.offset:
extra_parts[-1] += '__%x' % ts_ctype.offset
if 'durable' in kwargs and kwargs['durable'] is False:
# only send durable in the less common case that it is False
extra_parts.append('durable:%s' % kwargs['durable'])
if extra_parts:
msg = '%s %s' % (msg, ','.join(extra_parts))
return msg.encode('ascii') |
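
Example: an illustrative sketch (not part of the Swift source) encoding a
missing_check line for an object whose .meta file is newer than its .data
file, then decoding it with decode_missing (shown earlier); the import paths
and the object hash are assumptions.

from swift.common.utils import Timestamp
from swift.obj.ssync_receiver import decode_missing
from swift.obj.ssync_sender import encode_missing
ts_data = Timestamp(1590000000.0)
ts_meta = Timestamp(1590000010.0)
obj_hash = 'feee1d2f9fc9ff9b4e04d9858aa85d0b'   # hypothetical object hash
line = encode_missing(obj_hash, ts_data, ts_meta, durable=False)
# line == b'<hash> 1590000000.00000 m:f4240,durable:False'
parsed = decode_missing(line)
# parsed['ts_meta'] == ts_meta and parsed['durable'] is False
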
Parse missing_check line parts to determine which parts of local
diskfile were wanted by the receiver.
The encoder for parts is
:py:func:`~swift.obj.ssync_receiver.encode_wanted` | def decode_wanted(parts):
"""
Parse missing_check line parts to determine which parts of local
diskfile were wanted by the receiver.
The encoder for parts is
:py:func:`~swift.obj.ssync_receiver.encode_wanted`
"""
wanted = {}
key_map = {'d': 'data', 'm': 'meta'}
if parts:
# receiver specified data and/or meta wanted, so use those as
# conditions for sending PUT and/or POST subrequests
for k in key_map:
if k in parts[0]:
wanted[key_map[k]] = True
if not wanted:
# assume legacy receiver which will only accept PUTs. There is no
# way to send any meta file content without morphing the timestamp
# of either the data or the metadata, so we just send data file
# content to a legacy receiver. Once the receiver gets updated we
# will be able to send it the meta file content.
wanted['data'] = True
return wanted |
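
Example: a small sketch (not part of the Swift source) showing how the
receiver's reply parts map back onto which files the sender should ship,
using the helper above.

assert decode_wanted(['dm']) == {'data': True, 'meta': True}
assert decode_wanted(['d']) == {'data': True}
assert decode_wanted([]) == {'data': True}   # legacy receiver: send the data
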
Split the account and container parts out of the async update data.
N.B. updates to shards set the container_path key while the account and
container keys are always the root. | def split_update_path(update):
"""
Split the account and container parts out of the async update data.
N.B. updates to shards set the container_path key while the account and
container keys are always the root.
"""
container_path = update.get('container_path')
if container_path:
acct, cont = split_path('/' + container_path, minsegs=2)
else:
acct, cont = update['account'], update['container']
return acct, cont |
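
Example: a small sketch (not part of the Swift source) using the helper above;
the shard container name is made up for illustration.

update = {'account': 'AUTH_test', 'container': 'photos',
          'container_path': '.shards_AUTH_test/photos-abc-0'}
assert split_update_path(update) == ('.shards_AUTH_test', 'photos-abc-0')
# without container_path the root account/container is used
assert split_update_path({'account': 'AUTH_test',
                          'container': 'photos'}) == ('AUTH_test', 'photos')
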
Search the config file for any per-policy config sections and load those
sections to a dict mapping policy reference (name or index) to policy
options.
:param conf: the proxy server conf dict
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name | def parse_per_policy_config(conf):
"""
Search the config file for any per-policy config sections and load those
sections to a dict mapping policy reference (name or index) to policy
options.
:param conf: the proxy server conf dict
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name
"""
policy_section_prefix = conf['__name__'] + ':policy:'
return parse_prefixed_conf(conf['__file__'], policy_section_prefix) |
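For context, a hedged sketch (as comments) of the per-policy sections this looks for, assuming the proxy app section is named proxy-server as in the sample config; the option names and the exact return shape are illustrative and depend on parse_prefixed_conf.
# Sketch only: illustrative proxy-server.conf fragment.
#
#   [app:proxy-server]
#   use = egg:swift#proxy
#   sorting_method = shuffle
#
#   [proxy-server:policy:0]
#   sorting_method = affinity
#   read_affinity = r1=100
#
# parse_per_policy_config(conf) would then return something like:
#   {'0': {'sorting_method': 'affinity', 'read_affinity': 'r1=100'}}
# keyed by whatever follows the 'proxy-server:policy:' prefix.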
paste.deploy app factory for creating WSGI proxy apps. | def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
# Do this here so that the use of conf['__file__'] and conf['__name__'] is
# isolated from the Application. This also simplifies tests that construct
# an Application instance directly.
conf['policy_config'] = parse_per_policy_config(conf)
app = Application(conf)
app.check_config()
return app |
Helper function to update headers in the response.
:param response: swob.Response object
:param headers: dictionary headers | def update_headers(response, headers):
"""
Helper function to update headers in the response.
:param response: swob.Response object
:param headers: dictionary headers
"""
if hasattr(headers, 'items'):
headers = headers.items()
for name, value in headers:
if name.lower() == 'etag':
response.headers[name] = value.replace('"', '')
elif name.lower() not in (
'date', 'content-length', 'content-type',
'connection', 'x-put-timestamp', 'x-delete-after'):
response.headers[name] = value |
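A minimal hedged sketch of the copy/skip behaviour, using a stand-in object instead of a real swob.Response.
# Sketch only: _FakeResponse is a stand-in with just enough for this helper.
class _FakeResponse(object):
    def __init__(self):
        self.headers = {}

resp = _FakeResponse()
update_headers(resp, {'ETag': '"d41d8cd98f00b204e9800998ecf8427e"',
                      'Content-Length': '0',
                      'X-Object-Meta-Color': 'blue'})
# resp.headers keeps the unquoted ETag and the user metadata header;
# Content-Length is skipped because it is in the excluded set.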
Decorator to declare which methods should have any swift.authorize call
delayed. This is so the method can load the Request object up with
additional information that may be needed by the authorization system.
:param func: function for which authorization will be delayed | def delay_denial(func):
"""
Decorator to declare which methods should have any swift.authorize call
delayed. This is so the method can load the Request object up with
additional information that may be needed by the authorization system.
:param func: function for which authorization will be delayed
"""
func.delay_denial = True
return func |
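A short hedged sketch of how a proxy controller method is typically marked; the controller class here is hypothetical.
# Sketch only: ExampleController is made up.
class ExampleController(object):
    @delay_denial
    def GET(self, req):
        # annotate req before swift.authorize is eventually called
        return None

ExampleController.GET.delay_denial  # -> True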
Helper method that iterates once over a dict of headers,
converting all keys to lower case and separating
into subsets containing user metadata, system metadata
and other headers. | def _prep_headers_to_info(headers, server_type):
"""
Helper method that iterates once over a dict of headers,
converting all keys to lower case and separating
into subsets containing user metadata, system metadata
and other headers.
"""
meta = {}
sysmeta = {}
other = {}
for key, val in dict(headers).items():
lkey = wsgi_to_str(key).lower()
val = wsgi_to_str(val) if isinstance(val, str) else val
if is_user_meta(server_type, lkey):
meta[strip_user_meta_prefix(server_type, lkey)] = val
elif is_sys_meta(server_type, lkey):
sysmeta[strip_sys_meta_prefix(server_type, lkey)] = val
else:
other[lkey] = val
return other, meta, sysmeta |
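A hedged sketch of the three-way split for object headers, assuming the usual x-object-meta-/x-object-sysmeta- prefixes from swift.common.request_helpers; the header values are made up.
# Sketch only: made-up headers.
other, meta, sysmeta = _prep_headers_to_info({
    'Content-Type': 'text/plain',
    'X-Object-Meta-Color': 'blue',
    'X-Object-Sysmeta-Slo-Size': '1024',
}, 'object')
# other   -> {'content-type': 'text/plain'}
# meta    -> {'color': 'blue'}
# sysmeta -> {'slo-size': '1024'}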
Construct a cacheable dict of account info based on response headers. | def headers_to_account_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of account info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'account')
account_info = {
'status': status_int,
# 'container_count' anomaly:
# Previous code sometimes expects an int sometimes a string
# Current code aligns to str and None, yet translates to int in
# deprecated functions as needed
'container_count': headers.get('x-account-container-count'),
'total_object_count': headers.get('x-account-object-count'),
'bytes': headers.get('x-account-bytes-used'),
'storage_policies': {policy.idx: {
'container_count': int(headers.get(
'x-account-storage-policy-{}-container-count'.format(
policy.name), 0)),
'object_count': int(headers.get(
'x-account-storage-policy-{}-object-count'.format(
policy.name), 0)),
'bytes': int(headers.get(
'x-account-storage-policy-{}-bytes-used'.format(
policy.name), 0))}
for policy in POLICIES
},
'meta': meta,
'sysmeta': sysmeta,
}
if is_success(status_int):
account_info['account_really_exists'] = not config_true_value(
headers.get('x-backend-fake-account-listing'))
return account_info |
Construct a cacheable dict of container info based on response headers. | def headers_to_container_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of container info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'container')
return {
'status': status_int,
'read_acl': headers.get('x-container-read'),
'write_acl': headers.get('x-container-write'),
'sync_to': headers.get('x-container-sync-to'),
'sync_key': headers.get('x-container-sync-key'),
'object_count': headers.get('x-container-object-count'),
'bytes': headers.get('x-container-bytes-used'),
'versions': headers.get('x-versions-location'),
'storage_policy': headers.get('x-backend-storage-policy-index', '0'),
'cors': {
'allow_origin': meta.get('access-control-allow-origin'),
'expose_headers': meta.get('access-control-expose-headers'),
'max_age': meta.get('access-control-max-age')
},
'meta': meta,
'sysmeta': sysmeta,
'sharding_state': headers.get('x-backend-sharding-state', 'unsharded'),
# the 'internal' format version of timestamps is cached since the
# normal format can be derived from this when required
'created_at': headers.get('x-backend-timestamp'),
'put_timestamp': headers.get('x-backend-put-timestamp'),
'delete_timestamp': headers.get('x-backend-delete-timestamp'),
'status_changed_at': headers.get('x-backend-status-changed-at'),
} |
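A hedged sketch with a trimmed, made-up set of backend response headers and the shape of the cached info they yield.
# Sketch only: made-up backend headers.
info = headers_to_container_info({
    'X-Backend-Timestamp': '1692000000.00000',
    'X-Backend-Put-Timestamp': '1692000000.00000',
    'X-Backend-Delete-Timestamp': '0000000000.00000',
    'X-Backend-Status-Changed-At': '1692000000.00000',
    'X-Backend-Storage-Policy-Index': '1',
    'X-Container-Object-Count': '12',
    'X-Container-Bytes-Used': '4096',
    'X-Container-Meta-Web-Index': 'index.html',
}, 200)
# info['storage_policy'] -> '1', info['object_count'] -> '12',
# info['meta'] -> {'web-index': 'index.html'}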
Construct a HeaderKeyDict from a container info dict.
:param info: a dict of container metadata
:returns: a HeaderKeyDict or None if info is None or any required headers
could not be constructed | def headers_from_container_info(info):
"""
Construct a HeaderKeyDict from a container info dict.
:param info: a dict of container metadata
:returns: a HeaderKeyDict or None if info is None or any required headers
could not be constructed
"""
if not info:
return None
required = (
('x-backend-timestamp', 'created_at'),
('x-backend-put-timestamp', 'put_timestamp'),
('x-backend-delete-timestamp', 'delete_timestamp'),
('x-backend-status-changed-at', 'status_changed_at'),
('x-backend-storage-policy-index', 'storage_policy'),
('x-container-object-count', 'object_count'),
('x-container-bytes-used', 'bytes'),
('x-backend-sharding-state', 'sharding_state'),
)
required_normal_format_timestamps = (
('x-timestamp', 'created_at'),
('x-put-timestamp', 'put_timestamp'),
)
optional = (
('x-container-read', 'read_acl'),
('x-container-write', 'write_acl'),
('x-container-sync-key', 'sync_key'),
('x-container-sync-to', 'sync_to'),
('x-versions-location', 'versions'),
)
cors_optional = (
('access-control-allow-origin', 'allow_origin'),
('access-control-expose-headers', 'expose_headers'),
('access-control-max-age', 'max_age')
)
def lookup(info, key):
# raises KeyError or ValueError
val = info[key]
if val is None:
raise ValueError
return val
# note: required headers may be missing from info for example during
# upgrade when stale info is still in cache
headers = HeaderKeyDict()
for hdr, key in required:
try:
headers[hdr] = lookup(info, key)
except (KeyError, ValueError):
return None
for hdr, key in required_normal_format_timestamps:
try:
headers[hdr] = Timestamp(lookup(info, key)).normal
except (KeyError, ValueError):
return None
for hdr, key in optional:
try:
headers[hdr] = lookup(info, key)
except (KeyError, ValueError):
pass
policy_index = info.get('storage_policy')
headers['x-storage-policy'] = POLICIES[int(policy_index)].name
prefix = get_user_meta_prefix('container')
headers.update(
(prefix + k, v)
for k, v in info.get('meta', {}).items())
for hdr, key in cors_optional:
try:
headers[prefix + hdr] = lookup(info.get('cors'), key)
except (KeyError, ValueError):
pass
prefix = get_sys_meta_prefix('container')
headers.update(
(prefix + k, v)
for k, v in info.get('sysmeta', {}).items())
return headers |
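Continuing the sketch above, the cached info can be turned back into backend-style headers, assuming POLICIES defines the referenced policy index.
# Sketch only: reuses the info dict from the previous sketch.
headers = headers_from_container_info(info)
# headers['x-backend-storage-policy-index'] -> '1'
# headers['x-storage-policy'] -> the configured name of policy 1
headers_from_container_info(None)  # -> None
headers_from_container_info({'status': 404})  # missing required keys -> None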
Construct a cacheable dict of object info based on response headers. | def headers_to_object_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of object info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'object')
transient_sysmeta = {}
for key, val in headers.items():
if is_object_transient_sysmeta(key):
key = strip_object_transient_sysmeta_prefix(key.lower())
transient_sysmeta[key] = val
info = {'status': status_int,
'length': headers.get('content-length'),
'type': headers.get('content-type'),
'etag': headers.get('etag'),
'meta': meta,
'sysmeta': sysmeta,
'transient_sysmeta': transient_sysmeta
}
return info |
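Finally, a hedged sketch of the object variant, with made-up GET/HEAD response headers.
# Sketch only: made-up headers.
info = headers_to_object_info({
    'Content-Length': '1024',
    'Content-Type': 'application/octet-stream',
    'Etag': 'd41d8cd98f00b204e9800998ecf8427e',
    'X-Object-Meta-Color': 'blue',
    'X-Object-Transient-Sysmeta-Crypto-Id': 'abc',
}, 200)
# info['length'] -> '1024', info['meta'] -> {'color': 'blue'},
# info['transient_sysmeta'] -> {'crypto-id': 'abc'}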