The <search-value> can be of the form::
d<device_id>r<region>z<zone>-<ip>:<port>R<r_ip>:<r_port>/
<device_name>_<meta>
Where <r_ip> and <r_port> are replication ip and port.
Any part is optional, but you must include at least one part.
Examples::
d74 Matches the device id 74
r4 Matches devices in region 4
z1 Matches devices in zone 1
z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4
1.2.3.4 Matches devices in any zone with the ip 1.2.3.4
z1:5678 Matches devices in zone 1 using port 5678
:5678 Matches devices that use port 5678
R5.6.7.8 Matches devices that use replication ip 5.6.7.8
R:5678 Matches devices that use replication port 5678
1.2.3.4R5.6.7.8 Matches devices that use ip 1.2.3.4 and replication ip
5.6.7.8
/sdb1 Matches devices with the device name sdb1
_shiny Matches devices with shiny in the meta data
_"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
[::1] Matches devices in any zone with the ip ::1
z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678
Most specific example::
d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
All items require their single character prefix except the ip, in which
case the - is optional unless the device id or zone is also included. | def parse_search_value(search_value):
"""The <search-value> can be of the form::
d<device_id>r<region>z<zone>-<ip>:<port>R<r_ip>:<r_port>/
<device_name>_<meta>
Where <r_ip> and <r_port> are replication ip and port.
Any part is optional, but you must include at least one part.
Examples::
d74 Matches the device id 74
r4 Matches devices in region 4
z1 Matches devices in zone 1
z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4
1.2.3.4 Matches devices in any zone with the ip 1.2.3.4
z1:5678 Matches devices in zone 1 using port 5678
:5678 Matches devices that use port 5678
R5.6.7.8 Matches devices that use replication ip 5.6.7.8
R:5678 Matches devices that use replication port 5678
1.2.3.4R5.6.7.8 Matches devices that use ip 1.2.3.4 and replication ip
5.6.7.8
/sdb1 Matches devices with the device name sdb1
_shiny Matches devices with shiny in the meta data
_"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
[::1] Matches devices in any zone with the ip ::1
z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678
Most specific example::
d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
All items require their single character prefix except the ip, in which
case the - is optional unless the device id or zone is also included.
"""
orig_search_value = search_value
match = {}
if search_value.startswith('d'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['id'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('r'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['region'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('z'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['zone'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('-'):
search_value = search_value[1:]
if search_value and search_value[0].isdigit():
i = 1
while i < len(search_value) and search_value[i] in '0123456789.':
i += 1
match['ip'] = search_value[:i]
search_value = search_value[i:]
elif search_value and search_value.startswith('['):
i = 1
while i < len(search_value) and search_value[i] != ']':
i += 1
i += 1
match['ip'] = search_value[:i].lstrip('[').rstrip(']')
search_value = search_value[i:]
if 'ip' in match:
# ipv6 addresses are converted to all lowercase
# and use the fully expanded representation
match['ip'] = validate_and_normalize_ip(match['ip'])
if search_value.startswith(':'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['port'] = int(search_value[1:i])
search_value = search_value[i:]
# replication parameters
if search_value.startswith('R'):
search_value = search_value[1:]
if search_value and search_value[0].isdigit():
i = 1
while (i < len(search_value) and
search_value[i] in '0123456789.'):
i += 1
match['replication_ip'] = search_value[:i]
search_value = search_value[i:]
elif search_value and search_value.startswith('['):
i = 1
while i < len(search_value) and search_value[i] != ']':
i += 1
i += 1
match['replication_ip'] = search_value[:i].lstrip('[').rstrip(']')
search_value = search_value[i:]
if 'replication_ip' in match:
# ipv6 addresses are converted to all lowercase
# and use the fully expanded representation
match['replication_ip'] = \
validate_and_normalize_ip(match['replication_ip'])
if search_value.startswith(':'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['replication_port'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('/'):
i = 1
while i < len(search_value) and search_value[i] != '_':
i += 1
match['device'] = search_value[1:i]
search_value = search_value[i:]
if search_value.startswith('_'):
match['meta'] = search_value[1:]
search_value = ''
if search_value:
raise ValueError('Invalid <search-value>: %s' %
repr(orig_search_value))
return match |
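A minimal usage sketch of the parser above. The import path is an assumption: in the OpenStack Swift tree this helper lives in swift.common.ring.utils, which is where this code appears to come from.
# Hedged example: assumes Swift is installed and the helper is importable
# from swift.common.ring.utils.
from swift.common.ring.utils import parse_search_value

match = parse_search_value('d74z1-1.2.3.4:5678/sdb1')
# -> {'id': 74, 'zone': 1, 'ip': '1.2.3.4', 'port': 5678, 'device': 'sdb1'}
parse_search_value('bogus')  # raises ValueError: Invalid <search-value>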
Convert optparse style options into a dictionary for searching.
:param opts: optparse style options
:returns: a dictionary with search values to filter devices,
supported parameters are id, region, zone, ip, port,
replication_ip, replication_port, device, weight, meta | def parse_search_values_from_opts(opts):
"""
Convert optparse style options into a dictionary for searching.
:param opts: optparse style options
:returns: a dictionary with search values to filter devices,
supported parameters are id, region, zone, ip, port,
replication_ip, replication_port, device, weight, meta
"""
search_values = {}
for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
'replication_port', 'device', 'weight', 'meta'):
value = getattr(opts, key, None)
if value:
if key == 'ip' or key == 'replication_ip':
value = validate_and_normalize_address(value)
search_values[key] = value
return search_values |
Convert optparse style options into a dictionary for changing.
:param opts: optparse style options
:returns: a dictionary with change values to filter devices,
supported parameters are ip, port, replication_ip,
replication_port | def parse_change_values_from_opts(opts):
"""
Convert optparse style options into a dictionary for changing.
:param opts: optparse style options
:returns: a dictionary with change values to filter devices,
supported parameters are ip, port, replication_ip,
replication_port
"""
change_values = {}
for key in ('change_ip', 'change_port', 'change_replication_ip',
'change_replication_port', 'change_device', 'change_meta'):
value = getattr(opts, key, None)
if value:
if key == 'change_ip' or key == 'change_replication_ip':
value = validate_and_normalize_address(value)
change_values[key.replace('change_', '')] = value
return change_values |
Convert an add value, like 'r1z2-10.1.2.3:7878/sdf', to a dictionary.
If the string does not start with 'r<N>', then the value of 'region' in
the returned dictionary will be None. Callers should check for this and
set a reasonable default. This is done so callers can emit errors or
warnings if desired.
Similarly, 'replication_ip' and 'replication_port' will be None if not
specified.
:returns: dictionary with keys 'region', 'zone', 'ip', 'port', 'device',
'replication_ip', 'replication_port', 'meta'
:raises ValueError: if add_value is malformed | def parse_add_value(add_value):
"""
Convert an add value, like 'r1z2-10.1.2.3:7878/sdf', to a dictionary.
If the string does not start with 'r<N>', then the value of 'region' in
the returned dictionary will be None. Callers should check for this and
set a reasonable default. This is done so callers can emit errors or
warnings if desired.
Similarly, 'replication_ip' and 'replication_port' will be None if not
specified.
:returns: dictionary with keys 'region', 'zone', 'ip', 'port', 'device',
'replication_ip', 'replication_port', 'meta'
:raises ValueError: if add_value is malformed
"""
region = None
rest = add_value
if add_value.startswith('r'):
i = 1
while i < len(add_value) and add_value[i].isdigit():
i += 1
region = int(add_value[1:i])
rest = add_value[i:]
if not rest.startswith('z'):
raise ValueError('Invalid add value: %s' % add_value)
i = 1
while i < len(rest) and rest[i].isdigit():
i += 1
zone = int(rest[1:i])
rest = rest[i:]
if not rest.startswith('-'):
raise ValueError('Invalid add value: %s' % add_value)
ip, port, rest = parse_address(rest[1:])
replication_ip = replication_port = None
if rest.startswith('R'):
replication_ip, replication_port, rest = \
parse_address(rest[1:])
if not rest.startswith('/'):
raise ValueError(
'Invalid add value: %s' % add_value)
i = 1
while i < len(rest) and rest[i] != '_':
i += 1
device_name = rest[1:i]
if not validate_device_name(device_name):
raise ValueError('Invalid device name')
rest = rest[i:]
meta = ''
if rest.startswith('_'):
meta = rest[1:]
return {'region': region, 'zone': zone, 'ip': ip, 'port': port,
'device': device_name, 'replication_ip': replication_ip,
'replication_port': replication_port, 'meta': meta} |
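A short usage sketch built from the docstring's own example; the import path is assumed to be swift.common.ring.utils, matching the Swift source these helpers come from.
from swift.common.ring.utils import parse_add_value  # assumed import path

dev = parse_add_value('r1z2-10.1.2.3:7878/sdf')
# -> {'region': 1, 'zone': 2, 'ip': '10.1.2.3', 'port': 7878,
#     'device': 'sdf', 'replication_ip': None, 'replication_port': None,
#     'meta': ''}
parse_add_value('z2-10.1.2.3:7878/sdf')['region']  # -> None; caller picks a default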
Build an OptionParser and check whether the arguments are in the new
command-line format or not. | def validate_args(argvish):
"""
Build an OptionParser and check whether the arguments are in the new
command-line format or not.
"""
opts, args = parse_args(argvish)
# id can be 0 (swift starts generating id from 0),
# also zone, region and weight can be set to zero.
new_cmd_format = opts.id is not None or opts.region is not None or \
opts.zone is not None or opts.ip or opts.port or \
opts.replication_ip or opts.replication_port or \
opts.device or opts.weight is not None or opts.meta
return (new_cmd_format, opts, args) |
Build OptionParser and evaluate command line arguments. | def parse_args(argvish):
"""
Build OptionParser and evaluate command line arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-u', '--id', type="int",
help="Device ID")
parser.add_option('-r', '--region', type="int",
help="Region")
parser.add_option('-z', '--zone', type="int",
help="Zone")
parser.add_option('-i', '--ip', type="string",
help="IP address")
parser.add_option('-p', '--port', type="int",
help="Port number")
parser.add_option('-j', '--replication-ip', type="string",
help="Replication IP address")
parser.add_option('-q', '--replication-port', type="int",
help="Replication port number")
parser.add_option('-d', '--device', type="string",
help="Device name (e.g. md0, sdb1)")
parser.add_option('-w', '--weight', type="float",
help="Device weight")
parser.add_option('-m', '--meta', type="string", default="",
help="Extra device info (just a string)")
parser.add_option('-I', '--change-ip', type="string",
help="IP address for change")
parser.add_option('-P', '--change-port', type="int",
help="Port number for change")
parser.add_option('-J', '--change-replication-ip', type="string",
help="Replication IP address for change")
parser.add_option('-Q', '--change-replication-port', type="int",
help="Replication port number for change")
parser.add_option('-D', '--change-device', type="string",
help="Device name (e.g. md0, sdb1) for change")
parser.add_option('-M', '--change-meta', type="string", default="",
help="Extra device info (just a string) for change")
parser.add_option('-y', '--yes', default=False, action="store_true",
help="Assume a yes response to all questions")
return parser.parse_args(argvish) |
Convert optparse style options into a device dictionary. | def build_dev_from_opts(opts):
"""
Convert optparse style options into a device dictionary.
"""
for attribute, shortopt, longopt in (['region', '-r', '--region'],
['zone', '-z', '--zone'],
['ip', '-i', '--ip'],
['port', '-p', '--port'],
['device', '-d', '--device'],
['weight', '-w', '--weight']):
if getattr(opts, attribute, None) is None:
raise ValueError('Required argument %s/%s not specified.' %
(shortopt, longopt))
ip = validate_and_normalize_address(opts.ip)
replication_ip = validate_and_normalize_address(
(opts.replication_ip or opts.ip))
replication_port = opts.replication_port or opts.port
if not validate_device_name(opts.device):
raise ValueError('Invalid device name')
return {'region': opts.region, 'zone': opts.zone, 'ip': ip,
'port': opts.port, 'device': opts.device, 'meta': opts.meta,
'replication_ip': replication_ip,
'replication_port': replication_port, 'weight': opts.weight} |
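The three helpers above are typically chained: parse_args builds the OptionParser, validate_args detects the new-style flags, and build_dev_from_opts turns them into a device dict. A hedged sketch, with the import path assumed to be swift.common.ring.utils:
from swift.common.ring.utils import validate_args, build_dev_from_opts

new_fmt, opts, args = validate_args([
    '--region', '1', '--zone', '2', '--ip', '127.0.0.1', '--port', '6200',
    '--device', 'sdb1', '--weight', '100'])
if new_fmt:
    dev = build_dev_from_opts(opts)
    # dev['replication_ip'] / dev['replication_port'] fall back to ip / port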
Validate the sum of the replicas at each tier.
The sum of the replicas at each tier should be less than or very close to
the upper limit indicated by replicas
:param replicas: float, the upper limit of replicas
:param replicas_by_tier: defaultdict, the replicas by tier | def validate_replicas_by_tier(replicas, replicas_by_tier):
"""
Validate the sum of the replicas at each tier.
The sum of the replicas at each tier should be less than or very close to
the upper limit indicated by replicas
:param replicas: float, the upper limit of replicas
:param replicas_by_tier: defaultdict, the replicas by tier
"""
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(replicas_by_tier[t] for t in
replicas_by_tier if len(t) == i)
if abs(replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, replicas, tier_name)) |
Convert device dict or tier attributes to a representative string.
:returns: a string, the normalized format of a device tier | def format_device(region=None, zone=None, ip=None, device=None, **kwargs):
"""
Convert device dict or tier attributes to a representative string.
:returns: a string, the normalized format of a device tier
"""
return "r%sz%s-%s/%s" % (region, zone, ip, device) |
Get the valid parts of a utf-8 str from a str, unicode, or even an invalid utf-8 str
:param str_or_unicode: a string or a unicode which can be invalid utf-8 | def get_valid_utf8_str(str_or_unicode):
"""
Get the valid parts of a utf-8 str from a str, unicode, or even an invalid utf-8 str
:param str_or_unicode: a string or a unicode which can be invalid utf-8
"""
if six.PY2:
if isinstance(str_or_unicode, six.text_type):
(str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace')
(valid_unicode_str, _len) = utf8_decoder(str_or_unicode, 'replace')
else:
if isinstance(str_or_unicode, six.binary_type):
try:
(str_or_unicode, _len) = utf8_decoder(str_or_unicode,
'surrogatepass')
except UnicodeDecodeError:
(str_or_unicode, _len) = utf8_decoder(str_or_unicode,
'replace')
(str_or_unicode, _len) = utf16_encoder(str_or_unicode, 'surrogatepass')
(valid_unicode_str, _len) = utf16_decoder(str_or_unicode, 'replace')
return valid_unicode_str.encode('utf-8') |
Patched version of urllib.quote that encodes utf-8 strings before quoting | def quote(value, safe='/'):
"""
Patched version of urllib.quote that encodes utf-8 strings before quoting
"""
quoted = _quote(get_valid_utf8_str(value), safe)
if isinstance(value, six.binary_type):
quoted = quoted.encode('utf-8')
return quoted |
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises ValueError: if given an invalid path | def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises ValueError: if given an invalid path
"""
if not maxsegs:
maxsegs = minsegs
if minsegs > maxsegs:
raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
if rest_with_last:
segs = path.split('/', maxsegs)
minsegs += 1
maxsegs += 1
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs or
'' in segs[1:minsegs]):
raise ValueError('Invalid path: %s' % quote(path))
else:
minsegs += 1
maxsegs += 1
segs = path.split('/', maxsegs)
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs + 1 or
'' in segs[1:minsegs] or
(count == maxsegs + 1 and segs[maxsegs])):
raise ValueError('Invalid path: %s' % quote(path))
segs = segs[1:maxsegs]
segs.extend([None] * (maxsegs - 1 - len(segs)))
return segs |
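The docstring examples above can be exercised directly; a hedged sketch assuming the function is importable from swift.common.utils:
from swift.common.utils import split_path  # assumed import path

split_path('/a/c', 1, 2)            # -> ['a', 'c']
split_path('/a', 1, 2)              # -> ['a', None]
split_path('/a/c/o/r', 1, 3, True)  # -> ['a', 'c', 'o/r']
split_path('/a/c/o/r', 1, 3)        # raises ValueError: Invalid path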
Returns True if the value is either True or a string in TRUE_VALUES.
Returns False otherwise. | def config_true_value(value):
"""
Returns True if the value is either True or a string in TRUE_VALUES.
Returns False otherwise.
"""
return value is True or \
(isinstance(value, six.string_types) and value.lower() in TRUE_VALUES) |
Check that the value casts to a float and is non-negative.
:param value: value to check
:raises ValueError: if the value cannot be cast to a float or is negative.
:return: a float | def non_negative_float(value):
"""
Check that the value casts to a float and is non-negative.
:param value: value to check
:raises ValueError: if the value cannot be cast to a float or is negative.
:return: a float
"""
try:
value = float(value)
if value < 0:
raise ValueError
except (TypeError, ValueError):
raise ValueError('Value must be a non-negative float number, not "%s".'
% value)
return value |
Check that the value casts to an int and is a whole number.
:param value: value to check
:raises ValueError: if the value cannot be cast to an int or does not
represent a whole number.
:return: an int | def non_negative_int(value):
"""
Check that the value casts to an int and is a whole number.
:param value: value to check
:raises ValueError: if the value cannot be cast to an int or does not
represent a whole number.
:return: an int
"""
int_value = int(value)
if int_value != non_negative_float(value):
raise ValueError
return int_value |
Returns a positive int value if it can be cast by int() and is an
integer > 0 (zero is not included). Raises ValueError otherwise. | def config_positive_int_value(value):
"""
Returns a positive int value if it can be cast by int() and is an
integer > 0 (zero is not included). Raises ValueError otherwise.
"""
try:
result = int(value)
if result < 1:
raise ValueError()
except (TypeError, ValueError):
raise ValueError(
'Config option must be a positive int number, not "%s".' % value)
return result |
Returns default if value is None or 'auto'.
Returns value as an int or raises ValueError otherwise. | def config_auto_int_value(value, default):
"""
Returns default if value is None or 'auto'.
Returns value as an int or raises ValueError otherwise.
"""
if value is None or \
(isinstance(value, six.string_types) and value.lower() == 'auto'):
return default
try:
value = int(value)
except (TypeError, ValueError):
raise ValueError('Config option must be an integer or the '
'string "auto", not "%s".' % value)
return value |
Returns fallocate reserve_value as an int or float.
Returns is_percent as a boolean.
Raises a ValueError on an invalid fallocate value. | def config_fallocate_value(reserve_value):
"""
Returns fallocate reserve_value as an int or float.
Returns is_percent as a boolean.
Raises a ValueError on an invalid fallocate value.
"""
try:
if str(reserve_value[-1:]) == '%':
reserve_value = float(reserve_value[:-1])
is_percent = True
else:
reserve_value = int(reserve_value)
is_percent = False
except ValueError:
raise ValueError('Error: %s is an invalid value for fallocate'
'_reserve.' % reserve_value)
return reserve_value, is_percent |
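Based on the code above, the return value pairs the parsed number with a percent flag; a small hedged sketch (import path assumed):
from swift.common.utils import config_fallocate_value  # assumed import path

config_fallocate_value('10%')    # -> (10.0, True)
config_fallocate_value('1024')   # -> (1024, False)
config_fallocate_value('abc')    # raises ValueError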
Read prefixed options from configuration
:param conf: the configuration
:param prefix_name: the prefix (including, if needed, an underscore)
:param defaults: a dict of default values. The dict supplies the
option name and type (string or comma separated string)
:return: a dict containing the options | def config_read_prefixed_options(conf, prefix_name, defaults):
"""
Read prefixed options from configuration
:param conf: the configuration
:param prefix_name: the prefix (including, if needed, an underscore)
:param defaults: a dict of default values. The dict supplies the
option name and type (string or comma separated string)
:return: a dict containing the options
"""
params = {}
for option_name in defaults.keys():
value = conf.get('%s%s' % (prefix_name, option_name))
if value:
if isinstance(defaults.get(option_name), list):
params[option_name] = []
for role in value.lower().split(','):
params[option_name].append(role.strip())
else:
params[option_name] = value.strip()
return params |
Read reseller_prefix option and associated options from configuration
Reads the reseller_prefix option, then reads options that may be
associated with a specific reseller prefix. Reads options such that an
option without a prefix applies to all reseller prefixes unless an option
has an explicit prefix.
:param conf: the configuration
:param defaults: a dict of default values. The key is the option
name. The value is either an array of strings or a string
:return: tuple of an array of reseller prefixes and a dict of option values | def config_read_reseller_options(conf, defaults):
"""
Read reseller_prefix option and associated options from configuration
Reads the reseller_prefix option, then reads options that may be
associated with a specific reseller prefix. Reads options such that an
option without a prefix applies to all reseller prefixes unless an option
has an explicit prefix.
:param conf: the configuration
:param defaults: a dict of default values. The key is the option
name. The value is either an array of strings or a string
:return: tuple of an array of reseller prefixes and a dict of option values
"""
reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',')
reseller_prefixes = []
for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]:
if prefix == "''":
prefix = ''
prefix = append_underscore(prefix)
if prefix not in reseller_prefixes:
reseller_prefixes.append(prefix)
if len(reseller_prefixes) == 0:
reseller_prefixes.append('')
# Get prefix-using config options
associated_options = {}
for prefix in reseller_prefixes:
associated_options[prefix] = dict(defaults)
associated_options[prefix].update(
config_read_prefixed_options(conf, '', defaults))
prefix_name = prefix if prefix != '' else "''"
associated_options[prefix].update(
config_read_prefixed_options(conf, prefix_name, defaults))
return reseller_prefixes, associated_options |
Turns an affinity config value into a function suitable for passing to
sort(). After doing so, the array will be sorted with respect to the given
ordering.
For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array
will be sorted with all nodes from region 1 (r1=1) first, then all the
nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything
else.
Note that the order of the pieces of affinity_str is irrelevant; the
priority values are what comes after the equals sign.
If affinity_str is empty or all whitespace, then the resulting function
will not alter the ordering of the nodes.
:param affinity_str: affinity config value, e.g. "r1z2=3"
or "r1=1, r2z1=2, r2z2=2"
:returns: single-argument function
:raises ValueError: if argument invalid | def affinity_key_function(affinity_str):
"""Turns an affinity config value into a function suitable for passing to
sort(). After doing so, the array will be sorted with respect to the given
ordering.
For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array
will be sorted with all nodes from region 1 (r1=1) first, then all the
nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything
else.
Note that the order of the pieces of affinity_str is irrelevant; the
priority values are what comes after the equals sign.
If affinity_str is empty or all whitespace, then the resulting function
will not alter the ordering of the nodes.
:param affinity_str: affinity config value, e.g. "r1z2=3"
or "r1=1, r2z1=2, r2z2=2"
:returns: single-argument function
:raises ValueError: if argument invalid
"""
affinity_str = affinity_str.strip()
if not affinity_str:
return lambda x: 0
priority_matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number>=<number> or r<number>z<number>=<number>
match = re.match(r"r(\d+)(?:z(\d+))?=(\d+)$", piece)
if match:
region, zone, priority = match.groups()
region = int(region)
priority = int(priority)
zone = int(zone) if zone else None
matcher = {'region': region, 'priority': priority}
if zone is not None:
matcher['zone'] = zone
priority_matchers.append(matcher)
else:
raise ValueError("Invalid affinity value: %r" % affinity_str)
priority_matchers.sort(key=operator.itemgetter('priority'))
def keyfn(ring_node):
for matcher in priority_matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return matcher['priority']
return 4294967296 # 2^32, i.e. "a big number"
return keyfn |
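A hedged sorting sketch showing the key function in action; node dicts here are simplified to just region/zone, and the import path is an assumption.
from swift.common.utils import affinity_key_function  # assumed import path

keyfn = affinity_key_function("r1=1, r2z7=2, r2z8=2")
nodes = [{'region': 3, 'zone': 1}, {'region': 2, 'zone': 7},
         {'region': 1, 'zone': 5}]
nodes.sort(key=keyfn)
# -> the region 1 node first, then the r2z7 node, then everything else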
Turns a write-affinity config value into a predicate function for nodes.
The returned value will be a 1-arg function that takes a node dictionary
and returns a true value if it is "local" and a false value otherwise. The
definition of "local" comes from the affinity_str argument passed in here.
For example, if affinity_str is "r1, r2z2", then only nodes where region=1
or where (region=2 and zone=2) are considered local.
If affinity_str is empty or all whitespace, then the resulting function
will consider everything local
:param write_affinity_str: affinity config value, e.g. "r1z2"
or "r1, r2z1, r2z2"
:returns: single-argument function, or None if affinity_str is empty
:raises ValueError: if argument invalid | def affinity_locality_predicate(write_affinity_str):
"""
Turns a write-affinity config value into a predicate function for nodes.
The returned value will be a 1-arg function that takes a node dictionary
and returns a true value if it is "local" and a false value otherwise. The
definition of "local" comes from the affinity_str argument passed in here.
For example, if affinity_str is "r1, r2z2", then only nodes where region=1
or where (region=2 and zone=2) are considered local.
If affinity_str is empty or all whitespace, then the resulting function
will consider everything local
:param write_affinity_str: affinity config value, e.g. "r1z2"
or "r1, r2z1, r2z2"
:returns: single-argument function, or None if affinity_str is empty
:raises ValueError: if argument invalid
"""
affinity_str = write_affinity_str.strip()
if not affinity_str:
return None
matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number> or r<number>z<number>
match = re.match(r"r(\d+)(?:z(\d+))?$", piece)
if match:
region, zone = match.groups()
region = int(region)
zone = int(zone) if zone else None
matcher = {'region': region}
if zone is not None:
matcher['zone'] = zone
matchers.append(matcher)
else:
raise ValueError("Invalid write-affinity value: %r" % affinity_str)
def is_local(ring_node):
for matcher in matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return True
return False
return is_local |
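And the write-affinity counterpart, again as a hedged sketch with an assumed import path:
from swift.common.utils import affinity_locality_predicate  # assumed path

is_local = affinity_locality_predicate("r1, r2z2")
is_local({'region': 1, 'zone': 4})   # -> True  (matches r1)
is_local({'region': 2, 'zone': 2})   # -> True  (matches r2z2)
is_local({'region': 2, 'zone': 3})   # -> False
affinity_locality_predicate("  ")    # -> None (empty/whitespace value)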
Read config file(s) and return config items as a dict
:param conf_path: path to config file/directory, or a file-like object
(hasattr readline)
:param section_name: config section to read (will return all sections if
not defined)
:param log_name: name to be used with logging (will use section_name if
not defined)
:param defaults: dict of default values to pre-populate the config with
:returns: dict of config items
:raises ValueError: if section_name does not exist
:raises IOError: if reading the file failed | def readconf(conf_path, section_name=None, log_name=None, defaults=None,
raw=False):
"""
Read config file(s) and return config items as a dict
:param conf_path: path to config file/directory, or a file-like object
(hasattr readline)
:param section_name: config section to read (will return all sections if
not defined)
:param log_name: name to be used with logging (will use section_name if
not defined)
:param defaults: dict of default values to pre-populate the config with
:returns: dict of config items
:raises ValueError: if section_name does not exist
:raises IOError: if reading the file failed
"""
if defaults is None:
defaults = {}
if raw:
c = RawConfigParser(defaults)
else:
if six.PY2:
c = ConfigParser(defaults)
else:
# In general, we haven't really thought much about interpolation
# in configs. Python's default ConfigParser has always supported
# it, though, so *we* got it "for free". Unfortunately, since we
# "supported" interpolation, we have to assume there are
# deployments in the wild that use it, and try not to break them.
# So, do what we can to mimic the py2 behavior of passing through
# values like "1%" (which we want to support for
# fallocate_reserve).
c = ConfigParser(defaults, interpolation=NicerInterpolation())
c.optionxform = str # Don't lower-case keys
if hasattr(conf_path, 'readline'):
if hasattr(conf_path, 'seek'):
conf_path.seek(0)
if six.PY2:
c.readfp(conf_path)
else:
c.read_file(conf_path)
else:
if os.path.isdir(conf_path):
# read all configs in directory
success = read_conf_dir(c, conf_path)
else:
success = c.read(conf_path)
if not success:
raise IOError("Unable to read config from %s" %
conf_path)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
else:
raise ValueError(
"Unable to find %(section)s config section in %(conf)s" %
{'section': section_name, 'conf': conf_path})
if "log_name" not in conf:
if log_name is not None:
conf['log_name'] = log_name
else:
conf['log_name'] = section_name
else:
conf = {}
for s in c.sections():
conf.update({s: dict(c.items(s))})
if 'log_name' not in conf:
conf['log_name'] = log_name
conf['__file__'] = conf_path
return conf |
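readconf also accepts any file-like object with a readline method, which makes a quick in-memory illustration possible. A sketch only (Python 3 semantics and the swift.common.utils import path are assumptions), not how Swift itself normally calls it:
import io
from swift.common.utils import readconf  # assumed import path

sample = io.StringIO(u"[DEFAULT]\nswift_dir = /etc/swift\n"
                     u"[app:proxy-server]\nuse = egg:swift#proxy\n")
conf = readconf(sample, section_name='app:proxy-server', log_name='proxy')
# conf contains the section's options plus the DEFAULT values, with
# conf['log_name'] == 'proxy' and '__file__' set to the source object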
Search the config file for any common-prefix sections and load those
sections to a dict mapping the after-prefix reference to options.
:param conf_file: the file name of the config to parse
:param prefix: the common prefix of the sections
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name | def parse_prefixed_conf(conf_file, prefix):
"""
Search the config file for any common-prefix sections and load those
sections to a dict mapping the after-prefix reference to options.
:param conf_file: the file name of the config to parse
:param prefix: the common prefix of the sections
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name
"""
ret_config = {}
all_conf = readconf(conf_file)
for section, options in all_conf.items():
if not section.startswith(prefix):
continue
target_ref = section[len(prefix):]
ret_config[target_ref] = options
return ret_config |
Return True if the provided ip is a valid IP-address | def is_valid_ip(ip):
"""
Return True if the provided ip is a valid IP-address
"""
return is_valid_ipv4(ip) or is_valid_ipv6(ip) |
Return True if the provided ip is a valid IPv4-address | def is_valid_ipv4(ip):
"""
Return True if the provided ip is a valid IPv4-address
"""
try:
socket.inet_pton(socket.AF_INET, ip)
except socket.error: # not a valid IPv4 address
return False
return True |
Returns True if the provided ip is a valid IPv6-address | def is_valid_ipv6(ip):
"""
Returns True if the provided ip is a valid IPv6-address
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # not a valid IPv6 address
return False
return True |
Expand ipv6 address.
:param address: a string indicating valid ipv6 address
:returns: a string indicating fully expanded ipv6 address | def expand_ipv6(address):
"""
Expand ipv6 address.
:param address: a string indicating valid ipv6 address
:returns: a string indicating fully expanded ipv6 address
"""
packed_ip = socket.inet_pton(socket.AF_INET6, address)
return socket.inet_ntop(socket.AF_INET6, packed_ip) |
Get "our" IP addresses ("us" being the set of services configured by
one `*.conf` file). If our REST listens on a specific address, return it.
Otherwise, if it listens on '0.0.0.0' or '::', return all addresses, including
the loopback.
:param str ring_ip: Optional ring_ip/bind_ip from a config file; may be
IP address or hostname.
:returns: list of Strings of ip addresses | def whataremyips(ring_ip=None):
"""
Get "our" IP addresses ("us" being the set of services configured by
one `*.conf` file). If our REST listens on a specific address, return it.
Otherwise, if it listens on '0.0.0.0' or '::', return all addresses, including
the loopback.
:param str ring_ip: Optional ring_ip/bind_ip from a config file; may be
IP address or hostname.
:returns: list of Strings of ip addresses
"""
if ring_ip:
# See if bind_ip is '0.0.0.0'/'::'
try:
_, _, _, _, sockaddr = socket.getaddrinfo(
ring_ip, None, 0, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)[0]
if sockaddr[0] not in ('0.0.0.0', '::'):
return [ring_ip]
except socket.gaierror:
pass
addresses = []
if getifaddrs:
addrs = ctypes.POINTER(ifaddrs)()
getifaddrs(ctypes.byref(addrs))
try:
cur = addrs
while cur:
if not cur.contents.ifa_addr:
# Not all interfaces will have addresses; move on
cur = cur.contents.ifa_next
continue
sa_family = cur.contents.ifa_addr.contents.sin_family
if sa_family == socket.AF_INET:
addresses.append(
socket.inet_ntop(
socket.AF_INET,
cur.contents.ifa_addr.contents.sin_addr,
)
)
elif sa_family == socket.AF_INET6:
addr = ctypes.cast(cur.contents.ifa_addr,
ctypes.POINTER(sockaddr_in6))
addresses.append(
socket.inet_ntop(
socket.AF_INET6,
addr.contents.sin6_addr,
)
)
cur = cur.contents.ifa_next
finally:
freeifaddrs(addrs)
return addresses
# getifaddrs not available; try netifaces
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = expand_ipv6(addr.split('%')[0])
addresses.append(addr)
except ValueError:
pass
return addresses |
Given a string representing a socket, returns a tuple of (host, port).
Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an
optional port. If an IPv6 address is specified it **must** be enclosed in
[], like *[::1]* or *[::1]:11211*. This follows the accepted prescription
for `IPv6 host literals`_.
Examples::
server.org
server.org:1337
127.0.0.1:1337
[::1]:1337
[::1]
.. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2 | def parse_socket_string(socket_string, default_port):
"""
Given a string representing a socket, returns a tuple of (host, port).
Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an
optional port. If an IPv6 address is specified it **must** be enclosed in
[], like *[::1]* or *[::1]:11211*. This follows the accepted prescription
for `IPv6 host literals`_.
Examples::
server.org
server.org:1337
127.0.0.1:1337
[::1]:1337
[::1]
.. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2
"""
port = default_port
# IPv6 addresses must be between '[]'
if socket_string.startswith('['):
match = IPV6_RE.match(socket_string)
if not match:
raise ValueError("Invalid IPv6 address: %s" % socket_string)
host = match.group('address')
port = match.group('port') or port
else:
if ':' in socket_string:
tokens = socket_string.split(':')
if len(tokens) > 2:
raise ValueError("IPv6 addresses must be between '[]'")
host, port = tokens
else:
host = socket_string
return (host, port) |
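Note that, per the code above, a port parsed out of the string comes back as a string, while the default keeps whatever type was passed in. A hedged sketch (import path assumed):
from swift.common.utils import parse_socket_string  # assumed import path

parse_socket_string('server.org', 11211)      # -> ('server.org', 11211)
parse_socket_string('127.0.0.1:1337', 11211)  # -> ('127.0.0.1', '1337')
parse_socket_string('[::1]:1337', 11211)      # -> ('::1', '1337')
parse_socket_string('::1:1337', 11211)        # raises ValueError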
Return the __NR_ioprio_set syscall number for your system. | def NR_ioprio_set():
"""Return the __NR_ioprio_set syscall number for your system."""
architecture = os.uname()[4]
arch_bits = platform.architecture()[0]
# check for a supported system; currently x86_64 and AArch64 are supported
if architecture == 'x86_64' and arch_bits == '64bit':
return 251
elif architecture == 'aarch64' and arch_bits == '64bit':
return 30
raise OSError("Swift doesn't support ionice priority for %s %s" %
(architecture, arch_bits)) |
Attempt to find the function in libc, otherwise return a no-op func.
:param func_name: name of the function to pull from libc.
:param log_error: log an error when a function can't be found
:param fail_if_missing: raise an exception when a function can't be found.
Default behavior is to return a no-op function.
:param errcheck: boolean, if true install a wrapper on the function
to check for a return value of -1 and call
ctypes.get_errno and raise an OSError | def load_libc_function(func_name, log_error=True,
fail_if_missing=False, errcheck=False):
"""
Attempt to find the function in libc, otherwise return a no-op func.
:param func_name: name of the function to pull from libc.
:param log_error: log an error when a function can't be found
:param fail_if_missing: raise an exception when a function can't be found.
Default behavior is to return a no-op function.
:param errcheck: boolean, if true install a wrapper on the function
to check for a return value of -1 and call
ctypes.get_errno and raise an OSError
"""
try:
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
func = getattr(libc, func_name)
except AttributeError:
if fail_if_missing:
raise
if log_error:
logging.warning("Unable to locate %s in libc. Leaving as a "
"no-op.", func_name)
return noop_libc_function
if errcheck:
def _errcheck(result, f, args):
if result == -1:
errcode = ctypes.get_errno()
raise OSError(errcode, os.strerror(errcode))
return result
func.errcheck = _errcheck
return func |
Drop 'buffer' cache for the given range of the given file.
:param fd: file descriptor
:param offset: start offset
:param length: length | def drop_buffer_cache(fd, offset, length):
"""
Drop 'buffer' cache for the given range of the given file.
:param fd: file descriptor
:param offset: start offset
:param length: length
"""
global _posix_fadvise
if _posix_fadvise is None:
_posix_fadvise = load_libc_function('posix_fadvise64')
# 4 means "POSIX_FADV_DONTNEED"
ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(length), 4)
if ret != 0:
logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
"-> %(ret)s", {'fd': fd, 'offset': offset,
'length': length, 'ret': ret}) |
Get an MD5 socket file descriptor. One can MD5 data with it by writing it
to the socket with os.write, then os.read the 16 bytes of the checksum out
later.
NOTE: It is the caller's responsibility to ensure that os.close() is
called on the returned file descriptor. This is a bare file descriptor,
not a Python object. It doesn't close itself. | def get_md5_socket():
"""
Get an MD5 socket file descriptor. One can MD5 data with it by writing it
to the socket with os.write, then os.read the 16 bytes of the checksum out
later.
NOTE: It is the caller's responsibility to ensure that os.close() is
called on the returned file descriptor. This is a bare file descriptor,
not a Python object. It doesn't close itself.
"""
# Linux's AF_ALG sockets work like this:
#
# First, initialize a socket with socket() and bind(). This tells the
# socket what algorithm to use, as well as setting up any necessary bits
# like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
# algorithm name.
#
# Second, to hash some data, get a second socket by calling accept() on
# the first socket. Write data to the socket, then when finished, read the
# checksum from the socket and close it. This lets you checksum multiple
# things without repeating all the setup code each time.
#
# Since we only need to bind() one socket, we do that here and save it for
# future re-use. That way, we only use one file descriptor to get an MD5
# socket instead of two, and we also get to save some syscalls.
global _bound_md5_sockfd
global _libc_socket
global _libc_bind
global _libc_accept
if _libc_accept is None:
_libc_accept = load_libc_function('accept', fail_if_missing=True)
if _libc_socket is None:
_libc_socket = load_libc_function('socket', fail_if_missing=True)
if _libc_bind is None:
_libc_bind = load_libc_function('bind', fail_if_missing=True)
# Do this at first call rather than at import time so that we don't use a
# file descriptor on systems that aren't using any MD5 sockets.
if _bound_md5_sockfd is None:
sockaddr_setup = sockaddr_alg(
AF_ALG,
(ord('h'), ord('a'), ord('s'), ord('h'), 0),
0, 0,
(ord('m'), ord('d'), ord('5'), 0))
hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
ctypes.c_int(socket.SOCK_SEQPACKET),
ctypes.c_int(0))
if hash_sockfd < 0:
raise IOError(ctypes.get_errno(),
"Failed to initialize MD5 socket")
bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
ctypes.pointer(sockaddr_setup),
ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
if bind_result < 0:
os.close(hash_sockfd)
raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
_bound_md5_sockfd = hash_sockfd
md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
if md5_sockfd < 0:
raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
return md5_sockfd |
Modify priority by nice and ionice. | def modify_priority(conf, logger):
"""
Modify priority by nice and ionice.
"""
global _libc_setpriority
if _libc_setpriority is None:
_libc_setpriority = load_libc_function('setpriority',
errcheck=True)
def _setpriority(nice_priority):
"""
setpriority for this pid
:param nice_priority: valid values are -19 to 20
"""
try:
_libc_setpriority(PRIO_PROCESS, os.getpid(),
int(nice_priority))
except (ValueError, OSError):
print("WARNING: Unable to modify scheduling priority of process."
" Keeping unchanged! Check logs for more info. ")
logger.exception('Unable to modify nice priority')
else:
logger.debug('set nice priority to %s' % nice_priority)
nice_priority = conf.get('nice_priority')
if nice_priority is not None:
_setpriority(nice_priority)
global _posix_syscall
if _posix_syscall is None:
_posix_syscall = load_libc_function('syscall', errcheck=True)
def _ioprio_set(io_class, io_priority):
"""
ioprio_set for this process
:param io_class: the I/O class component, can be
IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
or IOPRIO_CLASS_IDLE
:param io_priority: priority value in the I/O class
"""
try:
io_class = IO_CLASS_ENUM[io_class]
io_priority = int(io_priority)
_posix_syscall(NR_ioprio_set(),
IOPRIO_WHO_PROCESS,
os.getpid(),
IOPRIO_PRIO_VALUE(io_class, io_priority))
except (KeyError, ValueError, OSError):
print("WARNING: Unable to modify I/O scheduling class "
"and priority of process. Keeping unchanged! "
"Check logs for more info.")
logger.exception("Unable to modify ionice priority")
else:
logger.debug('set ionice class %s priority %s',
io_class, io_priority)
io_class = conf.get("ionice_class")
if io_class is None:
return
io_priority = conf.get("ionice_priority", 0)
_ioprio_set(io_class, io_priority) |
Get the current system logger using config settings.
**Log config and defaults**::
log_facility = LOG_LOCAL0
log_level = INFO
log_name = swift
log_max_line_length = 0
log_udp_host = (disabled)
log_udp_port = logging.handlers.SYSLOG_UDP_PORT
log_address = /dev/log
log_statsd_host = (disabled)
log_statsd_port = 8125
log_statsd_default_sample_rate = 1.0
log_statsd_sample_rate_factor = 1.0
log_statsd_metric_prefix = (empty-string)
:param conf: Configuration dict to read settings from
:param name: This value is used to populate the ``server`` field in the log
format, as the prefix for statsd messages, and as the default
value for ``log_route``; defaults to the ``log_name`` value in
``conf``, if it exists, or to 'swift'.
:param log_to_console: Add handler which writes to console on stderr
:param log_route: Route for the logging, not emitted to the log, just used
to separate logging configurations; defaults to the value
of ``name`` or whatever ``name`` defaults to. This value
is used as the name attribute of the
``logging.LogAdapter`` that is returned.
:param fmt: Override log format
:param statsd_tail_prefix: tail prefix to pass to statsd client; if None
then the tail prefix defaults to the value of ``name``.
:return: an instance of ``LogAdapter`` | def get_logger(conf, name=None, log_to_console=False, log_route=None,
fmt="%(server)s: %(message)s", statsd_tail_prefix=None):
"""
Get the current system logger using config settings.
**Log config and defaults**::
log_facility = LOG_LOCAL0
log_level = INFO
log_name = swift
log_max_line_length = 0
log_udp_host = (disabled)
log_udp_port = logging.handlers.SYSLOG_UDP_PORT
log_address = /dev/log
log_statsd_host = (disabled)
log_statsd_port = 8125
log_statsd_default_sample_rate = 1.0
log_statsd_sample_rate_factor = 1.0
log_statsd_metric_prefix = (empty-string)
:param conf: Configuration dict to read settings from
:param name: This value is used to populate the ``server`` field in the log
format, as the prefix for statsd messages, and as the default
value for ``log_route``; defaults to the ``log_name`` value in
``conf``, if it exists, or to 'swift'.
:param log_to_console: Add handler which writes to console on stderr
:param log_route: Route for the logging, not emitted to the log, just used
to separate logging configurations; defaults to the value
of ``name`` or whatever ``name`` defaults to. This value
is used as the name attribute of the
``logging.LogAdapter`` that is returned.
:param fmt: Override log format
:param statsd_tail_prefix: tail prefix to pass to statsd client; if None
then the tail prefix defaults to the value of ``name``.
:return: an instance of ``LogAdapter``
"""
# note: log_name is typically specified in conf (i.e. defined by
# operators), whereas log_route is typically hard-coded in callers of
# get_logger (i.e. defined by developers)
if not conf:
conf = {}
if name is None:
name = conf.get('log_name', 'swift')
if not log_route:
log_route = name
logger = logging.getLogger(log_route)
logger.propagate = False
# all new handlers will get the same formatter
formatter = SwiftLogFormatter(
fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0)))
# get_logger will only ever add one SysLog Handler to a logger
if not hasattr(get_logger, 'handler4logger'):
get_logger.handler4logger = {}
if logger in get_logger.handler4logger:
logger.removeHandler(get_logger.handler4logger[logger])
# facility for this logger will be set by last call wins
facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
SysLogHandler.LOG_LOCAL0)
udp_host = conf.get('log_udp_host')
if udp_host:
udp_port = int(conf.get('log_udp_port',
logging.handlers.SYSLOG_UDP_PORT))
handler = ThreadSafeSysLogHandler(address=(udp_host, udp_port),
facility=facility)
else:
log_address = conf.get('log_address', '/dev/log')
handler = None
try:
mode = os.stat(log_address).st_mode
if stat.S_ISSOCK(mode):
handler = ThreadSafeSysLogHandler(address=log_address,
facility=facility)
except (OSError, socket.error) as e:
# If either /dev/log isn't a UNIX socket or it does not exist at
# all then py2 would raise an error
if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise
if handler is None:
# fallback to default UDP
handler = ThreadSafeSysLogHandler(facility=facility)
handler.setFormatter(formatter)
logger.addHandler(handler)
get_logger.handler4logger[logger] = handler
# setup console logging
if log_to_console or hasattr(get_logger, 'console_handler4logger'):
# remove pre-existing console handler for this logger
if not hasattr(get_logger, 'console_handler4logger'):
get_logger.console_handler4logger = {}
if logger in get_logger.console_handler4logger:
logger.removeHandler(get_logger.console_handler4logger[logger])
console_handler = logging.StreamHandler(sys.__stderr__)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
get_logger.console_handler4logger[logger] = console_handler
# set the level for the logger
logger.setLevel(
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
# Setup logger with a StatsD client if so configured
statsd_host = conf.get('log_statsd_host')
if statsd_host:
statsd_port = int(conf.get('log_statsd_port', 8125))
base_prefix = conf.get('log_statsd_metric_prefix', '')
default_sample_rate = float(conf.get(
'log_statsd_default_sample_rate', 1))
sample_rate_factor = float(conf.get(
'log_statsd_sample_rate_factor', 1))
if statsd_tail_prefix is None:
statsd_tail_prefix = name
logger.statsd_client = statsd_client.StatsdClient(
statsd_host, statsd_port, base_prefix, statsd_tail_prefix,
default_sample_rate, sample_rate_factor, logger=logger)
else:
logger.statsd_client = None
adapted_logger = LogAdapter(logger, name)
other_handlers = conf.get('log_custom_handlers', None)
if other_handlers:
log_custom_handlers = [s.strip() for s in other_handlers.split(',')
if s.strip()]
for hook in log_custom_handlers:
try:
mod, fnc = hook.rsplit('.', 1)
logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc)
logger_hook(conf, name, log_to_console, log_route, fmt,
logger, adapted_logger)
except (AttributeError, ImportError):
print('Error calling custom handler [%s]' % hook,
file=sys.stderr)
except ValueError:
print('Invalid custom handler format [%s]' % hook,
file=sys.stderr)
return adapted_logger |
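A hedged sketch of pulling a logger from a config dict. Assumptions: the function is importable from swift.common.utils, and a local syslog socket (or the UDP fallback) is acceptable in the environment where this runs.
from swift.common.utils import get_logger  # assumed import path

conf = {'log_name': 'my-daemon', 'log_level': 'DEBUG',
        'log_facility': 'LOG_LOCAL2'}
logger = get_logger(conf, log_to_console=True, log_route='my-daemon')
logger.debug('starting up')  # goes to syslog (or the UDP fallback) and stderr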
Log unhandled exceptions, close stdio, capture stdout and stderr.
:param logger: Logger object to use | def capture_stdio(logger, **kwargs):
"""
Log unhandled exceptions, close stdio, capture stdout and stderr.
:param logger: Logger object to use
"""
# log uncaught exceptions
sys.excepthook = lambda * exc_info: \
logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
# collect stdio file desc not in use for logging
stdio_files = [sys.stdin, sys.stdout, sys.stderr]
console_fds = [h.stream.fileno() for _junk, h in getattr(
get_logger, 'console_handler4logger', {}).items()]
stdio_files = [f for f in stdio_files if f.fileno() not in console_fds]
with open(os.devnull, 'r+b') as nullfile:
# close stdio (excludes fds open for logging)
for f in stdio_files:
# some platforms throw an error when attempting an stdin flush
try:
f.flush()
except IOError:
pass
try:
os.dup2(nullfile.fileno(), f.fileno())
except OSError:
pass
# redirect stdio
if kwargs.pop('capture_stdout', True):
sys.stdout = LoggerFileObject(logger)
if kwargs.pop('capture_stderr', True):
sys.stderr = LoggerFileObject(logger, 'STDERR') |
Make a line for logging that matches the documented log line format
for backend servers.
:param req: the request.
:param res: the response.
:param trans_time: the time the request took to complete, a float.
:param additional_info: a string to log at the end of the line
:returns: a properly formatted line for logging. | def get_log_line(req, res, trans_time, additional_info, fmt,
anonymization_method, anonymization_salt):
"""
Make a line for logging that matches the documented log line format
for backend servers.
:param req: the request.
:param res: the response.
:param trans_time: the time the request took to complete, a float.
:param additional_info: a string to log at the end of the line
:returns: a properly formatted line for logging.
"""
policy_index = get_policy_index(req.headers, res.headers)
if req.path.startswith('/'):
disk, partition, account, container, obj = split_path(req.path, 0, 5,
True)
else:
disk, partition, account, container, obj = (None, ) * 5
replacements = {
'remote_addr': StrAnonymizer(req.remote_addr, anonymization_method,
anonymization_salt),
'time': StrFormatTime(time.time()),
'method': req.method,
'path': StrAnonymizer(req.path, anonymization_method,
anonymization_salt),
'disk': disk,
'partition': partition,
'account': StrAnonymizer(account, anonymization_method,
anonymization_salt),
'container': StrAnonymizer(container, anonymization_method,
anonymization_salt),
'object': StrAnonymizer(obj, anonymization_method,
anonymization_salt),
'status': res.status.split()[0],
'content_length': res.content_length,
'referer': StrAnonymizer(req.referer, anonymization_method,
anonymization_salt),
'txn_id': req.headers.get('x-trans-id'),
'user_agent': StrAnonymizer(req.user_agent, anonymization_method,
anonymization_salt),
'trans_time': trans_time,
'additional_info': additional_info,
'pid': os.getpid(),
'policy_index': policy_index,
}
return LogStringFormatter(default='-').format(fmt, **replacements) |
Returns the appropriate index of the storage policy for the request from
a proxy server
:param req_headers: dict of the request headers.
:param res_headers: dict of the response headers.
:returns: string index of storage policy, or None | def get_policy_index(req_headers, res_headers):
"""
Returns the appropriate index of the storage policy for the request from
a proxy server
:param req_headers: dict of the request headers.
:param res_headers: dict of the response headers.
:returns: string index of storage policy, or None
"""
header = 'X-Backend-Storage-Policy-Index'
policy_index = res_headers.get(header, req_headers.get(header))
if isinstance(policy_index, six.binary_type) and not six.PY2:
policy_index = policy_index.decode('ascii')
return str(policy_index) if policy_index is not None else None |
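A tiny hedged sketch; plain dicts stand in for the header mappings, and the response header wins when both are present. Import path assumed:
from swift.common.utils import get_policy_index  # assumed import path

get_policy_index({'X-Backend-Storage-Policy-Index': '1'}, {})   # -> '1'
get_policy_index({'X-Backend-Storage-Policy-Index': '1'},
                 {'X-Backend-Storage-Policy-Index': '2'})        # -> '2'
get_policy_index({}, {})                                         # -> None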
Encode up to three timestamps into a string. Unlike a Timestamp object, the
encoded string does NOT use fixed-width fields and consequently no
relative chronology of the timestamps can be inferred from lexicographic
sorting of encoded timestamp strings.
The format of the encoded string is:
<t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
otherwise the time offsets for t2 and t3 are appended. If explicit is True
then the offsets for t2 and t3 are always appended even if zero.
Note: any offset value in t1 will be preserved, but offsets on t2 and t3
are not preserved. In the anticipated use cases for this method (and the
inverse decode_timestamps method) the timestamps passed as t2 and t3 are
not expected to have offsets as they will be timestamps associated with a
POST request. In the case where the encoding is used in a container objects
table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
content type and metadata times (if different from the data file) i.e.
correspond to POST timestamps. In the case where the encoded form is used
in a .meta file name, t1 and t2 both correspond to POST timestamps. | def encode_timestamps(t1, t2=None, t3=None, explicit=False):
"""
Encode up to three timestamps into a string. Unlike a Timestamp object, the
encoded string does NOT use fixed-width fields and consequently no
relative chronology of the timestamps can be inferred from lexicographic
sorting of encoded timestamp strings.
The format of the encoded string is:
<t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
otherwise the time offsets for t2 and t3 are appended. If explicit is True
then the offsets for t2 and t3 are always appended even if zero.
Note: any offset value in t1 will be preserved, but offsets on t2 and t3
are not preserved. In the anticipated use cases for this method (and the
inverse decode_timestamps method) the timestamps passed as t2 and t3 are
not expected to have offsets as they will be timestamps associated with a
POST request. In the case where the encoding is used in a container objects
table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
content type and metadata times (if different from the data file) i.e.
correspond to POST timestamps. In the case where the encoded form is used
in a .meta file name, t1 and t2 both correspond to POST timestamps.
"""
form = '{0}'
values = [t1.short]
if t2 is not None:
t2_t1_delta = t2.raw - t1.raw
explicit = explicit or (t2_t1_delta != 0)
values.append(t2_t1_delta)
if t3 is not None:
t3_t2_delta = t3.raw - t2.raw
explicit = explicit or (t3_t2_delta != 0)
values.append(t3_t2_delta)
if explicit:
form += '{1:+x}'
if t3 is not None:
form += '{2:+x}'
return form.format(*values) |
Parses a string of the form generated by encode_timestamps and returns
a tuple of the three component timestamps. If explicit is False, component
timestamps that are not explicitly encoded will be assumed to have zero
delta from the previous component and therefore take the value of the
previous component. If explicit is True, component timestamps that are
not explicitly encoded will be returned with value None. | def decode_timestamps(encoded, explicit=False):
"""
Parses a string of the form generated by encode_timestamps and returns
a tuple of the three component timestamps. If explicit is False, component
timestamps that are not explicitly encoded will be assumed to have zero
delta from the previous component and therefore take the value of the
previous component. If explicit is True, component timestamps that are
not explicitly encoded will be returned with value None.
"""
# TODO: some tests, e.g. in test_replicator, put float timestamps values
# into container db's, hence this defensive check, but in real world
# this may never happen.
if not isinstance(encoded, six.string_types):
ts = Timestamp(encoded)
return ts, ts, ts
parts = []
signs = []
pos_parts = encoded.split('+')
for part in pos_parts:
# parse time components and their signs
# e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
neg_parts = part.split('-')
parts = parts + neg_parts
signs = signs + [1] + [-1] * (len(neg_parts) - 1)
t1 = Timestamp(parts[0])
t2 = t3 = None
if len(parts) > 1:
t2 = t1
delta = signs[1] * int(parts[1], 16)
# if delta = 0 we want t2 = t3 = t1 in order to
# preserve any offset in t1 - only construct a distinct
# timestamp if there is a non-zero delta.
if delta:
t2 = Timestamp((t1.raw + delta) * PRECISION)
elif not explicit:
t2 = t1
if len(parts) > 2:
t3 = t2
delta = signs[2] * int(parts[2], 16)
if delta:
t3 = Timestamp((t2.raw + delta) * PRECISION)
elif not explicit:
t3 = t2
return t1, t2, t3 |
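A hedged sketch (illustrative only) of the inverse operation, decoding the string produced in the example above:

from swift.common.utils import Timestamp, decode_timestamps

t1, t2, t3 = decode_timestamps('0000000001.00000+186a0')
# t1 == Timestamp(1), t2 == Timestamp(2); t3 takes the value of t2 because
# no third delta was encoded and explicit defaults to False
t1, t2, t3 = decode_timestamps('0000000001.00000+186a0', explicit=True)
# t3 is now None because it was not explicitly encoded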
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string | def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return Timestamp(timestamp).normal |
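For illustration (not part of the original source), a few sample normalizations:

from swift.common.utils import normalize_timestamp

normalize_timestamp(1)                     # '0000000001.00000'
normalize_timestamp('1234567890.12345')    # '1234567890.12345'
normalize_timestamp(1234567890)            # '1234567890.00000'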
Convert a last modified date (like you'd get from a container listing,
e.g. 2014-02-28T23:22:36.698390) to a float. | def last_modified_date_to_timestamp(last_modified_date_str):
"""
Convert a last modified date (like you'd get from a container listing,
e.g. 2014-02-28T23:22:36.698390) to a float.
"""
return Timestamp.from_isoformat(last_modified_date_str) |
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps less than 0000000000 are raised to
0000000000 and values greater than November 20th, 2286 at
17:46:39 UTC will be capped at that date and time, resulting in
no return value exceeding 9999999999.99999 (or 9999999999 if
using low-precision).
This cap is because the expirer is already working through a
sorted list of strings that were all a length of 10. Adding
another digit would mess up the sort and cause the expirer to
break from processing early. By 2286, this problem will need to
be fixed, probably by creating an additional .expiring_objects
account to work from with 11 (or more) digit container names.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string | def normalize_delete_at_timestamp(timestamp, high_precision=False):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps less than 0000000000 are raised to
0000000000 and values greater than November 20th, 2286 at
17:46:39 UTC will be capped at that date and time, resulting in
no return value exceeding 9999999999.99999 (or 9999999999 if
using low-precision).
This cap is because the expirer is already working through a
sorted list of strings that were all a length of 10. Adding
another digit would mess up the sort and cause the expirer to
break from processing early. By 2286, this problem will need to
be fixed, probably by creating an additional .expiring_objects
account to work from with 11 (or more) digit container names.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
fmt = '%016.5f' if high_precision else '%010d'
return fmt % min(max(0, float(timestamp)), 9999999999.99999) |
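An illustrative sketch (not from the original source) of the low- and high-precision forms and the 2286 cap:

from swift.common.utils import normalize_delete_at_timestamp

normalize_delete_at_timestamp(1234567890.12345)        # '1234567890'
normalize_delete_at_timestamp(1234567890.12345, True)  # '1234567890.12345'
normalize_delete_at_timestamp(99999999999)             # '9999999999' (capped)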
Sets the directory from which swift config files will be read. If the given
directory differs from that already set then the swift.conf file in the new
directory will be validated and storage policies will be reloaded from the
new swift.conf file.
:param swift_dir: non-default directory to read swift.conf from | def set_swift_dir(swift_dir):
"""
Sets the directory from which swift config files will be read. If the given
directory differs from that already set then the swift.conf file in the new
directory will be validated and storage policies will be reloaded from the
new swift.conf file.
:param swift_dir: non-default directory to read swift.conf from
"""
global HASH_PATH_SUFFIX
global HASH_PATH_PREFIX
global SWIFT_CONF_FILE
if (swift_dir is not None and
swift_dir != os.path.dirname(SWIFT_CONF_FILE)):
SWIFT_CONF_FILE = os.path.join(
swift_dir, os.path.basename(SWIFT_CONF_FILE))
HASH_PATH_PREFIX = b''
HASH_PATH_SUFFIX = b''
validate_configuration()
return True
return False |
A generator returning lines from a file starting with the last line,
then the second last line, etc. i.e., it reads lines backwards.
Stops when the first line (if any) is read.
This is useful when searching for recent activity in very
large files.
:param f: file object to read
:param blocksize: number of characters to go backwards at each block
"""
A generator returning lines from a file starting with the last line,
then the second last line, etc. i.e., it reads lines backwards.
Stops when the first line (if any) is read.
This is useful when searching for recent activity in very
large files.
:param f: file object to read
:param blocksize: number of characters to go backwards at each block
"""
f.seek(0, os.SEEK_END)
if f.tell() == 0:
return
last_row = b''
while f.tell() != 0:
try:
f.seek(-blocksize, os.SEEK_CUR)
except IOError:
blocksize = f.tell()
f.seek(-blocksize, os.SEEK_CUR)
block = f.read(blocksize)
f.seek(-blocksize, os.SEEK_CUR)
rows = block.split(b'\n')
rows[-1] = rows[-1] + last_row
while rows:
last_row = rows.pop(-1)
if rows and last_row:
yield last_row
yield last_row |
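A small self-contained sketch (not part of the original source) reading a temporary file backwards:

import tempfile
from swift.common.utils import backward

with tempfile.TemporaryFile() as f:
    f.write(b'first\nsecond\nthird\n')
    f.flush()
    print(list(backward(f)))   # -> [b'third', b'second', b'first']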
Install the appropriate Eventlet monkey patches. | def eventlet_monkey_patch():
"""
Install the appropriate Eventlet monkey patches.
"""
# NOTE(sileht):
# monkey-patching thread is required by python-keystoneclient;
# monkey-patching select is required by oslo.messaging pika driver
# if thread is monkey-patched.
eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
thread=True) |
Apply all swift monkey patching consistently in one place. | def monkey_patch():
"""
Apply all swift monkey patching consistently in one place.
"""
eventlet_monkey_patch()
logging_monkey_patch() |
Check to see whether or not a filesystem has the given amount of space
free. Unlike fallocate(), this does not reserve any space.
:param fs_path_or_fd: path to a file or directory on the filesystem, or an
open file descriptor; if a directory, typically the path to the
filesystem's mount point
:param space_needed: minimum bytes or percentage of free space
:param is_percent: if True, then space_needed is treated as a percentage
of the filesystem's capacity; if False, space_needed is a number of
free bytes.
:returns: True if the filesystem has at least that much free space,
False otherwise
:raises OSError: if fs_path does not exist | def fs_has_free_space(fs_path_or_fd, space_needed, is_percent):
"""
Check to see whether or not a filesystem has the given amount of space
free. Unlike fallocate(), this does not reserve any space.
:param fs_path_or_fd: path to a file or directory on the filesystem, or an
open file descriptor; if a directory, typically the path to the
filesystem's mount point
:param space_needed: minimum bytes or percentage of free space
:param is_percent: if True, then space_needed is treated as a percentage
of the filesystem's capacity; if False, space_needed is a number of
free bytes.
:returns: True if the filesystem has at least that much free space,
False otherwise
:raises OSError: if fs_path does not exist
"""
if isinstance(fs_path_or_fd, int):
st = os.fstatvfs(fs_path_or_fd)
else:
st = os.statvfs(fs_path_or_fd)
free_bytes = st.f_frsize * st.f_bavail
if is_percent:
size_bytes = st.f_frsize * st.f_blocks
free_percent = float(free_bytes) / float(size_bytes) * 100
return free_percent >= space_needed
else:
return free_bytes >= space_needed |
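For example (illustrative only; '/tmp' is just an assumed existing path):

from swift.common.utils import fs_has_free_space

# at least 10 MiB free on the filesystem holding /tmp
fs_has_free_space('/tmp', 10 * 1024 * 1024, is_percent=False)
# at least 5% of that filesystem's capacity free
fs_has_free_space('/tmp', 5, is_percent=True)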
Pre-allocate disk space for a file.
This function can be disabled by calling disable_fallocate(). If no
suitable C function is available in libc, this function is a no-op.
:param fd: file descriptor
:param size: size to allocate (in bytes) | def fallocate(fd, size, offset=0):
"""
Pre-allocate disk space for a file.
This function can be disabled by calling disable_fallocate(). If no
suitable C function is available in libc, this function is a no-op.
:param fd: file descriptor
:param size: size to allocate (in bytes)
"""
global _fallocate_enabled
if not _fallocate_enabled:
return
if size < 0:
size = 0 # Done historically; not really sure why
if size >= (1 << 63):
raise ValueError('size must be less than 2 ** 63')
if offset < 0:
raise ValueError('offset must be non-negative')
if offset >= (1 << 63):
raise ValueError('offset must be less than 2 ** 63')
# Make sure there's some (configurable) amount of free space in
# addition to the number of bytes we're allocating.
if FALLOCATE_RESERVE:
st = os.fstatvfs(fd)
free = st.f_frsize * st.f_bavail - size
if FALLOCATE_IS_PERCENT:
free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100
if float(free) <= float(FALLOCATE_RESERVE):
raise OSError(
errno.ENOSPC,
'FALLOCATE_RESERVE fail %g <= %g' %
(free, FALLOCATE_RESERVE))
if _sys_fallocate.available:
# Parameters are (fd, mode, offset, length).
#
# mode=FALLOC_FL_KEEP_SIZE pre-allocates invisibly (without
# affecting the reported file size).
ret = _sys_fallocate(
fd, FALLOC_FL_KEEP_SIZE, ctypes.c_uint64(offset),
ctypes.c_uint64(size))
err = ctypes.get_errno()
elif _sys_posix_fallocate.available:
# Parameters are (fd, offset, length).
ret = _sys_posix_fallocate(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(size))
err = ctypes.get_errno()
else:
# No suitable fallocate-like function is in our libc. Warn about it,
# but just once per process, and then do nothing.
global _fallocate_warned_about_missing
if not _fallocate_warned_about_missing:
logging.warning("Unable to locate fallocate, posix_fallocate in "
"libc. Leaving as a no-op.")
_fallocate_warned_about_missing = True
return
if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
errno.EINVAL):
raise OSError(err, 'Unable to fallocate(%s)' % size) |
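A hedged sketch (not part of the original source) pre-allocating space for a scratch file; on filesystems without fallocate support the call is effectively a no-op:

import os
import tempfile
from swift.common.utils import fallocate

fd, path = tempfile.mkstemp()
try:
    # reserve 1 MiB invisibly; os.stat(path).st_size stays 0
    fallocate(fd, 1024 * 1024)
finally:
    os.close(fd)
    os.unlink(path)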
De-allocate disk space in the middle of a file.
:param fd: file descriptor
:param offset: index of first byte to de-allocate
:param length: number of bytes to de-allocate | def punch_hole(fd, offset, length):
"""
De-allocate disk space in the middle of a file.
:param fd: file descriptor
:param offset: index of first byte to de-allocate
:param length: number of bytes to de-allocate
"""
if offset < 0:
raise ValueError('offset must be non-negative')
if offset >= (1 << 63):
raise ValueError('offset must be less than 2 ** 63')
if length <= 0:
raise ValueError('length must be positive')
if length >= (1 << 63):
raise ValueError('length must be less than 2 ** 63')
if _sys_fallocate.available:
# Parameters are (fd, mode, offset, length).
ret = _sys_fallocate(
fd,
FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
ctypes.c_uint64(offset),
ctypes.c_uint64(length))
err = ctypes.get_errno()
if ret and err:
mode_str = "FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE"
raise OSError(err, "Unable to fallocate(%d, %s, %d, %d)" % (
fd, mode_str, offset, length))
else:
raise OSError(errno.ENOTSUP,
'No suitable C function found for hole punching') |
Sync modified file data and metadata to disk.
:param fd: file descriptor | def fsync(fd):
"""
Sync modified file data and metadata to disk.
:param fd: file descriptor
"""
if hasattr(fcntl, 'F_FULLSYNC'):
try:
fcntl.fcntl(fd, fcntl.F_FULLSYNC)
except IOError as e:
raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd)
else:
os.fsync(fd) |
Sync modified file data to disk.
:param fd: file descriptor | def fdatasync(fd):
"""
Sync modified file data to disk.
:param fd: file descriptor
"""
try:
os.fdatasync(fd)
except AttributeError:
fsync(fd) |
Sync directory entries to disk.
:param dirpath: Path to the directory to be synced. | def fsync_dir(dirpath):
"""
Sync directory entries to disk.
:param dirpath: Path to the directory to be synced.
"""
dirfd = None
try:
dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY)
fsync(dirfd)
except OSError as err:
if err.errno == errno.ENOTDIR:
# Raise error if someone calls fsync_dir on a non-directory
raise
logging.warning('Unable to perform fsync() on directory %(dir)s:'
' %(err)s',
{'dir': dirpath, 'err': os.strerror(err.errno)})
finally:
if dirfd:
os.close(dirfd) |
Ensures the path is a directory or makes it if not. Errors if the path
exists but is a file or on permissions failure.
:param path: path to create | def mkdirs(path):
"""
Ensures the path is a directory or makes it if not. Errors if the path
exists but is a file or on permissions failure.
:param path: path to create
"""
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST or not os.path.isdir(path):
raise |
Same as os.makedirs() except that this method returns the number of
new directories that had to be created.
Also, this does not raise an error if target directory already exists.
This behaviour is similar to Python 3.x's os.makedirs() called with
exist_ok=True. Also similar to swift.common.utils.mkdirs()
https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212 | def makedirs_count(path, count=0):
"""
Same as os.makedirs() except that this method returns the number of
new directories that had to be created.
Also, this does not raise an error if target directory already exists.
This behaviour is similar to Python 3.x's os.makedirs() called with
exist_ok=True. Also similar to swift.common.utils.mkdirs()
https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212
"""
head, tail = os.path.split(path)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
count = makedirs_count(head, count)
if tail == os.path.curdir:
return
try:
os.mkdir(path)
except OSError as e:
# EEXIST may also be raised if path exists as a file
# Do not let that pass.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
else:
count += 1
return count |
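An illustrative sketch (not from the original source) showing the returned count:

import os
import tempfile
from swift.common.utils import makedirs_count

base = tempfile.mkdtemp()
target = os.path.join(base, 'a', 'b', 'c')
makedirs_count(target)   # 3: 'a', 'b' and 'c' had to be created
makedirs_count(target)   # 0: everything already exists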
Attempt to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
The containing directory of 'new' and of all newly created directories are
fsync'd by default. This _will_ come at a performance penalty. In cases
where these additional fsyncs are not necessary, it is expected that the
caller of renamer() turn it off explicitly.
:param old: old path to be renamed
:param new: new path to be renamed to
:param fsync: fsync on containing directory of new and also all
the newly created directories. | def renamer(old, new, fsync=True):
"""
Attempt to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
The containing directory of 'new' and of all newly created directories are
fsync'd by default. This _will_ come at a performance penalty. In cases
where these additional fsyncs are not necessary, it is expected that the
caller of renamer() turn it off explicitly.
:param old: old path to be renamed
:param new: new path to be renamed to
:param fsync: fsync on containing directory of new and also all
the newly created directories.
"""
dirpath = os.path.dirname(new)
try:
count = makedirs_count(dirpath)
os.rename(old, new)
except OSError:
count = makedirs_count(dirpath)
os.rename(old, new)
if fsync:
# If count=0, no new directories were created. But we still need to
# fsync leaf dir after os.rename().
# If count>0, starting from leaf dir, fsync parent dirs of all
# directories created by makedirs_count()
for i in range(0, count + 1):
fsync_dir(dirpath)
dirpath = os.path.dirname(dirpath) |
Creates a link to the file descriptor at the specified target_path. This
method does not close the fd for you. Unlike rename, linkat() cannot
overwrite target_path if it exists, so we unlink and try again.
Attempts to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
:param fd: File descriptor to be linked
:param target_path: Path in filesystem where fd is to be linked
:param dirs_created: Number of newly created directories that needs to
be fsync'd.
:param retries: number of retries to make
:param fsync: fsync on containing directory of target_path and also all
the newly created directories. | def link_fd_to_path(fd, target_path, dirs_created=0, retries=2, fsync=True):
"""
Creates a link to the file descriptor at the specified target_path. This
method does not close the fd for you. Unlike rename, linkat() cannot
overwrite target_path if it exists, so we unlink and try again.
Attempts to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
:param fd: File descriptor to be linked
:param target_path: Path in filesystem where fd is to be linked
:param dirs_created: Number of newly created directories that needs to
be fsync'd.
:param retries: number of retries to make
:param fsync: fsync on containing directory of target_path and also all
the newly created directories.
"""
dirpath = os.path.dirname(target_path)
for _junk in range(0, retries):
try:
linkat(linkat.AT_FDCWD, "/proc/self/fd/%d" % (fd),
linkat.AT_FDCWD, target_path, linkat.AT_SYMLINK_FOLLOW)
break
except IOError as err:
if err.errno == errno.ENOENT:
dirs_created = makedirs_count(dirpath)
elif err.errno == errno.EEXIST:
try:
os.unlink(target_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
raise
if fsync:
for i in range(0, dirs_created + 1):
fsync_dir(dirpath)
dirpath = os.path.dirname(dirpath) |
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises ValueError: if given an invalid device or partition | def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises ValueError: if given an invalid device or partition
"""
if not device or '/' in device or device in ['.', '..']:
raise ValueError('Invalid device: %s' % quote(device or ''))
if not partition or '/' in partition or partition in ['.', '..']:
raise ValueError('Invalid partition: %s' % quote(partition or '')) |
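For example (illustrative only):

from swift.common.utils import validate_device_partition

validate_device_partition('sdb1', '1023')       # returns None; both valid
try:
    validate_device_partition('../sdb1', '1023')
except ValueError:
    pass  # rejected: device contains a path separator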
Returns a decorator that logs timing events or errors for public methods in
swift's wsgi server controllers, based on response code. | def timing_stats(**dec_kwargs):
"""
Returns a decorator that logs timing events or errors for public methods in
swift's wsgi server controllers, based on response code.
"""
def decorating_func(func):
method = func.__name__
@functools.wraps(func)
def _timing_stats(ctrl, *args, **kwargs):
start_time = time.time()
resp = func(ctrl, *args, **kwargs)
# .timing is for successful responses *or* error codes that are
# not Swift's fault. For example, 500 is definitely the server's
# fault, but 412 is an error code (4xx are all errors) that is
# due to a header the client sent.
#
# .errors.timing is for failures that *are* Swift's fault.
# Examples include 507 for an unmounted drive or 500 for an
# unhandled exception.
if not is_server_error(resp.status_int):
ctrl.logger.timing_since(method + '.timing',
start_time, **dec_kwargs)
else:
ctrl.logger.timing_since(method + '.errors.timing',
start_time, **dec_kwargs)
return resp
return _timing_stats
return decorating_func |
Returns a decorator that logs timing events or errors for public methods in
MemcacheRing class, such as memcached set, get, etc.
"""
Returns a decorator that logs timing events or errors for public methods in
MemcacheRing class, such as memcached set, get, etc.
"""
def decorating_func(func):
method = func.__name__
@functools.wraps(func)
def _timing_stats(cache, *args, **kwargs):
start_time = time.time()
result = func(cache, *args, **kwargs)
cache.logger.timing_since(
'memcached.' + method + '.timing', start_time, **dec_kwargs)
return result
return _timing_stats
return decorating_func |
Checks whether poll is available and falls back
on select if it isn't.
Note about epoll:
Review: https://review.opendev.org/#/c/18806/
There was a problem where once out of every 30 quadrillion
connections, a coroutine wouldn't wake up when the client
closed its end. Epoll was not reporting the event or it was
getting swallowed somewhere. Then when that file descriptor
was re-used, eventlet would freak right out because it still
thought it was waiting for activity from it in some other coro.
Another note about epoll: it's hard to use when forking. epoll works
like so:
* create an epoll instance: ``efd = epoll_create(...)``
* register file descriptors of interest with
``epoll_ctl(efd, EPOLL_CTL_ADD, fd, ...)``
* wait for events with ``epoll_wait(efd, ...)``
If you fork, you and all your child processes end up using the same
epoll instance, and everyone becomes confused. It is possible to use
epoll and fork and still have a correct program as long as you do the
right things, but eventlet doesn't do those things. Really, it can't
even try to do those things since it doesn't get notified of forks.
In contrast, both poll() and select() specify the set of interesting
file descriptors with each call, so there's no problem with forking.
As eventlet monkey patching is now done before calling get_hub() in wsgi.py,
if we use 'import select' we get the eventlet version, but since version
0.20.0 eventlet removed the select.poll() function from patched select (see:
http://eventlet.net/doc/changelog.html and
https://github.com/eventlet/eventlet/commit/614a20462).
We use the eventlet.patcher.original function to get the python select
module to test if poll() is available on the platform.
"""
Checks whether poll is available and falls back
on select if it isn't.
Note about epoll:
Review: https://review.opendev.org/#/c/18806/
There was a problem where once out of every 30 quadrillion
connections, a coroutine wouldn't wake up when the client
closed its end. Epoll was not reporting the event or it was
getting swallowed somewhere. Then when that file descriptor
was re-used, eventlet would freak right out because it still
thought it was waiting for activity from it in some other coro.
Another note about epoll: it's hard to use when forking. epoll works
like so:
* create an epoll instance: ``efd = epoll_create(...)``
* register file descriptors of interest with
``epoll_ctl(efd, EPOLL_CTL_ADD, fd, ...)``
* wait for events with ``epoll_wait(efd, ...)``
If you fork, you and all your child processes end up using the same
epoll instance, and everyone becomes confused. It is possible to use
epoll and fork and still have a correct program as long as you do the
right things, but eventlet doesn't do those things. Really, it can't
even try to do those things since it doesn't get notified of forks.
In contrast, both poll() and select() specify the set of interesting
file descriptors with each call, so there's no problem with forking.
As eventlet monkey patching is now done before calling get_hub() in wsgi.py,
if we use 'import select' we get the eventlet version, but since version
0.20.0 eventlet removed the select.poll() function from patched select (see:
http://eventlet.net/doc/changelog.html and
https://github.com/eventlet/eventlet/commit/614a20462).
We use the eventlet.patcher.original function to get the python select
module to test if poll() is available on the platform.
"""
try:
select = eventlet.patcher.original('select')
if hasattr(select, "poll"):
return "poll"
return "selects"
except ImportError:
return None |
Sets the userid/groupid of the current process, get session leader, etc.
:param user: User name to change privileges to | def drop_privileges(user):
"""
Sets the userid/groupid of the current process, get session leader, etc.
:param user: User name to change privileges to
"""
if os.geteuid() == 0:
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
os.setgroups(groups)
user = pwd.getpwnam(user)
os.setgid(user[3])
os.setuid(user[2])
os.environ['HOME'] = user[5] |
Parse standard swift server/daemon options with optparse.OptionParser.
:param parser: OptionParser to use. If not sent one will be created.
:param once: Boolean indicating the "once" option is available
:param test_config: Boolean indicating the "test-config" option is
available
:param test_args: Override sys.argv; used in testing
:returns: Tuple of (config, options); config is an absolute path to the
config file, options is the parser options as a dictionary.
:raises SystemExit: First arg (CONFIG) is required, file must exist | def parse_options(parser=None, once=False, test_config=False, test_args=None):
"""Parse standard swift server/daemon options with optparse.OptionParser.
:param parser: OptionParser to use. If not sent one will be created.
:param once: Boolean indicating the "once" option is available
:param test_config: Boolean indicating the "test-config" option is
available
:param test_args: Override sys.argv; used in testing
:returns: Tuple of (config, options); config is an absolute path to the
config file, options is the parser options as a dictionary.
:raises SystemExit: First arg (CONFIG) is required, file must exist
"""
if not parser:
parser = OptionParser(usage="%prog CONFIG [options]")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="log to console")
if once:
parser.add_option("-o", "--once", default=False, action="store_true",
help="only run one pass of daemon")
if test_config:
parser.add_option("-t", "--test-config",
default=False, action="store_true",
help="exit after loading and validating config; "
"do not run the daemon")
# if test_args is None, optparse will use sys.argv[:1]
options, args = parser.parse_args(args=test_args)
if not args:
parser.print_usage()
print("Error: missing config path argument")
sys.exit(1)
config = os.path.abspath(args.pop(0))
if not os.path.exists(config):
parser.print_usage()
print("Error: unable to locate %s" % config)
sys.exit(1)
extra_args = []
# if any named options appear in remaining args, set the option to True
for arg in args:
if arg in options.__dict__:
setattr(options, arg, True)
else:
extra_args.append(arg)
options = vars(options)
if extra_args:
options['extra_args'] = extra_args
return config, options |
Get the ip address and port that should be used for the given
``node_dict``.
If ``use_replication`` is True then the replication ip address and port are
returned.
If ``use_replication`` is False (the default) and the ``node`` dict has an
item with key ``use_replication`` then that item's value will determine if
the replication ip address and port are returned.
If neither ``use_replication`` nor ``node_dict['use_replication']``
indicates otherwise then the normal ip address and port are returned.
:param node_dict: a dict describing a node
:param use_replication: if True then the replication ip address and port
are returned.
:return: a tuple of (ip address, port) | def select_ip_port(node_dict, use_replication=False):
"""
Get the ip address and port that should be used for the given
``node_dict``.
If ``use_replication`` is True then the replication ip address and port are
returned.
If ``use_replication`` is False (the default) and the ``node`` dict has an
item with key ``use_replication`` then that item's value will determine if
the replication ip address and port are returned.
If neither ``use_replication`` nor ``node_dict['use_replication']``
indicates otherwise then the normal ip address and port are returned.
:param node_dict: a dict describing a node
:param use_replication: if True then the replication ip address and port
are returned.
:return: a tuple of (ip address, port)
"""
if use_replication or node_dict.get('use_replication', False):
node_ip = node_dict['replication_ip']
node_port = node_dict['replication_port']
else:
node_ip = node_dict['ip']
node_port = node_dict['port']
return node_ip, node_port |
Get a string representation of a node's location.
:param node_dict: a dict describing a node
:param replication: if True then the replication ip address and port are
used, otherwise the normal ip address and port are used.
:return: a string of the form <ip address>:<port>/<device> | def node_to_string(node_dict, replication=False):
"""
Get a string representation of a node's location.
:param node_dict: a dict describing a node
:param replication: if True then the replication ip address and port are
used, otherwise the normal ip address and port are used.
:return: a string of the form <ip address>:<port>/<device>
"""
node_ip, node_port = select_ip_port(node_dict, use_replication=replication)
if ':' in node_ip:
# IPv6
node_ip = '[%s]' % node_ip
return '{}:{}/{}'.format(node_ip, node_port, node_dict['device']) |
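An illustrative sketch (not from the original source) with a hand-built node dict:

from swift.common.utils import node_to_string

node = {'ip': '10.0.0.1', 'port': 6200,
        'replication_ip': '10.1.0.1', 'replication_port': 6210,
        'device': 'sdb1'}
node_to_string(node)                    # '10.0.0.1:6200/sdb1'
node_to_string(node, replication=True)  # '10.1.0.1:6210/sdb1'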
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory | def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash) |
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string | def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account if isinstance(account, six.binary_type)
else account.encode('utf8')]
if container:
paths.append(container if isinstance(container, six.binary_type)
else container.encode('utf8'))
if object:
paths.append(object if isinstance(object, six.binary_type)
else object.encode('utf8'))
if raw_digest:
return md5(HASH_PATH_PREFIX + b'/' + b'/'.join(paths)
+ HASH_PATH_SUFFIX, usedforsecurity=False).digest()
else:
return md5(HASH_PATH_PREFIX + b'/' + b'/'.join(paths)
+ HASH_PATH_SUFFIX, usedforsecurity=False).hexdigest() |
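A hedged sketch (not part of the original source); the actual hash value depends on the cluster's hash path prefix/suffix from swift.conf, so only the shape of the result is shown:

from swift.common.utils import hash_path, storage_directory

name_hash = hash_path('AUTH_test', 'cont', 'obj')
storage_directory('objects', 1023, name_hash)
# -> 'objects/1023/<last three chars of name_hash>/<name_hash>'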
This allows the caller to make a list of things with indexes, where the
first item (zero indexed) is just the bare base string, and subsequent
indexes are appended '-1', '-2', etc.
e.g.::
'lock', None => 'lock'
'lock', 0 => 'lock'
'lock', 1 => 'lock-1'
'object', 2 => 'object-2'
:param base: a string, the base string; when ``index`` is 0 (or None) this
is the identity function.
:param index: a digit, typically an integer (or None); for values other
than 0 or None this digit is appended to the base string
separated by a hyphen. | def get_zero_indexed_base_string(base, index):
"""
This allows the caller to make a list of things with indexes, where the
first item (zero indexed) is just the bare base string, and subsequent
indexes are appended '-1', '-2', etc.
e.g.::
'lock', None => 'lock'
'lock', 0 => 'lock'
'lock', 1 => 'lock-1'
'object', 2 => 'object-2'
:param base: a string, the base string; when ``index`` is 0 (or None) this
is the identity function.
:param index: a digit, typically an integer (or None); for values other
than 0 or None this digit is appended to the base string
separated by a hyphen.
"""
if index == 0 or index is None:
return_string = base
else:
return_string = base + "-%d" % int(index)
return return_string |
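For example (illustrative only):

from swift.common.utils import get_zero_indexed_base_string

get_zero_indexed_base_string('.lock', None)  # '.lock'
get_zero_indexed_base_string('.lock', 0)     # '.lock'
get_zero_indexed_base_string('.lock', 2)     # '.lock-2'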
Context manager that acquires a lock on a directory. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
For exclusive locking, a file or directory has to be opened in write mode.
Python doesn't allow directories to be opened in write mode, so we work
around this by locking a hidden file in the directory.
:param directory: directory to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
:param timeout_class: The class of the exception to raise if the
lock cannot be granted within the timeout. Will be
constructed as timeout_class(timeout, lockpath). Default:
LockTimeout
:param limit: The maximum number of locks that may be held concurrently on
the same directory at the time this method is called. Note that this
limit is only applied during the current call to this method and does
not prevent subsequent calls giving a larger limit. Defaults to 1.
:param name: A string to distinguish different types of locks in a
directory
:raises TypeError: if limit is not an int.
:raises ValueError: if limit is less than 1. | def lock_path(directory, timeout=None, timeout_class=None,
limit=1, name=None):
"""
Context manager that acquires a lock on a directory. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
For exclusive locking, a file or directory has to be opened in write mode.
Python doesn't allow directories to be opened in write mode, so we work
around this by locking a hidden file in the directory.
:param directory: directory to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
:param timeout_class: The class of the exception to raise if the
lock cannot be granted within the timeout. Will be
constructed as timeout_class(timeout, lockpath). Default:
LockTimeout
:param limit: The maximum number of locks that may be held concurrently on
the same directory at the time this method is called. Note that this
limit is only applied during the current call to this method and does
not prevent subsequent calls giving a larger limit. Defaults to 1.
:param name: A string to distinguish different types of locks in a
directory
:raises TypeError: if limit is not an int.
:raises ValueError: if limit is less than 1.
"""
if timeout is None:
timeout = DEFAULT_LOCK_TIMEOUT
if timeout_class is None:
timeout_class = swift.common.exceptions.LockTimeout
if limit < 1:
raise ValueError('limit must be greater than or equal to 1')
mkdirs(directory)
lockpath = '%s/.lock' % directory
if name:
lockpath += '-%s' % str(name)
fds = [os.open(get_zero_indexed_base_string(lockpath, i),
os.O_WRONLY | os.O_CREAT)
for i in range(limit)]
sleep_time = 0.01
slower_sleep_time = max(timeout * 0.01, sleep_time)
slowdown_at = timeout * 0.01
time_slept = 0
try:
with timeout_class(timeout, lockpath):
while True:
if _get_any_lock(fds):
break
if time_slept > slowdown_at:
sleep_time = slower_sleep_time
sleep(sleep_time)
time_slept += sleep_time
yield True
finally:
for fd in fds:
os.close(fd) |
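A minimal usage sketch (not from the original source); the directory here is just a scratch directory, and LockTimeout is raised if the lock cannot be taken within the timeout:

import tempfile
from swift.common.utils import lock_path

with lock_path(tempfile.mkdtemp(), timeout=10):
    pass  # work on the directory while holding the lock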
Context manager that acquires a lock on a file. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
:param filename: file to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
:param append: True if file should be opened in append mode
:param unlink: True if the file should be unlinked at the end | def lock_file(filename, timeout=None, append=False, unlink=True):
"""
Context manager that acquires a lock on a file. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
:param filename: file to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
:param append: True if file should be opened in append mode
:param unlink: True if the file should be unlinked at the end
"""
if timeout is None:
timeout = DEFAULT_LOCK_TIMEOUT
flags = os.O_CREAT | os.O_RDWR
if append:
flags |= os.O_APPEND
mode = 'a+b'
else:
mode = 'r+b'
while True:
fd = os.open(filename, flags)
file_obj = os.fdopen(fd, mode)
try:
with swift.common.exceptions.LockTimeout(timeout, filename):
while True:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
sleep(0.01)
try:
if os.stat(filename).st_ino != os.fstat(fd).st_ino:
continue
except OSError as err:
if err.errno == errno.ENOENT:
continue
raise
yield file_obj
if unlink:
os.unlink(filename)
break
finally:
file_obj.close() |
Context manager that acquires a lock on the parent directory of the given
file path. This will block until the lock can be acquired, or the timeout
time has expired (whichever occurs first).
:param filename: file path of the parent directory to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT | def lock_parent_directory(filename, timeout=None):
"""
Context manager that acquires a lock on the parent directory of the given
file path. This will block until the lock can be acquired, or the timeout
time has expired (whichever occurs first).
:param filename: file path of the parent directory to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
"""
return lock_path(os.path.dirname(filename), timeout=timeout) |
Get a normalized length of time in the largest unit of time (hours,
minutes, or seconds.)
:param time_amount: length of time in seconds
:returns: A tuple of (length of time, unit of time) where unit of time is
one of ('h', 'm', 's') | def get_time_units(time_amount):
"""
Get a normalized length of time in the largest unit of time (hours,
minutes, or seconds.)
:param time_amount: length of time in seconds
:returns: A tuple of (length of time, unit of time) where unit of time is
one of ('h', 'm', 's')
"""
time_unit = 's'
if time_amount > 60:
time_amount /= 60
time_unit = 'm'
if time_amount > 60:
time_amount /= 60
time_unit = 'h'
return time_amount, time_unit |
Compute an ETA. Now if only we could also have a progress bar...
:param start_time: Unix timestamp when the operation began
:param current_value: Current value
:param final_value: Final value
:returns: ETA as a tuple of (length of time, unit of time) where unit of
time is one of ('h', 'm', 's') | def compute_eta(start_time, current_value, final_value):
"""
Compute an ETA. Now if only we could also have a progress bar...
:param start_time: Unix timestamp when the operation began
:param current_value: Current value
:param final_value: Final value
:returns: ETA as a tuple of (length of time, unit of time) where unit of
time is one of ('h', 'm', 's')
"""
elapsed = time.time() - start_time
completion = (float(current_value) / final_value) or 0.00001
return get_time_units(1.0 / completion * elapsed - elapsed) |
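An illustrative sketch (not from the original source); the values are approximate because a little wall-clock time passes between the calls:

import time
from swift.common.utils import compute_eta, get_time_units

start = time.time() - 30      # pretend the pass started 30 seconds ago
compute_eta(start, 25, 100)   # roughly (1.5, 'm'): ~90 seconds to go
get_time_units(7200)          # (2.0, 'h')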
Remove any file in a given path that was last modified before mtime.
:param path: path to remove file from
:param mtime: timestamp of oldest file to keep | def unlink_older_than(path, mtime):
"""
Remove any file in a given path that was last modified before mtime.
:param path: path to remove file from
:param mtime: timestamp of oldest file to keep
"""
filepaths = map(functools.partial(os.path.join, path), listdir(path))
return unlink_paths_older_than(filepaths, mtime) |
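For example (the path is hypothetical; swift's listdir() returns an empty list if it does not exist):

import time
from swift.common.utils import unlink_older_than

# remove anything in the tmp dir not modified in the last 24 hours
unlink_older_than('/srv/node/sdb1/tmp', time.time() - 86400)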
Remove any files from the given list that were
last modified before mtime.
:param filepaths: a list of strings, the full paths of files to check
:param mtime: timestamp of oldest file to keep | def unlink_paths_older_than(filepaths, mtime):
"""
Remove any files from the given list that were
last modified before mtime.
:param filepaths: a list of strings, the full paths of files to check
:param mtime: timestamp of oldest file to keep
"""
for fpath in filepaths:
try:
if os.path.getmtime(fpath) < mtime:
os.unlink(fpath)
except OSError:
pass |
Get a value from the wsgi environment
:param env: wsgi environment dict
:param item_name: name of item to get
:returns: the value from the environment | def item_from_env(env, item_name, allow_none=False):
"""
Get a value from the wsgi environment
:param env: wsgi environment dict
:param item_name: name of item to get
:returns: the value from the environment
"""
item = env.get(item_name, None)
if item is None and not allow_none:
logging.error("ERROR: %s could not be found in env!", item_name)
return item |
Get memcache connection pool from the environment (which had been
previously set by the memcache middleware)
:param env: wsgi environment dict
:returns: swift.common.memcached.MemcacheRing from environment | def cache_from_env(env, allow_none=False):
"""
Get memcache connection pool from the environment (which had been
previously set by the memcache middleware)
:param env: wsgi environment dict
:returns: swift.common.memcached.MemcacheRing from environment
"""
return item_from_env(env, 'swift.cache', allow_none) |
Ensure that a pickle file gets written to disk. The file
is first written to a tmp location, synced to disk, and then moved to its
final location
:param obj: python object to be pickled
:param dest: path of final destination file
:param tmp: path to tmp to use, defaults to None
:param pickle_protocol: protocol to pickle the obj with, defaults to 0 | def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
"""
Ensure that a pickle file gets written to disk. The file
is first written to a tmp location, synced to disk, and then moved to its
final location
:param obj: python object to be pickled
:param dest: path of final destination file
:param tmp: path to tmp to use, defaults to None
:param pickle_protocol: protocol to pickle the obj with, defaults to 0
"""
if tmp is None:
tmp = os.path.dirname(dest)
mkdirs(tmp)
fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
with os.fdopen(fd, 'wb') as fo:
pickle.dump(obj, fo, pickle_protocol)
fo.flush()
os.fsync(fd)
renamer(tmppath, dest) |
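A small self-contained sketch (not part of the original source) writing and reading back a pickle:

import os
import pickle
import tempfile
from swift.common.utils import write_pickle

dest = os.path.join(tempfile.mkdtemp(), 'state.pickle')
write_pickle({'count': 3}, dest)
with open(dest, 'rb') as f:
    print(pickle.load(f))   # {'count': 3}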
Look in root for any files/dirs matching glob, recursively traversing
any found directories looking for files ending with ext
:param root: start of search path
:param glob_match: glob to match in root, matching dirs are traversed with
os.walk
:param ext: only files that end in ext will be returned
:param exts: a list of file extensions; only files that end in one of these
extensions will be returned; if set this list overrides any
extension specified using the 'ext' param.
:param dir_ext: if present directories that end with dir_ext will not be
traversed and instead will be returned as a matched path
:returns: list of full paths to matching files, sorted | def search_tree(root, glob_match, ext='', exts=None, dir_ext=None):
Look in root for any files/dirs matching glob, recursively traversing
any found directories looking for files ending with ext
:param root: start of search path
:param glob_match: glob to match in root, matching dirs are traversed with
os.walk
:param ext: only files that end in ext will be returned
:param exts: a list of file extensions; only files that end in one of these
extensions will be returned; if set this list overrides any
extension specified using the 'ext' param.
:param dir_ext: if present directories that end with dir_ext will not be
traversed and instead will be returned as a matched path
:returns: list of full paths to matching files, sorted
"""
exts = exts or [ext]
found_files = []
for path in glob.glob(os.path.join(root, glob_match)):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if dir_ext and root.endswith(dir_ext):
found_files.append(root)
# the root is a config dir, descend no further
break
for file_ in files:
if any(exts) and not any(file_.endswith(e) for e in exts):
continue
found_files.append(os.path.join(root, file_))
found_dir = False
for dir_ in dirs:
if dir_ext and dir_.endswith(dir_ext):
found_dir = True
found_files.append(os.path.join(root, dir_))
if found_dir:
# do not descend further into matching directories
break
else:
if ext and not path.endswith(ext):
continue
found_files.append(path)
return sorted(found_files) |
Write contents to file at path
:param path: any path, subdirs will be created as needed
:param contents: data to write to file, will be converted to string | def write_file(path, contents):
"""Write contents to file at path
:param path: any path, subdirs will be created as needed
:param contents: data to write to file, will be converted to string
"""
dirname, name = os.path.split(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as err:
if err.errno == errno.EACCES:
sys.exit('Unable to create %s. Running as '
'non-root?' % dirname)
with open(path, 'w') as f:
f.write('%s' % contents) |
Quiet wrapper for os.unlink, OSErrors are suppressed
:param path: first and only argument passed to os.unlink | def remove_file(path):
"""Quiet wrapper for os.unlink, OSErrors are suppressed
:param path: first and only argument passed to os.unlink
"""
try:
os.unlink(path)
except OSError:
pass |
Wrapper for os.rmdir, ENOENT and ENOTEMPTY are ignored
:param path: first and only argument passed to os.rmdir | def remove_directory(path):
"""Wrapper for os.rmdir, ENOENT and ENOTEMPTY are ignored
:param path: first and only argument passed to os.rmdir
"""
try:
os.rmdir(path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
raise |
Test if a file mtime is older than the given age, suppressing any OSErrors.
:param path: first and only argument passed to os.stat
:param age: age in seconds
:return: True if age is less than or equal to zero or if the file mtime is
more than ``age`` in the past; False if age is greater than zero and
the file mtime is less than or equal to ``age`` in the past or if there
is an OSError while stat'ing the file. | def is_file_older(path, age):
"""
Test if a file mtime is older than the given age, suppressing any OSErrors.
:param path: first and only argument passed to os.stat
:param age: age in seconds
:return: True if age is less than or equal to zero or if the file mtime is
more than ``age`` in the past; False if age is greater than zero and
the file mtime is less than or equal to ``age`` in the past or if there
is an OSError while stat'ing the file.
"""
if age <= 0:
return True
try:
return time.time() - os.stat(path).st_mtime > age
except OSError:
return False |
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
(devices|partitions|suffixes|hashes)_filter are meant to modify the list of
elements that will be iterated. eg: they can be used to exclude some
elements based on a custom condition defined by the caller.
hook_pre_(device|partition|suffix|hash) are called before yielding the
element, hook_post_(device|partition|suffix|hash) are called after the
element was yielded. They are meant to do some pre/post processing.
eg: saving a progress status.
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
(ignored if yield_hash_dirs is True)
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param devices_filter: a callable taking (devices, [list of devices]) as
parameters and returning a [list of devices]
:param partitions_filter: a callable taking (datadir_path, [list of parts])
as parameters and returning a [list of parts]
:param suffixes_filter: a callable taking (part_path, [list of suffixes])
as parameters and returning a [list of suffixes]
:param hashes_filter: a callable taking (suff_path, [list of hashes]) as
parameters and returning a [list of hashes]
:param hook_pre_device: a callable taking device_path as parameter
:param hook_post_device: a callable taking device_path as parameter
:param hook_pre_partition: a callable taking part_path as parameter
:param hook_post_partition: a callable taking part_path as parameter
:param hook_pre_suffix: a callable taking suff_path as parameter
:param hook_post_suffix: a callable taking suff_path as parameter
:param hook_pre_hash: a callable taking hash_path as parameter
:param hook_post_hash: a callable taking hash_path as parameter
:param error_counter: a dictionary used to accumulate error counts; may
add keys 'unmounted' and 'unlistable_partitions'
:param yield_hash_dirs: if True, yield hash dirs instead of individual
files | def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None,
devices_filter=None, partitions_filter=None,
suffixes_filter=None, hashes_filter=None,
hook_pre_device=None, hook_post_device=None,
hook_pre_partition=None, hook_post_partition=None,
hook_pre_suffix=None, hook_post_suffix=None,
hook_pre_hash=None, hook_post_hash=None,
error_counter=None, yield_hash_dirs=False):
"""
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
(devices|partitions|suffixes|hashes)_filter are meant to modify the list of
elements that will be iterated. eg: they can be used to exclude some
elements based on a custom condition defined by the caller.
hook_pre_(device|partition|suffix|hash) are called before yielding the
element, hook_post_(device|partition|suffix|hash) are called after the
element was yielded. They are meant to do some pre/post processing.
eg: saving a progress status.
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
(ignored if yield_hash_dirs is True)
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param devices_filter: a callable taking (devices, [list of devices]) as
parameters and returning a [list of devices]
:param partitions_filter: a callable taking (datadir_path, [list of parts])
as parameters and returning a [list of parts]
:param suffixes_filter: a callable taking (part_path, [list of suffixes])
as parameters and returning a [list of suffixes]
:param hashes_filter: a callable taking (suff_path, [list of hashes]) as
parameters and returning a [list of hashes]
:param hook_pre_device: a callable taking device_path as parameter
:param hook_post_device: a callable taking device_path as parameter
:param hook_pre_partition: a callable taking part_path as parameter
:param hook_post_partition: a callable taking part_path as parameter
:param hook_pre_suffix: a callable taking suff_path as parameter
:param hook_post_suffix: a callable taking suff_path as parameter
:param hook_pre_hash: a callable taking hash_path as parameter
:param hook_post_hash: a callable taking hash_path as parameter
:param error_counter: a dictionary used to accumulate error counts; may
add keys 'unmounted' and 'unlistable_partitions'
:param yield_hash_dirs: if True, yield hash dirs instead of individual
files
"""
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
if devices_filter:
device_dir = devices_filter(devices, device_dir)
for device in device_dir:
if mount_check and not ismount(os.path.join(devices, device)):
if error_counter is not None:
error_counter.setdefault('unmounted', [])
error_counter['unmounted'].append(device)
if logger:
logger.warning(
'Skipping %s as it is not mounted', device)
continue
if hook_pre_device:
hook_pre_device(os.path.join(devices, device))
datadir_path = os.path.join(devices, device, datadir)
try:
partitions = listdir(datadir_path)
except OSError as e:
# NB: listdir ignores non-existent datadir_path
if error_counter is not None:
error_counter.setdefault('unlistable_partitions', [])
error_counter['unlistable_partitions'].append(datadir_path)
if logger:
logger.warning('Skipping %(datadir)s because %(err)s',
{'datadir': datadir_path, 'err': e})
continue
if partitions_filter:
partitions = partitions_filter(datadir_path, partitions)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
if hook_pre_partition:
hook_pre_partition(part_path)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
if suffixes_filter:
suffixes = suffixes_filter(part_path, suffixes)
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
if hook_pre_suffix:
hook_pre_suffix(suff_path)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
if hashes_filter:
hashes = hashes_filter(suff_path, hashes)
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
if hook_pre_hash:
hook_pre_hash(hash_path)
if yield_hash_dirs:
if os.path.isdir(hash_path):
yield hash_path, device, partition
else:
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
if hook_post_hash:
hook_post_hash(hash_path)
if hook_post_suffix:
hook_post_suffix(suff_path)
if hook_post_partition:
hook_post_partition(part_path)
if hook_post_device:
hook_post_device(os.path.join(devices, device)) |
Will eventlet.sleep() for the appropriate time so that the max_rate
is never exceeded. If max_rate is 0, will not ratelimit. The
maximum recommended rate should not exceed (1000 * incr_by) a second
as eventlet.sleep() does involve some overhead. Returns running_time
that should be used for subsequent calls.
:param running_time: the running time in milliseconds of the next
allowable request. Best to start at zero.
:param max_rate: The maximum rate per second allowed for the process.
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be > 0 to engage rate-limiting
behavior.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster than listed rate).
A larger number will result in larger spikes in rate
but better average accuracy. Must be > 0 to engage
rate-limiting behavior.
:return: The absolute time for the next interval in milliseconds; note
that time could have passed well beyond that point, but the next call
will catch that and skip the sleep. | def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
"""
Will eventlet.sleep() for the appropriate time so that the max_rate
is never exceeded. If max_rate is 0, will not ratelimit. The
maximum recommended rate should not exceed (1000 * incr_by) a second
as eventlet.sleep() does involve some overhead. Returns running_time
that should be used for subsequent calls.
:param running_time: the running time in milliseconds of the next
allowable request. Best to start at zero.
:param max_rate: The maximum rate per second allowed for the process.
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be > 0 to engage rate-limiting
behavior.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster than listed rate).
A larger number will result in larger spikes in rate
but better average accuracy. Must be > 0 to engage
rate-limiting behavior.
:return: The absolute time for the next interval in milliseconds; note
that time could have passed well beyond that point, but the next call
will catch that and skip the sleep.
"""
warnings.warn(
'ratelimit_sleep() is deprecated; use the ``EventletRateLimiter`` '
'class instead.', DeprecationWarning, stacklevel=2
)
rate_limit = EventletRateLimiter(max_rate, rate_buffer=rate_buffer,
running_time=running_time)
rate_limit.wait(incr_by=incr_by)
return rate_limit.running_time |
Validates an X-Container-Sync-To header value, returning the
validated endpoint, realm, and realm_key, or an error string.
:param value: The X-Container-Sync-To header value to validate.
:param allowed_sync_hosts: A list of allowed hosts in endpoints,
if realms_conf does not apply.
:param realms_conf: An instance of
swift.common.container_sync_realms.ContainerSyncRealms to
validate against.
:returns: A tuple of (error_string, validated_endpoint, realm,
realm_key). The error_string will be None if the rest of the
values have been validated. The validated_endpoint will be
the validated endpoint to sync to. The realm and realm_key
will be set if validation was done through realms_conf. | def validate_sync_to(value, allowed_sync_hosts, realms_conf):
"""
Validates an X-Container-Sync-To header value, returning the
validated endpoint, realm, and realm_key, or an error string.
:param value: The X-Container-Sync-To header value to validate.
:param allowed_sync_hosts: A list of allowed hosts in endpoints,
if realms_conf does not apply.
:param realms_conf: An instance of
swift.common.container_sync_realms.ContainerSyncRealms to
validate against.
:returns: A tuple of (error_string, validated_endpoint, realm,
realm_key). The error_string will be None if the rest of the
values have been validated. The validated_endpoint will be
the validated endpoint to sync to. The realm and realm_key
will be set if validation was done through realms_conf.
"""
orig_value = value
value = value.rstrip('/')
if not value:
return (None, None, None, None)
if value.startswith('//'):
if not realms_conf:
return (None, None, None, None)
data = value[2:].split('/')
if len(data) != 4:
return (
'Invalid X-Container-Sync-To format %r' % orig_value,
None, None, None)
realm, cluster, account, container = data
realm_key = realms_conf.key(realm)
if not realm_key:
return ('No realm key for %r' % realm, None, None, None)
endpoint = realms_conf.endpoint(realm, cluster)
if not endpoint:
return (
'No cluster endpoint for %(realm)r %(cluster)r'
% {'realm': realm, 'cluster': cluster},
None, None, None)
return (
None,
'%s/%s/%s' % (endpoint.rstrip('/'), account, container),
realm.upper(), realm_key)
p = urlparse(value)
if p.scheme not in ('http', 'https'):
return (
'Invalid scheme %r in X-Container-Sync-To, must be "//", '
'"http", or "https".' % p.scheme,
None, None, None)
if not p.path:
return ('Path required in X-Container-Sync-To', None, None, None)
if p.params or p.query or p.fragment:
return (
'Params, queries, and fragments not allowed in '
'X-Container-Sync-To',
None, None, None)
if p.hostname not in allowed_sync_hosts:
return (
'Invalid host %r in X-Container-Sync-To' % p.hostname,
None, None, None)
return (None, value, None, None) |
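A usage sketch, assuming validate_sync_to is importable from swift.common.utils; the host and container names are made up for illustration.

err, endpoint, realm, realm_key = validate_sync_to(
    'http://sync-host.example.com/v1/AUTH_test/dest-container',
    ['sync-host.example.com'], None)
# err is None and endpoint is the URL itself; realm and realm_key stay None
# because no realms configuration was consulted.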
Returns the number in a human readable format; for example 1048576 = "1Mi". | def human_readable(value):
"""
Returns the number in a human readable format; for example 1048576 = "1Mi".
"""
value = float(value)
index = -1
suffixes = 'KMGTPEZY'
while value >= 1024 and index + 1 < len(suffixes):
index += 1
value = round(value / 1024)
if index == -1:
return '%d' % value
return '%d%si' % (round(value), suffixes[index]) |
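A few sample values, assuming human_readable is importable from swift.common.utils:

print(human_readable(123))         # -> 123
print(human_readable(1048576))     # -> 1Mi
print(human_readable(5368709120))  # -> 5Gi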
Update a recon cache entry item.
If ``item`` is an empty dict then any existing ``key`` in ``cache_entry``
will be deleted. Similarly if ``item`` is a dict and any of its values are
empty dicts then the corresponding key will be deleted from the nested dict
in ``cache_entry``.
We use nested recon cache entries when the object auditor
runs in parallel or else in 'once' mode with a specified subset of devices.
:param cache_entry: a dict of existing cache entries
:param key: key for item to update
:param item: value for item to update | def put_recon_cache_entry(cache_entry, key, item):
"""
Update a recon cache entry item.
If ``item`` is an empty dict then any existing ``key`` in ``cache_entry``
will be deleted. Similarly if ``item`` is a dict and any of its values are
empty dicts then the corresponding key will be deleted from the nested dict
in ``cache_entry``.
We use nested recon cache entries when the object auditor
runs in parallel or else in 'once' mode with a specified subset of devices.
:param cache_entry: a dict of existing cache entries
:param key: key for item to update
:param item: value for item to update
"""
if isinstance(item, dict):
if not item:
cache_entry.pop(key, None)
return
if key not in cache_entry or key in cache_entry and not \
isinstance(cache_entry[key], dict):
cache_entry[key] = {}
for k, v in item.items():
if v == {}:
cache_entry[key].pop(k, None)
else:
cache_entry[key][k] = v
else:
cache_entry[key] = item |
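A small illustration of the nested-dict semantics described above; the auditor-style keys are hypothetical.

cache = {'object_auditor': {'ALL': {'errors': 3}, 'ZBF': {'errors': 1}}}
# Update one nested entry and delete another by passing an empty dict for it.
put_recon_cache_entry(cache, 'object_auditor',
                      {'ALL': {'errors': 0}, 'ZBF': {}})
print(cache)  # -> {'object_auditor': {'ALL': {'errors': 0}}}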
Update recon cache values
:param cache_dict: Dictionary of cache key/value pairs to write out
:param cache_file: cache file to update
:param logger: the logger to use to log an encountered error
:param lock_timeout: timeout (in seconds)
:param set_owner: Set owner of recon cache file | def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2,
set_owner=None):
"""Update recon cache values
:param cache_dict: Dictionary of cache key/value pairs to write out
:param cache_file: cache file to update
:param logger: the logger to use to log an encountered error
:param lock_timeout: timeout (in seconds)
:param set_owner: Set owner of recon cache file
"""
try:
with lock_file(cache_file, lock_timeout, unlink=False) as cf:
cache_entry = {}
try:
existing_entry = cf.readline()
if existing_entry:
cache_entry = json.loads(existing_entry)
except ValueError:
# file doesn't have a valid entry, we'll recreate it
pass
for cache_key, cache_value in cache_dict.items():
put_recon_cache_entry(cache_entry, cache_key, cache_value)
tf = None
try:
with NamedTemporaryFile(dir=os.path.dirname(cache_file),
delete=False) as tf:
cache_data = json.dumps(cache_entry, ensure_ascii=True,
sort_keys=True)
tf.write(cache_data.encode('ascii') + b'\n')
if set_owner:
os.chown(tf.name, pwd.getpwnam(set_owner).pw_uid, -1)
renamer(tf.name, cache_file, fsync=False)
finally:
if tf is not None:
try:
os.unlink(tf.name)
except OSError as err:
if err.errno != errno.ENOENT:
raise
except (Exception, Timeout) as err:
logger.exception('Exception dumping recon cache: %s' % err) |
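A minimal sketch, assuming dump_recon_cache is importable from swift.common.utils; the path below is a stand-in for the real recon cache file (normally under /var/cache/swift).

import logging

logger = logging.getLogger('recon-demo')  # hypothetical logger name
# Merge one key into the cache file, creating it if it does not exist yet.
dump_recon_cache({'object_replication_time': 12.5}, '/tmp/demo.recon', logger)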
Load a recon cache file. Treats missing file as empty. | def load_recon_cache(cache_file):
"""
Load a recon cache file. Treats missing file as empty.
"""
try:
with open(cache_file) as fh:
return json.load(fh)
except IOError as e:
if e.errno == errno.ENOENT:
return {}
else:
raise
except ValueError: # invalid JSON
return {} |
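Reading the same hypothetical cache file back; a missing file simply yields an empty dict.

data = load_recon_cache('/tmp/demo.recon')
print(data.get('object_replication_time'))  # -> 12.5, or None if never dumped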
Constant-time string comparison.
:param s1: the first string
:param s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks. | def streq_const_time(s1, s2):
"""Constant-time string comparison.
:param s1: the first string
:param s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0 |
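A usage sketch; for new code the standard library's hmac.compare_digest provides a similar constant-time comparison.

supplied_key = 'user-supplied-secret'
expected_key = 'user-supplied-secret'
if streq_const_time(supplied_key, expected_key):
    print('authenticated')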
Returns an iterator of all pairs of elements from item_list.
:param item_list: items (no duplicates allowed) | def pairs(item_list):
"""
Returns an iterator of all pairs of elements from item_list.
:param item_list: items (no duplicates allowed)
"""
for i, item1 in enumerate(item_list):
for item2 in item_list[(i + 1):]:
yield (item1, item2) |
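A quick illustration of the output order, assuming pairs is importable from swift.common.utils:

print(list(pairs(['a', 'b', 'c'])))
# -> [('a', 'b'), ('a', 'c'), ('b', 'c')]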
Decorator to declare which methods are accessible for different
types of servers:
* If option replication_server is None then this decorator
doesn't matter.
* If option replication_server is True then ONLY methods decorated
with this decorator will be started.
* If option replication_server is False then methods decorated with
this decorator will NOT be started.
:param func: function to mark accessible for replication | def replication(func):
"""
Decorator to declare which methods are accessible for different
types of servers:
* If option replication_server is None then this decorator
doesn't matter.
* If option replication_server is True then ONLY methods decorated
with this decorator will be started.
* If option replication_server is False then methods decorated with
this decorator will NOT be started.
:param func: function to mark accessible for replication
"""
func.replication = True
return func |
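A sketch of how the marker is consumed; the controller class and method below are hypothetical, and the decorator itself only sets an attribute that the server checks at dispatch time.

class FakeController(object):
    @replication
    def REPLICATE(self, request):
        return 'handled'

print(FakeController.REPLICATE.replication)  # -> True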
Decorator to declare which methods are publicly accessible as HTTP
requests
:param func: function to make public | def public(func):
"""
Decorator to declare which methods are publicly accessible as HTTP
requests
:param func: function to make public
"""
func.publicly_accessible = True
return func |
Decorator to declare which methods are privately accessible as HTTP
requests with an ``X-Backend-Allow-Private-Methods: True`` override
:param func: function to make private | def private(func):
"""
Decorator to declare which methods are privately accessible as HTTP
requests with an ``X-Backend-Allow-Private-Methods: True`` override
:param func: function to make private
"""
func.privately_accessible = True
return func |
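The same attribute-marking pattern applies to both decorators; this controller is again hypothetical.

class FakeController(object):
    @public
    def GET(self, request):
        return 'public response'

    @private
    def UPDATE(self, request):
        return 'private response'

print(FakeController.GET.publicly_accessible)      # -> True
print(FakeController.UPDATE.privately_accessible)  # -> True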
quorum size as it applies to services that use 'replication' for data
integrity (Account/Container services). Object quorum_size is defined
on a storage policy basis.
Number of successful backend requests needed for the proxy to consider
the client request successful. | def quorum_size(n):
"""
quorum size as it applies to services that use 'replication' for data
integrity (Account/Container services). Object quorum_size is defined
on a storage policy basis.
Number of successful backend requests needed for the proxy to consider
the client request successful.
"""
return (n + 1) // 2 |
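A few concrete values, since the integer arithmetic is easy to misread:

print(quorum_size(3))  # -> 2
print(quorum_size(4))  # -> 2, i.e. (4 + 1) // 2
print(quorum_size(5))  # -> 3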
Transform ip string to an rsync-compatible form
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
:param ip: an ip string (ipv4 or ipv6)
:returns: a string ip address | def rsync_ip(ip):
"""
Transform ip string to an rsync-compatible form
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
:param ip: an ip string (ipv4 or ipv6)
:returns: a string ip address
"""
return '[%s]' % ip if is_valid_ipv6(ip) else ip |
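For example, assuming rsync_ip is importable from swift.common.utils:

print(rsync_ip('10.0.0.1'))  # -> 10.0.0.1
print(rsync_ip('fe80::1'))   # -> [fe80::1]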
Interpolate devices variables inside a rsync module template
:param template: rsync module template as a string
:param device: a device from a ring
:returns: a string with all variables replaced by device attributes | def rsync_module_interpolation(template, device):
"""
Interpolate devices variables inside a rsync module template
:param template: rsync module template as a string
:param device: a device from a ring
:returns: a string with all variables replaced by device attributes
"""
replacements = {
'ip': rsync_ip(device.get('ip', '')),
'port': device.get('port', ''),
'replication_ip': rsync_ip(device.get('replication_ip', '')),
'replication_port': device.get('replication_port', ''),
'region': device.get('region', ''),
'zone': device.get('zone', ''),
'device': device.get('device', ''),
'meta': device.get('meta', ''),
}
try:
module = template.format(**replacements)
except KeyError as e:
raise ValueError('Cannot interpolate rsync_module, invalid variable: '
'%s' % e)
return module |
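A sketch using a template in the style of a replicator rsync_module option; the device dict values are made up.

device = {'ip': '10.0.0.1', 'port': 6200,
          'replication_ip': '10.0.1.1', 'replication_port': 6203,
          'region': 1, 'zone': 2, 'device': 'sdb1', 'meta': ''}
print(rsync_module_interpolation('{replication_ip}::object_{device}', device))
# -> 10.0.1.1::object_sdb1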
Splits the str given and returns a properly stripped list of the comma
separated values. | def list_from_csv(comma_separated_str):
"""
Splits the str given and returns a properly stripped list of the comma
separated values.
"""
if comma_separated_str:
return [v.strip() for v in comma_separated_str.split(',') if v.strip()]
return [] |
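For example; empty items and surrounding whitespace are dropped:

print(list_from_csv(' a, b ,, c '))  # -> ['a', 'b', 'c']
print(list_from_csv(None))           # -> []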