code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
outer_definition = cls.message_definition()
if not outer_definition:
return util.get_package_for_module(cls.__module__)
return outer_definition.definition_package() | def definition_package(cls) | Helper method for creating the package of a definition.
Returns:
Name of package that definition belongs to. | 6.621135 | 6.502505 | 1.018244 |
return dict((item.name, item.number) for item in iter(cls)) | def to_dict(cls) | Make dictionary version of enumerated class.
A dictionary created this way can be used with def_num.
Returns:
A dict (name) -> number | 10.329203 | 9.958587 | 1.037216 |
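Not the protorpc API itself: a standalone sketch using the stdlib enum module (whose members expose .value rather than protorpc's .number) to show the same name-to-number mapping.
```python
import enum

class Color(enum.IntEnum):
    RED = 1
    GREEN = 2

# Same shape as to_dict(): member name -> numeric value.
name_to_number = {member.name: member.value for member in Color}
print(name_to_number)  # {'RED': 1, 'GREEN': 2}
```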
for name, field in self.__by_name.items():
value = getattr(self, name)
if value is None:
if field.required:
raise ValidationError(
"Message %s is missing required field %s" %
(type(self).__name__, name))
else:
try:
if (isinstance(field, MessageField) and
issubclass(field.message_type, Message)):
if field.repeated:
for item in value:
item_message_value = field.value_to_message(
item)
item_message_value.check_initialized()
else:
message_value = field.value_to_message(value)
message_value.check_initialized()
except ValidationError as err:
if not hasattr(err, 'message_name'):
err.message_name = type(self).__name__
raise | def check_initialized(self) | Check class for initialization status.
Check that all required fields are initialized
Raises:
ValidationError: If message is not initialized. | 2.700298 | 2.47115 | 1.092729 |
message_type = type(self)
try:
field = message_type.field_by_name(name)
except KeyError:
raise AttributeError('Message %s has no field %s' % (
message_type.__name__, name))
return self.__tags.get(field.number) | def get_assigned_value(self, name) | Get the assigned value of an attribute.
Get the underlying value of an attribute. If value has not
been set, will not return the default for the field.
Args:
name: Name of attribute to get.
Returns:
Value of attribute, None if it has not been set. | 3.588796 | 4.360319 | 0.823058 |
message_type = type(self)
try:
field = message_type.field_by_name(name)
except KeyError:
if name not in message_type.__by_name:
raise AttributeError('Message %s has no field %s' % (
message_type.__name__, name))
if field.repeated:
self.__tags[field.number] = FieldList(field, [])
else:
self.__tags.pop(field.number, None) | def reset(self, name) | Reset assigned value for field.
Resetting a field will return it to its default value or None.
Args:
name: Name of field to reset. | 3.494228 | 3.856391 | 0.906087 |
value, variant = self.__unrecognized_fields.get(key, (value_default,
variant_default))
return value, variant | def get_unrecognized_field_info(self, key, value_default=None,
variant_default=None) | Get the value and variant of an unknown field in this message.
Args:
key: The name or number of the field to retrieve.
value_default: Value to be returned if the key isn't found.
variant_default: Value to be returned as variant if the key isn't
found.
Returns:
(value, variant), where value and variant are whatever was passed
to set_unrecognized_field. | 3.442383 | 4.703367 | 0.731897 |
if not isinstance(variant, Variant):
raise TypeError('Variant type %s is not valid.' % variant)
self.__unrecognized_fields[key] = value, variant | def set_unrecognized_field(self, key, value, variant) | Set an unrecognized field, used when decoding a message.
Args:
key: The name or number used to refer to this unknown value.
value: The value of the field.
variant: Type information needed to interpret the value or re-encode
it.
Raises:
TypeError: If the variant is not an instance of messages.Variant. | 4.445238 | 4.737941 | 0.938221 |
self.__field.validate_element(value)
return list.append(self, value) | def append(self, value) | Validate item appending to list. | 11.373289 | 8.729624 | 1.302838 |
self.__field.validate(sequence)
return list.extend(self, sequence) | def extend(self, sequence) | Validate extension of list. | 13.899603 | 8.684708 | 1.600469 |
self.__field.validate_element(value)
return list.insert(self, index, value) | def insert(self, index, value) | Validate item insertion to list. | 9.037871 | 7.463728 | 1.210906 |
if not isinstance(value, self.type):
# Authorize int values as float.
if isinstance(value, six.integer_types) and self.type == float:
return float(value)
if value is None:
if self.required:
raise ValidationError('Required field is missing')
else:
try:
name = self.name
except AttributeError:
raise ValidationError('Expected type %s for %s, '
'found %s (type %s)' %
(self.type, self.__class__.__name__,
value, type(value)))
else:
raise ValidationError(
'Expected type %s for field %s, found %s (type %s)' %
(self.type, name, value, type(value)))
return value | def validate_element(self, value) | Validate single element of field.
This is different from validate in that it is used on individual
values of repeated fields.
Args:
value: Value to validate.
Returns:
The value cast to the expected type.
Raises:
ValidationError if value is not expected type. | 2.946279 | 2.919302 | 1.009241 |
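A simplified, standalone illustration of the "authorize int values as float" rule above; coerce_scalar is a hypothetical helper, not part of protorpc.
```python
def coerce_scalar(value, expected_type=float):
    # Mirrors the cast above: ints are accepted for float fields.
    if not isinstance(value, expected_type):
        if isinstance(value, int) and expected_type is float:
            return float(value)
        raise TypeError('Expected %s, found %r (type %s)'
                        % (expected_type, value, type(value)))
    return value

print(coerce_scalar(3))    # 3.0
print(coerce_scalar(2.5))  # 2.5
```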
if not self.repeated:
return validate_element(value)
else:
# Must be a list or tuple, may not be a string.
if isinstance(value, (list, tuple)):
result = []
for element in value:
if element is None:
try:
name = self.name
except AttributeError:
raise ValidationError(
'Repeated values for %s '
'may not be None' % self.__class__.__name__)
else:
raise ValidationError(
'Repeated values for field %s '
'may not be None' % name)
result.append(validate_element(element))
return result
elif value is not None:
try:
name = self.name
except AttributeError:
raise ValidationError('%s is repeated. Found: %s' % (
self.__class__.__name__, value))
else:
raise ValidationError(
'Field %s is repeated. Found: %s' % (name, value))
return value | def __validate(self, value, validate_element) | Internal validation function.
Validate an internal value using a function to validate
individual elements.
Args:
value: Value to validate.
validate_element: Function to use to validate individual elements.
Raises:
ValidationError if value is not expected type. | 2.659888 | 2.680938 | 0.992148 |
# If value is str, it is considered valid. Satisfies "required=True".
if isinstance(value, bytes):
try:
six.text_type(value, 'UTF-8')
except UnicodeDecodeError as err:
try:
_ = self.name
except AttributeError:
validation_error = ValidationError(
'Field encountered non-UTF-8 string %r: %s' % (value,
err))
else:
validation_error = ValidationError(
'Field %s encountered non-UTF-8 string %r: %s' % (
self.name, value, err))
validation_error.field_name = self.name
raise validation_error
else:
return super(StringField, self).validate_element(value)
return value | def validate_element(self, value) | Validate StringField allowing for str and unicode.
Raises:
ValidationError if a str value is not UTF-8. | 3.588481 | 3.35872 | 1.068407 |
if self.__type is None:
message_type = find_definition(
self.__type_name, self.message_definition())
if not (message_type is not Message and
isinstance(message_type, type) and
issubclass(message_type, Message)):
raise FieldDefinitionError(
'Invalid message class: %s' % message_type)
self.__type = message_type
return self.__type | def type(self) | Message type used for field. | 4.533702 | 3.786701 | 1.19727 |
if not isinstance(message, self.message_type):
raise DecodeError('Expected type %s, got %s: %r' %
(self.message_type.__name__,
type(message).__name__,
message))
return message | def value_from_message(self, message) | Convert a message to a value instance.
Used by deserializers to convert from underlying messages to
value of expected user type.
Args:
message: A message instance of type self.message_type.
Returns:
Value of self.message_type. | 2.931603 | 3.157708 | 0.928396 |
if not isinstance(value, self.type):
raise EncodeError('Expected type %s, got %s: %r' %
(self.type.__name__,
type(value).__name__,
value))
return value | def value_to_message(self, value) | Convert a value instance to a message.
Used by serializers to convert Python user types to underlying
messages for transmission.
Args:
value: A value of type self.type.
Returns:
An instance of type self.message_type. | 3.263709 | 3.4271 | 0.952324 |
if isinstance(value, (six.string_types, six.integer_types)):
# Validation of the value does not happen for delayed resolution
# enumerated types. Ignore if type is not yet resolved.
if self.__type:
self.__type(value)
return value
return super(EnumField, self).validate_default_element(value) | def validate_default_element(self, value) | Validate default element of Enum field.
Enum fields allow for delayed resolution of default values
when the type of the field has not been resolved. The default
value of a field may be a string or an integer. If the Enum
type of the field has been resolved, the default value is
validated against that type.
Args:
value: Value to validate.
Raises:
ValidationError if value is not expected message type. | 7.382207 | 6.515931 | 1.132947 |
if self.__type is None:
found_type = find_definition(
self.__type_name, self.message_definition())
if not (found_type is not Enum and
isinstance(found_type, type) and
issubclass(found_type, Enum)):
raise FieldDefinitionError(
'Invalid enum type: %s' % found_type)
self.__type = found_type
return self.__type | def type(self) | Enum type used for field. | 4.830503 | 4.036363 | 1.196746 |
try:
return self.__resolved_default
except AttributeError:
resolved_default = super(EnumField, self).default
if isinstance(resolved_default, (six.string_types,
six.integer_types)):
# pylint:disable=not-callable
resolved_default = self.type(resolved_default)
self.__resolved_default = resolved_default
return self.__resolved_default | def default(self) | Default for enum field.
Will cause resolution of Enum type and unresolved default value. | 3.186889 | 2.849088 | 1.118565 |
def Register(cls):
_CUSTOM_MESSAGE_CODECS[cls] = _Codec(encoder=encoder, decoder=decoder)
return cls
return Register | def RegisterCustomMessageCodec(encoder, decoder) | Register a custom encoder/decoder for this message class. | 4.372041 | 4.243657 | 1.030253 |
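A hedged usage sketch of the decorator factory above; the encoder/decoder names are hypothetical, and in apitools the decorated class would normally be a messages.Message subclass.
```python
def _encode(message):
    # hypothetical: turn the message into its wire representation
    return '<encoded>'

def _decode(encoded_message):
    # hypothetical: rebuild the message from its wire representation
    return '<decoded>'

@RegisterCustomMessageCodec(encoder=_encode, decoder=_decode)
class MyMessage(object):  # normally a messages.Message subclass
    pass
```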
def Register(field):
_CUSTOM_FIELD_CODECS[field] = _Codec(encoder=encoder, decoder=decoder)
return field
return Register | def RegisterCustomFieldCodec(encoder, decoder) | Register a custom encoder/decoder for this field. | 4.818911 | 4.782876 | 1.007534 |
def Register(field_type):
_FIELD_TYPE_CODECS[field_type] = _Codec(
encoder=encoder, decoder=decoder)
return field_type
return Register | def RegisterFieldTypeCodec(encoder, decoder) | Register a custom encoder/decoder for all fields of this type. | 3.758915 | 4.073177 | 0.922846 |
result = _ProtoJsonApiTools.Get().encode_message(message)
return _IncludeFields(result, message, include_fields) | def MessageToJson(message, include_fields=None) | Convert the given message to JSON. | 20.840071 | 23.076933 | 0.903069 |
items = properties.items()
if sort_items:
items = sorted(items)
map_ = []
for key, value in items:
map_.append(additional_property_type.AdditionalProperty(
key=key, value=value))
return additional_property_type(additionalProperties=map_) | def DictToAdditionalPropertyMessage(properties, additional_property_type,
sort_items=False) | Convert the given dictionary to an AdditionalProperty message. | 2.292591 | 2.317397 | 0.989296 |
# TODO(jasmuth): craigcitro suggests a pretty-printer from apitools/gen.
indent = kwargs.get('indent', 0)
def IndentKwargs(kwargs):
kwargs = dict(kwargs)
kwargs['indent'] = kwargs.get('indent', 0) + 4
return kwargs
if isinstance(msg, list):
s = '['
for item in msg:
if multiline:
s += '\n' + ' ' * (indent + 4)
s += MessageToRepr(
item, multiline=multiline, **IndentKwargs(kwargs)) + ','
if multiline:
s += '\n' + ' ' * indent
s += ']'
return s
if isinstance(msg, messages.Message):
s = type(msg).__name__ + '('
if not kwargs.get('no_modules'):
s = msg.__module__ + '.' + s
names = sorted([field.name for field in msg.all_fields()])
for name in names:
field = msg.field_by_name(name)
if multiline:
s += '\n' + ' ' * (indent + 4)
value = getattr(msg, field.name)
s += field.name + '=' + MessageToRepr(
value, multiline=multiline, **IndentKwargs(kwargs)) + ','
if multiline:
s += '\n' + ' ' * indent
s += ')'
return s
if isinstance(msg, six.string_types):
if kwargs.get('shortstrings') and len(msg) > 100:
msg = msg[:100]
if isinstance(msg, datetime.datetime):
class SpecialTZInfo(datetime.tzinfo):
def __init__(self, offset):
super(SpecialTZInfo, self).__init__()
self.offset = offset
def __repr__(self):
s = 'TimeZoneOffset(' + repr(self.offset) + ')'
if not kwargs.get('no_modules'):
s = 'apitools.base.protorpclite.util.' + s
return s
msg = datetime.datetime(
msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second,
msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))
return repr(msg) | def MessageToRepr(msg, multiline=False, **kwargs) | Return a repr-style string for a protorpc message.
protorpc.Message.__repr__ does not return anything that could be considered
python code. Adding this function lets us print a protorpc message in such
a way that it could be pasted into code later, and used to compare against
other things.
Args:
msg: protorpc.Message, the message to be repr'd.
multiline: bool, True if the returned string should have each field
assignment on its own line.
**kwargs: {str:str}, Additional flags for how to format the string.
Known **kwargs:
shortstrings: bool, True if all string values should be
truncated at 100 characters, since when mocking the contents
typically don't matter except for IDs, and IDs are usually
less than 100 characters.
no_modules: bool, True if the long module name should not be printed with
each type.
Returns:
str, A string of valid python (assuming the right imports have been made)
that recreates the message passed into this function. | 2.488763 | 2.386808 | 1.042716 |
if include_fields is None:
return encoded_message
result = json.loads(encoded_message)
for field_name in include_fields:
try:
value = _GetField(message, field_name.split('.'))
nullvalue = None
if isinstance(value, list):
nullvalue = []
except KeyError:
raise exceptions.InvalidDataError(
'No field named %s in message of type %s' % (
field_name, type(message)))
_SetField(result, field_name.split('.'), nullvalue)
return json.dumps(result) | def _IncludeFields(encoded_message, message, include_fields) | Add the requested fields to the encoded message. | 2.797394 | 2.847429 | 0.982428 |
destination = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if destination is None:
return message
pair_field = message.field_by_name(destination)
if not isinstance(pair_field, messages.MessageField):
raise exceptions.InvalidDataFromServerError(
'Unrecognized fields must be mapped to a compound '
'message type.')
pair_type = pair_field.message_type
# TODO(craigcitro): Add more error checking around the pair
# type being exactly what we suspect (field names, etc).
if isinstance(pair_type.value, messages.MessageField):
new_values = _DecodeUnknownMessages(
message, json.loads(encoded_message), pair_type)
else:
new_values = _DecodeUnrecognizedFields(message, pair_type)
setattr(message, destination, new_values)
# We could probably get away with not setting this, but
# why not clear it?
setattr(message, '_Message__unrecognized_fields', {})
return message | def _DecodeUnknownFields(message, encoded_message) | Rewrite unknown fields in message into message.destination. | 5.643299 | 5.551672 | 1.016504 |
field_type = pair_type.value.type
new_values = []
all_field_names = [x.name for x in message.all_fields()]
for name, value_dict in six.iteritems(encoded_message):
if name in all_field_names:
continue
value = PyValueToMessage(field_type, value_dict)
if pair_type.value.repeated:
value = _AsMessageList(value)
new_pair = pair_type(key=name, value=value)
new_values.append(new_pair)
return new_values | def _DecodeUnknownMessages(message, encoded_message, pair_type) | Process unknown fields in encoded_message of a message type. | 3.393642 | 3.335556 | 1.017414 |
new_values = []
codec = _ProtoJsonApiTools.Get()
for unknown_field in message.all_unrecognized_fields():
# TODO(craigcitro): Consider validating the variant if
# the assignment below doesn't take care of it. It may
# also be necessary to check it in the case that the
# type has multiple encodings.
value, _ = message.get_unrecognized_field_info(unknown_field)
value_type = pair_type.field_by_name('value')
if isinstance(value_type, messages.MessageField):
decoded_value = DictToMessage(value, pair_type.value.message_type)
else:
decoded_value = codec.decode_field(
pair_type.value, value)
try:
new_pair_key = str(unknown_field)
except UnicodeEncodeError:
new_pair_key = protojson.ProtoJson().decode_field(
pair_type.key, unknown_field)
new_pair = pair_type(key=new_pair_key, value=decoded_value)
new_values.append(new_pair)
return new_values | def _DecodeUnrecognizedFields(message, pair_type) | Process unrecognized fields in message. | 4.696662 | 4.725658 | 0.993864 |
source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if source is None:
return message
# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
# the vanilla protojson-based copy function to avoid infinite recursion.
result = _CopyProtoMessageVanillaProtoJson(message)
pairs_field = message.field_by_name(source)
if not isinstance(pairs_field, messages.MessageField):
raise exceptions.InvalidUserInputError(
'Invalid pairs field %s' % pairs_field)
pairs_type = pairs_field.message_type
value_field = pairs_type.field_by_name('value')
value_variant = value_field.variant
pairs = getattr(message, source)
codec = _ProtoJsonApiTools.Get()
for pair in pairs:
encoded_value = codec.encode_field(value_field, pair.value)
result.set_unrecognized_field(pair.key, encoded_value, value_variant)
setattr(result, source, [])
return result | def _EncodeUnknownFields(message) | Remap unknown fields in message out of message.source. | 5.25918 | 5.195466 | 1.012263 |
try:
if field.repeated:
result = [base64.urlsafe_b64encode(byte) for byte in value]
else:
result = base64.urlsafe_b64encode(value)
complete = True
except TypeError:
result = value
complete = False
return CodecResult(value=result, complete=complete) | def _SafeEncodeBytes(field, value) | Encode the bytes in value as urlsafe base64. | 2.703897 | 2.480128 | 1.090225 |
try:
result = base64.urlsafe_b64decode(str(value))
complete = True
except TypeError:
result = value
complete = False
return CodecResult(value=result, complete=complete) | def _SafeDecodeBytes(unused_field, value) | Decode the urlsafe base64 value into bytes. | 3.488965 | 3.204275 | 1.088847 |
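For reference, the urlsafe base64 round trip these two helpers wrap, as a self-contained snippet.
```python
import base64

payload = b'\x00\xffbinary payload'
encoded = base64.urlsafe_b64encode(payload)   # bytes safe to embed in JSON or URLs
assert base64.urlsafe_b64decode(encoded) == payload
```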
if not encoded_message:
return message
decoded_message = json.loads(six.ensure_str(encoded_message))
for field in message.all_fields():
if (isinstance(field, messages.EnumField) and
field.name in decoded_message and
message.get_assigned_value(field.name) is None):
message.set_unrecognized_field(
field.name, decoded_message[field.name], messages.Variant.ENUM)
return message | def _ProcessUnknownEnums(message, encoded_message) | Add unknown enum values from encoded_message as unknown fields.
ProtoRPC diverges from the usual protocol buffer behavior here and
doesn't allow unknown fields. Throwing on unknown fields makes it
impossible to let servers add new enum values and stay compatible
with older clients, which isn't reasonable for us. We simply store
unrecognized enum values as unknown fields, and all is well.
Args:
message: Proto message we've decoded thus far.
encoded_message: JSON string we're decoding.
Returns:
message, with any unknown enums stored as unrecognized fields. | 3.169204 | 3.103385 | 1.021209 |
if not encoded_message:
return message
decoded_message = json.loads(six.ensure_str(encoded_message))
message_fields = [x.name for x in message.all_fields()] + list(
message.all_unrecognized_fields())
missing_fields = [x for x in decoded_message.keys()
if x not in message_fields]
for field_name in missing_fields:
message.set_unrecognized_field(field_name, decoded_message[field_name],
messages.Variant.STRING)
return message | def _ProcessUnknownMessages(message, encoded_message) | Store any remaining unknown fields as strings.
ProtoRPC currently ignores unknown values for which no type can be
determined (and logs a "No variant found" message). For the purposes
of reserializing, this is quite harmful (since it throws away
information). Here we simply add those as unknown fields of type
string (so that they can easily be reserialized).
Args:
message: Proto message we've decoded thus far.
encoded_message: JSON string we're decoding.
Returns:
message, with any remaining unrecognized fields saved. | 2.99649 | 2.809792 | 1.066445 |
raise exceptions.TypecheckError(
'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
if python_name not in enum_type.names():
raise exceptions.InvalidDataError(
'Enum value %s not a value for type %s' % (python_name, enum_type))
field_mappings = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})
_CheckForExistingMappings('enum', enum_type, python_name, json_name)
field_mappings[python_name] = json_name | def AddCustomJsonEnumMapping(enum_type, python_name, json_name,
package=None): # pylint: disable=unused-argument
if not issubclass(enum_type, messages.Enum) | Add a custom wire encoding for a given enum value.
This is primarily used in generated code, to handle enum values
which happen to be Python keywords.
Args:
enum_type: (messages.Enum) An enum type
python_name: (basestring) Python name for this value.
json_name: (basestring) JSON name to be used on the wire.
package: (NoneType, optional) No effect, exists for legacy compatibility. | 4.362165 | 4.712683 | 0.925622 |
raise exceptions.TypecheckError(
'Cannot set JSON field mapping for '
'non-message "%s"' % message_type)
try:
_ = message_type.field_by_name(python_name)
except KeyError:
raise exceptions.InvalidDataError(
'Field %s not recognized for type %s' % (
python_name, message_type))
field_mappings = _JSON_FIELD_MAPPINGS.setdefault(message_type, {})
_CheckForExistingMappings('field', message_type, python_name, json_name)
field_mappings[python_name] = json_name | def AddCustomJsonFieldMapping(message_type, python_name, json_name,
package=None): # pylint: disable=unused-argument
if not issubclass(message_type, messages.Message) | Add a custom wire encoding for a given message field.
This is primarily used in generated code, to handle field names
which happen to be Python keywords.
Args:
message_type: (messages.Message) A message type
python_name: (basestring) Python name for this value.
json_name: (basestring) JSON name to be used on the wire.
package: (NoneType, optional) No effect, exists for legacy compatibility. | 3.773499 | 4.182185 | 0.902279 |
return _FetchRemapping(enum_type, 'enum',
python_name=python_name, json_name=json_name,
mappings=_JSON_ENUM_MAPPINGS) | def GetCustomJsonEnumMapping(enum_type, python_name=None, json_name=None) | Return the appropriate remapping for the given enum, or None. | 6.304752 | 5.019571 | 1.256034 |
return _FetchRemapping(message_type, 'field',
python_name=python_name, json_name=json_name,
mappings=_JSON_FIELD_MAPPINGS) | def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None) | Return the appropriate remapping for the given field, or None. | 6.313835 | 4.924222 | 1.2822 |
if python_name and json_name:
raise exceptions.InvalidDataError(
'Cannot specify both python_name and json_name '
'for %s remapping' % mapping_type)
if not (python_name or json_name):
raise exceptions.InvalidDataError(
'Must specify either python_name or json_name for %s remapping' % (
mapping_type,))
field_remappings = mappings.get(type_name, {})
if field_remappings:
if python_name:
return field_remappings.get(python_name)
elif json_name:
if json_name in list(field_remappings.values()):
return [k for k in field_remappings
if field_remappings[k] == json_name][0]
return None | def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None,
mappings=None) | Common code for fetching a key or value from a remapping dict. | 2.187247 | 2.118421 | 1.032489 |
if mapping_type == 'field':
getter = GetCustomJsonFieldMapping
elif mapping_type == 'enum':
getter = GetCustomJsonEnumMapping
remapping = getter(message_type, python_name=python_name)
if remapping is not None and remapping != json_name:
raise exceptions.InvalidDataError(
'Cannot add mapping for %s "%s", already mapped to "%s"' % (
mapping_type, python_name, remapping))
remapping = getter(message_type, json_name=json_name)
if remapping is not None and remapping != python_name:
raise exceptions.InvalidDataError(
'Cannot add mapping for %s "%s", already mapped to "%s"' % (
mapping_type, json_name, remapping)) | def _CheckForExistingMappings(mapping_type, message_type,
python_name, json_name) | Validate that no mappings exist for the given values. | 2.213546 | 2.194616 | 1.008626 |
# This really needs to live in extra_types, but extra_types needs
# to import this file to be able to register codecs.
# TODO(craigcitro): Split out a codecs module and fix this ugly
# import.
from apitools.base.py import extra_types
def _IsRepeatedJsonValue(msg):
if isinstance(msg, extra_types.JsonArray):
return True
if isinstance(msg, extra_types.JsonValue) and msg.array_value:
return True
return False
if not _IsRepeatedJsonValue(msg):
raise ValueError('invalid argument to _AsMessageList')
if isinstance(msg, extra_types.JsonValue):
msg = msg.array_value
if isinstance(msg, extra_types.JsonArray):
msg = msg.entries
return msg | def _AsMessageList(msg) | Convert the provided list-as-JsonValue to a list. | 5.194544 | 4.979972 | 1.043087 |
value = message.get_assigned_value(field.name)
if not isinstance(value, messages.Message):
return False
try:
additional_properties = value.field_by_name('additionalProperties')
except KeyError:
return False
else:
return additional_properties.repeated | def _IsMap(message, field) | Returns whether the "field" is actually a map-type. | 3.658957 | 3.408329 | 1.073534 |
assert _IsMap(message, field)
map_message = message.get_assigned_value(field.name)
additional_properties = map_message.get_assigned_value(
'additionalProperties')
for kv_pair in additional_properties:
yield kv_pair.key, kv_pair.value | def _MapItems(message, field) | Yields the (key, value) pair of the map values. | 4.585144 | 4.082939 | 1.123001 |
# This is a primitive leaf, no errors found down this path.
return
field_names = message.all_unrecognized_fields()
if field_names:
# This message is malformed. Stop recursing and report it.
yield _edges, field_names
return
# Recurse through all fields in the current message.
for field in message.all_fields():
value = message.get_assigned_value(field.name)
if field.repeated:
for i, item in enumerate(value):
repeated_edge = ProtoEdge(EdgeType.REPEATED, field.name, i)
iter_ = UnrecognizedFieldIter(item, _edges + (repeated_edge,))
for (e, y) in iter_:
yield e, y
elif _IsMap(message, field):
for key, item in _MapItems(message, field):
map_edge = ProtoEdge(EdgeType.MAP, field.name, key)
iter_ = UnrecognizedFieldIter(item, _edges + (map_edge,))
for (e, y) in iter_:
yield e, y
else:
scalar_edge = ProtoEdge(EdgeType.SCALAR, field.name, None)
iter_ = UnrecognizedFieldIter(value, _edges + (scalar_edge,))
for (e, y) in iter_:
yield e, y | def UnrecognizedFieldIter(message, _edges=()): # pylint: disable=invalid-name
if not isinstance(message, messages.Message) | Yields the locations of unrecognized fields within "message".
If a sub-message is found to have unrecognized fields, that sub-message
will not be searched any further. We prune the search of the sub-message
because we assume it is malformed and further checks will not yield
productive errors.
Args:
message: The Message instance to search.
_edges: Internal arg for passing state.
Yields:
(edges_to_message, field_names):
edges_to_message: List[ProtoEdge], The edges (relative to "message")
describing the path to the sub-message where the unrecognized
fields were found.
field_names: List[Str], The names of the field(s) that were
unrecognized in the sub-message. | 2.856856 | 2.845767 | 1.003897 |
for decoder in _GetFieldCodecs(field, 'decoder'):
result = decoder(field, value)
value = result.value
if result.complete:
return value
if isinstance(field, messages.MessageField):
field_value = self.decode_message(
field.message_type, json.dumps(value))
elif isinstance(field, messages.EnumField):
value = GetCustomJsonEnumMapping(
field.type, json_name=value) or value
try:
field_value = super(
_ProtoJsonApiTools, self).decode_field(field, value)
except messages.DecodeError:
if not isinstance(value, six.string_types):
raise
field_value = None
else:
field_value = super(
_ProtoJsonApiTools, self).decode_field(field, value)
return field_value | def decode_field(self, field, value) | Decode the given JSON value.
Args:
field: a messages.Field for the field we're decoding.
value: a python value we'd like to decode.
Returns:
A value suitable for assignment to field. | 3.973898 | 4.006192 | 0.991939 |
for encoder in _GetFieldCodecs(field, 'encoder'):
result = encoder(field, value)
value = result.value
if result.complete:
return value
if isinstance(field, messages.EnumField):
if field.repeated:
remapped_value = [GetCustomJsonEnumMapping(
field.type, python_name=e.name) or e.name for e in value]
else:
remapped_value = GetCustomJsonEnumMapping(
field.type, python_name=value.name)
if remapped_value:
return remapped_value
if (isinstance(field, messages.MessageField) and
not isinstance(field, message_types.DateTimeField)):
value = json.loads(self.encode_message(value))
return super(_ProtoJsonApiTools, self).encode_field(field, value) | def encode_field(self, field, value) | Encode the given value as JSON.
Args:
field: a messages.Field for the field we're encoding.
value: a value for field.
Returns:
A python value suitable for json.dumps. | 4.41792 | 4.333578 | 1.019462 |
# Use slices instead of an iterator when we have a flat list
if isinstance(iterator, list):
length = len(iterator)
for i in range(int(math.ceil(old_div(float(length), n)))):
yield iterator[i * n: (i + 1) * n]
else:
accumulator = []
for item in iterator:
accumulator.append(item)
if len(accumulator) == n:
yield accumulator
accumulator = []
# Yield what's left
if len(accumulator) != 0:
yield accumulator | def group_iter(iterator, n=2) | Given an iterator, it returns sub-lists made of n items.
(except the last, which can have len < n) | 3.068973 | 3.084472 | 0.994975 |
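Assuming the group_iter above is importable, a quick check of both the list (sliced) and plain-iterator (accumulated) paths:
```python
print(list(group_iter([1, 2, 3, 4, 5], n=2)))  # [[1, 2], [3, 4], [5]]
print(list(group_iter(iter(range(5)), n=3)))   # [[0, 1, 2], [3, 4]]
```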
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__ | def memoize(f) | Memoization decorator for a function taking one or more arguments. | 1.954061 | 1.915194 | 1.020294 |
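Usage sketch for the memoize decorator above: the wrapped function runs once per distinct argument tuple and later calls hit the cache.
```python
@memoize
def slow_add(a, b):
    print('computing %s + %s' % (a, b))
    return a + b

slow_add(1, 2)   # computes, prints, caches, returns 3
slow_add(1, 2)   # served from the cache, no print
```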
class memodict(dict):
def __missing__(self, key):
ret = self[key] = f(key)
return ret
return memodict().__getitem__ | def memoize_single_argument(f) | Memoization decorator for a function taking a single argument | 2.290882 | 2.344671 | 0.977059 |
return getattr(
importlib.import_module(
re.sub(
r"\.[^.]+$",
"",
taskpath)),
re.sub(
r"^.*\.",
"",
taskpath)) | def load_class_by_path(taskpath) | Given a taskpath, returns the main task class. | 3.730915 | 3.346429 | 1.114895 |
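Usage sketch: the two regexes split the path into a module part and an attribute part, so a stdlib class resolves like this (assuming the function above is importable):
```python
cls = load_class_by_path('datetime.datetime')
print(cls(2020, 1, 1))  # 2020-01-01 00:00:00
```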
import socket
import errno
s = socket.socket()
if timeout:
from time import time as now
# the time module is needed to compute the timeout shared between the two exception handlers
end = now() + timeout
while True:
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
s.connect((server, port))
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except Exception as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
if not isinstance(err.args, tuple) or err.args[0] != errno.ETIMEDOUT:
pass # raise
else:
s.close()
return True
time.sleep(poll_interval) | def wait_for_net_service(server, port, timeout=None, poll_interval=0.1) | Wait for network service to appear
@param timeout: in seconds, if None or 0 wait forever
@return: True or False; if timeout is None it may return only True or
throw an unhandled network exception | 3.663835 | 3.646083 | 1.004869 |
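Usage sketch, with a hypothetical local Redis on its default port:
```python
if wait_for_net_service('127.0.0.1', 6379, timeout=5):
    print('service is up')
else:
    print('timed out waiting for the service')
```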
from .queue import Queue
queue_obj = Queue(queue)
queue_obj.enqueue_raw_jobs(params_list, **kwargs) | def queue_raw_jobs(queue, params_list, **kwargs) | Queue some jobs on a raw queue | 3.969004 | 4.142934 | 0.958018 |
if len(params_list) == 0:
return []
if queue is None:
task_def = context.get_current_config().get("tasks", {}).get(main_task_path) or {}
queue = task_def.get("queue", "default")
from .queue import Queue
queue_obj = Queue(queue)
if queue_obj.is_raw:
raise Exception("Can't queue regular jobs on a raw queue")
all_ids = []
for params_group in group_iter(params_list, n=batch_size):
context.metric("jobs.status.queued", len(params_group))
# Insert the job in MongoDB
job_ids = Job.insert([{
"path": main_task_path,
"params": params,
"queue": queue,
"datequeued": datetime.datetime.utcnow(),
"status": "queued"
} for params in params_group], w=1, return_jobs=False)
all_ids += job_ids
queue_obj.notify(len(all_ids))
set_queues_size({queue: len(all_ids)})
return all_ids | def queue_jobs(main_task_path, params_list, queue=None, batch_size=1000) | Queue multiple jobs on a regular queue | 3.869026 | 3.807317 | 1.016208 |
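A hedged usage sketch (assumes a configured MRQ context; tasks.io.Fetch is a hypothetical task path):
```python
job_ids = queue_jobs(
    'tasks.io.Fetch',                             # hypothetical task path
    [{'url': 'http://a'}, {'url': 'http://b'}],   # one dict of params per job
    queue='fetch'
)
print(len(job_ids))  # 2
```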
if self.id is None:
return self
if full_data is True:
fields = None
elif isinstance(full_data, dict):
fields = full_data
else:
fields = {
"_id": 0,
"path": 1,
"params": 1,
"status": 1,
"retry_count": 1,
}
if start:
self.datestarted = datetime.datetime.utcnow()
self.set_data(self.collection.find_and_modify(
{
"_id": self.id,
"status": {"$nin": ["cancel", "abort", "maxretries"]}
},
{"$set": {
"status": "started",
"datestarted": self.datestarted,
"worker": self.worker.id
},
"$unset": {
"dateexpires": 1 # we don't want started jobs to expire unexpectedly
}},
projection=fields)
)
context.metric("jobs.status.started")
else:
self.set_data(self.collection.find_one({
"_id": self.id
}, projection=fields))
if self.data is None:
context.log.info(
"Job %s not found in MongoDB or status was cancelled!" %
self.id)
self.stored = True
return self | def fetch(self, start=False, full_data=True) | Get the current job data and possibly flag it as started. | 3.722093 | 3.588688 | 1.037174 |
if not self.saved and self.data and "progress" in self.data:
# TODO should we save more fields?
self.collection.update({"_id": self.id}, {"$set": {
"progress": self.data["progress"]
}})
self.saved = True | def save(self) | Persists the current job metadata to MongoDB. Will be called at each worker report. | 5.066667 | 3.976207 | 1.274246 |
now = datetime.datetime.utcnow()
for data in jobs_data:
if data["status"] == "started":
data["datestarted"] = now
no_storage = (statuses_no_storage is not None) and ("started" in statuses_no_storage)
if no_storage and return_jobs:
for data in jobs_data:
data["_id"] = ObjectId() # Give the job a temporary ID
else:
inserted = context.connections.mongodb_jobs.mrq_jobs.insert(
jobs_data,
manipulate=True,
w=w,
j=j
)
if return_jobs:
jobs = []
for data in jobs_data:
job = cls(data["_id"], queue=queue)
job.set_data(data)
job.statuses_no_storage = statuses_no_storage
job.stored = (not no_storage)
if data["status"] == "started":
job.datestarted = data["datestarted"]
jobs.append(job)
return jobs
else:
return inserted | def insert(cls, jobs_data, queue=None, statuses_no_storage=None, return_jobs=True, w=None, j=None) | Insert one or more jobs into MongoDB | 2.79873 | 2.778944 | 1.00712 |
original_exception = sys.exc_info()
if original_exception[0] is not None:
exc.original_exception = original_exception | def _attach_original_exception(self, exc) | Often, a retry will be raised inside an "except" block.
This keeps track of the first exception for debugging purposes. | 3.72169 | 3.794581 | 0.980791 |
max_retries = max_retries
if max_retries is None:
max_retries = self.max_retries
if self.data.get("retry_count", 0) >= max_retries:
raise MaxRetriesInterrupt()
exc = RetryInterrupt()
exc.queue = queue or self.queue or self.data.get("queue") or "default"
exc.retry_count = self.data.get("retry_count", 0) + 1
exc.delay = delay
if exc.delay is None:
exc.delay = self.retry_delay
self._attach_original_exception(exc)
raise exc | def retry(self, queue=None, delay=None, max_retries=None) | Marks the current job as needing to be retried. Interrupts it. | 3.058318 | 2.929304 | 1.044042 |
if not queue:
if not self.data or not self.data.get("queue"):
self.fetch(full_data={"_id": 0, "queue": 1, "path": 1})
queue = self.data["queue"]
self._save_status("queued", updates={
"queue": queue,
"datequeued": datetime.datetime.utcnow(),
"retry_count": retry_count
}) | def requeue(self, queue=None, retry_count=0) | Requeues the current job. Doesn't interrupt it | 4.988682 | 5.033731 | 0.991051 |
if self.data is None:
return
context.log.debug("Starting %s(%s)" % (self.data["path"], self.data["params"]))
task_class = load_class_by_path(self.data["path"])
self.task = task_class()
self.task.is_main_task = True
if not self.task.max_concurrency:
result = self.task.run_wrapped(self.data["params"])
else:
if self.task.max_concurrency > 1:
raise NotImplementedError()
lock = None
try:
# TODO: implement a semaphore
lock = context.connections.redis.lock(self.redis_max_concurrency_key, timeout=self.timeout + 5)
if not lock.acquire(blocking=True, blocking_timeout=0):
raise MaxConcurrencyInterrupt()
result = self.task.run_wrapped(self.data["params"])
finally:
try:
if lock:
lock.release()
except LockError:
pass
self.save_success(result)
if context.get_current_config().get("trace_greenlets"):
# TODO: this is not the exact greenlet_time measurement because it doesn't
# take into account the last switch's time. This is why we force a last switch.
# This does cause a performance overhead. Instead, we should print the
# last timing directly from the trace() function in context?
# pylint: disable=protected-access
gevent.sleep(0)
current_greenlet = gevent.getcurrent()
t = (datetime.datetime.utcnow() - self.datestarted).total_seconds()
context.log.debug(
"Job %s success: %0.6fs total, %0.6fs in greenlet, %s switches" %
(self.id,
t,
current_greenlet._trace_time,
current_greenlet._trace_switches - 1)
)
else:
context.log.debug("Job %s success: %0.6fs total" % (
self.id, (datetime.datetime.utcnow() -
self.datestarted).total_seconds()
))
return result | def perform(self) | Loads and starts the main task for this job, then saves the result. | 4.75246 | 4.532958 | 1.048424 |
end_time = None
if timeout:
end_time = time.time() + timeout
while end_time is None or time.time() < end_time:
job_data = self.collection.find_one({
"_id": ObjectId(self.id),
"status": {"$nin": ["started", "queued"]}
}, projection=({
"_id": 0,
"result": 1,
"status": 1
} if not full_data else None))
if job_data:
return job_data
time.sleep(poll_interval)
raise Exception("Waited for job result for %s seconds, timeout." % timeout) | def wait(self, poll_interval=1, timeout=None, full_data=False) | Wait for this job to finish. | 2.846248 | 2.731022 | 1.042191 |
current_greenletid = id(gevent.getcurrent())
trace = "Job killed: %s" % reason
for greenlet, job in context._GLOBAL_CONTEXT["greenlets"].values():
greenletid = id(greenlet)
if job and job.id == self.id and greenletid != current_greenletid:
greenlet.kill(block=block)
trace += "\n\n--- Greenlet %s ---\n" % greenletid
trace += "".join(traceback.format_stack(greenlet.gr_frame))
context._GLOBAL_CONTEXT["greenlets"].pop(greenletid, None)
if reason == "timeout" and self.data["status"] != "timeout":
updates = {
"exceptiontype": "TimeoutInterrupt",
"traceback": trace
}
self._save_status("timeout", updates=updates, exception=False) | def kill(self, block=False, reason="unknown") | Forcefully kill all greenlets associated with this job | 5.074883 | 4.565139 | 1.11166 |
failure_date = datetime.datetime.utcnow()
new_history = {
"date": failure_date,
"status": status,
"exceptiontype": job_exc.__name__
}
traces = trace.split("---- Original exception: -----")
if len(traces) > 1:
new_history["original_traceback"] = traces[1]
worker = context.get_current_worker()
if worker:
new_history["worker"] = worker.id
new_history["traceback"] = traces[0]
self.collection.update({
"_id": self.id
}, {"$push": {"traceback_history": new_history}}) | def _save_traceback_history(self, status, trace, job_exc) | Create traceback history or add a new traceback to history. | 3.804832 | 3.680728 | 1.033717 |
urllib.parse.clear_cache()
re.purge()
linecache.clearcache()
copyreg.clear_extension_cache()
if hasattr(fnmatch, "purge"):
fnmatch.purge() # pylint: disable=no-member
elif hasattr(fnmatch, "_purge"):
fnmatch._purge() # pylint: disable=no-member
if hasattr(encodings, "_cache") and len(encodings._cache) > 0:
encodings._cache = {}
for handler in context.log.handlers:
handler.flush() | def trace_memory_clean_caches(self) | Avoid polluting results with some builtin python caches | 4.324944 | 4.075519 | 1.061201 |
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
gc.collect()
self._memory_start = self.worker.get_memory()["total"] | def trace_memory_start(self) | Starts measuring memory consumption | 10.825061 | 9.842453 | 1.099834 |
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
trace_type = context.get_current_config()["trace_memory_type"]
if trace_type:
filename = '%s/%s-%s.png' % (
context.get_current_config()["trace_memory_output_dir"],
trace_type,
self.id)
chain = objgraph.find_backref_chain(
random.choice(
objgraph.by_type(trace_type)
),
objgraph.is_proper_module
)
objgraph.show_chain(chain, filename=filename)
del filename
del chain
gc.collect()
self._memory_stop = self.worker.get_memory()["total"]
diff = self._memory_stop - self._memory_start
context.log.debug("Memory diff for job %s : %s" % (self.id, diff))
# We need to update it later than the results, we need them off memory
# already.
self.collection.update(
{"_id": self.id},
{"$set": {
"memory_diff": diff
}},
w=1
) | def trace_memory_stop(self) | Stops measuring memory consumption | 5.015399 | 4.990148 | 1.00506 |
all_queues_from_mongodb = Queue.all_known(sources=("jobs", ))
idprefix = self.id
if not idprefix.endswith("/"):
idprefix += "/"
return {q for q in all_queues_from_mongodb if q.startswith(idprefix)} | def get_known_subqueues(self) | Returns all known subqueues | 10.160137 | 9.6026 | 1.058061 |
if self.id.endswith("/"):
subqueues = self.get_known_subqueues()
if len(subqueues) == 0:
return 0
else:
with context.connections.redis.pipeline(transaction=False) as pipe:
for subqueue in subqueues:
pipe.get("queuesize:%s" % subqueue)
return [int(size or 0) for size in pipe.execute()]
else:
return int(context.connections.redis.get("queuesize:%s" % self.id) or 0) | def size(self) | Returns the total number of queued jobs on the queue | 3.631152 | 3.250754 | 1.117018 |
return [str(x["_id"]) for x in self.collection.find(
{"status": "queued"},
sort=[("_id", -1 if self.is_reverse else 1)],
projection={"_id": 1})
] | def list_job_ids(self, skip=0, limit=20) | Returns a list of job ids on a queue | 4.241097 | 4.134817 | 1.025704 |
if job_class is None:
from .job import Job
job_class = Job
count = 0
# TODO: remove _id sort after full migration to datequeued
sort_order = [("datequeued", -1 if self.is_reverse else 1), ("_id", -1 if self.is_reverse else 1)]
# MongoDB optimization: with many jobs it's faster to fetch the IDs first and do the atomic update second
# Some jobs may have been stolen by another worker in the meantime but it's a balance (should we over-fetch?)
# job_ids = None
# if max_jobs > 5:
# job_ids = [x["_id"] for x in self.collection.find(
# self.base_dequeue_query,
# limit=max_jobs,
# sort=sort_order,
# projection={"_id": 1}
# )]
# if len(job_ids) == 0:
# return
for i in range(max_jobs): # if job_ids is None else len(job_ids)):
# if job_ids is not None:
# query = {
# "status": "queued",
# "_id": job_ids[i]
# }
# sort_order = None
# else:
query = self.base_dequeue_query
job_data = self.collection.find_one_and_update(
query,
{"$set": {
"status": "started",
"datestarted": datetime.datetime.utcnow(),
"worker": worker.id if worker else None
}, "$unset": {
"dateexpires": 1 # we don't want started jobs to expire unexpectedly
}},
sort=sort_order,
return_document=ReturnDocument.AFTER,
projection={
"_id": 1,
"path": 1,
"params": 1,
"status": 1,
"retry_count": 1,
"queue": 1,
"datequeued": 1
}
)
if not job_data:
break
if worker:
worker.status = "spawn"
count += 1
context.metric("queues.%s.dequeued" % job_data["queue"], 1)
job = job_class(job_data["_id"], queue=self.id, start=False)
job.set_data(job_data)
job.datestarted = datetime.datetime.utcnow()
context.metric("jobs.status.started")
yield job
context.metric("queues.all.dequeued", count) | def dequeue_jobs(self, max_jobs=1, job_class=None, worker=None) | Fetch a maximum of max_jobs from this queue | 3.652496 | 3.630853 | 1.005961 |
return Response(
json.dumps(
dict(
*args,
**kwargs),
cls=MongoJSONEncoder),
mimetype='application/json') | def jsonify(*args, **kwargs) | jsonify with support for MongoDB ObjectId | 4.032166 | 3.343535 | 1.205959 |
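MongoJSONEncoder itself is not shown in this table; a minimal sketch of what such an ObjectId-aware encoder presumably does:
```python
import json
from bson import ObjectId  # assumption: pymongo/bson is installed

class ObjectIdJSONEncoder(json.JSONEncoder):
    def default(self, o):
        # Serialize ObjectId values as strings; defer everything else.
        if isinstance(o, ObjectId):
            return str(o)
        return super(ObjectIdJSONEncoder, self).default(o)

print(json.dumps({'_id': ObjectId()}, cls=ObjectIdJSONEncoder))
```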
cfg = get_current_config()
return username == cfg["dashboard_httpauth"].split(
":")[0] and pwd == cfg["dashboard_httpauth"].split(":")[1] | def check_auth(username, pwd) | This function is called to check if a username /
password combination is valid. | 6.791862 | 6.146551 | 1.104987 |
while True:
try:
self.manage()
except Exception as e: # pylint: disable=broad-except
log.error("When reporting: %s" % e)
finally:
time.sleep(self.config["report_interval"]) | def greenlet_manage(self) | This greenlet always runs in background to update current status
in MongoDB every N seconds. | 4.624247 | 4.148387 | 1.114709 |
start_time = time.time()
log.debug("Starting queue stats...")
# Fetch all known queues
queues = [Queue(q) for q in Queue.all_known()]
new_queues = {queue.id for queue in queues}
old_queues = set(self.queue_etas.keys())
for deleted_queue in old_queues.difference(new_queues):
self.queue_etas.pop(deleted_queue)
t = time.time()
stats = {}
for queue in queues:
cnt = queue.count_jobs_to_dequeue()
eta = self.queue_etas[queue.id].next(cnt, t=t)
# Number of jobs to dequeue, ETA, Time of stats
stats[queue.id] = "%d %s %d" % (cnt, eta if eta is not None else "N", int(t))
with connections.redis.pipeline(transaction=True) as pipe:
if random.randint(0, 100) == 0 or len(stats) == 0:
pipe.delete(self.redis_queuestats_key)
if len(stats) > 0:
pipe.hmset(self.redis_queuestats_key, stats)
pipe.execute()
log.debug("... done queue stats in %0.4fs" % (time.time() - start_time)) | def queuestats(self) | Compute ETAs for every known queue & subqueue | 3.86062 | 3.606116 | 1.070576 |
log.debug("Starting orchestration run for worker group %s" % group["_id"])
agents = self.fetch_worker_group_agents(group)
# Evaluate what workers are currently, rightfully there. They won't be touched.
for agent in agents:
desired_workers = self.get_desired_workers_for_agent(group, agent)
agent["new_desired_workers"] = []
agent["new_desired_workers"] = desired_workers
for agent in agents:
if sorted(agent["new_desired_workers"]) != sorted(agent.get("desired_workers", [])):
connections.mongodb_jobs.mrq_agents.update_one({"_id": agent["_id"]}, {"$set": {
"desired_workers": agent["new_desired_workers"]
}})
# Remember the date of the last successful orchestration (will be reported)
self.dateorchestrated = datetime.datetime.utcnow()
log.debug("Orchestration finished.") | def do_orchestrate(self, group) | Manage the desired workers of *all* the agents in the given group | 5.390863 | 5.082239 | 1.060726 |
from .context import get_current_job, set_current_job, log
if not pool_size:
return [func(*args) for args in iterable]
counter = itertools_count()
current_job = get_current_job()
def inner_func(*args):
next(counter)
if current_job:
set_current_job(current_job)
try:
ret = func(*args)
except Exception as exc:
trace = traceback.format_exc()
exc.subpool_traceback = trace
raise
if current_job:
set_current_job(None)
return ret
def inner_iterable():
if current_job:
set_current_job(current_job)
for x in iterable:
yield x
if current_job:
set_current_job(None)
start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
ret = pool.map(inner_func, inner_iterable())
pool.join(raise_error=True)
total_time = time.time() - start_time
log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
return ret | def subpool_map(pool_size, func, iterable) | Starts a Gevent pool and runs a map. Takes care of setting current_job and cleaning up. | 2.83692 | 2.657787 | 1.067399 |
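A hedged usage sketch (assumes a working gevent/MRQ context and a non-zero pool_size, in which case each element of the iterable is passed to func):
```python
def fetch(url):
    # placeholder for real network I/O
    return len(url)

results = subpool_map(10, fetch, ['http://a', 'http://bb'])
print(results)  # [8, 9], computed on a pool of up to 10 greenlets
```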
from .context import get_current_job, set_current_job, log
if not pool_size:
for args in iterable:
yield func(*args)
return
counter = itertools_count()
current_job = get_current_job()
def inner_func(*args):
next(counter)
if current_job:
set_current_job(current_job)
try:
ret = func(*args)
except Exception as exc:
trace = traceback.format_exc()
exc.subpool_traceback = trace
raise
if current_job:
set_current_job(None)
return ret
def inner_iterable():
if current_job:
set_current_job(current_job)
for x in iterable:
yield x
if current_job:
set_current_job(None)
start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
if unordered:
iterator = pool.imap_unordered(inner_func, inner_iterable(), maxsize=buffer_size or pool_size)
else:
iterator = pool.imap(inner_func, inner_iterable())
for x in iterator:
if flatten:
for y in x:
yield y
else:
yield x
pool.join(raise_error=True)
total_time = time.time() - start_time
log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time)) | def subpool_imap(pool_size, func, iterable, flatten=False, unordered=False, buffer_size=None) | Generator version of subpool_map. Should be used with unordered=True for optimal performance | 2.674273 | 2.64537 | 1.010926 |
params = task.get("params")
if params:
params = json.dumps(sorted(list(task["params"].items()), key=lambda x: x[0])) # pylint: disable=no-member
full = [str(task.get(x)) for x in ["path", "interval", "dailytime", "weekday", "monthday", "queue"]]
full.extend([str(params)])
return " ".join(full) | def _hash_task(task) | Returns a unique hash for identify a task and its params | 5.300741 | 4.611589 | 1.149439 |
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
if len(tasks_by_hash) != len(self.config_tasks):
raise Exception("Fatal error: there was a hash duplicate in the scheduled tasks config.")
for h, task in tasks_by_hash.items():
if task.get("monthday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h)
if task.get("weekday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h)
if not task.get("monthday") and not task.get("weekday") and not task.get("dailytime") and not task.get("interval"):
raise Exception("Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h) | def check_config_integrity(self) | Make sure the scheduler config is valid | 3.084219 | 2.928855 | 1.053046 |
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
for task in self.all_tasks:
if tasks_by_hash.get(task["hash"]):
del tasks_by_hash[task["hash"]]
else:
self.collection.remove({"_id": task["_id"]})
log.debug("Scheduler: deleted %s" % task["hash"])
# What remains are the new ones to be inserted
for h, task in tasks_by_hash.items():
task["hash"] = h
task["datelastqueued"] = datetime.datetime.fromtimestamp(0)
if task.get("dailytime"):
# Because MongoDB can store datetimes but not times,
# we add today's date to the dailytime.
# The date part will be discarded in check()
task["dailytime"] = datetime.datetime.combine(
datetime.datetime.utcnow(), task["dailytime"])
task["interval"] = 3600 * 24
# Avoid to queue task in check() if today dailytime is already passed
if datetime.datetime.utcnow().time() > task["dailytime"].time():
task["datelastqueued"] = datetime.datetime.utcnow()
self.collection.find_one_and_update({"hash": task["hash"]}, {"$set": task}, upsert=True)
log.debug("Scheduler: added %s" % task["hash"]) | def sync_config_tasks(self) | Performs the first sync of a list of tasks, often defined in the config file. | 3.765112 | 3.78726 | 0.994152 |
if redis is None:
redis = connections.redis
# http://redis.io/commands/INCR
now = int(time.time())
k = "ratelimit:%s:%s" % (key, now // per)
with redis.pipeline(transaction=True) as pipeline:
pipeline.incr(k, 1)
pipeline.expire(k, per + 10)
value = pipeline.execute()
current = int(value[0]) - 1
if current >= limit:
return 0
else:
return limit - current | def ratelimit(key, limit, per=1, redis=None) | Returns the number of actions still available in the current
period of per seconds. If zero, the rate limit was already reached. | 3.016551 | 3.017033 | 0.99984 |
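Usage sketch, assuming connections.redis is configured: allow at most 10 actions per 60-second window for a given key.
```python
remaining = ratelimit('api:user:42', limit=10, per=60)
if remaining == 0:
    raise Exception('Rate limit reached for this key, try again later')
```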
# Nothing to change!
if not config["print_mongodb"] and not config["trace_io"]:
return
from termcolor import cprint
# Print because we are very early and log() may not be ready yet.
cprint("Monkey-patching MongoDB methods...", "white")
def gen_monkey_patch(base_object, method):
base_method = getattr(base_object, method)
def mrq_monkey_patched(self, *args, **kwargs):
if config["trace_io"]:
comment = "mrq"
worker = get_current_worker()
job = get_current_job()
if job:
job.set_current_io({
"type": "mongodb.%s" % method,
"data": {
"collection": self.full_name
}
# Perf issue? All MongoDB data will get jsonified!
# "data": json.dumps(args)[0:300]
})
comment = {"job": job.id}
elif worker:
comment = {"worker": worker.id}
# Tag potentially expensive queries with their job id for easier debugging
if method in ["find", "find_and_modify", "count", "update_many", "update", "delete_many"]:
if len(args) > 0 and isinstance(args[0], dict) and "$comment" not in args[0]:
query = copy.copy(args[0])
query["$comment"] = comment
args = (query, ) + args[1:]
if config["print_mongodb"]:
if self.full_name in config.get("print_mongodb_hidden_collections", []):
cprint("[MONGO] %s.%s%s %s" % (
self.full_name, method, "-hidden-", kwargs
), "magenta")
else:
cprint("[MONGO] %s.%s%s %s" % (
self.full_name, method, args, kwargs
), "magenta")
if config.get("mongodb_pre_hook"):
config.get("mongodb_pre_hook")({
"collection": self.full_name,
"method": method,
"args": args,
"kwargs": kwargs,
"client": self.database.client,
"job": job
})
start_time = time.time()
ret = False
try:
ret = base_method(self, *args, **kwargs)
finally:
stop_time = time.time()
job = None
if config["trace_io"]:
job = get_current_job()
if job:
job.set_current_io(None)
if config.get("mongodb_post_hook"):
config.get("mongodb_post_hook")({
"collection": self.full_name,
"method": method,
"args": args,
"kwargs": kwargs,
"client": self.database.client,
"job": job,
"result": ret,
"time": stop_time - start_time
})
return ret
# Needed to avoid breaking mongokit
mrq_monkey_patched.__doc__ = method.__doc__
return mrq_monkey_patched
from pymongo.collection import Collection
pymongo_method_whitelist = (
"bulk_write",
"find", "find_one_and_delete", "find_one_and_replace", "find_one_and_update",
"update", "update_one", "update_many",
"drop",
"count",
"save",
"insert", "insert_one", "insert_many",
"replace_one",
"remove", "delete_one", "delete_many",
"find_and_modify",
"parallel_scan",
"options",
"aggregate",
"group", "distinct",
"rename",
"map_reduce", "inline_map_reduce",
"create_indexes", "create_index", "ensure_index", "drop_index", "reindex", "list_indexes"
)
for method in pymongo_method_whitelist:
if hasattr(Collection, method) and getattr(Collection, method).__name__ != "mrq_monkey_patched":
setattr(Collection, method, gen_monkey_patch(Collection, method)) | def patch_pymongo(config) | Monkey-patch pymongo's collections to add some logging | 2.837882 | 2.796557 | 1.014777 |
# Accept float(0.1), "0.1", "0.1-0.2"
def sleep():
if isinstance(seconds, float):
time.sleep(seconds)
elif isinstance(seconds, basestring):
# pylint: disable=maybe-no-member
if "-" in seconds:
time.sleep(random.uniform(
float(seconds.split("-")[0]),
float(seconds.split("-")[1])
))
else:
time.sleep(float(seconds))
def _patched_method(old_method, *args, **kwargs):
sleep()
return old_method(*args, **kwargs)
socket_methods = [
"send", "sendall", "sendto", "recv", "recvfrom", "recvfrom_into", "recv_into",
"connect", "connect_ex", "close"
]
from socket import socket as _socketmodule
from gevent.socket import socket as _geventmodule
from gevent.ssl import SSLSocket as _sslmodule # pylint: disable=no-name-in-module
for method in socket_methods:
patch_method(_socketmodule, method, _patched_method)
patch_method(_geventmodule, method, _patched_method)
patch_method(_sslmodule, method, _patched_method) | def patch_network_latency(seconds=0.01) | Add random latency to all I/O operations | 2.529671 | 2.554373 | 0.99033 |
self.report_worker(w=1)
while True:
try:
self.report_worker()
except Exception as e: # pylint: disable=broad-except
self.log.error("When reporting: %s" % e)
finally:
time.sleep(self.config["report_interval"]) | def greenlet_report(self) | This greenlet always runs in background to update current status
in MongoDB every N seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead? | 4.517224 | 4.012567 | 1.125769 |
while True:
try:
self.flush_logs()
except Exception as e: # pylint: disable=broad-except
self.log.error("When flushing logs: %s" % e)
finally:
time.sleep(self.config["report_interval"]) | def greenlet_logs(self) | This greenlet always runs in background to update current
logs in MongoDB every 10 seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead? | 4.049562 | 3.586531 | 1.129103 |
try:
queues = []
prefixes = [q for q in self.config["queues"] if q.endswith("/")]
known_subqueues = Queue.all_known(prefixes=prefixes)
for q in self.config["queues"]:
queues.append(Queue(q))
if q.endswith("/"):
for subqueue in known_subqueues:
if subqueue.startswith(q):
queues.append(Queue(subqueue))
self.queues = queues
except Exception as e: # pylint: disable=broad-except
self.log.error("When refreshing subqueues: %s", e)
if fatal:
raise | def refresh_queues(self, fatal=False) | Updates the list of currently known queues and subqueues | 3.346199 | 3.113523 | 1.074731 |
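A small, self-contained illustration of the prefix expansion performed above (queue names are made up for the example):

configured = ["default", "crawls/"]                             # what the worker was started with
known_subqueues = ["crawls/us", "crawls/fr", "exports/daily"]   # as returned by Queue.all_known()
expanded = []
for q in configured:
    expanded.append(q)
    if q.endswith("/"):
        expanded.extend(s for s in known_subqueues if s.startswith(q))
# expanded == ["default", "crawls/", "crawls/us", "crawls/fr"]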
greenlets = []
for greenlet in list(self.gevent_pool):
g = {}
short_stack = []
stack = traceback.format_stack(greenlet.gr_frame)
for s in stack[1:]:
if "/gevent/hub.py" in s:
break
short_stack.append(s)
g["stack"] = short_stack
job = get_current_job(id(greenlet))
if job:
job.save()
if job.data:
g["path"] = job.data["path"]
g["datestarted"] = job.datestarted
g["id"] = str(job.id)
g["time"] = getattr(greenlet, "_trace_time", 0)
g["switches"] = getattr(greenlet, "_trace_switches", None)
# pylint: disable=protected-access
if job._current_io is not None:
g["io"] = job._current_io
greenlets.append(g)
# When faking network latency, all sockets are affected, including OS ones, but
# we still want reliable reports so this is disabled.
if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]):
cpu = {
"user": 0,
"system": 0,
"percent": 0
}
mem = {"rss": 0, "swap": 0, "total": 0}
else:
cpu_times = self.process.cpu_times()
cpu = {
"user": cpu_times.user,
"system": cpu_times.system,
"percent": self.process.cpu_percent(0)
}
mem = self.get_memory()
# Avoid sharing passwords or sensitive config!
whitelisted_config = [
"max_jobs",
"max_memory"
"greenlets",
"processes",
"queues",
"dequeue_strategy",
"scheduler",
"name",
"local_ip",
"external_ip",
"agent_id",
"worker_group"
]
io = None
if self._traced_io:
io = {}
for k, v in iteritems(self._traced_io):
if k == "total":
io[k] = v
else:
io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1])
used_pool_slots = len(self.gevent_pool)
used_avg = self.pool_usage_average.next(used_pool_slots)
return {
"status": self.status,
"config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config},
"done_jobs": self.done_jobs,
"usage_avg": used_avg / self.pool_size,
"datestarted": self.datestarted,
"datereported": datetime.datetime.utcnow(),
"name": self.name,
"io": io,
"_id": str(self.id),
"process": {
"pid": self.process.pid,
"cpu": cpu,
"mem": mem
# https://code.google.com/p/psutil/wiki/Documentation
# get_open_files
# get_connections
# get_num_ctx_switches
# get_num_fds
# get_io_counters
# get_nice
},
"jobs": greenlets
} | def get_worker_report(self, with_memory=False) | Returns a dict containing all the data we can gather about the current status of the worker and
its jobs. | 3.585325 | 3.539154 | 1.013046 |
while True:
now = datetime.datetime.utcnow()
for greenlet in list(self.gevent_pool):
job = get_current_job(id(greenlet))
if job and job.timeout and job.datestarted:
expires = job.datestarted + datetime.timedelta(seconds=job.timeout)
if now > expires:
job.kill(block=False, reason="timeout")
time.sleep(1) | def greenlet_timeouts(self) | This greenlet kills jobs in other greenlets when they time out. | 4.682969 | 4.024083 | 1.163736 |
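The expiry test above reduces to a single datetime comparison; an isolated sketch with illustrative values:

import datetime

datestarted = datetime.datetime.utcnow() - datetime.timedelta(seconds=120)  # job began 120s ago
timeout = 60                                                                 # job.timeout
expires = datestarted + datetime.timedelta(seconds=timeout)
expired = datetime.datetime.utcnow() > expires
# expired is True here, so the greenlet would call job.kill(block=False, reason="timeout")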
if self.config["processes"] > 1:
self.log.debug(
"Admin server disabled because of multiple processes.")
return
class Devnull(object):
def write(self, *_):
pass
from gevent import pywsgi
def admin_routes(env, start_response):
path = env["PATH_INFO"]
status = "200 OK"
res = ""
if path in ["/", "/report", "/report_mem"]:
report = self.get_worker_report(with_memory=(path == "/report_mem"))
res = bytes(json_stdlib.dumps(report, cls=MongoJSONEncoder), 'utf-8')
elif path == "/wait_for_idle":
self.wait_for_idle()
res = bytes("idle", "utf-8")
else:
status = "404 Not Found"
start_response(status, [('Content-Type', 'application/json')])
return [res]
server = pywsgi.WSGIServer((self.config["admin_ip"], self.config["admin_port"]), admin_routes, log=Devnull())
try:
self.log.debug("Starting admin server on port %s" % self.config["admin_port"])
server.serve_forever()
except Exception as e: # pylint: disable=broad-except
self.log.debug("Error in admin server : %s" % e) | def greenlet_admin(self) | This greenlet is used to get status information about the worker
when --admin_port was given | 3.017244 | 2.937162 | 1.027265 |
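When the admin server is enabled, the report can be fetched over plain HTTP; a hedged usage sketch (the host and port are assumptions that must match the worker's admin_ip and admin_port settings):

import json
from urllib.request import urlopen

with urlopen("http://127.0.0.1:20020/report") as resp:  # 20020 is an assumed admin_port
    report = json.loads(resp.read().decode("utf-8"))
print(report["status"], len(report["jobs"]))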
# Be mindful that this is being executed in a different greenlet than the work_* methods.
while True:
time.sleep(0.01)
with self.work_lock:
if self.status != "wait":
continue
if len(self.gevent_pool) > 0:
continue
# Force a refresh of the current subqueues; one might just have been created.
self.refresh_queues()
# We might be dequeueing a new subqueue. Double check that we don't have anything more to do
outcome, dequeue_jobs = self.work_once(free_pool_slots=1, max_jobs=None)
if outcome is "wait" and dequeue_jobs == 0:
break | def wait_for_idle(self) | Waits until the worker has nothing more to do. Very useful in tests | 9.507502 | 8.884959 | 1.070067 |
self.work_init()
self.work_loop(max_jobs=self.max_jobs, max_time=self.max_time)
self.work_stop() | def work(self) | Starts the work loop. | 4.956993 | 4.119397 | 1.20333 |
dequeued_jobs = 0
available_queues = [
queue for queue in self.queues
if queue.root_id not in self.paused_queues and
queue.id not in self.paused_queues
]
for queue_i in range(len(available_queues)):
queue = available_queues[(queue_i + self.queue_offset) % len(available_queues)]
max_jobs_per_queue = free_pool_slots - dequeued_jobs
if max_jobs_per_queue <= 0:
queue_i -= 1
break
if self.config["dequeue_strategy"] == "parallel":
max_jobs_per_queue = max(1, int(max_jobs_per_queue / (len(available_queues) - queue_i)))
for job in queue.dequeue_jobs(
max_jobs=max_jobs_per_queue,
job_class=self.job_class,
worker=self
):
dequeued_jobs += 1
self.gevent_pool.spawn(self.perform_job, job)
# At the next pass, start at the next queue to avoid always dequeuing the same one
if self.config["dequeue_strategy"] == "parallel":
self.queue_offset = (self.queue_offset + queue_i + 1) % len(self.queues)
# TODO consider this when dequeuing jobs to have strict limits
if max_jobs and self.done_jobs >= max_jobs:
self.log.info("Reached max_jobs=%s" % self.done_jobs)
return "break", dequeued_jobs
# We seem to have exhausted available jobs; we can sleep for a while.
if dequeued_jobs == 0:
if self.config["dequeue_strategy"] == "burst":
self.log.info("Burst mode: stopping now because queues were empty")
return "break", dequeued_jobs
return "wait", dequeued_jobs
return None, dequeued_jobs | def work_once(self, free_pool_slots=1, max_jobs=None) | Does one lookup for new jobs, inside the inner work loop | 3.3976 | 3.410588 | 0.996192 |
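A worked example of the "parallel" dequeue split above, with illustrative numbers:

free_pool_slots, dequeued_jobs = 10, 0
available = ["q1", "q2", "q3"]  # hypothetical queue names
for queue_i in range(len(available)):
    budget = free_pool_slots - dequeued_jobs
    per_queue = max(1, int(budget / (len(available) - queue_i)))
    # queue_i=0 -> 10//3 = 3, queue_i=1 -> 7//2 = 3, queue_i=2 -> 4//1 = 4
    dequeued_jobs += per_queue  # pretend each queue had enough jobs to fill its share
# dequeued_jobs == 10: the pool is filled roughly evenly across the three queues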
if len(self.queues_with_notify) > 0:
# https://github.com/antirez/redis/issues/874
connections.redis.blpop(*(self.queues_with_notify + [max(1, int(self.config["max_latency"]))]))
else:
gevent.sleep(self.config["max_latency"]) | def work_wait(self) | Wait for new jobs to arrive | 6.670034 | 6.439069 | 1.035869 |
if self.config["trace_memory"]:
job.trace_memory_start()
set_current_job(job)
try:
job.perform()
except MaxConcurrencyInterrupt:
self.log.error("Max concurrency reached")
job._save_status("maxconcurrency", exception=True)
except RetryInterrupt:
self.log.error("Caught retry")
job.save_retry(sys.exc_info()[1])
except MaxRetriesInterrupt:
self.log.error("Max retries reached")
job._save_status("maxretries", exception=True)
except AbortInterrupt:
self.log.error("Caught abort")
job.save_abort()
except TimeoutInterrupt:
self.log.error("Job timeouted after %s seconds" % job.timeout)
job._save_status("timeout", exception=True)
except JobInterrupt:
self.log.error("Job interrupted")
job._save_status("interrupt", exception=True)
except Exception:
self.log.error("Job failed")
job._save_status("failed", exception=True)
finally:
set_current_job(None)
self.done_jobs += 1
if self.config["trace_memory"]:
job.trace_memory_stop() | def perform_job(self, job) | Wraps a job.perform() call with timeout logic and exception handlers.
This is the first call happening inside the greenlet. | 2.570103 | 2.520159 | 1.019817 |
self.log.info("Forced shutdown...")
self.status = "killing"
self.gevent_pool.kill(exception=JobInterrupt, block=False)
raise StopRequested() | def shutdown_now(self) | Forced shutdown: interrupts all the jobs. | 12.666078 | 9.097375 | 1.392278 |
if len(job_ids) == 0 or self.use_large_ids:
return job_ids
elif isinstance(job_ids[0], ObjectId):
return [x.binary for x in job_ids]
else:
return [bytes.fromhex(str(x)) for x in job_ids] | def serialize_job_ids(self, job_ids) | Returns job_ids serialized for storage in Redis | 3.062304 | 2.843415 | 1.076981 |
if len(job_ids) == 0 or self.use_large_ids:
return job_ids
else:
return [binascii.hexlify(x.encode('utf-8') if (PY3 and isinstance(x, str)) else x).decode('ascii')
for x in job_ids] | def unserialize_job_ids(self, job_ids) | Unserialize job_ids stored in Redis | 3.595101 | 3.559162 | 1.010098 |
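For the compact (non-large-id) case in the two rows above, the Redis value is just the 12 raw bytes of the ObjectId; a round-trip sketch using pymongo's bson package:

import binascii
from bson import ObjectId

oid = ObjectId()
raw = oid.binary                                # 12 raw bytes, as stored in Redis
hex_id = binascii.hexlify(raw).decode("ascii")  # back to the 24-char hex form
assert ObjectId(hex_id) == oid
assert bytes.fromhex(str(oid)) == raw           # same conversion as serialize_job_ids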
queue = self.id
if self.id.endswith("/"):
queue = self.root_id
return queue | def _get_pausable_id(self) | Get the queue id (either id or root_id) that should be used to pause/unpause the current queue
TODO: handle subqueues with more than one level, e.g. "queue/subqueue/" | 14.788465 | 6.488349 | 2.279234 |
root_is_paused = False
if self.root_id != self.id:
root_is_paused = context.connections.redis.sismember(redis_key("paused_queues"), self.root_id)
return root_is_paused or context.connections.redis.sismember(redis_key("paused_queues"), self.id) | def is_paused(self) | Returns whether the queue is paused or not.
Warning: this does NOT ensure that the queue was effectively added to
the set of paused queues. See the 'paused_queues_refresh_interval' option. | 4.15696 | 3.434378 | 1.210397 |
prefix = context.get_current_config()["redis_prefix"]
queues = []
for key in context.connections.redis.keys():
if key.startswith(prefix):
queues.append(Queue(key[len(prefix) + 3:]))
return queues | def all_active(cls) | List active queues, based on their lengths in Redis. Warning, uses the unscalable KEYS redis command | 7.156478 | 5.61237 | 1.275126 |
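Since the docstring above flags KEYS as unscalable, a hedged alternative is the incremental SCAN command; the prefix value and the "<prefix>:q:<name>" key layout are assumptions inferred from the slicing done above:

import redis

r = redis.StrictRedis()
prefix = "mrq"  # assumed redis_prefix
active = [key[len(prefix) + 3:].decode("utf-8")        # strip "<prefix>:q:" like the code above
          for key in r.scan_iter(match=prefix + ":q:*")]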