code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---|
parts = domain.split('.')
if len(parts) == 1:
yield parts[0]
else:
for i in range(len(parts), 1, -1):
yield ".".join(parts[-i:])
|
def _domain_variants(domain)
|
>>> list(_domain_variants("foo.bar.example.com"))
['foo.bar.example.com', 'bar.example.com', 'example.com']
>>> list(_domain_variants("example.com"))
['example.com']
>>> list(_domain_variants("localhost"))
['localhost']
| 2.309054 | 2.603086 | 0.887045 |
joined_regexes = "|".join(r for r in regexes if r)
if not joined_regexes:
return None
if use_re2:
import re2
return re2.compile(joined_regexes, flags=flags, max_mem=max_mem)
return re.compile(joined_regexes, flags=flags)
|
def _combined_regex(regexes, flags=re.IGNORECASE, use_re2=False, max_mem=None)
|
Return a compiled regex combined (using OR) from a list of ``regexes``.
If there is nothing to combine, None is returned.
The re2 library (https://github.com/axiak/pyre2) can often compile and match
large regexes much faster than the stdlib ``re`` module (10x is not uncommon),
but there are some gotchas:
* in case of "DFA out of memory" errors, use the ``max_mem`` argument
to increase the amount of memory re2 is allowed to use.
| 2.111137 | 2.444226 | 0.863724 |
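A minimal usage sketch for ``_combined_regex`` (the rule patterns here are illustrative; ``re`` is the stdlib module already used above):
>>> combined = _combined_regex([r"ads\.", r"banner"])
>>> bool(combined.search("http://example.com/banner.png"))
True
>>> _combined_regex([]) is None
True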
options = options or {}
for optname in self.options:
if optname == 'match-case': # TODO
continue
if optname not in options:
raise ValueError("Rule requires option %s" % optname)
if optname == 'domain':
if not self._domain_matches(options['domain']):
return False
continue
if options[optname] != self.options[optname]:
return False
return self._url_matches(url)
|
def match_url(self, url, options=None)
|
Return whether this rule matches the URL.
What to do when a rule matches is up to the developer. Most likely
the ``.is_exception`` attribute should be taken into account.
| 3.611305 | 3.639492 | 0.992255 |
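A hedged usage sketch (assuming ``AdblockRule`` is constructed from an Adblock Plus filter string, as in the adblockparser library):
>>> rule = AdblockRule("||ads.example.com^")
>>> rule.match_url("http://ads.example.com/banner.gif")
True
>>> rule.match_url("http://example.com/index.html")
False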
if self.is_comment:
return False
if self.is_html_rule: # HTML rules are not supported yet
return False
options = options or {}
keys = set(options.keys())
if not keys.issuperset(self._options_keys):
# some of the required options are not given
return False
return True
|
def matching_supported(self, options=None)
|
Return whether this rule can return meaningful result,
given the `options` dict. If some options are missing,
then rule shouldn't be matched against, and this function
returns False.
No options:
>>> rule = AdblockRule("swf|")
>>> rule.matching_supported({})
True
Option is used in the rule, but its value is not available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({})
False
Option is used in the rule, and option value is available
at matching time:
>>> rule = AdblockRule("swf|$third-party")
>>> rule.matching_supported({'domain': 'example.com', 'third-party': False})
True
Rule is a comment:
>>> rule = AdblockRule("!this is not a rule")
>>> rule.matching_supported({})
False
| 5.465715 | 5.075259 | 1.076933 |
if not rule:
return rule
# Check if the rule isn't already regexp
if rule.startswith('/') and rule.endswith('/'):
if len(rule) > 1:
rule = rule[1:-1]
else:
raise AdblockParsingError('Invalid rule')
return rule
# escape special regex characters
rule = re.sub(r"([.$+?{}()\[\]\\])", r"\\\1", rule)
# XXX: the resulting regex must use non-capturing groups (?:
# for performance reasons; also, there is a limit on the number
# of capturing groups, so using them would prevent building
# a single regex out of several rules.
# Separator character ^ matches anything but a letter, a digit, or
# one of the following: _ - . %. The end of the address is also
# accepted as separator.
rule = rule.replace("^", "(?:[^\w\d_\-.%]|$)")
# * symbol
rule = rule.replace("*", ".*")
# | in the end means the end of the address
if rule[-1] == '|':
rule = rule[:-1] + '$'
# || in the beginning means beginning of the domain name
if rule[:2] == '||':
# XXX: it is better to use urlparse for such things,
# but urlparse doesn't give us a single regex.
# Regex is based on http://tools.ietf.org/html/rfc3986#appendix-B
if len(rule) > 2:
# (?:[^:/?#]+:)?  -- optional scheme
# (?://(?:[^/?#]*\.)?)?  -- complete part of the domain
rule = r"^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?" + rule[2:]
elif rule[0] == '|':
# | in the beginning means start of the address
rule = '^' + rule[1:]
# other | symbols should be escaped
# we have "|$" in our regexp - do not touch it
rule = re.sub("(\|)[^$]", r"\|", rule)
return rule
|
def rule_to_regex(cls, rule)
|
Convert AdBlock rule to a regular expression.
| 6.214052 | 6.007249 | 1.034425 |
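Two illustrative conversions (a sketch; ``rule_to_regex`` is a classmethod, and ``re`` is the stdlib module):
>>> AdblockRule.rule_to_regex("swf|")
'swf$'
>>> regex = AdblockRule.rule_to_regex("||ads.example.com^")
>>> bool(re.search(regex, "http://ads.example.com/banner.swf"))
True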
if general_re and general_re.search(url):
return True
rules = []
if 'domain' in options and domain_required_rules:
src_domain = options['domain']
for domain in _domain_variants(src_domain):
if domain in domain_required_rules:
rules.extend(domain_required_rules[domain])
rules.extend(rules_with_options)
if self.skip_unsupported_rules:
rules = [rule for rule in rules if rule.matching_supported(options)]
return any(rule.match_url(url, options) for rule in rules)
|
def _matches(self, url, options,
general_re, domain_required_rules, rules_with_options)
|
Return whether ``url``/``options`` are matched by the rules defined by
``general_re``, ``domain_required_rules`` and ``rules_with_options``.
``general_re`` is a compiled regex for rules without options.
``domain_required_rules`` is a {domain: [rules_which_require_it]}
mapping.
``rules_with_options`` is a list of AdblockRule instances that
don't require any domain, but have other options.
| 2.956396 | 3.360999 | 0.879618 |
# check directives are correct
if _format == "%S":
def _fromatter(seconds):
return "{:.2f}".format(seconds)
elif _format == "%I":
def _fromatter(seconds):
return "{0}".format(int(seconds * 1000))
else:
_format = _format.replace("%h", "{hrs:02d}")
_format = _format.replace("%m", "{mins:02d}")
_format = _format.replace("%s", "{secs:02d}")
_format = _format.replace("%i", "{millis:03d}")
try:
i = _format.index("%")
raise TimeFormatError("Unknow time format directive '{0}'".format(_format[i:i+2]))
except ValueError:
pass
def _fromatter(seconds):
millis = int(seconds * 1000)
hrs, millis = divmod(millis, 3600000)
mins, millis = divmod(millis, 60000)
secs, millis = divmod(millis, 1000)
return _format.format(hrs=hrs, mins=mins, secs=secs, millis=millis)
return _fromatter
|
def seconds_to_str_fromatter(_format)
|
Accepted format directives: %i %s %m %h (or a single %S for seconds as a float, %I for whole milliseconds)
| 2.175119 | 2.141628 | 1.015638 |
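A small usage sketch (the directive strings are illustrative):
>>> fmt = seconds_to_str_fromatter("%h:%m:%s.%i")
>>> fmt(3661.5)
'01:01:01.500'
>>> seconds_to_str_fromatter("%S")(3.14159)
'3.14'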
if not mode in [self.STRICT_MIN_LENGTH, self.DROP_TRAILING_SILENCE,
self.STRICT_MIN_LENGTH | self.DROP_TRAILING_SILENCE, 0]:
raise ValueError("Wrong value for mode")
self._mode = mode
self._strict_min_length = (mode & self.STRICT_MIN_LENGTH) != 0
self._drop_tailing_silence = (mode & self.DROP_TRAILING_SILENCE) != 0
|
def set_mode(self, mode)
|
:Parameters:
`mode` : *(int)*
New mode, must be one of:
- `StreamTokenizer.STRICT_MIN_LENGTH`
- `StreamTokenizer.DROP_TRAILING_SILENCE`
- `StreamTokenizer.STRICT_MIN_LENGTH | StreamTokenizer.DROP_TRAILING_SILENCE`
- `0`
See `StreamTokenizer.__init__` for more information about the mode.
| 3.322514 | 1.999731 | 1.661481 |
self._reinitialize()
if callback is not None:
self._deliver = callback
while True:
frame = data_source.read()
if frame is None:
break
self._current_frame += 1
self._process(frame)
self._post_process()
if callback is None:
_ret = self._tokens
self._tokens = None
return _ret
|
def tokenize(self, data_source, callback=None)
|
Read data from `data_source`, one frame at a time, and process the read frames in
order to detect sequences of frames that make up valid tokens.
:Parameters:
`data_source` : instance of the :class:`DataSource` class that implements a `read` method.
'read' should return a slice of signal, i.e. frame (of whatever \
type as long as it can be processed by validator) and None if \
there is no more signal.
`callback` : an optional 3-argument function.
If a `callback` function is given, it will be called each time a valid token
is found.
:Returns:
A list of tokens if `callback` is None. Each token is a tuple with the following elements:
.. code:: python
(data, start, end)
where `data` is a list of read frames, `start`: index of the first frame in the
original data and `end` : index of the last frame.
| 4.499291 | 4.212724 | 1.068024 |
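A rough usage sketch based on auditok's string-based example; the import path and the ``UpperCaseChecker`` validator are assumptions, not part of the snippet above:
from auditok.util import StringDataSource, DataValidator  # assumed import path

class UpperCaseChecker(DataValidator):
    # a frame is "valid" if it is an upper-case character
    def is_valid(self, frame):
        return frame.isupper()

tokenizer = StreamTokenizer(validator=UpperCaseChecker(),
                            min_length=1, max_length=9999,
                            max_continuous_silence=0)
tokens = tokenizer.tokenize(StringDataSource("aaAAAaaBBbb"))
# tokens should be a list of (data, start, end) tuples,
# e.g. (['A', 'A', 'A'], 2, 4) and (['B', 'B'], 7, 8)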
if self._current >= len(self._data):
return None
self._current += 1
return self._data[self._current - 1]
|
def read(self)
|
Read one character from buffer.
:Returns:
Current character or None if end of buffer is reached
| 3.873673 | 3.670472 | 1.055361 |
if not isinstance(data, basestring):
raise ValueError("data must an instance of basestring")
self._data = data
self._current = 0
|
def set_data(self, data)
|
Set a new data buffer.
:Parameters:
`data` : a basestring object
New data buffer.
| 5.4413 | 4.459911 | 1.220047 |
signal = AudioEnergyValidator._convert(data, self.sample_width)
return AudioEnergyValidator._signal_log_energy(signal) >= self._energy_threshold
|
def is_valid(self, data)
|
Check if data is valid. Audio data will be converted into an array (of
signed values) of which the log energy is computed. Log energy is computed
as follows:
.. code:: python
arr = AudioEnergyValidator._convert(signal, sample_width)
energy = float(numpy.dot(arr, arr)) / len(arr)
log_energy = 10. * numpy.log10(energy)
:Parameters:
`data` : either a *string* or a *Bytes* buffer
`data` is converted into a numerical array using the `sample_width`
given in the constructor.
:Returns:
True if `log_energy` >= `energy_threshold`, False otherwise.
| 16.494259 | 5.058117 | 3.260949 |
if len(data_buffer) % (self.sample_width * self.channels) != 0:
raise ValueError("length of data_buffer must be a multiple of (sample_width * channels)")
self._buffer = data_buffer
self._index = 0
self._left = 0 if self._buffer is None else len(self._buffer)
|
def set_data(self, data_buffer)
|
Set new data for this audio stream.
:Parameters:
`data_buffer` : str, basestring, Bytes
a string buffer with a length multiple of (sample_width * channels)
| 2.934616 | 2.557859 | 1.147294 |
if len(data_buffer) % (self.sample_width * self.channels) != 0:
raise ValueError("length of data_buffer must be a multiple of (sample_width * channels)")
self._buffer += data_buffer
self._left += len(data_buffer)
|
def append_data(self, data_buffer)
|
Append data to this audio stream
:Parameters:
`data_buffer` : str, basestring, Bytes
a buffer with a length multiple of (sample_width * channels)
| 3.385754 | 2.497061 | 1.355896 |
# Disable post_save during manage.py loaddata
if kwargs.get("raw", False):
return False
user, created = kwargs["instance"], kwargs["created"]
disabled = getattr(user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE)
if created and not disabled:
Account.create(user=user)
|
def user_post_save(sender, **kwargs)
|
After User.save is called we check to see if it was a newly created user. If so,
we check whether the User object wants account creation. If everything passes we
create an Account object.
We only run on user creation to avoid having to check for existence on
each call to User.save.
| 6.326437 | 5.236073 | 1.208241 |
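For context, a receiver like this is typically wired to Django's ``post_save`` signal; a hedged sketch (the real project connects it in its own app config):
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save

# hypothetical wiring; dispatch_uid just guards against duplicate connections
post_save.connect(user_post_save, sender=get_user_model(),
                  dispatch_uid="user_post_save")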
kwargs = {"user": self.request.user, "initial": self.get_initial()}
if self.request.method in ["POST", "PUT"]:
kwargs.update({
"data": self.request.POST,
"files": self.request.FILES,
})
return kwargs
|
def get_form_kwargs(self)
|
Returns the keyword arguments for instantiating the form.
| 2.008101 | 1.812596 | 1.107859 |
if not settings.ACCOUNT_PASSWORD_USE_HISTORY:
return False
if hasattr(user, "password_expiry"):
# user-specific value
expiry = user.password_expiry.expiry
else:
# use global value
expiry = settings.ACCOUNT_PASSWORD_EXPIRY
if expiry == 0: # zero indicates no expiration
return False
try:
# get latest password info
latest = user.password_history.latest("timestamp")
except PasswordHistory.DoesNotExist:
return False
now = datetime.datetime.now(tz=pytz.UTC)
expiration = latest.timestamp + datetime.timedelta(seconds=expiry)
if expiration < now:
return True
else:
return False
|
def check_password_expired(user)
|
Return True if password is expired and system is using
password expiration, False otherwise.
| 3.573053 | 3.48214 | 1.026108 |
def decorator(view_func):
@functools.wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if is_authenticated(request.user):
return view_func(request, *args, **kwargs)
return handle_redirect_to_login(
request,
redirect_field_name=redirect_field_name,
login_url=login_url
)
return _wrapped_view
if func:
return decorator(func)
return decorator
|
def login_required(func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None)
|
Decorator for views that checks that the user is logged in, redirecting
to the log in page if necessary.
| 1.672116 | 1.739295 | 0.961376 |
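Because of the ``func`` shortcut the decorator works with or without arguments; a sketch with hypothetical view names:
@login_required
def dashboard(request):
    ...

@login_required(login_url="/account/login/")
def settings_view(request):
    ...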
bits = token.split_contents()
if len(bits) == 2:
user = bits[1]
as_var = None
elif len(bits) == 4:
user = bits[1]
as_var = bits[3]
else:
raise template.TemplateSyntaxError("'{0}' takes either two or four arguments".format(bits[0]))
return UserDisplayNode(user, as_var)
|
def do_user_display(parser, token)
|
Example usage::
{% user_display user %}
or if you need to use it in a {% blocktrans %}::
{% user_display user as user_display %}
{% blocktrans %}{{ user_display }} has sent you a gift.{% endblocktrans %}
| 1.906242 | 2.030379 | 0.93886 |
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument"
" (path to a view)" % bits[0]
)
viewname = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == "as":
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise template.TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNextNode(viewname, args, kwargs, asvar)
|
def urlnext(parser, token)
|
{% url %} copied from Django 1.7.
| 1.794623 | 1.652182 | 1.086214 |
if all([key in context for key in ["redirect_field_name", "redirect_field_value"]]):
if context["redirect_field_value"]:
url += "?" + urlencode({
context["redirect_field_name"]: context["redirect_field_value"],
})
return url
|
def add_next(self, url, context)
|
With both `redirect_field_name` and `redirect_field_value` available in
the context, add on a querystring to handle "next" redirecting.
| 3.025425 | 2.194296 | 1.378768 |
if "permakey" in request.headers:
# Extract the permakey from the headers
permakey = request.headers.get("permakey")
# In production, there should probably be some exception handling here
# in case the permakey is an empty string or some other bad value
payload = self._decode(permakey, verify=verify)
# Sometimes, the application will call _verify(...return_payload=True)
# So, let's make sure to handle this scenario.
if return_payload:
return payload
# Retrieve the user from the database
user_id = payload.get("user_id", None)
user = userid_table.get(user_id)
# If we cannot find a user, then this method should return
# is_valid == False
# reason == some text for why
# status == some status code, probably a 401
if not user_id or not user:
is_valid = False
reason = "No user found"
status = 401
else:
# After finding a user, make sure the permakey matches,
# or else return a bad status or some other error.
# In production, both this scenario, and the above "No user found"
# scenario should return an identical message and status code.
# This is to prevent your application accidentally
# leaking information about the existence or non-existence of users.
is_valid = user.permakey == permakey
reason = None if is_valid else "Permakey mismatch"
status = 200 if is_valid else 401
return is_valid, status, reason
else:
return super()._verify(
request=request,
return_payload=return_payload,
verify=verify,
raise_missing=raise_missing,
request_args=request_args,
request_kwargs=request_kwargs,
*args,
**kwargs
)
|
def _verify(
self,
request,
return_payload=False,
verify=True,
raise_missing=False,
request_args=None,
request_kwargs=None,
*args,
**kwargs
)
|
If there is a "permakey", then we will verify the token by checking the
database. Otherwise, just do the normal verification.
Typically, any method that begins with an underscore in sanic-jwt should
not be touched. In this case, we are trying to break the rules a bit to handle
a unique use case: handle both expirable and non-expirable tokens.
| 4.419889 | 4.027415 | 1.097451 |
if item in self: # noqa
item = getattr(self, item)
return item()
|
def get(self, item)
|
Helper method to avoid calling getattr
| 9.370337 | 7.731609 | 1.211952 |
return {
x.lower()[10:]: app_config.get(x)
for x in filter(lambda x: x.startswith("SANIC_JWT"), app_config)
}
|
def extract_presets(app_config)
|
Pull the application's configurations for Sanic JWT
| 7.207795 | 4.142883 | 1.739802 |
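A toy sketch (the config key is illustrative; real keys follow the ``SANIC_JWT_*`` naming):
>>> extract_presets({"SANIC_JWT_ACCESS_TOKEN_NAME": "jwt", "DEBUG": True})
{'access_token_name': 'jwt'}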
if len(args) > 1:
kwargs.update({"authenticate": args[1]})
return Initialize(args[0], **kwargs)
|
def initialize(*args, **kwargs)
|
Functional approach to initializing Sanic JWT. This was the original
method, but was replaced by the Initialize class. It is recommended to use
the class because it is more flexible. There is no current plan to remove
this method, but it may be deprecated in the future.
| 6.538343 | 4.601611 | 1.420881 |
# Deprecation notices
if "SANIC_JWT_HANDLER_PAYLOAD_SCOPES" in self.app.config:
raise exceptions.InvalidConfiguration(
"SANIC_JWT_HANDLER_PAYLOAD_SCOPES has been deprecated. "
"Instead, pass your handler method (not an import path) as "
"initialize(add_scopes_to_payload=my_scope_extender)"
)
if "SANIC_JWT_PAYLOAD_HANDLER" in self.app.config:
raise exceptions.InvalidConfiguration(
"SANIC_JWT_PAYLOAD_HANDLER has been deprecated. "
"Instead, you will need to subclass Authentication. "
)
if "SANIC_JWT_HANDLER_PAYLOAD_EXTEND" in self.app.config:
raise exceptions.InvalidConfiguration(
"SANIC_JWT_HANDLER_PAYLOAD_EXTEND has been deprecated. "
"Instead, you will need to subclass Authentication. "
"Check out the documentation for more information."
)
|
def __check_deprecated(self)
|
Checks for deprecated configuration keys
| 3.914055 | 3.805525 | 1.028519 |
for mapping in endpoint_mappings:
if all(map(self.config.get, mapping.keys)):
self.__add_single_endpoint(
mapping.cls, mapping.endpoint, mapping.is_protected
)
self.bp.exception(exceptions.SanicJWTException)(
self.responses.exception_response
)
if not self.instance_is_blueprint:
url_prefix = self._get_url_prefix()
self.instance.blueprint(self.bp, url_prefix=url_prefix)
|
def __add_endpoints(self)
|
Initialize the Sanic JWT Blueprint and attach it to the initialized instance
| 7.261592 | 5.891119 | 1.232634 |
config = self.config
if "class_views" in self.kwargs:
class_views = self.kwargs.pop("class_views")
for route, view in class_views:
if issubclass(view, endpoints.BaseEndpoint) and isinstance(
route, str
):
self.bp.add_route(
view.as_view(
self.responses,
config=self.config,
instance=self.instance,
),
route,
strict_slashes=config.strict_slashes(),
)
else:
raise exceptions.InvalidClassViewsFormat()
|
def __add_class_views(self)
|
Include any custom class views on the Sanic JWT Blueprint
| 3.952794 | 3.364558 | 1.174833 |
# msg taken from BaseAuthentication
msg = (
"Sanic JWT was not initialized properly. It did not received "
"an instance of {}"
)
if not issubclass(self.authentication_class, Authentication):
raise exceptions.InitializationFailure(
message=msg.format("Authentication")
)
if not issubclass(self.configuration_class, Configuration):
raise exceptions.InitializationFailure(
message=msg.format("Configuration")
)
if not issubclass(self.responses_class, Responses):
raise exceptions.InitializationFailure(
message=msg.format("Responses")
)
|
def __check_classes(self)
|
Check if any of the default classes (`Authentication`, `Configuration`
and / or `Responses`) have been overwritten and if they're still valid
| 4.09703 | 3.260998 | 1.256373 |
config = self.config
# Initialize instance of the Authentication class
self.instance.auth = self.authentication_class(self.app, config=config)
init_handlers = (
handlers if config.auth_mode() else auth_mode_agnostic_handlers
)
for handler in init_handlers:
if handler.keys is None:
self.__check_method_in_auth(handler.name, handler.exception)
else:
if all(map(config.get, handler.keys)):
self.__check_method_in_auth(
handler.name, handler.exception
)
for handler in init_handlers:
if handler.name in self.kwargs:
method = self.kwargs.pop(handler.name)
setattr(self.instance.auth, handler.name, method)
|
def __initialize_instance(self)
|
Take any predefined methods/handlers and insert them into Sanic JWT
| 4.823294 | 4.303688 | 1.120735 |
handler_to_enable = filter(lambda h: h.keys is not None, handlers)
for handler in handler_to_enable:
if handler.name in self.kwargs:
for k in handler.keys:
self.kwargs.update({k: True})
self.config = self.configuration_class(self.app.config, **self.kwargs)
|
def __load_configuration(self)
|
Configure settings for the instance in the following order:
1. Sanic JWT's defaults
2. Custom Configuration class
3. Keyword arguments passed to Initialize
| 5.406297 | 5.608552 | 0.963938 |
uid = self.config.user_id()
if isinstance(user, dict):
user_id = user.get(uid)
elif hasattr(user, "to_dict"):
_to_dict = await utils.call(user.to_dict)
user_id = _to_dict.get(uid)
else:
raise exceptions.InvalidRetrieveUserObject()
if asdict:
return {uid: user_id}
return user_id
|
async def _get_user_id(self, user, *, asdict=False)
|
Get a user_id from a user object. If `asdict` is True, will return
it as a dict with `config.user_id` as key. The `asdict` keyword
defaults to `False`.
| 3.852259 | 3.447374 | 1.117447 |
delta = timedelta(seconds=self.config.expiration_delta())
exp = datetime.utcnow() + delta
additional = {"exp": exp}
for option in ["iss", "iat", "nbf", "aud"]:
setting = "claim_{}".format(option.lower())
if setting in self.config: # noqa
attr = self.config.get(setting)
if attr:
self.claims.append(option)
method_name = "build_claim_{}".format(option)
method = getattr(utils, method_name)
additional.update({option: method(attr, self.config)})
payload.update(additional)
if self._custom_claims:
custom_claims = {}
for claim in self._custom_claims:
custom_claims[claim.get_key()] = await utils.call(
claim.setup, payload, user
)
payload.update(custom_claims)
return payload
|
async def add_claims(self, payload, user, *args, **kwargs)
|
Injects standard claims into the payload (exp, iss, iat, nbf, aud) and custom
claims, if any exist.
| 3.976106 | 3.691758 | 1.077022 |
try:
is_valid, status, reasons = self._verify(
request,
request_args=request_args,
request_kwargs=request_kwargs,
)
except Exception as e:
logger.debug(e.args)
if self.config.debug():
raise e
args = e.args if isinstance(e, SanicJWTException) else []
raise exceptions.Unauthorized(*args)
return is_valid, status, reasons
|
def _check_authentication(self, request, request_args, request_kwargs)
|
Checks a request object to determine whether that request contains a valid,
authenticated JWT.
It returns a tuple:
1. Boolean whether the request is authenticated with a valid JWT
2. HTTP status code
3. Reasons (if any) for a potential authentication failure
| 4.584579 | 4.126944 | 1.110889 |
secret = self._get_secret()
algorithm = self._get_algorithm()
kwargs = {}
for claim in self.claims:
if claim != "exp":
setting = "claim_{}".format(claim.lower())
if setting in self.config: # noqa
value = self.config.get(setting)
kwargs.update({claim_label[claim]: value})
kwargs["leeway"] = int(self.config.leeway())
if "claim_aud" in self.config: # noqa
kwargs["audience"] = self.config.claim_aud()
if "claim_iss" in self.config: # noqa
kwargs["issuer"] = self.config.claim_iss()
decoded = jwt.decode(
token,
secret,
algorithms=[algorithm],
verify=verify,
options={"verify_exp": self.config.verify_exp()},
**kwargs
)
return decoded
|
def _decode(self, token, verify=True)
|
Take a JWT and return a decoded payload. Optionally, will verify
the claims on the token.
| 2.746236 | 2.631017 | 1.043793 |
payload = await utils.call(self.build_payload, user)
if (
not isinstance(payload, dict)
or self.config.user_id() not in payload
):
raise exceptions.InvalidPayload
payload = await utils.call(self.add_claims, payload, user)
extend_payload_args = inspect.getfullargspec(self.extend_payload)
args = [payload]
if "user" in extend_payload_args.args:
args.append(user)
payload = await utils.call(self.extend_payload, *args)
if self.config.scopes_enabled():
scopes = await utils.call(self.add_scopes_to_payload, user)
if not isinstance(scopes, (tuple, list)):
scopes = [scopes]
payload[self.config.scopes_name()] = scopes
claims = self.claims + [x.get_key() for x in self._custom_claims]
missing = [x for x in claims if x not in payload]
if missing:
logger.debug("")
raise exceptions.MissingRegisteredClaim(missing=missing)
return payload
|
async def _get_payload(self, user)
|
Given a user object, create a payload and extend it as configured.
| 3.361009 | 3.15175 | 1.066394 |
if refresh_token:
cookie_token_name_key = "cookie_refresh_token_name"
else:
cookie_token_name_key = "cookie_access_token_name"
cookie_token_name = getattr(self.config, cookie_token_name_key)
return request.cookies.get(cookie_token_name(), None)
|
def _get_token_from_cookies(self, request, refresh_token)
|
Extract the token if present inside the request cookies.
| 2.877366 | 2.591135 | 1.110466 |
header = request.headers.get(self.config.authorization_header(), None)
if header is None:
return None
else:
header_prefix_key = "authorization_header_prefix"
header_prefix = getattr(self.config, header_prefix_key)
if header_prefix():
try:
prefix, token = header.split(" ")
if prefix != header_prefix():
raise Exception
except Exception:
raise exceptions.InvalidAuthorizationHeader()
else:
token = header
if refresh_token:
token = request.json.get(self.config.refresh_token_name())
return token
|
def _get_token_from_headers(self, request, refresh_token)
|
Extract the token if present inside the headers of a request.
| 3.402554 | 3.23166 | 1.052881 |
if refresh_token:
query_string_token_name_key = "query_string_refresh_token_name"
else:
query_string_token_name_key = "query_string_access_token_name"
query_string_token_name = getattr(
self.config, query_string_token_name_key
)
return request.args.get(query_string_token_name(), None)
|
def _get_token_from_query_string(self, request, refresh_token)
|
Extract the token if present from the request args.
| 2.774218 | 2.480403 | 1.118454 |
if self.config.cookie_set():
token = self._get_token_from_cookies(request, refresh_token)
if token:
return token
else:
if self.config.cookie_strict():
raise exceptions.MissingAuthorizationCookie()
if self.config.query_string_set():
token = self._get_token_from_query_string(request, refresh_token)
if token:
return token
else:
if self.config.query_string_strict():
raise exceptions.MissingAuthorizationQueryArg()
token = self._get_token_from_headers(request, refresh_token)
if token:
return token
raise exceptions.MissingAuthorizationHeader()
|
def _get_token(self, request, refresh_token=False)
|
Extract a token from a request object.
| 2.386865 | 2.251217 | 1.060255 |
try:
token = self._get_token(request)
is_valid = True
reason = None
except (
exceptions.MissingAuthorizationCookie,
exceptions.MissingAuthorizationQueryArg,
exceptions.MissingAuthorizationHeader,
) as e:
token = None
is_valid = False
reason = list(e.args)
status = e.status_code if self.config.debug() else 401
if raise_missing:
if not self.config.debug():
e.status_code = 401
raise e
if token:
try:
payload = self._decode(token, verify=verify)
if verify:
if self._extra_verifications:
self._verify_extras(payload)
if self._custom_claims:
self._verify_custom_claims(payload)
except (
jwt.exceptions.ExpiredSignatureError,
jwt.exceptions.InvalidIssuerError,
jwt.exceptions.ImmatureSignatureError,
jwt.exceptions.InvalidIssuedAtError,
jwt.exceptions.InvalidAudienceError,
InvalidVerificationError,
InvalidCustomClaimError,
) as e:
# Make sure that the reasons all end with '.' for consistency
reason = [
x if x.endswith(".") else "{}.".format(x)
for x in list(e.args)
]
payload = None
status = 401
is_valid = False
except jwt.exceptions.DecodeError as e:
self._reasons = e.args
# Make sure that the reasons all end with '.' for consistency
reason = (
[
x if x.endswith(".") else "{}.".format(x)
for x in list(e.args)
]
if self.config.debug()
else "Auth required."
)
logger.debug(e.args)
is_valid = False
payload = None
status = 400 if self.config.debug() else 401
else:
payload = None
if return_payload:
return payload
status = 200 if is_valid else status
return is_valid, status, reason
|
def _verify(
self,
request,
return_payload=False,
verify=True,
raise_missing=False,
request_args=None,
request_kwargs=None,
*args,
**kwargs
)
|
Verify that a request object is authenticated.
| 2.762938 | 2.726427 | 1.013392 |
payload = self._verify(
request, return_payload=True, verify=verify, *args, **kwargs
)
return payload
|
def extract_payload(self, request, verify=True, *args, **kwargs)
|
Extract a payload from a request object.
| 4.681256 | 4.164984 | 1.123955 |
payload = self.extract_payload(request)
if not payload:
return None
scopes_attribute = self.config.scopes_name()
return payload.get(scopes_attribute, None)
|
def extract_scopes(self, request)
|
Extract scopes from a request object.
| 4.746887 | 4.130601 | 1.1492 |
payload = self.extract_payload(request)
user_id_attribute = self.config.user_id()
return payload.get(user_id_attribute, None)
|
def extract_user_id(self, request)
|
Extract a user id from a request object.
| 4.15063 | 3.710526 | 1.11861 |
payload = await self._get_payload(user)
secret = self._get_secret(True)
algorithm = self._get_algorithm()
return jwt.encode(payload, secret, algorithm=algorithm).decode("utf-8")
|
async def generate_access_token(self, user)
|
Generate an access token for a given user.
| 3.551738 | 3.167809 | 1.121197 |
refresh_token = await utils.call(self.config.generate_refresh_token())
user_id = await self._get_user_id(user)
await utils.call(
self.store_refresh_token,
user_id=user_id,
refresh_token=refresh_token,
request=request,
)
return refresh_token
|
async def generate_refresh_token(self, request, user)
|
Generate a refresh token for a given user.
| 3.368185 | 3.193872 | 1.054577 |
if isinstance(df, (pd.DataFrame, pd.Series)):
return df.iloc[0:shape], df.iloc[shape:]
else:
return df[0:shape], df[shape:]
|
def tsplit(df, shape)
|
Split array into two parts.
| 2.427065 | 2.225804 | 1.090422 |
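A quick sketch of the split-point semantics:
>>> import numpy as np
>>> a, b = tsplit(np.arange(10), 7)
>>> a.shape, b.shape
((7,), (3,))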
if all([isinstance(df, (pd.DataFrame, pd.Series)) for df in [x, y]]):
return pd.concat([x, y], axis=axis)
else:
if axis == 0:
return np.concatenate([x, y])
else:
return np.column_stack([x, y])
|
def concat(x, y, axis=0)
|
Concatenate a sequence of pandas or numpy objects into one entity.
| 2.240561 | 2.067863 | 1.083516 |
shape = df.shape
if len(shape) == 1:
return df.reshape(shape[0], 1)
else:
return df
|
def reshape_1d(df)
|
If the parameter is a 1D vector then convert it into a 2D matrix (a single column).
| 2.465646 | 2.037216 | 1.210302 |
if isinstance(df, (pd.DataFrame, pd.Series)):
return df.iloc[index]
else:
return df[index, :]
|
def idx(df, index)
|
Universal indexing for numpy and pandas objects.
| 2.949727 | 2.749841 | 1.07269 |
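A small sketch showing that the same call works for numpy arrays and pandas objects:
>>> import numpy as np, pandas as pd
>>> idx(np.arange(12).reshape(4, 3), [0, 2]).shape
(2, 3)
>>> idx(pd.Series([10, 20, 30]), [0, 2]).tolist()
[10, 30]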
pbar = tqdm(total=rounds)
def callback(_, ):
pbar.update(1)
return callback
|
def xgb_progressbar(rounds=1000)
|
Progressbar for xgboost using tqdm library.
Examples
--------
>>> model = xgb.train(params, X_train, 1000, callbacks=[xgb_progressbar(1000), ])
| 6.633271 | 13.236678 | 0.501128 |
if isinstance(model, (Regressor, Classifier)):
self.models.append(model)
else:
raise ValueError('Unrecognized estimator.')
|
def add(self, model)
|
Adds a single model.
Parameters
----------
model : `Estimator`
| 5.977561 | 5.627124 | 1.062276 |
result_train = []
result_test = []
y = None
for model in self.models:
result = model.stack(k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test)
train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1)
result_test = pd.concat(result_test, axis=1)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
ds = Dataset(X_train=result_train, y_train=y, X_test=result_test)
return ds
|
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False)
|
Stacks sequence of models.
Parameters
----------
k : int, default 5
Number of folds.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If True then evaluate test dataset on the full data otherwise take the mean of every fold.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> stack_ds = pipeline.stack(k=10, seed=111)
| 1.934144 | 2.078803 | 0.930412 |
result_train = []
result_test = []
y = None
for model in self.models:
result = model.blend(proportion=proportion, stratify=stratify, seed=seed, indices=indices)
train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1, ignore_index=True)
result_test = pd.concat(result_test, axis=1, ignore_index=True)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
return Dataset(X_train=result_train, y_train=y, X_test=result_test)
|
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None, add_diff=False)
|
Blends sequence of models.
Parameters
----------
proportion : float, default 0.2
stratify : bool, default False
seed : int, default 100
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.blend(seed=15)
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
>>> res = model_rf.blend(indices=(train_index,test_index))
| 1.892546 | 2.078174 | 0.910677 |
p = Optimizer(self.models, test_size=test_size, scorer=scorer)
return p.minimize(method)
|
def find_weights(self, scorer, test_size=0.2, method='SLSQP')
|
Finds optimal weights for weighted average of models.
Parameters
----------
scorer : function
Scikit-learn like metric.
test_size : float, default 0.2
method : str
Type of solver. Should be one of:
- 'Nelder-Mead'
- 'Powell'
- 'CG'
- 'BFGS'
- 'Newton-CG'
- 'L-BFGS-B'
- 'TNC'
- 'COBYLA'
- 'SLSQP'
- 'dogleg'
- 'trust-ncg'
Returns
-------
list
| 5.245756 | 9.535483 | 0.55013 |
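A hedged usage sketch, reusing the pipeline and metric names from the other examples:
>>> pipeline = ModelsPipeline(model_rf, model_lr)
>>> weights = pipeline.find_weights(mean_absolute_error)
>>> result = pipeline.weight(weights)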
return self.apply(lambda x: np.average(x, axis=0, weights=weights))
|
def weight(self, weights)
|
Applies weighted mean to models.
Parameters
----------
weights : list
Returns
-------
np.ndarray
Examples
----------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.weight([0.8,0.2])
| 4.977151 | 6.738085 | 0.73866 |
features = [f for f in features if f in train.columns]
for column in features:
if full:
categories = pd.concat([train[column], test[column]]).dropna().unique()
else:
categories = train[column].dropna().unique()
train[column] = train[column].astype('category', categories=categories)
test[column] = test[column].astype('category', categories=categories)
train = pd.get_dummies(train, columns=features, dummy_na=dummy_na, sparse=sparse)
test = pd.get_dummies(test, columns=features, dummy_na=dummy_na, sparse=sparse)
# d_cols = train.columns[(train == 0).all()]
# train.drop(d_cols, 1, inplace=True)
# test.drop(d_cols, 1, inplace=True)
return train, test
|
def onehot_features(train, test, features, full=False, sparse=False, dummy_na=True)
|
Encode categorical features using a one-hot scheme.
Parameters
----------
train : pd.DataFrame
test : pd.DataFrame
features : list
Column names in the DataFrame to be encoded.
full : bool, default False
Whether to use all columns from train/test or only from train.
sparse : bool, default False
Whether the dummy columns should be sparse or not.
dummy_na : bool, default True
Add a column to indicate NaNs, if False NaNs are ignored.
Returns
-------
train : pd.DataFrame
test : pd.DataFrame
| 1.807366 | 1.908769 | 0.946875 |
for column in features:
if full:
vs = pd.concat([train[column], test[column]])
labels, indexer = pd.factorize(vs, sort=sort)
else:
labels, indexer = pd.factorize(train[column], sort=sort)
train[column] = indexer.get_indexer(train[column])
test[column] = indexer.get_indexer(test[column])
if na_value != -1:
train[column] = train[column].replace(-1, na_value)
test[column] = test[column].replace(-1, na_value)
return train, test
|
def factorize(train, test, features, na_value=-9999, full=False, sort=True)
|
Factorize categorical features.
Parameters
----------
train : pd.DataFrame
test : pd.DataFrame
features : list
Column names in the DataFrame to be encoded.
na_value : int, default -9999
full : bool, default False
Whether to use all columns from train/test or only from train.
sort : bool, default True
Sort by values.
Returns
-------
train : pd.DataFrame
test : pd.DataFrame
| 1.928013 | 2.054681 | 0.938352 |
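A hypothetical toy call (assuming the pandas version this helper was written for):
import pandas as pd

train = pd.DataFrame({'city': ['London', 'Paris', 'London']})
test = pd.DataFrame({'city': ['Paris', 'Tokyo']})
train, test = factorize(train, test, ['city'])
# expected: train['city'] -> [0, 1, 0]; test['city'] -> [1, -9999] ('Tokyo' unseen in train)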
def group_woe(group):
event = float(group.sum())
non_event = group.shape[0] - event
rel_event = event / event_total
rel_non_event = non_event / non_event_total
return np.log(rel_non_event / rel_event) * 100
if df[target_name].nunique() > 2:
raise ValueError('Target column should be binary (1/0).')
event_total = float(df[df[target_name] == 1.0].shape[0])
non_event_total = float(df.shape[0] - event_total)
woe_vals = df.groupby(feature_name)[target_name].transform(group_woe)
return woe_vals
|
def woe(df, feature_name, target_name)
|
Calculate weight of evidence.
Parameters
----------
df: Dataframe
feature_name: str
Column name to encode.
target_name: str
Target column name.
Returns
-------
Series
| 2.559036 | 2.652428 | 0.96479 |
def group_mean(group):
group_size = float(group.shape[0])
if C is None:
return (group.mean() * group_size + global_mean) / group_size
else:
return (group.mean() * group_size + global_mean * C) / (group_size + C)
global_mean = df[target_name].mean()
return df.groupby(feature_name)[target_name].transform(group_mean)
|
def mean_target(df, feature_name, target_name, C=None)
|
Mean target.
Original idea: Stanislav Semenov
Parameters
----------
C : float, default None
Regularization coefficient. The higher the value, the more conservative the result.
The optimal value lies between 10 and 50 depending on the data.
feature_name : str
target_name : str
df: DataFrame
Returns
-------
Series
| 2.602562 | 2.595631 | 1.00267 |
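A toy sketch of the encoding (the frame and column names are hypothetical):
import pandas as pd

df = pd.DataFrame({'cat': ['a', 'a', 'b'], 'y': [1, 0, 1]})
encoded = mean_target(df, 'cat', 'y', C=10)
# 'encoded' is a Series aligned with df: each row receives its group's
# mean of y, shrunk towards the global mean by the regularizer C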
import xgboost as xgb
f_train = model.predict(xgb.DMatrix(X_train), pred_leaf=True)
f_test = model.predict(xgb.DMatrix(X_test), pred_leaf=True)
enc = OneHotEncoder()
enc.fit(f_train)
return enc.transform(f_train), enc.transform(f_test)
|
def xgb_to_features(model, X_train, X_test)
|
Converts xgboost model into categorical features.
Reference:
"Practical Lessons from Predicting Clicks on Ads at Facebook"
https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/
| 2.067023 | 1.997741 | 1.03468 |
if not self.loaded:
self.load()
if stratify:
stratify = self.y_train
else:
stratify = None
if indices is None:
X_train, X_test, y_train, y_test = train_test_split(self.X_train, self._y_train,
test_size=test_size,
random_state=seed,
stratify=stratify, )
else:
X_train, y_train = idx(self.X_train, indices[0]), self.y_train[indices[0]]
X_test, y_test = idx(self.X_train, indices[1]), self.y_train[indices[1]]
if inplace:
self._X_train, self._X_test, self._y_train, self._y_test = X_train, X_test, y_train, y_test
return X_train, y_train, X_test, y_test
|
def split(self, test_size=0.1, stratify=False, inplace=False, seed=33, indices=None)
|
Splits train set into two parts (train/test).
Parameters
----------
test_size : float, default 0.1
stratify : bool, default False
inplace : bool, default False
If `True` then dataset's train/test sets will be replaced with new data.
seed : int, default 33
indices : list(np.ndarray, np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
Returns
-------
X_train : np.ndarray
y_train : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
Examples
--------
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
>>> res = dataset.split(indices=(train_index,test_index))
>>> res = dataset.split(test_size=0.3,seed=1111)
| 1.678186 | 1.818038 | 0.923075 |
if stratify:
kf = StratifiedKFold(n_splits=k, random_state=seed, shuffle=shuffle)
else:
kf = KFold(n_splits=k, random_state=seed, shuffle=shuffle)
for train_index, test_index in kf.split(self.X_train, self.y_train):
X_train, y_train = idx(self.X_train, train_index), self.y_train[train_index]
X_test, y_test = idx(self.X_train, test_index), self.y_train[test_index]
yield X_train, y_train, X_test, y_test, train_index, test_index
|
def kfold(self, k=5, stratify=False, shuffle=True, seed=33)
|
K-Folds cross validation iterator.
Parameters
----------
k : int, default 5
stratify : bool, default False
shuffle : bool, default True
seed : int, default 33
Yields
-------
X_train, y_train, X_test, y_test, train_index, test_index
| 1.446228 | 1.407865 | 1.027249 |
if self._hash is None:
m = hashlib.new('md5')
if self._preprocessor is None:
# generate hash from numpy array
m.update(numpy_buffer(self._X_train))
m.update(numpy_buffer(self._y_train))
if self._X_test is not None:
m.update(numpy_buffer(self._X_test))
if self._y_test is not None:
m.update(numpy_buffer(self._y_test))
elif callable(self._preprocessor):
# generate hash from user defined object (source code)
m.update(inspect.getsource(self._preprocessor).encode('utf-8'))
self._hash = m.hexdigest()
return self._hash
|
def hash(self)
|
Return md5 hash for current dataset.
| 2.631541 | 2.405409 | 1.09401 |
if not isinstance(ds, Dataset):
raise ValueError('Expected `Dataset`, got %s.' % ds)
X_train = concat(ds.X_train, self.X_train, axis=axis)
y_train = concat(ds.y_train, self.y_train, axis=axis)
if ds.X_test is not None:
X_test = concat(ds.X_test, self.X_test, axis=axis)
else:
X_test = None
if ds.y_test is not None:
y_test = concat(ds.y_test, self.y_test, axis=axis)
else:
y_test = None
if inplace:
self._X_train = X_train
self._y_train = y_train
if X_test is not None:
self._X_test = X_test
if y_test is not None:
self._y_test = y_test
return None
return Dataset(X_train, y_train, X_test, y_test)
|
def merge(self, ds, inplace=False, axis=1)
|
Merge two datasets.
Parameters
----------
axis : {0,1}
ds : `Dataset`
inplace : bool, default False
Returns
-------
`Dataset`
| 1.592796 | 1.624605 | 0.980421 |
self._X_train = csc_matrix(self._X_train)
self._X_test = csc_matrix(self._X_test)
|
def to_csc(self)
|
Convert Dataset to scipy's Compressed Sparse Column matrix.
| 2.801785 | 2.220721 | 1.261656 |
self._X_train = csr_matrix(self._X_train)
self._X_test = csr_matrix(self._X_test)
|
def to_csr(self)
|
Convert Dataset to scipy's Compressed Sparse Row matrix.
| 2.633983 | 2.184759 | 1.205617 |
if hasattr(self._X_train, 'todense'):
self._X_train = self._X_train.todense()
self._X_test = self._X_test.todense()
|
def to_dense(self)
|
Convert sparse Dataset to dense matrix.
| 2.581041 | 2.098183 | 1.230131 |
m = hashlib.new('md5')
m.update(self.hash.encode('utf-8'))
for key in sorted(params.keys()):
h_string = ('%s-%s' % (key, params[key])).encode('utf-8')
m.update(h_string)
return m.hexdigest()
|
def _dhash(self, params)
|
Generate hash of the dictionary object.
| 2.691644 | 2.570881 | 1.046973 |
if self.use_cache:
pdict = {'k': k, 'stratify': stratify, 'shuffle': shuffle, 'seed': seed, 'test_size': test_size}
if indices is not None:
pdict['train_index'] = np_hash(indices[0])
pdict['test_index'] = np_hash(indices[1])
dhash = self._dhash(pdict)
c = Cache(dhash, prefix='v')
if c.available:
logger.info('Loading %s\'s validation results from cache.' % self._name)
elif (self.dataset.X_train is None) and (self.dataset.y_train is None):
self.dataset.load()
scores = []
y_true = []
y_pred = []
if k == 1:
X_train, y_train, X_test, y_test = self.dataset.split(test_size=test_size, stratify=stratify,
seed=seed, indices=indices)
if self.use_cache and c.available:
prediction = c.retrieve('0')
else:
prediction = self._predict(X_train, y_train, X_test, y_test)
if self.use_cache:
c.store('0', prediction)
if scorer is not None:
scores.append(scorer(y_test, prediction))
y_true.append(y_test)
y_pred.append(prediction)
else:
for i, fold in enumerate(self.dataset.kfold(k, stratify=stratify, seed=seed, shuffle=shuffle)):
X_train, y_train, X_test, y_test, train_index, test_index = fold
if self.use_cache and c.available:
prediction = c.retrieve(str(i))
else:
prediction = None
if prediction is None:
logger.info('Calculating %s\'s fold #%s' % (self._name, i + 1))
prediction = self._predict(X_train, y_train, X_test, y_test)
if self.use_cache:
c.store(str(i), prediction)
if scorer is not None:
scores.append(scorer(y_test, prediction))
y_true.append(y_test)
y_pred.append(prediction)
if scorer is not None:
report_score(scores, scorer)
return y_true, y_pred
|
def validate(self, scorer=None, k=1, test_size=0.1, stratify=False, shuffle=True, seed=100, indices=None)
|
Evaluate score by cross-validation.
Parameters
----------
scorer : function(y_true,y_pred), default None
Scikit-learn like metric that returns a score.
k : int, default 1
The number of folds for validation.
If k=1 then randomly split X_train into two parts otherwise use K-fold approach.
test_size : float, default 0.1
Size of the test holdout if k=1.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
indices : list(np.array,np.array), default None
Two numpy arrays that contain indices for train/test slicing. (train_index,test_index)
Returns
-------
y_true: list
Actual labels.
y_pred: list
Predicted labels.
Examples
--------
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
>>> res = model_rf.validate(mean_absolute_error,indices=(train_index,test_index))
| 2.101317 | 2.155689 | 0.974777 |
train = None
test = []
if self.use_cache:
pdict = {'k': k, 'stratify': stratify, 'shuffle': shuffle, 'seed': seed, 'full_test': full_test}
dhash = self._dhash(pdict)
c = Cache(dhash, prefix='s')
if c.available:
logger.info('Loading %s\'s stack results from cache.' % self._name)
train = c.retrieve('train')
test = c.retrieve('test')
y_train = c.retrieve('y_train')
return Dataset(X_train=train, y_train=y_train, X_test=test)
elif not self.dataset.loaded:
self.dataset.load()
for i, fold in enumerate(self.dataset.kfold(k, stratify=stratify, seed=seed, shuffle=shuffle)):
X_train, y_train, X_test, y_test, train_index, test_index = fold
logger.info('Calculating %s\'s fold #%s' % (self._name, i + 1))
if full_test:
prediction = reshape_1d(self._predict(X_train, y_train, X_test, y_test))
else:
xt_shape = X_test.shape[0]
x_t = concat(X_test, self.dataset.X_test)
prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t))
prediction, prediction_test = tsplit(prediction_concat, xt_shape)
test.append(prediction_test)
if train is None:
train = np.zeros((self.dataset.X_train.shape[0], prediction.shape[1]))
train[test_index] = prediction
if full_test:
logger.info('Calculating %s\'s test data' % self._name)
test = self._predict(self.dataset.X_train, self.dataset.y_train, self.dataset.X_test)
else:
test = np.mean(test, axis=0)
test = reshape_1d(test)
if self.use_cache:
c.store('train', train)
c.store('test', test)
c.store('y_train', self.dataset.y_train)
return Dataset(X_train=train, y_train=self.dataset.y_train, X_test=test)
|
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True)
|
Stack a single model. You should rarely be using this method. Use `ModelsPipeline.stack` instead.
Parameters
----------
k : int, default 5
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If `True` then evaluate test dataset on the full data otherwise take the mean of every fold.
Returns
-------
`Dataset` with out of fold predictions.
| 2.444574 | 2.447735 | 0.998709 |
if self.use_cache:
pdict = {'proportion': proportion, 'stratify': stratify, 'seed': seed, 'indices': indices}
if indices is not None:
pdict['train_index'] = np_hash(indices[0])
pdict['test_index'] = np_hash(indices[1])
dhash = self._dhash(pdict)
c = Cache(dhash, prefix='b')
if c.available:
logger.info('Loading %s\'s blend results from cache.' % self._name)
train = c.retrieve('train')
test = c.retrieve('test')
y_train = c.retrieve('y_train')
return Dataset(X_train=train, y_train=y_train, X_test=test)
elif not self.dataset.loaded:
self.dataset.load()
X_train, y_train, X_test, y_test = self.dataset.split(test_size=proportion, stratify=stratify,
seed=seed, indices=indices)
xt_shape = X_test.shape[0]
x_t = concat(X_test, self.dataset.X_test)
prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t))
new_train, new_test = tsplit(prediction_concat, xt_shape)
if self.use_cache:
c.store('train', new_train)
c.store('test', new_test)
c.store('y_train', y_test)
return Dataset(new_train, y_test, new_test)
|
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None)
|
Blend a single model.
You should rarely be using this method. Use `ModelsPipeline.blend` instead.
Parameters
----------
proportion : float, default 0.2
Test size holdout.
stratify : bool, default False
seed : int, default 100
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing. (train_index,test_index)
Returns
-------
`Dataset`
| 3.132591 | 3.051775 | 1.026482 |
# Credits to: https://github.com/joblib/joblib/blob/04b001861e1dd03a857b7b419c336de64e05714c/joblib/hashing.py
if isinstance(ndarray, (pd.Series, pd.DataFrame)):
ndarray = ndarray.values
if ndarray.flags.c_contiguous:
obj_c_contiguous = ndarray
elif ndarray.flags.f_contiguous:
obj_c_contiguous = ndarray.T
else:
obj_c_contiguous = ndarray.flatten()
obj_c_contiguous = obj_c_contiguous.view(np.uint8)
if hasattr(np, 'getbuffer'):
return np.getbuffer(obj_c_contiguous)
else:
return memoryview(obj_c_contiguous)
|
def numpy_buffer(ndarray)
|
Creates a buffer from a C-contiguous numpy ndarray.
| 3.549568 | 3.448649 | 1.029263 |
if not os.path.exists(self._hash_dir):
os.makedirs(self._hash_dir)
if isinstance(data, pd.DataFrame):
columns = data.columns.tolist()
np.save(os.path.join(self._hash_dir, key), data.values)
json.dump(columns, open(os.path.join(self._hash_dir, '%s.json' % key), 'w'))
else:
np.save(os.path.join(self._hash_dir, key), data)
|
def store(self, key, data)
|
Takes an array and stores it in the cache.
| 1.961933 | 1.917922 | 1.022947 |
column_file = os.path.join(self._hash_dir, '%s.json' % key)
cache_file = os.path.join(self._hash_dir, '%s.npy' % key)
if os.path.exists(cache_file):
data = np.load(cache_file)
if os.path.exists(column_file):
with open(column_file, 'r') as json_file:
columns = json.load(json_file)
data = pd.DataFrame(data, columns=columns)
else:
return None
return data
|
def retrieve(self, key)
|
Retrieves a cached array if possible.
| 2.351834 | 2.20527 | 1.06646 |
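A hedged round-trip sketch (the hash string is a stand-in; constructor arguments mirror the Cache usage elsewhere in this code):
import numpy as np

c = Cache('abc123', prefix='s')   # 'abc123' stands in for a real dataset hash
c.store('train', np.zeros((10, 2)))
restored = c.retrieve('train')    # an ndarray again; DataFrames come back with their columns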
if hasattr(sys, 'pypy_version_info'):
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return (sys.version_info[0], sys.version_info[1])
|
def _pep425_version()
|
:return:
A tuple of integers representing the Python version number
| 2.046251 | 1.87512 | 1.091264 |
try:
import _manylinux
return bool(_manylinux.manylinux1_compatible)
except (ImportError, AttributeError):
pass
# Check for glibc 2.5
try:
proc = ctypes.CDLL(None)
gnu_get_libc_version = proc.gnu_get_libc_version
gnu_get_libc_version.restype = ctypes.c_char_p
ver = gnu_get_libc_version()
if not isinstance(ver, str_cls):
ver = ver.decode('ascii')
match = re.match(r'(\d+)\.(\d+)', ver)
return match and match.group(1) == '2' and int(match.group(2)) >= 5
except (AttributeError):
return False
|
def _pep425_supports_manylinux()
|
:return:
A boolean indicating if the machine can use manylinux1 packages
| 2.689173 | 2.577302 | 1.043406 |
try:
soabi = sysconfig.get_config_var('SOABI')
if soabi:
if soabi.startswith('cpython-'):
return 'cp%s' % soabi.split('-')[1]
return soabi.replace('.', '_').replace('-', '_')
except (IOError, NameError):
pass
impl = _pep425_implementation()
suffix = ''
if impl == 'cp':
suffix += 'm'
if sys.maxunicode == 0x10ffff and sys.version_info < (3, 3):
suffix += 'u'
return '%s%s%s' % (impl, ''.join(map(str_cls, _pep425_version())), suffix)
|
def _pep425_get_abi()
|
:return:
A unicode string of the system abi. Will be something like: "cp27m",
"cp33m", etc.
| 3.023723 | 2.931614 | 1.031419 |
tags = []
versions = []
version_info = _pep425_version()
major = version_info[:-1]
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = _pep425_implementation()
abis = []
abi = _pep425_get_abi()
if abi:
abis.append(abi)
abi3 = _pep425_implementation() == 'cp' and sys.version_info >= (3,)
if abi3:
abis.append('abi3')
abis.append('none')
if sys.platform == 'darwin':
plat_ver = platform.mac_ver()
ver_parts = plat_ver[0].split('.')
minor = int(ver_parts[1])
arch = plat_ver[2]
if sys.maxsize == 2147483647:
arch = 'i386'
arches = []
while minor > 5:
arches.append('macosx_10_%s_%s' % (minor, arch))
arches.append('macosx_10_%s_intel' % (minor,))
arches.append('macosx_10_%s_universal' % (minor,))
minor -= 1
else:
if sys.platform == 'win32':
if 'amd64' in sys.version.lower():
arches = ['win_amd64']
else:
arches = [sys.platform]
elif hasattr(os, 'uname'):
(plat, _, _, _, machine) = os.uname()
plat = plat.lower().replace('/', '')
machine = machine.replace(' ', '_').replace('/', '_')
if plat == 'linux' and sys.maxsize == 2147483647:
machine = 'i686'
arch = '%s_%s' % (plat, machine)
if _pep425_supports_manylinux():
arches = [arch.replace('linux', 'manylinux1'), arch]
else:
arches = [arch]
for abi in abis:
for arch in arches:
tags.append(('%s%s' % (impl, versions[0]), abi, arch))
if abi3:
for version in versions[1:]:
for arch in arches:
tags.append(('%s%s' % (impl, version), 'abi3', arch))
for arch in arches:
tags.append(('py%s' % (versions[0][0]), 'none', arch))
tags.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
tags.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
for i, version in enumerate(versions):
tags.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
tags.append(('py%s' % (version[0]), 'none', 'any'))
tags.append(('py2.py3', 'none', 'any'))
return tags
|
def _pep425tags()
|
:return:
A list of 3-element tuples with unicode strings or None:
[0] implementation tag - cp33, pp27, cp26, py2, py2.py3
[1] abi tag - cp26m, None
[2] arch tag - linux_x86_64, macosx_10_10_x86_64, etc
| 2.220657 | 2.195899 | 1.011275 |
x_bytes = int(math.ceil(math.log(x, 2) / 8.0))
y_bytes = int(math.ceil(math.log(y, 2) / 8.0))
num_bytes = max(x_bytes, y_bytes)
byte_string = b'\x04'
byte_string += int_to_bytes(x, width=num_bytes)
byte_string += int_to_bytes(y, width=num_bytes)
return cls(byte_string)
|
def from_coords(cls, x, y)
|
Creates an ECPoint object from the X and Y integer coordinates of the
point
:param x:
The X coordinate, as an integer
:param y:
The Y coordinate, as an integer
:return:
An ECPoint object
| 2.197099 | 2.062053 | 1.065491 |
data = self.native
first_byte = data[0:1]
# Uncompressed
if first_byte == b'\x04':
remaining = data[1:]
field_len = len(remaining) // 2
x = int_from_bytes(remaining[0:field_len])
y = int_from_bytes(remaining[field_len:])
return (x, y)
if first_byte not in set([b'\x02', b'\x03']):
raise ValueError(unwrap(
'''
Invalid EC public key - first byte is incorrect
'''
))
raise ValueError(unwrap(
'''
Compressed representations of EC public keys are not supported due
to patent US6252960
'''
))
|
def to_coords(self)
|
Returns the X and Y coordinates for this EC point, as native Python
integers
:return:
A 2-element tuple containing integers (X, Y)
| 5.177841 | 4.457948 | 1.161485 |
if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
raise TypeError(unwrap(
'''
private_key must be a byte string or Asn1Value, not %s
''',
type_name(private_key)
))
if algorithm == 'rsa':
if not isinstance(private_key, RSAPrivateKey):
private_key = RSAPrivateKey.load(private_key)
params = Null()
elif algorithm == 'dsa':
if not isinstance(private_key, DSAPrivateKey):
private_key = DSAPrivateKey.load(private_key)
params = DSAParams()
params['p'] = private_key['p']
params['q'] = private_key['q']
params['g'] = private_key['g']
public_key = private_key['public_key']
private_key = private_key['private_key']
elif algorithm == 'ec':
if not isinstance(private_key, ECPrivateKey):
private_key = ECPrivateKey.load(private_key)
else:
private_key = private_key.copy()
params = private_key['parameters']
del private_key['parameters']
else:
raise ValueError(unwrap(
'''
algorithm must be one of "rsa", "dsa", "ec", not %s
''',
repr(algorithm)
))
private_key_algo = PrivateKeyAlgorithm()
private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm)
private_key_algo['parameters'] = params
container = cls()
container._algorithm = algorithm
container['version'] = Integer(0)
container['private_key_algorithm'] = private_key_algo
container['private_key'] = private_key
# Here we save the DSA public key if possible since it is not contained
# within the PKCS#8 structure for a DSA key
if algorithm == 'dsa':
container._public_key = public_key
return container
|
def wrap(cls, private_key, algorithm)
|
Wraps a private key in a PrivateKeyInfo structure
:param private_key:
A byte string or Asn1Value object of the private key
:param algorithm:
A unicode string of "rsa", "dsa" or "ec"
:return:
A PrivateKeyInfo object
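A hedged usage sketch (assuming the surrounding class is an asn1crypto-style
PrivateKeyInfo and that key_der holds DER-encoded PKCS#1 RSAPrivateKey bytes;
the file path is purely illustrative):
    from asn1crypto import keys

    with open('rsa_private_key.der', 'rb') as f:
        key_der = f.read()

    info = keys.PrivateKeyInfo.wrap(key_der, 'rsa')
    info.algorithm  # 'rsa'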
| 2.180042 | 2.036631 | 1.070416 |
if self.algorithm == 'dsa':
params = self['private_key_algorithm']['parameters']
return Integer(pow(
params['g'].native,
self['private_key'].parsed.native,
params['p'].native
))
if self.algorithm == 'rsa':
key = self['private_key'].parsed
return RSAPublicKey({
'modulus': key['modulus'],
'public_exponent': key['public_exponent'],
})
if self.algorithm == 'ec':
curve_type, details = self.curve
if curve_type == 'implicit_ca':
raise ValueError(unwrap(
'''
Unable to compute public key for EC key using Implicit CA
parameters
'''
))
if curve_type == 'specified':
if details['field_id']['field_type'] == 'characteristic_two_field':
raise ValueError(unwrap(
'''
Unable to compute public key for EC key over a
characteristic two field
'''
))
curve = PrimeCurve(
details['field_id']['parameters'],
int_from_bytes(details['curve']['a']),
int_from_bytes(details['curve']['b'])
)
base_x, base_y = self['private_key_algorithm']['parameters'].chosen['base'].to_coords()
base_point = PrimePoint(curve, base_x, base_y)
elif curve_type == 'named':
if details not in ('secp192r1', 'secp224r1', 'secp256r1', 'secp384r1', 'secp521r1'):
raise ValueError(unwrap(
'''
Unable to compute public key for EC named curve %s,
parameters not currently included
''',
details
))
base_point = {
'secp192r1': SECP192R1_BASE_POINT,
'secp224r1': SECP224R1_BASE_POINT,
'secp256r1': SECP256R1_BASE_POINT,
'secp384r1': SECP384R1_BASE_POINT,
'secp521r1': SECP521R1_BASE_POINT,
}[details]
public_point = base_point * self['private_key'].parsed['private_key'].native
return ECPointBitString.from_coords(public_point.x, public_point.y)
|
def _compute_public_key(self)
|
Computes the public key corresponding to the current private key.
:return:
For RSA keys, an RSAPublicKey object. For DSA keys, an Integer
object. For EC keys, an ECPointBitString.
| 2.738171 | 2.596045 | 1.054747 |
if self.algorithm == 'rsa':
return self['private_key'].parsed
if self.algorithm == 'dsa':
params = self['private_key_algorithm']['parameters']
return DSAPrivateKey({
'version': 0,
'p': params['p'],
'q': params['q'],
'g': params['g'],
'public_key': self.public_key,
'private_key': self['private_key'].parsed,
})
if self.algorithm == 'ec':
output = self['private_key'].parsed
output['parameters'] = self['private_key_algorithm']['parameters']
output['public_key'] = self.public_key
return output
|
def unwrap(self)
|
Unwraps the private key into an RSAPrivateKey, DSAPrivateKey or
ECPrivateKey object
:return:
An RSAPrivateKey, DSAPrivateKey or ECPrivateKey object
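A hedged sketch of the inverse of wrap() (assuming an asn1crypto-style
PrivateKeyInfo class that exposes this method; the file path is
illustrative):
    from asn1crypto import keys

    with open('rsa_private_key.der', 'rb') as f:
        info = keys.PrivateKeyInfo.wrap(f.read(), 'rsa')

    rsa_key = info.unwrap()
    rsa_key['modulus'].native          # the RSA modulus as a Python integer
    rsa_key['public_exponent'].native  # typically 65537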
| 2.678163 | 2.341287 | 1.143885 |
if self._algorithm is None:
self._algorithm = self['private_key_algorithm']['algorithm'].native
return self._algorithm
|
def algorithm(self)
|
:return:
A unicode string of "rsa", "dsa" or "ec"
| 9.06269 | 5.777138 | 1.568716 |
if self._bit_size is None:
if self.algorithm == 'rsa':
prime = self['private_key'].parsed['modulus'].native
elif self.algorithm == 'dsa':
prime = self['private_key_algorithm']['parameters']['p'].native
elif self.algorithm == 'ec':
prime = self['private_key'].parsed['private_key'].native
self._bit_size = int(math.ceil(math.log(prime, 2)))
modulus = self._bit_size % 8
if modulus != 0:
self._bit_size += 8 - modulus
return self._bit_size
|
def bit_size(self)
|
:return:
The bit size of the private key, as an integer
| 3.089659 | 2.822469 | 1.094665 |
if self._public_key is None:
if self.algorithm == 'ec':
key = self['private_key'].parsed
if key['public_key']:
self._public_key = key['public_key'].untag()
else:
self._public_key = self._compute_public_key()
else:
self._public_key = self._compute_public_key()
return self._public_key
|
def public_key(self)
|
:return:
If an RSA key, an RSAPublicKey object. If a DSA key, an Integer
object. If an EC key, an ECPointBitString object.
| 3.428432 | 3.25128 | 1.054487 |
if self._fingerprint is None:
params = self['private_key_algorithm']['parameters']
key = self['private_key'].parsed
if self.algorithm == 'rsa':
to_hash = '%d:%d' % (
key['modulus'].native,
key['public_exponent'].native,
)
elif self.algorithm == 'dsa':
public_key = self.public_key
to_hash = '%d:%d:%d:%d' % (
params['p'].native,
params['q'].native,
params['g'].native,
public_key.native,
)
elif self.algorithm == 'ec':
public_key = key['public_key'].native
if public_key is None:
public_key = self.public_key.native
if params.name == 'named':
to_hash = '%s:' % params.chosen.native
to_hash = to_hash.encode('utf-8')
to_hash += public_key
elif params.name == 'implicit_ca':
to_hash = public_key
elif params.name == 'specified':
to_hash = '%s:' % params.chosen['field_id']['parameters'].native
to_hash = to_hash.encode('utf-8')
to_hash += b':' + params.chosen['curve']['a'].native
to_hash += b':' + params.chosen['curve']['b'].native
to_hash += public_key
if isinstance(to_hash, str_cls):
to_hash = to_hash.encode('utf-8')
self._fingerprint = hashlib.sha256(to_hash).digest()
return self._fingerprint
|
def fingerprint(self)
|
Creates a fingerprint that can be compared with a public key to see if
the two form a pair.
This fingerprint is not compatible with fingerprints generated by any
other software.
:return:
A byte string that is a sha256 hash of selected components (based
on the key type)
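A hedged comparison sketch (assuming both classes come from an
asn1crypto-style keys module and that the public key class exposes a matching
fingerprint property; the file paths are illustrative):
    from asn1crypto import keys

    with open('rsa_private_key.der', 'rb') as f:
        priv = keys.PrivateKeyInfo.wrap(f.read(), 'rsa')
    with open('rsa_public_key.der', 'rb') as f:
        pub = keys.PublicKeyInfo.load(f.read())

    priv.fingerprint == pub.fingerprint  # True when the keys form a pair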
| 2.6072 | 2.510405 | 1.038557 |
if not isinstance(public_key, byte_cls) and not isinstance(public_key, Asn1Value):
raise TypeError(unwrap(
'''
public_key must be a byte string or Asn1Value, not %s
''',
type_name(public_key)
))
if algorithm != 'rsa':
raise ValueError(unwrap(
'''
algorithm must be "rsa", not %s
''',
repr(algorithm)
))
algo = PublicKeyAlgorithm()
algo['algorithm'] = PublicKeyAlgorithmId(algorithm)
algo['parameters'] = Null()
container = cls()
container['algorithm'] = algo
if isinstance(public_key, Asn1Value):
public_key = public_key.untag().dump()
container['public_key'] = ParsableOctetBitString(public_key)
return container
|
def wrap(cls, public_key, algorithm)
|
Wraps a public key in a PublicKeyInfo structure
:param public_key:
A byte string or Asn1Value object of the public key
:param algorithm:
A unicode string of "rsa"
:return:
A PublicKeyInfo object
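A hedged usage sketch (assuming the surrounding class is an asn1crypto-style
PublicKeyInfo and that pub_der holds DER-encoded PKCS#1 RSAPublicKey bytes;
the file path is illustrative):
    from asn1crypto import keys

    with open('rsa_public_key.der', 'rb') as f:
        pub_der = f.read()

    info = keys.PublicKeyInfo.wrap(pub_der, 'rsa')
    info.algorithm  # 'rsa'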
| 3.13083 | 2.634705 | 1.188304 |
if self.algorithm == 'rsa':
return self['public_key'].parsed
key_type = self.algorithm.upper()
a_an = 'an' if key_type == 'EC' else 'a'
raise ValueError(unwrap(
'''
Only RSA public keys may be unwrapped - this key is %s %s public
key
''',
a_an,
key_type
))
|
def unwrap(self)
|
Unwraps an RSA public key into an RSAPublicKey object. Does not support
DSA or EC public keys since they do not have an unwrapped form.
:return:
An RSAPublicKey object
| 7.818736 | 6.677591 | 1.170892 |
if self.algorithm != 'ec':
raise ValueError(unwrap(
'''
Only EC keys have a curve, this key is %s
''',
self.algorithm.upper()
))
params = self['algorithm']['parameters']
chosen = params.chosen
if params.name == 'implicit_ca':
value = None
else:
value = chosen.native
return (params.name, value)
|
def curve(self)
|
Returns information about the curve used for an EC key
:raises:
ValueError - when the key is not an EC key
:return:
A two-element tuple, with the first element being a unicode string
of "implicit_ca", "specified" or "named". If the first element is
"implicit_ca", the second is None. If "specified", the second is
an OrderedDict that is the native version of SpecifiedECDomain. If
"named", the second is a unicode string of the curve name.
| 9.116862 | 5.079272 | 1.794915 |
if self.algorithm != 'dsa':
raise ValueError(unwrap(
'''
Only DSA keys are generated using a hash algorithm, this key is
%s
''',
self.algorithm.upper()
))
parameters = self['algorithm']['parameters']
if parameters.native is None:
return None
byte_len = math.log(parameters['q'].native, 2) / 8
return 'sha1' if byte_len <= 20 else 'sha2'
|
def hash_algo(self)
|
Returns the name of the family of hash algorithms used to generate a
DSA key
:raises:
ValueError - when the key is not a DSA key
:return:
A unicode string of "sha1" or "sha2" or None if no parameters are
present
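A hedged sketch (assuming a DSA key loaded into an asn1crypto-style
PublicKeyInfo class as defined above; the file path is illustrative):
    from asn1crypto import keys

    with open('dsa_public_key.der', 'rb') as f:
        pub = keys.PublicKeyInfo.load(f.read())

    pub.hash_algo  # 'sha1' when q is at most 160 bits, otherwise 'sha2'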
| 6.209733 | 4.743741 | 1.309037 |
if self._sha1 is None:
self._sha1 = hashlib.sha1(byte_cls(self['public_key'])).digest()
return self._sha1
|
def sha1(self)
|
:return:
The SHA1 hash of the DER-encoded bytes of this public key info
| 5.531743 | 4.834927 | 1.144121 |
if self._sha256 is None:
self._sha256 = hashlib.sha256(byte_cls(self['public_key'])).digest()
return self._sha256
|
def sha256(self)
|
:return:
The SHA-256 hash of the DER-encoded bytes of this public key info
| 4.160009 | 3.748952 | 1.109646 |
setup_file = os.path.join(package_root, 'setup.py')
git_wc_proc = subprocess.Popen(
['git', 'status', '--porcelain', '-uno'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=package_root
)
git_wc_status, _ = git_wc_proc.communicate()
if len(git_wc_status) > 0:
print(git_wc_status.decode('utf-8').rstrip(), file=sys.stderr)
print('Unable to perform release since working copy is not clean', file=sys.stderr)
return False
git_tag_proc = subprocess.Popen(
['git', 'tag', '-l', '--contains', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=package_root
)
tag, tag_error = git_tag_proc.communicate()
if len(tag_error) > 0:
print(tag_error.decode('utf-8').rstrip(), file=sys.stderr)
print('Error looking for current git tag', file=sys.stderr)
return False
if len(tag) == 0:
print('No git tag found on HEAD', file=sys.stderr)
return False
tag = tag.decode('ascii').strip()
setuptools.sandbox.run_setup(
setup_file,
['sdist', 'bdist_wheel', '--universal']
)
twine.cli.dispatch(['upload', 'dist/%s-%s*' % (package_name, tag)])
setuptools.sandbox.run_setup(
setup_file,
['clean']
)
return True
|
def run()
|
Creates an sdist .tar.gz and a bdist_wheel --universal .whl and uploads
them to pypi
:return:
A bool - if the packaging and upload process was successful
| 2.251318 | 2.188574 | 1.028669 |
xml_report_path = os.path.join(package_root, 'coverage.xml')
if os.path.exists(xml_report_path):
os.unlink(xml_report_path)
cov = coverage.Coverage(include='%s/*.py' % package_name)
cov.start()
from .tests import run as run_tests
result = run_tests()
print()
if ci:
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for other_package in other_packages:
for test_class in _load_package_tests(other_package):
suite.addTest(loader.loadTestsFromTestCase(test_class))
if suite.countTestCases() > 0:
print('Running tests from other modularcrypto packages')
sys.stdout.flush()
runner_result = unittest.TextTestRunner(stream=sys.stdout, verbosity=1).run(suite)
result = runner_result.wasSuccessful() and result
print()
sys.stdout.flush()
cov.stop()
cov.save()
cov.report(show_missing=False)
print()
sys.stdout.flush()
if ci:
cov.xml_report()
if ci and result and os.path.exists(xml_report_path):
_codecov_submit()
print()
return result
|
def run(ci=False)
|
Runs the tests while measuring coverage
:param ci:
If coverage is being run in a CI environment - this triggers trying to
run the tests for the rest of modularcrypto and uploading coverage data
:return:
A bool - if the tests ran successfully
| 2.978148 | 2.745761 | 1.084635 |
proc = subprocess.Popen(
['git'] + params,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd
)
stdout, stderr = proc.communicate()
code = proc.wait()
if code != 0:
e = OSError('git exit code was non-zero')
e.stdout = stdout
raise e
return stdout.decode('utf-8').strip()
|
def _git_command(params, cwd)
|
Executes a git command, returning the output
:param params:
A list of the parameters to pass to git
:param cwd:
The working directory to execute git in
:return:
A unicode string of the command output, with trailing whitespace stripped
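A hedged usage sketch (using the helper as defined above; the repository path
is illustrative):
    branch = _git_command(['rev-parse', '--abbrev-ref', 'HEAD'], '/path/to/repo')
    print(branch)  # e.g. 'master'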
| 2.497513 | 2.405366 | 1.038309 |
output = {}
for line in data.splitlines():
line = line.strip()
if not line or '=' not in line:
continue
parts = line.split('=')
if len(parts) != 2:
continue
name = parts[0]
value = parts[1]
if len(value) > 1:
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
output[name] = value
return output
|
def _parse_env_var_file(data)
|
Parses a basic VAR="value data" file contents into a dict
:param data:
A unicode string of the file data
:return:
A dict of parsed name/value data
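A self-contained sketch of the expected behaviour (using the function as
defined above; lines without an equals sign are skipped and surrounding
double quotes are stripped):
    sample = 'NAME="Ubuntu"\nVERSION_ID="18.04"\nCOMMENT\n'
    _parse_env_var_file(sample)
    # -> {'NAME': 'Ubuntu', 'VERSION_ID': '18.04'}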
| 1.970241 | 1.987518 | 0.991307 |
if sys.platform == 'darwin':
version = _plat.mac_ver()[0]
_plat_ver_info = tuple(map(int, version.split('.')))
if _plat_ver_info < (10, 12):
name = 'OS X'
else:
name = 'macOS'
return '%s %s' % (name, version)
elif sys.platform == 'win32':
_win_ver = sys.getwindowsversion()
_plat_ver_info = (_win_ver[0], _win_ver[1])
return 'Windows %s' % _plat.win32_ver()[0]
elif sys.platform in ['linux', 'linux2']:
if os.path.exists('/etc/os-release'):
with open('/etc/os-release', 'r', encoding='utf-8') as f:
pairs = _parse_env_var_file(f.read())
if 'NAME' in pairs and 'VERSION_ID' in pairs:
return '%s %s' % (pairs['NAME'], pairs['VERSION_ID'])
elif 'PRETTY_NAME' in pairs:
return pairs['PRETTY_NAME']
elif 'NAME' in pairs:
return pairs['NAME']
else:
raise ValueError('No suitable version info found in /etc/os-release')
elif os.path.exists('/etc/lsb-release'):
with open('/etc/lsb-release', 'r', encoding='utf-8') as f:
pairs = _parse_env_var_file(f.read())
if 'DISTRIB_DESCRIPTION' in pairs:
return pairs['DISTRIB_DESCRIPTION']
else:
raise ValueError('No suitable version info found in /etc/lsb-release')
else:
return 'Linux'
else:
return '%s %s' % (_plat.system(), _plat.release())
|
def _platform_name()
|
Returns information about the current operating system and version
:return:
A unicode string containing the OS name and version
| 1.876466 | 1.828416 | 1.02628 |
dir_patterns, file_patterns = _gitignore(root)
paths = []
prefix = os.path.abspath(root) + os.sep
for base, dirs, files in os.walk(root):
for d in list(dirs):  # iterate over a copy so in-place removals don't skip entries
for dir_pattern in dir_patterns:
if fnmatch(d, dir_pattern):
dirs.remove(d)
break
for f in files:
skip = False
for file_pattern in file_patterns:
if fnmatch(f, file_pattern):
skip = True
break
if skip:
continue
full_path = os.path.join(base, f)
if full_path[:len(prefix)] == prefix:
full_path = full_path[len(prefix):]
paths.append(full_path)
return sorted(paths)
|
def _list_files(root)
|
Lists all of the files in a directory, taking into account any .gitignore
file that is present
:param root:
A unicode filesystem path
:return:
A list of unicode strings, containing paths of all files not ignored
by .gitignore under root, using relative paths
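A hedged usage sketch (using the helper as defined above; the project path
and the sample output paths are illustrative):
    for rel_path in _list_files('/path/to/project'):
        print(rel_path)  # e.g. 'setup.py', 'package/core.py'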
| 2.095335 | 2.095963 | 0.999701 |