code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
self._owner_type = value
if value == 'User':
self._fields['owner'] = entity_fields.OneToOneField(User)
if hasattr(self, 'owner'):
# pylint:disable=no-member
self.owner = User(
self._server_config,
id=self.owner.id if isinstance(self.owner, Entity)
else self.owner
)
elif value == 'Usergroup':
self._fields['owner'] = entity_fields.OneToOneField(UserGroup)
if hasattr(self, 'owner'):
# pylint:disable=no-member
self.owner = UserGroup(
self._server_config,
id=self.owner.id if isinstance(self.owner, Entity)
else self.owner
) | def owner_type(self, value) | Set ``owner_type`` to the given value.
In addition:
* Update the internal type of the ``owner`` field.
* Update the value of the ``owner`` field if a value is already set. | 2.461724 | 2.525848 | 0.974613 |
attrs = super(Host, self).get_values()
if '_owner_type' in attrs and attrs['_owner_type'] is not None:
attrs['owner_type'] = attrs.pop('_owner_type')
else:
attrs.pop('_owner_type')
return attrs | def get_values(self) | Correctly set the ``owner_type`` attribute. | 3.648998 | 2.712677 | 1.345165 |
return Host(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | def create(self, create_missing=None) | Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1449749
<https://bugzilla.redhat.com/show_bug.cgi?id=1449749>`_. | 16.941069 | 18.14599 | 0.933598 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('errata/applicability'), **kwargs)
return _handle_response(response, self._server_config, synchronous) | def errata_applicability(self, synchronous=True, **kwargs) | Force regenerate errata applicability
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all content decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 6.776036 | 6.809268 | 0.995119 |
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
if 'parameters' in attrs:
attrs['host_parameters_attributes'] = attrs.pop('parameters')
else:
ignore.add('host_parameters_attributes')
if 'content_facet_attributes' not in attrs:
ignore.add('content_facet_attributes')
ignore.add('compute_attributes')
ignore.add('interfaces_attributes')
ignore.add('root_pass')
# Image entity requires compute_resource_id to initialize as it is
# part of its path. The thing is that entity_mixins.read() initializes
# entities by id only.
# Workaround is to add image to ignore, call entity_mixins.read()
# and then add 'manually' initialized image to the result.
# If image_id is None set image to None as it is done by default.
ignore.add('image')
# host id is required for interface initialization
ignore.add('interface')
ignore.add('build_status_label')
result = super(Host, self).read(entity, attrs, ignore, params)
if attrs.get('image_id'):
result.image = Image(
server_config=self._server_config,
id=attrs.get('image_id'),
compute_resource=attrs.get('compute_resource_id'),
)
else:
result.image = None
if 'interfaces' in attrs and attrs['interfaces']:
result.interface = [
Interface(
self._server_config,
host=result.id,
id=interface['id'],
)
for interface in attrs['interfaces']
]
if 'build_status_label' in attrs:
result.build_status_label = attrs['build_status_label']
return result | def read(self, entity=None, attrs=None, ignore=None, params=None) | Deal with oddly named and structured data returned by the server.
For more information, see `Bugzilla #1235019
<https://bugzilla.redhat.com/show_bug.cgi?id=1235019>`_
and `Bugzilla #1449749
<https://bugzilla.redhat.com/show_bug.cgi?id=1449749>`_.
`content_facet_attributes` are returned only in case any of facet
attributes were actually set.
Also add image to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
image. | 4.256613 | 3.906388 | 1.089655 |
if which in (
'enc',
'errata',
'errata/apply',
'errata/applicability',
'facts',
'packages',
'power',
'puppetclass_ids',
'smart_class_parameters',
'smart_variables',
'module_streams',
):
return '{0}/{1}'.format(
super(Host, self).path(which='self'),
which
)
elif which in ('bulk/install_content',):
return '{0}/{1}'.format(
super(Host, self).path(which='base'),
which
)
elif which in ('upload_facts',):
return '{0}/{1}'.format(
super(Host, self).path(which='base'),
'facts'
)
return super(Host, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
bulk/install_content
/api/hosts/:host_id/bulk/install_content
errata
/api/hosts/:host_id/errata
power
/api/hosts/:host_id/power
errata/apply
/api/hosts/:host_id/errata/apply
puppetclass_ids
/api/hosts/:host_id/puppetclass_ids
smart_class_parameters
/api/hosts/:host_id/smart_class_parameters
smart_variables
/api/hosts/:host_id/smart_class_variables
module_streams
/api/hosts/:host_id/module_streams
Otherwise, call ``super``. | 5.166258 | 2.464466 | 2.096299 |
results = self.search_json(fields, query)['results']
results = self.search_normalize(results)
entities = []
for result in results:
image = result.get('image')
if image is not None:
del result['image']
entity = type(self)(self._server_config, **result)
if image:
entity.image = Image(
server_config=self._server_config,
id=image,
compute_resource=AbstractComputeResource(
server_config=self._server_config,
id=result.get('compute_resource')
),
)
entities.append(entity)
if filters is not None:
entities = self.search_filter(entities, filters)
return entities | def search(self, fields=None, query=None, filters=None) | Search for entities.
:param fields: A set naming which fields should be used when generating
a search query. If ``None``, all values on the entity are used. If
an empty set, no values are used.
:param query: A dict containing a raw search query. This is melded in
to the generated search query like so: ``{generated:
query}.update({manual: query})``.
:param filters: A dict. Used to filter search results locally.
:return: A list of entities, all of type ``type(self)``. | 3.34545 | 3.213245 | 1.041144 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
compute_resource=self.compute_resource, # pylint:disable=E1101
)
if ignore is None:
ignore = set()
ignore.add('compute_resource')
return super(Image, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`Image` requires that an
``compute_resource`` be provided, so this technique will not work. Do
this instead::
entity = type(self)(compute_resource=self.compute_resource.id) | 5.982015 | 6.1198 | 0.977485 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
host=self.host, # pylint:disable=no-member
)
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
ignore.add('host')
# type-specific fields
if attrs['type'] != 'bmc':
ignore.add('password')
ignore.add('provider')
ignore.add('username')
if attrs['type'] != 'bond':
ignore.add('mode')
ignore.add('bond_options')
if attrs['type'] != 'virtual':
ignore.add('attached_to')
ignore.add('tag')
if attrs['type'] != 'bridge' and attrs['type'] != 'bond':
ignore.add('attached_devices')
return super(Interface, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`Interface` requires that a ``host`` must be provided,
so this technique will not work. Do this instead::
entity = type(self)(host=self.host)
In addition, some of interface fields are specific to its ``type`` and
are never returned for different ``type`` so ignoring all the redundant
fields. | 4.58369 | 4.608776 | 0.994557 |
for interface in results:
interface[u'host_id'] = self.host.id # pylint:disable=no-member
return super(Interface, self).search_normalize(results) | def search_normalize(self, results) | Append host id to search results to be able to initialize found
:class:`Interface` successfully | 5.839902 | 3.999965 | 1.459988 |
payload = super(LifecycleEnvironment, self).create_payload()
if (_get_version(self._server_config) < Version('6.1') and
'prior_id' in payload):
payload['prior'] = payload.pop('prior_id')
return payload | def create_payload(self) | Rename the payload key "prior_id" to "prior".
For more information, see `Bugzilla #1238757
<https://bugzilla.redhat.com/show_bug.cgi?id=1238757>`_. | 7.919258 | 5.115788 | 1.548003 |
# We call `super` first b/c it populates `self.organization`, and we
# need that field to perform a search a little later.
super(LifecycleEnvironment, self).create_missing()
if (self.name != 'Library' and # pylint:disable=no-member
not hasattr(self, 'prior')):
results = self.search({'organization'}, {u'name': u'Library'})
if len(results) != 1:
raise APIResponseError(
u'Could not find the "Library" lifecycle environment for '
u'organization {0}. Search results: {1}'
.format(self.organization, results) # pylint:disable=E1101
)
self.prior = results[0] | def create_missing(self) | Automatically populate additional instance attributes.
When a new lifecycle environment is created, it must either:
* Reference a parent lifecycle environment in the tree of lifecycle
environments via the ``prior`` field, or
* have a name of "Library".
Within a given organization, there can only be a single lifecycle
environment with a name of 'Library'. This lifecycle environment is at
the root of a tree of lifecycle environments, so its ``prior`` field is
blank.
This method finds the 'Library' lifecycle environment within the
current organization and points to it via the ``prior`` field. This is
not done if the current lifecycle environment has a name of 'Library'. | 6.380426 | 4.688013 | 1.361009 |
attrs = self.create_json(create_missing)
return Location(self._server_config, id=attrs['id']).read() | def create(self, create_missing=None) | Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1216236
<https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_. | 15.172518 | 15.538402 | 0.976453 |
payload = super(Media, self).create_payload()
if 'path_' in payload:
payload['path'] = payload.pop('path_')
return {u'medium': payload} | def create_payload(self) | Wrap submitted data within an extra dict and rename ``path_``.
For more information on wrapping submitted data, see `Bugzilla #1151220
<https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_. | 6.457785 | 4.961896 | 1.301475 |
return Media(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | def create(self, create_missing=None) | Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1219653
<https://bugzilla.redhat.com/show_bug.cgi?id=1219653>`_. | 16.124317 | 19.890377 | 0.810659 |
payload = super(Media, self).update_payload(fields)
if 'path_' in payload:
payload['path'] = payload.pop('path_')
return {u'medium': payload} | def update_payload(self, fields=None) | Wrap submitted data within an extra dict. | 5.659167 | 5.025246 | 1.126147 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
operatingsystem=self.operatingsystem, # pylint:disable=E1101
)
if ignore is None:
ignore = set()
ignore.add('operatingsystem')
return super(OperatingSystemParameter, self).read(
entity,
attrs,
ignore,
params
) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`OperatingSystemParameter` requires that an
``operatingsystem`` be provided, so this technique will not work. Do
this instead::
entity = type(self)(operatingsystem=self.operatingsystem.id) | 6.50243 | 6.156016 | 1.056272 |
if which in (
'download_debug_certificate',
'subscriptions',
'subscriptions/delete_manifest',
'subscriptions/manifest_history',
'subscriptions/refresh_manifest',
'subscriptions/upload',
'sync_plans',
):
return '{0}/{1}'.format(
super(Organization, self).path(which='self'),
which
)
return super(Organization, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
download_debug_certificate
/organizations/<id>/download_debug_certificate
subscriptions
/organizations/<id>/subscriptions
subscriptions/upload
/organizations/<id>/subscriptions/upload
subscriptions/delete_manifest
/organizations/<id>/subscriptions/delete_manifest
subscriptions/refresh_manifest
/organizations/<id>/subscriptions/refresh_manifest
sync_plans
/organizations/<id>/sync_plans
Otherwise, call ``super``. | 6.365432 | 2.753125 | 2.312075 |
return Organization(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | def create(self, create_missing=None) | Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1230873
<https://bugzilla.redhat.com/show_bug.cgi?id=1230873>`_. | 15.874147 | 18.254915 | 0.869582 |
org_payload = super(Organization, self).update_payload(fields)
payload = {u'organization': org_payload}
if 'redhat_repository_url' in org_payload:
rh_repo_url = org_payload.pop('redhat_repository_url')
payload['redhat_repository_url'] = rh_repo_url
return payload | def update_payload(self, fields=None) | Wrap submitted data within an extra dict. | 3.243574 | 3.119601 | 1.03974 |
payload = super(OSDefaultTemplate, self).update_payload(fields)
return {'os_default_template': payload} | def update_payload(self, fields=None) | Wrap payload in ``os_default_template``
relates to `Redmine #21169`_.
.. _Redmine #21169: http://projects.theforeman.org/issues/21169 | 7.025033 | 4.683788 | 1.499861 |
payload = super(OverrideValue, self).create_payload()
if hasattr(self, 'smart_class_parameter'):
del payload['smart_class_parameter_id']
if hasattr(self, 'smart_variable'):
del payload['smart_variable_id']
return payload | def create_payload(self) | Remove ``smart_class_parameter_id`` or ``smart_variable_id`` | 4.716619 | 2.378838 | 1.98274 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
if hasattr(self, 'smart_class_parameter'):
entity = type(self)(
self._server_config,
# pylint:disable=no-member
smart_class_parameter=self.smart_class_parameter,
)
elif hasattr(self, 'smart_variable'):
entity = type(self)(
self._server_config,
# pylint:disable=no-member
smart_variable=self.smart_variable,
)
if ignore is None:
ignore = set()
ignore.update(['smart_class_parameter', 'smart_variable'])
return super(OverrideValue, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`OverrideValue` requires that an
``smart_class_parameter`` or ``smart_varaiable`` be provided, so this
technique will not work. Do this instead::
entity = type(self)(
smart_class_parameter=self.smart_class_parameter)
entity = type(self)(smart_variable=self.smart_variable) | 4.349434 | 3.513267 | 1.238003 |
if entity is None:
entity = type(self)(
self._server_config,
**{self._parent_type: self._parent_id}
)
if ignore is None:
ignore = set()
for field_name in self._path_fields:
ignore.add(field_name)
return super(Parameter, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Ignore path related fields as they're never returned by the server
and are only added to entity to be able to use proper path. | 3.884094 | 3.302973 | 1.175939 |
if which == 'sync':
return '{0}/{1}'.format(
super(Product, self).path(which='self'),
which,
)
return super(Product, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
sync
/products/<product_id>/sync
``super`` is called otherwise. | 6.011649 | 4.480042 | 1.341873 |
if attrs is None:
attrs = self.read_json()
if _get_version(self._server_config) < Version('6.1'):
org = _get_org(self._server_config, attrs['organization']['label'])
attrs['organization'] = org.get_values()
if ignore is None:
ignore = set()
ignore.add('sync_plan')
result = super(Product, self).read(entity, attrs, ignore, params)
if 'sync_plan' in attrs:
sync_plan_id = attrs.get('sync_plan_id')
if sync_plan_id is None:
result.sync_plan = None
else:
result.sync_plan = SyncPlan(
server_config=self._server_config,
id=sync_plan_id,
organization=result.organization,
)
return result | def read(self, entity=None, attrs=None, ignore=None, params=None) | Fetch an attribute missing from the server's response.
Also add sync plan to the responce if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
sync plan.
For more information, see `Bugzilla #1237283
<https://bugzilla.redhat.com/show_bug.cgi?id=1237283>`_ and
`nailgun#261 <https://github.com/SatelliteQE/nailgun/issues/261>`_. | 3.155862 | 3.205262 | 0.984588 |
results = self.search_json(fields, query)['results']
results = self.search_normalize(results)
entities = []
for result in results:
sync_plan = result.get('sync_plan')
if sync_plan is not None:
del result['sync_plan']
entity = type(self)(self._server_config, **result)
if sync_plan:
entity.sync_plan = SyncPlan(
server_config=self._server_config,
id=sync_plan,
organization=Organization(
server_config=self._server_config,
id=result.get('organization')
),
)
entities.append(entity)
if filters is not None:
entities = self.search_filter(entities, filters)
return entities | def search(self, fields=None, query=None, filters=None) | Search for entities with missing attribute
:param fields: A set naming which fields should be used when generating
a search query. If ``None``, all values on the entity are used. If
an empty set, no values are used.
:param query: A dict containing a raw search query. This is melded in
to the generated search query like so: ``{generated:
query}.update({manual: query})``.
:param filters: A dict. Used to filter search results locally.
:return: A list of entities, all of type ``type(self)``.
For more information, see `Bugzilla #1237283
<https://bugzilla.redhat.com/show_bug.cgi?id=1237283>`_ and
`nailgun#261 <https://github.com/SatelliteQE/nailgun/issues/261>`_. | 3.109607 | 3.130068 | 0.993463 |
flattened_results = []
for key in results.keys():
for item in results[key]:
flattened_results.append(item)
return super(PuppetClass, self).search_normalize(flattened_results) | def search_normalize(self, results) | Flattens results.
:meth:`nailgun.entity_mixins.EntitySearchMixin.search_normalize`
expects structure like
list(dict_1(name: class_1), dict_2(name: class_2)),
while Puppet Class entity returns dictionary with lists of subclasses
split by main puppet class. | 3.492381 | 2.544305 | 1.372627 |
if which in ('smart_class_parameters', 'smart_variables'):
return '{0}/{1}'.format(
super(PuppetClass, self).path(which='self'),
which
)
return super(PuppetClass, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
smart_class_parameters
/api/puppetclasses/:puppetclass_id/smart_class_parameters
Otherwise, call ``super``. | 6.671987 | 3.440286 | 1.93937 |
return Realm(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | def create(self, create_missing=None) | Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1232855
<https://bugzilla.redhat.com/show_bug.cgi?id=1232855>`_. | 17.088955 | 18.162062 | 0.940915 |
if which in ('cancel',):
return '{0}/{1}'.format(
super(RecurringLogic, self).path(which='self'),
which
)
return super(RecurringLogic, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.RecurringLogic.path``.
The format of the returned path depends on the value of ``which``:
cancel
/foreman_tasks/api/recurring_logics/:id/cancel
Otherwise, call ``super``. | 7.415153 | 4.011883 | 1.848297 |
return Registry(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | def create(self, create_missing=None) | Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1479391
<https://bugzilla.redhat.com/show_bug.cgi?id=1479391>`_. | 17.347433 | 18.362986 | 0.944696 |
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
ignore.add('password')
return super(Registry, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Do not read the ``password`` argument. | 3.218741 | 2.863797 | 1.123942 |
if which in (
'errata',
'files',
'packages',
'module_streams',
'puppet_modules',
'remove_content',
'sync',
'import_uploads',
'upload_content'):
return '{0}/{1}'.format(
super(Repository, self).path(which='self'),
which
)
return super(Repository, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
errata
/repositories/<id>/errata
files
/repositories/<id>/files
packages
/repositories/<id>/packages
module_streams
/repositories/<id>/module_streams
puppet_modules
/repositories/<id>/puppet_modules
remove_content
/repositories/<id>/remove_content
sync
/repositories/<id>/sync
upload_content
/repositories/<id>/upload_content
import_uploads
/repositories/<id>/import_uploads
``super`` is called otherwise. | 7.40257 | 2.568132 | 2.882473 |
if getattr(self, 'content_type', '') == 'docker':
self._fields['docker_upstream_name'].required = True
super(Repository, self).create_missing() | def create_missing(self) | Conditionally mark ``docker_upstream_name`` as required.
Mark ``docker_upstream_name`` as required if ``content_type`` is
"docker". | 9.450454 | 4.027848 | 2.346279 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.post(self.path('upload_content'), **kwargs)
json = _handle_response(response, self._server_config, synchronous)
if json['status'] != 'success':
raise APIResponseError(
# pylint:disable=no-member
'Received error when uploading file {0} to repository {1}: {2}'
.format(kwargs.get('files'), self.id, json)
)
return json | def upload_content(self, synchronous=True, **kwargs) | Upload a file or files to the current repository.
Here is an example of how to upload content::
with open('my_content.rpm') as content:
repo.upload_content(files={'content': content})
This method accepts the same keyword arguments as Requests. As a
result, the following examples can be adapted for use here:
* `POST a Multipart-Encoded File`_
* `POST Multiple Multipart-Encoded Files`_
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
:raises nailgun.entities.APIResponseError: If the response has a status
other than "success".
.. _POST a Multipart-Encoded File:
http://docs.python-requests.org/en/latest/user/quickstart/#post-a-multipart-encoded-file
.. _POST Multiple Multipart-Encoded Files:
http://docs.python-requests.org/en/latest/user/advanced/#post-multiple-multipart-encoded-files | 5.214803 | 4.799483 | 1.086534 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
if uploads:
data = {'uploads': uploads}
elif upload_ids:
data = {'upload_ids': upload_ids}
response = client.put(self.path('import_uploads'), data, **kwargs)
json = _handle_response(response, self._server_config, synchronous)
return json | def import_uploads(self, uploads=None, upload_ids=None, synchronous=True,
**kwargs) | Import uploads into a repository
It expects either a list of uploads or upload_ids (but not both).
:param uploads: Array of uploads to be imported
:param upload_ids: Array of upload ids to be imported
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 4.004417 | 4.233878 | 0.945804 |
if 'data' not in kwargs:
kwargs['data'] = dict()
kwargs['data']['product_id'] = self.product.id
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.get(self.path('available_repositories'), **kwargs)
return _handle_response(response, self._server_config) | def available_repositories(self, **kwargs) | Lists available repositories for the repository set
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 4.970191 | 5.119611 | 0.970814 |
if 'data' not in kwargs:
kwargs['data'] = dict()
kwargs['data']['product_id'] = self.product.id
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('enable'), **kwargs)
return _handle_response(response, self._server_config, synchronous) | def enable(self, synchronous=True, **kwargs) | Enables the RedHat Repository
RedHat Repos needs to be enabled first, so that we can sync it.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 5.081763 | 5.171352 | 0.982676 |
if which in (
'available_repositories',
'enable',
'disable',
):
return '{0}/{1}'.format(
super(RepositorySet, self).path(which='self'),
which
)
return super(RepositorySet, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
available_repositories
/repository_sets/<id>/available_repositories
enable
/repository_sets/<id>/enable
disable
/repository_sets/<id>/disable
``super`` is called otherwise. | 5.681325 | 3.506889 | 1.620047 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
product=self.product, # pylint:disable=no-member
)
if ignore is None:
ignore = set()
return super(RepositorySet, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`RepositorySet` requires that a ``product`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(product=self.product.id) | 7.529768 | 7.231661 | 1.041222 |
if which == 'deploy':
return '{0}/{1}'.format(
super(RHCIDeployment, self).path(which='self'),
which
)
return super(RHCIDeployment, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
deploy
/deployments/<id>/deploy
``super`` is called otherwise. | 6.581847 | 6.659297 | 0.98837 |
if which == 'clone':
return '{0}/{1}'.format(
super(Role, self).path(which='self'),
which
)
return super(Role, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
clone
/api/roles/:role_id/clone
Otherwise, call ``super``. | 5.734941 | 3.930031 | 1.459261 |
if which in ('refresh',):
return '{0}/{1}'.format(
super(SmartProxy, self).path(which='self'),
which
)
return super(SmartProxy, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
refresh
/api/smart_proxies/:id/refresh
Otherwise, call ``super``. | 6.087478 | 4.156931 | 1.464416 |
kwargs = kwargs.copy()
kwargs.update(self._server_config.get_client_kwargs())
# Check if environment_id was sent and substitute it to the path
# but do not pass it to requests
if 'environment' in kwargs:
if isinstance(kwargs['environment'], Environment):
environment_id = kwargs.pop('environment').id
else:
environment_id = kwargs.pop('environment')
path = '{0}/environments/{1}/import_puppetclasses'.format(
self.path(), environment_id)
else:
path = '{0}/import_puppetclasses'.format(self.path())
return _handle_response(
client.post(path, **kwargs), self._server_config, synchronous) | def import_puppetclasses(self, synchronous=True, **kwargs) | Import puppet classes from puppet Capsule.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 3.650635 | 3.843216 | 0.949891 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
user=self.user, # pylint:disable=no-member
)
if ignore is None:
ignore = set()
ignore.add('user')
return super(SSHKey, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`SSHKey` requires that an ``user`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(user=self.user.id) | 6.414429 | 6.672499 | 0.961323 |
for sshkey in results:
sshkey[u'user_id'] = self.user.id # pylint:disable=no-member
return super(SSHKey, self).search_normalize(results) | def search_normalize(self, results) | Append user id to search results to be able to initialize found
:class:`User` successfully | 5.565806 | 4.806537 | 1.157966 |
payload = super(Subnet, self).create_payload()
if 'from_' in payload:
payload['from'] = payload.pop('from_')
return {u'subnet': payload} | def create_payload(self) | Wrap submitted data within an extra dict.
For more information, see `Bugzilla #1151220
<https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_.
In addition, rename the ``from_`` field to ``from``. | 5.147008 | 4.006837 | 1.284556 |
if attrs is None:
attrs = self.read_json()
attrs['from_'] = attrs.pop('from')
if ignore is None:
ignore = set()
if attrs is not None and 'parameters' in attrs:
attrs['subnet_parameters_attributes'] = attrs.pop('parameters')
else:
ignore.add('subnet_parameters_attributes')
ignore.add('discovery')
ignore.add('remote_execution_proxy')
return super(Subnet, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Fetch as many attributes as possible for this entity.
Do not read the ``discovery`` attribute. For more information, see
`Bugzilla #1217146
<https://bugzilla.redhat.com/show_bug.cgi?id=1217146>`_.
In addition, rename the ``from_`` field to ``from``. | 4.303441 | 3.420065 | 1.258292 |
payload = super(Subnet, self).update_payload(fields)
if 'from_' in payload:
payload['from'] = payload.pop('from_')
return {u'subnet': payload} | def update_payload(self, fields=None) | Wrap submitted data within an extra dict. | 4.556025 | 4.223598 | 1.078707 |
if which in (
'delete_manifest',
'manifest_history',
'refresh_manifest',
'upload'):
_check_for_value('organization', self.get_values())
# pylint:disable=no-member
return self.organization.path('subscriptions/{0}'.format(which))
return super(Subscription, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
delete_manifest
/katello/api/v2/organizations/:organization_id/subscriptions/delete_manifest
manifest_history
/katello/api/v2/organizations/:organization_id/subscriptions/manifest_history
refresh_manifest
/katello/api/v2/organizations/:organization_id/subscriptions/refresh_manifest
upload
/katello/api/v2/organizations/:organization_id/subscriptions/upload | 8.641638 | 5.157073 | 1.675686 |
return Subscription(
self._server_config,
organization=payload['organization_id'],
).path(which) | def _org_path(self, which, payload) | A helper method for generating paths with organization IDs in them.
:param which: A path such as "manifest_history" that has an
organization ID in it.
:param payload: A dict with an "organization_id" key in it.
:returns: A string. The requested path. | 16.495596 | 19.216799 | 0.858395 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.get(
self._org_path('manifest_history', kwargs['data']),
**kwargs
)
return _handle_response(response, self._server_config, synchronous) | def manifest_history(self, synchronous=True, **kwargs) | Obtain manifest history for subscriptions.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 6.747229 | 7.092048 | 0.951379 |
if ignore is None:
ignore = set()
ignore.add('organization')
return super(Subscription, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Ignore ``organization`` field as it's never returned by the server
and is only added to entity to be able to use organization path
dependent helpers. | 3.599579 | 2.758832 | 1.304747 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(
self._org_path('refresh_manifest', kwargs['data']),
**kwargs
)
return _handle_response(
response,
self._server_config,
synchronous,
timeout=1500,
) | def refresh_manifest(self, synchronous=True, **kwargs) | Refresh previously imported manifest for Red Hat provider.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 6.832124 | 7.127978 | 0.958494 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.post(
self._org_path('upload', kwargs['data']),
**kwargs
)
# Setting custom timeout as manifest upload can take enormously huge
# amount of time. See BZ#1339696 for more details
return _handle_response(
response,
self._server_config,
synchronous,
timeout=1500,
) | def upload(self, synchronous=True, **kwargs) | Upload a subscription manifest.
Here is an example of how to use this method::
with open('my_manifest.zip') as manifest:
sub.upload({'organization_id': org.id}, manifest)
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 8.956636 | 9.681503 | 0.925129 |
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
organization=self.organization, # pylint:disable=no-member
)
if ignore is None:
ignore = set()
ignore.add('organization')
return super(SyncPlan, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`SyncPlan` requires that an ``organization`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(organization=self.organization.id) | 6.654758 | 6.656343 | 0.999762 |
data = super(SyncPlan, self).create_payload()
if isinstance(data.get('sync_date'), datetime):
data['sync_date'] = data['sync_date'].strftime('%Y-%m-%d %H:%M:%S')
return data | def create_payload(self) | Convert ``sync_date`` to a string.
The ``sync_date`` instance attribute on the current object is not
affected. However, the ``'sync_date'`` key in the dict returned by
``create_payload`` is a string. | 3.157405 | 2.393468 | 1.319176 |
if which in ('add_products', 'remove_products'):
return '{0}/{1}'.format(
super(SyncPlan, self).path(which='self'),
which
)
return super(SyncPlan, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
add_products
/katello/api/v2/organizations/:organization_id/sync_plans/:sync_plan_id/add_products
remove_products
/katello/api/v2/organizations/:organization_id/sync_plans/:sync_plan_id/remove_products | 6.103478 | 3.50092 | 1.743392 |
data = super(SyncPlan, self).update_payload(fields)
if isinstance(data.get('sync_date'), datetime):
data['sync_date'] = data['sync_date'].strftime('%Y-%m-%d %H:%M:%S')
return data | def update_payload(self, fields=None) | Convert ``sync_date`` to a string if datetime object provided. | 2.873914 | 2.034838 | 1.412355 |
if which == 'subscriptions':
return '{0}/{1}/{2}'.format(
super(System, self).path('base'),
self.uuid, # pylint:disable=no-member
which,
)
if hasattr(self, 'uuid') and (which is None or which == 'self'):
return '{0}/{1}'.format(
super(System, self).path('base'),
self.uuid # pylint:disable=no-member
)
return super(System, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
This method contains a workaround for `Bugzilla #1202917`_.
Most entities are uniquely identified by an ID. ``System`` is a bit
different: it has both an ID and a UUID, and the UUID is used to
uniquely identify a ``System``.
Return a path in the format ``katello/api/v2/systems/<uuid>`` if a UUID
is available and:
* ``which is None``, or
* ``which == 'this'``.
.. _Bugzilla #1202917:
https://bugzilla.redhat.com/show_bug.cgi?id=1202917
Finally, return a path in the form
``katello/api/v2/systems/<uuid>/subscriptions`` if ``'subscriptions'``
is passed in. | 3.229431 | 2.866353 | 1.126669 |
if attrs is None:
attrs = self.read_json()
attrs['last_checkin'] = attrs.pop('checkin_time')
attrs['host_collections'] = attrs.pop('hostCollections')
attrs['installed_products'] = attrs.pop('installedProducts')
if ignore is None:
ignore = set()
ignore.update(['facts', 'organization', 'type'])
return super(System, self).read(entity, attrs, ignore, params) | def read(self, entity=None, attrs=None, ignore=None, params=None) | Fetch as many attributes as possible for this entity.
Do not read the ``facts``, ``organization`` or ``type`` attributes.
For more information, see `Bugzilla #1202917
<https://bugzilla.redhat.com/show_bug.cgi?id=1202917>`_. | 4.505253 | 3.628291 | 1.241701 |
if which:
return '{0}/{1}'.format(
super(Template, self).path(which='base'), which)
return super(Template, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
import
/templates/import
export
/templates/export | 5.285975 | 5.717311 | 0.924556 |
return UserGroup(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | def create(self, create_missing=None) | Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1301658
<https://bugzilla.redhat.com/show_bug.cgi?id=1301658>`_. | 14.840643 | 16.317326 | 0.909502 |
if which and which in ('deploy_script'):
return '{0}/{1}'.format(
super(VirtWhoConfig, self).path(which='self'), which)
return super(VirtWhoConfig, self).path(which) | def path(self, which=None) | Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
deploy_script
/foreman_virt_who_configure/api/v2/configs/:id/deploy_script
``super`` is called otherwise. | 9.470144 | 5.543985 | 1.708183 |
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.get(self.path('deploy_script'), **kwargs)
return _handle_response(response, self._server_config, synchronous) | def deploy_script(self, synchronous=True, **kwargs) | Helper for Config's deploy_script method.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | 6.189687 | 6.224748 | 0.994368 |
logger.info("checking docker can run")
version = docker_client.version()["ApiVersion"]
docker_client.containers.run("hello-world")
logger.debug(f"using docker API version {version}") | def ensure_docker_can_run() -> None | :raises docker.errors.ContainerError
:raises docker.errors.ImageNotFound
:raises docker.errors.APIError | 5.956478 | 5.410358 | 1.10094 |
logger.info(f"checking whether docker has network {network_name}")
ipam_pool = docker.types.IPAMPool(subnet=subnet_cidr)
ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
networks = docker_client.networks.list(names=DOCKER_STARCRAFT_NETWORK)
output = networks[0].short_id if networks else None
if not output:
logger.info("network not found, creating ...")
output = docker_client.networks.create(DOCKER_STARCRAFT_NETWORK, ipam=ipam_config).short_id
logger.debug(f"docker network id: {output}") | def ensure_local_net(
network_name: str = DOCKER_STARCRAFT_NETWORK,
subnet_cidr: str = SUBNET_CIDR
) -> None | Create docker local net if not found.
:raises docker.errors.APIError | 2.589627 | 2.60234 | 0.995115 |
logger.info(f"checking if there is local image {local_image}")
docker_images = docker_client.images.list(local_image)
if len(docker_images) and docker_images[0].short_id is not None:
logger.info(f"image {local_image} found locally.")
return
logger.info("image not found locally, creating...")
pkg_docker_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "local_docker")
base_dir = os.path.join(starcraft_base_dir, "docker")
logger.info(f"copying files from {pkg_docker_dir} to {base_dir}.")
distutils.dir_util.copy_tree(pkg_docker_dir, base_dir)
starcraft_zip_file = f"{base_dir}/starcraft.zip"
if not os.path.exists(starcraft_zip_file):
logger.info(f"downloading starcraft.zip to {starcraft_zip_file}")
download_file(starcraft_binary_link, starcraft_zip_file)
logger.info(f"pulling image {parent_image}, this may take a while...")
pulled_image = docker_client.images.pull(parent_image)
pulled_image.tag(java_image)
logger.info(f"building local image {local_image}, this may take a while...")
docker_client.images.build(path=base_dir, dockerfile="game.dockerfile", tag=local_image)
logger.info(f"successfully built image {local_image}") | def ensure_local_image(
local_image: str,
parent_image: str = SC_PARENT_IMAGE,
java_image: str = SC_JAVA_IMAGE,
starcraft_base_dir: str = SCBW_BASE_DIR,
starcraft_binary_link: str = SC_BINARY_LINK,
) -> None | Check if `local_image` is present locally. If it is not, pull parent images and build.
This includes pulling starcraft binary.
:raises docker.errors.ImageNotFound
:raises docker.errors.APIError | 2.210519 | 2.184565 | 1.011881 |
logger.debug("checking docker-machine presence")
# noinspection PyBroadException
try:
out = subprocess \
.check_output(["docker-machine", "version"]) \
.decode("utf-8") \
.replace("docker-machine.exe", "") \
.replace("docker-machine", "") \
.strip()
logger.debug(f"using docker machine version {out}")
return True
except Exception:
logger.debug(f"docker machine not present")
return False | def check_dockermachine() -> bool | Checks that docker-machine is available on the computer
:raises FileNotFoundError if docker-machine is not present | 3.101441 | 2.914352 | 1.064196 |
if not check_dockermachine():
return None
# noinspection PyBroadException
try:
out = subprocess.check_output(['docker-machine', 'ip'])
return out.decode("utf-8").strip()
except Exception:
logger.debug(f"docker machine not present")
return None | def dockermachine_ip() -> Optional[str] | Gets IP address of the default docker machine
Returns None if no docker-machine executable
in the PATH and if there no Docker machine
with name default present | 3.173737 | 3.476294 | 0.912966 |
callback_lower_drive_letter = lambda pat: pat.group(1).lower()
host_mount = re.sub(r"^([a-zA-Z])\:", callback_lower_drive_letter, host_mount)
host_mount = re.sub(r"^([a-z])", "//\\1", host_mount)
host_mount = re.sub(r"\\", "/", host_mount)
return host_mount | def xoscmounts(host_mount) | Cross OS compatible mount dirs | 3.28299 | 3.154138 | 1.040852 |
return [container.short_id for container in
docker_client.containers.list(filters={"name": name_filter})] | def running_containers(name_filter: str) -> List[str] | :raises docker.exceptions.APIError | 3.389586 | 2.642121 | 1.282903 |
for container in docker_client.containers.list(filters={"name": name_filter}, all=True):
container.stop()
container.remove() | def remove_game_containers(name_filter: str) -> None | :raises docker.exceptions.APIError | 3.381243 | 2.364065 | 1.430267 |
container = docker_client.containers.get(container_id)
return container.wait()["StatusCode"] | def container_exit_code(container_id: str) -> Optional[int] | :raises docker.errors.NotFound
:raises docker.errors.APIError | 3.4216 | 3.256428 | 1.050722 |
if not players:
raise GameException("at least one player must be specified")
game_dir = launch_params["game_dir"]
game_name = launch_params["game_name"]
if os.path.exists(f"{game_dir}/{game_name}"):
logger.info(f"removing existing game results of {game_name}")
shutil.rmtree(f"{game_dir}/{game_name}")
for nth_player, player in enumerate(players):
launch_image(player, nth_player=nth_player, num_players=len(players), **launch_params)
logger.debug("checking if game has launched properly...")
time.sleep(1)
start_containers = running_containers(game_name + "_")
if len(start_containers) != len(players):
raise DockerException("some containers exited prematurely, please check logs")
if not launch_params["headless"]:
for index, player in enumerate(players if show_all else players[:1]):
port = launch_params["vnc_base_port"] + index
host = launch_params["vnc_host"]
logger.info(f"launching vnc viewer for {player} on address {host}:{port}")
launch_vnc_viewer(host, port)
logger.info("\n"
"In headful mode, you must specify and start the game manually.\n"
"Select the map, wait for bots to join the game "
"and then start the game.")
logger.info(f"waiting until game {game_name} is finished...")
running_time = time.time()
while True:
containers = running_containers(game_name)
if len(containers) == 0: # game finished
break
if len(containers) >= 2: # update the last time when there were multiple containers
running_time = time.time()
if len(containers) == 1 and time.time() - running_time > MAX_TIME_RUNNING_SINGLE_CONTAINER:
raise ContainerException(
f"One lingering container has been found after single container "
f"timeout ({MAX_TIME_RUNNING_SINGLE_CONTAINER} sec), the game probably crashed.")
logger.debug(f"waiting. {containers}")
wait_callback()
exit_codes = [container_exit_code(container) for container in containers]
# remove containers before throwing exception
logger.debug("removing game containers")
remove_game_containers(game_name)
if any(exit_code == EXIT_CODE_REALTIME_OUTED for exit_code in exit_codes):
raise RealtimeOutedException(f"some of the game containers has realtime outed.")
if any(exit_code == 1 for exit_code in exit_codes):
raise ContainerException(f"some of the game containers has finished with error exit code.")
if read_overwrite:
logger.info("overwriting bot files")
for nth_player, player in enumerate(players):
if isinstance(player, BotPlayer):
logger.debug(f"overwriting files for {player}")
distutils.dir_util.copy_tree(
f"{game_dir}/{game_name}/write_{nth_player}",
player.read_dir
) | def launch_game(
players: List[Player],
launch_params: Dict[str, Any],
show_all: bool,
read_overwrite: bool,
wait_callback: Callable
) -> None | :raises DockerException, ContainerException, RealtimeOutedException | 3.639447 | 3.401266 | 1.070027 |
version_regex = re.compile(
'__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)
here = path.abspath(path.dirname(__file__))
init_location = path.join(here, "CHAID/__init__.py")
with open(init_location) as init_file:
for line in init_file:
match = version_regex.search(line)
if not match:
raise Exception(
"Couldn't read version information from '{0}'".format(init_location)
)
return match.group('version') | def get_version() | Read version from __init__.py | 2.955821 | 2.915688 | 1.013765 |
if weighted:
m_ij = n_ij / n_ij
nan_mask = np.isnan(m_ij)
m_ij[nan_mask] = 0.000001 # otherwise it breaks the chi-squared test
w_ij = m_ij
n_ij_col_sum = n_ij.sum(axis=1)
n_ij_row_sum = n_ij.sum(axis=0)
alpha, beta, eps = (1, 1, 1)
while eps > 10e-6:
alpha = alpha * np.vstack(n_ij_col_sum / m_ij.sum(axis=1))
beta = n_ij_row_sum / (alpha * w_ij).sum(axis=0)
eps = np.max(np.absolute(w_ij * alpha * beta - m_ij))
m_ij = w_ij * alpha * beta
else:
m_ij = (np.vstack(n_ij.sum(axis=1)) * n_ij.sum(axis=0)) / n_ij.sum().astype(float)
dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)
chi, p_val = stats.chisquare(n_ij, f_exp=m_ij, ddof=n_ij.size - 1 - dof, axis=None)
return (chi, p_val, dof) | def chisquare(n_ij, weighted) | Calculates the chisquare for a matrix of ind_v x dep_v
for the unweighted and SPSS weighted case | 2.666548 | 2.763201 | 0.965021 |
if isinstance(dep, ContinuousColumn):
return self.best_con_split(ind, dep)
else:
return self.best_cat_heuristic_split(ind, dep) | def best_split(self, ind, dep) | determine which splitting function to apply | 5.09685 | 4.428976 | 1.150797 |
split = Split(None, None, None, None, 0)
is_normal = stats.normaltest(self.dep_population)[1] > 0.05
sig_test = stats.bartlett if is_normal else stats.levene
response_set = dep.arr
if dep.weights is not None:
response_set = dep.arr * dep.weights
for i, ind_var in enumerate(ind):
ind_var = ind_var.deep_copy()
unique = np.unique(ind_var.arr)
keyed_set = {}
for col in unique:
matched_elements = np.compress(ind_var.arr == col, response_set)
keyed_set[col] = matched_elements
while next(ind_var.possible_groupings(), None) is not None:
choice, highest_p_join, split_score = None, None, None
for comb in ind_var.possible_groupings():
col1_keyed_set = keyed_set[comb[0]]
col2_keyed_set = keyed_set[comb[1]]
dof = len(np.concatenate((col1_keyed_set, col2_keyed_set))) - 2
score, p_split = sig_test(col1_keyed_set, col2_keyed_set)
if choice is None or p_split > highest_p_join or (p_split == highest_p_join and score > split_score):
choice, highest_p_join, split_score = comb, p_split, score
sufficient_split = highest_p_join < self.alpha_merge and all(
len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()
)
invalid_reason = None
sufficient_split = highest_p_join < self.alpha_merge
if not sufficient_split: invalid_reason = InvalidSplitReason.ALPHA_MERGE
sufficient_split = sufficient_split and all(
len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()
)
if not sufficient_split: invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
if sufficient_split and len(keyed_set.values()) > 1:
dof = len(np.concatenate(list(keyed_set.values()))) - 2
score, p_split = sig_test(*keyed_set.values())
temp_split = Split(i, ind_var.groups(), score, p_split, dof, split_name=ind_var.name)
better_split = not split.valid() or p_split < split.p or (p_split == split.p and score > split.score)
if better_split:
split, temp_split = temp_split, split
score_threshold = self.split_threshold * split.score
if temp_split.valid() and temp_split.score >= score_threshold:
for sur in temp_split.surrogates:
if sur.column_id != i and sur.score >= score_threshold:
split.surrogates.append(sur)
temp_split.surrogates = []
split.surrogates.append(temp_split)
break
else:
split.invalid_reason = invalid_reason
ind_var.group(choice[0], choice[1])
keyed_set[choice[0]] = np.concatenate((keyed_set[choice[1]], keyed_set[choice[0]]))
del keyed_set[choice[1]]
if split.valid():
split.sub_split_values(ind[split.column_id].metadata)
return split | def best_con_split(self, ind, dep) | determine best continuous variable split | 3.2091 | 3.165049 | 1.013918 |
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config) | def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical') | Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case | 2.139397 | 2.360675 | 0.906265 |
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed) | def build_tree(self) | Build chaid tree | 14.435658 | 12.681715 | 1.138305 |
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type) | def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical') | Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous' | 2.513508 | 2.92552 | 0.859166 |
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store | def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None) | internal method to create a node in the tree | 2.747458 | 2.730523 | 1.006202 |
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree | def to_tree(self) | returns a TreeLib tree | 5.779142 | 3.932735 | 1.469497 |
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred | def node_predictions(self) | Determines which rows fall into which node | 4.695901 | 4.279437 | 1.097318 |
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred | def model_predictions(self) | Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell | 7.962221 | 6.999008 | 1.137621 |
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size | def accuracy(self) | Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F)) | 10.092752 | 9.10848 | 1.108061 |
if len(collection) == 1:
yield [ collection ]
return
first = collection[0]
for smaller in self.bell_set(collection[1:]):
for n, subset in enumerate(smaller):
if not ordinal or (ordinal and is_sorted(smaller[:n] + [[ first ] + subset] + smaller[n+1:], self._nan)):
yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]
if not ordinal or (ordinal and is_sorted([ [ first ] ] + smaller, self._nan)):
yield [ [ first ] ] + smaller | def bell_set(self, collection, ordinal=False) | Calculates the Bell set | 3.362149 | 3.381064 | 0.994405 |
return NominalColumn(self.arr, metadata=self.metadata, name=self.name,
missing_id=self._missing_id, substitute=False, weights=self.weights) | def deep_copy(self) | Returns a deep copy. | 13.737251 | 12.385728 | 1.109119 |
try:
unique = np.unique(vect)
except:
unique = set(vect)
unique = [
x for x in unique if not isinstance(x, float) or not isnan(x)
]
arr = np.copy(vect)
for new_id, value in enumerate(unique):
np.place(arr, arr==value, new_id)
self.metadata[new_id] = value
arr = arr.astype(np.float)
np.place(arr, np.isnan(arr), -1)
self.arr = arr
if -1 in arr:
self.metadata[-1] = self._missing_id | def substitute_values(self, vect) | Internal method to substitute integers into the vector, and construct
metadata to convert back to the original vector.
np.nan is always given -1, all other objects are given integers in
order of apperence.
Parameters
----------
vect : np.array
the vector in which to substitute values in | 3.80951 | 3.487476 | 1.09234 |
return OrdinalColumn(self.arr, metadata=self.metadata, name=self.name,
missing_id=self._missing_id, substitute=True,
groupings=self._groupings, weights=self.weights) | def deep_copy(self) | Returns a deep copy. | 10.663899 | 9.850515 | 1.082573 |
return ContinuousColumn(self.arr, metadata=self.metadata, missing_id=self._missing_id, weights=self.weights) | def deep_copy(self) | Returns a deep copy. | 15.837879 | 14.413843 | 1.098796 |
for i, arr in enumerate(self.splits):
self.split_map[i] = [sub.get(x, x) for x in arr]
for split in self.surrogates:
split.sub_split_values(sub) | def sub_split_values(self, sub) | Substitutes the splits with other values into the split_map | 4.729121 | 3.370438 | 1.403117 |
if self.column_id is not None and len(sub) > self.column_id:
self.split_name = sub[self.column_id]
for split in self.surrogates:
split.name_columns(sub) | def name_columns(self, sub) | Substitutes the split column index with a human readable string | 4.564116 | 3.824975 | 1.193241 |
f = [0] * (1 << len(x))
for i in range(len(x)):
for S in range(1 << i):
f[S | (1 << i)] = f[S] + x[i]
for A in range(1 << len(x)):
for B in range(1 << len(x)):
if A & B == 0 and f[A] == f[B] and 3 * f[A] == f[-1]:
return (A, B, ((1 << len(x)) - 1) ^ A ^ B)
return None | def three_partition(x) | partition a set of integers in 3 parts of same total value
:param x: table of non negative values
:returns: triplet of the integers encoding the sets, or None otherwise
:complexity: :math:`O(2^{2n})` | 2.64892 | 2.74034 | 0.966639 |
n = len(A)
x = [randint(0, 1000000) for j in range(n)]
return mult(A, mult(B, x)) == mult(C, x) | def freivalds(A, B, C) | Tests matrix product AB=C by Freivalds
:param A: n by n numerical matrix
:param B: same
:param C: same
:returns: False with high probability if AB != C
:complexity:
:math:`O(n^2)` | 4.029083 | 5.284429 | 0.762444 |
n = len(p)
opt = [[0] * (cmax + 1) for _ in range(n + 1)]
sel = [[False] * (cmax + 1) for _ in range(n + 1)]
# --- basic case
for cap in range(p[0], cmax + 1):
opt[0][cap] = v[0]
sel[0][cap] = True
# --- induction case
for i in range(1, n):
for cap in range(cmax + 1):
if cap >= p[i] and opt[i-1][cap - p[i]] + v[i] > opt[i-1][cap]:
opt[i][cap] = opt[i-1][cap - p[i]] + v[i]
sel[i][cap] = True
else:
opt[i][cap] = opt[i-1][cap]
sel[i][cap] = False
# --- reading solution
cap = cmax
solution = []
for i in range(n-1, -1, -1):
if sel[i][cap]:
solution.append(i)
cap -= p[i]
return (opt[n - 1][cmax], solution) | def knapsack(p, v, cmax) | Knapsack problem: select maximum value set of items if total size not more than capacity
:param p: table with size of items
:param v: table with value of items
:param cmax: capacity of bag
:requires: number of items non-zero
:returns: value optimal solution, list of item indexes in solution
:complexity: O(n * cmax), for n = number of items | 1.864559 | 2.005346 | 0.929794 |
n = len(p)
# Plus grande valeur obtenable avec objets ≤ i et capacité c
pgv = [[0] * (cmax + 1) for _ in range(n)]
for c in range(cmax + 1): # Initialisation
pgv[0][c] = v[0] if c >= p[0] else 0
pred = {} # Prédécesseurs pour mémoriser les choix faits
for i in range(1, n):
for c in range(cmax + 1):
pgv[i][c] = pgv[i - 1][c] # Si on ne prend pas l'objet i
pred[(i, c)] = (i - 1, c)
# Est-ce que prendre l'objet i est préférable ?
if c >= p[i] and pgv[i - 1][c - p[i]] + v[i] > pgv[i][c]:
pgv[i][c] = pgv[i - 1][c - p[i]] + v[i]
pred[(i, c)] = (i - 1, c - p[i]) # On marque le prédécesseur
# On pourrait s'arrêter là, mais si on veut un sous-ensemble d'objets
# optimal, il faut remonter les marquages
cursor = (n - 1, cmax)
chosen = []
while cursor in pred:
# Si la case prédécesseur a une capacité inférieure
if pred[cursor][1] < cursor[1]:
# C'est qu'on a ramassé l'objet sur le chemin
chosen.append(cursor[0])
cursor = pred[cursor]
if cursor[1] > 0: # A-t-on pris le premier objet ?
# (La première ligne n'a pas de prédécesseur.)
chosen.append(cursor[0])
return pgv[n - 1][cmax], chosen | def knapsack2(p, v, cmax) | Knapsack problem: select maximum value set of items if total size not more than capacity.
alternative implementation with same behavior.
:param p: table with size of items
:param v: table with value of items
:param cmax: capacity of bag
:requires: number of items non-zero
:returns: value optimal solution, list of item indexes in solution
:complexity: O(n * cmax), for n = number of items | 3.411138 | 3.63468 | 0.938497 |
x = sorted(x) # make copies
y = sorted(y) # to save arguments
return sum(x[i] * y[-i - 1] for i in range(len(x))) | def min_scalar_prod(x, y) | Permute vector to minimize scalar product
:param x:
:param y: x, y are vectors of same size
:returns: min sum x[i] * y[sigma[i]] over all permutations sigma
:complexity: O(n log n) | 5.214518 | 5.633354 | 0.925651 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.