code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
grid_axes = []
for _, param in self.tunables:
    grid_axes.append(param.get_grid_axis(self.grid_width))
return grid_axes
def _generate_grid(self)
Get all possible values for each of the tunables.
6.93407
4.540054
1.52731
self.X = X
self.y = y
def fit(self, X, y)
Fit

Args:
    X (np.array): Array of hyperparameter values with shape
        (n_samples, len(tunables))
    y (np.array): Array of scores with shape (n_samples, )
4.358816
5.635737
0.773424
used_vectors = set(tuple(v) for v in self.X)

# if every point has been used before, gridding is done.
grid_size = self.grid_width ** len(self.tunables)
if len(used_vectors) == grid_size:
    return None

all_vectors = set(itertools.product(*self._grid_axes))
remaining_vectors = all_vectors - used_vectors

candidates = np.array(list(map(np.array, remaining_vectors)))
np.random.shuffle(candidates)
return candidates[0:n]
def _candidates_from_grid(self, n=1000)
Get unused candidates from the grid of parameters.
4.503586
4.298337
1.047751
candidates = np.zeros((n, len(self.tunables)))
for i, tunable in enumerate(self.tunables):
    param = tunable[1]
    lo, hi = param.range
    if param.is_integer:
        column = np.random.randint(lo, hi + 1, size=n)
    else:
        diff = hi - lo
        column = lo + diff * np.random.rand(n)
    candidates[:, i] = column
return candidates
def _random_candidates(self, n=1000)
Generate a matrix of random parameters, column by column.
3.053833
2.81354
1.085406
# If using a grid, generate a list of previously unused grid points
if self.grid:
    return self._candidates_from_grid(n)
# If not using a grid, generate a list of vectors where each parameter
# is chosen uniformly at random
else:
    return self._random_candidates(n)
def _create_candidates(self, n=1000)
Generate random hyperparameter vectors

Args:
    n (int, optional): number of candidates to generate.
        Defaults to 1000.

Returns:
    candidates (np.array): Array of candidate hyperparameter vectors
        with shape (n_samples, len(tunables))
6.55407
6.433138
1.018798
proposed_params = []

for _ in range(n):
    # generate a list of random candidate vectors. If self.grid == True
    # each candidate will be a vector that has not been used before.
    candidate_params = self._create_candidates()

    # _create_candidates() returns None when every grid point
    # has been tried
    if candidate_params is None:
        return None

    # predict() returns a tuple of predicted values for each candidate
    predictions = self.predict(candidate_params)

    # _acquire() evaluates the list of predictions, selects one,
    # and returns its index.
    idx = self._acquire(predictions)

    # inverse-transform the acquired hyperparameters
    # based on hyperparameter type
    params = {}
    for i in range(candidate_params[idx, :].shape[0]):
        inverse_transformed = self.tunables[i][1].inverse_transform(
            candidate_params[idx, i]
        )
        params[self.tunables[i][0]] = inverse_transformed
    proposed_params.append(params)

return params if n == 1 else proposed_params
def propose(self, n=1)
Use the trained model to propose a new set of parameters.

Args:
    n (int, optional): number of candidates to propose

Returns:
    Mapping of tunable name to proposed value. If called with n > 1,
    the proposal is a list of such mappings.
5.640444
5.910098
0.954374
if isinstance(X, dict):
    X = [X]
    y = [y]

# transform the list of dictionaries into a np array X_raw
for i in range(len(X)):
    each = X[i]

    # update best score and hyperparameters
    if y[i] > self._best_score:
        self._best_score = y[i]
        self._best_hyperparams = X[i]

    vectorized = []
    for tunable in self.tunables:
        vectorized.append(each[tunable[0]])

    if self.X_raw is not None:
        self.X_raw = np.append(
            self.X_raw,
            np.array([vectorized], dtype=object),
            axis=0,
        )
    else:
        self.X_raw = np.array([vectorized], dtype=object)

self.y_raw = np.append(self.y_raw, y)

# transform each hyperparameter based on hyperparameter type
x_transformed = np.array([], dtype=np.float64)
if len(self.X_raw.shape) > 1 and self.X_raw.shape[1] > 0:
    x_transformed = self.tunables[0][1].fit_transform(
        self.X_raw[:, 0],
        self.y_raw,
    ).astype(float)

    for i in range(1, self.X_raw.shape[1]):
        transformed = self.tunables[i][1].fit_transform(
            self.X_raw[:, i],
            self.y_raw,
        ).astype(float)
        x_transformed = np.column_stack((x_transformed, transformed))

self.fit(x_transformed, self.y_raw)
def add(self, X, y)
Add data about known tunable hyperparameter configurations and scores.

Refits model with all data.

Args:
    X (Union[Dict[str, object], List[Dict[str, object]]]): dict or
        list of dicts of hyperparameter combinations. Keys may only be
        the name of a tunable, and the dictionary must contain values
        for all tunables.
    y (Union[float, List[float]]): float or list of floats of scores
        of the hyperparameter combinations. Order of scores must match
        the order of the hyperparameter dictionaries that the scores
        correspond to.
2.421749
2.391428
1.012679
candidates = np.where(self.dpp_vector == 0)
return None if len(candidates[0]) == 0 else candidates[0]
def _get_candidates(self)
Finds the pipelines that are not yet tried.

Returns:
    np.array: Indices corresponding to columns in ``dpp_matrix`` that
        haven't been tried on ``X``. ``None`` if all pipelines have
        been tried on X.
7.107063
4.999839
1.421458
# generate a list of all the untried candidate pipelines
candidates = self._get_candidates()

# _get_candidates() returns None when every possibility has been tried
if candidates is None:
    return None

# predict() returns a predicted value for each candidate
predictions = self.predict(candidates)

# _acquire() evaluates the list of predictions, selects one, and
# returns its index.
idx = self._acquire(predictions)
return candidates[idx]
def propose(self)
Use the trained model to propose a new pipeline.

Returns:
    int: Index corresponding to pipeline to try in ``dpp_matrix``.
8.292885
7.861891
1.054821
for each in X:
    self.dpp_vector[each] = X[each]

self.fit(self.dpp_vector.reshape(1, -1))
def add(self, X)
Add data about known pipelines and scores.

Updates ``dpp_vector`` and refits model with all data.

Args:
    X (dict): mapping of pipeline indices to scores. Keys must
        correspond to the index of a column in ``dpp_matrix`` and
        values are the corresponding score for that pipeline on the
        dataset.
7.703953
4.414092
1.745309
return {
    'azure_client_id': self.client_id,
    'azure_location': self.location,
    'azure_secret': self.secret,
    'azure_subscription_id': self.subscription_id,
    'azure_tenant_id': self.tenant_id,
}
def to_vars_dict(self)
Return local state which is relevant for the cluster setup process.
2.432954
2.231553
1.090251
with self.__lock:
    if self._resource_client is None:
        log.debug("Making Azure `ServicePrincipalCredentials` object"
                  " with tenant=%r, client_id=%r, secret=%r ...",
                  self.tenant_id, self.client_id,
                  ('<redacted>' if self.secret else None))
        credentials = ServicePrincipalCredentials(
            tenant=self.tenant_id,
            client_id=self.client_id,
            secret=self.secret,
        )
        log.debug("Initializing Azure `ComputeManagementClient` ...")
        self._compute_client = ComputeManagementClient(
            credentials, self.subscription_id)
        log.debug("Initializing Azure `NetworkManagementClient` ...")
        self._network_client = NetworkManagementClient(
            credentials, self.subscription_id)
        log.debug("Initializing Azure `ResourceManagementClient` ...")
        self._resource_client = ResourceManagementClient(
            credentials, self.subscription_id)
        log.info("Azure API clients initialized.")
def _init_az_api(self)
Initialise client objects for talking to the Azure API. This is in a separate function so that it can be called by ``__init__`` and ``__setstate__``.
2.590849
2.425483
1.068179
self._init_az_api()
cluster_name, node_name = instance_id
self._init_inventory(cluster_name)

# we must delete resources in a specific order: e.g., a public IP
# address cannot be deleted if it's still in use by a NIC...
for name, api_version in [
        (node_name, '2018-06-01'),
        (node_name + '-nic', '2018-10-01'),
        (node_name + '-public-ip', '2018-10-01'),
        (node_name + '-disk', '2018-09-30'),
        (self._make_storage_account_name(
            cluster_name, node_name), '2018-07-01'),
]:
    rsc_id = self._inventory[name]
    log.debug("Deleting resource %s (`%s`) ...", name, rsc_id)
    oper = self._resource_client.resources.delete_by_id(rsc_id, api_version)
    oper.wait()
    del self._inventory[name]

self._vm_details.pop(node_name, None)

# if this was the last VM to be deleted, clean up leftover resource group
with self.__lock:
    if len(self._inventory) == 2:
        log.debug("Cleaning up leftover resource group ...")
        oper = self._resource_client.resource_groups.delete(cluster_name)
        oper.wait()
        self._inventory = {}
def stop_instance(self, instance_id)
Stops the instance gracefully.

:param str instance_id: instance identifier
3.829992
3.971461
0.964379
self._init_az_api()
cluster_name, node_name = instance_id
# XXX: keep in sync with contents of `vm_deployment_template`
ip_name = ('{node_name}-public-ip'.format(node_name=node_name))
ip = self._network_client.public_ip_addresses.get(cluster_name, ip_name)
if (ip.provisioning_state == 'Succeeded' and ip.ip_address):
    return [ip.ip_address]
else:
    return []
def get_ips(self, instance_id)
Retrieves all IP addresses associated with a given instance.

:return: list of IPs
4.754891
4.92478
0.965503
self._init_az_api()
# Here, it's always better if we update the instance.
vm = self._get_vm(instance_id, force_reload=True)
# FIXME: should we rather check `vm.instance_view.statuses`
# and search for `.code == "PowerState/running"`? or
# `vm.instance_view.vm_agent.statuses` and search for `.code
# == 'ProvisioningState/succeeded'`?
return vm.provisioning_state == u'Succeeded'
def is_instance_running(self, instance_id)
Check if the instance is up and running.

:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
8.32863
8.743457
0.952556
self._init_az_api()
if force_reload:
    # Remove from cache and get from server again
    self._inventory = {}
cluster_name, node_name = instance_id
self._init_inventory(cluster_name)

# fetch and cache the VM details if not already known
if node_name not in self._vm_details:
    vm_info = self._compute_client.virtual_machines.get(
        cluster_name, node_name, 'instanceView')
    self._vm_details[node_name] = vm_info

try:
    return self._vm_details[node_name]
except KeyError:
    raise InstanceNotFoundError(
        "Instance `{instance_id}` not found"
        .format(instance_id=instance_id))
def _get_vm(self, instance_id, force_reload=True)
Return details on the VM with the given name.

:param str instance_id: instance identifier
:param bool force_reload:
    if ``True``, skip searching caches and immediately reload instance
    data from the cloud provider
:return: VM details object
:raises InstanceNotFoundError:
    if the instance can't be found in the local cache or in the cloud.
3.829458
4.177329
0.916724
last = -1
unit = s[last].lower()
if unit.isdigit():
    # `s` is an integral number
    return int(s)
if unit == 'b':
    # ignore the 'b' or 'B' suffix
    last -= 1
    unit = s[last].lower()
if unit == 'i':
    k = 1024
    last -= 1
    unit = s[last].lower()
else:
    k = 1000
# convert the substring of `s` that does not include the suffix
if unit.isdigit():
    return int(s[0:(last + 1)])
if unit == 'k':
    return int(float(s[0:last]) * k)
if unit == 'm':
    return int(float(s[0:last]) * k**2)
if unit == 'g':
    return int(float(s[0:last]) * k**3)
if unit == 't':
    return int(float(s[0:last]) * k**4)
if unit == 'p':
    return int(float(s[0:last]) * k**5)
if unit == 'e':
    return int(float(s[0:last]) * k**6)
if unit == 'z':
    return int(float(s[0:last]) * k**7)
if unit == 'y':
    return int(float(s[0:last]) * k**8)
def to_bytes(s)
Convert string `s` to an integer number of bytes.

Suffixes like 'KB', 'MB', 'GB' (up to 'YB'), with or without the
trailing 'B', are allowed and properly accounted for. Case is ignored
in suffixes.

Examples::

    >>> to_bytes('12')
    12
    >>> to_bytes('12B')
    12
    >>> to_bytes('12KB')
    12000
    >>> to_bytes('1G')
    1000000000

Binary units 'KiB', 'MiB' etc. are also accepted:

    >>> to_bytes('1KiB')
    1024
    >>> to_bytes('1MiB')
    1048576
1.845013
1.833031
1.006537
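As the parsing logic reads, a fractional prefix should also be accepted, since the non-suffix part is converted with `float()`; a few illustrative checks::

    assert to_bytes('12KB') == 12000      # decimal suffix: k = 1000
    assert to_bytes('1MiB') == 1048576    # binary suffix: k = 1024
    assert to_bytes('2.5MB') == 2500000   # fractional prefixes parse too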
node_information = {}
ssh = node.connect()
if not ssh:
    log.error("Unable to connect to node %s", node.name)
    return

(_in, _out, _err) = ssh.exec_command(
    "(type >& /dev/null -a srun && echo slurm)"
    " || (type >& /dev/null -a qconf && echo sge)"
    " || (type >& /dev/null -a pbsnodes && echo pbs)"
    " || echo UNKNOWN")
node_information['type'] = _out.read().strip()

(_in, _out, _err) = ssh.exec_command("arch")
node_information['architecture'] = _out.read().strip()

if node_information['type'] == 'slurm':
    inspect_slurm_cluster(ssh, node_information)
elif node_information['type'] == 'sge':
    inspect_sge_cluster(ssh, node_information)
ssh.close()
return node_information
def inspect_node(node)
This function accepts an `elasticluster.cluster.Node` instance, connects to the node, and tries to discover the kind of batch system installed, along with some other information.
3.535964
3.273503
1.080177
auth_section = 'auth/elasticluster_%s' % cluster.name
resource_section = 'resource/elasticluster_%s' % cluster.name

cfg = RawConfigParser()
cfg.add_section(auth_section)

frontend_node = cluster.get_ssh_to_node()
cfg.set(auth_section, 'type', 'ssh')
cfg.set(auth_section, 'username', frontend_node.image_user)

cluster_info = inspect_node(frontend_node)

cfg.add_section(resource_section)
cfg.set(resource_section, 'enabled', 'yes')
cfg.set(resource_section, 'transport', 'ssh')
cfg.set(resource_section, 'frontend', frontend_node.preferred_ip)

if not cluster_info:
    log.error("Unable to gather enough information from the cluster. "
              "The following information is only partial!")
    cluster_info = {'architecture': 'unknown',
                    'type': 'unknown',
                    'max_cores': -1,
                    'max_cores_per_job': -1,
                    'max_memory_per_core': -1,
                    'max_walltime': '672hours'}

cfg.set(resource_section, 'type', cluster_info['type'])
cfg.set(resource_section, 'architecture', cluster_info['architecture'])
cfg.set(resource_section, 'max_cores', cluster_info.get('max_cores', 1))
cfg.set(resource_section, 'max_cores_per_job',
        cluster_info.get('max_cores_per_job', 1))
cfg.set(resource_section, 'max_memory_per_core',
        cluster_info.get('max_memory_per_core', '2GB'))
cfg.set(resource_section, 'max_walltime',
        cluster_info.get('max_walltime', '672hours'))

cfgstring = StringIO()
cfg.write(cfgstring)
return cfgstring.getvalue()
def create_gc3pie_config_snippet(cluster)
Create a configuration file snippet to be used with GC3Pie.
2.729829
2.721869
1.002925
return {
    'gcloud_client_id': self._client_id,
    'gcloud_client_secret': self._client_secret,
    'gcloud_network': self._network,
    'gcloud_project_id': self._project_id,
    'gcloud_zone': self._zone,
}
def to_vars_dict(self)
Return local state which is relevant for the cluster setup process.
2.581742
2.346472
1.100265
# ensure only one thread runs the authentication process, if needed
with GoogleCloudProvider.__gce_lock:
    # check for existing connection
    if not self._gce:
        version = pkg_resources.get_distribution("elasticluster").version
        http = googleapiclient.http.set_user_agent(
            httplib2.Http(), "elasticluster/%s" % version)
        credentials = self._get_credentials()
        self._auth_http = credentials.authorize(http)
        self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=http)
return self._gce
def _connect(self)
Connects to the cloud web services. If this is the first
authentication, a web browser will be started to authenticate against
Google and provide access to elasticluster.

:return: A Resource object with methods for interacting with the
    service.
5.42065
4.377532
1.238289
with GoogleCloudProvider.__gce_lock:
    return request.execute(http=self._auth_http)
def _execute_request(self, request)
Helper method to execute a request, since a lock should be used to
avoid firing multiple requests at the same time.

:return: Result of `request.execute`
20.729311
16.902273
1.226421
gce = self._connect()
status = response['status']
while status != 'DONE' and response:
    # wait a random amount of time (up to `wait` seconds)
    if wait:
        time.sleep(1 + random.randrange(wait))

    operation_id = response['name']

    # Identify if this is a per-zone resource
    if 'zone' in response:
        zone_name = response['zone'].split('/')[-1]
        request = gce.zoneOperations().get(
            project=self._project_id,
            operation=operation_id,
            zone=zone_name)
    else:
        request = gce.globalOperations().get(
            project=self._project_id,
            operation=operation_id)
    response = self._execute_request(request)
    if response:
        status = response['status']
return response
def _wait_until_done(self, response, wait=30)
Blocks until the operation status is done for the given operation.

:param response: The response object used in a previous GCE call.
:param int wait: Wait up to this number of seconds in between
    successive polling of the GCE status.
3.024899
2.823117
1.071475
if not instance_id:
    log.info("Instance to pause has no instance id.")
    return
gce = self._connect()
try:
    request = gce.instances().stop(project=self._project_id,
                                   instance=instance_id,
                                   zone=self._zone)
    operation = self._execute_request(request)
    response = self._wait_until_done(operation)
    self._check_response(response)
    return {"instance_id": instance_id}
except HttpError as e:
    log.error("Error stopping instance: `%s`", e)
    raise InstanceError("Error stopping instance `%s`" % e)
def pause_instance(self, instance_id)
Pauses the instance, retaining disk and config.

:param str instance_id: instance identifier
:raises InstanceError: if instance cannot be paused
:return: dict - information needed to restart instance.
3.571195
3.365828
1.061015
if not paused_info.get("instance_id"):
    log.info("Instance to resume has no instance id.")
    return
gce = self._connect()
try:
    request = gce.instances().start(project=self._project_id,
                                    instance=paused_info["instance_id"],
                                    zone=self._zone)
    operation = self._execute_request(request)
    response = self._wait_until_done(operation)
    self._check_response(response)
    return
except HttpError as e:
    log.error("Error restarting instance: `%s`", e)
    raise InstanceError("Error restarting instance `%s`" % e)
def resume_instance(self, paused_info)
Restarts a paused instance, retaining disk and config.

:param dict paused_info: information needed to restart the instance,
    as returned by `pause_instance`
:raises InstanceError: if instance cannot be resumed.
3.480171
3.427508
1.015365
if not instance_id:
    log.info("Instance to stop has no instance id.")
    return
gce = self._connect()
try:
    request = gce.instances().delete(project=self._project_id,
                                     instance=instance_id,
                                     zone=self._zone)
    response = self._execute_request(request)
    self._check_response(response)
except HttpError as e:
    # If the instance does not exist, we get a 404
    if e.resp.status == 404:
        raise InstanceNotFoundError(
            "Instance `{instance_id}` was not found"
            .format(instance_id=instance_id))
    else:
        raise InstanceError(
            "Could not stop instance `{instance_id}`: `{e}`"
            .format(instance_id=instance_id, e=e))
except CloudProviderError as e:
    raise InstanceError(
        "Could not stop instance `{instance_id}`: `{e}`"
        .format(instance_id=instance_id, e=e))
def stop_instance(self, instance_id)
Stops the instance gracefully.

:param str instance_id: instance identifier
:raises InstanceError: if instance cannot be stopped
2.292792
2.337511
0.980869
gce = self._connect()
try:
    request = gce.instances().list(
        project=self._project_id, filter=filter, zone=self._zone)
    response = self._execute_request(request)
    self._check_response(response)
except (HttpError, CloudProviderError) as e:
    raise InstanceError("could not retrieve all instances on the"
                        " cloud: `%s`" % e)

if response and 'items' in response:
    return response['items']
else:
    return list()
def list_instances(self, filter=None)
List instances on GCE, optionally filtering the results.

:param str filter: Filter specification; see
    https://developers.google.com/compute/docs/reference/latest/instances/list
    for details.

:return: list of instances
4.312201
4.570655
0.943453
if not instance_id:
    raise InstanceError("could not retrieve the ip address for node: "
                        "no associated instance id")
gce = self._connect()
instances = gce.instances()
try:
    request = instances.get(instance=instance_id,
                            project=self._project_id,
                            zone=self._zone)
    response = self._execute_request(request)

    ip_public = None

    # If the instance is in status TERMINATED, then there will be
    # no IP addresses.
    if response and response['status'] in ('STOPPING', 'TERMINATED'):
        log.info("node '%s' state is '%s'; no IP address(es)",
                 instance_id, response['status'])
        return [None]

    if response and "networkInterfaces" in response:
        interfaces = response['networkInterfaces']
        if interfaces:
            if "accessConfigs" in interfaces[0]:
                ip_public = interfaces[0]['accessConfigs'][0]['natIP']

    if ip_public:
        return [ip_public]
    else:
        raise InstanceError("could not retrieve the ip address for "
                            "node `%s`, please check the node "
                            "through the cloud provider interface"
                            % instance_id)
except (HttpError, CloudProviderError) as e:
    raise InstanceError('could not retrieve the ip address of `%s`: '
                        '`%s`' % (instance_id, e))
def get_ips(self, instance_id)
Retrieves the public IP addresses of the instance with the given id
from the cloud provider.

:param str instance_id: id of the instance
:return: list (ips)
:raises InstanceError: if the ip could not be retrieved.
3.464906
3.391874
1.021531
items = self.list_instances(filter=('name eq "%s"' % instance_id))
for item in items:
    if item['status'] == 'RUNNING':
        return True
return False
def is_instance_running(self, instance_id)
Check whether the instance is up and running.

:param str instance_id: instance identifier
:return: True if instance is running, False otherwise
3.879334
4.209188
0.921635
return {
    # connection data (= what is in the "openrc" file)
    'os_auth_url': self._os_auth_url,
    'os_cacert': (self._os_cacert or ''),
    'os_password': self._os_password,
    'os_project_domain_name': self._os_project_domain_name,
    'os_region_name': self._os_region_name,
    'os_tenant_name': self._os_tenant_name,
    'os_user_domain_name': self._os_user_domain_name,
    'os_username': self._os_username,
    # API versioning
    'os_compute_api_version': self._compute_api_version,
    'os_identity_api_version': self._identity_api_version,
    'os_image_api_version': self._image_api_version,
    'os_network_api_version': self._network_api_version,
    'os_volume_api_version': self._volume_api_version,
}
def to_vars_dict(self)
Return local state which is relevant for the cluster setup process.
1.989242
1.913207
1.039742
if not self.nova_client:
    log.debug("Initializing OpenStack API clients:"
              " OS_AUTH_URL='%s'"
              " OS_USERNAME='%s'"
              " OS_USER_DOMAIN_NAME='%s'"
              " OS_PROJECT_NAME='%s'"
              " OS_PROJECT_DOMAIN_NAME='%s'"
              " OS_REGION_NAME='%s'"
              " OS_CACERT='%s'"
              "",
              self._os_auth_url,
              self._os_username,
              self._os_user_domain_name,
              self._os_tenant_name,
              self._os_project_domain_name,
              self._os_region_name,
              self._os_cacert)
    sess = self.__init_keystone_session()

    log.debug("Creating OpenStack Compute API (Nova) v%s client ...",
              self._compute_api_version)
    self.nova_client = nova_client.Client(
        self._compute_api_version, session=sess,
        region_name=self._os_region_name,
        cacert=self._os_cacert)

    log.debug("Creating OpenStack Network API (Neutron) client ...")
    self.neutron_client = neutron_client.Client(
        #self._network_api_version,  ## doesn't work as of Neutron Client 2 :-(
        session=sess, region_name=self._os_region_name,
        ca_cert=self._os_cacert)

    # FIXME: Glance's `Client` class does not take an explicit
    # `cacert` parameter, instead it relies on the `session`
    # argument being "A keystoneauth1 session that should be
    # used for transport" -- I presume this means that
    # `cacert` only needs to be set there.  Is this true of
    # other OpenStack client classes as well?
    log.debug("Creating OpenStack Image API (Glance) v%s client ...",
              self._image_api_version)
    self.glance_client = glance_client.Client(
        self._image_api_version, session=sess,
        region_name=self._os_region_name)

    log.debug("Creating OpenStack Volume API (Cinder) v%s client ...",
              self._volume_api_version)
    self.cinder_client = cinder_client.Client(
        self._volume_api_version, session=sess,
        region_name=self._os_region_name,
        cacert=self._os_cacert)
def _init_os_api(self)
Initialise client objects for talking to the OpenStack API. This is in a separate function so that it can be called by ``__init__`` and ``__setstate__``.
2.45078
2.426512
1.010001
api = self._identity_api_version  # for readability
tried = []
if api in ['3', None]:
    sess = self.__init_keystone_session_v3(check=(api is None))
    tried.append('v3')
    if sess:
        return sess
if api in ['2', None]:
    sess = self.__init_keystone_session_v2(check=(api is None))
    tried.append('v2')
    if sess:
        return sess
raise RuntimeError(
    "Cannot establish Keystone session (tried: {0})."
    .format(', '.join(tried)))
def __init_keystone_session(self)
Create and return a Keystone session object.
3.583292
3.479296
1.02989
from keystoneauth1 import loading as keystone_v2
loader = keystone_v2.get_plugin_loader('password')
auth = loader.load_from_options(
    auth_url=self._os_auth_url,
    username=self._os_username,
    password=self._os_password,
    project_name=self._os_tenant_name,
)
sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
if check:
    log.debug("Checking that Keystone API v2 session works...")
    try:
        # if session is invalid, the following will raise some exception
        nova = nova_client.Client(self._compute_api_version,
                                  session=sess,
                                  cacert=self._os_cacert)
        nova.flavors.list()
    except keystoneauth1.exceptions.NotFound as err:
        log.warning("Creating Keystone v2 session failed: %s", err)
        return None
    except keystoneauth1.exceptions.ClientException as err:
        log.error("OpenStack server rejected request"
                  " (likely configuration error?): %s", err)
        return None
        # FIXME: should we be raising an error instead?
# if we got to this point, the v2 session is valid
log.info("Using Keystone API v2 session to authenticate to OpenStack")
return sess
def __init_keystone_session_v2(self, check=False)
Create and return a session object using Keystone API v2.
3.112595
2.9895
1.041176
try:
    # may fail on Python 2.6?
    from keystoneauth1.identity import v3 as keystone_v3
except ImportError:
    log.warning("Cannot load Keystone API v3 library.")
    return None
auth = keystone_v3.Password(
    auth_url=self._os_auth_url,
    username=self._os_username,
    password=self._os_password,
    user_domain_name=self._os_user_domain_name,
    project_domain_name=self._os_project_domain_name,
    project_name=self._os_tenant_name,
)
sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
if check:
    log.debug("Checking that Keystone API v3 session works...")
    try:
        # if session is invalid, the following will raise some exception
        nova = nova_client.Client(self._compute_api_version, session=sess)
        nova.flavors.list()
    except keystoneauth1.exceptions.NotFound as err:
        log.warning("Creating Keystone v3 session failed: %s", err)
        return None
    except keystoneauth1.exceptions.ClientException as err:
        log.error("OpenStack server rejected request"
                  " (likely configuration error?): %s", err)
        return None
        # FIXME: should we be raising an error instead?
# if we got to this point, the v3 session is valid
log.info("Using Keystone API v3 session to authenticate to OpenStack")
return sess
def __init_keystone_session_v3(self, check=False)
Return a new session object, created using Keystone API v3.

.. note::

    The only supported authN method is password authentication;
    token or other plug-ins are not currently supported.
2.91346
2.893518
1.006892
ver = os.getenv('OS_IDENTITY_API_VERSION', '')
if ver == '3':
    log.debug(
        "Using OpenStack Identity API v3 because of environmental"
        " variable setting `OS_IDENTITY_API_VERSION=3`")
    return '3'
elif ver == '2' or ver.startswith('2.'):
    log.debug(
        "Using OpenStack Identity API v2 because of environmental"
        " variable setting `OS_IDENTITY_API_VERSION=2`")
    return '2'
elif self._os_auth_url.endswith('/v3'):
    log.debug(
        "Using OpenStack Identity API v3 because of `/v3` ending in"
        " auth URL; set environmental variable OS_IDENTITY_API_VERSION"
        " to force use of Identity API v2 instead.")
    return '3'
elif self._os_auth_url.endswith('/v2.0'):
    log.debug(
        "Using OpenStack Identity API v2 because of `/v2.0` ending in"
        " auth URL; set environmental variable OS_IDENTITY_API_VERSION"
        " to force use of Identity API v3 instead.")
    return '2'
else:
    # auto-detection failed, need to probe
    return None
def __detect_os_identity_api_version(self)
Return preferred OpenStack Identity API version (either one of the two
strings ``'2'`` or ``'3'``) or ``None``.

The following auto-detection strategies are tried (in this order):

#. Read the environmental variable `OS_IDENTITY_API_VERSION` and check
   if its value is one of the two strings ``'2'`` or ``'3'``;

#. Check if a version tag like ``/v3`` or ``/v2.0`` ends the OpenStack
   auth URL.

If none of the above worked, return ``None``.

For more information on ``OS_IDENTITY_API_VERSION``, please see
`<https://docs.openstack.org/developer/python-openstackclient/authentication.html>`_.
2.409939
2.209211
1.09086
self._init_os_api()
instance = self._load_instance(instance_id)
try:
    ip_addrs = set([self.floating_ip])
except AttributeError:
    ip_addrs = set([])
for ip_addr in sum(instance.networks.values(), []):
    ip_addrs.add(ip_addr)
log.debug("VM `%s` has IP addresses %r", instance_id, ip_addrs)
return list(ip_addrs)
def get_ips(self, instance_id)
Retrieves all IP addresses associated with a given instance.

:return: list of IPs
3.950622
4.26611
0.926048
self._init_os_api()
log.debug("Checking existence of security group(s) %s ...", names)
try:
    # python-novaclient < 8.0.0
    security_groups = self.nova_client.security_groups.list()
    existing = set(sg.name for sg in security_groups)
except AttributeError:
    security_groups = self.neutron_client.list_security_groups()['security_groups']
    existing = set(sg[u'name'] for sg in security_groups)

# TODO: We should be able to create the security group if it
# doesn't exist and at least add a rule to accept ssh access.
# Also, we should be able to add new rules to a security group
# if needed.
nonexisting = set(names) - existing
if nonexisting:
    raise SecurityGroupError(
        "Security group(s) `{0}` do not exist"
        .format(', '.join(nonexisting)))

# if we get to this point, all sec groups exist
return True
def _check_security_groups(self, names)
Raise an exception if any of the named security groups does not exist.

:param List[str] names: List of security group names
:raises SecurityGroupError: if a group does not exist
3.545183
3.483439
1.017725
self._init_os_api()
try:
    # python-novaclient < 8.0.0
    return self.nova_client.images.list()
except AttributeError:
    # ``glance_client.images.list()`` returns a generator, but callers
    # of `._get_images()` expect a Python list
    return list(self.glance_client.images.list())
def _get_images(self)
Get available images. We cache the results in order to reduce network usage.
6.564459
5.877031
1.116969
log.debug(
    "Trying to allocate floating IP for VM `%s` on network(s) %r",
    instance.id, network_ids)
try:
    # on python-novaclient>=8.0.0 this fails with
    # `AttributeError` since the `Client.floating_ips`
    # attribute has been removed
    return self._allocate_address_nova(instance, network_ids)
except AttributeError:
    return self._allocate_address_neutron(instance, network_ids)
def _allocate_address(self, instance, network_ids)
Allocates a floating/public ip address to the given instance,
dispatching to either the Compute or Network API depending on
installed packages.

:param instance: instance to assign address to

:param list network_ids: List of IDs (as strings) of networks where to
    request allocation of the floating IP.

:return: public ip address
4.52017
4.545615
0.994402
self._init_os_api()
with OpenStackCloudProvider.__node_start_lock:
    # Use the `novaclient` API (works with python-novaclient < 8.0.0)
    free_ips = [ip for ip in self.nova_client.floating_ips.list()
                if not ip.fixed_ip]
    if not free_ips:
        log.debug("Trying to allocate a new floating IP ...")
        free_ips.append(self.nova_client.floating_ips.create())
    if free_ips:
        ip = free_ips.pop()
    else:
        raise RuntimeError(
            "Could not allocate floating IP for VM {0}"
            .format(instance.id))
    instance.add_floating_ip(ip)
return ip.ip
def _allocate_address_nova(self, instance, network_ids)
Allocates a floating/public ip address to the given instance, using
the OpenStack Compute ('Nova') API.

:param instance: instance to assign address to

:param list network_ids: List of IDs (as strings) of networks where to
    request allocation of the floating IP. **Ignored** (only used by
    the corresponding Neutron API function).

:return: public ip address
4.366883
4.487564
0.973108
return self._run_playbook(cluster, self._playbook_path, extra_args)
def setup_cluster(self, cluster, extra_args=tuple())
Configure the cluster by running an Ansible playbook.

The ElastiCluster configuration attribute `<kind>_groups` determines,
for each node kind, what Ansible groups nodes of that kind are
assigned to.

:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`

:param list extra_args: List of additional command-line arguments that
    are appended to each invocation of the setup program.

:return: ``True`` on success, ``False`` otherwise. Please note, if
    nothing has to be configured, then ``True`` is returned.

:raises ConfigurationError: if the playbook can not be found or is
    corrupt.
9.590517
11.485837
0.834986
if self._resume_playbook_path is not None:
    return self._run_playbook(cluster, self._resume_playbook_path, extra_args)
else:
    log.warning("No resume playbook is available - falling back"
                " to the setup playbook, which could be slow.")
    return self.setup_cluster(cluster, extra_args)
def resume_cluster(self, cluster, extra_args=tuple())
As `setup_cluster`, but prefers to run a resume playbook, if one is
available. A resume playbook is a playbook designed to restart a
cluster after it has been paused, and can be more efficient than a
setup playbook (since it can assume that the required software is
already installed). If no such playbook is available, the standard
setup playbook is used and a warning is printed.

:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`

:param list extra_args: List of additional command-line arguments that
    are appended to each invocation of the setup program.

:return: ``True`` on success, ``False`` otherwise. Please note, if
    nothing has to be configured, then ``True`` is returned.

:raises ConfigurationError: if the playbook can not be found or is
    corrupt.
4.534053
3.315658
1.367467
inventory_data = defaultdict(list)

for node in cluster.get_all_nodes():
    if node.preferred_ip is None:
        log.warning(
            "Ignoring node `{0}`: No IP address."
            .format(node.name))
        continue
    if node.kind not in self.groups:
        # FIXME: should this raise a `ConfigurationError` instead?
        log.warning(
            "Ignoring node `{0}`:"
            " Node kind `{1}` not defined in cluster!"
            .format(node.name, node.kind))
        continue

    extra_vars = ['ansible_user=%s' % node.image_user]

    ip_addr, port = parse_ip_address_and_port(node.preferred_ip)
    if port != 22:
        extra_vars.append('ansible_port=%s' % port)

    if node.kind in self.environment:
        extra_vars.extend('%s=%s' % (k, v) for k, v in
                          self.environment[node.kind].items())
    for group in self.groups[node.kind]:
        inventory_data[group].append(
            (node.name, ip_addr, ' '.join(extra_vars)))

if not inventory_data:
    log.info("No inventory file was created.")
    return None

# create a temporary file to pass to ansible, since the
# api is not stable yet...
if self._storage_path_tmp:
    if not self._storage_path:
        self._storage_path = tempfile.mkdtemp()
    elasticluster.log.warning(
        "Writing inventory file to tmp dir `%s`", self._storage_path)

inventory_path = os.path.join(
    self._storage_path, (cluster.name + '.inventory'))
log.debug("Writing Ansible inventory to file `%s` ...", inventory_path)
with open(inventory_path, 'w+') as inventory_file:
    for section, hosts in inventory_data.items():
        # Ansible throws an error "argument of type 'NoneType' is not
        # iterable" if a section is empty, so ensure we have something
        # to write in there
        if hosts:
            inventory_file.write("\n[" + section + "]\n")
            for host in hosts:
                hostline = "{0} ansible_host={1} {2}\n".format(*host)
                inventory_file.write(hostline)
return inventory_path
def _build_inventory(self, cluster)
Builds the inventory for the given cluster and returns its path.

:param cluster: cluster to build inventory for
:type cluster: :py:class:`elasticluster.cluster.Cluster`
3.384818
3.352163
1.009741
assert self.params.func, "No subcommand defined in `ElastiCluster.main()`"
try:
    return self.params.func()
except Exception as err:
    log.error("Error: %s", err)
    if self.params.verbose > 2:
        import traceback
        traceback.print_exc()
    print("Aborting because of errors: {err}.".format(err=err))
    sys.exit(1)
def main(self)
This is the main entry point of the ElastiCluster CLI. First the central configuration is created, which can be altered through the command line interface. Then the given command from the command line interface is called.
5.759469
4.982806
1.155869
if click.confirm(prompt, **extra_args):
    return True
else:
    # abort
    if msg:
        sys.stderr.write(msg)
        sys.stderr.write('\n')
    sys.exit(exitcode)
def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args)
Prompt user for confirmation and exit on negative reply.

Arguments `prompt` and `extra_args` will be passed unchanged to
`click.confirm`:func: (which is used for actual prompting).

:param str prompt: Prompt string to display.
:param int exitcode: Program exit code if negative reply given.
:param str msg: Message to display before exiting.
2.458109
3.878791
0.633731
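A short usage sketch; the prompt text is illustrative::

    # ask before a destructive operation; exits the process on a negative reply
    confirm_or_abort(
        "Do you really want to stop cluster `test`?",
        msg="Aborting upon user request.")
    # execution continues here only if the user confirmed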
added = []
changed = {}
for key, value in kv.items():
    if key not in os.environ:
        added.append(key)
    else:
        changed[key] = os.environ[key]
    os.environ[key] = value

yield

# restore pristine process environment
for key in added:
    del os.environ[key]
for key in changed:
    os.environ[key] = changed[key]
def environment(**kv)
Context manager to run Python code with a modified UNIX process environment. All key/value pairs in the keyword arguments are added (or changed, if the key names an existing environmental variable) in the process environment upon entrance into the context. Changes are undone upon exit: added environmental variables are removed from the environment, and those whose value was changed are reset to their pristine value.
2.566612
2.273992
1.128681
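Given the bare ``yield``, this is presumably decorated with `contextlib.contextmanager`; under that assumption, typical use looks like::

    import os

    with environment(LC_ALL='C', ELASTICLUSTER_DEBUG='1'):
        assert os.environ['LC_ALL'] == 'C'   # set inside the context
    # on exit, added variables are removed and changed ones restored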
translated = []
subst = {
    'h': list(str(addr)),
    'p': list(str(port)),
    'r': list(str(user)),
    '%': ['%'],
}
escaped = False
for char in command:
    if char == '%':
        escaped = True
        continue
    if escaped:
        try:
            translated.extend(subst[char])
            escaped = False
            continue
        except KeyError:
            raise ValueError(
                "Unknown digraph `%{0}`"
                " in proxy command string `{1}`"
                .format(char, command))
    else:
        translated.append(char)
        continue
return ''.join(translated)
def expand_ssh_proxy_command(command, user, addr, port=22)
Expand special digraphs ``%h``, ``%p``, and ``%r``.

Return a copy of `command` with the following string substitutions
applied:

* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.

See also: man page ``ssh_config``, section "TOKENS".
4.255947
4.004621
1.062759
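For example, with the substitutions listed above (host and user values illustrative)::

    cmd = expand_ssh_proxy_command(
        'ssh -W %h:%p gateway.example.org', 'ubuntu', '10.0.0.7')
    # -> 'ssh -W 10.0.0.7:22 gateway.example.org'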
# try different strategies and use the first one that succeeds
try:
    return os.cpu_count()  # Py3 only
except AttributeError:
    pass
try:
    import multiprocessing
    return multiprocessing.cpu_count()
except ImportError:
    # no multiprocessing?
    pass
except NotImplementedError:
    # multiprocessing cannot determine CPU count
    pass
try:
    from subprocess32 import check_output
    ncpus = check_output('nproc')
    return int(ncpus)
except CalledProcessError:
    # no `/usr/bin/nproc`
    pass
except (ValueError, TypeError):
    # unexpected output from `nproc`
    pass
except ImportError:
    # no subprocess32?
    pass
try:
    from subprocess import check_output
    ncpus = check_output('nproc')
    return int(ncpus)
except CalledProcessError:
    # no `/usr/bin/nproc`
    pass
except (ValueError, TypeError):
    # unexpected output from `nproc`
    pass
except ImportError:
    # no subprocess.check_output (Py 2.6)
    pass
raise RuntimeError("Cannot determine number of processors")
def get_num_processors()
Return number of online processor cores.
3.106613
3.132013
0.99189
if k1 in mapping:
    if more:
        return has_nested_keys(mapping[k1], *more)
    else:
        return True
else:
    return False
def has_nested_keys(mapping, k1, *more)
Return ``True`` if `mapping[k1][k2]...[kN]` is valid.

Example::

    >>> D = {
    ...     'a': {
    ...         'x': 0,
    ...         'y': {
    ...             'z': 1,
    ...         },
    ...     },
    ...     'b': 3,
    ... }
    >>> has_nested_keys(D, 'a', 'x')
    True
    >>> has_nested_keys(D, 'a', 'y', 'z')
    True
    >>> has_nested_keys(D, 'a', 'q')
    False

When a single key is passed, this is just another way of writing
``k1 in mapping``::

    >>> has_nested_keys(D, 'b')
    True
1.799679
4.289853
0.41952
# we assume one and only one of the regexps will match
for regexp in _IP_ADDRESS_RE:
    match = regexp.search(addr)
    if not match:
        continue
    # can raise netaddr.AddrFormatError
    ip_addr = netaddr.IPAddress(match.group('ip_addr'))
    try:
        port = match.group('port')
    except IndexError:
        port = None
    if port is not None:
        port = int(port[1:])  # skip leading `:`
    else:
        port = default_port
    return ip_addr, port
# parse failed
raise netaddr.AddrFormatError(
    "Could not extract IP address and port from `{0}`"
    .format(addr))
def parse_ip_address_and_port(addr, default_port=22)
Return a pair (IP address, port) extracted from string `addr`.

Different formats are accepted for the address/port string:

* IPv6 literals in square brackets, with or without an optional port
  specification, as used in URLs::

    >>> parse_ip_address_and_port('[fe80::dead:beef]:1234')
    (IPAddress('fe80::dead:beef'), 1234)

    >>> parse_ip_address_and_port('[fe80::dead:beef]')
    (IPAddress('fe80::dead:beef'), 22)

* IPv6 literals with a "local interface" specification::

    >>> parse_ip_address_and_port('[fe80::dead:beef%eth0]')
    (IPAddress('fe80::dead:beef'), 22)

    >>> parse_ip_address_and_port('fe80::dead:beef%eth0')
    (IPAddress('fe80::dead:beef'), 22)

* bare IPv6 addresses::

    >>> parse_ip_address_and_port('fe80::dead:beef')
    (IPAddress('fe80::dead:beef'), 22)

    >>> parse_ip_address_and_port('2001:db8:5ca1:1f0:f816:3eff:fe05:f40f')
    (IPAddress('2001:db8:5ca1:1f0:f816:3eff:fe05:f40f'), 22)

* IPv4 addresses, with or without an additional port specification::

    >>> parse_ip_address_and_port('192.0.2.123')
    (IPAddress('192.0.2.123'), 22)

    >>> parse_ip_address_and_port('192.0.2.123:999')
    (IPAddress('192.0.2.123'), 999)

Note that the default port can be changed by passing an additional
parameter::

    >>> parse_ip_address_and_port('192.0.2.123', 987)
    (IPAddress('192.0.2.123'), 987)

    >>> parse_ip_address_and_port('fe80::dead:beef', 987)
    (IPAddress('fe80::dead:beef'), 987)

:raise netaddr.AddrFormatError:
    Upon parse failure, e.g., syntactically incorrect IP address.
3.602484
3.425287
1.051732
prev_handler = signal.getsignal(signum)
signal.signal(signum, handler)
yield
signal.signal(signum, prev_handler)
def sighandler(signum, handler)
Context manager to run code with UNIX signal `signum` bound to `handler`. The existing handler is saved upon entering the context and restored upon exit. The `handler` argument may be anything that can be passed to Python's `signal.signal <https://docs.python.org/2/library/signal.html#signal.signal>`_ standard library call.
2.108839
2.806251
0.751479
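Assuming the usual `contextlib.contextmanager` decoration (suggested by the bare ``yield``), a sketch of use::

    import signal

    def ignore_interrupt(signum, frame):
        print("interrupt ignored")        # illustrative handler

    with sighandler(signal.SIGINT, ignore_interrupt):
        do_critical_work()                # hypothetical; SIGINT now handled above
    # the previous SIGINT handler is restored here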
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp(suffix, prefix, dir)
os.chdir(tmpdir)
yield
os.chdir(cwd)
if delete:
    shutil.rmtree(tmpdir, ignore_errors=True)
def temporary_dir(delete=True, dir=None, prefix='elasticluster.', suffix='.d')
Make a temporary directory and make it current for the code in this context. Delete temporary directory upon exit from the context, unless ``delete=False`` is passed in the arguments. Arguments *suffix*, *prefix* and *dir* are exactly as in :func:`tempfile.mkdtemp()` (but have different defaults).
2.233401
2.512055
0.889073
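Under the same context-manager assumption, a sketch::

    import os

    with temporary_dir():
        # cwd is now a fresh `elasticluster.*.d` directory
        open('scratch.txt', 'w').close()
        assert os.path.exists('scratch.txt')
    # back in the original cwd; the temporary directory was deleted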
delay = int(delay)
if handler is None:
    def default_handler(signum, frame):
        raise RuntimeError("{:d} seconds timeout expired".format(delay))
    handler = default_handler
prev_sigalrm_handler = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, handler)
signal.alarm(delay)
yield
signal.alarm(0)
signal.signal(signal.SIGALRM, prev_sigalrm_handler)
def timeout(delay, handler=None)
Context manager to run code and deliver a SIGALRM signal after `delay`
seconds.

Note that `delay` must be a whole number; otherwise it is converted to
an integer by Python's `int()` built-in function. For floating-point
numbers, that means rounding off to the nearest integer from below.

If the optional argument `handler` is supplied, it must be a callable
that is invoked if the alarm triggers while the code is still running.
If no `handler` is provided (default), then a `RuntimeError` stating
that the timeout expired is raised.
2.036374
2.222646
0.916194
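A sketch of use (SIGALRM-based, hence Unix-only; the slow call is hypothetical)::

    try:
        with timeout(5):
            possibly_slow_network_call()  # hypothetical
    except RuntimeError as err:
        print("gave up:", err)            # raised if the call exceeded 5 seconds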
# `warnings.formatwarning` produces multi-line output that does
# not look good in a log file, so let us replace it with something
# simpler...
return ('{category}: {message}'
        .format(message=message, category=category.__name__))
def format_warning_oneline(message, category, filename, lineno, file=None, line=None)
Format a warning for logging. The returned value should be a single-line string, for better logging style (although this is not enforced by the code). This methods' arguments have the same meaning of the like-named arguments from `warnings.formatwarning`.
7.50691
8.511263
0.881997
global _warnings_showwarning
if capture:
    assert _warnings_showwarning is None
    _warnings_showwarning = warnings.showwarning
    # `warnings.showwarning` must be a function, a generic
    # callable object is not accepted ...
    warnings.showwarning = _WarningsLogger(logger, format_warning_oneline).__call__
else:
    assert _warnings_showwarning is not None
    warnings.showwarning = _warnings_showwarning
    _warnings_showwarning = None
def redirect_warnings(capture=True, logger='py.warnings')
If `capture` is true, redirect all warnings to the logging package. If `capture` is false, ensure that warnings are not redirected to logging but to their original destinations.
4.628515
4.638669
0.997811
pass
def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, node_name=None)
Starts a new instance on the cloud using the given properties. Multiple
instances might be started in different threads at the same time. The
implementation should handle any problems regarding this itself.

:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
    instance
:param str flavor: machine type to use for the instance
:param str image_id: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None

:return: str - instance id of the started instance
6,482.572266
77,491.765625
0.083655
result = list()
for element in [e.strip() for e in values.split(',')]:
    for item in [i for i in known if i.name == element or i.id == element]:
        result.append(item)
return result
def __get_name_or_id(values, known)
Return list of values that match attribute ``.id`` or ``.name`` of any
object in list `known`.

:param str values: comma-separated list (i.e., a Python string) of items
:param list known: list of libcloud items to filter
:return: list of the libcloud items that match the given values
3.459106
3.644947
0.949014
result = OrderedDict()
for kv in cmdline.strip().split():
    if '=' in kv:
        # limit max split to only 1, to correctly handle cases like
        # `root=UUID=c9d37675-ef02-42f0-8900-a72ec2cd0f56`
        k, v = kv.split('=', 1)
        result[k] = v
    else:
        # represent "boolean" flags like `ro` as a key with value `None`
        result[kv] = None
return result
def _parse_linux_cmdline(cmdline)
Parse a Linux boot parameter line into key/value pairs.
8.363512
7.755284
1.078428
# try to be compatible with Py2.4
parts = []
for k, v in kv.items():
    if v is None:
        parts.append(str(k))
    else:
        parts.append('%s=%s' % (k, v))
return ' '.join(parts)
def _assemble_linux_cmdline(kv)
Given a dictionary, assemble a Linux boot command line.
3.041569
3.035236
1.002086
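A round-trip example of the parse/assemble pair defined above::

    kv = _parse_linux_cmdline('root=/dev/sda1 ro quiet')
    # OrderedDict([('root', '/dev/sda1'), ('ro', None), ('quiet', None)])
    assert _assemble_linux_cmdline(kv) == 'root=/dev/sda1 ro quiet'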
kv = _parse_linux_cmdline(cmdline)
if state == 'absent':
    try:
        del kv[name]
    except KeyError:
        pass
elif state == 'present':
    kv[name] = value
return _assemble_linux_cmdline(kv)
def _edit_linux_cmdline(cmdline, state, name, value=None)
Return a new Linux command line, with parameter `name` added, replaced, or removed.
2.77816
2.577359
1.07791
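For instance, adding and then removing a parameter with the helper above::

    line = 'root=/dev/sda1 ro quiet'
    added = _edit_linux_cmdline(line, 'present', 'console', 'ttyS0')
    assert added == 'root=/dev/sda1 ro quiet console=ttyS0'
    removed = _edit_linux_cmdline(line, 'absent', 'quiet')
    assert removed == 'root=/dev/sda1 ro'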
config = str(config)  # make a copy so we can alter it
matches = list(self._GRUB_KERNEL_BOOT_ENTRY.finditer(config))
# process matches in reverse order, so replacing one match
# does not alter the start/end positions of other matches
for match in reversed(matches):
    start = match.end()
    # Linux command line extends up to the newline
    if config[start] == '\n':
        end = start
        cmdline = ''
    else:
        end = config.find('\n', start)
        cmdline = config[start:end]
    new_cmdline = _edit_linux_cmdline(cmdline, state, name, value)
    config = config[:start] + new_cmdline + config[end:]
return config
def edit(self, config, state, name, value=None)
Change all occurrences of `name` in a kernel boot line in the given `config` text.
5.157408
4.867019
1.059665
config = str(config)  # make a copy so we can alter it
pos = config.find(self._GRUB_CMDLINE_VAR)
while pos > -1:
    # quote char can be `'` or `"`
    quote_pos = pos + len(self._GRUB_CMDLINE_VAR)
    quote_char = config[quote_pos]
    start = quote_pos + 1
    # string ends with matching quote
    end = config.index(quote_char, start)
    cmdline = config[start:end]
    new_cmdline = _edit_linux_cmdline(cmdline, state, name, value)
    config = config[:start] + new_cmdline + config[end:]
    delta = len(new_cmdline) - len(cmdline)
    pos = config.find(self._GRUB_CMDLINE_VAR, end + delta)
return config
def edit(self, config, state, name, value=None)
Change all occurrences of `name` in a kernel boot line in the given
`config` text. It is expected that `config` is the contents of a file
following the syntax of :file:`/etc/default/grub`.

.. warning::

  This module only does a very crude textual search and replace: it is
  assumed that input lines have the form ``KEY="value"`` (quote
  characters can be double ``"`` or single ``'``), and that the
  ``value`` string spans a single line and contains all relevant
  kernel boot parameters.

  However, the GRUB docs state that :file:`/etc/default/grub` "is
  sourced by a shell script, and so must be valid POSIX shell input;
  normally, it will just be a sequence of ``KEY=value`` lines". In
  particular, the following cases are valid POSIX shell input but will
  be mishandled by this module:

  - It is assumed that all ``KEY=value`` assignments are on a single
    line. Multi-line strings will make the module error out.

  - Variable substitutions in the ``value`` part will not be detected.

  - Escaped quotes will be treated as regular quotes, i.e., there is
    no way to embed a ``"`` or a ``'`` character in a ``KEY=value``
    line with this module.

  - String concatenation is not supported: whereas the POSIX shell
    interprets a line ``KEY="foo"'bar'`` as assigning the string
    ``foobar`` to ``KEY``, this module will only operate on the
    ``"foo"`` part.
3.332218
3.353832
0.993555
cluster_template = self.params.cluster
if self.params.cluster_name:
    cluster_name = self.params.cluster_name
else:
    cluster_name = self.params.cluster

creator = make_creator(self.params.config,
                       storage_path=self.params.storage)

if cluster_template not in creator.cluster_conf:
    raise ClusterNotFound(
        "No cluster template named `{0}`"
        .format(cluster_template))

# possibly overwrite node mix from config
cluster_nodes_conf = creator.cluster_conf[cluster_template]['nodes']
for kind, num in self.params.nodes_override.items():
    if kind not in cluster_nodes_conf:
        raise ConfigurationError(
            "No node group `{kind}` defined"
            " in cluster template `{template}`"
            .format(kind=kind, template=cluster_template))
    cluster_nodes_conf[kind]['num'] = num

# First, check if the cluster is already created.
try:
    cluster = creator.load_cluster(cluster_name)
except ClusterNotFound:
    try:
        cluster = creator.create_cluster(
            cluster_template, cluster_name)
    except ConfigurationError as err:
        log.error("Starting cluster %s: %s", cluster_template, err)
        return

try:
    print("Starting cluster `{0}` with:".format(cluster.name))
    for cls in cluster.nodes:
        print("* {0:d} {1} nodes.".format(len(cluster.nodes[cls]), cls))
    print("(This may take a while...)")
    min_nodes = dict(
        (kind, cluster_nodes_conf[kind]['min_num'])
        for kind in cluster_nodes_conf)
    cluster.start(min_nodes, self.params.max_concurrent_requests)
    if self.params.no_setup:
        print("NOT configuring the cluster as requested.")
    else:
        print("Configuring the cluster ...")
        print("(this too may take a while)")
        ok = cluster.setup()
        if ok:
            print("\nYour cluster `{0}` is ready!"
                  .format(cluster.name))
        else:
            print("\nWARNING: YOUR CLUSTER `{0}` IS NOT READY YET!"
                  .format(cluster.name))
    print(cluster_summary(cluster))
except (KeyError, ImageError, SecurityGroupError, ClusterError) as err:
    log.error("Could not start cluster `%s`: %s", cluster.name, err)
    raise
def execute(self)
Starts a new cluster.
3.595728
3.502825
1.026522
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
                       storage_path=self.params.storage)
try:
    cluster = creator.load_cluster(cluster_name)
except (ClusterNotFound, ConfigurationError) as err:
    log.error("Cannot stop cluster `%s`: %s", cluster_name, err)
    return os.EX_NOINPUT

if not self.params.yes:
    confirm_or_abort(
        "Do you really want to stop cluster `{cluster_name}`?"
        .format(cluster_name=cluster_name),
        msg="Aborting upon user request.")

print("Destroying cluster `%s` ..." % cluster_name)
cluster.stop(force=self.params.force, wait=self.params.wait)
def execute(self)
Stops the cluster if it's running.
4.417225
4.107154
1.075495
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
                       storage_path=self.params.storage)
try:
    cluster = creator.load_cluster(cluster_name)
except (ClusterNotFound, ConfigurationError) as e:
    log.error("Cannot load cluster `%s`: %s", cluster_name, e)
    return os.EX_NOINPUT

if not self.params.yes:
    confirm_or_abort(
        "Do you really want to pause cluster `{cluster_name}`?"
        .format(cluster_name=cluster_name),
        msg="Aborting upon user request.")

print("Pausing cluster `%s` ..." % cluster_name)
cluster.pause()
def execute(self)
Pause the cluster if it is running.
4.613403
4.170209
1.106276
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
                       storage_path=self.params.storage)
try:
    cluster = creator.load_cluster(cluster_name)
except (ClusterNotFound, ConfigurationError) as e:
    log.error("Cannot load cluster `%s`: %s", cluster_name, e)
    return os.EX_NOINPUT

print("Resuming cluster `%s` ..." % cluster_name)
cluster.resume()
def execute(self)
Resume the cluster if it is paused.
4.553427
4.066553
1.119726
creator = make_creator(self.params.config,
                       storage_path=self.params.storage)
cluster_name = self.params.cluster
try:
    cluster = creator.load_cluster(cluster_name)
    if self.params.update:
        cluster.update()
except (ClusterNotFound, ConfigurationError) as ex:
    log.error("Listing nodes from cluster %s: %s", cluster_name, ex)
    return

if self.params.pretty_json:
    print(json.dumps(cluster, default=dict, indent=4))
elif self.params.json:
    print(json.dumps(cluster, default=dict))
else:
    print(cluster_summary(cluster))
    for cls in cluster.nodes:
        print("%s nodes:" % cls)
        print("")
        for node in cluster.nodes[cls]:
            txt = ["    " + i for i in node.pprint().splitlines()]
            print('  - ' + "\n".join(txt)[4:])
            print("")
def execute(self)
Lists all nodes within the specified cluster with certain information like id and ip.
4.252455
3.992018
1.06524
result = self.__dict__.copy()
for key in omit:
    if key in result:
        del result[key]
return result
def to_dict(self, omit=())
Return a (shallow) copy of self cast to a dictionary, optionally omitting some key/value pairs.
2.908552
2.471998
1.1766
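A quick illustration; the object and key names here are hypothetical::

    state = obj.to_dict(omit=('_secret', 'password'))
    assert '_secret' not in state and 'password' not in state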
keys = Struct.keys(self)
for key in (
        '_cloud_provider',
        '_naming_policy',
        '_setup_provider',
        'known_hosts_file',
        'repository',
):
    if key in keys:
        keys.remove(key)
return keys
def keys(self)
Only expose some of the attributes when used as a dictionary.
7.709485
6.92149
1.113848
for key, attr in [
        ('ssh_to', 'ssh_to'),
]:
    oldvalue = getattr(self, attr)
    try:
        newvalue = cluster_config[key]
    except KeyError:
        continue
    if key in cluster_config and newvalue != oldvalue:
        setattr(self, attr, newvalue)
        log.debug(
            "Configuration attribute `%s` updated: %s -> %s",
            key, oldvalue, newvalue)
def update_config(self, cluster_config)
Update current configuration. This method is usually called after loading a `Cluster` instance from a persistent storage. Note that not all fields are actually updated, but only those that can be safely updated.
3.495997
3.471428
1.007077
if not self._NODE_KIND_RE.match(kind):
    raise ValueError(
        "Invalid name `{kind}`. The `kind` argument may only contain"
        " alphanumeric characters, and must not end with a digit."
        .format(kind=kind))

if kind not in self.nodes:
    self.nodes[kind] = []

# To ease json dump/load, use `extra` dictionary to
# instantiate Node class
extra.update(
    cloud_provider=self._cloud_provider,
    cluster_name=self.name,
    flavor=flavor,
    image_id=image_id,
    image_user=image_user,
    image_userdata=image_userdata,
    kind=kind,
    security_group=security_group,
)
for attr in (
        'flavor',
        'image_id',
        'image_user',
        'image_userdata',
        'security_group',
        'user_key_name',
        'user_key_private',
        'user_key_public',
):
    if attr not in extra:
        extra[attr] = getattr(self, attr)

if not name:
    # `extra` contains key `kind` already
    name = self._naming_policy.new(**extra)
else:
    self._naming_policy.use(kind, name)
node = Node(name=name, **extra)

self.nodes[kind].append(node)
return node
def add_node(self, kind, image_id, image_user, flavor, security_group, image_userdata='', name=None, **extra)
Add a new node to the cluster. This factory method provides an easy way to add a new node to the cluster by specifying all relevant parameters. The node is neither started nor set up automatically; this has to be done manually afterwards. :param str kind: kind of node to start; this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider`. Please note that this can only contain alphanumeric characters and hyphens (and must not end with a digit), as it is used to build a valid hostname :param str image_id: image ID to use for the cloud instance (e.g. AMI on Amazon) :param str image_user: user to log in with on the given image :param str flavor: machine type to use for the cloud instance :param str security_group: security group that defines firewall rules for the instance :param str image_userdata: commands to execute after the instance starts :param str name: name of this node; automatically generated if None :raises: ValueError: `kind` argument is an invalid string. :return: the created :py:class:`Node`
3.408718
3.298651
1.033367
for i in range(num):
    self.add_node(kind, image_id, image_user, flavor,
                  security_group, image_userdata=image_userdata, **extra)
def add_nodes(self, kind, num, image_id, image_user, flavor, security_group, image_userdata='', **extra)
Helper method to add multiple nodes of the same kind to a cluster. :param str kind: kind of node to start; this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` :param int num: number of nodes of this kind to add :param str image_id: image ID to use for the cloud instance (e.g. AMI on Amazon) :param str image_user: user to log in with on the given image :param str flavor: machine type to use for the cloud instance :param str security_group: security group that defines firewall rules for the instance :param str image_userdata: commands to execute after the instance starts
2.055588
3.31126
0.620787
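A hedged usage sketch of these two factory methods; the cluster object, image ID, user, and flavor below are hypothetical placeholders:

# Register one frontend node, then four compute nodes of the same kind.
frontend = cluster.add_node(
    kind='frontend',
    image_id='ami-0abcd1234',     # hypothetical image identifier
    image_user='ubuntu',
    flavor='m1.small',
    security_group='default',
)
cluster.add_nodes('compute', 4, 'ami-0abcd1234', 'ubuntu',
                  'm1.large', 'default')
# Nodes are only registered here; they still have to be started.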
if node.kind not in self.nodes:
    raise NodeNotFound(
        "Unable to remove node %s: invalid node type `%s`."
        % (node.name, node.kind))
else:
    try:
        index = self.nodes[node.kind].index(node)
        if self.nodes[node.kind][index]:
            del self.nodes[node.kind][index]
        if stop:
            node.stop()
        self._naming_policy.free(node.kind, node.name)
        self.repository.save_or_update(self)
        remaining_nodes = self.get_all_nodes()
        self._gather_node_ip_addresses(
            remaining_nodes, self.start_timeout,
            self.ssh_probe_timeout, remake=True)
    except ValueError:
        raise NodeNotFound("Node %s not found in cluster" % node.name)
def remove_node(self, node, stop=False)
Remove a node from the cluster. By default the node is not stopped; it is only removed from this cluster's known hosts. :param node: node to remove :type node: :py:class:`Node` :param stop: also stop the node :type stop: bool
4.63737
4.639515
0.999538
nodes = self.get_all_nodes()

log.info(
    "Starting cluster nodes (timeout: %d seconds) ...",
    self.start_timeout)
if max_concurrent_requests == 0:
    try:
        max_concurrent_requests = 4 * get_num_processors()
    except RuntimeError:
        log.warning(
            "Cannot determine number of processors!"
            " Will start nodes sequentially ...")
        max_concurrent_requests = 1
if max_concurrent_requests > 1:
    nodes = self._start_nodes_parallel(nodes, max_concurrent_requests)
else:
    nodes = self._start_nodes_sequentially(nodes)

# checkpoint cluster state
self.repository.save_or_update(self)

not_started_nodes = self._check_starting_nodes(nodes, self.start_timeout)

# now that all nodes are up, checkpoint cluster state again
self.repository.save_or_update(self)

# Try to connect to each node to gather IP addresses and SSH host keys
started_nodes = nodes - not_started_nodes
if not started_nodes:
    raise ClusterSizeError("No nodes could be started!")
log.info(
    "Checking SSH connection to nodes (timeout: %d seconds) ...",
    self.start_timeout)
self._gather_node_ip_addresses(
    started_nodes, self.start_timeout, self.ssh_probe_timeout)

# It's possible that the node.connect() call updated the
# `preferred_ip` attribute, so let's save the cluster again.
self.repository.save_or_update(self)

# A lot of things could go wrong when starting the cluster.
# Check that the minimum number of nodes within each group is
# reachable.  Raise `ClusterSizeError()` if not.
self._check_cluster_size(self._compute_min_nodes(min_nodes))
def start(self, min_nodes=None, max_concurrent_requests=0)
Start up all the instances in the cloud. To speed things up, all instances are started in separate threads. To make sure ElastiCluster is not interrupted during creation of an instance, the SIGINT handler is overridden; as soon as the last started instance has been returned and saved to the repository, SIGINT is handled as usual. A VM instance is considered 'up and running' as soon as an SSH connection can be established. If the startup timeout is reached before all instances are started, ElastiCluster stops the cluster and terminates all VM instances. This method is blocking and might take some time depending on the number of instances to start. :param min_nodes: minimum number of nodes to start in case the quota is reached before all instances are up :type min_nodes: dict [node_kind] = number :param int max_concurrent_requests: Issue at most this number of requests to start VMs; if 1 or less, start nodes one at a time (sequentially). The special value ``0`` means run 4 threads for each available processor.
4.401256
4.178784
1.053238
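A hedged usage sketch of the `min_nodes` parameter; the kind names and counts are hypothetical:

# Accept a partially-started cluster as long as at least one frontend
# and two compute nodes came up within the timeout; otherwise
# `ClusterSizeError` is raised.
cluster.start(
    min_nodes={'frontend': 1, 'compute': 2},
    max_concurrent_requests=0,   # 0 = four threads per available processor
)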
with timeout(lapse, raise_timeout_error):
    try:
        while nodes:
            nodes = set(node for node in nodes
                        if not node.is_alive())
            if nodes:
                log.debug("Waiting for %d more nodes to come up ...",
                          len(nodes))
                time.sleep(self.polling_interval)
    except TimeoutError:
        log.error("Some nodes did not start correctly"
                  " within the given %d-seconds timeout: %s",
                  lapse, ', '.join(node.name for node in nodes))
# return list of not-yet-started nodes,
# so we can exclude them from coming rounds
return nodes
def _check_starting_nodes(self, nodes, lapse)
Wait until all given nodes are alive, for max `lapse` seconds.
5.09739
4.778043
1.066836
# for convenience, we might set this to ``None`` if the file cannot
# be opened -- but we do not want to forget the cluster-wide
# setting in case the error is transient
known_hosts_path = self.known_hosts_file

# If run with remake=True, delete known_hosts_file so that it will
# be recreated.  Prevents "Invalid host key" errors.
if remake and os.path.isfile(known_hosts_path):
    os.remove(known_hosts_path)

# Create the file if it's not present, otherwise the
# following lines will raise an error
try:
    fd = open(known_hosts_path, 'a')
    fd.close()
except IOError as err:
    log.warning("Error opening SSH 'known hosts' file `%s`: %s",
                known_hosts_path, err)
    known_hosts_path = None

keys = paramiko.hostkeys.HostKeys(known_hosts_path)

with timeout(lapse, raise_timeout_error):
    try:
        while nodes:
            for node in copy(nodes):
                ssh = node.connect(
                    keyfile=known_hosts_path,
                    timeout=ssh_timeout)
                if ssh:
                    log.info("Connection to node `%s` successful,"
                             " using IP address %s to connect.",
                             node.name, node.connection_ip())
                    # Add host keys to the keys object.
                    for host, key in ssh.get_host_keys().items():
                        for keytype, keydata in key.items():
                            keys.add(host, keytype, keydata)
                    self._save_keys_to_known_hosts_file(keys)
                    nodes.remove(node)
            if nodes:
                time.sleep(self.polling_interval)
    except TimeoutError:
        log.error(
            "Some nodes of the cluster were unreachable"
            " within the given %d-seconds timeout: %s",
            lapse, ', '.join(node.name for node in nodes))

# return set of nodes that could not be reached
return nodes
def _gather_node_ip_addresses(self, nodes, lapse, ssh_timeout, remake=False)
Connect via SSH to each node. Return the set of nodes that could not be reached within `lapse` seconds.
4.318869
4.263417
1.013006
# find all node groups with an unsatisfied amount of nodes
unsatisfied = 0
for kind, required in min_nodes.items():
    available = len(self.nodes[kind])
    if available < required:
        log.error(
            "Not enough nodes of kind `%s`:"
            " %d required, but only %d available.",
            kind, required, available)
        unsatisfied += 1
if unsatisfied:
    raise ClusterSizeError()
def _check_cluster_size(self, min_nodes)
Check that the size of the cluster fits the needs of the user, considering the minimum values given for each node group; if none are given, assume the user wants at least the specified number of nodes. :param min_nodes: minimum number of nodes for each kind :type min_nodes: dict [node_kind] = number :raises: ClusterSizeError in case the size does not fit the minimum number specified by the user.
6.15641
5.251412
1.172334
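A standalone sketch of the same minimum-size check, with hypothetical data (the function name and inputs are illustrative, not part of the original API):

def check_cluster_size(nodes, min_nodes):
    # Return the list of (kind, required, available) triples that
    # violate the minimum-size constraint; empty means all satisfied.
    return [(kind, required, len(nodes.get(kind, [])))
            for kind, required in min_nodes.items()
            if len(nodes.get(kind, [])) < required]

print(check_cluster_size({'compute': ['node001']}, {'compute': 2}))
# -> [('compute', 2, 1)]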
for kind, nodes in self.nodes.items():
    for node in nodes:
        if node.name == nodename:
            return node
raise NodeNotFound(
    "Node `{0}` not found in cluster `{1}`"
    .format(nodename, self.name))
def get_node_by_name(self, nodename)
Return the node corresponding to the name `nodename`. :param nodename: Name of the node :type nodename: str
3.081062
3.17838
0.969381
log.debug("Stopping cluster `%s` ...", self.name) failed = self._stop_all_nodes(wait) if failed: if force: self._delete_saved_data() log.warning( "Not all cluster nodes have been terminated." " However, as requested, data about the cluster" " has been removed from local storage.") else: self.repository.save_or_update(self) log.warning( "Not all cluster nodes have been terminated." " Fix errors above and re-run `elasticluster stop %s`", self.name) else: self._delete_saved_data()
def stop(self, force=False, wait=False)
Terminate all VMs in this cluster and delete its repository. :param bool force: remove cluster from storage even if not all nodes could be stopped.
6.155062
6.019617
1.022501
log.info("Pausing cluster `%s` ...", self.name) failed = self._pause_all_nodes() if os.path.exists(self.known_hosts_file): os.remove(self.known_hosts_file) self.repository.save_or_update(self) if failed: log.warning( "Not all cluster nodes have been successfully " "stopped. Some nodes may still be running - " "check error messages above and consider " "re-running `elasticluster pause %s` if " "necessary.", self.name)
def pause(self)
Pause all VMs in this cluster and store data so that they can be restarted later.
6.16455
5.829879
1.057406
log.info("Resuming cluster `%s` ...", self.name) failed = self._resume_all_nodes() for node in self.get_all_nodes(): node.update_ips() self._gather_node_ip_addresses( self.get_all_nodes(), self.start_timeout, self.ssh_probe_timeout) self.repository.save_or_update(self) if failed: log.warning( "Not all cluster nodes have been successfully " "restarted. Check error messages above and consider " "re-running `elasticluster resume %s` if " "necessary.", self.name) return if not self._setup_provider.resume_cluster(self): log.warning("Elasticluster was not able to guarantee that the " "cluster restarted correctly - check the errors " "above and check your config.")
def resume(self)
Resume all paused VMs in this cluster.
7.063498
7.00324
1.008604
failed = 0
for node in self.get_all_nodes():
    if not node.instance_id:
        log.warning(
            "Node `%s` has no instance ID."
            " Assuming it did not start correctly,"
            " so removing it anyway from the cluster.", node.name)
        self.nodes[node.kind].remove(node)
        continue
    # try to stop the node, waiting for termination if requested
    try:
        node.stop(wait)
        self.nodes[node.kind].remove(node)
        log.debug(
            "Removed node `%s` from cluster `%s`", node.name, self.name)
    except InstanceNotFoundError as err:
        log.info(
            "Node `%s` (instance ID `%s`) was not found;"
            " assuming it has already been terminated.",
            node.name, node.instance_id)
    except Exception as err:
        failed += 1
        log.error(
            "Could not stop node `%s` (instance ID `%s`): %s %s",
            node.name, node.instance_id, err, err.__class__)
return failed
def _stop_all_nodes(self, wait=False)
Terminate all cluster nodes. Return number of failures.
3.66435
3.486243
1.051088
failed = 0

def _pause_specific_node(node):
    if not node.instance_id:
        log.warning("Node `%s` has no instance id."
                    " It is either already stopped, or"
                    " never created properly. Not attempting"
                    " to stop it again.", node.name)
        return None
    try:
        return node.pause()
    except Exception as err:
        log.error(
            "Could not stop node `%s` (instance ID `%s`): %s %s",
            node.name, node.instance_id, err, err.__class__)
        node.update_ips()
        return None

nodes = self.get_all_nodes()
thread_pool = self._make_thread_pool(max_thread_pool_size)
for node, state in zip(nodes,
                       thread_pool.map(_pause_specific_node, nodes)):
    if state is None:
        failed += 1
    else:
        self.paused_nodes[node.name] = state
return failed
def _pause_all_nodes(self, max_thread_pool_size=0)
Pause all cluster nodes, storing the data needed to restart them later. :return: int - number of failures.
3.682718
3.779426
0.974412
if ssh_to is None:
    ssh_to = self.ssh_to

# first try to interpret `ssh_to` as a node name
if ssh_to:
    try:
        return self.get_node_by_name(ssh_to)
    except NodeNotFound:
        pass

# next, ensure `ssh_to` is a class name
if ssh_to:
    try:
        parts = self._naming_policy.parse(ssh_to)
        log.warning(
            "Node `%s` not found."
            " Trying to find other node in class `%s` ...",
            ssh_to, parts['kind'])
        ssh_to = parts['kind']
    except ValueError:
        # it's already a class name
        pass

# try getting first node of kind `ssh_to`
if ssh_to:
    try:
        nodes = self.nodes[ssh_to]
    except KeyError:
        raise ConfigurationError(
            "Invalid configuration item `ssh_to={ssh_to}` in cluster `{name}`:"
            " node class `{ssh_to}` does not exist in this cluster."
            .format(ssh_to=ssh_to, name=self.name))
    try:
        return nodes[0]
    except IndexError:
        log.warning(
            "Chosen `ssh_to` class `%s` is empty: unable to"
            " get the chosen frontend node from that class.",
            ssh_to)

# If we reach this point, `ssh_to` was not set or the
# preferred class was empty.  Try "natural" `ssh_to` values.
for kind in ['ssh', 'login', 'frontend', 'master']:
    try:
        nodes = self.nodes[kind]
        return nodes[0]
    except (KeyError, IndexError):
        pass

# ... if all else fails, return first node
for kind in sorted(self.nodes.keys()):
    if self.nodes[kind]:
        return self.nodes[kind][0]

# Uh-oh, no nodes in this cluster!
raise NodeNotFound("Unable to find a valid frontend:"
                   " cluster has no nodes!")
def get_ssh_to_node(self, ssh_to=None)
Return target node for SSH/SFTP connections. The target node is the first node of the class specified in the configuration file as ``ssh_to`` (but the argument ``ssh_to`` can override this choice). If no ``ssh_to`` has been specified in this cluster's config, then try node class names ``ssh``, ``login``, ``frontend``, and ``master``: if any of these is non-empty, return the first node. If all else fails, return the first node of the first class (in alphabetical order). :return: :py:class:`Node` :raise: :py:class:`elasticluster.exceptions.NodeNotFound` if no valid frontend node is found
3.879643
3.440445
1.127657
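A hedged usage sketch; the cluster object and class names are hypothetical:

frontend = cluster.get_ssh_to_node()        # honor the cluster's `ssh_to` setting
master = cluster.get_ssh_to_node('master')  # explicit override by class name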
try:
    # setup the cluster using the setup provider
    ret = self._setup_provider.setup_cluster(self, extra_args)
except Exception as err:
    log.error(
        "The cluster hosts are up and running,"
        " but %s failed to set the cluster up: %s",
        self._setup_provider.HUMAN_READABLE_NAME, err)
    ret = False

if not ret:
    log.warning(
        "Cluster `%s` not yet configured. Please, re-run"
        " `elasticluster setup %s` and/or check your configuration",
        self.name, self.name)

return ret
def setup(self, extra_args=tuple())
Configure the cluster nodes. Actual action is delegated to the :py:class:`elasticluster.providers.AbstractSetupProvider` that was provided at construction time. :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: bool - True on success, False otherwise
6.522827
5.039343
1.29438
for node in self.get_all_nodes():
    try:
        node.update_ips()

        # If we previously did not have a preferred_ip or the
        # preferred_ip is not in the current list, then try to connect
        # to one of the node IPs and update the preferred_ip.
        if node.ips and \
                not (node.preferred_ip and
                     node.preferred_ip in node.ips):
            node.connect()
    except InstanceError as ex:
        log.warning("Ignoring error updating information on node %s: %s",
                    node, ex)
self.repository.save_or_update(self)
def update(self)
Update connection information of all nodes in this cluster. It happens, for example, that public IPs are not available immediately; calling this method again later might pick them up.
5.783468
5.458794
1.059477
match = NodeNamingPolicy._NODE_NAME_RE.match(name)
if match:
    return match.groupdict()
else:
    raise ValueError(
        "Cannot parse node name `{name}`"
        .format(name=name))
def parse(name)
Return dict of parts forming `name`. Raise `ValueError` if string `name` cannot be correctly parsed. The default implementation uses `NodeNamingPolicy._NODE_NAME_RE` to parse the name back into constituent parts. This is ideally the inverse of :meth:`format` -- it should be able to parse a node name string into the parameter values that were used to form it.
5.505216
2.892976
1.902959
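A minimal sketch of the parse step, assuming a hypothetical `kind + index` naming pattern (the real `_NODE_NAME_RE` may differ):

import re

# Hypothetical pattern: a kind name followed by a numeric index,
# e.g. "compute001" -> {'kind': 'compute', 'index': '001'}
_NODE_NAME_RE = re.compile(r'^(?P<kind>[a-z][a-z0-9-]*[a-z])(?P<index>\d+)$')

def parse(name):
    match = _NODE_NAME_RE.match(name)
    if match:
        return match.groupdict()
    raise ValueError("Cannot parse node name `{name}`".format(name=name))

print(parse('compute001'))   # {'kind': 'compute', 'index': '001'}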
log.info("Starting node `%s` from image `%s` with flavor %s ...", self.name, self.image_id, self.flavor) self.instance_id = self._cloud_provider.start_instance( self.user_key_name, self.user_key_public, self.user_key_private, self.security_group, self.flavor, self.image_id, self.image_userdata, username=self.image_user, node_name=("%s-%s" % (self.cluster_name, self.name)), **self.extra) log.debug("Node `%s` has instance ID `%s`", self.name, self.instance_id)
def start(self)
Start the node on the cloud using the given instance properties. This method is non-blocking: as soon as the node id is returned from the cloud provider, it will return. The `is_alive`:meth: and `update_ips`:meth: methods should be used to further gather details about the state of the node.
3.950057
3.62427
1.08989
if self.instance_id is not None:
    log.info("Shutting down node `%s` (VM instance `%s`) ...",
             self.name, self.instance_id)
    self._cloud_provider.stop_instance(self.instance_id)
    if wait:
        while self.is_alive():
            time.sleep(1)
    # When an instance is terminated, the EC2 cloud provider will
    # basically still report it as being in "running" state.
    # Setting the `instance_id` attribute to None forces the
    # `is_alive()` method not to check with the cloud provider,
    # forgetting about the instance ID for good.
    self.instance_id = None
def stop(self, wait=False)
Terminate the VM instance launched on the cloud for this specific node.
5.90739
5.56945
1.060677
if self.instance_id is None:
    raise ValueError("Trying to pause a node that has not been started.")
resp = self._cloud_provider.pause_instance(self.instance_id)
self.preferred_ip = None
return resp
def pause(self)
Pause the VM instance and return the info needed to restart it.
8.097794
7.309725
1.107811
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if keyfile and os.path.exists(keyfile):
    ssh.load_host_keys(keyfile)

# Try connecting using the `preferred_ip`, if present.
# Otherwise, try all of them and set `preferred_ip`
# using the first that is working.
ips = self.ips[:]
# This is done in order to "sort" the IPs and put the preferred_ip first.
if self.preferred_ip:
    if self.preferred_ip in ips:
        ips.remove(self.preferred_ip)
    else:
        # Preferred is changed?
        log.debug(
            "IP address %s does not seem to belong to %s anymore."
            " Ignoring it.", self.preferred_ip, self.name)
        self.preferred_ip = ips[0]

for ip in itertools.chain([self.preferred_ip], ips):
    if not ip:
        continue
    log.debug(
        "Trying to connect to host %s using IP address %s ...",
        self.name, ip)
    try:
        addr, port = parse_ip_address_and_port(ip, SSH_PORT)
        extra = {
            'allow_agent': True,
            'key_filename': self.user_key_private,
            'look_for_keys': False,
            'timeout': timeout,
            'username': self.image_user,
        }
        if self.ssh_proxy_command:
            proxy_command = expand_ssh_proxy_command(
                self.ssh_proxy_command,
                self.image_user, addr, port)
            from paramiko.proxy import ProxyCommand
            extra['sock'] = ProxyCommand(proxy_command)
            log.debug("Using proxy command `%s`.", proxy_command)
        ssh.connect(str(addr), port=port, **extra)
        log.debug(
            "Connection to %s succeeded on port %d,"
            " will use this IP address for future connections.",
            ip, port)
        if ip != self.preferred_ip:
            self.preferred_ip = ip
        # Connection successful.
        return ssh
    except socket.error as ex:
        log.debug(
            "Host %s (%s) not reachable within %d seconds: %s -- %r",
            self.name, ip, timeout, ex, type(ex))
    except paramiko.BadHostKeyException as ex:
        log.error(
            "Invalid SSH host key for %s (%s): %s.",
            self.name, ip, ex)
    except paramiko.SSHException as ex:
        log.debug(
            "Ignoring error connecting to %s: %s -- %r",
            self.name, ex, type(ex))

return None
def connect(self, keyfile=None, timeout=5)
Connect to the node via SSH. :param keyfile: Path to the SSH known-hosts file against which host keys are loaded and checked. :param timeout: Maximum time to wait (in seconds) for the TCP connection to be established. :return: :py:class:`paramiko.SSHClient` - SSH connection, or None on failure
3.064887
3.09489
0.990306
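A hedged usage sketch; the node object and known-hosts path are hypothetical, while `exec_command` is the standard paramiko API:

ssh = node.connect(keyfile='/tmp/mycluster.known_hosts', timeout=10)
if ssh is None:
    print("could not reach node `%s`" % node.name)
else:
    _, stdout, _ = ssh.exec_command('hostname')
    print(stdout.read().decode())
    ssh.close()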
self.ips = self._cloud_provider.get_ips(self.instance_id)
if self.preferred_ip not in self.ips:
    self.preferred_ip = None
return self.ips[:]
def update_ips(self)
Retrieve the public and private IPs of the instance from the cloud provider. In some cases the public IP assignment takes some time; since this method is non-blocking, consider calling it repeatedly (up to some timeout) until a public IP appears.
5.244298
3.919624
1.337959
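A hedged polling sketch along the lines the docstring suggests; the node object and the timeout value are hypothetical:

import time

deadline = time.time() + 120        # wait up to two minutes
ips = node.update_ips()
while not ips and time.time() < deadline:
    time.sleep(5)                   # the call is non-blocking, so poll gently
    ips = node.update_ips()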
result = dict(self)
for key in omit:
    if key in result:
        del result[key]
return result
def to_dict(self, omit=())
Return a (shallow) copy of self cast to a dictionary, optionally omitting some key/value pairs.
3.122561
2.534493
1.232026
ips = ', '.join(ip for ip in self.ips if ip)
# the template below is a reconstruction: the original format string
# was lost, but these fields are interpolated in this order
return ("""%s
  preferred IP: %s
  other IPs:    %s
  instance ID:  %s
  flavor:       %s""" % (self.name, self.preferred_ip, ips,
                         self.instance_id, self.flavor))
def pprint(self)
Pretty-print information about the node. :return: str - pretty-printed representation of the node
11.417851
12.400836
0.920732
return {
    'aws_access_key_id': self._access_key,
    'aws_secret_access_key': self._secret_key,
    'aws_region': self._region_name,
    'aws_vpc_name': (self._vpc or ''),
    'aws_vpc_id': (self._vpc_id or ''),
}
def to_vars_dict(self)
Return local state which is relevant for the cluster setup process.
2.682016
2.409533
1.113086
# check for existing connection
if self._ec2_connection:
    return self._ec2_connection

try:
    log.debug("Connecting to EC2 endpoint %s", self._ec2host)

    # connect to webservice
    ec2_connection = boto.ec2.connect_to_region(
        self._region_name,
        aws_access_key_id=self._access_key,
        aws_secret_access_key=self._secret_key,
        is_secure=self._secure,
        host=self._ec2host,
        port=self._ec2port,
        path=self._ec2path,
    )
    # With the loose setting `BOTO_USE_ENDPOINT_HEURISTICS`
    # which is necessary to work around issue #592, Boto will
    # now accept *any* string as an AWS region name;
    # furthermore, it *always* returns a connection object --
    # so the only way to check that we are not going to run
    # into trouble is to check that there *is* a valid host
    # name on the other end of the connection.
    if ec2_connection.host:
        log.debug("EC2 connection has been successful.")
    else:
        raise CloudProviderError(
            "Cannot establish connection to EC2 region {0}"
            .format(self._region_name))

    if not self._vpc:
        vpc_connection = None
        self._vpc_id = None
    else:
        vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc)
except Exception as err:
    log.error("Error connecting to EC2: %s", err)
    raise

self._ec2_connection, self._vpc_connection = (
    ec2_connection, vpc_connection)
return self._ec2_connection
def _connect(self)
Connect to the EC2 cloud provider. :return: :py:class:`boto.ec2.connection.EC2Connection` :raises: Generic exception on error
3.821883
3.719001
1.027664
instance = self._load_instance(instance_id)
instance.terminate()
del self._instances[instance_id]
def stop_instance(self, instance_id)
Stops the instance gracefully. :param str instance_id: instance identifier
4.134608
6.130091
0.674477
instance = self._load_instance(instance_id)
IPs = [ip for ip in (instance.private_ip_address,
                     instance.ip_address) if ip]

# We also need to check if there is any floating IP associated
if self.request_floating_ip and not self._vpc:
    # We need to list the floating IPs for this instance
    floating_ips = [ip for ip in self._ec2_connection.get_all_addresses()
                    if ip.instance_id == instance.id]
    if not floating_ips:
        log.debug("Public IP address has to be assigned through"
                  " elasticluster.")
        ip = self._allocate_address(instance)
        # This is probably the preferred IP we want to use
        IPs.insert(0, ip)
    else:
        IPs = [ip.public_ip for ip in floating_ips] + IPs

return list(set(IPs))
def get_ips(self, instance_id)
Retrieves the private and public ip addresses for a given instance. :return: list (ips)
4.318125
4.331546
0.996902
instance = self._load_instance(instance_id)

if instance.update() == "running":
    # If the instance is up & running, ensure it has an IP address.
    if not instance.ip_address and self.request_floating_ip:
        log.debug("Public IP address has to be assigned through"
                  " elasticluster.")
        self._allocate_address(instance)
        instance.update()
    return True
else:
    return False
def is_instance_running(self, instance_id)
Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise
7.66084
8.189274
0.935472
connection = self._connect()
free_addresses = [ip for ip in connection.get_all_addresses()
                  if not ip.instance_id]
if not free_addresses:
    try:
        # make the newly-allocated address available to the
        # association step below
        free_addresses.append(connection.allocate_address())
    except Exception as ex:
        log.error("Unable to allocate a public IP address to instance `%s`",
                  instance.id)
        return None

try:
    address = free_addresses.pop()
    instance.use_ip(address)
    return address.public_ip
except Exception as ex:
    log.error("Unable to associate IP address %s to instance `%s`",
              address, instance.id)
    return None
def _allocate_address(self, instance)
Allocate a free public IP address to the given instance. :param instance: instance to assign the address to :type instance: py:class:`boto.ec2.instance.Instance` :return: public IP address
3.417352
3.35112
1.019764