code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def prepare_new_container(soa_dir: str, synapse_service_dir: str, service: str, instance: str, mac: str) -> None:
    """Update iptables to include rules for a new (not yet running) MAC address.

    :param soa_dir: the SOA configuration directory to read service config from
    :param synapse_service_dir: directory containing synapse service configuration
    :param service: the service name
    :param instance: the instance of the service
    :param mac: MAC address the dispatch rule should match on
    """
    ensure_shared_chains()  # probably already set, but just to be safe
    # Bring the per-(service, instance) chain's rules up to date before
    # wiring the new container's MAC into the PAASTA dispatch chain.
    service_group = ServiceGroup(service, instance)
    service_group.update_rules(soa_dir, synapse_service_dir)
    iptables.insert_rule("PAASTA", dispatch_rule(service_group.chain_name, mac)) | Update iptables to include rules for a new (not yet running) MAC address | prepare_new_container | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |
def firewall_flock(flock_path: str = DEFAULT_FIREWALL_FLOCK_PATH):
    """Grab an exclusive flock to avoid concurrent iptables updates.

    Generator holding the lock for the duration of the ``yield``
    (presumably wrapped with @contextmanager at the definition site —
    decorator not visible here; TODO confirm).

    :param flock_path: path of the lock file to flock
    """
    # io.FileIO gives a real file descriptor suitable for flock();
    # mode "w" creates the lock file if it does not yet exist.
    with io.FileIO(flock_path, "w") as f:
        # NOTE(review): timed_flock is expected to raise if the lock cannot be
        # acquired within DEFAULT_FIREWALL_FLOCK_TIMEOUT_SECS — confirm.
        with timed_flock(f, seconds=DEFAULT_FIREWALL_FLOCK_TIMEOUT_SECS):
            yield | Grab an exclusive flock to avoid concurrent iptables updates | firewall_flock | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |
def load_flinkeks_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> FlinkEksDeploymentConfig:
    """Read a service instance's configuration for Flink on EKS.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding
        deployments.json for this service should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A FlinkEksDeploymentConfig built from the merged
        service-level and instance-level configuration
    """
    # Service-level defaults, overridden below by the instance-specific config.
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service, instance, "flinkeks", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the branch
        # and deploy group needed for the deployments.json lookup.
        temp_instance_config = FlinkEksDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return FlinkEksDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    ) | Read a service instance's configuration for Flink on EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_flinkeks_instance_config | python | Yelp/paasta | paasta_tools/flinkeks_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flinkeks_tools.py | Apache-2.0 |
def get_pool(self) -> Optional[str]:
    """Return the scheduling pool for this Flink instance based on the 'spot' key.

    Reads 'spot' from this instance's config_dict; only an explicit
    ``spot: false`` opts the instance out of the spot pool.

    :returns: "flink" when spot is explicitly disabled, otherwise "flink-spot".
    """
    spot_config = self.config_dict.get("spot", None)
    if spot_config is False:
        # Explicitly opted out of spot capacity.
        return "flink"
    else:
        # if not set or True, Flink instance defaults to use flink-spot pool
        return "flink-spot" |
Parses flink_pool from a specific Flink Deployment instance's configuration data, using key 'spot'.
Args:
flink_deployment_config_data: The FlinkDeploymentConfig for a specific Flink yelpsoa instance
Returns:
The flink pool string.
| get_pool | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
def load_flink_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> FlinkDeploymentConfig:
    """Read a service instance's configuration for Flink.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding
        deployments.json for this service should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A FlinkDeploymentConfig built from the merged
        service-level and instance-level configuration
    """
    # Service-level defaults, overridden below by the instance-specific config.
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service, instance, "flink", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the branch
        # and deploy group needed for the deployments.json lookup.
        temp_instance_config = FlinkDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return FlinkDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    ) | Read a service instance's configuration for Flink.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_flink_instance_config | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
def _filter_for_endpoint(json_response: Any, endpoint: str) -> Mapping[str, Any]:
    """Filter a Flink REST API json response to a known subset of fields.

    :param json_response: parsed json body returned by the Flink endpoint
    :param endpoint: the endpoint path that produced the response
    :returns: the filtered mapping; unknown endpoints (and the bare "jobs"
        listing) pass through unchanged
    """
    if endpoint == "config":
        return {
            key: value for (key, value) in json_response.items() if key in CONFIG_KEYS
        }
    if endpoint == "overview":
        return {
            key: value for (key, value) in json_response.items() if key in OVERVIEW_KEYS
        }
    # Order matters: the exact "jobs" listing is returned unfiltered; only the
    # more specific "jobs..." endpoints (e.g. job details) are filtered below.
    if endpoint == "jobs":
        return json_response
    if endpoint.startswith("jobs"):
        return {
            key: value
            for (key, value) in json_response.items()
            if key in JOB_DETAILS_KEYS
        }
    return json_response |
Filter json response to include only a subset of fields.
| _filter_for_endpoint | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
def get_flink_jobs_from_paasta_api_client(
    service: str, instance: str, client: PaastaOApiClient
) -> FlinkJobs:
    """Get flink jobs for a (service, instance) pair via the paasta api endpoint.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param client: The paasta api client
    :returns: Flink jobs in the flink cluster
    """
    return client.service.list_flink_cluster_jobs(
        service=service,
        instance=instance,
    ) | Get flink jobs for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink jobs in the flink cluster | get_flink_jobs_from_paasta_api_client | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
async def get_flink_job_details_from_paasta_api_client(
    service: str, instance: str, job_id: str, client: PaastaOApiClient
) -> FlinkJobDetails:
    """Get flink job details for a (service, instance) pair via the paasta api endpoint.

    NOTE(review): declared ``async`` but contains no ``await`` — the client
    call here is synchronous; callers still must await the coroutine.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param job_id: The id of the flink job to fetch details for
    :param client: The paasta api client
    :returns: Details of the requested flink job
    """
    return client.service.get_flink_cluster_job_details(
        service=service,
        instance=instance,
        job_id=job_id,
    ) | Get flink job details for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink jobs in the flink cluster | get_flink_job_details_from_paasta_api_client | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
def get_flink_config_from_paasta_api_client(
    service: str, instance: str, client: PaastaOApiClient
) -> FlinkConfig:
    """Get flink config for a (service, instance) pair via the paasta api endpoint.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param client: The paasta api client
    :returns: Flink cluster configurations
    """
    return client.service.get_flink_cluster_config(
        service=service,
        instance=instance,
    ) | Get flink config for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink cluster configurations | get_flink_config_from_paasta_api_client | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
def get_flink_overview_from_paasta_api_client(
    service: str, instance: str, client: PaastaOApiClient
) -> FlinkClusterOverview:
    """Get flink cluster overview for a (service, instance) pair via the paasta api endpoint.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param client: The paasta api client
    :returns: Flink cluster overview
    """
    return client.service.get_flink_cluster_overview(
        service=service,
        instance=instance,
    ) | Get flink cluster overview for (service, instance) pair by connecting to the paasta api endpoint.
Appends exception to output list if any.
:param service: The service name
:param instance: The instance of the service to retrieve
:param client: The paasta api client
:returns: Flink cluster overview | get_flink_overview_from_paasta_api_client | python | Yelp/paasta | paasta_tools/flink_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/flink_tools.py | Apache-2.0 |
def get_deploy_group_mappings(
    soa_dir: str, service: str
) -> Tuple[Dict[str, V1_Mapping], V2_Mappings]:
    """Gets mappings from service:deploy_group to services-service:paasta-hash-image_version,
    where hash is the current SHA at the HEAD of branch_name and image_version
    can be used to provide additional version information for the Docker image.

    :param soa_dir: The SOA configuration directory to read from
    :param service: The service to build mappings for
    :returns: A two-tuple of (v1 mappings, v2 mappings). The v1 mapping maps
        service:paasta-branch aliases to dictionaries containing:

        - 'docker_image': something like "services-service:paasta-hash". This is
          relative to the paasta docker registry.
        - 'desired_state': either 'start' or 'stop'. Says whether this branch
          should be running.
        - 'force_bounce': An arbitrary value, which may be None. A change in this
          value should trigger a bounce, even if the other properties of this app
          have not changed.
    """
    mappings: Dict[str, V1_Mapping] = {}
    v2_mappings: V2_Mappings = {"deployments": {}, "controls": {}}
    git_url = get_git_url(service=service, soa_dir=soa_dir)
    # Some pseudo-services like toolboxes explicitly have no git_url, and therefore no deployments
    if git_url is None:
        return mappings, v2_mappings
    # Most of the time of this function is in two parts:
    # 1. getting remote refs from git. (Mostly IO, just waiting for git to get back to us.)
    # 2. loading instance configs. (Mostly CPU, copy.deepcopying yaml over and over again)
    # Let's do these two things in parallel.
    # NOTE(review): the executor is never explicitly shut down; its single
    # worker thread is left to be reaped at interpreter exit — confirm intended.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    remote_refs_future = executor.submit(remote_git.list_remote_refs, git_url)
    service_configs = get_instance_configs_for_service(soa_dir=soa_dir, service=service)
    deploy_group_branch_mappings = {
        config.get_branch(): config.get_deploy_group() for config in service_configs
    }
    if not deploy_group_branch_mappings:
        log.info("Service %s has no valid deploy groups. Skipping.", service)
        return mappings, v2_mappings
    remote_refs = remote_refs_future.result()
    # Latest (tag name, sha, image_version) per deploy group.
    tag_by_deploy_group = {
        dg: get_latest_deployment_tag(remote_refs, dg)
        for dg in set(deploy_group_branch_mappings.values())
    }
    state_by_branch_and_sha = get_desired_state_by_branch_and_sha(remote_refs)
    for control_branch, deploy_group in deploy_group_branch_mappings.items():
        (deploy_ref_name, deploy_ref_sha, image_version) = tag_by_deploy_group[
            deploy_group
        ]
        if deploy_ref_name in remote_refs:
            commit_sha = remote_refs[deploy_ref_name]
            control_branch_alias = f"{service}:paasta-{control_branch}"
            control_branch_alias_v2 = f"{service}:{control_branch}"
            docker_image = build_docker_image_name(service, commit_sha, image_version)
            # Default to running ('start') with no forced bounce when no
            # explicit desired-state ref exists for this branch+sha.
            desired_state, force_bounce = state_by_branch_and_sha.get(
                (control_branch, deploy_ref_sha), ("start", None)
            )
            log.info("Mapping %s to docker image %s", control_branch, docker_image)
            v2_mappings["deployments"][deploy_group] = {
                "docker_image": docker_image,
                "git_sha": commit_sha,
                "image_version": image_version,
            }
            mappings[control_branch_alias] = {
                "docker_image": docker_image,
                "desired_state": desired_state,
                "force_bounce": force_bounce,
            }
            v2_mappings["controls"][control_branch_alias_v2] = {
                "desired_state": desired_state,
                "force_bounce": force_bounce,
            }
    return mappings, v2_mappings | Gets mappings from service:deploy_group to services-service:paasta-hash-image_version,
where hash is the current SHA at the HEAD of branch_name and image_version
can be used to provide additional version information for the Docker image.
This is done for all services in soa_dir.
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary mapping service:deploy_group to a dictionary
containing:
- 'docker_image': something like "services-service:paasta-hash". This is
relative to the paasta docker registry.
- 'desired_state': either 'start' or 'stop'. Says whether this branch
should be running.
- 'force_bounce': An arbitrary value, which may be None. A change in this
value should trigger a bounce, even if the other properties of this app
have not changed.
| get_deploy_group_mappings | python | Yelp/paasta | paasta_tools/generate_deployments_for_service.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/generate_deployments_for_service.py | Apache-2.0 |
async def get_spool(spool_url: str) -> SpoolInfo:
    """Query hacheck for the state of a task, and parse the result into a dictionary.

    :param spool_url: hacheck spool URL to query; if None, returns None
        (despite the SpoolInfo annotation).
    :returns: a SpoolInfo dict; just ``{"state": "up"}`` on HTTP 200,
        otherwise the parsed service/state/since/until/reason fields.
    """
    if spool_url is None:
        return None
    # TODO: aiohttp says not to create a session per request. Fix this.
    async with aiohttp.ClientSession(timeout=HACHECK_TIMEOUT) as session:
        async with session.get(
            spool_url, headers={"User-Agent": get_user_agent()}
        ) as response:
            if response.status == 200:
                return {"state": "up"}

            # Non-200: parse hacheck's human-readable body, e.g.
            # "Service foo in down state since 123.4: reason".
            regex = "".join(
                [
                    "^",
                    r"Service (?P<service>.+)",
                    r" in (?P<state>.+) state",
                    r"(?: since (?P<since>[0-9.]+))?",
                    r"(?: until (?P<until>[0-9.]+))?",
                    r"(?:: (?P<reason>.*))?",
                    "$",
                ]
            )
            response_text = await response.text()
            # NOTE(review): assumes the body always matches; an unexpected
            # response makes `match` None and raises AttributeError below.
            match = re.match(regex, response_text)
            groupdict = match.groupdict()
            info: SpoolInfo = {}
            info["service"] = groupdict["service"]
            info["state"] = groupdict["state"]
            # Named groups are always present in groupdict(), so these `in`
            # checks are always True; unmatched groups are None and fall back
            # to 0 via `or 0`.
            if "since" in groupdict:
                info["since"] = float(groupdict["since"] or 0)
            if "until" in groupdict:
                info["until"] = float(groupdict["until"] or 0)
            if "reason" in groupdict:
                info["reason"] = groupdict["reason"]
            return info | Query hacheck for the state of a task, and parse the result into a dictionary. | get_spool | python | Yelp/paasta | paasta_tools/hacheck.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/hacheck.py | Apache-2.0 |
def ensure_chain(chain, rules):
    """Idempotently ensure a chain exists and has an exact set of rules.

    This function creates or updates an existing chain to match the rules
    passed in.

    This function will not reorder existing rules, but any new rules are always
    inserted at the front of the chain.

    :param chain: name of the iptables chain to ensure
    :param rules: the exact collection of rules the chain should contain
    """
    try:
        current_rules = set(list_chain(chain))
    except ChainDoesNotExist:
        create_chain(chain)
        current_rules = set()
    # Add anything missing (inserted at the front of the chain)...
    for rule in rules:
        if rule not in current_rules:
            insert_rule(chain, rule)
    # ...then drop anything present that was not requested.
    extra_rules = current_rules - set(rules)
    if extra_rules:
        delete_rules(chain, extra_rules) | Idempotently ensure a chain exists and has an exact set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
| ensure_chain | python | Yelp/paasta | paasta_tools/iptables.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/iptables.py | Apache-2.0 |
def reorder_chain(chain_name):
    """Ensure that any REJECT rules are last, and any LOG rules are second-to-last.

    :param chain_name: name of the iptables chain (in the filter table) to reorder
    """
    table = iptc.Table(iptc.Table.FILTER)
    # Do all moves in one iptables transaction so the chain is never observed
    # in a half-reordered state.
    with iptables_txn(table):
        rules = list_chain(chain_name)
        chain = iptc.Chain(table, chain_name)
        # sort the rules by _rule_sort_key, which uses (RULE_TARGET_SORT_ORDER, idx)
        # — the original index keeps the sort stable within each target class.
        sorted_rules_with_indices = sorted(enumerate(rules), key=_rule_sort_key)
        for new_index, (old_index, rule) in enumerate(sorted_rules_with_indices):
            if new_index == old_index:
                continue  # already in the right slot
            log.debug(f"reordering chain {chain_name} rule {rule} to #{new_index}")
            chain.replace_rule(rule.to_iptc(), new_index) | Ensure that any REJECT rules are last, and any LOG rules are second-to-last | reorder_chain | python | Yelp/paasta | paasta_tools/iptables.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/iptables.py | Apache-2.0 |
def list_chain(chain_name):
    """List rules in a chain.

    :param chain_name: name of the iptables chain (in the filter table)
    :returns: a tuple of Rule objects in the chain
    :raises ChainDoesNotExist: if the chain is not present in the table
    """
    table = iptc.Table(iptc.Table.FILTER)
    chain = iptc.Chain(table, chain_name)
    # TODO: is there any way to do this without listing all chains? (probably slow)
    # If the chain doesn't exist, chain.rules will be an empty list, so we need
    # to make sure the chain actually _does_ exist.
    if chain in table.chains:
        return tuple(Rule.from_iptc(rule) for rule in chain.rules)
    else:
        raise ChainDoesNotExist(chain_name) | List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
| list_chain | python | Yelp/paasta | paasta_tools/iptables.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/iptables.py | Apache-2.0 |
def load_kafkacluster_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> KafkaClusterDeploymentConfig:
    """Read a service instance's configuration for KafkaCluster.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding
        deployments.json for this service should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A KafkaClusterDeploymentConfig built from the merged
        service-level and instance-level configuration
    """
    # Service-level defaults, overridden below by the instance-specific config.
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service, instance, "kafkacluster", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the branch
        # and deploy group needed for the deployments.json lookup.
        temp_instance_config = KafkaClusterDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return KafkaClusterDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    ) | Read a service instance's configuration for KafkaCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_kafkacluster_instance_config | python | Yelp/paasta | paasta_tools/kafkacluster_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kafkacluster_tools.py | Apache-2.0 |
def load_kubernetes_service_config_no_cache(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> "KubernetesDeploymentConfig":
    """Read a service instance's configuration for kubernetes.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding
        deployments.json for this service should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A KubernetesDeploymentConfig built from the merged
        service-level and instance-level configuration
    """
    # Service-level defaults, overridden below by the instance-specific config.
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service, instance, "kubernetes", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the branch
        # and deploy group needed for the deployments.json lookup.
        temp_instance_config = KubernetesDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return KubernetesDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    ) | Read a service instance's configuration for kubernetes.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param name: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_kubernetes_service_config_no_cache | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def load_kubernetes_service_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> "KubernetesDeploymentConfig":
    """Read a service instance's configuration for kubernetes.

    Thin wrapper around load_kubernetes_service_config_no_cache (presumably a
    caching decorator is applied at the definition site — not visible here;
    TODO confirm).

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding
        deployments.json for this service should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A KubernetesDeploymentConfig for the service instance
    """
    return load_kubernetes_service_config_no_cache(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
    ) | Read a service instance's configuration for kubernetes.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param name: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_kubernetes_service_config | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def __new__(
    cls,
    component: Optional[str] = None,
    config_file: Optional[str] = None,
    context: Optional[str] = None,
) -> "KubeClient":
    """By @lru_cache'ing this function, repeated instantiations of KubeClient
    with the same arguments will return the exact same object. This makes it
    possible to effectively cache function calls that take a KubeClient as an
    argument.

    NOTE(review): the @lru_cache decorator itself is not visible in this
    snippet — confirm it is applied at the definition site; the parameters
    here exist solely to form the cache key.
    """
    return super().__new__(cls) | By @lru_cache'ing this function, repeated instantiations of KubeClient with the same arguments will return the
exact same object. This makes it possible to effectively cache function calls that take a KubeClient as an
argument. | __new__ | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def allowlist_denylist_to_requirements(
    allowlist: DeployWhitelist, denylist: DeployBlacklist
) -> List[Tuple[str, str, List[str]]]:
    """Converts deploy_whitelist and deploy_blacklist to a list of
    requirements, which can be converted to node affinities.

    :param allowlist: optional (location_type, allowed_values) pair
    :param denylist: optional list of (location_type, disallowed_value) pairs
    :returns: a list of (node_label, operator, values) requirement tuples
    """
    requirements = []
    # convert whitelist into a node selector req
    if allowlist:
        location_type, alloweds = allowlist
        requirements.append((to_node_label(location_type), "In", alloweds))
    # convert blacklist into multiple node selector reqs
    if denylist:
        # not going to prune for duplicates, or group blacklist items for
        # same location_type. makes testing easier and k8s can handle it.
        for location_type, not_allowed in denylist:
            requirements.append((to_node_label(location_type), "NotIn", [not_allowed]))
    return requirements | Converts deploy_whitelist and deploy_blacklist to a list of
requirements, which can be converted to node affinities.
| allowlist_denylist_to_requirements | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def raw_selectors_to_requirements(
    raw_selectors: Mapping[str, NodeSelectorConfig]
) -> List[Tuple[str, str, List[str]]]:
    """Converts certain node_selectors into requirements, which can be
    converted to node affinities.

    :param raw_selectors: mapping of label -> selector config; a config may be
        a plain list of strings (shorthand for the "In" operator) or a list of
        operator dicts.
    :returns: a list of (node_label, operator, values) requirement tuples
    :raises ValueError: on an unrecognized node affinity operator
    """
    requirements: List[Tuple[str, str, List[str]]] = []
    for label, configs in raw_selectors.items():
        operator_configs: List[NodeSelectorOperator] = []
        # Skip anything that is not a non-empty list.
        if type(configs) is not list or len(configs) == 0:
            continue
        elif type(configs[0]) is str:
            # specifying an array/list of strings for a label is shorthand
            # for the "In" operator
            operator_configs = [
                NodeSelectorInNotIn(
                    {"operator": "In", "values": cast(List[str], configs)}
                )
            ]
        else:
            # configs should already be a List[NodeSelectorOperator]
            operator_configs = cast(List[NodeSelectorOperator], configs)

        label = to_node_label(label)
        for config in operator_configs:
            if config["operator"] in {"In", "NotIn"}:
                config = cast(NodeSelectorInNotIn, config)
                values = config["values"]
            elif config["operator"] in {"Exists", "DoesNotExist"}:
                config = cast(NodeSelectorExistsDoesNotExist, config)
                values = []  # these operators take no values
            elif config["operator"] in {"Gt", "Lt"}:
                config = cast(NodeSelectorGtLt, config)
                # config["value"] is validated by jsonschema to be an int. but,
                # k8s expects singleton list of the int represented as a str
                # for these operators.
                values = [str(config["value"])]
            else:
                raise ValueError(
                    f"Unknown k8s node affinity operator: {config['operator']}"
                )
            requirements.append((label, config["operator"], values))
    return requirements | Converts certain node_selectors into requirements, which can be
converted to node affinities.
| raw_selectors_to_requirements | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_bounce_method(self) -> str:
    """Get the bounce method specified in the service's kubernetes configuration.

    :returns: the configured bounce_method (default "crossover")
    :raises Exception: if the instance defines an EBS volume but does not use
        the required "downthenup" bounce method
    """
    # map existing bounce methods to k8s equivalents.
    # but if there's an EBS volume we must downthenup to free up the volume.
    # in the future we may support stateful sets to dynamically create the volumes
    bounce_method = self.config_dict.get("bounce_method", "crossover")
    if self.get_aws_ebs_volumes() and not bounce_method == "downthenup":
        raise Exception(
            "If service instance defines an EBS volume it must use a downthenup bounce_method"
        )
    return bounce_method | Get the bounce method specified in the service's kubernetes configuration. | get_bounce_method | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_autoscaling_scaling_policy(
    self,
    max_replicas: int,
    autoscaling_params: AutoscalingParamsDict,
) -> Dict:
    """Returns the k8s HPA scaling policy in raw JSON. Requires k8s v1.18
    to work.

    :param max_replicas: maximum replica count (currently unused in the body;
        kept for interface compatibility)
    :param autoscaling_params: autoscaling configuration; the optional
        'scaledown_policies' key overrides the default scaleDown behavior
    :returns: a dict suitable for the HPA 'behavior' field
    """
    # The HPA scaling algorithm is as follows. Every sync period (default:
    # 15 seconds), the HPA will:
    #   1. determine what the desired capacity is from metrics
    #   2. apply min/max replica scaling limits
    #   3. rate-limit the scaling magnitude (e.g. scale down by no more than
    #      30% of current replicas)
    #   4. constrain the scaling magnitude by the period seconds (e.g. scale
    #      down by no more than 30% of current replicas per 60 seconds)
    #   5. record the desired capacity, then pick the highest capacity from
    #      the stabilization window (default: last 300 seconds) as the final
    #      desired capacity.
    #      - the idea is to stabilize scaling against (heavily) fluctuating
    #        metrics
    policy = {
        "scaleDown": {
            "stabilizationWindowSeconds": 300,
            # the policy in a human-readable way: scale down every 60s by
            # at most 30% of current replicas.
            "selectPolicy": "Max",
            "policies": [{"type": "Percent", "value": 30, "periodSeconds": 60}],
        }
    }
    # Per-service overrides take precedence over the defaults above.
    policy["scaleDown"].update(autoscaling_params.get("scaledown_policies", {}))
    return policy | Returns the k8s HPA scaling policy in raw JSON. Requires k8s v1.18
to work.
| get_autoscaling_scaling_policy | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_sanitised_volume_name(self, volume_name: str, length_limit: int = 0) -> str:
    """Turn a mount path into a valid (short) kubernetes volume name.

    Slashes and dots are encoded, then the result is run through
    sanitise_kubernetes_name. If a length_limit is given and exceeded, the
    name is truncated and suffixed with "--" plus 4 hex chars of its md5 so
    distinct long names stay distinct.
    """
    encoded = volume_name.rstrip("/").replace("/", "slash-").replace(".", "dot-")
    candidate = sanitise_kubernetes_name(encoded)
    if not length_limit or len(candidate) <= length_limit:
        return candidate
    digest = hashlib.md5(candidate.encode("ascii")).hexdigest()[:4]
    return candidate[0 : length_limit - 6] + "--" + digest
def get_readiness_check_script(
    self, system_paasta_config: SystemPaastaConfig
) -> List[str]:
    """Return the script used to readiness-check the service in smartstack / envoy."""
    envoy_enabled = self.get_enable_envoy_readiness_check(system_paasta_config)
    nerve_enabled = self.get_enable_nerve_readiness_check(system_paasta_config)
    if envoy_enabled and nerve_enabled:
        # both checks requested: use the combined script
        return system_paasta_config.get_envoy_nerve_readiness_check_script()
    if envoy_enabled:
        return system_paasta_config.get_envoy_readiness_check_script()
    # nerve-only (or neither explicitly enabled): nerve check is the fallback
    return system_paasta_config.get_nerve_readiness_check_script()
def get_env_vars_that_use_secrets(self) -> Tuple[Dict[str, str], Dict[str, str]]:
    """Split secret-referencing env vars into (non-shared, shared) dicts.

    Dict values are the raw secret refs as written in yelpsoa-configs, e.g.
    "SECRET(foo)" or "SHARED_SECRET(bar)"; decode them with
    get_secret_name_from_ref.
    """
    private_refs: Dict[str, str] = {}
    shared_refs: Dict[str, str] = {}
    for name, value in self.get_env().items():
        if not is_secret_ref(value):
            continue
        bucket = shared_refs if is_shared_secret(value) else private_refs
        bucket[name] = value
    return private_refs, shared_refs
secrets, and the second is vars that use shared secrets.
The values of the dictionaries are the secret refs as formatted in yelpsoa-configs, e.g. "SECRET(foo)"
or "SHARED_SECRET(bar)". These can be decoded with get_secret_name_from_ref.
| get_env_vars_that_use_secrets | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_hacheck_prestop_sleep_seconds(self) -> int:
    """Seconds to sleep between hadown and terminating the hacheck container.

    hacheck should stay up slightly longer than the main container, hence
    one second more than the main container's pre-stop sleep. If hacheck
    dies first it's harmless: healthchecks fail and the service is removed
    from smartstack, the same outcome hadown produces.
    """
    # Every current caller (hacheck / gunicorn sidecar) implies the
    # service is registered in smartstack.
    return 1 + self.get_prestop_sleep_seconds(is_in_smartstack=True)
up for slightly longer than the main container is, so we default to pre_stop_drain_seconds + 1.
It doesn't super matter if hacheck goes down before the main container -- if it's down, healthchecks will fail
and the service will be removed from smartstack, which is the same effect we get after running hadown.
| get_hacheck_prestop_sleep_seconds | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_datastore_credentials_secrets_volume(self) -> V1Volume:
    """Build the volume projecting datastore credentials into the pod.

    All credentials live in one Kubernetes Secret; each entry is mapped via
    an item->path projection to /datastore/<datastore>/<credential>/credentials.
    Returns None when the instance has no datastore credentials configured,
    or when the secret's configmap signature is missing (taken to mean the
    secret itself doesn't exist yet).
    """
    datastore_credentials = self.get_datastore_credentials()
    if not datastore_credentials:
        return None
    # Assume k8s secret exists if its configmap signature exists
    if not self.get_datastore_credentials_secret_hash():
        log.warning(
            f"Expected to find datastore_credentials secret signature {self.get_datastore_credentials_secret_name()} for {self.get_service()}.{self.get_instance()} on {self.get_namespace()}"
        )
        return None
    projected_items = []
    for datastore, credentials in datastore_credentials.items():
        # mypy loses type hints on '.items' and throws false positives. unfortunately have to type: ignore
        # https://github.com/python/mypy/issues/7178
        for credential in credentials:  # type: ignore
            projected_items.append(
                {
                    "key": get_vault_key_secret_name(
                        f"secrets/datastore/{datastore}/{credential}"
                    ),
                    "mode": mode_to_int("0444"),
                    "path": f"{datastore}/{credential}/credentials",
                }
            )
    return V1Volume(
        name=self.get_datastore_secret_volume_name(),
        secret=V1SecretVolumeSource(
            secret_name=self.get_datastore_credentials_secret_name(),
            default_mode=mode_to_int("0444"),
            items=projected_items,
            optional=False,
        ),
    )
All credentials are stored in 1 Kubernetes Secret, which are mapped on an item->path
structure to /datastore/<datastore>/<credential>/<password file>.
| get_datastore_credentials_secrets_volume | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_boto_secret_signature_name(self) -> str:
    """Name of the configmap signature for this instance's boto_keys secret.

    This naming convention must stay stable so pre-existing boto_keys
    configmap signatures don't trigger spurious bounces (PAASTA-17910).
    NOTE: only part of the name is hashed, so very long service or instance
    names may still overflow the kubernetes name-length limit.
    """
    hashed_deployment = limit_size_with_hash(
        f"paasta-boto-key-{self.get_sanitised_deployment_name()}"
    )
    return f"{self.get_namespace()}-secret-{self.get_sanitised_service_name()}-{hashed_deployment}-signature"
Keep the following signature naming convention so that bounces do not happen because boto_keys configmap signatures already exist, see PAASTA-17910
Note: Since hashing is done only on a portion of secret, it may explode if service or instance names are too long
| get_boto_secret_signature_name | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_datastore_credentials_signature_name(self) -> str:
    """Signature name for this instance's datastore-credentials secret.

    All datastore credentials share one Kubernetes secret, so they also
    share one signature; it's keyed on the instance because credentials
    are configured per-instance.
    """
    return _get_secret_signature_name(
        self.get_namespace(),
        "datastore-credentials",
        self.get_service(),
        key_name=self.get_instance(),
    )
All datastore credentials are stored in a single Kubernetes secret, so they share a name
| get_datastore_credentials_signature_name | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def set_autoscaled_instances(
    self, instance_count: int, kube_client: KubeClient
) -> None:
    """Set the replica count through the same path the autoscaler uses."""
    set_instances_for_kubernetes_service(
        service_config=self,
        instance_count=instance_count,
        kube_client=kube_client,
    )
def get_desired_instances(self) -> int:
    """Return the configured instance count, constrained for EBS users.

    An EBS volume can only be attached to a single instance, so services
    that define one must run 0 or 1 replicas. Statefulsets, which manage
    EBS per-replica, may lift this restriction in the future.
    """
    instances = super().get_desired_instances()
    if instances not in (0, 1) and self.get_aws_ebs_volumes():
        raise Exception(
            "Number of instances must be 1 or 0 if an EBS volume is defined."
        )
    return instances
since we can't attach to multiple instances. In the future we might support
statefulsets which are clever enough to manage EBS for you | get_desired_instances | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_enable_nerve_readiness_check(
    self, system_paasta_config: SystemPaastaConfig
) -> bool:
    """Whether to add a readiness probe verifying all registrations are UP
    on the local synapse haproxy. Instance config ("check_haproxy") wins
    over the system-wide default."""
    bounce_health_params = self.config_dict.get("bounce_health_params", {})
    return bounce_health_params.get(
        "check_haproxy", system_paasta_config.get_enable_nerve_readiness_check()
    )
are UP on the local synapse haproxy | get_enable_nerve_readiness_check | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_enable_envoy_readiness_check(
    self, system_paasta_config: SystemPaastaConfig
) -> bool:
    """Whether to add a readiness probe verifying all registrations are UP
    on the local Envoy. Instance config ("check_envoy") wins over the
    system-wide default."""
    bounce_health_params = self.config_dict.get("bounce_health_params", {})
    return bounce_health_params.get(
        "check_envoy", system_paasta_config.get_enable_envoy_readiness_check()
    )
are UP on the local Envoy | get_enable_envoy_readiness_check | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_namespace(self) -> str:
    """Get the kubernetes namespace for this instance.

    Defaults to the per-service namespace "paastasvc-<sanitised service>"
    when the instance config does not set "namespace" explicitly.
    """
    return self.config_dict.get(
        "namespace", f"paastasvc-{self.get_sanitised_service_name()}"
    )
def format_kubernetes_job(
    self,
    job_label: str,
    deadline_seconds: int = 3600,
    keep_routable_ip: bool = False,
    include_sidecars: bool = False,
) -> V1Job:
    """Create the config for launching the deployment as a Job
    :param str job_label: value to set for the "job type" label
    :param int deadline_seconds: maximum allowed duration for the job
    :param bool keep_routable_ip: maintain routable IP annotation in pod template
    :param bool include_sidecars: do not discard sidecar containers when building pod spec
    :return: job object
    :raises InvalidKubernetesConfig: if building any part of the spec fails
    """
    additional_labels = {paasta_prefixed(JOB_TYPE_LABEL_NAME): job_label}
    try:
        docker_url = self.get_docker_url()
        git_sha = get_git_sha_from_dockerurl(docker_url, long=True)
        system_paasta_config = load_system_paasta_config()
        image_version = self.get_image_version()
        if image_version is not None:
            additional_labels[paasta_prefixed("image_version")] = image_version
        # Jobs are one-shot: don't restart failed pods, and (unless the
        # caller opts out) drop the routable-IP annotation.
        pod_template = self.get_pod_template_spec(
            git_sha=git_sha,
            system_paasta_config=system_paasta_config,
            restart_on_failure=False,
            include_sidecars=include_sidecars,
            force_no_routable_ip=not keep_routable_ip,
        )
        # Apply the job-type (and optional image_version) labels to both the
        # pod template and, below, the Job object itself.
        pod_template.metadata.labels.update(additional_labels)
        complete_config = V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=self.get_kubernetes_metadata(git_sha),
            spec=V1JobSpec(
                active_deadline_seconds=deadline_seconds,
                ttl_seconds_after_finished=0,  # remove job resource after completion
                template=pod_template,
            ),
        )
        complete_config.metadata.labels.update(additional_labels)
    except Exception as e:
        # Wrap any failure so callers get a consistent, service-tagged error.
        raise InvalidKubernetesConfig(e, self.get_service(), self.get_instance())
    log.debug(
        f"Complete configuration for job instance is: {complete_config}",
    )
    return complete_config
def format_kubernetes_app(self) -> Union[V1Deployment, V1StatefulSet]:
    """Create the configuration that will be passed to the Kubernetes REST API.

    Instances with persistent volumes become StatefulSets (with volume claim
    templates); everything else becomes a Deployment.

    :returns: a fully-populated V1Deployment or V1StatefulSet
    :raises InvalidKubernetesConfig: if building any part of the spec fails
    """
    try:
        system_paasta_config = load_system_paasta_config()
        docker_url = self.get_docker_url()
        git_sha = get_git_sha_from_dockerurl(docker_url, long=True)
        complete_config: Union[V1StatefulSet, V1Deployment]
        if self.get_persistent_volumes():
            complete_config = V1StatefulSet(
                api_version="apps/v1",
                kind="StatefulSet",
                metadata=self.get_kubernetes_metadata(git_sha),
                spec=V1StatefulSetSpec(
                    service_name=self.get_sanitised_deployment_name(),
                    volume_claim_templates=self.get_volume_claim_templates(),
                    replicas=self.get_desired_instances(),
                    revision_history_limit=0,
                    selector=V1LabelSelector(
                        match_labels={
                            "paasta.yelp.com/service": self.get_service(),
                            "paasta.yelp.com/instance": self.get_instance(),
                        }
                    ),
                    template=self.get_pod_template_spec(
                        git_sha=git_sha, system_paasta_config=system_paasta_config
                    ),
                    pod_management_policy=self.get_pod_management_policy(),
                ),
            )
        else:
            complete_config = V1Deployment(
                api_version="apps/v1",
                kind="Deployment",
                metadata=self.get_kubernetes_metadata(git_sha),
                spec=V1DeploymentSpec(
                    replicas=self.get_desired_instances(),
                    min_ready_seconds=self.get_min_task_uptime(),
                    selector=V1LabelSelector(
                        match_labels={
                            "paasta.yelp.com/service": self.get_service(),
                            "paasta.yelp.com/instance": self.get_instance(),
                        }
                    ),
                    revision_history_limit=0,
                    template=self.get_pod_template_spec(
                        git_sha=git_sha, system_paasta_config=system_paasta_config
                    ),
                    strategy=self.get_deployment_strategy_config(),
                ),
            )
        prometheus_shard = self.get_prometheus_shard()
        if prometheus_shard:
            complete_config.metadata.labels[
                "paasta.yelp.com/prometheus_shard"
            ] = prometheus_shard
        image_version = self.get_image_version()
        if image_version is not None:
            complete_config.metadata.labels[
                "paasta.yelp.com/image_version"
            ] = image_version
        # DO NOT ADD LABELS AFTER THIS LINE
        # The config hash is computed over the (sanitized) object, so any
        # label added after this point would not be reflected in the hash
        # and could break bounce detection.
        config_hash = get_config_hash(
            self.sanitize_for_config_hash(complete_config),
            force_bounce=self.get_force_bounce(),
        )
        # The hash itself is applied as labels on both the workload object
        # and its pod template (old and new label names for compatibility).
        complete_config.metadata.labels["yelp.com/paasta_config_sha"] = config_hash
        complete_config.metadata.labels["paasta.yelp.com/config_sha"] = config_hash
        complete_config.spec.template.metadata.labels[
            "yelp.com/paasta_config_sha"
        ] = config_hash
        complete_config.spec.template.metadata.labels[
            "paasta.yelp.com/config_sha"
        ] = config_hash
    except Exception as e:
        # Wrap any failure so callers get a consistent, service-tagged error.
        raise InvalidKubernetesConfig(e, self.get_service(), self.get_instance())
    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config
def has_routable_ip(
    self,
    service_namespace_config: ServiceNamespaceConfig,
    system_paasta_config: SystemPaastaConfig,
) -> str:
    """Decide the routable_ip label value ("true" / "false").

    A routable IP is required when explicitly configured, when the service
    registers in smartstack, or when it exposes metrics (prometheus_port,
    uwsgi/gunicorn metrics providers) that Prometheus shards must scrape.
    """
    needs_routable_ip = (
        self.config_dict.get("routable_ip", False)
        or service_namespace_config.is_in_smartstack()
        or self.get_prometheus_port() is not None
        or self.should_use_metrics_provider(METRICS_PROVIDER_UWSGI)
        or self.should_use_metrics_provider(METRICS_PROVIDER_GUNICORN)
    )
    return "true" if needs_routable_ip else "false"
Services with a `prometheus_port` defined or that use certain sidecars must have a routable IP
address to allow Prometheus shards to scrape metrics.
| has_routable_ip | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_node_selector(self) -> Mapping[str, str]:
    """Turn simple (string-valued) node restrictions into node selectors.

    Unlike node affinities, selectors show up in `kubectl describe`.
    Non-string selector values are expressed via affinities elsewhere.
    """
    raw_selectors: Mapping[str, Any] = self.config_dict.get("node_selectors", {})
    selectors: Dict[str, str] = {}
    for label, value in raw_selectors.items():
        # exact-type check on purpose: only plain strings become selectors
        if type(value) is str:
            selectors[to_node_label(label)] = value
    # every instance is pinned to its pool
    selectors["yelp.com/pool"] = self.get_pool()
    return selectors
affinities, selectors will show up in `kubectl describe`.
| get_node_selector | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_node_affinity(
    self, pool_node_affinities: Dict[str, Dict[str, List[str]]] = None
) -> Optional[V1NodeAffinity]:
    """Converts deploy_whitelist and deploy_blacklist in node affinities.

    note: At the time of writing, `kubectl describe` does not show affinities,
    only selectors. To see affinities, use `kubectl get pod -o json` instead.

    :param pool_node_affinities: optional per-pool default selectors (e.g.
        zone restrictions) merged in as required terms — see PAASTA-18198
    :returns: a V1NodeAffinity, or None when no requirements or preferences
        apply to this instance
    """
    # Required terms come from the deploy allow/deny lists...
    requirements = allowlist_denylist_to_requirements(
        allowlist=self.get_deploy_whitelist(),
        denylist=self.get_deploy_blacklist(),
    )
    # ...plus any non-string node_selectors (string ones become selectors).
    node_selectors = self.config_dict.get("node_selectors", {})
    requirements.extend(
        raw_selectors_to_requirements(
            raw_selectors=node_selectors,
        )
    )
    # PAASTA-18198: To improve AZ balance with Karpenter, we temporarily allow specifying zone affinities per pool
    if pool_node_affinities and self.get_pool() in pool_node_affinities:
        current_pool_node_affinities = pool_node_affinities[self.get_pool()]
        # If the service already has a node selector for a zone, we don't want to override it
        if current_pool_node_affinities and not contains_zone_label(node_selectors):
            requirements.extend(
                raw_selectors_to_requirements(
                    raw_selectors=current_pool_node_affinities,
                )
            )
    # Soft preferences from node_selectors_preferred, each with a weight.
    preferred_terms = []
    for node_selectors_prefered_config_dict in self.config_dict.get(
        "node_selectors_preferred", []
    ):
        preferred_terms.append(
            V1PreferredSchedulingTerm(
                weight=node_selectors_prefered_config_dict["weight"],
                preference=V1NodeSelectorTerm(
                    match_expressions=[
                        V1NodeSelectorRequirement(
                            key=key,
                            operator=op,
                            values=vs,
                        )
                        for key, op, vs in raw_selectors_to_requirements(
                            raw_selectors=node_selectors_prefered_config_dict[
                                "preferences"
                            ]
                        )
                    ]
                ),
            )
        )
    # package everything into a node affinity - lots of layers :P
    if len(requirements) == 0 and len(preferred_terms) == 0:
        return None
    required_term = (
        V1NodeSelectorTerm(
            match_expressions=[
                V1NodeSelectorRequirement(
                    key=key,
                    operator=op,
                    values=vs,
                )
                for key, op, vs in requirements
            ]
        )
        if requirements
        else None
    )
    if not preferred_terms:
        preferred_terms = None
    return V1NodeAffinity(
        required_during_scheduling_ignored_during_execution=(
            V1NodeSelector(node_selector_terms=[required_term])
            if required_term
            else None
        ),
        preferred_during_scheduling_ignored_during_execution=preferred_terms,
    )
note: At the time of writing, `kubectl describe` does not show affinities,
only selectors. To see affinities, use `kubectl get pod -o json` instead.
| get_node_affinity | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_pod_anti_affinity(self) -> Optional[V1PodAntiAffinity]:
    """Build pod anti-affinity from the configured service/instance
    anti-affinity conditions, expressed as "paasta.yelp.com"-prefixed
    label selectors.

    :return: a V1PodAntiAffinity, or None when nothing is configured
    """
    required = self.get_pod_required_anti_affinity_terms()
    preferred = self.get_pod_preferred_anti_affinity_terms()
    if required is None and preferred is None:
        return None
    return V1PodAntiAffinity(
        required_during_scheduling_ignored_during_execution=required,
        preferred_during_scheduling_ignored_during_execution=preferred,
    )
Converts the given anti-affinity on service and instance to pod
affinities with the "paasta.yelp.com" prefixed label selector
:return:
| get_pod_anti_affinity | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def _kube_affinity_condition_to_label_selector(
    self, condition: KubeAffinityCondition
) -> Optional[V1LabelSelector]:
    """Translate an affinity condition into a paasta-prefixed label selector.

    Only the "service" and "instance" keys are honored; returns None when
    neither is present.
    """
    labels = {
        PAASTA_ATTRIBUTE_PREFIX + key: condition.get(key)
        for key in ("service", "instance")
        if key in condition
    }
    return V1LabelSelector(match_labels=labels) if labels else None
def sanitize_for_config_hash(
    self, config: Union[V1Deployment, V1StatefulSet]
) -> Mapping[str, Any]:
    """Strip volatile fields from a config before hashing it.

    Replica count, namespace, and the PAASTA_SOA_CONFIGS_SHA env var change
    without representing a real config change, so they are dropped; hashes
    of the referenced secrets are mixed in so that rotating a secret does
    bounce the service.

    :param config: complete_config hash to sanitise
    :returns: sanitised copy of complete_config hash
    """
    ahash = config.to_dict()  # deep convert to dict
    ahash["paasta_secrets"] = get_kubernetes_secret_hashes(
        service=self.get_service(),
        environment_variables=self.get_env(),
        namespace=self.get_namespace(),
    )
    # drop replica count from the spec
    if ahash["spec"] is not None:
        ahash["spec"].pop("replicas", None)
    # blank out the namespace in the metadata
    if ahash["metadata"] is not None:
        ahash["metadata"]["namespace"] = None
    # drop the soa-configs SHA from every container's env
    try:
        for container in ahash["spec"]["template"]["spec"]["containers"]:
            container["env"] = [
                env_entry
                for env_entry in container["env"]
                if env_entry.get("name", "") != "PAASTA_SOA_CONFIGS_SHA"
            ]
    except TypeError:  # any of the values can be None
        pass
    return ahash
calculation of config hash.
:param config: complete_config hash to sanitise
:returns: sanitised copy of complete_config hash
| sanitize_for_config_hash | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_termination_grace_period(
    self, service_namespace_config: ServiceNamespaceConfig
) -> Optional[int]:
    """Seconds kubernetes waits for pre-stop hooks (or graceful exit after
    signaling) before force-killing the pod.

    Smartstack services default to just enough time for the standard
    pre-stop hook, optionally extended to cover in-flight requests (capped
    at 30 minutes). Other services default to None (kubernetes' 30s).
    An explicit lifecycle setting always wins.
    """
    if not service_namespace_config.is_in_smartstack():
        default = None
    else:
        default = self.get_hacheck_prestop_sleep_seconds() + 1
        if self.get_pre_stop_wait_for_connections_to_complete(
            service_namespace_config
        ):
            # Cap at 30 minutes; services with ultra-long timeouts are
            # probably able to handle SIGTERM gracefully anyway.
            longest_timeout_seconds = (
                service_namespace_config.get_longest_timeout_ms() / 1000
            )
            default += int(math.ceil(min(1800, longest_timeout_seconds)))
    return self.get_lifecycle_dict().get(
        "termination_grace_period_seconds", default
    )
process to exit after signaling) before forcefully terminating the pod.
For smartstack services, defaults to a value long enough to allow the default pre-stop hook to finish.
For non-smartstack services, defaults to None (kubernetes default of 30s).
| get_termination_grace_period | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_all_kubernetes_services_running_here() -> List[Tuple[str, str, int]]:
    """List every paasta-labelled pod on this host, smartstack or not.

    :returns: (service, instance, port) tuples — the port is always 0, the
        shape just matches other service-discovery helpers. Returns [] when
        the kubelet cannot be reached.
    """
    services = []
    try:
        pods = get_k8s_pods()
    except requests.exceptions.ConnectionError:
        # fixed typo: "kublet" -> "kubelet"
        log.debug("Failed to connect to the kubelet when trying to get pods")
        return []
    for pod in pods["items"]:
        try:
            service = pod["metadata"]["labels"]["paasta.yelp.com/service"]
            instance = pod["metadata"]["labels"]["paasta.yelp.com/instance"]
            services.append((service, instance, 0))
        except KeyError:
            # pods without paasta labels (daemonsets, system pods, ...) are skipped
            log.debug(f"Skipping listing what looks like a non-paasta pod: {pod}")
    return services
tuple to match the return value of other similar functions | get_all_kubernetes_services_running_here | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def list_all_paasta_deployments(kube_client: KubeClient) -> Sequence[KubeDeployment]:
    """Return deployments in every namespace that carry the paasta service label."""
    return list_deployments_in_all_namespaces(
        kube_client=kube_client,
        label_selector="paasta.yelp.com/service",
    )
def filter_nodes_by_blacklist(
    nodes: Sequence[V1Node], blacklist: DeployBlacklist, whitelist: DeployWhitelist
) -> Sequence[V1Node]:
    """Keep only the nodes whose labels pass the deploy whitelist and blacklist.

    The blacklist is a list of ["location_type", "location"] pairs, e.g.
    ["region", "uswest1-prod"]; entries are paasta-prefixed before matching
    against node labels.

    :returns: The list of nodes after the filter
    """
    if whitelist:
        whitelist = (paasta_prefixed(whitelist[0]), whitelist[1])
    prefixed_blacklist = [
        (paasta_prefixed(entry[0]), entry[1]) for entry in blacklist
    ]
    surviving_nodes = []
    for node in nodes:
        labels = node.metadata.labels
        if host_passes_whitelist(labels, whitelist) and host_passes_blacklist(
            labels, prefixed_blacklist
        ):
            surviving_nodes.append(node)
    return surviving_nodes
The blacklist is in the form of:
[["location_type", "location]]
Where the list inside is something like ["region", "uswest1-prod"]
:returns: The list of nodes after the filter
| filter_nodes_by_blacklist | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def update_secret(
    kube_client: KubeClient,
    service_name: str,
    secret_name: str,
    secret_data: Dict[str, str],
    namespace: str,
) -> None:
    """Replace the contents of an existing Kubernetes secret.

    :param service_name: Expect unsanitised service name
    :param secret_data: mapping of string-to-string where values are base64-encoded
    :param namespace: Unsanitized namespace of a service that will use the secret
    :raises ApiException: e.g. when the secret does not already exist
    """
    service_label = sanitise_label_value(service_name)
    replacement = V1Secret(
        metadata=V1ObjectMeta(
            name=secret_name,
            labels={
                "yelp.com/paasta_service": service_label,
                "paasta.yelp.com/service": service_label,
            },
        ),
        data=secret_data,
    )
    kube_client.core.replace_namespaced_secret(
        name=secret_name, namespace=namespace, body=replacement
    )
Expect secret_name to exist, e.g. kubectl get secret
:param service_name: Expect unsanitised service name
:param secret_data: Expect a mapping of string-to-string where values are base64-encoded
:param namespace: Unsanitized namespace of a service that will use the secret
:raises ApiException:
| update_secret | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_secret_signature(
    kube_client: KubeClient,
    signature_name: str,
    namespace: str,
) -> Optional[str]:
    """Fetch a secret's signature, which is stored as a kubernetes configmap.

    :param signature_name: name of the signature configmap
    :return: the stored signature, or None when the configmap is absent
    :raises ApiException: on any API failure other than 404
    """
    try:
        configmap = kube_client.core.read_namespaced_config_map(
            name=signature_name,
            namespace=namespace,
        )
    except ApiException as e:
        if e.status != 404:
            raise
        return None
    return configmap.data["signature"] if configmap else None
:param signature_name: Expect the signature to exist in kubernetes configmap
:return: Kubernetes configmap as a signature
:raises ApiException:
| get_secret_signature | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def update_secret_signature(
    kube_client: KubeClient,
    service_name: str,
    signature_name: str,
    secret_signature: str,
    namespace: str,
) -> None:
    """Replace the signature stored in an existing signature configmap.

    :param service_name: Expect unsanitised service_name
    :param signature_name: name of an existing signature configmap
    :param secret_signature: Signature to replace with
    :raises ApiException:
    """
    service_label = sanitise_label_value(service_name)
    replacement = V1ConfigMap(
        metadata=V1ObjectMeta(
            name=signature_name,
            labels={
                "yelp.com/paasta_service": service_label,
                "paasta.yelp.com/service": service_label,
            },
        ),
        data={"signature": secret_signature},
    )
    kube_client.core.replace_namespaced_config_map(
        name=signature_name, namespace=namespace, body=replacement
    )
:param service_name: Expect unsanitised service_name
:param signature_name: Expect signature_name to exist in kubernetes configmap
:param secret_signature: Signature to replace with
:raises ApiException:
| update_secret_signature | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def create_secret_signature(
    kube_client: KubeClient,
    service_name: str,
    signature_name: str,
    secret_signature: str,
    namespace: str,
) -> None:
    """Create a new configmap holding a secret's signature.

    :param service_name: Expect unsanitised service_name
    :param signature_name: Expected properly formatted signature, see _get_secret_signature_name()
    :param secret_signature: Signature value
    :param namespace: Unsanitized namespace of a service that will use the signature
    """
    service_label = sanitise_label_value(service_name)
    new_configmap = V1ConfigMap(
        metadata=V1ObjectMeta(
            name=signature_name,
            labels={
                "yelp.com/paasta_service": service_label,
                "paasta.yelp.com/service": service_label,
            },
        ),
        data={"signature": secret_signature},
    )
    kube_client.core.create_namespaced_config_map(
        namespace=namespace, body=new_configmap
    )
:param service_name: Expect unsanitised service_name
:param signature_name: Expected properly formatted signature, see _get_secret_signature_name()
:param secret_signature: Signature value
:param namespace: Unsanitized namespace of a service that will use the signature
| create_secret_signature | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def sanitise_kubernetes_name(
    service: str,
) -> str:
    """Make a name safe for kubernetes, where "-" serves as the delimiter.

    Every underscore becomes "--"; a leading underscore becomes the literal
    prefix "underscore-"; the result is lowercased.
    """
    sanitised = service.replace("_", "--")
    if sanitised.startswith("--"):
        sanitised = "underscore-" + sanitised[2:]
    return sanitised.lower()
Sanitizes kubernetes name so that hyphen (-) can be used a delimeter
| sanitise_kubernetes_name | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def create_pod_topology_spread_constraints(
service: str,
instance: str,
topology_spread_constraints: List[TopologySpreadConstraintDict],
) -> List[V1TopologySpreadConstraint]:
"""
Applies cluster-level topology spread constraints to every Pod template.
This allows us to configure default topology spread constraints on EKS where we cannot configure the scheduler.
"""
if not topology_spread_constraints:
return []
selector = V1LabelSelector(
match_labels={
"paasta.yelp.com/service": service,
"paasta.yelp.com/instance": instance,
}
)
pod_topology_spread_constraints = []
for constraint in topology_spread_constraints:
pod_topology_spread_constraints.append(
V1TopologySpreadConstraint(
label_selector=selector,
topology_key=constraint.get(
"topology_key", None
), # ValueError will be raised if unset
max_skew=constraint.get("max_skew", 1),
when_unsatisfiable=constraint.get(
"when_unsatisfiable", "ScheduleAnyway"
),
)
)
return pod_topology_spread_constraints |
Applies cluster-level topology spread constraints to every Pod template.
This allows us to configure default topology spread constraints on EKS where we cannot configure the scheduler.
| create_pod_topology_spread_constraints | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_pod_hostname(kube_client: KubeClient, pod: V1Pod) -> str:
"""Gets the hostname of a pod's node from labels"""
if not pod.spec.node_name: # can be none, if pod not yet scheduled
return "NotScheduled"
try:
node = kube_client.core.read_node(name=pod.spec.node_name)
except ApiException:
# fall back to node name (which has the IP) if node somehow doesnt exist
return pod.spec.node_name
# if label has disappeared (say we changed it), default to node name
return node.metadata.labels.get("yelp.com/hostname", pod.spec.node_name) | Gets the hostname of a pod's node from labels | get_pod_hostname | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def _get_secret_signature_name(
namespace: str, secret_identifier: str, service_name: str, key_name: str
) -> str:
"""
:param namespace: Unsanitised namespace of a service that will use the signature
:param secret_identifier: Identifies the type of secret
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised signature name as kubernetes configmap name with at most 253 characters
"""
return limit_size_with_hash(
"-".join(
[
namespace,
secret_identifier,
sanitise_kubernetes_name(service_name),
sanitise_kubernetes_name(key_name),
"signature",
]
),
limit=253,
) |
:param namespace: Unsanitised namespace of a service that will use the signature
:param secret_identifier: Identifies the type of secret
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised signature name as kubernetes configmap name with at most 253 characters
| _get_secret_signature_name | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_paasta_secret_name(namespace: str, service_name: str, key_name: str) -> str:
"""
Use whenever creating or references a PaaSTA secret
:param namespace: Unsanitised namespace of a service that will use the signature
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA secret name
"""
return _get_secret_name(
namespace=namespace,
secret_identifier="secret",
service_name=service_name,
key_name=key_name,
) |
Use whenever creating or references a PaaSTA secret
:param namespace: Unsanitised namespace of a service that will use the signature
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA secret name
| get_paasta_secret_name | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_paasta_secret_signature_name(
namespace: str, service_name: str, key_name: str
) -> str:
"""
Get PaaSTA signature name stored as kubernetes configmap
:param namespace: Unsanitised namespace of a service that will use the signature
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA signature name
"""
return _get_secret_signature_name(
namespace=namespace,
secret_identifier="secret",
service_name=service_name,
key_name=key_name,
) |
Get PaaSTA signature name stored as kubernetes configmap
:param namespace: Unsanitised namespace of a service that will use the signature
:param service_name: Unsanitised service_name
:param key_name: Name of the actual secret, typically specified in a configuration file
:return: Sanitised PaaSTA signature name
| get_paasta_secret_signature_name | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_secret(
kube_client: KubeClient,
secret_name: str,
key_name: str,
*,
namespace: str,
decode: bool = True,
) -> Union[str, bytes]:
"""
:param secret_name: Expect properly formatted kubernetes secret name and that it exists
:param key_name: Expect key_name to be a key in a data section
:raises ApiException:
:raises KeyError: if key_name does not exists in kubernetes secret's data section
"""
secret_data = kube_client.core.read_namespaced_secret(
name=secret_name, namespace=namespace
).data[key_name]
# String secrets (e.g. yaml config files) need to be decoded
# Binary secrets (e.g. TLS Keystore or binary certificate files) cannot be decoded
if decode:
return base64.b64decode(secret_data).decode("utf-8")
return base64.b64decode(secret_data) |
:param secret_name: Expect properly formatted kubernetes secret name and that it exists
:param key_name: Expect key_name to be a key in a data section
:raises ApiException:
:raises KeyError: if key_name does not exists in kubernetes secret's data section
| get_secret | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def patch_namespaced_configmap(
name: str,
body: Dict[str, str],
*,
namespace: str,
kube_client: KubeClient,
) -> V1ConfigMap:
"""
Patches a configmap with the given body. The body should be a dictionary of key-value pairs.
"""
try:
return kube_client.core.patch_namespaced_config_map(
name=name, namespace=namespace, body=body
)
except ApiException as e:
if e.status == 404:
raise ValueError(f"ConfigMap {name} not found in namespace {namespace}")
else:
raise |
Patches a configmap with the given body. The body should be a dictionary of key-value pairs.
| patch_namespaced_configmap | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_or_create_namespaced_configmap(
configmap: str,
*,
namespace: str,
kube_client: KubeClient,
) -> Tuple[V1ConfigMap, bool]:
"""
Returns a 2-tuple of (the configmap, a bool representing whether it was just created)
"""
try:
return (
kube_client.core.read_namespaced_config_map(
name=configmap, namespace=namespace
),
False,
)
except ApiException as e:
if e.status == 404:
configmap = V1ConfigMap(
metadata=V1ObjectMeta(name=configmap, namespace=namespace),
data={},
)
return (
kube_client.core.create_namespaced_config_map(
namespace=namespace, body=configmap
),
True,
)
else:
raise |
Returns a 2-tuple of (the configmap, a bool representing whether it was just created)
| get_or_create_namespaced_configmap | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_authenticating_services(soa_dir: str = DEFAULT_SOA_DIR) -> Set[str]:
"""Load list of services participating in authenticated traffic"""
authenticating_services_conf_path = os.path.join(soa_dir, "authenticating.yaml")
config = service_configuration_lib.read_yaml_file(authenticating_services_conf_path)
return set(config.get("services", [])) | Load list of services participating in authenticated traffic | get_authenticating_services | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def add_volumes_for_authenticating_services(
service_name: str,
config_volumes: List[ProjectedSAVolume],
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[ProjectedSAVolume]:
"""Add projected service account volume to the list of volumes if service
participates in authenticated traffic. In case of changes, a new list is returned,
no updates in-place.
:param str service_name: name of the service
:param List[ProjectedSAVolume] config_volumes: existing projected volumes from service config
:param str soa_dir: path to SOA configurations directory
:return: updated list of projected service account volumes
"""
token_config = load_system_paasta_config().get_service_auth_token_volume_config()
if (
token_config
and service_name in get_authenticating_services(soa_dir)
and not any(volume == token_config for volume in config_volumes)
):
config_volumes = [token_config, *config_volumes]
return config_volumes | Add projected service account volume to the list of volumes if service
participates in authenticated traffic. In case of changes, a new list is returned,
no updates in-place.
:param str service_name: name of the service
:param List[ProjectedSAVolume] config_volumes: existing projected volumes from service config
:param str soa_dir: path to SOA configurations directory
:return: updated list of projected service account volumes
| add_volumes_for_authenticating_services | python | Yelp/paasta | paasta_tools/kubernetes_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py | Apache-2.0 |
def get_healthcheck_mode(self) -> str:
"""Get the healthcheck mode for the service. In most cases, this will match the mode
of the service, but we do provide the opportunity for users to specify both. Default to the mode
if no healthcheck_mode is specified.
"""
healthcheck_mode = self.get("healthcheck_mode", None)
if not healthcheck_mode:
return self.get_mode()
else:
return healthcheck_mode | Get the healthcheck mode for the service. In most cases, this will match the mode
of the service, but we do provide the opportunity for users to specify both. Default to the mode
if no healthcheck_mode is specified.
| get_healthcheck_mode | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_mode(self) -> str:
"""Get the mode that the service runs in and check that we support it.
If the mode is not specified, we check whether the service uses smartstack
in order to determine the appropriate default value. If proxy_port is specified
in the config, the service uses smartstack, and we can thus safely assume its mode is http.
If the mode is not defined and the service does not use smartstack, we set the mode to None.
"""
mode = self.get("mode", None)
if mode is None:
if not self.is_in_smartstack():
return None
else:
return "http"
elif mode in ["http", "tcp", "https"]:
return mode
else:
raise InvalidSmartstackMode("Unknown mode: %s" % mode) | Get the mode that the service runs in and check that we support it.
If the mode is not specified, we check whether the service uses smartstack
in order to determine the appropriate default value. If proxy_port is specified
in the config, the service uses smartstack, and we can thus safely assume its mode is http.
If the mode is not defined and the service does not use smartstack, we set the mode to None.
| get_mode | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_longest_timeout_ms(self) -> int:
"""Calculate the longest amount of time a connection to this service might stay open."""
return max(
[self.get_timeout_server_ms()]
+ list(self.get("endpoint_timeouts", {}).values())
) | Calculate the longest amount of time a connection to this service might stay open. | get_longest_timeout_ms | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_drain_method(self, service_namespace_config: ServiceNamespaceConfig) -> str:
"""Get the drain method specified in the service's configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain method specified in the config, or 'noop' if not specified"""
default = "noop"
# Default to hacheck draining if the service is in smartstack
if service_namespace_config.is_in_smartstack():
default = "hacheck"
return self.config_dict.get("drain_method", default) | Get the drain method specified in the service's configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain method specified in the config, or 'noop' if not specified | get_drain_method | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_drain_method_params(
self, service_namespace_config: ServiceNamespaceConfig
) -> Dict:
"""Get the drain method parameters specified in the service's configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain_method_params dictionary specified in the config, or {} if not specified"""
default: Dict = {}
if service_namespace_config.is_in_smartstack():
default = {"delay": 60}
return self.config_dict.get("drain_method_params", default) | Get the drain method parameters specified in the service's configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain_method_params dictionary specified in the config, or {} if not specified | get_drain_method_params | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_instances(self, with_limit: bool = True) -> int:
"""Gets the number of instances for a service, ignoring whether the user has requested
the service to be started or stopped"""
if self.is_autoscaling_enabled():
autoscaled_instances = self.get_autoscaled_instances()
if autoscaled_instances is None:
return self.get_max_instances()
else:
limited_instances = (
self.limit_instance_count(autoscaled_instances)
if with_limit
else autoscaled_instances
)
return limited_instances
else:
instances = self.config_dict.get("instances", 1)
log.debug("Autoscaling not enabled, returning %d instances" % instances)
return instances | Gets the number of instances for a service, ignoring whether the user has requested
the service to be started or stopped | get_instances | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_desired_instances(self) -> int:
"""Get the number of instances specified in zookeeper or the service's configuration.
If the number of instances in zookeeper is less than min_instances, returns min_instances.
If the number of instances in zookeeper is greater than max_instances, returns max_instances.
Defaults to 0 if not specified in the config.
:returns: The number of instances specified in the config, 0 if not
specified or if desired_state is not 'start'.
"""
if self.get_desired_state() == "start":
return self.get_instances()
else:
log.debug("Instance is set to stop. Returning '0' instances")
return 0 | Get the number of instances specified in zookeeper or the service's configuration.
If the number of instances in zookeeper is less than min_instances, returns min_instances.
If the number of instances in zookeeper is greater than max_instances, returns max_instances.
Defaults to 0 if not specified in the config.
:returns: The number of instances specified in the config, 0 if not
specified or if desired_state is not 'start'.
| get_desired_instances | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_healthcheck_for_instance(
service: str,
instance: str,
service_manifest: LongRunningServiceConfig,
random_port: int,
soa_dir: str = DEFAULT_SOA_DIR,
) -> Tuple[Optional[str], Optional[str]]:
"""
Returns healthcheck for a given service instance in the form of a tuple (mode, healthcheck_command)
or (None, None) if no healthcheck
"""
namespace = service_manifest.get_nerve_namespace()
smartstack_config = load_service_namespace_config(
service=service, namespace=namespace, soa_dir=soa_dir
)
mode = service_manifest.get_healthcheck_mode(smartstack_config)
hostname = socket.getfqdn()
if mode == "http" or mode == "https":
path = service_manifest.get_healthcheck_uri(smartstack_config)
healthcheck_command = "%s://%s:%d%s" % (mode, hostname, random_port, path)
elif mode == "tcp":
healthcheck_command = "%s://%s:%d" % (mode, hostname, random_port)
elif mode == "cmd":
healthcheck_command = service_manifest.get_healthcheck_cmd()
else:
mode = None
healthcheck_command = None
return (mode, healthcheck_command) |
Returns healthcheck for a given service instance in the form of a tuple (mode, healthcheck_command)
or (None, None) if no healthcheck
| get_healthcheck_for_instance | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def load_service_namespace_config(
service: str, namespace: str, soa_dir: str = DEFAULT_SOA_DIR
) -> ServiceNamespaceConfig:
"""Attempt to read the configuration for a service's namespace in a more strict fashion.
Retrieves the following keys:
- proxy_port: the proxy port defined for the given namespace
- healthcheck_mode: the mode for the healthcheck (http or tcp)
- healthcheck_port: An alternate port to use for health checking
- healthcheck_uri: URI target for healthchecking
- healthcheck_timeout_s: healthcheck timeout in seconds
- healthcheck_body_expect: an expected string in healthcheck response body
- updown_timeout_s: updown_service timeout in seconds
- timeout_connect_ms: proxy frontend timeout in milliseconds
- timeout_server_ms: proxy server backend timeout in milliseconds
- retries: the number of retries on a proxy backend
- mode: the mode the service is run in (http or tcp)
- routes: a list of tuples of (source, destination)
- discover: the scope at which to discover services e.g. 'habitat'
- advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
- extra_advertise: a list of tuples of (source, destination)
e.g. [('region:dc6-prod', 'region:useast1-prod')]
- extra_healthcheck_headers: a dict of HTTP headers that must
be supplied when health checking. E.g. { 'Host': 'example.com' }
- lb_policy: Envoy load balancer policies. E.g. "ROUND_ROBIN"
:param service: The service name
:param namespace: The namespace to read
:param soa_dir: The SOA config directory to read from
:returns: A dict of the above keys, if they were defined
"""
smartstack_config = service_configuration_lib.read_extra_service_information(
service_name=service,
extra_info="smartstack",
soa_dir=soa_dir,
deepcopy=False,
)
namespace_config_from_file = smartstack_config.get(namespace, {})
service_namespace_config = ServiceNamespaceConfig()
# We can't really use .get, as we don't want the key to be in the returned
# dict at all if it doesn't exist in the config file.
# We also can't just copy the whole dict, as we only care about some keys
# and there's other things that appear in the smartstack section in
# several cases.
key_whitelist = {
"healthcheck_mode",
"healthcheck_uri",
"healthcheck_port",
"healthcheck_timeout_s",
"healthcheck_body_expect",
"updown_timeout_s",
"proxy_port",
"timeout_connect_ms",
"timeout_server_ms",
"retries",
"mode",
"discover",
"advertise",
"extra_healthcheck_headers",
"lb_policy",
"endpoint_timeouts",
}
for key, value in namespace_config_from_file.items():
if key in key_whitelist:
service_namespace_config[key] = value
# Other code in paasta_tools checks 'mode' after the config file
# is loaded, so this ensures that it is set to the appropriate default
# if not otherwise specified, even if appropriate default is None.
service_namespace_config["mode"] = service_namespace_config.get_mode()
if "routes" in namespace_config_from_file:
service_namespace_config["routes"] = [
(route["source"], dest)
for route in namespace_config_from_file["routes"]
for dest in route["destinations"]
]
if "extra_advertise" in namespace_config_from_file:
service_namespace_config["extra_advertise"] = [
(src, dst)
for src in namespace_config_from_file["extra_advertise"]
for dst in namespace_config_from_file["extra_advertise"][src]
]
return service_namespace_config | Attempt to read the configuration for a service's namespace in a more strict fashion.
Retrieves the following keys:
- proxy_port: the proxy port defined for the given namespace
- healthcheck_mode: the mode for the healthcheck (http or tcp)
- healthcheck_port: An alternate port to use for health checking
- healthcheck_uri: URI target for healthchecking
- healthcheck_timeout_s: healthcheck timeout in seconds
- healthcheck_body_expect: an expected string in healthcheck response body
- updown_timeout_s: updown_service timeout in seconds
- timeout_connect_ms: proxy frontend timeout in milliseconds
- timeout_server_ms: proxy server backend timeout in milliseconds
- retries: the number of retries on a proxy backend
- mode: the mode the service is run in (http or tcp)
- routes: a list of tuples of (source, destination)
- discover: the scope at which to discover services e.g. 'habitat'
- advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
- extra_advertise: a list of tuples of (source, destination)
e.g. [('region:dc6-prod', 'region:useast1-prod')]
- extra_healthcheck_headers: a dict of HTTP headers that must
be supplied when health checking. E.g. { 'Host': 'example.com' }
- lb_policy: Envoy load balancer policies. E.g. "ROUND_ROBIN"
:param service: The service name
:param namespace: The namespace to read
:param soa_dir: The SOA config directory to read from
:returns: A dict of the above keys, if they were defined
| load_service_namespace_config | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_proxy_port_for_instance(
service_config: LongRunningServiceConfig,
) -> Optional[int]:
"""Get the proxy_port defined in the first namespace configuration for a
service instance.
This means that the namespace first has to be loaded from the service instance's
configuration, and then the proxy_port has to loaded from the smartstack configuration
for that namespace.
:param service_config: The instance of the services LongRunningServiceConfig
:returns: The proxy_port for the service instance, or None if not defined"""
registration = service_config.get_registrations()[0]
service, namespace, _, __ = decompose_job_id(registration)
nerve_dict = load_service_namespace_config(
service=service, namespace=namespace, soa_dir=service_config.soa_dir
)
return nerve_dict.get("proxy_port") | Get the proxy_port defined in the first namespace configuration for a
service instance.
This means that the namespace first has to be loaded from the service instance's
configuration, and then the proxy_port has to loaded from the smartstack configuration
for that namespace.
:param service_config: The instance of the services LongRunningServiceConfig
:returns: The proxy_port for the service instance, or None if not defined | get_proxy_port_for_instance | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def host_passes_blacklist(
host_attributes: Mapping[str, str], blacklist: DeployBlacklist
) -> bool:
"""
:param host: A single host attributes dict
:param blacklist: A list of lists like [["location_type", "location"], ["foo", "bar"]]
:returns: boolean, True if the host gets passed the blacklist
"""
try:
for location_type, location in blacklist:
if host_attributes.get(location_type) == location:
return False
except ValueError as e:
log.error(f"Errors processing the following blacklist: {blacklist}")
log.error("I will assume the host does not pass\nError was: %s" % e)
return False
return True |
:param host: A single host attributes dict
:param blacklist: A list of lists like [["location_type", "location"], ["foo", "bar"]]
:returns: boolean, True if the host gets passed the blacklist
| host_passes_blacklist | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def host_passes_whitelist(
host_attributes: Mapping[str, str], whitelist: DeployWhitelist
) -> bool:
"""
:param host: A single host attributes dict.
:param whitelist: A 2 item list like ["location_type", ["location1", 'location2']]
:returns: boolean, True if the host gets past the whitelist
"""
# No whitelist, so disable whitelisting behavior.
if whitelist is None or len(whitelist) == 0:
return True
try:
(location_type, locations) = whitelist
if host_attributes.get(location_type) in locations:
return True
except ValueError as e:
log.error(f"Errors processing the following whitelist: {whitelist}")
log.error("I will assume the host does not pass\nError was: %s" % e)
return False
return False |
:param host: A single host attributes dict.
:param whitelist: A 2 item list like ["location_type", ["location1", 'location2']]
:returns: boolean, True if the host gets past the whitelist
| host_passes_whitelist | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_all_namespaces(
soa_dir: str = DEFAULT_SOA_DIR,
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
"""Get all the smartstack namespaces across all services.
This is mostly so synapse can get everything it needs in one call.
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of the form (service.namespace, namespace_config)"""
rootdir = os.path.abspath(soa_dir)
namespace_list: List[Tuple[str, ServiceNamespaceConfig]] = []
for srv_dir in os.listdir(rootdir):
namespace_list.extend(get_all_namespaces_for_service(srv_dir, soa_dir))
return namespace_list | Get all the smartstack namespaces across all services.
This is mostly so synapse can get everything it needs in one call.
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of the form (service.namespace, namespace_config) | get_all_namespaces | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_all_namespaces_for_service(
service: str, soa_dir: str = DEFAULT_SOA_DIR, full_name: bool = True
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
"""Get all the smartstack namespaces listed for a given service name.
:param service: The service name
:param soa_dir: The SOA config directory to read from
:param full_name: A boolean indicating if the service name should be prepended to the namespace in the
returned tuples as described below (Default: True)
:returns: A list of tuples of the form (service<SPACER>namespace, namespace_config) if full_name is true,
otherwise of the form (namespace, namespace_config)
"""
service_config = service_configuration_lib.read_service_configuration(
service, soa_dir
)
smartstack = service_config.get("smartstack", {})
namespace_list = []
for namespace in smartstack:
if full_name:
name = compose_job_id(service, namespace)
else:
name = namespace
namespace_list.append((name, smartstack[namespace]))
return namespace_list | Get all the smartstack namespaces listed for a given service name.
:param service: The service name
:param soa_dir: The SOA config directory to read from
:param full_name: A boolean indicating if the service name should be prepended to the namespace in the
returned tuples as described below (Default: True)
:returns: A list of tuples of the form (service<SPACER>namespace, namespace_config) if full_name is true,
otherwise of the form (namespace, namespace_config)
| get_all_namespaces_for_service | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def get_expected_instance_count_for_namespace(
service: str,
namespace: str,
instance_type_class: Type[LongRunningServiceConfig],
cluster: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> int:
"""Get the number of expected instances for a namespace, based on the number
of instances set to run on that namespace as specified in service configuration files.
:param service: The service's name
:param namespace: The namespace for that service to check
instance_type_class: The type of the instance, options are e.g. KubernetesDeploymentConfig,
:param soa_dir: The SOA configuration directory to read from
:returns: An integer value of the # of expected instances for the namespace"""
total_expected = 0
if not cluster:
cluster = load_system_paasta_config().get_cluster()
pscl = PaastaServiceConfigLoader(
service=service, soa_dir=soa_dir, load_deployments=False
)
for job_config in pscl.instance_configs(
cluster=cluster, instance_type_class=instance_type_class
):
if f"{service}.{namespace}" in job_config.get_registrations():
total_expected += job_config.get_instances()
return total_expected | Get the number of expected instances for a namespace, based on the number
of instances set to run on that namespace as specified in service configuration files.
:param service: The service's name
:param namespace: The namespace for that service to check
instance_type_class: The type of the instance, options are e.g. KubernetesDeploymentConfig,
:param soa_dir: The SOA configuration directory to read from
:returns: An integer value of the # of expected instances for the namespace | get_expected_instance_count_for_namespace | python | Yelp/paasta | paasta_tools/long_running_service_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py | Apache-2.0 |
def reserve_unique_mac_address(lock_directory):
"""Pick and reserve a unique mac address for a container
returns (mac_address, lockfile)
where the mac address is a string in the form of 00:00:00:00:00:00
and lockfile is a file object that holds an exclusive lock
"""
for x in range(100):
random_hex = "{:08x}".format(random.getrandbits(32))
mac_address = ":".join(
MAC_ADDRESS_PREFIX
+ (random_hex[0:2], random_hex[2:4], random_hex[4:6], random_hex[6:8])
)
lock_filepath = os.path.join(lock_directory, mac_address)
lock_file = obtain_lock(lock_filepath)
if lock_file is not None:
return (mac_address, lock_file)
raise MacAddressException("Unable to pick unique MAC address") | Pick and reserve a unique mac address for a container
returns (mac_address, lockfile)
where the mac address is a string in the form of 00:00:00:00:00:00
and lockfile is a file object that holds an exclusive lock
| reserve_unique_mac_address | python | Yelp/paasta | paasta_tools/mac_address.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mac_address.py | Apache-2.0 |
def obtain_lock(lock_filepath):
    """Open and obtain a flock on the parameter. Returns a file if successful, None if not

    :param lock_filepath: path of the lock file to open and flock
    :returns: the open file object holding an exclusive lock, or None if the
        lock is already held elsewhere
    :raises OSError: for any flock failure other than EAGAIN
    """
    lock_file = open(lock_filepath, "w")
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return lock_file
    except IOError as err:
        # Close the file on *every* failure path; the original leaked the
        # descriptor when re-raising unexpected errors.
        lock_file.close()
        if err.errno != errno.EAGAIN:
            raise
        return None
def base_api(mesos_config_path: Optional[str] = None):
    """Helper function for making all API requests

    :returns: a function that can be called to make a request
    """
    leader = get_mesos_leader(mesos_config_path)

    def execute_request(method, endpoint, timeout=(3, 2), **kwargs):
        url = "http://{}:{}{}".format(leader, MESOS_MASTER_PORT, endpoint)
        session = Session()
        session.auth = (get_principal(), get_secret())
        prepared = session.prepare_request(Request(method, url, **kwargs))
        try:
            response = session.send(prepared, timeout=timeout)
            response.raise_for_status()
        except HTTPError:
            raise HTTPError("Error executing API request calling %s." % url)
        return response

    return execute_request
:returns: a function that can be called to make a request
| base_api | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def master_api(mesos_config_path: Optional[str] = None):
    """Helper function for making API requests to the /master API endpoints

    :returns: a function that can be called to make a request to /master
    """

    def execute_master_api_request(method, endpoint, **kwargs):
        request_fn = base_api(mesos_config_path=mesos_config_path)
        return request_fn(method, "/master" + endpoint, **kwargs)

    return execute_master_api_request
:returns: a function that can be called to make a request to /master
| master_api | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def reserve_api():
    """Helper function for making API requests to the /reserve API endpoints

    :returns: a function that can be called to make a request to /reserve
    """

    def execute_reserve_api_request(method, endpoint, **kwargs):
        request_fn = master_api()
        return request_fn(method, "/reserve" + endpoint, **kwargs)

    return execute_reserve_api_request
:returns: a function that can be called to make a request to /reserve
| reserve_api | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def unreserve_api():
    """Helper function for making API requests to the /unreserve API endpoints

    :returns: a function that can be called to make a request to /unreserve
    """

    def execute_unreserve_api_request(method, endpoint, **kwargs):
        request_fn = master_api()
        return request_fn(method, "/unreserve" + endpoint, **kwargs)

    return execute_unreserve_api_request
:returns: a function that can be called to make a request to /unreserve
| unreserve_api | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def maintenance_api():
    """Helper function for making API requests to the /master/maintenance API endpoints

    :returns: a function that can be called to make a request to /master/maintenance
    """

    def execute_schedule_api_request(method, endpoint, **kwargs):
        request_fn = master_api()
        # Maintenance calls can be slow, so use a longer read timeout.
        return request_fn(method, "/maintenance" + endpoint, timeout=(3, 10), **kwargs)

    return execute_schedule_api_request
:returns: a function that can be called to make a request to /master/maintenance
| maintenance_api | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_schedule_client():
    """Helper function for making API requests to the /master/maintenance/schedule API endpoints

    :returns: a function that can be called to make a request to /master/maintenance/schedule
    """

    def execute_schedule_api_request(method, endpoint, **kwargs):
        request_fn = maintenance_api()
        return request_fn(method, "/schedule" + endpoint, **kwargs)

    return execute_schedule_api_request
:returns: a function that can be called to make a request to /master/maintenance/schedule
| get_schedule_client | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_maintenance_schedule():
    """Makes a GET_MAINTENANCE_SCHEDULE request to the operator api

    :returns: a GET_MAINTENANCE_SCHEDULE response
    """
    return operator_api()(data={"type": "GET_MAINTENANCE_SCHEDULE"})
:returns: a GET_MAINTENANCE_SCHEDULE response
| get_maintenance_schedule | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_maintenance_status(mesos_config_path: Optional[str] = None):
    """Makes a GET_MAINTENANCE_STATUS request to the operator api

    :returns: a GET_MAINTENANCE_STATUS response
    """
    return operator_api(mesos_config_path=mesos_config_path)(
        data={"type": "GET_MAINTENANCE_STATUS"}
    )
:returns: a GET_MAINTENANCE_STATUS response
| get_maintenance_status | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def schedule():
    """Get the Mesos maintenance schedule. This contains hostname/ip mappings and their maintenance window.

    :returns: GET_MAINTENANCE_SCHEDULE response text
    :raises requests.exceptions.HTTPError: if the schedule request fails
    """
    try:
        # Avoid shadowing this function's own name with the response local.
        response = get_maintenance_schedule()
    except HTTPError as err:
        # Chain the original error so the underlying status code/URL survive.
        raise HTTPError("Error getting maintenance schedule.") from err
    return response.text
:returns: GET_MAINTENANCE_SCHEDULE response text
| schedule | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_hosts_with_state(
    state, system_paasta_config: Optional[SystemPaastaConfig] = None
) -> List[str]:
    """Helper function to check the maintenance status and return all hosts
    listed as being in a current state

    :param state: State we are interested in ('down_machines' or 'draining_machines')
    :returns: A list of hostnames in the specified state or an empty list if no machines
    :raises requests.exceptions.HTTPError: if the status request fails
    """
    mesos_config_path = get_mesos_config_path(system_paasta_config)
    try:
        status = get_maintenance_status(mesos_config_path).json()
        status = status["get_maintenance_status"]["status"]
    except HTTPError as err:
        raise HTTPError("Error getting maintenance status.") from err
    # Guard against a missing *or empty* machine list: the original code
    # inspected status[state][0] and raised IndexError on an empty list.
    machines = status.get(state) if status else None
    if not machines:
        return []
    # Two response shapes exist: entries nested under "id" or flat.
    if "id" in machines[0]:
        return [machine["id"]["hostname"] for machine in machines]
    else:
        return [machine["hostname"] for machine in machines]
listed as being in a current state
:param state: State we are interested in ('down_machines' or 'draining_machines')
:returns: A list of hostnames in the specified state or an empty list if no machines
| get_hosts_with_state | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_draining_hosts(system_paasta_config: Optional[SystemPaastaConfig] = None):
    """Returns a list of hostnames that are marked as draining

    :returns: a list of strings representing hostnames
    """
    return get_hosts_with_state(
        state="draining_machines",
        system_paasta_config=system_paasta_config,
    )
:returns: a list of strings representing hostnames
| get_draining_hosts | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_hosts_forgotten_draining(grace=0):
    """Find hosts that are still marked as draining (rather than down) after the start
    of their maintenance window.

    :param grace: integer number of nanoseconds to allow a host to be left in the draining
        state after the start of its maintenance window before we consider it forgotten.
    :returns: a list of hostnames of hosts forgotten draining
    """
    draining_hosts = get_draining_hosts()
    log.debug("draining_hosts: %s" % draining_hosts)

    hosts_past_maintenance_start = get_hosts_past_maintenance_start(grace=grace)
    log.debug("hosts_past_maintenance_start: %s" % hosts_past_maintenance_start)

    # A host both draining and past its window start was never downed.
    forgotten_draining = list(
        set(draining_hosts) & set(hosts_past_maintenance_start)
    )
    log.debug("forgotten_draining: %s" % forgotten_draining)
    return forgotten_draining
of their maintenance window.
:param grace: integer number of nanoseconds to allow a host to be left in the draining
state after the start of its maintenance window before we consider it forgotten.
:returns: a list of hostnames of hosts forgotten draining
| get_hosts_forgotten_draining | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_hosts_forgotten_down(grace=0):
    """Find hosts that are still marked as down (rather than up) after the end
    of their maintenance window.

    :param grace: integer number of nanoseconds to allow a host to be left in the down
        state after the end of its maintenance window before we consider it forgotten.
    :returns: a list of hostnames of hosts forgotten down
    """
    down_hosts = get_down_hosts()
    log.debug("down_hosts: %s" % down_hosts)

    hosts_past_maintenance_end = get_hosts_past_maintenance_end(grace=grace)
    log.debug("hosts_past_maintenance_end: %s" % hosts_past_maintenance_end)

    # A host both down and past its window end was never brought back up.
    forgotten_down = list(set(down_hosts) & set(hosts_past_maintenance_end))
    log.debug("forgotten_down: %s" % forgotten_down)
    return forgotten_down
of their maintenance window.
:param grace: integer number of nanoseconds to allow a host to be left in the down
state after the end of its maintenance window before we consider it forgotten.
:returns: a list of hostnames of hosts forgotten down
| get_hosts_forgotten_down | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def parse_timedelta(value):
    """Return the delta in nanoseconds.

    :param value: a string containing a time format supported by :mod:`pytimeparse`
    :returns: an integer (or float) representing the specified delta in nanoseconds
    :raises argparse.ArgumentTypeError: if ``value`` cannot be parsed
    """
    error_msg = "'%s' is not a valid time expression" % value
    try:
        seconds = timeparse.timeparse(value)
    except TypeError:
        # timeparse raises TypeError for non-string input.
        raise argparse.ArgumentTypeError(error_msg)
    if seconds is None:
        # timeparse returns None for unparseable strings.  Compare against
        # None (not falsiness) so a legitimate zero duration such as "0s"
        # is not rejected as invalid.
        raise argparse.ArgumentTypeError(error_msg)
    return seconds_to_nanoseconds(seconds)
:param value: a string containing a time format supported by :mod:`pytimeparse`
:returns: an integer (or float) representing the specified delta in nanoseconds
| parse_timedelta | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def parse_datetime(value):
    """Return the datetime in nanoseconds.

    :param value: a string containing a datetime supported by :mod:`dateutil.parser`
    :returns: an integer (or float) representing the specified datetime in nanoseconds
    :raises argparse.ArgumentTypeError: if ``value`` cannot be parsed
    """
    error_msg = "'%s' is not a valid datetime expression" % value
    try:
        parsed = parser.parse(value)
    except Exception:
        raise argparse.ArgumentTypeError(error_msg)
    if not parsed:
        raise argparse.ArgumentTypeError(error_msg)
    return datetime_to_nanoseconds(parsed)
:param value: a string containing a datetime supported by :mod:`dateutil.parser`
:returns: an integer (or float) representing the specified datetime in nanoseconds
| parse_datetime | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def build_maintenance_payload(hostnames, maint_type):
    """Creates the JSON payload necessary to bring the specified hostnames up/down for maintenance.

    :param hostnames: a list of hostnames
    :param maint_type: the maintenance operation name, e.g. 'start_maintenance'
    :returns: a dictionary representing the list of machines to bring up/down for maintenance
    """
    machines = {"machines": get_machine_ids(hostnames)}
    return {"type": maint_type.upper(), maint_type.lower(): machines}
:param hostnames: a list of hostnames
:returns: a dictionary representing the list of machines to bring up/down for maintenance
| build_maintenance_payload | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def hostnames_to_components(hostnames, resolve=False):
    """Converts a list of 'host[|ip]' entries into namedtuples containing 'host' and 'ip' attributes,
    optionally performing a DNS lookup to resolve the hostname into an IP address

    :param hostnames: a list of hostnames where each hostname can be of the form 'host[|ip]'
    :param resolve: boolean representing whether to lookup the IP address corresponding to the hostname via DNS
    :returns: a namedtuple containing the hostname and IP components
    """
    components = []
    for hostname in hostnames:
        # An entry of the form "hostname|ipaddress" carries its own IP,
        # which lets callers skip the DNS query entirely.
        if "|" in hostname:
            host, ip = hostname.split("|")
            components.append(Hostname(host=host, ip=ip))
            continue
        resolved_ip = None
        if resolve:
            try:
                resolved_ip = gethostbyname(hostname)
            except gaierror:
                log.error(f"Failed to resolve IP for {hostname}, continuing regardless")
                continue
        components.append(Hostname(host=hostname, ip=resolved_ip))
    return components
optionally performing a DNS lookup to resolve the hostname into an IP address
:param hostnames: a list of hostnames where each hostname can be of the form 'host[|ip]'
:param resolve: boolean representing whether to lookup the IP address corresponding to the hostname via DNS
:returns: a namedtuple containing the hostname and IP components
| hostnames_to_components | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_machine_ids(hostnames):
    """Helper function to convert a list of hostnames into a JSON list of hostname/ip pairs.

    :param hostnames: a list of hostnames
    :returns: a dictionary representing the list of machines to bring up/down for maintenance
    """
    return [
        {"hostname": component.host, "ip": component.ip}
        for component in hostnames_to_components(hostnames, resolve=True)
    ]
:param hostnames: a list of hostnames
:returns: a dictionary representing the list of machines to bring up/down for maintenance
| get_machine_ids | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def build_reservation_payload(resources):
    """Creates the JSON payload needed to dynamically (un)reserve resources in mesos.

    :param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
    :returns: a dictionary that can be sent to Mesos to (un)reserve resources
    """
    return [
        {
            "name": resource.name,
            "type": "SCALAR",
            "scalar": {"value": resource.amount},
            "role": MAINTENANCE_ROLE,
            "reservation": {"principal": get_principal()},
        }
        for resource in resources
    ]
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: a dictionary that can be sent to Mesos to (un)reserve resources
| build_reservation_payload | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def build_maintenance_schedule_payload(
    hostnames, start=None, duration=None, drain=True
):
    """Creates the JSON payload needed to (un)schedule maintenance on the specified hostnames.

    :param hostnames: a list of hostnames
    :param start: the time to start the maintenance, represented as number of nanoseconds since the epoch
    :param duration: length of the maintenance window, represented as number of nanoseconds since the epoch
    :param drain: boolean to note whether we are draining (True) the specified hosts or undraining (False) them
    :returns: a dictionary that can be sent to Mesos to (un)schedule maintenance
    """
    schedule = get_maintenance_schedule().json()["get_maintenance_schedule"]["schedule"]
    machine_ids = get_machine_ids(hostnames)

    if drain:
        window = {
            "machine_ids": machine_ids,
            "unavailability": {
                "start": {"nanoseconds": int(start)},
                "duration": {"nanoseconds": int(duration)},
            },
        }

    if schedule:
        # Strip the affected machines out of any existing windows so a new
        # window (if draining) replaces the old one.  Build new lists instead
        # of removing from the lists being iterated: the original code mutated
        # both lists mid-iteration, which can skip elements and leave stale
        # machine entries in the schedule.
        remaining_windows = []
        for existing_window in schedule["windows"]:
            kept_machine_ids = [
                machine_id
                for machine_id in existing_window["machine_ids"]
                if machine_id not in machine_ids
            ]
            if kept_machine_ids:
                existing_window["machine_ids"] = kept_machine_ids
                remaining_windows.append(existing_window)
        if drain:
            windows = remaining_windows + [window]
        else:
            windows = remaining_windows
    elif drain:
        windows = [window]
    else:
        windows = []

    return {
        "type": "UPDATE_MAINTENANCE_SCHEDULE",
        "update_maintenance_schedule": {"schedule": {"windows": windows}},
    }
:param hostnames: a list of hostnames
:param start: the time to start the maintenance, represented as number of nanoseconds since the epoch
:param duration: length of the maintenance window, represented as number of nanoseconds since the epoch
:param drain: boolean to note whether we are draining (True) the specified hosts or undraining (False) them
:returns: a dictionary that can be sent to Mesos to (un)schedule maintenance
| build_maintenance_schedule_payload | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def load_credentials(mesos_secrets="/nail/etc/mesos-slave-secret"):
    """Loads the mesos-slave credentials from the specified file. These credentials will be used for all
    maintenance API requests.

    :param mesos_secrets: optional argument specifying the path to the file containing the mesos-slave credentials
    :returns: a tuple of the form (username, password)
    """
    try:
        with open(mesos_secrets) as data_file:
            data = json.load(data_file)
    except EnvironmentError:
        log.error(
            "maintenance calls must be run on a Mesos slave containing valid credentials (%s)"
            % mesos_secrets
        )
        raise
    try:
        principal = data["principal"]
        secret = data["secret"]
    except KeyError:
        log.error(
            "%s does not contain Mesos slave credentials in the expected format. "
            "See http://mesos.apache.org/documentation/latest/authentication/ for details"
            % mesos_secrets
        )
        raise
    return Credentials(file=mesos_secrets, principal=principal, secret=secret)
maintenance API requests.
:param mesos_secrets: optional argument specifying the path to the file containing the mesos-slave credentials
:returns: a tuple of the form (username, password)
| load_credentials | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.