code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def reserve(slave_id, resources):
"""Dynamically reserve resources in mesos to prevent tasks from using them.
:param slave_id: the id of the mesos slave
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: the text of the response from the Mesos operator API
"""
log.info(f"Dynamically reserving resources on {slave_id}: {resources}")
payload = _make_operator_reservation_request_payload(
slave_id=slave_id,
payload=build_reservation_payload(resources),
request_type="reserve_resources",
)
client_fn = operator_api()
try:
print(payload)
reserve_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error adding dynamic reservation.")
return reserve_output | Dynamically reserve resources in mesos to prevent tasks from using them.
:param slave_id: the id of the mesos slave
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: the text of the response from the Mesos operator API
| reserve | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
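A minimal usage sketch for the reserve call above; the Resource named tuple (name, amount) is the one referenced in the docstring, and the agent id shown is hypothetical:

# Hypothetical illustration: reserve 2 CPUs and 1024 MB of memory on one agent.
resources = [
    Resource(name="cpus", amount=2),
    Resource(name="mem", amount=1024),
]
reserve(slave_id="a1b2c3d4-S0", resources=resources)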
def unreserve(slave_id, resources):
"""Dynamically unreserve resources in mesos to allow tasks to using them.
:param slave_id: the id of the mesos slave
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: the text of the response from the Mesos operator API
"""
log.info(f"Dynamically unreserving resources on {slave_id}: {resources}")
payload = _make_operator_reservation_request_payload(
slave_id=slave_id,
payload=build_reservation_payload(resources),
request_type="unreserve_resources",
)
client_fn = operator_api()
try:
unreserve_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error adding dynamic unreservation.")
return unreserve_output | Dynamically unreserve resources in mesos to allow tasks to use them.
:param slave_id: the id of the mesos slave
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: the text of the response from the Mesos operator API
| unreserve | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def components_to_hosts(components):
"""Convert a list of Component namedtuples to a list of their hosts
:param components: a list of Component namedtuples
:returns: list of the hosts associated with each Component
"""
hosts = []
for component in components:
hosts.append(component.host)
return hosts | Convert a list of Component namedtuples to a list of their hosts
:param components: a list of Component namedtuples
:returns: list of the hosts associated with each Component
| components_to_hosts | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def reserve_all_resources(hostnames):
"""Dynamically reserve all available resources on the specified hosts
:param hostnames: list of hostnames to reserve resources on
"""
mesos_state = a_sync.block(get_mesos_master().state_summary)
components = hostnames_to_components(hostnames)
hosts = components_to_hosts(components)
known_slaves = [
slave for slave in mesos_state["slaves"] if slave["hostname"] in hosts
]
for slave in known_slaves:
hostname = slave["hostname"]
log.info("Reserving all resources on %s" % hostname)
slave_id = slave["id"]
resources = []
for resource in ["disk", "mem", "cpus", "gpus"]:
free_resource = (
slave["resources"][resource] - slave["used_resources"][resource]
)
for role in slave["reserved_resources"]:
free_resource -= slave["reserved_resources"][role][resource]
resources.append(Resource(name=resource, amount=free_resource))
try:
reserve(slave_id=slave_id, resources=resources)
except HTTPError:
raise HTTPError(
f"Failed reserving all of the resources on {hostname} ({slave_id}). Aborting."
) | Dynamically reserve all available resources on the specified hosts
:param hostnames: list of hostnames to reserve resources on
| reserve_all_resources | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def unreserve_all_resources(hostnames):
"""Dynamically unreserve all available resources on the specified hosts
:param hostnames: list of hostnames to unreserve resources on
"""
mesos_state = a_sync.block(get_mesos_master().state_summary)
components = hostnames_to_components(hostnames)
hosts = components_to_hosts(components)
known_slaves = [
slave for slave in mesos_state["slaves"] if slave["hostname"] in hosts
]
for slave in known_slaves:
hostname = slave["hostname"]
log.info("Unreserving all resources on %s" % hostname)
slave_id = slave["id"]
resources = []
if MAINTENANCE_ROLE in slave["reserved_resources"]:
for resource in ["disk", "mem", "cpus", "gpus"]:
reserved_resource = slave["reserved_resources"][MAINTENANCE_ROLE][
resource
]
resources.append(Resource(name=resource, amount=reserved_resource))
try:
unreserve(slave_id=slave_id, resources=resources)
except HTTPError:
raise HTTPError(
f"Failed unreserving all of the resources on {hostname} ({slave_id}). Aborting."
) | Dynamically unreserve all available resources on the specified hosts
:param hostnames: list of hostnames to unreserve resources on
| unreserve_all_resources | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def drain(hostnames, start, duration, reserve_resources=True):
"""Schedules a maintenance window for the specified hosts and marks them as draining.
:param hostnames: a list of hostnames
:param start: the time to start the maintenance, represented as number of nanoseconds since the epoch
:param duration: length of the maintenance window, represented as a number of nanoseconds
:param reserve_resources: bool setting to also reserve the free resources on the agent before the drain call
:returns: None
"""
log.info("Draining: %s" % hostnames)
if reserve_resources:
try:
reserve_all_resources(hostnames)
except HTTPError as e:
log.warning("Failed to reserve resources, will continue to drain: %s" % e)
payload = build_maintenance_schedule_payload(hostnames, start, duration, drain=True)
client_fn = operator_api()
try:
drain_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance drain.")
return drain_output | Schedules a maintenance window for the specified hosts and marks them as draining.
:param hostnames: a list of hostnames
:param start: the time to start the maintenance, represented as number of nanoseconds since the epoch
:param duration: length of the maintenance window, represented as a number of nanoseconds
:param reserve_resources: bool setting to also reserve the free resources on the agent before the drain call
:returns: None
| drain | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
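A rough sketch of how a caller might produce the nanosecond start and duration values the drain call expects; the hostnames are hypothetical:

import datetime

# Drain two hosts starting now, for a one-hour maintenance window.
start_ns = int(datetime.datetime.now().timestamp() * 1_000_000_000)
one_hour_ns = 60 * 60 * 1_000_000_000
drain(
    hostnames=["host1.example.com", "host2.example.com"],
    start=start_ns,
    duration=one_hour_ns,
)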
def undrain(hostnames, unreserve_resources=True):
"""Unschedules the maintenance window for the specified hosts and unmarks them as draining. They are ready for
regular use.
:param hostnames: a list of hostnames
:param unreserve_resources: bool setting to also unreserve resources on the agent before the undrain call
:returns: None
"""
log.info("Undraining: %s" % hostnames)
if unreserve_resources:
try:
unreserve_all_resources(hostnames)
except HTTPError as e:
log.warning(
"Failed to unreserve resources, will continue to undrain: %s" % e
)
payload = build_maintenance_schedule_payload(hostnames, drain=False)
client_fn = operator_api()
try:
undrain_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance undrain.")
return undrain_output | Unschedules the maintenance window for the specified hosts and unmarks them as draining. They are ready for
regular use.
:param hostnames: a list of hostnames
:param unreserve_resources: bool setting to also unreserve resources on the agent before the undrain call
:returns: None
| undrain | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def down(hostnames):
"""Marks the specified hostnames as being down for maintenance, and makes them unavailable for use.
:param hostnames: a list of hostnames
:returns: None
"""
log.info("Bringing down: %s" % hostnames)
payload = build_maintenance_payload(hostnames, "start_maintenance")
client_fn = operator_api()
try:
down_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance down.")
return down_output | Marks the specified hostnames as being down for maintenance, and makes them unavailable for use.
:param hostnames: a list of hostnames
:returns: None
| down | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def up(hostnames):
"""Marks the specified hostnames as no longer being down for maintenance, and makes them available for use.
:param hostnames: a list of hostnames
:returns: None
"""
log.info("Bringing up: %s" % hostnames)
payload = build_maintenance_payload(hostnames, "stop_maintenance")
client_fn = operator_api()
try:
up_output = client_fn(data=payload).text
except HTTPError:
raise HTTPError("Error performing maintenance up.")
return up_output | Marks the specified hostnames as no longer being down for maintenance, and makes them available for use.
:param hostnames: a list of hostnames
:returns: None
| up | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def raw_status():
"""Get the Mesos maintenance status. This contains hostname/ip mappings for hosts that are either marked as being
down for maintenance or draining.
:returns: Response Object containing status
"""
try:
status = get_maintenance_status()
except HTTPError:
raise HTTPError("Error performing maintenance status.")
return status | Get the Mesos maintenance status. This contains hostname/ip mappings for hosts that are either marked as being
down for maintenance or draining.
:returns: Response Object containing status
| raw_status | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def friendly_status():
"""Display the Mesos maintenance status in a human-friendly way.
:returns: Text representation of the human-friendly status
"""
status = raw_status().json()["get_maintenance_status"]["status"]
ret = ""
for machine in status.get("draining_machines", []):
ret += "{} ({}): Draining\n".format(
machine["id"]["hostname"], machine["id"]["ip"]
)
for machine in status.get("down_machines", []):
ret += "{} ({}): Down\n".format(machine["hostname"], machine["ip"])
return ret | Display the Mesos maintenance status in a human-friendly way.
:returns: Text representation of the human-friendly status
| friendly_status | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def is_host_drained(hostname):
"""Checks if a host has drained successfully by confirming it is
draining and currently running 0 tasks
:param hostname: hostname to check
:returns: True or False
"""
return (
is_host_draining(hostname=hostname)
and get_count_running_tasks_on_slave(hostname) == 0
) | Checks if a host has drained successfully by confirming it is
draining and currently running 0 tasks
:param hostname: hostname to check
:returns: True or False
| is_host_drained | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_hosts_past_maintenance_start(grace=0):
"""Get a list of hosts that have reached the start of their maintenance window
:param grace: integer number of nanoseconds to allow a host to be left in the draining
state after the start of its maintenance window before we consider it past its maintenance start
:returns: List of hostnames
"""
schedules = get_maintenance_schedule().json()["get_maintenance_schedule"][
"schedule"
]
current_time = datetime_to_nanoseconds(now()) - grace
ret = []
if "windows" in schedules:
for window in schedules["windows"]:
if window["unavailability"]["start"]["nanoseconds"] < current_time:
ret += [host["hostname"] for host in window["machine_ids"]]
log.debug(f"Hosts past maintenance start: {ret}")
return ret | Get a list of hosts that have reached the start of their maintenance window
:param grace: integer number of nanoseconds to allow a host to be left in the draining
state after the start of its maintenance window before we consider it past its maintenance start
:returns: List of hostnames
| get_hosts_past_maintenance_start | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
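For illustration, the grace argument is also expressed in nanoseconds; a hypothetical one-hour grace period looks like this:

one_hour_ns = 60 * 60 * 1_000_000_000
# Only hosts whose maintenance window started more than an hour ago are returned.
get_hosts_past_maintenance_start(grace=one_hour_ns)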
def get_hosts_past_maintenance_end(grace=0):
"""Get a list of hosts that have reached the end of their maintenance window
:param grace: integer number of nanoseconds to allow a host to be left in the down
state after the end of its maintenance window before we consider it past its maintenance end
:returns: List of hostnames
"""
schedules = get_maintenance_schedule().json()["get_maintenance_schedule"][
"schedule"
]
current_time = datetime_to_nanoseconds(now()) - grace
ret = []
if "windows" in schedules:
for window in schedules["windows"]:
end = (
window["unavailability"]["start"]["nanoseconds"]
+ window["unavailability"]["duration"]["nanoseconds"]
)
if end < current_time:
ret += [host["hostname"] for host in window["machine_ids"]]
log.debug(f"Hosts past maintenance end: {ret}")
return ret | Get a list of hosts that have reached the end of their maintenance window
:param grace: integer number of nanoseconds to allow a host to be left in the down
state after the end of its maintenance window before we consider it past its maintenance end
:returns: List of hostnames
| get_hosts_past_maintenance_end | python | Yelp/paasta | paasta_tools/mesos_maintenance.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py | Apache-2.0 |
def get_mesos_config_path(
system_paasta_config: Optional[SystemPaastaConfig] = None,
) -> str:
"""
Determine where to find the configuration for mesos-cli.
"""
if system_paasta_config is None:
system_paasta_config = load_system_paasta_config()
return system_paasta_config.get_mesos_cli_config().get(
"path", DEFAULT_MESOS_CLI_CONFIG_LOCATION
) |
Determine where to find the configuration for mesos-cli.
| get_mesos_config_path | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_mesos_leader(mesos_config_path: Optional[str] = None) -> str:
"""Get the current mesos-master leader's hostname.
Attempts to determine this by using mesos.cli to query ZooKeeper.
:returns: The current mesos-master hostname"""
try:
url = get_mesos_master(mesos_config_path).host
except mesos_exceptions.MasterNotAvailableException:
log.debug("mesos.cli failed to provide the master host")
raise
log.debug("mesos.cli thinks the master host is: %s" % url)
hostname = urlparse(url).hostname
log.debug("The parsed master hostname is: %s" % hostname)
# This check is necessary, as if we parse a value such as 'localhost:5050',
# it won't have a hostname attribute
if hostname:
try:
host = socket.gethostbyaddr(hostname)[0]
fqdn = socket.getfqdn(host)
except (socket.error, socket.herror, socket.gaierror, socket.timeout):
log.debug("Failed to convert mesos leader hostname to fqdn!")
raise
log.debug("Mesos Leader: %s" % fqdn)
return fqdn
else:
raise ValueError("Expected to receive a valid URL, got: %s" % url) | Get the current mesos-master leader's hostname.
Attempts to determine this by using mesos.cli to query ZooKeeper.
:returns: The current mesos-master hostname | get_mesos_leader | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def find_mesos_leader(cluster):
"""Find the leader with redirect given one mesos master."""
master = (
load_system_paasta_config().get_cluster_fqdn_format().format(cluster=cluster)
)
if master is None:
raise ValueError("Mesos master is required to find leader")
url = f"http://{master}:{MESOS_MASTER_PORT}/redirect"
try:
# Timeouts here are for connect, read
response = requests.get(url, timeout=(5, 30))
except Exception as e:
raise MesosLeaderUnavailable(e)
hostname = urlparse(response.url).hostname
return f"{hostname}:{MESOS_MASTER_PORT}" | Find the leader with redirect given one mesos master. | find_mesos_leader | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_current_tasks(job_id: str) -> List[Task]:
"""Returns a list of all the tasks with a given job id.
:param job_id: the job id of the tasks.
:return tasks: a list of mesos.cli.Task.
"""
mesos_master = get_mesos_master()
framework_tasks = await mesos_master.tasks(fltr=job_id, active_only=False)
return framework_tasks | Returns a list of all the tasks with a given job id.
:param job_id: the job id of the tasks.
:return tasks: a list of mesos.cli.Task.
| get_current_tasks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_running_tasks_from_frameworks(job_id=""):
"""Will include tasks from active and completed frameworks
but NOT orphaned tasks
"""
active_framework_tasks = await get_current_tasks(job_id)
running_tasks = filter_running_tasks(active_framework_tasks)
return running_tasks | Will include tasks from active and completed frameworks
but NOT orphaned tasks
| get_running_tasks_from_frameworks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_all_running_tasks() -> Collection[Task]:
"""Will include all running tasks; for now orphans are not included"""
framework_tasks = await get_current_tasks("")
mesos_master = get_mesos_master()
framework_tasks += await mesos_master.orphan_tasks()
running_tasks = filter_running_tasks(framework_tasks)
return running_tasks | Will include all running tasks; for now orphans are not included | get_all_running_tasks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_cached_list_of_all_current_tasks():
"""Returns a cached list of all mesos tasks.
This function is used by 'paasta status' and 'paasta_serviceinit status'
to avoid re-querying mesos master and re-parsing json to get mesos.Task objects.
The async_ttl_cache decorator caches the list for 600 seconds.
ttl doesn't really matter for this function because when we run 'paasta status'
the corresponding HTTP request to mesos master is cached by requests_cache.
:return tasks: a list of mesos.Task
"""
return await get_current_tasks("") | Returns a cached list of all mesos tasks.
This function is used by 'paasta status' and 'paasta_serviceinit status'
to avoid re-querying mesos master and re-parsing json to get mesos.Task objects.
The async_ttl_cache decorator caches the list for 600 seconds.
ttl doesn't really matter for this function because when we run 'paasta status'
the corresponding HTTP request to mesos master is cached by requests_cache.
:return tasks: a list of mesos.Task
| get_cached_list_of_all_current_tasks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_cached_list_of_running_tasks_from_frameworks():
"""Returns a cached list of all running mesos tasks.
See the docstring for get_cached_list_of_all_current_tasks().
:return tasks: a list of mesos.Task
"""
return [
task
for task in filter_running_tasks(await get_cached_list_of_all_current_tasks())
] | Returns a cached list of all running mesos tasks.
See the docstring for get_cached_list_of_all_current_tasks().
:return tasks: a list of mesos.Task
| get_cached_list_of_running_tasks_from_frameworks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_cached_list_of_not_running_tasks_from_frameworks():
"""Returns a cached list of mesos tasks that are NOT running.
See the docstring for get_cached_list_of_all_current_tasks().
:return tasks: a list of mesos.Task"""
return [
task
for task in filter_not_running_tasks(
await get_cached_list_of_all_current_tasks()
)
] | Returns a cached list of mesos tasks that are NOT running.
See the docstring for get_cached_list_of_all_current_tasks().
:return tasks: a list of mesos.Task | get_cached_list_of_not_running_tasks_from_frameworks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_non_running_tasks_from_frameworks(job_id: str = "") -> List[Task]:
"""Will include tasks from active and completed frameworks
but NOT orphaned tasks
"""
active_framework_tasks = await get_current_tasks(job_id)
not_running_tasks = filter_not_running_tasks(active_framework_tasks)
return not_running_tasks | Will include tasks from active and completed frameworks
but NOT orphaned tasks
| get_non_running_tasks_from_frameworks | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_first_status_timestamp_string(task: Task) -> str:
"""Gets the first status timestamp from a task id and returns a human
readable string with the local time and a humanized duration:
``2015-01-30T08:45 (an hour ago)``
"""
first_status_timestamp = get_first_status_timestamp(task)
if first_status_timestamp is None:
return "Unknown"
else:
first_status_datetime = datetime.datetime.fromtimestamp(first_status_timestamp)
return "{} ({})".format(
first_status_datetime.strftime("%Y-%m-%dT%H:%M"),
humanize.naturaltime(first_status_datetime),
) | Gets the first status timestamp from a task id and returns a human
readable string with the local time and a humanized duration:
``2015-01-30T08:45 (an hour ago)``
| get_first_status_timestamp_string | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_cpu_usage(task: Task) -> str:
"""Calculates a metric of used_cpu/allocated_cpu
To do this, we take the total number of cpu-seconds the task has consumed,
(the sum of system and user time), OVER the total cpu time the task
has been allocated.
The total time a task has been allocated is the total time the task has
been running (https://github.com/mesosphere/mesos/blob/0b092b1b0/src/webui/master/static/js/controllers.js#L140)
multiplied by the "shares" a task has.
"""
try:
start_time = round(task["statuses"][0]["timestamp"])
current_time = int(datetime.datetime.now().strftime("%s"))
duration_seconds = current_time - start_time
cpu_shares = await get_cpu_shares(task)
allocated_seconds = duration_seconds * cpu_shares
task_stats = await task.stats()
used_seconds = task_stats.get("cpus_system_time_secs", 0.0) + task_stats.get(
"cpus_user_time_secs", 0.0
)
if allocated_seconds == 0:
return "Undef"
percent = round(100 * (used_seconds / allocated_seconds), 1)
percent_string = "%s%%" % percent
if percent > 90:
return PaastaColors.red(percent_string)
else:
return percent_string
except (AttributeError, SlaveDoesNotExist):
return "None"
except TimeoutError:
return "Timed Out" | Calculates a metric of used_cpu/allocated_cpu
To do this, we take the total number of cpu-seconds the task has consumed,
(the sum of system and user time), OVER the total cpu time the task
has been allocated.
The total time a task has been allocated is the total time the task has
been running (https://github.com/mesosphere/mesos/blob/0b092b1b0/src/webui/master/static/js/controllers.js#L140)
multiplied by the "shares" a task has.
| get_cpu_usage | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
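A hypothetical worked example of the ratio the function computes, with made-up numbers:

# A task with 0.5 CPU shares that has been running for 600 seconds has been
# allocated 600 * 0.5 = 300 cpu-seconds. If it consumed 45 cpu-seconds of user
# time and 15 of system time, usage is 100 * (60 / 300) = 20.0, reported as "20.0%".
duration_seconds = 600
cpu_shares = 0.5
allocated_seconds = duration_seconds * cpu_shares
used_seconds = 45.0 + 15.0
percent = round(100 * (used_seconds / allocated_seconds), 1)  # 20.0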
async def format_running_mesos_task_row(
task: Task, get_short_task_id: Callable[[str], str]
) -> Tuple[str, ...]:
"""Returns a pretty formatted string of a running mesos task attributes"""
short_task_id = get_short_task_id(task["id"])
short_hostname_future = asyncio.ensure_future(
results_or_unknown(get_short_hostname_from_task(task))
)
mem_usage_future = asyncio.ensure_future(results_or_unknown(get_mem_usage(task)))
cpu_usage_future = asyncio.ensure_future(results_or_unknown(get_cpu_usage(task)))
first_status_timestamp = get_first_status_timestamp_string(task)
await asyncio.wait([short_hostname_future, mem_usage_future, cpu_usage_future])
return (
short_task_id,
short_hostname_future.result(),
mem_usage_future.result(),
cpu_usage_future.result(),
first_status_timestamp,
) | Returns a pretty formatted string of a running mesos task's attributes | format_running_mesos_task_row | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def format_non_running_mesos_task_row(
task: Task, get_short_task_id: Callable[[str], str]
) -> Tuple[str, ...]:
"""Returns a pretty formatted string of a running mesos task attributes"""
return (
PaastaColors.grey(get_short_task_id(task["id"])),
PaastaColors.grey(await results_or_unknown(get_short_hostname_from_task(task))),
PaastaColors.grey(get_first_status_timestamp_string(task)),
PaastaColors.grey(task["state"]),
) | Returns a pretty formatted string of a non-running mesos task's attributes | format_non_running_mesos_task_row | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def zip_tasks_verbose_output(table, stdstreams):
"""Zip a list of strings (table) with a list of lists (stdstreams)
:param table: a formatted list of tasks
:param stdstreams: for each task, a list of lines from stdout/stderr tail
"""
if len(table) != len(stdstreams):
raise ValueError("Can only zip same-length lists")
output = []
for i in range(len(table)):
output.append(table[i])
output.extend([line for line in stdstreams[i]])
return output | Zip a list of strings (table) with a list of lists (stdstreams)
:param table: a formatted list of tasks
:param stdstreams: for each task, a list of lines from stdout/stderr tail
| zip_tasks_verbose_output | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
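A small illustration of the interleaving zip_tasks_verbose_output produces (the rows and tail lines are made up):

table = ["task-row-1", "task-row-2"]
stdstreams = [["  stdout line a"], ["  stdout line b", "  stderr line c"]]
zip_tasks_verbose_output(table, stdstreams)
# -> ["task-row-1", "  stdout line a", "task-row-2", "  stdout line b", "  stderr line c"]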
async def format_task_list(
tasks: Sequence[Task],
list_title: str,
table_header: Sequence[str],
get_short_task_id: Callable[[str], str],
format_task_row: Callable[
[Task, Callable[[str], str]], Awaitable[Union[Sequence[str], str]]
],
grey: bool,
tail_lines: int,
) -> List[str]:
"""Formats a list of tasks, returns a list of output lines
:param tasks: List of tasks as returned by get_*_tasks_from_all_frameworks.
:param list_title: 'Running Tasks:' or 'Non-Running Tasks'.
:param table_header: List of column names used in the tasks table.
:param get_short_task_id: A function which given a task_id returns a short task_id suitable for printing.
:param format_task_row: Formatting function, works on a task and a get_short_task_id function.
:param tail_lines (int): number of lines of stdout/stderr to tail, as obtained from the Mesos sandbox.
:param grey: If True, the list will be made less visually prominent.
:return output: Formatted output (list of output lines).
"""
if not grey:
def colorize(x):
return x
else:
def colorize(x):
return PaastaColors.grey(x)
output = []
output.append(colorize(" %s" % list_title))
table_rows: List[Union[str, Sequence[str]]] = [
[colorize(th) for th in table_header]
]
if tasks:
task_row_futures = [
asyncio.ensure_future(format_task_row(task, get_short_task_id))
for task in tasks
]
await asyncio.wait(task_row_futures)
for future in task_row_futures:
table_rows.append(future.result())
tasks_table = [" %s" % row for row in format_table(table_rows)]
if tail_lines == 0:
output.extend(tasks_table)
else:
stdstreams = []
for task in tasks:
stdstreams.append(
await format_stdstreams_tail_for_task(
task, get_short_task_id, nlines=tail_lines
)
)
output.append(tasks_table[0]) # header
output.extend(zip_tasks_verbose_output(tasks_table[1:], stdstreams))
return output | Formats a list of tasks, returns a list of output lines
:param tasks: List of tasks as returned by get_*_tasks_from_all_frameworks.
:param list_title: 'Running Tasks:' or 'Non-Running Tasks'.
:param table_header: List of column names used in the tasks table.
:param get_short_task_id: A function which given a task_id returns a short task_id suitable for printing.
:param format_task_row: Formatting function, works on a task and a get_short_task_id function.
:param tail_lines (int): number of lines of stdout/stderr to tail, as obtained from the Mesos sandbox.
:param grey: If True, the list will be made less visually prominent.
:return output: Formatted output (list of output lines).
| format_task_list | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def status_mesos_tasks_verbose(
filter_string: str, get_short_task_id: Callable[[str], str], tail_lines: int = 0
) -> str:
"""Returns detailed information about the mesos tasks for a service.
:param filter_string: An id used for looking up Mesos tasks
:param get_short_task_id: A function which given a
task_id returns a short task_id suitable for
printing.
:param tail_lines: int representing the number of lines of stdout/err to
report.
"""
output: List[str] = []
running_and_active_tasks = select_tasks_by_id(
await get_cached_list_of_running_tasks_from_frameworks(), filter_string
)
list_title = "Running Tasks:"
table_header = [
"Mesos Task ID",
"Host deployed to",
"Ram",
"CPU",
"Deployed at what localtime",
]
output.extend(
await format_task_list(
tasks=running_and_active_tasks,
list_title=list_title,
table_header=table_header,
get_short_task_id=get_short_task_id,
format_task_row=format_running_mesos_task_row,
grey=False,
tail_lines=tail_lines,
)
)
non_running_tasks = select_tasks_by_id(
await get_cached_list_of_not_running_tasks_from_frameworks(), filter_string
)
# Order the tasks by timestamp
non_running_tasks.sort(key=lambda task: get_first_status_timestamp_string(task))
non_running_tasks_ordered = list(reversed(non_running_tasks[-10:]))
list_title = "Non-Running Tasks"
table_header = [
"Mesos Task ID",
"Host deployed to",
"Deployed at what localtime",
"Status",
]
output.extend(
await format_task_list(
tasks=non_running_tasks_ordered,
list_title=list_title,
table_header=table_header,
get_short_task_id=get_short_task_id,
format_task_row=format_non_running_mesos_task_row,
grey=True,
tail_lines=tail_lines,
)
)
return "\n".join(output) | Returns detailed information about the mesos tasks for a service.
:param filter_string: An id used for looking up Mesos tasks
:param get_short_task_id: A function which given a
task_id returns a short task_id suitable for
printing.
:param tail_lines: int representing the number of lines of stdout/err to
report.
| status_mesos_tasks_verbose | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_local_slave_state(hostname=None):
"""Fetches mesos slave state and returns it as a dict.
:param hostname: The host from which to fetch slave state. If not specified, defaults to the local machine."""
if hostname is None:
hostname = socket.getfqdn()
stats_uri = f"http://{hostname}:{MESOS_SLAVE_PORT}/state"
try:
headers = {"User-Agent": get_user_agent()}
response = requests.get(stats_uri, timeout=10, headers=headers)
if response.status_code == 404:
fallback_stats_uri = f"http://{hostname}:{MESOS_SLAVE_PORT}/state.json"
response = requests.get(fallback_stats_uri, timeout=10, headers=headers)
except requests.ConnectionError as e:
raise MesosSlaveConnectionError(
"Could not connect to the mesos slave to see which services are running\n"
"on %s. Is the mesos-slave running?\n"
"Error was: %s\n" % (e.request.url, str(e))
)
response.raise_for_status()
return json.loads(response.text) | Fetches mesos slave state and returns it as a dict.
:param hostname: The host from which to fetch slave state. If not specified, defaults to the local machine. | get_local_slave_state | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_all_tasks_from_state(
mesos_state: MesosState, include_orphans: bool = False
) -> Sequence[MesosTask]:
"""Given a mesos state, find the tasks from all frameworks.
:param mesos_state: the mesos_state
:returns: a list of tasks
"""
tasks = [
task
for framework in mesos_state.get("frameworks", [])
for task in framework.get("tasks", [])
]
if include_orphans:
tasks += mesos_state.get("orphan_tasks", [])
return tasks | Given a mesos state, find the tasks from all frameworks.
:param mesos_state: the mesos_state
:returns: a list of tasks
| get_all_tasks_from_state | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_zookeeper_config(state):
"""Returns dict, containing the zookeeper hosts and path.
:param state: mesos state dictionary"""
re_zk = re.match(r"^zk://([^/]*)/(.*)$", state["flags"]["zk"])
return {"hosts": re_zk.group(1), "path": re_zk.group(2)} | Returns dict, containing the zookeeper hosts and path.
:param state: mesos state dictionary | get_zookeeper_config | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
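For illustration, given a hypothetical state dict whose flags contain a zk URL, the regex above splits it into hosts and path:

state = {"flags": {"zk": "zk://zk1.example.com:2181,zk2.example.com:2181/mesos"}}
get_zookeeper_config(state)
# -> {"hosts": "zk1.example.com:2181,zk2.example.com:2181", "path": "mesos"}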
def get_number_of_mesos_masters(host, path):
"""Returns an array, containing mesos masters
:param zk_config: dict containing information about zookeeper config.
Masters register themselves in zookeeper by creating ``info_`` entries.
We count these entries to get the number of masters.
"""
zk = KazooClient(hosts=host, read_only=True)
zk.start()
try:
root_entries = zk.get_children(path)
result = [
info
for info in root_entries
if info.startswith("json.info_") or info.startswith("info_")
]
return len(result)
finally:
zk.stop()
zk.close() | Returns the number of mesos masters.
:param host: the zookeeper host(s) to query
:param path: the zookeeper path under which masters register themselves
Masters register themselves in zookeeper by creating ``info_`` entries.
We count these entries to get the number of masters.
| get_number_of_mesos_masters | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_all_slaves_for_blacklist_whitelist(
blacklist: DeployBlacklist, whitelist: DeployWhitelist
):
"""
A wrapper function to get all slaves and filter according to
provided blacklist and whitelist.
:param blacklist: a blacklist, used to filter mesos slaves by attribute
:param whitelist: a whitelist, used to filter mesos slaves by attribute
:returns: a list of mesos slave objects, filtered by those which are acceptable
according to the provided blacklist and whitelists.
"""
all_slaves = get_slaves()
return filter_mesos_slaves_by_blacklist(all_slaves, blacklist, whitelist) |
A wrapper function to get all slaves and filter according to
provided blacklist and whitelist.
:param blacklist: a blacklist, used to filter mesos slaves by attribute
:param whitelist: a whitelist, used to filter mesos slaves by attribute
:returns: a list of mesos slave objects, filtered by those which are acceptable
according to the provided blacklist and whitelists.
| get_all_slaves_for_blacklist_whitelist | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_mesos_slaves_grouped_by_attribute(slaves, attribute):
"""Returns a dictionary of unique values and the corresponding hosts for a given Mesos attribute
:param slaves: a list of mesos slaves to group
:param attribute: an attribute to filter
:returns: a dictionary of the form {'<attribute_value>': [<list of hosts with attribute=attribute_value>]}
(the response can contain multiple attribute values)
"""
sorted_slaves = sorted(
slaves,
key=lambda slave: (
slave["attributes"].get(attribute) is None,
slave["attributes"].get(attribute),
),
)
return {
key: list(group)
for key, group in itertools.groupby(
sorted_slaves, key=lambda slave: slave["attributes"].get(attribute)
)
if key
} | Returns a dictionary of unique values and the corresponding hosts for a given Mesos attribute
:param slaves: a list of mesos slaves to group
:param attribute: an attribute to filter
:returns: a dictionary of the form {'<attribute_value>': [<list of hosts with attribute=attribute_value>]}
(the response can contain multiple attribute values)
| get_mesos_slaves_grouped_by_attribute | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
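A hypothetical example of the grouping, using a 'pool' attribute; the slave dicts are abbreviated to the fields the function reads:

slaves = [
    {"hostname": "host1", "attributes": {"pool": "default"}},
    {"hostname": "host2", "attributes": {"pool": "batch"}},
    {"hostname": "host3", "attributes": {"pool": "default"}},
    {"hostname": "host4", "attributes": {}},  # no value for the attribute: dropped from the result
]
get_mesos_slaves_grouped_by_attribute(slaves, "pool")
# -> {"batch": [<host2 dict>], "default": [<host1 dict>, <host3 dict>]}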
def filter_mesos_slaves_by_blacklist(
slaves, blacklist: DeployBlacklist, whitelist: DeployWhitelist
):
"""Takes an input list of slaves and filters them based on the given blacklist.
The blacklist is in the form of:
[["location_type", "location]]
Where the list inside is something like ["region", "uswest1-prod"]
:returns: The list of mesos slaves after the filter
"""
filtered_slaves = []
for slave in slaves:
if host_passes_blacklist(
slave["attributes"], blacklist
) and host_passes_whitelist(slave["attributes"], whitelist):
filtered_slaves.append(slave)
return filtered_slaves | Takes an input list of slaves and filters them based on the given blacklist.
The blacklist is in the form of:
[["location_type", "location]]
Where the list inside is something like ["region", "uswest1-prod"]
:returns: The list of mesos slaves after the filter
| filter_mesos_slaves_by_blacklist | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
async def get_mesos_task_count_by_slave(
mesos_state: MesosState,
slaves_list: Sequence[Dict] = None,
pool: Optional[str] = None,
) -> List[Dict]:
"""Get counts of running tasks per mesos slave.
:param mesos_state: mesos state dict
:param slaves_list: a list of slave dicts to count running tasks for.
:param pool: pool of slaves to return (None means all)
:returns: list of slave dicts {'task_counts': SlaveTaskCount}
"""
all_mesos_tasks = await get_all_running_tasks() # empty string = all app ids
slaves = {
slave["id"]: {"count": 0, "slave": slave}
for slave in mesos_state.get("slaves", [])
}
for task in all_mesos_tasks:
try:
task_slave = await task.slave()
if task_slave["id"] not in slaves:
log.debug("Slave {} not found for task".format(task_slave["id"]))
continue
else:
slaves[task_slave["id"]]["count"] += 1
task_framework = await task.framework()
log.debug(f"Task framework: {task_framework.name}")
except SlaveDoesNotExist:
log.debug(
"Tried to get mesos slaves for task {}, but none existed.".format(
task["id"]
)
)
continue
if slaves_list:
for slave in slaves_list:
slave["task_counts"] = SlaveTaskCount(
**slaves[slave["task_counts"].slave["id"]]
)
slaves_with_counts = list(slaves_list)
elif pool:
slaves_with_counts = [
{"task_counts": SlaveTaskCount(**slave_counts)}
for slave_counts in slaves.values()
if slave_counts["slave"]["attributes"].get("pool", "default") == pool
]
else:
slaves_with_counts = [
{"task_counts": SlaveTaskCount(**slave_counts)}
for slave_counts in slaves.values()
]
for slave in slaves_with_counts:
log.debug(
"Slave: {}, running {} tasks".format(
slave["task_counts"].slave["hostname"],
slave["task_counts"].count,
)
)
return slaves_with_counts | Get counts of running tasks per mesos slave.
:param mesos_state: mesos state dict
:param slaves_list: a list of slave dicts to count running tasks for.
:param pool: pool of slaves to return (None means all)
:returns: list of slave dicts {'task_counts': SlaveTaskCount}
| get_mesos_task_count_by_slave | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_count_running_tasks_on_slave(hostname: str) -> int:
"""Return the number of tasks running on a particular slave
or 0 if the slave is not found.
:param hostname: hostname of the slave
:returns: integer count of mesos tasks"""
mesos_state = a_sync.block(get_mesos_master().state_summary)
task_counts = a_sync.block(get_mesos_task_count_by_slave, mesos_state)
counts = [
slave["task_counts"].count
for slave in task_counts
if slave["task_counts"].slave["hostname"] == hostname
]
if counts:
return counts[0]
else:
return 0 | Return the number of tasks running on a particular slave
or 0 if the slave is not found.
:param hostname: hostname of the slave
:returns: integer count of mesos tasks | get_count_running_tasks_on_slave | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def mesos_services_running_here(
framework_filter, parse_service_instance_from_executor_id, hostname=None
):
"""See what paasta_native services are being run by a mesos-slave on this host.
:param framework_filter: a function that returns true if we should consider a given framework.
:param parse_service_instance_from_executor_id: A function that returns a tuple of (service, instance) from the
executor ID.
:param hostname: Hostname to fetch mesos slave state from. See get_local_slave_state.
:returns: A list of triples of (service, instance, port)"""
slave_state = get_local_slave_state(hostname=hostname)
frameworks = [
fw for fw in slave_state.get("frameworks", []) if framework_filter(fw)
]
executors = [
ex
for fw in frameworks
for ex in fw.get("executors", [])
if "TASK_RUNNING" in [t["state"] for t in ex.get("tasks", [])]
]
srv_list = []
for executor in executors:
try:
srv_name, srv_instance = parse_service_instance_from_executor_id(
executor["id"]
)
except ValueError:
log.error(
"Failed to decode paasta service instance from {}".format(
executor["id"]
)
)
continue
if "ports" in executor["resources"]:
srv_port = int(re.findall("[0-9]+", executor["resources"]["ports"])[0])
else:
srv_port = None
srv_list.append((srv_name, srv_instance, srv_port))
return srv_list | See what paasta_native services are being run by a mesos-slave on this host.
:param framework_filter: a function that returns true if we should consider a given framework.
:param parse_service_instance_from_executor_id: A function that returns a tuple of (service, instance) from the
executor ID.
:param hostname: Hostname to fetch mesos slave state from. See get_local_slave_state.
:returns: A list of triples of (service, instance, port) | mesos_services_running_here | python | Yelp/paasta | paasta_tools/mesos_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py | Apache-2.0 |
def get_sensu_team_data(team):
"""Takes a team and returns the dictionary of Sensu configuration
settings for that team. The data is in this format:
https://github.com/Yelp/sensu_handlers#teams
Returns an empty dictionary if there is nothing to return.
Not all teams specify all the different types of configuration settings.
For example, a team may not specify a `notification_email`. It is up
to the caller of this function to handle that case.
"""
global_team_data = _load_sensu_team_data()["team_data"]
return global_team_data.get(team, {}) | Takes a team and returns the dictionary of Sensu configuration
settings for that team. The data is in this format:
https://github.com/Yelp/sensu_handlers#teams
Returns an empty dictionary if there is nothing to return.
Not all teams specify all the different types of configuration settings.
For example, a team may not specify a `notification_email`. It is up
to the caller of this function to handle that case.
| get_sensu_team_data | python | Yelp/paasta | paasta_tools/monitoring_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py | Apache-2.0 |
def read_monitoring_config(service, soa_dir=DEFAULT_SOA_DIR):
"""Read a service's monitoring.yaml file.
:param service: The service name
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in soa_dir/name/monitoring.yaml"""
rootdir = os.path.abspath(soa_dir)
monitoring_file = os.path.join(rootdir, service, "monitoring.yaml")
monitor_conf = service_configuration_lib.read_monitoring(monitoring_file)
return monitor_conf | Read a service's monitoring.yaml file.
:param service: The service name
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in soa_dir/name/monitoring.yaml | read_monitoring_config | python | Yelp/paasta | paasta_tools/monitoring_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py | Apache-2.0 |
def list_teams():
"""Loads team data from the system. Returns a set of team names (or empty
set).
"""
team_data = _load_sensu_team_data()
teams = set(team_data.get("team_data", {}).keys())
return teams | Loads team data from the system. Returns a set of team names (or empty
set).
| list_teams | python | Yelp/paasta | paasta_tools/monitoring_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py | Apache-2.0 |
def check_under_replication(
instance_config: LongRunningServiceConfig,
expected_count: int,
num_available: int,
sub_component: Optional[str] = None,
) -> Tuple[bool, str, str]:
"""Check if a component/sub_component is under-replicated and returns both the result of the check in the form of a
boolean and a human-readable text to be used in logging or monitoring events.
"""
crit_threshold = instance_config.get_replication_crit_percentage()
# Keep output short, with rest of context in description. This is because
# by default, Slack-Sensu messages have a 400 char limit, incl. the output.
# If it is too long, the runbook and tip won't show up.
if sub_component is not None:
output = ("{} has {}/{} replicas of {} available (threshold: {}%)").format(
instance_config.job_id,
num_available,
expected_count,
sub_component,
crit_threshold,
)
else:
output = ("{} has {}/{} replicas available (threshold: {}%)").format(
instance_config.job_id, num_available, expected_count, crit_threshold
)
under_replicated, _ = is_under_replicated(
num_available, expected_count, crit_threshold
)
if under_replicated:
description = (
"This replication alert means that PaaSTA can't keep the\n"
"requested number of replicas up and healthy in the cluster for "
"the instance {service}.{instance}.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply be unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s {service} -i {instance} -c {cluster} -vv\n"
).format(
service=instance_config.service,
instance=instance_config.instance,
cluster=instance_config.cluster,
)
else:
description = (
"{} is well-replicated because it has over {}% of its "
"expected replicas up."
).format(instance_config.job_id, crit_threshold)
return under_replicated, output, description | Check if a component/sub_component is under-replicated and returns both the result of the check in the form of a
boolean and a human-readable text to be used in logging or monitoring events.
| check_under_replication | python | Yelp/paasta | paasta_tools/monitoring_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py | Apache-2.0 |
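A hypothetical example of the threshold arithmetic behind the check (the exact comparison lives in is_under_replicated):

# With a 50% critical threshold, 4 of 10 available replicas is 40% availability,
# which is below the threshold, so the instance is reported as under-replicated.
expected_count = 10
num_available = 4
crit_threshold = 50
available_percent = 100 * num_available / expected_count  # 40.0
under_replicated = available_percent < crit_threshold  # True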
def load_monkrelaycluster_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> MonkRelayClusterDeploymentConfig:
"""Read a service instance's configuration for MonkRelayCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "monkrelays", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = MonkRelayClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return MonkRelayClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
) | Read a service instance's configuration for MonkRelayCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_monkrelaycluster_instance_config | python | Yelp/paasta | paasta_tools/monkrelaycluster_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/monkrelaycluster_tools.py | Apache-2.0 |
def load_nrtsearchserviceeks_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> NrtsearchServiceEksDeploymentConfig:
"""Read a service instance's configuration for Nrtsearch.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "nrtsearchserviceeks", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = NrtsearchServiceEksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return NrtsearchServiceEksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
) | Read a service instance's configuration for Nrtsearch.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_nrtsearchserviceeks_instance_config | python | Yelp/paasta | paasta_tools/nrtsearchserviceeks_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/nrtsearchserviceeks_tools.py | Apache-2.0 |
def load_nrtsearchservice_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> NrtsearchServiceDeploymentConfig:
"""Read a service instance's configuration for Nrtsearch.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "nrtsearchservice", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = NrtsearchServiceDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return NrtsearchServiceDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
) | Read a service instance's configuration for Nrtsearch.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_nrtsearchservice_instance_config | python | Yelp/paasta | paasta_tools/nrtsearchservice_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/nrtsearchservice_tools.py | Apache-2.0 |
def log_to_paasta(log_line):
"""Add the event to the standard PaaSTA logging backend."""
line = "oom-killer killed {} on {} (container_id: {}).".format(
"a %s process" % log_line.process_name
if log_line.process_name
else "a process",
log_line.hostname,
log_line.container_id,
)
_log(
service=log_line.service,
instance=log_line.instance,
component="oom",
cluster=log_line.cluster,
level=DEFAULT_LOGLEVEL,
line=line,
) | Add the event to the standard PaaSTA logging backend. | log_to_paasta | python | Yelp/paasta | paasta_tools/oom_logger.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/oom_logger.py | Apache-2.0 |
def clusters(self) -> Iterable[str]:
"""Returns an iterator that yields cluster names for the service.
:returns: iterator that yields cluster names.
"""
if self._clusters is None:
self._clusters = list_clusters(service=self._service, soa_dir=self._soa_dir)
for cluster in self._clusters:
yield cluster | Returns an iterator that yields cluster names for the service.
:returns: iterator that yields cluster names.
| clusters | python | Yelp/paasta | paasta_tools/paasta_service_config_loader.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py | Apache-2.0 |
def instances(
self, cluster: str, instance_type_class: Type[InstanceConfig_T]
) -> Iterable[str]:
"""Returns an iterator that yields instance names as strings.
:param cluster: The cluster name
:param instance_type_class: a subclass of InstanceConfig
:returns: an iterator that yields instance names
"""
if (cluster, instance_type_class) not in self._framework_configs:
self._refresh_framework_config(cluster, instance_type_class)
for instance in self._framework_configs.get((cluster, instance_type_class), []):
yield instance | Returns an iterator that yields instance names as strings.
:param cluster: The cluster name
:param instance_type_class: a subclass of InstanceConfig
:returns: an iterator that yields instance names
| instances | python | Yelp/paasta | paasta_tools/paasta_service_config_loader.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py | Apache-2.0 |
def instance_configs(
self, cluster: str, instance_type_class: Type[InstanceConfig_T]
) -> Iterable[InstanceConfig_T]:
"""Returns an iterator that yields InstanceConfig objects.
:param cluster: The cluster name
:param instance_type_class: a subclass of InstanceConfig
:returns: an iterator that yields instances of KubernetesDeploymentConfig, etc.
:raises NotImplementedError: when it doesn't know how to create a config for instance_type_class
"""
if (cluster, instance_type_class) not in self._framework_configs:
self._refresh_framework_config(cluster, instance_type_class)
for instance, config in self._framework_configs.get(
(cluster, instance_type_class), {}
).items():
try:
yield self._create_service_config(
cluster, instance, config, instance_type_class
)
except NoDeploymentsAvailable:
pass | Returns an iterator that yields InstanceConfig objects.
:param cluster: The cluster name
:param instance_type_class: a subclass of InstanceConfig
:returns: an iterator that yields instances of KubernetesDeploymentConfig, etc.
:raises NotImplementedError: when it doesn't know how to create a config for instance_type_class
| instance_configs | python | Yelp/paasta | paasta_tools/paasta_service_config_loader.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py | Apache-2.0 |
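A short sketch of how a caller might iterate instance configs with this loader, assuming KubernetesDeploymentConfig is importable from paasta_tools.kubernetes_tools; the service name, cluster, and soa_dir below are placeholders.
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader

# Placeholder service and cluster names; soa_dir points at a soaconfigs checkout.
loader = PaastaServiceConfigLoader(service="example_service", soa_dir="/nail/etc/services")
for instance_config in loader.instance_configs(
    cluster="example_cluster", instance_type_class=KubernetesDeploymentConfig
):
    print(instance_config.instance, instance_config.get_namespace())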
def _create_service_config(
self,
cluster: str,
instance: str,
config: utils.InstanceConfigDict,
config_class: Type[InstanceConfig_T],
) -> InstanceConfig_T:
"""Create a service instance's configuration for kubernetes.
:param cluster: The cluster to read the configuration for
:param instance: The instance of the service to retrieve
:param config: the framework instance config.
:returns: An instance of config_class
"""
merged_config = self._get_merged_config(config)
temp_instance_config = config_class(
service=self._service,
cluster=cluster,
instance=instance,
config_dict=merged_config,
branch_dict=None,
soa_dir=self._soa_dir,
)
branch_dict = self._get_branch_dict(cluster, instance, temp_instance_config)
return config_class(
service=self._service,
cluster=cluster,
instance=instance,
config_dict=merged_config,
branch_dict=branch_dict,
soa_dir=self._soa_dir,
) | Create a service instance's configuration for kubernetes.
:param cluster: The cluster to read the configuration for
:param instance: The instance of the service to retrieve
:param config: the framework instance config.
:returns: An instance of config_class
| _create_service_config | python | Yelp/paasta | paasta_tools/paasta_service_config_loader.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py | Apache-2.0 |
def _make_determine_wants_func(ref_mutator):
"""Returns a safer version of ref_mutator, suitable for passing as the
determine_wants argument to dulwich's send_pack method. The returned
function will not delete or modify any existing refs."""
def determine_wants(old_refs):
refs = {k.decode("UTF-8"): v.decode("UTF-8") for k, v in old_refs.items()}
new_refs = ref_mutator(refs)
new_refs = {k.encode("UTF-8"): v.encode("UTF-8") for k, v in new_refs.items()}
new_refs.update(old_refs) # Make sure we don't delete/modify anything.
return new_refs
return determine_wants | Returns a safer version of ref_mutator, suitable for passing as the
determine_wants argument to dulwich's send_pack method. The returned
function will not delete or modify any existing refs. | _make_determine_wants_func | python | Yelp/paasta | paasta_tools/remote_git.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py | Apache-2.0 |
def make_force_push_mutate_refs_func(targets, sha):
"""Create a 'force push' function that will inform send_pack that we want
to mark a certain list of target branches/tags to point to a particular
git_sha.
:param targets: List of branches/tags to point at the input sha
:param sha: The git sha to point the branches/tags at
:returns: A function to do the ref manipulation that a dulwich client can use"""
def mutate_refs(refs):
for target in targets:
refs[target.encode("UTF-8")] = sha.encode("UTF-8")
return refs
return mutate_refs | Create a 'force push' function that will inform send_pack that we want
to mark a certain list of target branches/tags to point to a particular
git_sha.
:param targets: List of branches/tags to point at the input sha
:param sha: The git sha to point the branches/tags at
:returns: A function to do the ref manipulation that a dulwich client can use | make_force_push_mutate_refs_func | python | Yelp/paasta | paasta_tools/remote_git.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py | Apache-2.0 |
def create_remote_refs(git_url, ref_mutator, force=False):
"""Creates refs (tags, branches) on a remote git repo.
:param git_url: the URL or path to the remote git repo.
:param ref_mutator: A function that determines the new refs to create on
the remote repo. This gets passed a dictionary of the
remote server's refs in the format {name : hash, ...},
and should return a dictionary of the same format.
:param force: Bool, defaults to false. If true we will overwrite
refs even if they are already set.
:returns: The map of refs, with our changes applied.
"""
client, path = dulwich.client.get_transport_and_path(git_url)
if force is False:
determine_wants = _make_determine_wants_func(ref_mutator)
else:
determine_wants = ref_mutator
# We know we don't need to push any objects.
def generate_pack_contents(have, want):
return []
return client.send_pack(path, determine_wants, generate_pack_contents) | Creates refs (tags, branches) on a remote git repo.
:param git_url: the URL or path to the remote git repo.
:param ref_mutator: A function that determines the new refs to create on
the remote repo. This gets passed a dictionary of the
remote server's refs in the format {name : hash, ...},
and should return a dictionary of the same format.
:param force: Bool, defaults to false. If true we will overwrite
refs even if they are already set.
:returns: The map of refs, with our changes applied.
| create_remote_refs | python | Yelp/paasta | paasta_tools/remote_git.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py | Apache-2.0 |
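A hedged sketch of combining make_force_push_mutate_refs_func with create_remote_refs to point a ref at a sha; the git URL, ref name, and sha below are all placeholders, not real repository values.
from paasta_tools import remote_git

# Placeholder URL, ref name, and sha for illustration only.
git_url = "git@git.example.com:services/example_service"
mutator = remote_git.make_force_push_mutate_refs_func(
    targets=["refs/tags/paasta-example_cluster.main-20240101T000000-deploy"],
    sha="0123456789abcdef0123456789abcdef01234567",
)
remote_git.create_remote_refs(git_url, mutator, force=True)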
def list_remote_refs(git_url):
"""Get the refs from a remote git repo as a dictionary of name->hash."""
client, path = dulwich.client.get_transport_and_path(git_url)
try:
refs = client.fetch_pack(path, lambda refs: [], None, lambda data: None)
return {k.decode("UTF-8"): v.decode("UTF-8") for k, v in refs.items()}
except dulwich.errors.HangupException as e:
raise LSRemoteException(f"Unable to fetch remote refs from {git_url}: {e}") | Get the refs from a remote git repo as a dictionary of name->hash. | list_remote_refs | python | Yelp/paasta | paasta_tools/remote_git.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py | Apache-2.0 |
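A small sketch of reading remote refs; the URL is a placeholder and LSRemoteException is assumed to be defined alongside list_remote_refs in the same module.
from paasta_tools.remote_git import LSRemoteException, list_remote_refs

try:
    # Placeholder remote URL.
    refs = list_remote_refs("git@git.example.com:services/example_service")
    print(refs.get("refs/heads/master"))
except LSRemoteException as error:
    print(f"could not reach the remote: {error}")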
def get_authors(git_url, from_sha, to_sha):
"""Gets the list of authors who contributed to a git changeset.
Currently only supports fetching this in a very "yelpy" way by
executing a gitolite command"""
matches = re.match("(?P<git_server>.*):(?P<git_repo>.*)", git_url)
if matches is None:
return (1, f"could not understand the git url {git_url} for authors detection")
git_server = matches.group("git_server")
git_repo = matches.group("git_repo")
if git_server is None:
return (
1,
f"could not understand the git server in {git_url} for authors detection",
)
if git_repo is None:
return (
1,
f"could not understand the git repo in {git_url} for authors detection",
)
if "git.yelpcorp.com" in git_server:
ssh_command = (
f"ssh {git_server} authors-of-changeset {git_repo} {from_sha} {to_sha}"
)
return _run(command=ssh_command, timeout=5.0)
else:
# TODO: PAASTA-16927: support getting authors for services on GHE
return 1, f"Fetching authors not supported for {git_server}" | Gets the list of authors who contributed to a git changeset.
Currently only supports fetching this in a very "yelpy" way by
executing a gitolite command | get_authors | python | Yelp/paasta | paasta_tools/remote_git.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py | Apache-2.0 |
def is_shared_secret_from_secret_name(soa_dir: str, secret_name: str) -> bool:
"""Alternative way of figuring if a secret is shared, directly from the secret_name."""
secret_path = os.path.join(
soa_dir, SHARED_SECRET_SERVICE, "secrets", f"{secret_name}.json"
)
return os.path.isfile(secret_path) | Alternative way of figuring if a secret is shared, directly from the secret_name. | is_shared_secret_from_secret_name | python | Yelp/paasta | paasta_tools/secret_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/secret_tools.py | Apache-2.0 |
def get_hpa_overrides(kube_client: KubeClient) -> Dict[str, Dict[str, HpaOverride]]:
"""
Load autoscaling overrides from the ConfigMap once.
This function reads the "paasta-autoscaling-overrides" ConfigMap in the "paasta" namespace
and extracts all valid (non-expired) overrides to return a dictionary mapping
service.instance pairs to override data (currently, just min_instances and when the
override should expire by).
The incoming ConfigMap is expected to have the following format:
{
$SERVICE_A.$INSTANCE_A: {
"min_instances": 2,
"expire_after": "2023-10-01T00:00:00Z"
},
$SERVICE_A.$INSTANCE_B: {
"min_instances": 3,
"expire_after": "2023-10-01T00:00:00Z"
},
...
},
$SERVICE_B.$INSTANCE_A: {
"min_instances": 1,
"expire_after": "2023-10-01T00:00:00Z"
},
$SERVICE_B.$INSTANCE_B: {
"min_instances": 2,
"expire_after": "2023-10-01T00:00:00Z"
},
...
}
"""
overrides: Dict[str, Dict[str, HpaOverride]] = {}
try:
configmap = get_namespaced_configmap(
name=AUTOSCALING_OVERRIDES_CONFIGMAP_NAME,
namespace=AUTOSCALING_OVERRIDES_CONFIGMAP_NAMESPACE,
kube_client=kube_client,
)
if configmap and configmap.data:
current_time = time.time()
for service_instance, override_json in configmap.data.items():
try:
service, instance = service_instance.split(".")
override_metadata = json.loads(override_json)
expire_after = override_metadata.get("expire_after")
min_instances = override_metadata.get("min_instances")
if expire_after and min_instances:
if current_time < expire_after:
if service not in overrides:
overrides[service] = {}
overrides[service][instance] = {
"min_instances": min_instances,
"expire_after": expire_after,
}
log.info(
f"Found valid HPA override for {service}: "
f"{override_metadata.get('min_instances')} (expires at {expire_after})"
)
else:
log.info(
f"Ignoring expired HPA override for {service}.{instance}"
f"(expired at {expire_after})"
)
else:
log.warning(
f"Invalid HPA override for {service}.{instance}: "
f"missing 'min_instances' or 'expire_after': {override_metadata}"
)
except Exception:
log.exception(
f"Error parsing override for {service} - proceeding without overrides for this service."
)
except Exception:
# If ConfigMap doesn't exist or there's an error, just return empty dict
log.exception(
f"Unable to load the {AUTOSCALING_OVERRIDES_CONFIGMAP_NAME} ConfigMap - proceeding without overrides"
)
return overrides |
Load autoscaling overrides from the ConfigMap once.
This function reads the "paasta-autoscaling-overrides" ConfigMap in the "paasta" namespace
and extracts all valid (non-expired) overrides to return a dictionary mapping
service.instance pairs to override data (currently, just min_instances and when the
override should expire by).
The incoming ConfigMap is expected to have the following format:
{
$SERVICE_A.$INSTANCE_A: {
"min_instances": 2,
"expire_after": "2023-10-01T00:00:00Z"
},
$SERVICE_A.$INSTANCE_B: {
"min_instances": 3,
"expire_after": "2023-10-01T00:00:00Z"
},
...
},
$SERVICE_B.$INSTANCE_A: {
"min_instances": 1,
"expire_after": "2023-10-01T00:00:00Z"
},
$SERVICE_B.$INSTANCE_B: {
"min_instances": 2,
"expire_after": "2023-10-01T00:00:00Z"
},
...
}
| get_hpa_overrides | python | Yelp/paasta | paasta_tools/setup_kubernetes_job.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_kubernetes_job.py | Apache-2.0 |
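For illustration only, this is the nested shape of the dict returned by get_hpa_overrides and the kind of lookup a caller might perform; the service/instance names and the epoch timestamp are made up.
# Illustrative return value: {service: {instance: {"min_instances": ..., "expire_after": ...}}}
overrides = {
    "example_service": {
        "main": {"min_instances": 5, "expire_after": 1759276800.0},
    },
}
override = overrides.get("example_service", {}).get("main")
if override:
    print(override["min_instances"])  # 5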
def _minify_promql(query: str) -> str:
"""
Given a PromQL query, return the same query with most whitespace collapsed.
This is useful for allowing us to nicely format queries in code, but minimize the size of our
queries when they're actually sent to Prometheus by the adapter.
"""
trimmed_query = []
# while we could potentially do some regex magic, we want to ensure
# that we don't mess up any labels (even though they really shouldn't
    # have any whitespace in them in the first place) - thus we just
# strip any leading/trailing whitespace and leave everything else alone
for line in query.split("\n"):
trimmed_query.append(line.strip())
return (" ".join(trimmed_query)).strip() |
Given a PromQL query, return the same query with most whitespace collapsed.
This is useful for allowing us to nicely format queries in code, but minimize the size of our
queries when they're actually sent to Prometheus by the adapter.
| _minify_promql | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
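A quick illustration of the helper on a formatted query, assuming the _minify_promql function defined above is in scope; the label values are placeholders.
query = """
    sum(
        up{paasta_cluster='example_cluster'}
    ) by (kube_deployment)
"""
# Each line is stripped and the query is collapsed onto one line.
print(_minify_promql(query))
# sum( up{paasta_cluster='example_cluster'} ) by (kube_deployment)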
def create_instance_active_requests_scaling_rule(
service: str,
instance_config: KubernetesDeploymentConfig,
metrics_provider_config: MetricsProviderDict,
paasta_cluster: str,
) -> PrometheusAdapterRule:
"""
Creates a Prometheus adapter rule config for a given service instance.
"""
instance = instance_config.instance
namespace = instance_config.get_namespace()
desired_active_requests_per_replica = metrics_provider_config.get(
"desired_active_requests_per_replica",
DEFAULT_DESIRED_ACTIVE_REQUESTS_PER_REPLICA,
)
moving_average_window = metrics_provider_config.get(
"moving_average_window_seconds",
DEFAULT_ACTIVE_REQUESTS_AUTOSCALING_MOVING_AVERAGE_WINDOW,
)
deployment_name = get_kubernetes_app_name(service=service, instance=instance)
# In order for autoscaling to work safely while a service migrates from one namespace to another, the HPA needs to
# make sure that the deployment in the new namespace is scaled up enough to handle _all_ the load.
# This is because once the new deployment is 100% healthy, cleanup_kubernetes_job will delete the deployment out of
# the old namespace all at once, suddenly putting all the load onto the deployment in the new namespace.
# To ensure this, we must:
# - DO NOT filter on namespace in worker_filter_terms (which is used when calculating desired_instances).
# - DO filter on namespace in replica_filter_terms (which is used to calculate current_replicas).
# This makes sure that desired_instances includes load from all namespaces, but that the scaling ratio calculated
# by (desired_instances / current_replicas) is meaningful for each namespace.
worker_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{instance}'"
replica_filter_terms = f"paasta_cluster='{paasta_cluster}',deployment='{deployment_name}',namespace='{namespace}'"
current_replicas = f"""
sum(
label_join(
(
kube_deployment_spec_replicas{{{replica_filter_terms}}} >= 0
or
max_over_time(
kube_deployment_spec_replicas{{{replica_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
),
"kube_deployment", "", "deployment"
)
) by (kube_deployment)
"""
# Envoy tracks metrics at the smartstack namespace level. In most cases the paasta instance name matches the smartstack namespace.
    # In rare cases, custom registrations are added to instance configs.
    # If there is no custom registration, the envoy and instance names match and there is no need to update the worker_filter_terms.
    # If there is a single custom registration for an instance, we process the registration value and extract the smartstack namespace to be used.
# The registrations usually follow the format of {service_name}.{smartstack_name}. Hence we split the string by dot and extract the last token.
    # More than one custom registration is not supported; config validation takes care of rejecting such configs.
registrations = instance_config.get_registrations()
mesh_instance = registrations[0].split(".")[-1] if len(registrations) == 1 else None
envoy_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{mesh_instance or instance}'"
# envoy-based metrics have no labels corresponding to the k8s resources that they
# front, but we can trivially add one in since our deployment names are of the form
# {service_name}-{instance_name} - which are both things in `worker_filter_terms` so
# it's safe to unconditionally add.
# This is necessary as otherwise the HPA/prometheus adapter does not know what these
# metrics are for.
total_load = f"""
(
sum(
label_replace(
paasta_instance:envoy_cluster__egress_cluster_upstream_rq_active{{{envoy_filter_terms}}},
"kube_deployment", "{deployment_name}", "", ""
)
) by (kube_deployment)
)
"""
desired_instances_at_each_point_in_time = f"""
{total_load} / {desired_active_requests_per_replica}
"""
desired_instances = f"""
avg_over_time(
(
{desired_instances_at_each_point_in_time}
)[{moving_average_window}s:]
)
"""
    # The prometheus HPA adapter needs kube_deployment and kube_namespace labels attached to the metrics it's scaling on.
# The envoy-based metrics have no labels corresponding to the k8s resources, so we can add them in.
metrics_query = f"""
label_replace(
label_replace(
{desired_instances} / {current_replicas},
"kube_deployment", "{deployment_name}", "", ""
),
"kube_namespace", "{namespace}", "", ""
)
"""
series_query = f"""
k8s:deployment:pods_status_ready{{{worker_filter_terms}}}
"""
metric_name = f"{deployment_name}-active-requests-prom"
return {
"name": {"as": metric_name},
"seriesQuery": _minify_promql(series_query),
"resources": {"template": "kube_<<.Resource>>"},
"metricsQuery": _minify_promql(metrics_query),
} |
Creates a Prometheus adapter rule config for a given service instance.
| create_instance_active_requests_scaling_rule | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
def create_instance_uwsgi_scaling_rule(
service: str,
instance_config: KubernetesDeploymentConfig,
metrics_provider_config: MetricsProviderDict,
paasta_cluster: str,
) -> PrometheusAdapterRule:
"""
Creates a Prometheus adapter rule config for a given service instance.
"""
instance = instance_config.instance
namespace = instance_config.get_namespace()
setpoint = metrics_provider_config["setpoint"]
moving_average_window = metrics_provider_config.get(
"moving_average_window_seconds", DEFAULT_UWSGI_AUTOSCALING_MOVING_AVERAGE_WINDOW
)
deployment_name = get_kubernetes_app_name(service=service, instance=instance)
# In order for autoscaling to work safely while a service migrates from one namespace to another, the HPA needs to
# make sure that the deployment in the new namespace is scaled up enough to handle _all_ the load.
# This is because once the new deployment is 100% healthy, cleanup_kubernetes_job will delete the deployment out of
# the old namespace all at once, suddenly putting all the load onto the deployment in the new namespace.
# To ensure this, we must:
# - DO NOT filter on namespace in worker_filter_terms (which is used when calculating desired_instances).
# - DO filter on namespace in replica_filter_terms (which is used to calculate current_replicas).
# This makes sure that desired_instances includes load from all namespaces, but that the scaling ratio calculated
# by (desired_instances / current_replicas) is meaningful for each namespace.
worker_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{instance}'"
replica_filter_terms = f"paasta_cluster='{paasta_cluster}',kube_deployment='{deployment_name}',namespace='{namespace}'"
# k8s:deployment:pods_status_ready is a metric created by summing kube_pod_status_ready
# over paasta service/instance/cluster. it counts the number of ready pods in a paasta
# deployment.
ready_pods = f"""
(sum(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}} >= 0
or
max_over_time(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
) by (kube_deployment))
"""
    # as mentioned above: we want to get the overall load by counting load across namespaces - but we need
# to divide by the ready pods in the target namespace - which is done by using a namespace filter here
ready_pods_namespaced = f"""
(sum(
k8s:deployment:pods_status_ready{{{replica_filter_terms}}} >= 0
or
max_over_time(
k8s:deployment:pods_status_ready{{{replica_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
) by (kube_deployment))
"""
load_per_instance = f"""
avg(
uwsgi_worker_busy{{{worker_filter_terms}}}
) by (kube_pod, kube_deployment)
"""
missing_instances = f"""
clamp_min(
{ready_pods} - count({load_per_instance}) by (kube_deployment),
0
)
"""
total_load = f"""
(
sum(
{load_per_instance}
) by (kube_deployment)
+
{missing_instances}
)
"""
desired_instances_at_each_point_in_time = f"""
{total_load} / {setpoint}
"""
desired_instances = f"""
avg_over_time(
(
{desired_instances_at_each_point_in_time}
)[{moving_average_window}s:]
)
"""
# our Prometheus query is calculating a desired number of replicas, and then k8s wants that expressed as an average utilization
# so as long as we divide by the number that k8s ends up multiplying by, we should be able to convince k8s to run any arbitrary
# number of replicas.
# k8s happens to multiply by the # of ready pods - so we divide by that rather than by the amount of current replicas (which may
# include non-ready pods)
# ref: https://github.com/kubernetes/kubernetes/blob/7ec1a89a509906dad9fd6a4635d7bfc157b47790/pkg/controller/podautoscaler/replica_calculator.go#L278
metrics_query = f"""
{desired_instances} / {ready_pods_namespaced}
"""
metric_name = f"{deployment_name}-uwsgi-prom"
return {
"name": {"as": metric_name},
"seriesQuery": f"uwsgi_worker_busy{{{worker_filter_terms}}}",
"resources": {"template": "kube_<<.Resource>>"},
"metricsQuery": _minify_promql(metrics_query),
} |
Creates a Prometheus adapter rule config for a given service instance.
| create_instance_uwsgi_scaling_rule | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
def create_instance_uwsgi_v2_scaling_rule(
service: str,
instance_config: KubernetesDeploymentConfig,
metrics_provider_config: MetricsProviderDict,
paasta_cluster: str,
) -> PrometheusAdapterRule:
"""
Creates a Prometheus adapter rule config for a given service instance.
"""
instance = instance_config.instance
moving_average_window = metrics_provider_config.get(
"moving_average_window_seconds", DEFAULT_UWSGI_AUTOSCALING_MOVING_AVERAGE_WINDOW
)
deployment_name = get_kubernetes_app_name(service=service, instance=instance)
# In order for autoscaling to work safely while a service migrates from one namespace to another, the HPA needs to
# make sure that the deployment in the new namespace is scaled up enough to handle _all_ the load.
# This is because once the new deployment is 100% healthy, cleanup_kubernetes_job will delete the deployment out of
# the old namespace all at once, suddenly putting all the load onto the deployment in the new namespace.
# To ensure this, we must NOT filter on namespace in worker_filter_terms (which is used when calculating total_load.
# This makes sure that desired_instances includes load from all namespaces.
worker_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{instance}'"
# k8s:deployment:pods_status_ready is a metric created by summing kube_pod_status_ready
# over paasta service/instance/cluster. it counts the number of ready pods in a paasta
# deployment.
ready_pods = f"""
(sum(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}} >= 0
or
max_over_time(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
) by (kube_deployment))
"""
load_per_instance = f"""
avg(
uwsgi_worker_busy{{{worker_filter_terms}}}
) by (kube_pod, kube_deployment)
"""
missing_instances = f"""
clamp_min(
{ready_pods} - count({load_per_instance}) by (kube_deployment),
0
)
"""
total_load = f"""
(
sum(
{load_per_instance}
) by (kube_deployment)
+
{missing_instances}
)
"""
total_load_smoothed = f"""
avg_over_time(
(
{total_load}
)[{moving_average_window}s:]
)
"""
metric_name = f"{deployment_name}-uwsgi-v2-prom"
return {
"name": {"as": metric_name},
"seriesQuery": f"uwsgi_worker_busy{{{worker_filter_terms}}}",
"resources": {"template": "kube_<<.Resource>>"},
"metricsQuery": _minify_promql(total_load_smoothed),
} |
Creates a Prometheus adapter rule config for a given service instance.
| create_instance_uwsgi_v2_scaling_rule | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
def create_instance_piscina_scaling_rule(
service: str,
instance_config: KubernetesDeploymentConfig,
metrics_provider_config: MetricsProviderDict,
paasta_cluster: str,
) -> PrometheusAdapterRule:
"""
Creates a Prometheus adapter rule config for a given service instance.
"""
instance = instance_config.instance
namespace = instance_config.get_namespace()
setpoint = metrics_provider_config["setpoint"]
moving_average_window = metrics_provider_config.get(
"moving_average_window_seconds",
DEFAULT_PISCINA_AUTOSCALING_MOVING_AVERAGE_WINDOW,
)
deployment_name = get_kubernetes_app_name(service=service, instance=instance)
# In order for autoscaling to work safely while a service migrates from one namespace to another, the HPA needs to
# make sure that the deployment in the new namespace is scaled up enough to handle _all_ the load.
# This is because once the new deployment is 100% healthy, cleanup_kubernetes_job will delete the deployment out of
# the old namespace all at once, suddenly putting all the load onto the deployment in the new namespace.
# To ensure this, we must:
# - DO NOT filter on namespace in worker_filter_terms (which is used when calculating desired_instances).
# - DO filter on namespace in replica_filter_terms (which is used to calculate current_replicas).
# This makes sure that desired_instances includes load from all namespaces, but that the scaling ratio calculated
# by (desired_instances / current_replicas) is meaningful for each namespace.
worker_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{instance}'"
replica_filter_terms = f"paasta_cluster='{paasta_cluster}',deployment='{deployment_name}',namespace='{namespace}'"
current_replicas = f"""
sum(
label_join(
(
kube_deployment_spec_replicas{{{replica_filter_terms}}} >= 0
or
max_over_time(
kube_deployment_spec_replicas{{{replica_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
),
"kube_deployment", "", "deployment"
)
) by (kube_deployment)
"""
# k8s:deployment:pods_status_ready is a metric created by summing kube_pod_status_ready
# over paasta service/instance/cluster. it counts the number of ready pods in a paasta
# deployment.
ready_pods = f"""
(sum(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}} >= 0
or
max_over_time(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
) by (kube_deployment))
"""
load_per_instance = f"""
(piscina_pool_utilization{{{worker_filter_terms}}})
"""
missing_instances = f"""
clamp_min(
{ready_pods} - count({load_per_instance}) by (kube_deployment),
0
)
"""
total_load = f"""
(
sum(
{load_per_instance}
) by (kube_deployment)
+
{missing_instances}
)
"""
desired_instances_at_each_point_in_time = f"""
{total_load} / {setpoint}
"""
desired_instances = f"""
avg_over_time(
(
{desired_instances_at_each_point_in_time}
)[{moving_average_window}s:]
)
"""
metrics_query = f"""
{desired_instances} / {current_replicas}
"""
return {
"name": {"as": f"{deployment_name}-piscina-prom"},
"seriesQuery": f"piscina_pool_utilization{{{worker_filter_terms}}}",
"resources": {"template": "kube_<<.Resource>>"},
"metricsQuery": _minify_promql(metrics_query),
} |
Creates a Prometheus adapter rule config for a given service instance.
| create_instance_piscina_scaling_rule | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
def create_instance_gunicorn_scaling_rule(
service: str,
instance_config: KubernetesDeploymentConfig,
metrics_provider_config: MetricsProviderDict,
paasta_cluster: str,
) -> PrometheusAdapterRule:
"""
Creates a Prometheus adapter rule config for a given service instance.
"""
instance = instance_config.instance
namespace = instance_config.get_namespace()
setpoint = metrics_provider_config["setpoint"]
moving_average_window = metrics_provider_config.get(
"moving_average_window_seconds",
DEFAULT_GUNICORN_AUTOSCALING_MOVING_AVERAGE_WINDOW,
)
deployment_name = get_kubernetes_app_name(service=service, instance=instance)
# In order for autoscaling to work safely while a service migrates from one namespace to another, the HPA needs to
# make sure that the deployment in the new namespace is scaled up enough to handle _all_ the load.
# This is because once the new deployment is 100% healthy, cleanup_kubernetes_job will delete the deployment out of
# the old namespace all at once, suddenly putting all the load onto the deployment in the new namespace.
# To ensure this, we must:
# - DO NOT filter on namespace in worker_filter_terms (which is used when calculating desired_instances).
# - DO filter on namespace in replica_filter_terms (which is used to calculate current_replicas).
# This makes sure that desired_instances includes load from all namespaces, but that the scaling ratio calculated
# by (desired_instances / current_replicas) is meaningful for each namespace.
worker_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{instance}'"
replica_filter_terms = f"paasta_cluster='{paasta_cluster}',deployment='{deployment_name}',namespace='{namespace}'"
current_replicas = f"""
sum(
label_join(
(
kube_deployment_spec_replicas{{{replica_filter_terms}}} >= 0
or
max_over_time(
kube_deployment_spec_replicas{{{replica_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
),
"kube_deployment", "", "deployment"
)
) by (kube_deployment)
"""
# k8s:deployment:pods_status_ready is a metric created by summing kube_pod_status_ready
# over paasta service/instance/cluster. it counts the number of ready pods in a paasta
# deployment.
ready_pods = f"""
(sum(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}} >= 0
or
max_over_time(
k8s:deployment:pods_status_ready{{{worker_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
)
) by (kube_deployment))
"""
load_per_instance = f"""
avg(
gunicorn_worker_busy{{{worker_filter_terms}}}
) by (kube_pod, kube_deployment)
"""
missing_instances = f"""
clamp_min(
{ready_pods} - count({load_per_instance}) by (kube_deployment),
0
)
"""
total_load = f"""
(
sum(
{load_per_instance}
) by (kube_deployment)
+
{missing_instances}
)
"""
desired_instances_at_each_point_in_time = f"""
{total_load} / {setpoint}
"""
desired_instances = f"""
avg_over_time(
(
{desired_instances_at_each_point_in_time}
)[{moving_average_window}s:]
)
"""
metrics_query = f"""
{desired_instances} / {current_replicas}
"""
metric_name = f"{deployment_name}-gunicorn-prom"
return {
"name": {"as": metric_name},
"seriesQuery": f"gunicorn_worker_busy{{{worker_filter_terms}}}",
"resources": {"template": "kube_<<.Resource>>"},
"metricsQuery": _minify_promql(metrics_query),
} |
Creates a Prometheus adapter rule config for a given service instance.
| create_instance_gunicorn_scaling_rule | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
def get_rules_for_service_instance(
service_name: str,
instance_config: KubernetesDeploymentConfig,
paasta_cluster: str,
) -> List[PrometheusAdapterRule]:
"""
Returns a list of Prometheus Adapter rules for a given service instance. For now, this
will always be a 0 or 1-element list - but when we support scaling on multiple metrics
we will return N rules for a given service instance.
"""
rules: List[PrometheusAdapterRule] = []
for metrics_provider_type in ALL_METRICS_PROVIDERS:
metrics_provider_config = instance_config.get_autoscaling_metrics_provider(
metrics_provider_type
)
if metrics_provider_config is None:
log.debug(
f"Skipping {service_name}.{instance_config.instance} - no Prometheus-based autoscaling configured for {metrics_provider_type}"
)
continue
rule = create_instance_scaling_rule(
service=service_name,
instance_config=instance_config,
metrics_provider_config=metrics_provider_config,
paasta_cluster=paasta_cluster,
)
if rule is not None:
rules.append(rule)
return rules |
Returns a list of Prometheus Adapter rules for a given service instance. For now, this
will always be a 0 or 1-element list - but when we support scaling on multiple metrics
we will return N rules for a given service instance.
| get_rules_for_service_instance | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
def create_prometheus_adapter_config(
paasta_cluster: str, soa_dir: Path
) -> PrometheusAdapterConfig:
"""
Given a paasta cluster and a soaconfigs directory, create the necessary Prometheus adapter
config to autoscale services.
Currently supports the following metrics providers:
* uwsgi
"""
rules: List[PrometheusAdapterRule] = []
# get_services_for_cluster() returns a list of (service, instance) tuples, but this
# is not great for us: if we were to iterate over that we'd end up getting duplicates
# for every service as PaastaServiceConfigLoader does not expose a way to get configs
# for a single instance by name. instead, we get the unique set of service names and then
# let PaastaServiceConfigLoader iterate over instances for us later
services = {
service_name
for service_name, _ in get_services_for_cluster(
cluster=paasta_cluster, instance_type="kubernetes", soa_dir=str(soa_dir)
)
}
services.update(
{
service_name
for service_name, _ in get_services_for_cluster(
cluster=paasta_cluster, instance_type="eks", soa_dir=str(soa_dir)
)
}
)
for service_name in services:
config_loader = PaastaServiceConfigLoader(
service=service_name, soa_dir=str(soa_dir)
)
for instance_type_class in K8S_INSTANCE_TYPE_CLASSES:
for instance_config in config_loader.instance_configs(
cluster=paasta_cluster,
instance_type_class=instance_type_class,
):
rules.extend(
get_rules_for_service_instance(
service_name=service_name,
instance_config=instance_config,
paasta_cluster=paasta_cluster,
)
)
return {
# we sort our rules so that we can easily compare between two different configmaps
# as otherwise we'd need to do fancy order-independent comparisons between the two
# sets of rules later due to the fact that we're not iterating in a deterministic
# way and can add rules in any arbitrary order
"rules": sorted(rules, key=lambda rule: rule["name"]["as"]),
} |
Given a paasta cluster and a soaconfigs directory, create the necessary Prometheus adapter
config to autoscale services.
Currently supports the following metrics providers:
* uwsgi
| create_prometheus_adapter_config | python | Yelp/paasta | paasta_tools/setup_prometheus_adapter_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py | Apache-2.0 |
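A hedged sketch of generating the adapter config and dumping it as YAML; the cluster name and soaconfigs path are placeholders, and the function above is assumed to be in scope.
from pathlib import Path

import yaml

# Placeholder cluster name and soaconfigs path.
adapter_config = create_prometheus_adapter_config(
    paasta_cluster="example_cluster", soa_dir=Path("/nail/etc/services")
)
print(yaml.safe_dump(adapter_config, sort_keys=False))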
def retrieve_haproxy_csv(
synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str, scope: str
) -> Iterable[Dict[str, str]]:
"""Retrieves the haproxy csv from the haproxy web interface
:param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:param scope: scope
:returns reader: a csv.DictReader object
"""
synapse_uri = synapse_haproxy_url_format.format(
host=synapse_host, port=synapse_port, scope=scope
)
# timeout after 1 second and retry 3 times
haproxy_request = requests.Session()
haproxy_request.headers.update({"User-Agent": get_user_agent()})
haproxy_request.mount("http://", requests.adapters.HTTPAdapter(max_retries=3))
haproxy_request.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))
haproxy_response = haproxy_request.get(synapse_uri, timeout=1)
haproxy_data = haproxy_response.text
reader = csv.DictReader(haproxy_data.splitlines())
return reader | Retrieves the haproxy csv from the haproxy web interface
:param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:param scope: scope
:returns reader: a csv.DictReader object
| retrieve_haproxy_csv | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def get_backends(
service: str, synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str
) -> List[HaproxyBackend]:
"""Fetches the CSV from haproxy and returns a list of backends,
regardless of their state.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:returns backends: A list of dicts representing the backends of all
services or the requested service
"""
if service:
services = [service]
else:
services = None
return get_multiple_backends(
services,
synapse_host=synapse_host,
synapse_port=synapse_port,
synapse_haproxy_url_format=synapse_haproxy_url_format,
) | Fetches the CSV from haproxy and returns a list of backends,
regardless of their state.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:returns backends: A list of dicts representing the backends of all
services or the requested service
| get_backends | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def get_multiple_backends(
services: Optional[Collection[str]],
synapse_host: str,
synapse_port: int,
synapse_haproxy_url_format: str,
) -> List[HaproxyBackend]:
"""Fetches the CSV from haproxy and returns a list of backends,
regardless of their state.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:returns backends: A list of dicts representing the backends of all
services or the requested service
"""
if services is not None and len(services) == 1:
(scope,) = services
else:
# Maybe if there's like two or three services we could make two queries, or find the longest common substring.
# For now let's just hope this is rare and fetch all data.
scope = ""
reader = retrieve_haproxy_csv(
synapse_host,
synapse_port,
synapse_haproxy_url_format=synapse_haproxy_url_format,
scope=scope,
)
backends = []
for line in reader:
        # clean up two irregularities of the CSV output, relative to
        # DictReader's behavior: there's a leading "# " for no good reason:
line["pxname"] = line.pop("# pxname")
# and there's a trailing comma on every line:
line.pop("")
# Look for the service in question and ignore the fictional
# FRONTEND/BACKEND hosts, use starts_with so that hosts that are UP
# with 1/X healthchecks to go before going down get counted as UP:
ha_slave, ha_service = line["svname"], line["pxname"]
if (services is None or ha_service in services) and ha_slave not in (
"FRONTEND",
"BACKEND",
):
backends.append(cast(HaproxyBackend, line))
return backends | Fetches the CSV from haproxy and returns a list of backends,
regardless of their state.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param synapse_host: A host that this check should contact for replication information.
    :param synapse_port: An integer that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:returns backends: A list of dicts representing the backends of all
services or the requested service
| get_multiple_backends | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def load_smartstack_info_for_service(
service: str,
namespace: str,
blacklist: DeployBlacklist,
system_paasta_config: SystemPaastaConfig,
soa_dir: str = DEFAULT_SOA_DIR,
) -> Dict[str, Dict[str, int]]:
"""Retrieves number of available backends for given service
:param service: A service name
:param namespace: A Smartstack namespace
:param blacklist: A list of blacklisted location tuples in the form (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:param soa_dir: SOA dir
:returns: a dictionary of the form
::
{
'location_type': {
'unique_location_name': {
                  'service.instance': <# of available backends>
},
'other_unique_location_name': ...
}
}
"""
service_namespace_config = long_running_service_tools.load_service_namespace_config(
service=service, namespace=namespace, soa_dir=soa_dir
)
discover_location_type = service_namespace_config.get_discover()
return get_smartstack_replication_for_attribute(
attribute=discover_location_type,
service=service,
namespace=namespace,
blacklist=blacklist,
system_paasta_config=system_paasta_config,
    ) | Retrieves the number of available backends for a given service
:param service: A service name
:param namespace: A Smartstack namespace
:param blacklist: A list of blacklisted location tuples in the form (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:param soa_dir: SOA dir
:returns: a dictionary of the form
::
{
'location_type': {
'unique_location_name': {
                  'service.instance': <# of available backends>
},
'other_unique_location_name': ...
}
}
| load_smartstack_info_for_service | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def get_smartstack_replication_for_attribute(
attribute: str,
service: str,
namespace: str,
blacklist: DeployBlacklist,
system_paasta_config: SystemPaastaConfig,
) -> Dict[str, Dict[str, int]]:
"""Loads smartstack replication from a host with the specified attribute
:param attribute: a Mesos attribute
:param service: A service name, like 'example_service'
:param namespace: A particular smartstack namespace to inspect, like 'main'
:param blacklist: A list of blacklisted location tuples in the form of (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>}
              (the dictionary will contain keys for all unique attribute values)
"""
replication_info = {}
filtered_slaves = mesos_tools.get_all_slaves_for_blacklist_whitelist(
blacklist=blacklist, whitelist=None
)
if not filtered_slaves:
raise NoSlavesAvailableError
attribute_slave_dict = mesos_tools.get_mesos_slaves_grouped_by_attribute(
slaves=filtered_slaves, attribute=attribute
)
full_name = compose_job_id(service, namespace)
for value, hosts in attribute_slave_dict.items():
# arbitrarily choose the first host with a given attribute to query for replication stats
synapse_host = hosts[0]["hostname"]
repl_info = get_replication_for_services(
synapse_host=synapse_host,
synapse_port=system_paasta_config.get_synapse_port(),
synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
services=[full_name],
)
replication_info[value] = repl_info
return replication_info | Loads smartstack replication from a host with the specified attribute
:param attribute: a Mesos attribute
:param service: A service name, like 'example_service'
:param namespace: A particular smartstack namespace to inspect, like 'main'
:param blacklist: A list of blacklisted location tuples in the form of (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>}
              (the dictionary will contain keys for all unique attribute values)
| get_smartstack_replication_for_attribute | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def get_replication_for_all_services(
synapse_host: str, synapse_port: int, synapse_haproxy_url_format: str
) -> Dict[str, int]:
"""Returns the replication level for all services known to this synapse haproxy
:param synapse_host: The host that this check should contact for replication information.
:param synapse_port: The port that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
"""
backends = get_multiple_backends(
services=None,
synapse_host=synapse_host,
synapse_port=synapse_port,
synapse_haproxy_url_format=synapse_haproxy_url_format,
)
return collections.Counter([b["pxname"] for b in backends if backend_is_up(b)]) | Returns the replication level for all services known to this synapse haproxy
:param synapse_host: The host that this check should contact for replication information.
:param synapse_port: The port that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
| get_replication_for_all_services | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def get_replication_for_services(
synapse_host: str,
synapse_port: int,
synapse_haproxy_url_format: str,
services: Collection[str],
) -> Dict[str, int]:
"""Returns the replication level for the provided services
This check is intended to be used with an haproxy load balancer, and
relies on the implementation details of that choice.
:param synapse_host: The host that this check should contact for replication information.
:param synapse_port: The port that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:param services: A list of strings that are the service names
that should be checked for replication.
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available
replicas
:returns None: If it cannot connect to the specified synapse host and port
"""
backends = get_multiple_backends(
services=services,
synapse_host=synapse_host,
synapse_port=synapse_port,
synapse_haproxy_url_format=synapse_haproxy_url_format,
)
counter = collections.Counter([b["pxname"] for b in backends if backend_is_up(b)])
return {sn: counter[sn] for sn in services} | Returns the replication level for the provided services
This check is intended to be used with an haproxy load balancer, and
relies on the implementation details of that choice.
:param synapse_host: The host that this check should contact for replication information.
:param synapse_port: The port that this check should contact for replication information.
:param synapse_haproxy_url_format: The format of the synapse haproxy URL.
:param services: A list of strings that are the service names
that should be checked for replication.
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available
replicas
:returns None: If it cannot connect to the specified synapse host and port
| get_replication_for_services | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
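A usage sketch for the replication helper; the host, port, and URL format string below are placeholders (the real values normally come from SystemPaastaConfig), and the function above is assumed to be in scope.
# Placeholder synapse host/port and haproxy URL format.
counts = get_replication_for_services(
    synapse_host="localhost",
    synapse_port=3212,
    synapse_haproxy_url_format="http://{host:s}:{port:d}/;csv;norefresh;scope={scope:s}",
    services=["example_service.main"],
)
print(counts)  # e.g. {'example_service.main': 3}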
def ip_port_hostname_from_svname(svname: str) -> Tuple[str, int, str]:
"""This parses the haproxy svname that smartstack creates.
In old versions of synapse, this is in the format ip:port_hostname.
In versions newer than dd5843c987740a5d5ce1c83b12b258b7253784a8 it is
hostname_ip:port
:param svname: A svname, in either of the formats described above
:returns ip_port_hostname: A tuple of ip, port, hostname.
"""
# split into parts
parts = set(svname.split("_"))
# find those that can be split by : - this is the ip:port
# there will only be 1 of these
ip_ports = {part for part in parts if len(part.split(":")) == 2}
# the one *not* in the list is the hostname
hostname = parts.difference(ip_ports).pop()
ip, port = ip_ports.pop().split(":")
return ip, int(port), hostname | This parses the haproxy svname that smartstack creates.
In old versions of synapse, this is in the format ip:port_hostname.
In versions newer than dd5843c987740a5d5ce1c83b12b258b7253784a8 it is
hostname_ip:port
:param svname: A svname, in either of the formats described above
:returns ip_port_hostname: A tuple of ip, port, hostname.
| ip_port_hostname_from_svname | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
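Both svname layouts described in the docstring parse to the same tuple; a tiny check with made-up values, assuming the function above is in scope.
# Old-style "ip:port_hostname" and new-style "hostname_ip:port" give the same result.
print(ip_port_hostname_from_svname("10.0.0.1:31000_host1"))  # ('10.0.0.1', 31000, 'host1')
print(ip_port_hostname_from_svname("host1_10.0.0.1:31000"))  # ('10.0.0.1', 31000, 'host1')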
def match_backends_and_pods(
backends: Iterable[HaproxyBackend], pods: Iterable[V1Pod]
) -> List[Tuple[Optional[HaproxyBackend], Optional[V1Pod]]]:
"""Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of haproxy backend dictionaries, e.g. the list returned by
smartstack_tools.get_multiple_backends.
:param pods: An iterable of V1Pod objects.
"""
# { ip : [backend1, backend2], ... }
backends_by_ip: DefaultDict[str, List[HaproxyBackend]] = collections.defaultdict(
list
)
backend_pod_pairs = []
for backend in backends:
ip, port, _ = ip_port_hostname_from_svname(backend["svname"])
backends_by_ip[ip].append(backend)
for pod in pods:
ip = pod.status.pod_ip
for backend in backends_by_ip.pop(ip, [None]):
backend_pod_pairs.append((backend, pod))
# we've been popping in the above loop, so anything left didn't match a k8s pod.
for backends in backends_by_ip.values():
for backend in backends:
backend_pod_pairs.append((backend, None))
return backend_pod_pairs | Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of haproxy backend dictionaries, e.g. the list returned by
smartstack_tools.get_multiple_backends.
:param pods: An iterable of V1Pod objects.
| match_backends_and_pods | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def get_replication_for_instance(
self, instance_config: LongRunningServiceConfig
) -> Dict[str, Dict[str, Dict[str, int]]]:
"""Returns the number of registered instances in each discoverable
        location for each service discovery provider.
:param instance_config: An instance of LongRunningServiceConfig.
:returns: a dict {'service_discovery_provider': {'location_type': {'service.instance': int}}}
"""
replication_infos = {}
for provider in self._service_discovery_providers:
replication_info = {}
attribute_host_dict = self.get_allowed_locations_and_hosts(instance_config)
instance_pool = instance_config.get_pool()
for location, hosts in attribute_host_dict.items():
# Try to get information from all available hosts in the pool before giving up
hostnames = self.get_hostnames_in_pool(hosts, instance_pool)
for hostname in hostnames:
try:
replication_info[location] = self._get_replication_info(
location, hostname, instance_config, provider
)
break
except Exception as e:
log.warning(
f"Error while getting replication info for {location} from {hostname}: {e}"
)
if hostname == hostnames[-1]:
# Last hostname failed, giving up
raise
replication_infos[provider.NAME] = replication_info
return replication_infos | Returns the number of registered instances in each discoverable
        location for each service discovery provider.
:param instance_config: An instance of LongRunningServiceConfig.
:returns: a dict {'service_discovery_provider': {'location_type': {'service.instance': int}}}
| get_replication_for_instance | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def _get_replication_info(
self,
location: str,
hostname: str,
instance_config: LongRunningServiceConfig,
provider: ServiceDiscoveryProvider,
) -> Dict[str, int]:
"""Returns service.instance and the number of instances registered in smartstack
at the location as a dict.
:param location: A string that identifies a habitat, a region and etc.
:param hostname: A mesos slave hostname to read replication information from.
:param instance_config: An instance of LongRunningServiceConfig.
:returns: A dict {"service.instance": number_of_instances}.
"""
full_name = compose_job_id(instance_config.service, instance_config.instance)
key = (location, provider.NAME)
replication_info = self._cache.get(key)
if replication_info is None:
replication_info = provider.get_replication_for_all_services(hostname)
self._cache[key] = replication_info
return {full_name: replication_info[full_name]} | Returns service.instance and the number of instances registered in smartstack
at the location as a dict.
:param location: A string that identifies a habitat, a region and etc.
:param hostname: A mesos slave hostname to read replication information from.
:param instance_config: An instance of LongRunningServiceConfig.
:returns: A dict {"service.instance": number_of_instances}.
| _get_replication_info | python | Yelp/paasta | paasta_tools/smartstack_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/smartstack_tools.py | Apache-2.0 |
def setup_volume_mounts(volumes: List[DockerVolume]) -> Dict[str, str]:
"""
Returns Docker volume mount configurations in the format expected by Spark.
"""
conf = {}
# XXX: why are these necessary?
extra_volumes: List[DockerVolume] = cast(
"List[DockerVolume]",
[
{"containerPath": "/etc/passwd", "hostPath": "/etc/passwd", "mode": "RO"},
{"containerPath": "/etc/group", "hostPath": "/etc/group", "mode": "RO"},
],
)
seen_paths: Set[str] = set() # dedupe volumes, just in case
for index, volume in enumerate(volumes + extra_volumes):
host_path, container_path, mode = (
volume["hostPath"],
volume["containerPath"],
volume["mode"],
)
if host_path in seen_paths:
log.warning(f"Skipping {host_path} - already added a binding for it.")
continue
seen_paths.add(host_path)
# the names here don't matter too much, so we just use the index in the volume
# list as an arbitrary name
conf[
f"spark.kubernetes.executor.volumes.hostPath.{index}.mount.path"
] = container_path
conf[
f"spark.kubernetes.executor.volumes.hostPath.{index}.options.path"
] = host_path
conf[
f"spark.kubernetes.executor.volumes.hostPath.{index}.mount.readOnly"
] = str(mode.lower() == "ro").lower()
return conf |
Returns Docker volume mount configurations in the format expected by Spark.
| setup_volume_mounts | python | Yelp/paasta | paasta_tools/spark_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/spark_tools.py | Apache-2.0 |
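
A stand-alone illustration of how a single hostPath volume maps onto the Spark conf keys built by setup_volume_mounts() above; the paths shown are made up.

volume = {"hostPath": "/nail/etc/srv-configs", "containerPath": "/etc/srv-configs", "mode": "RO"}
index = 0
conf = {
    f"spark.kubernetes.executor.volumes.hostPath.{index}.mount.path": volume["containerPath"],
    f"spark.kubernetes.executor.volumes.hostPath.{index}.options.path": volume["hostPath"],
    f"spark.kubernetes.executor.volumes.hostPath.{index}.mount.readOnly": str(
        volume["mode"].lower() == "ro"
    ).lower(),
}
print(conf)
# {'spark.kubernetes.executor.volumes.hostPath.0.mount.path': '/etc/srv-configs',
#  'spark.kubernetes.executor.volumes.hostPath.0.options.path': '/nail/etc/srv-configs',
#  'spark.kubernetes.executor.volumes.hostPath.0.mount.readOnly': 'true'}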
def get_spark_driver_monitoring_annotations(
spark_config: Dict[str, str],
) -> Dict[str, str]:
"""
Returns Spark driver pod annotations - currently used for Prometheus metadata.
"""
annotations: Dict[str, str] = {}
ui_port_str = spark_config.get("spark.ui.port")
if ui_port_str:
annotations.update(
{
"prometheus.io/port": ui_port_str,
"prometheus.io/path": "/metrics/prometheus",
}
)
paasta_service_non_truncated = spark_config.get(
"spark.kubernetes.executor.annotation.paasta.yelp.com/service"
)
paasta_instance_non_truncated = spark_config.get(
"spark.kubernetes.executor.annotation.paasta.yelp.com/instance"
)
if paasta_service_non_truncated and paasta_instance_non_truncated:
annotations.update(
{
"paasta.yelp.com/service": paasta_service_non_truncated,
"paasta.yelp.com/instance": paasta_instance_non_truncated,
}
)
return annotations |
Returns Spark driver pod annotations - currently used for Prometheus metadata.
| get_spark_driver_monitoring_annotations | python | Yelp/paasta | paasta_tools/spark_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/spark_tools.py | Apache-2.0 |
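
An illustrative input/output pair for the annotation builder above; these are the only spark_config keys the function reads, and the values are invented.

spark_config = {
    "spark.ui.port": "39091",
    "spark.kubernetes.executor.annotation.paasta.yelp.com/service": "my-batch-service",
    "spark.kubernetes.executor.annotation.paasta.yelp.com/instance": "adhoc_spark",
}
# get_spark_driver_monitoring_annotations(spark_config) would return:
# {
#     "prometheus.io/port": "39091",
#     "prometheus.io/path": "/metrics/prometheus",
#     "paasta.yelp.com/service": "my-batch-service",
#     "paasta.yelp.com/instance": "adhoc_spark",
# }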
def get_spark_driver_monitoring_labels(
spark_config: Dict[str, str],
user: str,
) -> Dict[str, str]:
"""
Returns Spark driver pod labels - generally for Prometheus metric relabeling.
"""
ui_port_str = str(spark_config.get("spark.ui.port", ""))
labels = {
"paasta.yelp.com/prometheus_shard": SPARK_PROMETHEUS_SHARD,
"spark.yelp.com/user": user,
"spark.yelp.com/driver_ui_port": ui_port_str,
}
return labels |
Returns Spark driver pod labels - generally for Prometheus metric relabeling.
| get_spark_driver_monitoring_labels | python | Yelp/paasta | paasta_tools/spark_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/spark_tools.py | Apache-2.0 |
def get_cluster_name(self):
""":returns The name of the Tron cluster"""
try:
return self["cluster_name"]
except KeyError:
raise TronNotConfigured(
"Could not find name of Tron cluster in system Tron config"
) | :returns The name of the Tron cluster | get_cluster_name | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
def get_url(self):
""":returns The URL for the Tron master's API"""
try:
return self["url"]
except KeyError:
raise TronNotConfigured(
"Could not find URL of Tron master in system Tron config"
) | :returns The URL for the Tron master's API | get_url | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
def parse_time_variables(command: str, parse_time: datetime.datetime = None) -> str:
"""Parses an input string and uses the Tron-style dateparsing
to replace time variables. Currently supports only the date/time
variables listed in the tron documentation:
http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc
    :param command: the command string to be parsed
:param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
:returns: A string with the date and time variables replaced
"""
if parse_time is None:
parse_time = datetime.datetime.now()
# We build up a tron context object that has the right
# methods to parse tron-style time syntax
job_context = tron_command_context.JobRunContext(
tron_command_context.CommandContext()
)
# The tron context object needs the run_time attribute set so it knows
# how to interpret the date strings
job_context.job_run.run_time = parse_time
return StringFormatter(job_context).format(command) | Parses an input string and uses the Tron-style dateparsing
to replace time variables. Currently supports only the date/time
variables listed in the tron documentation:
http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc
    :param command: the command string to be parsed
:param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
:returns: A string with the date and time variables replaced
| parse_time_variables | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
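
A simplified, self-contained sketch of the idea behind parse_time_variables(): render date placeholders in a command string against a reference datetime. The placeholder names and formats here are illustrative only; the real function delegates to Tron's command context, which supports more variables.

import datetime


def render_command(command: str, parse_time: datetime.datetime = None) -> str:
    parse_time = parse_time or datetime.datetime.now()
    context = {
        "year": parse_time.strftime("%Y"),
        "month": parse_time.strftime("%m"),
        "day": parse_time.strftime("%d"),
        "shortdate": parse_time.strftime("%Y-%m-%d"),
    }
    return command.format(**context)


print(render_command("cleanup --date {shortdate}", datetime.datetime(2024, 3, 1)))
# cleanup --date 2024-03-01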
def _get_tron_k8s_cluster_override(cluster: str) -> Optional[str]:
"""
Return the name of a compute cluster if there's a different compute cluster that should be used to run a Tronjob.
    Will return None if no override mapping is present.
We have certain Tron masters that are named differently from the compute cluster that should actually be used (
e.g., we might have tron-XYZ-test-prod, but instead of scheduling on XYZ-test-prod, we'd like to schedule jobs
on test-prod).
To control this, we have an optional config item that we'll puppet onto Tron masters that need this type of
tron master -> compute cluster override which this function will read.
"""
return (
load_system_paasta_config()
.get_tron_k8s_cluster_overrides()
.get(
cluster,
None,
)
) |
Return the name of a compute cluster if there's a different compute cluster that should be used to run a Tronjob.
    Will return None if no override mapping is present.
We have certain Tron masters that are named differently from the compute cluster that should actually be used (
e.g., we might have tron-XYZ-test-prod, but instead of scheduling on XYZ-test-prod, we'd like to schedule jobs
on test-prod).
To control this, we have an optional config item that we'll puppet onto Tron masters that need this type of
tron master -> compute cluster override which this function will read.
| _get_tron_k8s_cluster_override | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
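
A sketch of the override lookup itself; the mapping below mimics what get_tron_k8s_cluster_overrides() might return, and the cluster names are invented.

cluster_overrides = {"tron-xyz-test-prod": "test-prod"}


def k8s_cluster_for(cluster: str):
    # Fall back to None when the Tron master has no override configured.
    return cluster_overrides.get(cluster, None)


print(k8s_cluster_for("tron-xyz-test-prod"))  # test-prod
print(k8s_cluster_for("norcal-devc"))  # None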
def get_secret_volumes(self) -> List[TronSecretVolume]: # type: ignore
"""Adds the secret_volume_name to the object so tron/task_processing can load it downstream without replicating code."""
secret_volumes = super().get_secret_volumes()
tron_secret_volumes = []
for secret_volume in secret_volumes:
tron_secret_volume = TronSecretVolume(
secret_volume_name=self.get_secret_volume_name(
secret_volume["secret_name"]
),
secret_name=secret_volume["secret_name"],
container_path=secret_volume["container_path"],
items=secret_volume.get("items", []),
)
# we have a different place where the default can come from (tron) and we don't want to insert the wrong default here
if "default_mode" in secret_volume:
tron_secret_volume["default_mode"] = secret_volume["default_mode"]
tron_secret_volumes.append(tron_secret_volume)
return tron_secret_volumes | Adds the secret_volume_name to the object so tron/task_processing can load it downstream without replicating code. | get_secret_volumes | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
def get_node_affinities(self) -> Optional[List[Dict[str, Union[str, List[str]]]]]:
"""Converts deploy_whitelist and deploy_blacklist in node affinities.
NOTE: At the time of writing, `kubectl describe` does not show affinities,
only selectors. To see affinities, use `kubectl get pod -o json` instead.
WARNING: At the time of writing, we only used requiredDuringSchedulingIgnoredDuringExecution node affinities in Tron as we currently have
no use case for preferredDuringSchedulingIgnoredDuringExecution node affinities.
"""
requirements = allowlist_denylist_to_requirements(
allowlist=self.get_deploy_whitelist(),
denylist=self.get_deploy_blacklist(),
)
node_selectors = self.config_dict.get("node_selectors", {})
requirements.extend(
raw_selectors_to_requirements(
raw_selectors=node_selectors,
)
)
system_paasta_config = load_system_paasta_config()
if system_paasta_config.get_enable_tron_tsc():
# PAASTA-18198: To improve AZ balance with Karpenter, we temporarily allow specifying zone affinities per pool
pool_node_affinities = system_paasta_config.get_pool_node_affinities()
if pool_node_affinities and self.get_pool() in pool_node_affinities:
current_pool_node_affinities = pool_node_affinities[self.get_pool()]
# If the service already has a node selector for a zone, we don't want to override it
if current_pool_node_affinities and not contains_zone_label(
node_selectors
):
requirements.extend(
raw_selectors_to_requirements(
raw_selectors=current_pool_node_affinities,
)
)
if not requirements:
return None
return [
{"key": key, "operator": op, "value": value}
for key, op, value in requirements
    ] | Converts deploy_whitelist and deploy_blacklist into node affinities.
NOTE: At the time of writing, `kubectl describe` does not show affinities,
only selectors. To see affinities, use `kubectl get pod -o json` instead.
WARNING: At the time of writing, we only used requiredDuringSchedulingIgnoredDuringExecution node affinities in Tron as we currently have
no use case for preferredDuringSchedulingIgnoredDuringExecution node affinities.
| get_node_affinities | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
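
A stand-alone sketch of the final step above: requirement triples become the affinity dicts Tron expects. The conversion from allowlists/denylists and node_selectors to these triples lives in paasta helpers; the label keys shown here are invented.

requirements = [
    ("yelp.com/habitat", "In", ["uswest1a", "uswest1b"]),
    ("instance_type", "NotIn", ["spot"]),
]
affinities = [
    {"key": key, "operator": op, "value": value} for key, op, value in requirements
]
print(affinities)
# [{'key': 'yelp.com/habitat', 'operator': 'In', 'value': ['uswest1a', 'uswest1b']},
#  {'key': 'instance_type', 'operator': 'NotIn', 'value': ['spot']}]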
def get_pool(self) -> str:
"""
Returns the default pool override if pool is not defined in the action configuration.
This is useful for environments like spam to allow us to default the pool to spam but allow users to
override this value. To control this, we have an optional config item that we'll puppet onto Tron masters
which this function will read.
"""
if self.get_executor() == "spark":
pool = load_system_paasta_config().get_default_spark_driver_pool_override()
else:
pool = self.config_dict.get(
"pool", load_system_paasta_config().get_tron_default_pool_override()
)
return pool |
Returns the default pool override if pool is not defined in the action configuration.
This is useful for environments like spam to allow us to default the pool to spam but allow users to
override this value. To control this, we have an optional config item that we'll puppet onto Tron masters
which this function will read.
| get_pool | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
def format_tron_job_dict(job_config: TronJobConfig, k8s_enabled: bool = False):
"""Generate a dict of tronfig for a job, from the TronJobConfig.
:param job_config: TronJobConfig
"""
action_dict = {
action_config.get_action_name(): format_tron_action_dict(
action_config=action_config,
)
for action_config in job_config.get_actions()
}
result = {
"node": job_config.get_node(),
"schedule": job_config.get_schedule(),
"actions": action_dict,
"monitoring": job_config.get_monitoring(),
"queueing": job_config.get_queueing(),
"run_limit": job_config.get_run_limit(),
"all_nodes": job_config.get_all_nodes(),
"enabled": job_config.get_enabled(),
"allow_overlap": job_config.get_allow_overlap(),
"max_runtime": job_config.get_max_runtime(),
"time_zone": job_config.get_time_zone(),
"expected_runtime": job_config.get_expected_runtime(),
}
cleanup_config = job_config.get_cleanup_action()
if cleanup_config:
cleanup_action = format_tron_action_dict(
action_config=cleanup_config,
)
result["cleanup_action"] = cleanup_action
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None} | Generate a dict of tronfig for a job, from the TronJobConfig.
:param job_config: TronJobConfig
| format_tron_job_dict | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
def load_tron_service_config_no_cache(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
"""Load all configured jobs for a service, and any additional config values."""
config = read_extra_service_information(
service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
)
jobs = filter_templates_from_config(config)
job_configs = [
TronJobConfig(
name=name,
service=service,
cluster=cluster,
config_dict=job,
load_deployments=load_deployments,
soa_dir=soa_dir,
for_validation=for_validation,
)
for name, job in jobs.items()
]
return job_configs | Load all configured jobs for a service, and any additional config values. | load_tron_service_config_no_cache | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
def create_complete_config(
service: str,
cluster: str,
soa_dir: str = DEFAULT_SOA_DIR,
k8s_enabled: bool = False,
dry_run: bool = False,
):
"""Generate a namespace configuration file for Tron, for a service."""
job_configs = load_tron_service_config(
service=service,
cluster=cluster,
load_deployments=True,
soa_dir=soa_dir,
for_validation=dry_run,
)
preproccessed_config = {}
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(
job_config=job_config, k8s_enabled=k8s_enabled
)
for job_config in job_configs
}
return yaml.dump(preproccessed_config, Dumper=Dumper, default_flow_style=False) | Generate a namespace configuration file for Tron, for a service. | create_complete_config | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
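
A minimal sketch of the final serialization step: a {"jobs": {...}} dict rendered as Tron namespace YAML. It requires PyYAML, uses the default Dumper rather than the custom one in the real code, and the job contents are placeholders.

import yaml

complete_config = {
    "jobs": {
        "nightly_batch": {
            "node": "paasta",
            "schedule": "cron 0 4 * * *",
            "actions": {"run": {"command": "make batch"}},
        }
    }
}
print(yaml.dump(complete_config, default_flow_style=False))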
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
"""Returns the Tron clusters a service is configured to deploy to."""
search_re = r"/tron-([0-9a-z-_]*)\.yaml$"
service_dir = os.path.join(soa_dir, service)
clusters = []
for filename in glob.glob(f"{service_dir}/*.yaml"):
cluster_re_match = re.search(search_re, filename)
if cluster_re_match is not None:
clusters.append(cluster_re_match.group(1))
return clusters | Returns the Tron clusters a service is configured to deploy to. | list_tron_clusters | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
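
A self-contained version of the filename-to-cluster extraction above, run against a hard-coded file list instead of a real soa-configs checkout; the paths are made up.

import re

filenames = [
    "/nail/etc/services/my_service/tron-norcal-devc.yaml",
    "/nail/etc/services/my_service/kubernetes-norcal-devc.yaml",
    "/nail/etc/services/my_service/tron-pnw-prod.yaml",
]
search_re = r"/tron-([0-9a-z-_]*)\.yaml$"
clusters = []
for filename in filenames:
    match = re.search(search_re, filename)
    if match is not None:
        clusters.append(match.group(1))
print(clusters)  # ['norcal-devc', 'pnw-prod']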
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
"""Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"""
try:
service, job, job_run, action, uuid = task_id.split(".")
except Exception as e:
log.warning(
f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
)
service, job, action = "unknown_service", "unknown_job", "unknown_action"
return service, f"{job}.{action}" | Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785 | parse_service_instance_from_executor_id | python | Yelp/paasta | paasta_tools/tron_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/tron_tools.py | Apache-2.0 |
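
A worked example of the split above, using the task id from the function's own docstring.

task_id = "schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"
service, job, job_run, action, uuid = task_id.split(".")
print(service, f"{job}.{action}")  # schematizer traffic_generator.turnstyle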
def get_namespace(self) -> str:
"""Get namespace from config, default to the value from INSTANCE_TYPE_TO_K8S_NAMESPACE for this instance type, 'paasta' if that isn't defined."""
return self.config_dict.get(
"namespace",
INSTANCE_TYPE_TO_K8S_NAMESPACE.get(self.get_instance_type(), "paasta"),
) | Get namespace from config, default to the value from INSTANCE_TYPE_TO_K8S_NAMESPACE for this instance type, 'paasta' if that isn't defined. | get_namespace | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_cpu_quota(self) -> float:
"""Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag"""
cpu_burst_add = self.get_cpu_burst_add()
return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period() | Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag | get_cpu_quota | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
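
Worked numbers for the quota formula above, with illustrative values: one requested CPU, a burst allowance of one extra CPU, and a 100ms CFS period.

cpus = 1.0
cpu_burst_add = 1.0
cfs_period_us = 100000
cpu_quota = (cpus + cpu_burst_add) * cfs_period_us
print(int(cpu_quota))  # 200000, passed to docker as --cpu-quota=200000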
def get_cap_add(self) -> Iterable[DockerParameter]:
"""Get the --cap-add options to be passed to docker
Generated from the cap_add configuration option, which is a list of
capabilities.
Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}
:returns: A generator of cap_add options to be passed as --cap-add flags"""
for value in self.config_dict.get("cap_add", []):
yield {"key": "cap-add", "value": f"{value}"} | Get the --cap-add options to be passed to docker
Generated from the cap_add configuration option, which is a list of
capabilities.
Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}
:returns: A generator of cap_add options to be passed as --cap-add flags | get_cap_add | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_cap_drop(self) -> Iterable[DockerParameter]:
"""Generates --cap-drop options to be passed to docker by default, which
makes them not able to perform special privilege escalation stuff
https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
"""
for cap in CAPS_DROP:
yield {"key": "cap-drop", "value": cap} | Generates --cap-drop options to be passed to docker by default, which
makes them not able to perform special privilege escalation stuff
https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
| get_cap_drop | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_cap_args(self) -> Iterable[DockerParameter]:
"""Generate all --cap-add/--cap-drop parameters, ensuring not to have overlapping settings"""
cap_adds = list(self.get_cap_add())
if cap_adds and is_using_unprivileged_containers():
log.warning(
"Unprivileged containerizer detected, adding capabilities will not work properly"
)
yield from cap_adds
added_caps = [cap["value"] for cap in cap_adds]
for cap in self.get_cap_drop():
if cap["value"] not in added_caps:
yield cap | Generate all --cap-add/--cap-drop parameters, ensuring not to have overlapping settings | get_cap_args | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
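
A self-contained sketch of the add/drop reconciliation above: any capability that was explicitly added is left out of the default drop list. CAPS_DROP here is an illustrative subset of the real constant.

CAPS_DROP = ["SETPCAP", "MKNOD", "SYS_CHROOT", "SYS_PTRACE"]
cap_adds = [{"key": "cap-add", "value": "SYS_PTRACE"}]

params = list(cap_adds)
added = {c["value"] for c in cap_adds}
params.extend(
    {"key": "cap-drop", "value": cap} for cap in CAPS_DROP if cap not in added
)
print(params)
# [{'key': 'cap-add', 'value': 'SYS_PTRACE'},
#  {'key': 'cap-drop', 'value': 'SETPCAP'},
#  {'key': 'cap-drop', 'value': 'MKNOD'},
#  {'key': 'cap-drop', 'value': 'SYS_CHROOT'}]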
def format_docker_parameters(
self,
with_labels: bool = True,
system_paasta_config: Optional["SystemPaastaConfig"] = None,
) -> List[DockerParameter]:
"""Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings
:param with_labels: Whether to build docker parameters with or without labels
:returns: A list of parameters to be added to docker run"""
parameters: List[DockerParameter] = [
{"key": "memory-swap", "value": self.get_mem_swap()},
{"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
{"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
]
if self.use_docker_disk_quota(system_paasta_config=system_paasta_config):
parameters.append(
{
"key": "storage-opt",
"value": f"size={int(self.get_disk() * 1024 * 1024)}",
}
)
if with_labels:
parameters.extend(
[
{"key": "label", "value": "paasta_service=%s" % self.service},
{"key": "label", "value": "paasta_instance=%s" % self.instance},
]
)
extra_docker_args = self.get_extra_docker_args()
if extra_docker_args:
for key, value in extra_docker_args.items():
parameters.extend([{"key": key, "value": value}])
parameters.extend(self.get_docker_init())
parameters.extend(self.get_cap_args())
return parameters | Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings
:param with_labels: Whether to build docker parameters with or without labels
:returns: A list of parameters to be added to docker run | format_docker_parameters | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
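
A sketch of turning the parameter dicts above into `docker run` flags, using the formatting rule quoted in the docstring; the parameter values are illustrative.

parameters = [
    {"key": "memory-swap", "value": "1088m"},
    {"key": "cpu-period", "value": "100000"},
    {"key": "cpu-quota", "value": "200000"},
    {"key": "label", "value": "paasta_service=my_service"},
]
flags = ["--%s=%s" % (p["key"], p["value"]) for p in parameters]
print(" ".join(flags))
# --memory-swap=1088m --cpu-period=100000 --cpu-quota=200000 --label=paasta_service=my_service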