code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def makefile_responds_to(target):
    """Runs `make --question <target>` to detect if a makefile responds to the
    specified target."""
    # According to http://www.gnu.org/software/make/manual/make.html#index-exit-status-of-make,
    # 0 means OK, 1 means the target is not up to date, and 2 means error
    returncode, _ = _run(["make", "--question", target], timeout=5)
    return returncode != 2

makefile_responds_to | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
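A quick illustration of the exit-status contract above (a sketch; `_run_sketch` is a hypothetical stand-in for paasta's own `_run` subprocess wrapper, which is defined elsewhere and returns `(returncode, output)`):

import subprocess

def _run_sketch(cmd, timeout=None):
    # Hypothetical stand-in for paasta_tools' _run: returns (returncode, output).
    proc = subprocess.run(cmd, capture_output=True, timeout=timeout)
    return proc.returncode, proc.stdout.decode("utf-8", errors="replace")

# `make --question itest` exits 0 (target up to date) or 1 (out of date)
# when the target exists, and 2 when it does not -- hence `returncode != 2`.
returncode, _ = _run_sketch(["make", "--question", "itest"], timeout=5)
print("Makefile responds to itest:", returncode != 2)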
def makefile_check():
    """Detects if you have a makefile and runs some sanity tests against
    it to ensure it is paasta-ready"""
    makefile_path = is_file_in_dir("Makefile", os.getcwd())
    if makefile_path:
        print(PaastaCheckMessages.MAKEFILE_FOUND)

        if makefile_has_a_tab(makefile_path):
            print(PaastaCheckMessages.MAKEFILE_HAS_A_TAB)
        else:
            print(PaastaCheckMessages.MAKEFILE_HAS_NO_TABS)

        if makefile_has_docker_tag(makefile_path):
            print(PaastaCheckMessages.MAKEFILE_HAS_DOCKER_TAG)
        else:
            print(PaastaCheckMessages.MAKEFILE_HAS_NO_DOCKER_TAG)

        if makefile_responds_to("cook-image"):
            print(PaastaCheckMessages.MAKEFILE_RESPONDS_BUILD_IMAGE)
        else:
            print(PaastaCheckMessages.MAKEFILE_RESPONDS_BUILD_IMAGE_FAIL)

        if makefile_responds_to("itest"):
            print(PaastaCheckMessages.MAKEFILE_RESPONDS_ITEST)
        else:
            print(PaastaCheckMessages.MAKEFILE_RESPONDS_ITEST_FAIL)

        if makefile_responds_to("test"):
            print(PaastaCheckMessages.MAKEFILE_RESPONDS_TEST)
        else:
            print(PaastaCheckMessages.MAKEFILE_RESPONDS_TEST_FAIL)
    else:
        print(PaastaCheckMessages.MAKEFILE_MISSING)

makefile_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def get_deploy_groups_used_by_framework(instance_type, service, soa_dir):
    """This is a kind of funny function that gets all the instances for the
    specified service and framework, and massages them into a form that matches
    up with what deploy.yaml's steps look like. This is only so we can compare
    it 1:1 with what deploy.yaml has, for linting.

    :param instance_type: one of the entries in utils.INSTANCE_TYPES
    :param service: the service name
    :param soa_dir: The SOA configuration directory to read from
    :returns: a set of deploy group names used by the service.
    """
    deploy_groups = []
    for cluster in list_clusters(service, soa_dir):
        for _, instance in get_service_instance_list(
            service=service,
            cluster=cluster,
            instance_type=instance_type,
            soa_dir=soa_dir,
        ):
            try:
                config = get_instance_config(
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    soa_dir=soa_dir,
                    load_deployments=False,
                    instance_type=instance_type,
                )
                deploy_groups.append(config.get_deploy_group())
            except NotImplementedError:
                pass
    return set(filter(None, deploy_groups))

get_deploy_groups_used_by_framework | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def deployments_check(service, soa_dir):
    """Checks for consistency between deploy.yaml and the kubernetes/etc yamls"""
    the_return = True
    pipeline_deploy_groups = get_pipeline_deploy_groups(
        service=service, soa_dir=soa_dir
    )
    framework_deploy_groups = {}
    in_deploy_not_frameworks = set(pipeline_deploy_groups)
    for it in INSTANCE_TYPES:
        framework_deploy_groups[it] = get_deploy_groups_used_by_framework(
            it, service, soa_dir
        )
        in_framework_not_deploy = set(framework_deploy_groups[it]) - set(
            pipeline_deploy_groups
        )
        in_deploy_not_frameworks -= set(framework_deploy_groups[it])
        if len(in_framework_not_deploy) > 0:
            print(
                "{} There are some instance(s) you have asked to run in {} that".format(
                    x_mark(), it
                )
            )
            print(" do not have a corresponding entry in deploy.yaml:")
            print(" %s" % PaastaColors.bold(", ".join(in_framework_not_deploy)))
            print(" You should probably configure these to use a 'deploy_group' or")
            print(
                " add entries to deploy.yaml for them so they are deployed to those clusters."
            )
            the_return = False

    if len(in_deploy_not_frameworks) > 0:
        print(
            "%s There are some instance(s) in deploy.yaml that are not referenced"
            % x_mark()
        )
        print(" by any instance:")
        print(" %s" % PaastaColors.bold((", ".join(in_deploy_not_frameworks))))
        print(
            " You should probably delete these deploy.yaml entries if they are unused."
        )
        the_return = False

    if the_return is True:
        print(success("All entries in deploy.yaml correspond to a paasta instance"))
        for it in INSTANCE_TYPES:
            if len(framework_deploy_groups[it]) > 0:
                print(
                    success(
                        "All %s instances have a corresponding deploy.yaml entry" % it
                    )
                )
    return the_return

deployments_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def sensu_check(service, service_path, soa_dir):
    """Check whether monitoring.yaml exists in the service directory,
    and that the team name is declared.

    :param service: name of service currently being examined
    :param service_path: path to location of monitoring.yaml file
    :param soa_dir: the SOA configuration directory to read from
    """
    if is_file_in_dir("monitoring.yaml", service_path):
        print(PaastaCheckMessages.SENSU_MONITORING_FOUND)
        team = get_team(service=service, overrides={}, soa_dir=soa_dir)
        if team is None:
            print(PaastaCheckMessages.SENSU_TEAM_MISSING)
        else:
            print(PaastaCheckMessages.sensu_team_found(team))
    else:
        print(PaastaCheckMessages.SENSU_MONITORING_MISSING)

sensu_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def service_dir_check(service, soa_dir):
    """Check whether a directory for the service exists in the SOA
    configuration directory (e.g. /nail/etc/services).

    :param service: string of service name we wish to inspect
    :param soa_dir: the SOA configuration directory to search
    """
    try:
        validate_service_name(service, soa_dir)
        print(PaastaCheckMessages.service_dir_found(service, soa_dir))
    except NoSuchService:
        print(PaastaCheckMessages.service_dir_missing(service, soa_dir))

service_dir_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def smartstack_check(service, service_path, soa_dir):
    """Check whether smartstack.yaml exists in the service directory, and the proxy
    ports are declared. Print an appropriate message depending on the outcome.

    :param service: name of service currently being examined
    :param service_path: path to location of smartstack.yaml file"""
    if is_file_in_dir("smartstack.yaml", service_path):
        print(PaastaCheckMessages.SMARTSTACK_YAML_FOUND)
        instances = get_all_namespaces_for_service(service=service, soa_dir=soa_dir)
        if len(instances) > 0:
            for namespace, config in get_all_namespaces_for_service(
                service=service, soa_dir=soa_dir, full_name=False
            ):
                if "proxy_port" in config:
                    print(
                        PaastaCheckMessages.smartstack_port_found(
                            namespace, config.get("proxy_port")
                        )
                    )
                else:
                    print(PaastaCheckMessages.SMARTSTACK_PORT_MISSING)
        else:
            print(PaastaCheckMessages.SMARTSTACK_PORT_MISSING)

smartstack_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def paasta_check(args):
    """Analyze the service in the PWD to determine if it is paasta ready

    :param args: argparse.Namespace obj created from sys.argv by cli"""
    soa_dir = args.yelpsoa_config_root
    service = figure_out_service_name(args, soa_dir)
    service_path = os.path.join(soa_dir, service)

    service_dir_check(service, soa_dir)
    deploy_check(service_path)
    deploy_has_security_check(service, soa_dir)
    git_repo_check(service, soa_dir)
    docker_check()
    makefile_check()
    deployments_check(service, soa_dir)
    sensu_check(service, service_path, soa_dir)
    smartstack_check(service, service_path, soa_dir)
    paasta_validate_soa_configs(service, service_path)

paasta_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0
def paasta_list(args):
    """Print a list of Yelp services currently running

    :param args: argparse.Namespace obj created from sys.argv by cli"""
    if args.print_instances:
        services = list_service_instances(args.soa_dir)
    elif args.all:
        services = list_services(args.soa_dir)
    else:
        services = list_paasta_services(args.soa_dir)

    for service in services:
        print(service)

paasta_list | python | Yelp/paasta | paasta_tools/cli/cmds/list.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/list.py | Apache-2.0
def perform_http_healthcheck(url, timeout):
    """Performs an HTTP healthcheck against a url.

    :param url: the healthcheck url
    :param timeout: timeout in seconds
    :returns: a tuple of (passed, reason string); passed is True if the
              healthcheck succeeds within the number of seconds specified by
              timeout, False otherwise
    """
    try:
        with Timeout(seconds=timeout):
            try:
                res = requests.get(url, verify=False)
            except requests.ConnectionError:
                return (False, "http request failed: connection failed")
    except TimeoutError:
        return (False, "http request timed out after %d seconds" % timeout)

    if "content-type" in res.headers and "," in res.headers["content-type"]:
        print(
            PaastaColors.yellow(
                "Multiple content-type headers detected in response."
                " The Mesos healthcheck system will treat this as a failure!"
            )
        )
        return (False, "http request succeeded, code %d" % res.status_code)
    # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html
    elif res.status_code >= 200 and res.status_code < 400:
        return (True, "http request succeeded, code %d" % res.status_code)
    else:
        return (False, "http request failed, code %s" % str(res.status_code))

perform_http_healthcheck | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
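The `Timeout` context manager used above comes from paasta's utils and is not defined in this file; a minimal signal-based sketch of the same contract (raising `TimeoutError`, which the caller catches) might look like:

import signal

class TimeoutSketch:
    # Hypothetical sketch of a context manager like paasta's Timeout.
    def __init__(self, seconds=1, error_message="Timeout"):
        self.seconds = seconds
        self.error_message = error_message

    def _handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def __enter__(self):
        # SIGALRM-based, so this only works in the main thread on Unix.
        signal.signal(signal.SIGALRM, self._handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, exc_type, exc_value, traceback):
        signal.alarm(0)  # cancel any pending alarm on exit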
def perform_tcp_healthcheck(url, timeout):
    """Attempts to connect to the given host and port.

    :param url: the healthcheck url (in the form tcp://host:port)
    :param timeout: timeout in seconds
    :returns: a tuple of (passed, reason string); passed is True if the
              connection succeeds within the number of seconds specified by
              timeout, False otherwise
    """
    url_elem = urlparse(url)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    result = sock.connect_ex((url_elem.hostname, url_elem.port))
    sock.close()
    if result == 0:
        return (True, "tcp connection succeeded")
    else:
        return (False, "%s (timeout %d seconds)" % (os.strerror(result), timeout))

perform_tcp_healthcheck | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
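All of these healthcheck helpers share the (passed, reason) tuple shape; for example (host and port hypothetical):

passed, reason = perform_tcp_healthcheck("tcp://localhost:8888", timeout=5)
print(passed, reason)  # e.g. True "tcp connection succeeded"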
def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
    """Executes a command inside the container and checks its return code.

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param command: command to execute
    :param timeout: timeout in seconds
    :returns: a tuple of (passed, output); passed is True if the command
              exits with return code 0, False otherwise
    """
    (output, return_code) = execute_in_container(
        docker_client, container_id, command, timeout
    )
    if return_code == 0:
        return (True, output)
    else:
        return (False, output)

perform_cmd_healthcheck | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
def run_healthcheck_on_container(
    docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
):
    """Performs healthcheck on a container

    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
    :param timeout: timeout in seconds for individual check
    :returns: a tuple of (bool, output string)
    """
    healthcheck_result = (False, "unknown")
    if healthcheck_mode == "cmd":
        healthcheck_result = perform_cmd_healthcheck(
            docker_client, container_id, healthcheck_data, timeout
        )
    elif healthcheck_mode == "http" or healthcheck_mode == "https":
        healthcheck_result = perform_http_healthcheck(healthcheck_data, timeout)
    elif healthcheck_mode == "tcp":
        healthcheck_result = perform_tcp_healthcheck(healthcheck_data, timeout)
    else:
        print(
            PaastaColors.yellow(
                "Healthcheck mode '%s' is not currently supported!" % healthcheck_mode
            )
        )
        sys.exit(1)
    return healthcheck_result

run_healthcheck_on_container | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
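Assuming a docker-py client and a container id (both hypothetical here), the dispatcher is invoked like:

passed, output = run_healthcheck_on_container(
    docker_client,
    container_id="0123456789ab",                       # hypothetical container id
    healthcheck_mode="http",
    healthcheck_data="http://localhost:31337/status",  # hypothetical URL
    timeout=10,
)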
def simulate_healthcheck_on_service(
    instance_config,
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    healthcheck_enabled,
):
    """Simulates a Marathon-style healthcheck on the given service if healthchecking is enabled

    :param instance_config: service manifest
    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param healthcheck_data: url or command to healthcheck
    :param healthcheck_enabled: boolean
    :returns: healthcheck_passed: boolean
    """
    healthcheck_link = PaastaColors.cyan(healthcheck_data)
    if healthcheck_enabled:
        grace_period = instance_config.get_healthcheck_grace_period_seconds()
        timeout = instance_config.get_healthcheck_timeout_seconds()
        interval = instance_config.get_healthcheck_interval_seconds()
        max_failures = instance_config.get_healthcheck_max_consecutive_failures()

        print(
            "\nStarting health check via %s (waiting %s seconds before "
            "considering failures due to grace period):"
            % (healthcheck_link, grace_period)
        )

        # silently start performing health checks until grace period ends or first check succeeds
        graceperiod_end_time = time.time() + grace_period
        after_grace_period_attempts = 0
        healthchecking = True

        def _stream_docker_logs(container_id, generator):
            while healthchecking:
                try:
                    # the generator will block until another log line is available
                    log_line = next(generator).decode("utf-8").rstrip("\n")
                    if healthchecking:
                        print(f"container [{container_id[:12]}]: {log_line}")
                    else:
                        # stop streaming at first opportunity, since generator.close()
                        # can't be used until the container is dead
                        break
                except StopIteration:  # natural end of logs
                    break

        docker_logs_generator = docker_client.logs(
            container_id, stderr=True, stream=True
        )
        threading.Thread(
            target=_stream_docker_logs,
            daemon=True,
            args=(container_id, docker_logs_generator),
        ).start()

        while True:
            # First inspect the container for early exits
            container_state = docker_client.inspect_container(container_id)
            if not container_state["State"]["Running"]:
                print(
                    PaastaColors.red(
                        "Container exited with code {}".format(
                            container_state["State"]["ExitCode"]
                        )
                    )
                )
                healthcheck_passed = False
                break

            healthcheck_passed, healthcheck_output = run_healthcheck_on_container(
                docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
            )

            # Yay, we passed the healthcheck
            if healthcheck_passed:
                print(
                    "{}'{}' (via {})".format(
                        PaastaColors.green("Healthcheck succeeded!: "),
                        healthcheck_output,
                        healthcheck_link,
                    )
                )
                break

            # Otherwise, print why we failed
            if time.time() < graceperiod_end_time:
                color = PaastaColors.grey
                msg = "(disregarded due to grace period)"
                extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"
            else:
                # If we've exceeded the grace period, we start incrementing attempts
                after_grace_period_attempts += 1
                color = PaastaColors.red
                msg = "(Attempt {} of {})".format(
                    after_grace_period_attempts, max_failures
                )
                extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"

            print("{}{}".format(color(f"Healthcheck failed! {msg}"), extra_msg))

            if after_grace_period_attempts == max_failures:
                break

            time.sleep(interval)
        healthchecking = False  # end docker logs stream
    else:
        print(
            "\nPaaSTA would have healthchecked your service via\n%s" % healthcheck_link
        )
        healthcheck_passed = True
    return healthcheck_passed

simulate_healthcheck_on_service | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
def docker_pull_image(docker_url):
    """Pull an image via ``docker pull``. Uses the actual pull command instead of the python
    bindings due to the docker auth/registry transition. Once we are past Docker 1.6
    we can use better credential management, but for now this function assumes the
    user running the command has already been authorized for the registry"""
    print(
        "Please wait while the image (%s) is pulled (times out after 30m)..."
        % docker_url,
        file=sys.stderr,
    )
    with Timeout(
        seconds=1800, error_message=f"Timed out pulling docker image from {docker_url}"
    ), open(os.devnull, mode="wb") as DEVNULL:
        ret, _ = _run("docker pull %s" % docker_url, stream=True, stdin=DEVNULL)
        if ret != 0:
            print(
                "\nPull failed. Are you authorized to run docker commands?",
                file=sys.stderr,
            )
            sys.exit(ret)

docker_pull_image | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
def get_container_id(docker_client, container_name):
    """Use 'docker_client' to find the container we started, identifiable by
    its 'container_name'. If we can't find the id, raise
    LostContainerException.
    """
    containers = docker_client.containers(all=False)
    for container in containers:
        if "/%s" % container_name in container.get("Names", []):
            return container.get("Id")
    raise LostContainerException(
        "Can't find the container I just launched so I can't do anything else.\n"
        "Try 'docker ps --all | grep %s' to see where it went.\n"
        "Here were all the containers:\n"
        "%s" % (container_name, containers)
    )

get_container_id | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
def get_local_run_environment_vars(instance_config, port0, framework):
    """Returns a dictionary of environment variables to simulate what would be available to
    a paasta service running in a container"""
    hostname = socket.getfqdn()
    docker_image = instance_config.get_docker_image()
    if docker_image == "":
        # In a local_run environment, the docker_image may not be available
        # so we can fall-back to the injected DOCKER_TAG per the paasta contract
        docker_image = os.environ["DOCKER_TAG"]

    env = {
        "HOST": hostname,
        "PAASTA_DOCKER_IMAGE": docker_image,
        "PAASTA_LAUNCHED_BY": get_possible_launched_by_user_variable_from_env(),
        "PAASTA_HOST": hostname,
        # Kubernetes instances remove PAASTA_CLUSTER, so we need to re-add it ourselves
        "PAASTA_CLUSTER": instance_config.get_cluster(),
    }
    return env

get_local_run_environment_vars | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
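For a hypothetical instance on cluster "norcal-devc", the returned mapping looks roughly like (all values illustrative):

env = {
    "HOST": "dev42.example.com",
    "PAASTA_DOCKER_IMAGE": "services-myservice:paasta-abcdef123456",
    "PAASTA_LAUNCHED_BY": "someuser",
    "PAASTA_HOST": "dev42.example.com",
    "PAASTA_CLUSTER": "norcal-devc",
}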
def assume_aws_role(
    instance_config: InstanceConfig,
    service: str,
    assume_role_arn: str,
    assume_pod_identity: bool,
    use_okta_role: bool,
    aws_account: str,
) -> AWSSessionCreds:
    """Runs the AWS cli to assume into the correct role, then extracts and returns the ENV variables from that session"""
    pod_identity = instance_config.get_iam_role()
    if assume_role_arn:
        pod_identity = assume_role_arn
    if assume_pod_identity and not pod_identity:
        print(
            f"Error: --assume-pod-identity passed but no pod identity was found for this instance ({instance_config.instance})",
            file=sys.stderr,
        )
        sys.exit(1)
    if pod_identity and (assume_pod_identity or assume_role_arn):
        print(
            "Calling aws-okta to assume role {} using account {}".format(
                pod_identity, aws_account
            )
        )
    elif use_okta_role:
        print(f"Calling aws-okta using account {aws_account}")
    elif "AWS_ROLE_ARN" in os.environ and "AWS_WEB_IDENTITY_TOKEN_FILE" in os.environ:
        # Get a session using the current pod identity
        print(
            f"Found Pod Identity token in env. Assuming into role {os.environ['AWS_ROLE_ARN']}."
        )
        boto_session = boto3.Session()
        credentials = boto_session.get_credentials()
        assumed_creds_dict: AWSSessionCreds = {
            "AWS_ACCESS_KEY_ID": credentials.access_key,
            "AWS_SECRET_ACCESS_KEY": credentials.secret_key,
            "AWS_SESSION_TOKEN": credentials.token,
            "AWS_SECURITY_TOKEN": credentials.token,
        }
        return assumed_creds_dict
    else:
        # use_okta_role, assume_pod_identity, and assume_role are all empty, and there's no
        # pod identity (web identity token) in the env. This shouldn't happen
        print(
            "Error: assume_aws_role called without required arguments and no pod identity env",
            file=sys.stderr,
        )
        sys.exit(1)

    # local-run will sometimes run as root - make sure that we get the actual
    # user's AWS credentials instead of looking for non-existent root AWS
    # credentials
    if os.getuid() == 0:
        aws_okta_cmd = [
            "sudo",
            "-u",
            get_username(),
            f"HOME=/nail/home/{get_username()}",
            "aws-okta",
            "-a",
            aws_account,
            "-o",
            "json",
        ]
    else:
        aws_okta_cmd = ["aws-okta", "-a", aws_account, "-o", "json"]
    cmd = subprocess.run(aws_okta_cmd, stdout=subprocess.PIPE)
    if cmd.returncode != 0:
        print(
            "Error calling aws-okta. Remove --assume-pod-identity to run without pod identity role",
            file=sys.stderr,
        )
        sys.exit(1)
    cmd_output = json.loads(cmd.stdout.decode("utf-8"))
    if not use_okta_role:
        boto_session = boto3.Session(
            aws_access_key_id=cmd_output["AccessKeyId"],
            aws_secret_access_key=cmd_output["SecretAccessKey"],
            aws_session_token=cmd_output["SessionToken"],
        )
        sts_client = boto_session.client("sts")
        assumed_role = sts_client.assume_role(
            RoleArn=pod_identity, RoleSessionName=f"{get_username()}-local-run"
        )
        # The contents of the "Credentials" key from assume_role is the same as from aws-okta
        cmd_output = assumed_role["Credentials"]
    creds_dict: AWSSessionCreds = {
        "AWS_ACCESS_KEY_ID": cmd_output["AccessKeyId"],
        "AWS_SECRET_ACCESS_KEY": cmd_output["SecretAccessKey"],
        "AWS_SESSION_TOKEN": cmd_output["SessionToken"],
        "AWS_SECURITY_TOKEN": cmd_output["SessionToken"],
    }
    return creds_dict

assume_aws_role | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
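`AWSSessionCreds` is defined elsewhere in paasta; a TypedDict sketch consistent with the keys used above would be:

from typing import TypedDict

class AWSSessionCreds(TypedDict):
    # Sketch inferred from the keys populated above, not copied from the repo.
    AWS_ACCESS_KEY_ID: str
    AWS_SECRET_ACCESS_KEY: str
    AWS_SESSION_TOKEN: str
    AWS_SECURITY_TOKEN: str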
def run_docker_container(
    docker_client,
    service,
    instance,
    docker_url,
    volumes,
    interactive,
    command,
    healthcheck,
    healthcheck_only,
    user_port,
    instance_config,
    secret_provider_name,
    soa_dir=DEFAULT_SOA_DIR,
    dry_run=False,
    json_dict=False,
    framework=None,
    secret_provider_kwargs={},
    skip_secrets=False,
    assume_pod_identity=False,
    assume_role_arn="",
    use_okta_role=False,
    assume_role_aws_account: Optional[str] = None,
    use_service_auth_token: bool = False,
    use_sso_service_auth_token: bool = False,
):
    """docker-py has issues running a container with a TTY attached, so for
    consistency we execute 'docker run' directly in both interactive and
    non-interactive modes.

    In non-interactive mode when the run is complete, stop the container and
    remove it (with docker-py).
    """
    if user_port:
        if check_if_port_free(user_port):
            chosen_port = user_port
        else:
            print(
                PaastaColors.red(
                    "The chosen port is already in use!\n"
                    "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you"
                ),
                file=sys.stderr,
            )
            sys.exit(1)
    else:
        chosen_port = pick_random_port(service)

    environment = instance_config.get_env()
    secret_volumes = {}  # type: ignore
    if not skip_secrets:
        # if secrets_for_owner_team enabled in yelpsoa for service
        if is_secrets_for_teams_enabled(service, soa_dir):
            try:
                kube_client = KubeClient(
                    config_file=KUBE_CONFIG_USER_PATH, context=instance_config.cluster
                )
                secret_environment = get_kubernetes_secret_env_variables(
                    kube_client, environment, service, instance_config.get_namespace()
                )
                secret_volumes = get_kubernetes_secret_volumes(
                    kube_client,
                    instance_config.get_secret_volumes(),
                    service,
                    instance_config.get_namespace(),
                )
            except Exception as e:
                print(
                    f"Failed to retrieve kubernetes secrets with {e.__class__.__name__}: {e}"
                )
                print(
                    "If you don't need the secrets for local-run, you can add --skip-secrets"
                )
                sys.exit(1)
        else:
            try:
                secret_environment = decrypt_secret_environment_variables(
                    secret_provider_name=secret_provider_name,
                    environment=environment,
                    soa_dir=soa_dir,
                    service_name=instance_config.get_service(),
                    cluster_name=instance_config.cluster,
                    secret_provider_kwargs=secret_provider_kwargs,
                )
                secret_volumes = decrypt_secret_volumes(
                    secret_provider_name=secret_provider_name,
                    secret_volumes_config=instance_config.get_secret_volumes(),
                    soa_dir=soa_dir,
                    service_name=instance_config.get_service(),
                    cluster_name=instance_config.cluster,
                    secret_provider_kwargs=secret_provider_kwargs,
                )
            except Exception as e:
                print(f"Failed to decrypt secrets with {e.__class__.__name__}: {e}")
                print(
                    "If you don't need the secrets for local-run, you can add --skip-secrets"
                )
                sys.exit(1)
        environment.update(secret_environment)

    if (
        assume_role_arn
        or assume_pod_identity
        or use_okta_role
        or "AWS_WEB_IDENTITY_TOKEN_FILE" in os.environ
    ):
        aws_creds = assume_aws_role(
            instance_config,
            service,
            assume_role_arn,
            assume_pod_identity,
            use_okta_role,
            assume_role_aws_account,
        )
        environment.update(aws_creds)

    if use_service_auth_token:
        environment["YELP_SVC_AUTHZ_TOKEN"] = get_service_auth_token()
    elif use_sso_service_auth_token:
        environment["YELP_SVC_AUTHZ_TOKEN"] = get_sso_auth_token()

    local_run_environment = get_local_run_environment_vars(
        instance_config=instance_config, port0=chosen_port, framework=framework
    )
    environment.update(local_run_environment)
    net = instance_config.get_net()
    memory = instance_config.get_mem()
    container_name = get_container_name()
    docker_params = instance_config.format_docker_parameters()

    healthcheck_mode, healthcheck_data = get_healthcheck_for_instance(
        service, instance, instance_config, chosen_port, soa_dir=soa_dir
    )
    if healthcheck_mode is None:
        container_port = None
        interactive = True
    elif not user_port and not healthcheck and not healthcheck_only:
        container_port = None
    else:
        try:
            container_port = instance_config.get_container_port()
        except AttributeError:
            container_port = None

    simulate_healthcheck = (
        healthcheck_only or healthcheck
    ) and healthcheck_mode is not None

    for container_mount_path, secret_content in secret_volumes.items():
        temp_secret_folder = tempfile.mktemp(dir=os.environ.get("TMPDIR", "/nail/tmp"))
        os.makedirs(temp_secret_folder, exist_ok=True)
        temp_secret_filename = os.path.join(temp_secret_folder, str(uuid.uuid4()))
        # write the secret contents
        # Permissions will automatically be set to readable by "users" group
        # TODO: Make this readable only by "nobody" user? What about other non-standard users that people sometimes use inside the container?
        # -rw-r--r-- 1 dpopes users 3.2K Nov 28 19:16 854bdbad-30b8-4681-ae4e-854cb28075c5
        try:
            # First try to write the file as a string
            # This is for text like config files
            with open(temp_secret_filename, "w") as f:
                f.write(secret_content)
        except TypeError:
            # If that fails, try to write it as bytes
            # This is for binary files like TLS keys
            with open(temp_secret_filename, "wb") as fb:
                fb.write(secret_content)
        # Append this to the list of volumes passed to docker run
        volumes.append(f"{temp_secret_filename}:{container_mount_path}:ro")

    docker_run_args = dict(
        memory=memory,
        chosen_port=chosen_port,
        container_port=container_port,
        container_name=container_name,
        volumes=volumes,
        env=environment,
        interactive=interactive,
        detach=simulate_healthcheck,
        docker_hash=docker_url,
        command=command,
        net=net,
        docker_params=docker_params,
    )
    docker_run_cmd = get_docker_run_cmd(**docker_run_args)
    joined_docker_run_cmd = " ".join(docker_run_cmd)

    if dry_run:
        if json_dict:
            print(json.dumps(docker_run_args))
        else:
            print(json.dumps(docker_run_cmd))
        return 0
    else:
        print("Running docker command:\n%s" % PaastaColors.grey(joined_docker_run_cmd))

    merged_env = {**os.environ, **environment}

    if interactive or not simulate_healthcheck:
        # NOTE: This immediately replaces us with the docker run cmd. Docker
        # run knows how to clean up the running container in this situation.
        wrapper_path = shutil.which("paasta_docker_wrapper")
        # To properly simulate mesos, we pop the PATH, which is not available to
        # the executor
        merged_env.pop("PATH")
        execlpe(wrapper_path, *docker_run_cmd, merged_env)
        # For testing, when execlpe is patched out and doesn't replace us, we
        # still want to bail out.
        return 0

    container_started = False
    container_id = None
    try:
        (returncode, output) = _run(docker_run_cmd, env=merged_env)
        if returncode != 0:
            print(
                "Failure trying to start your container!\n"
                "Returncode: %d\n"
                "Output:\n"
                "%s\n"
                "\n"
                "Fix that problem and try again.\n"
                "http://y/paasta-troubleshooting" % (returncode, output)
            )
            # Container failed to start so no need to cleanup; just bail.
            sys.exit(1)
        container_started = True
        container_id = get_container_id(docker_client, container_name)
        print("Found our container running with CID %s" % container_id)

        if simulate_healthcheck:
            healthcheck_result = simulate_healthcheck_on_service(
                instance_config=instance_config,
                docker_client=docker_client,
                container_id=container_id,
                healthcheck_mode=healthcheck_mode,
                healthcheck_data=healthcheck_data,
                healthcheck_enabled=healthcheck,
            )

        def _output_exit_code():
            returncode = docker_client.inspect_container(container_id)["State"][
                "ExitCode"
            ]
            print(f"Container exited: {returncode}")

        if healthcheck_only:
            if container_started:
                _output_exit_code()
                _cleanup_container(docker_client, container_id)
            if healthcheck_mode is None:
                print(
                    "--healthcheck-only, but no healthcheck is defined for this instance!"
                )
                sys.exit(1)
            elif healthcheck_result is True:
                sys.exit(0)
            else:
                sys.exit(1)

        running = docker_client.inspect_container(container_id)["State"]["Running"]
        if running:
            print("Your service is now running! Tailing stdout and stderr:")
            for line in docker_client.logs(
                container_id,
                stderr=True,
                stream=True,
            ):
                # writing to sys.stdout.buffer lets us write the raw bytes we
                # get from the docker client without having to convert them to
                # a utf-8 string
                sys.stdout.buffer.write(line)
                sys.stdout.flush()
        else:
            _output_exit_code()
            returncode = 3
    except KeyboardInterrupt:
        returncode = 3

    # Cleanup if the container exits on its own or interrupted.
    if container_started:
        returncode = docker_client.inspect_container(container_id)["State"]["ExitCode"]
        _cleanup_container(docker_client, container_id)
    return returncode

run_docker_container | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
def format_command_for_type(command, instance_type, date):
    """
    Given an instance_type, return the command formatted appropriately
    for that instance type.
    """
    if instance_type == "tron":
        interpolated_command = parse_time_variables(command, date)
        return interpolated_command
    else:
        return command

format_command_for_type | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
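For tron instances the date is interpolated into the command; illustratively (the command and variable syntax are hypothetical, since parse_time_variables is defined elsewhere):

import datetime

cmd = format_command_for_type(
    command="cleanup_logs --date {shortdate}",  # hypothetical tron command
    instance_type="tron",
    date=datetime.datetime(2023, 1, 15),
)
# For any other instance_type the command is returned unchanged:
assert format_command_for_type("make test", "adhoc", None) == "make test"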
def configure_and_run_docker_container(
    docker_client,
    docker_url,
    docker_sha,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    assume_role_aws_account,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    if instance is None and args.healthcheck_only:
        print("With --healthcheck-only, --instance MUST be provided!", file=sys.stderr)
        return 1
    if instance is None and not sys.stdin.isatty():
        print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = args.volumes
    load_deployments = (docker_url is None or pull_image) and not docker_sha
    interactive = args.interactive

    try:
        if instance is None:
            instance_type = "adhoc"
            instance = "interactive"
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(
                service, instance, cluster, soa_dir
            )
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s. "
                "You can generate this by running: "
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": soa_dir, "service": service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1

    if docker_sha is not None:
        instance_config.branch_dict = {
            "git_sha": docker_sha,
            "docker_image": build_docker_image_name(service=service, sha=docker_sha),
            "desired_state": "start",
            "force_bounce": None,
        }

    if docker_url is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            if instance_config.get_deploy_group() is None:
                print(
                    PaastaColors.red(
                        f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
                        "the proper image can be used to run for this service."
                    ),
                    sep="",
                    file=sys.stderr,
                )
            else:
                print(
                    PaastaColors.red(
                        "Error: No sha has been marked for deployment for the %s deploy group.\n"
                        "Please ensure this service has either run through a jenkins pipeline "
                        "or paasta mark-for-deployment has been run for %s\n"
                        % (instance_config.get_deploy_group(), service)
                    ),
                    sep="",
                    file=sys.stderr,
                )
            return 1

    if pull_image:
        docker_pull_image(docker_url)

    for volume in instance_config.get_volumes(
        system_paasta_config.get_volumes(),
    ):
        if os.path.exists(volume["hostPath"]):
            volumes.append(
                "{}:{}:{}".format(
                    volume["hostPath"], volume["containerPath"], volume["mode"].lower()
                )
            )
        else:
            print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding."
                    % volume["hostPath"]
                ),
                file=sys.stderr,
            )

    if interactive is True and args.cmd is None:
        command = "bash"
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = format_command_for_type(
                command=command_from_config, instance_type=instance_type, date=args.date
            )
        else:
            command = instance_config.get_args()

    secret_provider_kwargs = {
        "vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
        "vault_auth_method": args.vault_auth_method,
        "vault_token_file": args.vault_token_file,
    }

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_url=docker_url,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
        assume_pod_identity=args.assume_pod_identity,
        assume_role_arn=args.assume_role_arn,
        assume_role_aws_account=assume_role_aws_account,
        use_okta_role=args.use_okta_role,
        use_service_auth_token=args.use_service_auth_token,
        use_sso_service_auth_token=args.use_sso_service_auth_token,
    )

configure_and_run_docker_container | python | Yelp/paasta | paasta_tools/cli/cmds/local_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py | Apache-2.0
def build_component_descriptions(components: Mapping[str, Mapping[str, Any]]) -> str:
    """Returns a colored description string for every log component
    based on its help attribute"""
    output = []
    for k, v in components.items():
        output.append(" {}: {}".format(v["color"](k), v["help"]))
    return "\n".join(output)

build_component_descriptions | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
def check_timestamp_in_range(
    timestamp: datetime.datetime,
    start_time: datetime.datetime,
    end_time: datetime.datetime,
) -> bool:
    """A convenience function to check if a datetime.datetime timestamp is within the given start and end times.

    :param timestamp: The timestamp to check
    :param start_time: The start of the interval
    :param end_time: The end of the interval
    :return: True if timestamp is within the start_time..end_time range;
             also True if any of timestamp, start_time, or end_time is None
    """
    if timestamp is not None and start_time is not None and end_time is not None:
        if timestamp.tzinfo is None:
            timestamp = pytz.utc.localize(timestamp)
        return start_time < timestamp < end_time
    else:
        return True

check_timestamp_in_range | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
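For example:

import datetime
import pytz

now = pytz.utc.localize(datetime.datetime.utcnow())
start = now - datetime.timedelta(minutes=5)
end = now + datetime.timedelta(minutes=5)
assert check_timestamp_in_range(now, start, end) is True
assert check_timestamp_in_range(now, None, None) is True  # open-ended ranges always pass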
def paasta_log_line_passes_filter(
    line: str,
    levels: Sequence[str],
    service: str,
    components: Iterable[str],
    clusters: Sequence[str],
    instances: List[str],
    pods: Iterable[str] = None,
    start_time: datetime.datetime = None,
    end_time: datetime.datetime = None,
) -> bool:
    """Given a (JSON-formatted) log line, return True if the line should be
    displayed given the provided levels, components, and clusters; return False
    otherwise.

    NOTE: Pods are optional as services that use Mesos do not operate with pods.
    """
    try:
        parsed_line = json.loads(line)
    except ValueError:
        log.debug("Trouble parsing line as json. Skipping. Line: %r" % line)
        return False

    if (
        (instances is None or parsed_line.get("instance") in instances)
        and (parsed_line.get("level") is None or parsed_line.get("level") in levels)
        and parsed_line.get("component") in components
        and (
            parsed_line.get("cluster") in clusters
            or parsed_line.get("cluster") == ANY_CLUSTER
        )
    ):
        timestamp = isodate.parse_datetime(parsed_line.get("timestamp"))
        if check_timestamp_in_range(timestamp, start_time, end_time):
            return True
    return False

paasta_log_line_passes_filter | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
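An illustrative line that would pass the filter (all field values hypothetical):

import json

line = json.dumps(
    {
        "timestamp": "2023-01-15T10:00:00+00:00",
        "level": "event",
        "component": "deploy",
        "cluster": "norcal-devc",
        "instance": "main",
        "message": "deployed abc123",
    }
)
paasta_log_line_passes_filter(
    line, ["event"], "myservice", ["deploy"], ["norcal-devc"], ["main"]
)  # -> True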
def extract_utc_timestamp_from_log_line(line: str) -> datetime.datetime:
    """
    Extracts the timestamp from a log line of the format "<timestamp> <other data>" and returns a UTC datetime object,
    or None if it could not parse the line.
    """
    # Extract ISO 8601 date per http://www.pelagodesign.com/blog/2009/05/20/iso-8601-date-validation-that-doesnt-suck/
    iso_re = (
        r"^([\+-]?\d{4}(?!\d{2}\b))((-?)((0[1-9]|1[0-2])(\3([12]\d|0[1-9]|3[01]))?|W([0-4]\d|5[0-2])(-?[1-7])?|"
        r"(00[1-9]|0[1-9]\d|[12]\d{2}|3([0-5]\d|6[1-6])))([T\s]((([01]\d|2[0-3])((:?)[0-5]\d)?|24\:?00)([\.,]\d+"
        r"(?!:))?)?(\17[0-5]\d([\.,]\d+)?)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?)?)? "
    )
    tokens = re.match(iso_re, line)

    if not tokens:
        # Could not parse line
        return None
    timestamp = tokens.group(0).strip()
    dt = isodate.parse_datetime(timestamp)
    utc_timestamp = datetime_convert_timezone(dt, dt.tzinfo, tz.tzutc())
    return utc_timestamp

extract_utc_timestamp_from_log_line | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
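For example:

ts = extract_utc_timestamp_from_log_line("2023-01-15T10:00:00+00:00 some message")
# ts is a timezone-aware datetime in UTC
extract_utc_timestamp_from_log_line("no leading timestamp")  # -> None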
def print_log(
    line: str,
    requested_levels: Sequence[str],
    raw_mode: bool = False,
    strip_headers: bool = False,
) -> None:
    """Mostly a stub to ease testing. Eventually this may do some formatting or
    something.
    """
    if raw_mode:
        # suppress trailing newline since scribereader already attached one
        print(line, end="", flush=True)
    else:
        print(
            prettify_log_line(line, requested_levels, strip_headers),
            flush=True,
        )

print_log | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
def prettify_timestamp(timestamp: str) -> str:
    """Returns a more human-friendly form of 'timestamp' without microseconds and
    in local time.
    """
    dt = isodate.parse_datetime(timestamp)
    pretty_timestamp = datetime_from_utc_to_local(dt)
    return pretty_timestamp.strftime("%Y-%m-%d %H:%M:%S")

prettify_timestamp | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
def prettify_log_line(
    line: str, requested_levels: Sequence[str], strip_headers: bool
) -> str:
    """Given a line from the log, which is expected to be JSON and have all the
    things we expect, return a pretty formatted string containing relevant values.
    """
    try:
        parsed_line = json.loads(line)
    except ValueError:
        log.debug("Trouble parsing line as json. Skipping. Line: %r" % line)
        return "Invalid JSON: %s" % line

    try:
        if strip_headers:
            return "%(timestamp)s %(message)s" % (
                {
                    "timestamp": prettify_timestamp(parsed_line["timestamp"]),
                    "message": parsed_line["message"],
                }
            )
        else:
            return "%(timestamp)s %(component)s - %(message)s" % (
                {
                    "timestamp": prettify_timestamp(parsed_line["timestamp"]),
                    "component": prettify_component(parsed_line["component"]),
                    "message": parsed_line["message"],
                }
            )
    except KeyError:
        log.debug(
            "JSON parsed correctly but was missing a key. Skipping. Line: %r" % line
        )
        return "JSON missing keys: %s" % line

prettify_log_line | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
def register_log_reader(name):
    """Returns a decorator that registers a log reader class at a given name
    so get_log_reader_classes can find it."""

    def outer(log_reader_class):
        _log_reader_classes[name] = log_reader_class
        return log_reader_class

    return outer

register_log_reader | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
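Usage follows the standard registry-decorator pattern; elsewhere in this module the scribereader-backed reader is registered roughly like:

@register_log_reader("scribereader")
class ScribeLogReader(LogReader):
    # methods such as tail_logs, scribe_tail, etc. (shown below) live here
    ...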
def run_code_over_scribe_envs(
    self,
    clusters: Sequence[str],
    components: Iterable[str],
    callback: Callable[..., None],
) -> None:
    """Iterates over the scribe environments for a given set of clusters and components, executing
    functions for each component

    :param clusters: The set of clusters
    :param components: The set of components
    :param callback: The callback function. Gets called with (component_name, stream_info, scribe_env, cluster)
                     The cluster field will only be set if the component is set to per_cluster
    """
    scribe_envs: Set[str] = set()
    for cluster in clusters:
        scribe_envs.update(self.determine_scribereader_envs(components, cluster))
    log.debug("Connect to these scribe envs to tail scribe logs: %s" % scribe_envs)

    for scribe_env in scribe_envs:
        # These components all get grouped in one call for backwards compatibility
        grouped_components = {"build", "deploy", "monitoring"}
        if any([component in components for component in grouped_components]):
            stream_info = self.get_stream_info("default")
            callback(components, stream_info, scribe_env, cluster=None)
        non_defaults = set(components) - grouped_components
        for component in non_defaults:
            stream_info = self.get_stream_info(component)
            if stream_info.per_cluster:
                for cluster in clusters:
                    callback([component], stream_info, scribe_env, cluster=cluster)
            else:
                callback([component], stream_info, scribe_env, cluster=None)

run_code_over_scribe_envs | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
def tail_logs(
    self,
    service: str,
    levels: Sequence[str],
    components: Iterable[str],
    clusters: Sequence[str],
    instances: List[str],
    pods: Iterable[str] = None,
    raw_mode: bool = False,
    strip_headers: bool = False,
) -> None:
    """Sergeant function for spawning off all the right log tailing functions.

    NOTE: This function spawns concurrent processes and doesn't necessarily
    worry about cleaning them up! That's because we expect to just exit the
    main process when this function returns (as main() does). Someone calling
    this function directly with something like "while True: tail_paasta_logs()"
    may be very sad.

    NOTE: We try pretty hard to suppress KeyboardInterrupts to prevent big
    useless stack traces, but it turns out to be non-trivial and we fail ~10%
    of the time. We decided we could live with it and we're shipping this to
    see how it fares in real world testing.

    Here are some things we read about this problem:
    * http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
    * http://jtushman.github.io/blog/2014/01/14/python-%7C-multiprocessing-and-interrupts/
    * http://bryceboe.com/2010/08/26/python-multiprocessing-and-keyboardinterrupt/

    We could also try harder to terminate processes from more places. We could
    use process.join() to ensure things have a chance to die. We punted these
    things.

    It's possible this whole multiprocessing strategy is wrong-headed. If you
    are reading this code to curse whoever wrote it, see discussion in
    PAASTA-214 and https://reviewboard.yelpcorp.com/r/87320/ and feel free to
    implement one of the other options.
    """
    queue: Queue = Queue()
    spawned_processes = []

    def callback(
        components: Iterable[str],
        stream_info: ScribeComponentStreamInfo,
        scribe_env: str,
        cluster: str,
    ) -> None:
        kw = {
            "scribe_env": scribe_env,
            "service": service,
            "levels": levels,
            "components": components,
            "clusters": clusters,
            "instances": instances,
            "pods": pods,
            "queue": queue,
            "filter_fn": stream_info.filter_fn,
        }

        if stream_info.per_cluster:
            kw["stream_name"] = stream_info.stream_name_fn(service, cluster)
            kw["clusters"] = [cluster]
        else:
            kw["stream_name"] = stream_info.stream_name_fn(service)
        log.debug(
            "Running the equivalent of 'scribereader {} {} {}'".format(
                self.get_scribereader_selector(scribe_env),
                scribe_env,
                kw["stream_name"],
            )
        )
        process = Process(target=self.scribe_tail, kwargs=kw)
        spawned_processes.append(process)
        process.start()

    self.run_code_over_scribe_envs(
        clusters=clusters, components=components, callback=callback
    )

    # Pull things off the queue and output them. If any thread dies we are no
    # longer presenting the user with the full picture so we quit.
    #
    # This is convenient for testing, where a fake scribe_tail() can emit a
    # fake log and exit. Without the thread aliveness check, we would just sit
    # here forever even though the threads doing the tailing are all gone.
    #
    # NOTE: A noisy tailer in one scribe_env (such that the queue never gets
    # empty) will prevent us from ever noticing that another tailer has died.
    while True:
        try:
            # This is a blocking call with a timeout for a couple reasons:
            #
            # * If the queue is empty and we get_nowait(), we loop very tightly
            # and accomplish nothing.
            #
            # * Testing revealed a race condition where print_log() is called
            # and even prints its message, but this action isn't recorded on
            # the patched-in print_log(). This resulted in test flakes. A short
            # timeout seems to soothe this behavior: running this test 10 times
            # with a timeout of 0.0 resulted in 2 failures; running it with a
            # timeout of 0.1 resulted in 0 failures.
            #
            # * There's a race where thread1 emits its log line and exits
            # before thread2 has a chance to do anything, causing us to bail
            # out via the Queue Empty and thread aliveness check.
            #
            # We've decided to live with this for now and see if it's really a
            # problem. The threads in test code exit pretty much immediately
            # and a short timeout has been enough to ensure correct behavior
            # there, so IRL with longer start-up times for each thread this
            # will surely be fine.
            #
            # UPDATE: Actually this is leading to a test failure rate of about
            # 1/10 even with timeout of 1s. I'm adding a sleep to the threads
            # in test code to smooth this out, then pulling the trigger on
            # moving that test to integration land where it belongs.
            line = queue.get(block=True, timeout=0.1)
            print_log(line, levels, raw_mode, strip_headers)
        except Empty:
            try:
                # If there's nothing in the queue, take this opportunity to make
                # sure all the tailers are still running.
                running_processes = [tt.is_alive() for tt in spawned_processes]
                if not running_processes or not all(running_processes):
                    log.warning(
                        "Quitting because I expected %d log tailers to be alive but only %d are alive."
                        % (len(spawned_processes), running_processes.count(True))
                    )
                    for process in spawned_processes:
                        if process.is_alive():
                            process.terminate()
                    break
            except KeyboardInterrupt:
                # Die peacefully rather than printing N threads worth of stack
                # traces.
                #
                # This extra nested catch is because it's pretty easy to be in
                # the above try block when the user hits Ctrl-C which otherwise
                # dumps a stack trace.
                log.warning("Terminating.")
                break
        except KeyboardInterrupt:
            # Die peacefully rather than printing N threads worth of stack
            # traces.
            log.warning("Terminating.")
            break

tail_logs | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0
def scribe_tail(
self,
scribe_env: str,
stream_name: str,
service: str,
levels: Sequence[str],
components: Iterable[str],
clusters: Sequence[str],
instances: List[str],
pods: Iterable[str],
queue: Queue,
filter_fn: Callable,
parse_fn: Callable = None,
) -> None:
"""Creates a scribetailer for a particular environment.
When it encounters a line that it should report, it sticks it into the
provided queue.
This code is designed to run in a thread as spawned by tail_paasta_logs().
"""
try:
log.debug(f"Going to tail {stream_name} scribe stream in {scribe_env}")
host, port = scribereader.get_tail_host_and_port(
**scribe_env_to_locations(scribe_env),
)
tailer = scribereader.get_stream_tailer(stream_name, host, port)
for line in tailer:
if parse_fn:
line = parse_fn(line, clusters, service)
if filter_fn(
line, levels, service, components, clusters, instances, pods
):
queue.put(line)
except KeyboardInterrupt:
# Die peacefully rather than printing N threads worth of stack
# traces.
pass
except StreamTailerSetupError as e:
if "No data in stream" in str(e):
log.warning(f"Scribe stream {stream_name} is empty on {scribe_env}")
log.warning(
"Don't Panic! This may or may not be a problem depending on if you expect there to be"
)
log.warning("output within this stream.")
# Enter a wait so the process isn't considered dead.
# This is just a large number, since apparently some python interpreters
# don't like being passed sys.maxsize.
sleep(2**16)
else:
raise | Creates a scribetailer for a particular environment.
When it encounters a line that it should report, it sticks it into the
provided queue.
This code is designed to run in a thread as spawned by tail_paasta_logs().
| scribe_tail | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0 |
def determine_scribereader_envs(
self, components: Iterable[str], cluster: str
) -> Set[str]:
"""Returns a list of environments that scribereader needs to connect
to based on a given list of components and the cluster involved.
Some components are in certain environments, regardless of the cluster.
Some clusters do not match up with the scribe environment names, so
we figure that out here"""
envs: List[str] = []
for component in components:
# If a component has a 'source_env', we use that
# otherwise we lookup what scribe env is associated with a given cluster
env = LOG_COMPONENTS[component].get(
"source_env", self.cluster_to_scribe_env(cluster)
)
if "additional_source_envs" in LOG_COMPONENTS[component]:
envs += LOG_COMPONENTS[component]["additional_source_envs"]
envs.append(env)
        return set(envs) | Returns the set of environments that scribereader needs to connect
to based on a given list of components and the cluster involved.
Some components are in certain environments, regardless of the cluster.
Some clusters do not match up with the scribe environment names, so
we figure that out here | determine_scribereader_envs | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0 |
def cluster_to_scribe_env(self, cluster: str) -> str:
"""Looks up the particular scribe env associated with a given paasta cluster.
Scribe has its own "environment" key, which doesn't always map 1:1 with our
cluster names, so we have to maintain a manual mapping.
This mapping is deployed as a config file via puppet as part of the public
config deployed to every server.
"""
env = self.cluster_map.get(cluster, None)
if env is None:
print("I don't know where scribe logs for %s live?" % cluster)
sys.exit(1)
else:
return env | Looks up the particular scribe env associated with a given paasta cluster.
Scribe has its own "environment" key, which doesn't always map 1:1 with our
cluster names, so we have to maintain a manual mapping.
This mapping is deployed as a config file via puppet as part of the public
config deployed to every server.
| cluster_to_scribe_env | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0 |
def scribe_env_to_locations(scribe_env: str) -> Mapping[str, Any]:
"""Converts a scribe environment to a dictionary of locations. The
return value is meant to be used as kwargs for `scribereader.get_tail_host_and_port`.
"""
locations = {"ecosystem": None, "region": None, "superregion": None}
if scribe_env in scribereader.PROD_REGIONS:
locations["region"] = scribe_env
elif scribe_env in scribereader.PROD_SUPERREGIONS:
locations["superregion"] = scribe_env
else: # non-prod envs are expressed as ecosystems
locations["ecosystem"] = scribe_env
return locations | Converts a scribe environment to a dictionary of locations. The
return value is meant to be used as kwargs for `scribereader.get_tail_host_and_port`.
| scribe_env_to_locations | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0 |
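Illustrative inputs and outputs for the row above (the env names are hypothetical; actual membership depends on scribereader's PROD_REGIONS and PROD_SUPERREGIONS):

# scribe_env_to_locations("norcal-prod")  # if listed in PROD_REGIONS:
#   {"ecosystem": None, "region": "norcal-prod", "superregion": None}
# scribe_env_to_locations("testopia")     # any other env is treated as an ecosystem:
#   {"ecosystem": "testopia", "region": None, "superregion": None}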
def generate_start_end_time(
    from_string: str = "30m", to_string: Optional[str] = None
) -> Tuple[datetime.datetime, datetime.datetime]:
"""Parses the --from and --to command line arguments to create python
datetime objects representing the start and end times for log retrieval
:param from_string: The --from argument, defaults to 30 minutes
:param to_string: The --to argument, defaults to the time right now
:return: A tuple containing start_time, end_time, which specify the interval of log retrieval
"""
if to_string is None:
end_time = datetime.datetime.utcnow()
else:
        # Try parsing as a natural time duration first; if that fails, move on to
# parsing as an ISO-8601 timestamp
to_duration = timeparse(to_string)
if to_duration is not None:
end_time = datetime.datetime.utcnow() - datetime.timedelta(
seconds=to_duration
)
else:
end_time = isodate.parse_datetime(to_string)
if not end_time:
raise ValueError(
"--to argument not in ISO8601 format and not a valid pytimeparse duration"
)
from_duration = timeparse(from_string)
if from_duration is not None:
start_time = datetime.datetime.utcnow() - datetime.timedelta(
seconds=from_duration
)
else:
start_time = isodate.parse_datetime(from_string)
if not start_time:
raise ValueError(
"--from argument not in ISO8601 format and not a valid pytimeparse duration"
)
    # Convert the timestamps to something timezone-aware
start_time = pytz.utc.localize(start_time)
end_time = pytz.utc.localize(end_time)
if start_time > end_time:
raise ValueError("Start time bigger than end time")
return start_time, end_time | Parses the --from and --to command line arguments to create python
datetime objects representing the start and end times for log retrieval
:param from_string: The --from argument, defaults to 30 minutes
:param to_string: The --to argument, defaults to the time right now
:return: A tuple containing start_time, end_time, which specify the interval of log retrieval
| generate_start_end_time | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0 |
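A few illustrative calls against the function above (assuming pytimeparse and isodate behave as the code expects):

# Relative window: from 2 hours ago to 30 minutes ago (both become tz-aware UTC)
start, end = generate_start_end_time(from_string="2h", to_string="30m")
# Absolute ISO-8601 --from; --to defaults to "now"
start, end = generate_start_end_time(from_string="2016-06-08T06:00:00")
# An inverted window (e.g. from_string="30m", to_string="2h") raises ValueError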
def paasta_logs(args: argparse.Namespace) -> int:
"""Print the logs for as Paasta service.
:param args: argparse.Namespace obj created from sys.args by cli"""
soa_dir = args.soa_dir
service = figure_out_service_name(args, soa_dir)
clusters = args.cluster
if (
args.cluster is None
or args.instance is None
or len(args.instance.split(",")) > 2
):
print(
PaastaColors.red("You must specify one cluster and one instance."),
file=sys.stderr,
)
return 1
if verify_instances(args.instance, service, clusters, soa_dir):
return 1
instance = args.instance
if args.pods is None:
pods = None
else:
pods = args.pods.split(",")
components = args.components
if "app_output" in args.components:
components.remove("app_output")
components.add("stdout")
components.add("stderr")
if args.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
levels = [DEFAULT_LOGLEVEL, "debug"]
log.debug(f"Going to get logs for {service} on cluster {clusters}")
log_reader = get_log_reader(components)
if not validate_filtering_args(args, log_reader):
return 1
# They haven't specified what kind of filtering they want, decide for them
if args.line_count is None and args.time_from is None and not args.tail:
return pick_default_log_mode(
args, log_reader, service, levels, components, clusters, instance, pods
)
if args.tail:
print(
PaastaColors.cyan("Tailing logs and applying filters..."), file=sys.stderr
)
log_reader.tail_logs(
service=service,
levels=levels,
components=components,
clusters=clusters,
instances=[instance],
pods=pods,
raw_mode=args.raw_mode,
strip_headers=args.strip_headers,
)
return 0
# If the logger doesn't support offsetting the number of lines by a particular line number
# there is no point in distinguishing between a positive/negative number of lines since it
# can only get the last N lines
if not log_reader.SUPPORTS_LINE_OFFSET and args.line_count is not None:
args.line_count = abs(args.line_count)
# Handle line based filtering
if args.line_count is not None and args.line_offset is None:
log_reader.print_last_n_logs(
service=service,
line_count=args.line_count,
levels=levels,
components=components,
clusters=clusters,
instances=[instance],
pods=pods,
raw_mode=args.raw_mode,
strip_headers=args.strip_headers,
)
return 0
elif args.line_count is not None and args.line_offset is not None:
log_reader.print_logs_by_offset(
service=service,
line_count=args.line_count,
line_offset=args.line_offset,
levels=levels,
components=components,
clusters=clusters,
instances=[instance],
pods=pods,
raw_mode=args.raw_mode,
strip_headers=args.strip_headers,
)
return 0
# Handle time based filtering
try:
start_time, end_time = generate_start_end_time(args.time_from, args.time_to)
except ValueError as e:
print(PaastaColors.red(str(e)), file=sys.stderr)
return 1
log_reader.print_logs_by_time(
service=service,
start_time=start_time,
end_time=end_time,
levels=levels,
components=components,
clusters=clusters,
instances=[instance],
pods=pods,
raw_mode=args.raw_mode,
strip_headers=args.strip_headers,
)
    return 0 | Print the logs for a PaaSTA service.
:param args: argparse.Namespace obj created from sys.args by cli | paasta_logs | python | Yelp/paasta | paasta_tools/cli/cmds/logs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py | Apache-2.0 |
def can_run_metric_watcher_threads(
service: str,
soa_dir: str,
) -> bool:
"""
    Cannot run SLO and metric watcher threads together for now.
    SLO watcher threads take precedence over metric watcher threads.
    Metric watcher threads can run only if there are no SLOs available.
"""
slo_files = get_files_of_type_in_dir(
file_type="slo", service=service, soa_dir=soa_dir
)
rollback_files = get_files_of_type_in_dir(
file_type="rollback", service=service, soa_dir=soa_dir
)
return bool(not slo_files and rollback_files) |
    Cannot run SLO and metric watcher threads together for now.
    SLO watcher threads take precedence over metric watcher threads.
    Metric watcher threads can run only if there are no SLOs available.
| can_run_metric_watcher_threads | python | Yelp/paasta | paasta_tools/cli/cmds/mark_for_deployment.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/mark_for_deployment.py | Apache-2.0 |
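The gate above reduces to a small truth table; a self-contained restatement with hypothetical file lists:

def can_run(slo_files, rollback_files):
    # Metric watcher threads run only when rollback conditions exist and no SLO files do.
    return bool(not slo_files and rollback_files)

assert can_run([], ["rollback.yaml"]) is True
assert can_run(["slo.yaml"], ["rollback.yaml"]) is False
assert can_run([], []) is False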
def get_slack_channel(self) -> str:
"""Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
Currently only uses the first slack channel available, and doesn't support
multi-channel notifications."""
if self.deploy_info.get("slack_notify", True):
try:
channel = self.deploy_info.get("slack_channels")[0]
# Nightly jenkins builds will often re-deploy master. This causes Slack noise that wasn't present before
# the auto-rollbacks work.
if self.deployment_version == self.old_deployment_version:
print(
f"Rollback image matches rollforward image: {self.deployment_version}, "
f"Sending slack notifications to {DEFAULT_SLACK_CHANNEL} instead of {channel}."
)
return DEFAULT_SLACK_CHANNEL
else:
return channel
except (IndexError, AttributeError, TypeError):
return DEFAULT_SLACK_CHANNEL
else:
return DEFAULT_SLACK_CHANNEL | Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
Currently only uses the first slack channel available, and doesn't support
multi-channel notifications. | get_slack_channel | python | Yelp/paasta | paasta_tools/cli/cmds/mark_for_deployment.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/mark_for_deployment.py | Apache-2.0 |
def paasta_pause_service_autoscaler(args):
"""With a given cluster and duration, pauses the paasta service autoscaler
in that cluster for duration minutes"""
if args.duration > MAX_PAUSE_DURATION:
if not args.force:
print(
"Specified duration: {d} longer than max: {m}".format(
d=args.duration, m=MAX_PAUSE_DURATION
)
)
print("If you are really sure, run again with --force")
return 3
if args.info:
return_code = get_service_autoscale_pause_time(args.cluster)
elif args.resume:
return_code = delete_service_autoscale_pause_time(args.cluster)
_log_audit(action="resume-service-autoscaler", cluster=args.cluster)
else:
minutes = args.duration
return_code = update_service_autoscale_pause_time(args.cluster, minutes)
_log_audit(
action="pause-service-autoscaler",
action_details={"duration": minutes},
cluster=args.cluster,
)
return return_code | With a given cluster and duration, pauses the paasta service autoscaler
in that cluster for duration minutes | paasta_pause_service_autoscaler | python | Yelp/paasta | paasta_tools/cli/cmds/pause_service_autoscaler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/pause_service_autoscaler.py | Apache-2.0 |
def is_docker_image_already_in_registry(service: str, soa_dir: str, sha: str, image_version: Optional[str] = None) -> bool: # type: ignore
"""Verifies that docker image exists in the paasta registry.
:param service: name of the service
:param sha: git sha
:returns: True, False or raises requests.exceptions.RequestException
"""
registry_uri = get_service_docker_registry(service, soa_dir)
repository, tag = build_docker_image_name(service, sha, image_version).split(":", 1)
creds = read_docker_registry_creds(registry_uri)
uri = f"{registry_uri}/v2/{repository}/manifests/{tag}"
with requests.Session() as s:
try:
url = "https://" + uri
r = (
s.head(url, timeout=30)
if creds[0] is None
else s.head(url, auth=creds, timeout=30)
)
except SSLError:
# If no auth creds, fallback to trying http
if creds[0] is not None:
raise
url = "http://" + uri
r = s.head(url, timeout=30)
if r.status_code == 200:
return True
elif r.status_code == 404:
return False # No Such Repository Error
r.raise_for_status() | Verifies that docker image exists in the paasta registry.
:param service: name of the service
:param sha: git sha
:returns: True, False or raises requests.exceptions.RequestException
| is_docker_image_already_in_registry | python | Yelp/paasta | paasta_tools/cli/cmds/push_to_registry.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/push_to_registry.py | Apache-2.0 |
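A minimal standalone version of the same Docker Registry v2 manifest probe (the registry host and image are hypothetical, and the credential/SSL fallback handling above is omitted):

import requests

def image_exists(registry: str, repository: str, tag: str) -> bool:
    # HEAD the v2 manifests endpoint: 200 means the tag exists, 404 means it does not.
    r = requests.head(f"https://{registry}/v2/{repository}/manifests/{tag}", timeout=30)
    if r.status_code == 200:
        return True
    if r.status_code == 404:
        return False
    r.raise_for_status()
    return False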
def get_versions_for_service(
service: str, deploy_groups: Collection[str], soa_dir: str
) -> Mapping[DeploymentVersion, Tuple[str, str]]:
"""Returns a dictionary of 2-tuples of the form (timestamp, deploy_group) for each version tuple of (deploy sha, image_version)"""
if service is None:
return {}
git_url = get_git_url(service=service, soa_dir=soa_dir)
all_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
deploy_groups, _ = validate_given_deploy_groups(all_deploy_groups, deploy_groups)
previously_deployed_versions: Dict[DeploymentVersion, Tuple[str, str]] = {}
for ref, sha in list_remote_refs(git_url).items():
regex_match = extract_tags(ref)
try:
deploy_group = regex_match["deploy_group"]
tstamp = regex_match["tstamp"]
image_version = regex_match["image_version"]
except KeyError:
pass
else:
# Now we filter and dedup by picking the most recent sha for a deploy group
# Note that all strings are greater than ''
if deploy_group in deploy_groups:
version = DeploymentVersion(sha=sha, image_version=image_version)
                # Index 0 is the timestamp in the stored (tstamp, deploy_group) tuple
                tstamp_so_far = previously_deployed_versions.get(version, ("", ""))[0]
if tstamp > tstamp_so_far:
previously_deployed_versions[version] = (tstamp, deploy_group)
return previously_deployed_versions | Returns a dictionary of 2-tuples of the form (timestamp, deploy_group) for each version tuple of (deploy sha, image_version) | get_versions_for_service | python | Yelp/paasta | paasta_tools/cli/cmds/rollback.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/rollback.py | Apache-2.0 |
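The dedup rule in isolation: for each version keep the entry with the largest timestamp, relying on the fact that ISO-style tstamps compare correctly as strings (values below are made up):

newest = {}
for version, tstamp, group in [
    ("abc123", "20240101T000000", "prod.everything"),
    ("abc123", "20240301T000000", "prod.everything"),
]:
    if tstamp > newest.get(version, ("", ""))[0]:
        newest[version] = (tstamp, group)
assert newest["abc123"] == ("20240301T000000", "prod.everything")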
def paasta_rollback(args: argparse.Namespace) -> int:
"""Call mark_for_deployment with rollback parameters
:param args: contains all the arguments passed onto the script: service,
deploy groups and sha. These arguments will be verified and passed onto
mark_for_deployment.
"""
soa_dir = args.soa_dir
service = figure_out_service_name(args, soa_dir)
deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
if not can_user_deploy_service(deploy_info, service):
return 1
git_url = get_git_url(service, soa_dir)
if args.all_deploy_groups:
given_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
else:
given_deploy_groups = {
deploy_group
for deploy_group in args.deploy_groups.split(",")
if deploy_group
}
all_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
deploy_groups, invalid = validate_given_deploy_groups(
all_deploy_groups, given_deploy_groups
)
if len(invalid) > 0:
print(
PaastaColors.yellow(
"These deploy groups are not valid and will be skipped: %s.\n"
% (",").join(invalid)
)
)
if len(deploy_groups) == 0 and not args.all_deploy_groups:
print(
PaastaColors.red(
"ERROR: No valid deploy groups specified for %s.\n Use the flag -a to rollback all valid deploy groups for this service"
% (service)
)
)
return 1
versions = get_versions_for_service(service, deploy_groups, soa_dir)
commit = args.commit
image_version = args.image_version
new_version = DeploymentVersion(sha=commit, image_version=image_version)
if not commit:
print("Please specify a commit to mark for rollback (-k, --commit).")
list_previous_versions(
service, deploy_groups, bool(given_deploy_groups), versions
)
return 1
elif new_version not in versions and not args.force:
print(
PaastaColors.red(
f"This version {new_version} has never been deployed before."
)
)
print("Please double check it or use --force to skip this verification.\n")
list_previous_versions(
service, deploy_groups, bool(given_deploy_groups), versions
)
return 1
try:
validate_full_git_sha(args.commit)
except argparse.ArgumentTypeError as e:
print(PaastaColors.red(f"Error: {e}"))
return 1
    # TODO: Add a similar check for when image_version is empty and no-commit redeploys are enforced for the requested deploy_group
returncode = 0
for deploy_group in deploy_groups:
rolled_back_from = get_currently_deployed_version(service, deploy_group)
returncode |= mark_for_deployment(
git_url=git_url,
service=service,
deploy_group=deploy_group,
commit=commit,
image_version=image_version,
)
# we could also gate this by the return code from m-f-d, but we probably care more about someone wanting to
# rollback than we care about if the underlying machinery was successfully able to complete the request
if rolled_back_from != new_version:
audit_action_details = {
"rolled_back_from": str(rolled_back_from),
"rolled_back_to": str(new_version),
"rollback_type": RollbackTypes.USER_INITIATED_ROLLBACK.value,
"deploy_group": deploy_group,
}
_log_audit(
action="rollback", action_details=audit_action_details, service=service
)
if returncode == 0:
print(
PaastaColors.yellow(
f"WARNING: You MUST manually revert changes in Git! Use 'git revert {rolled_back_from.sha}', and go through the normal push process. "
)
)
print(
PaastaColors.yellow(
f"WARNING: Failing to do so means that Jenkins will redeploy the latest code on the next scheduled build!"
)
)
return returncode | Call mark_for_deployment with rollback parameters
:param args: contains all the arguments passed onto the script: service,
deploy groups and sha. These arguments will be verified and passed onto
mark_for_deployment.
| paasta_rollback | python | Yelp/paasta | paasta_tools/cli/cmds/rollback.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/rollback.py | Apache-2.0 |
def _add_and_update_args(parser: argparse.ArgumentParser):
"""common args for `add` and `update`."""
parser.add_argument(
"-p",
"--plain-text",
required=False,
type=str,
help="Optionally specify the secret as a command line argument",
)
parser.add_argument(
"-i",
"--stdin",
required=False,
action="store_true",
default=False,
help="Optionally pass the plaintext from stdin",
)
parser.add_argument(
"--cross-env-motivation",
required=False,
type=str,
help=(
"Provide motivation in case the same value is being duplicated "
"across multiple runtime environments when adding or updating a secret"
),
metavar="MOTIVATION",
)
parser.add_argument(
"-n",
"--secret-name",
type=check_secret_name,
required=True,
help="The name of the secret to create/update, "
"this is the name you will reference in your "
"services yaml files and should "
"be unique per service.",
)
parser.add_argument( # type: ignore
"-c",
"--clusters",
help="A comma-separated list of clusters to create secrets for. "
"Note: this is translated to ecosystems because Vault is run "
"at an ecosystem level. As a result you can only have different "
"secrets per ecosystem. (it is not possible for example to encrypt "
"a different value for pnw-prod vs nova-prod. "
"Defaults to all clusters in which the service runs. "
"For example: --clusters pnw-prod,nova-prod ",
).completer = lazy_choices_completer(list_clusters) | common args for `add` and `update`. | _add_and_update_args | python | Yelp/paasta | paasta_tools/cli/cmds/secret.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/secret.py | Apache-2.0 |
def get_docker_image(
args: argparse.Namespace, instance_config: InstanceConfig
) -> Optional[str]:
"""
Since the Docker image digest used to launch the Spark cluster is obtained by inspecting local
Docker images, we need to ensure that the Docker image exists locally or is pulled in all scenarios.
"""
# docker image is built locally then pushed
if args.build:
return build_and_push_docker_image(args)
docker_url = ""
if args.image:
docker_url = args.image
else:
try:
docker_url = instance_config.get_docker_url()
except NoDockerImageError:
print(
PaastaColors.red(
"Error: No sha has been marked for deployment for the %s deploy group.\n"
"Please ensure this service has either run through a jenkins pipeline "
"or paasta mark-for-deployment has been run for %s\n"
% (instance_config.get_deploy_group(), args.service)
),
sep="",
file=sys.stderr,
)
return None
print(
"Please wait while the image (%s) is pulled (times out after 5m)..."
% docker_url,
file=sys.stderr,
)
# Need sudo for credentials when pulling images from paasta docker registry (docker-paasta.yelpcorp.com)
# However, in CI env, we can't connect to docker via root and we can pull with user `jenkins`
is_ci_env = "CI" in os.environ
cmd_prefix = "" if is_ci_env else "sudo -H "
retcode, _ = _run(f"{cmd_prefix}docker pull {docker_url}", stream=True, timeout=300)
if retcode != 0:
print(
"\nPull failed. Are you authorized to run docker commands?",
file=sys.stderr,
)
return None
return docker_url |
Since the Docker image digest used to launch the Spark cluster is obtained by inspecting local
Docker images, we need to ensure that the Docker image exists locally or is pulled in all scenarios.
| get_docker_image | python | Yelp/paasta | paasta_tools/cli/cmds/spark_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py | Apache-2.0 |
def get_spark_env(
args: argparse.Namespace,
spark_conf_str: str,
aws_creds: Tuple[Optional[str], Optional[str], Optional[str]],
ui_port: str,
system_paasta_config: SystemPaastaConfig,
) -> Dict[str, str]:
"""Create the env config dict to configure on the docker container"""
spark_env = {}
access_key, secret_key, session_token = aws_creds
if access_key:
spark_env["AWS_ACCESS_KEY_ID"] = access_key
spark_env["AWS_SECRET_ACCESS_KEY"] = secret_key
if session_token is not None:
spark_env["AWS_SESSION_TOKEN"] = session_token
spark_env["AWS_DEFAULT_REGION"] = args.aws_region
spark_env["PAASTA_LAUNCHED_BY"] = get_possible_launched_by_user_variable_from_env()
spark_env["PAASTA_INSTANCE_TYPE"] = "spark"
# Run spark (and mesos framework) as root.
spark_env["SPARK_USER"] = "root"
spark_env["SPARK_OPTS"] = spark_conf_str
# Default configs to start the jupyter notebook server
if args.cmd == "jupyter-lab":
spark_env["JUPYTER_RUNTIME_DIR"] = "/source/.jupyter"
spark_env["JUPYTER_DATA_DIR"] = "/source/.jupyter"
spark_env["JUPYTER_CONFIG_DIR"] = "/source/.jupyter"
elif args.cmd == "history-server":
dirs = args.work_dir.split(":")
spark_env["SPARK_LOG_DIR"] = dirs[1]
if not args.spark_args or not args.spark_args.startswith(
"spark.history.fs.logDirectory"
):
print(
"history-server requires spark.history.fs.logDirectory in spark-args",
file=sys.stderr,
)
sys.exit(1)
spark_env["SPARK_HISTORY_OPTS"] = (
f"-D{args.spark_args} " f"-Dspark.history.ui.port={ui_port}"
)
spark_env["SPARK_DAEMON_CLASSPATH"] = "/opt/spark/extra_jars/*"
spark_env["SPARK_NO_DAEMONIZE"] = "true"
if args.get_eks_token_via_iam_user:
with open(SPARK_DRIVER_IAM_USER) as f:
config = ConfigParser()
config.read_file(f)
# these env variables are consumed by a script specified in the spark kubeconfig - and which will result in a tightly-scoped IAM identity being used for EKS cluster access
spark_env["GET_EKS_TOKEN_AWS_ACCESS_KEY_ID"] = config["default"][
"aws_access_key_id"
]
spark_env["GET_EKS_TOKEN_AWS_SECRET_ACCESS_KEY"] = config["default"][
"aws_secret_access_key"
]
spark_env["KUBECONFIG"] = system_paasta_config.get_spark_iam_user_kubeconfig()
else:
spark_env["KUBECONFIG"] = system_paasta_config.get_spark_kubeconfig()
return spark_env | Create the env config dict to configure on the docker container | get_spark_env | python | Yelp/paasta | paasta_tools/cli/cmds/spark_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py | Apache-2.0 |
def _calculate_docker_shared_memory_size(shm_size: Optional[str]) -> str:
"""In Order of preference:
1. Argument: --docker-shm-size
3. Default
"""
if shm_size:
return shm_size
    return DEFAULT_DOCKER_SHM_SIZE | In order of preference:
1. Argument: --docker-shm-size
2. Default
| _calculate_docker_shared_memory_size | python | Yelp/paasta | paasta_tools/cli/cmds/spark_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py | Apache-2.0 |
def build_and_push_docker_image(args: argparse.Namespace) -> Optional[str]:
"""
Build an image if the default Spark service image is not preferred.
The image needs to be pushed to a registry for the Spark executors
to pull.
"""
if not makefile_responds_to("cook-image"):
print(
"A local Makefile with a 'cook-image' target is required for --build",
file=sys.stderr,
)
return None
default_tag = "{}-{}".format(DEFAULT_SPARK_DOCKER_IMAGE_PREFIX, get_username())
docker_tag = os.environ.get("DOCKER_TAG", default_tag)
os.environ["DOCKER_TAG"] = docker_tag
cook_return = paasta_cook_image(
args=None, service=args.service, soa_dir=args.yelpsoa_config_root
)
if cook_return != 0:
return None
registry_uri = args.docker_registry or _get_adhoc_docker_registry(
service=args.service,
soa_dir=args.yelpsoa_config_root,
)
docker_url = f"{registry_uri}/{docker_tag}"
command = f"docker tag {docker_tag} {docker_url}"
print(PaastaColors.grey(command))
retcode, _ = _run(command, stream=True)
if retcode != 0:
return None
if registry_uri != DEFAULT_SPARK_DOCKER_REGISTRY:
command = "sudo -H docker push %s" % docker_url
else:
command = "docker push %s" % docker_url
print(PaastaColors.grey(command))
retcode, output = _run(command, stream=False)
if retcode != 0:
return None
# With unprivileged docker, the digest on the remote registry may not match the digest
# in the local environment. Because of this, we have to parse the digest message from the
# server response and use downstream when launching spark executors
# Output from `docker push` with unprivileged docker looks like
# Using default tag: latest
# The push refers to repository [docker-dev.yelpcorp.com/paasta-spark-run-dpopes:latest]
# latest: digest: sha256:0a43aa65174a400bd280d48d460b73eb49b0ded4072c9e173f919543bf693557
# With privileged docker, the last line has an extra "size: 123"
# latest: digest: sha256:0a43aa65174a400bd280d48d460b73eb49b0ded4072c9e173f919543bf693557 size: 52
digest_line = output.split("\n")[-1]
digest_match = re.match(r"[^:]*: [^:]*: (?P<digest>[^\s]*)", digest_line)
if not digest_match:
raise ValueError(f"Could not determine digest from output: {output}")
digest = digest_match.group("digest")
image_url = f"{docker_url}@{digest}"
# If the local digest doesn't match the remote digest AND the registry is
    # non-default (which requires authentication, and consequently sudo),
# downstream `docker run` commands will fail trying to authenticate.
# To work around this, we can proactively `sudo docker pull` here so that
# the image exists locally and can be `docker run` without sudo
if registry_uri != DEFAULT_SPARK_DOCKER_REGISTRY:
command = f"sudo -H docker pull {image_url}"
print(PaastaColors.grey(command))
retcode, output = _run(command, stream=False)
if retcode != 0:
raise NoDockerImageError(f"Could not pull {image_url}: {output}")
return image_url |
Build an image if the default Spark service image is not preferred.
The image needs to be pushed to a registry for the Spark executors
to pull.
| build_and_push_docker_image | python | Yelp/paasta | paasta_tools/cli/cmds/spark_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py | Apache-2.0 |
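The digest-extraction regex above, exercised against both output shapes quoted in the comments (sample lines, not real digests):

import re

for line in (
    "latest: digest: sha256:0a43aa65174a size: 52",
    "latest: digest: sha256:0a43aa65174a",
):
    m = re.match(r"[^:]*: [^:]*: (?P<digest>[^\s]*)", line)
    assert m is not None and m.group("digest").startswith("sha256:")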
def update_args_from_tronfig(args: argparse.Namespace) -> Optional[Dict[str, str]]:
"""
Load and check the following config fields from the provided Tronfig.
- executor
- pool
- iam_role
- iam_role_provider
- force_spark_resource_configs
- max_runtime
- command
- env
- spark_args
Returns: environment variables dictionary or None if failed.
"""
action_dict = parse_tronfig(args.tronfig, args.job_id)
if action_dict is None:
print(
PaastaColors.red(f"Unable to get configs from job-id: {args.job_id}"),
file=sys.stderr,
)
return None
    # executor must equal "spark"
if action_dict.get("executor", "") != "spark":
print(
PaastaColors.red("Invalid Tronfig: executor should be 'spark'"),
file=sys.stderr,
)
return None
# iam_role / aws_profile
if (
"iam_role" in action_dict
and action_dict.get("iam_role_provider", "aws") != "aws"
):
print(
PaastaColors.red("Invalid Tronfig: iam_role_provider should be 'aws'"),
file=sys.stderr,
)
return None
# Other args: map Tronfig YAML fields to spark-run CLI args
fields_to_args = {
"pool": "pool",
"iam_role": "assume_aws_role",
"force_spark_resource_configs": "force_spark_resource_configs",
"max_runtime": "timeout_job_runtime",
"command": "cmd",
"spark_args": "spark_args",
}
for field_name, arg_name in fields_to_args.items():
if field_name in action_dict:
value = action_dict[field_name]
# Convert spark_args values from dict to a string "k1=v1 k2=v2"
if field_name == "spark_args":
value = " ".join([f"{k}={v}" for k, v in dict(value).items()])
# Beautify for printing
arg_name_str = (f"--{arg_name.replace('_', '-')}").ljust(31, " ")
# Only load iam_role value if --aws-profile is not set
if field_name == "iam_role" and args.aws_profile is not None:
print(
PaastaColors.yellow(
f"Ignoring Tronfig: `{field_name} : {value}`, since `--aws-profile` is provided. "
f"We are giving higher priority to `--aws-profile` in case of paasta spark-run adhoc runs."
),
)
continue
if hasattr(args, arg_name):
print(
PaastaColors.yellow(
f"Overwriting args with Tronfig: {arg_name_str} => {field_name} : {value}"
),
)
setattr(args, arg_name, value)
# env (currently paasta spark-run does not support Spark driver secrets environment variables)
return action_dict.get("env", dict()) |
Load and check the following config fields from the provided Tronfig.
- executor
- pool
- iam_role
- iam_role_provider
- force_spark_resource_configs
- max_runtime
- command
- env
- spark_args
Returns: environment variables dictionary or None if failed.
| update_args_from_tronfig | python | Yelp/paasta | paasta_tools/cli/cmds/spark_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py | Apache-2.0 |
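The spark_args flattening used above, in isolation (keys and values are hypothetical):

spark_args = {"spark.executor.cores": 4, "spark.executor.memory": "8g"}
flat = " ".join(f"{k}={v}" for k, v in spark_args.items())
assert flat == "spark.executor.cores=4 spark.executor.memory=8g"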
def make_mutate_refs_func(service_config, force_bounce, desired_state):
"""Create a function that will inform send_pack that we want to create tags
corresponding to the set of branches passed, with the given force_bounce
and desired_state parameters. These tags will point at the current tip of
the branch they associate with.
dulwich's send_pack wants a function that takes a dictionary of ref name
to sha and returns a modified version of that dictionary. send_pack will
then diff what is returned versus what was passed in, and inform the remote
git repo of our desires."""
def mutate_refs(refs):
deploy_group = service_config.get_deploy_group()
(_, head_sha, _) = get_latest_deployment_tag(refs, deploy_group)
refs[
format_tag(service_config.get_branch(), force_bounce, desired_state)
] = head_sha
return refs
return mutate_refs | Create a function that will inform send_pack that we want to create tags
corresponding to the set of branches passed, with the given force_bounce
and desired_state parameters. These tags will point at the current tip of
the branch they associate with.
dulwich's send_pack wants a function that takes a dictionary of ref name
to sha and returns a modified version of that dictionary. send_pack will
then diff what is returned versus what was passed in, and inform the remote
git repo of our desires. | make_mutate_refs_func | python | Yelp/paasta | paasta_tools/cli/cmds/start_stop_restart.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/start_stop_restart.py | Apache-2.0 |
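A toy illustration of the mutate_refs contract that dulwich's send_pack expects: receive the remote's {ref: sha} dict, add or modify entries, and return it (the tag name and sha here are made up; the real tag comes from format_tag()):

def mutate_refs(refs):
    # Point a new control tag at the current tip of master.
    refs["refs/tags/paasta-stop"] = refs["refs/heads/master"]
    return refs

refs = {"refs/heads/master": "abc123"}
assert mutate_refs(refs)["refs/tags/paasta-stop"] == "abc123"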
def paasta_start_or_stop(args, desired_state):
"""Requests a change of state to start or stop given branches of a service."""
soa_dir = args.soa_dir
pargs = apply_args_filters(args)
if len(pargs) == 0:
return 1
affected_services = {
s for service_list in pargs.values() for s in service_list.keys()
}
if len(affected_services) > 1:
print(
PaastaColors.red("Warning: trying to start/stop/restart multiple services:")
)
for cluster, services_instances in pargs.items():
print("Cluster %s:" % cluster)
for service, instances in services_instances.items():
print(" Service %s:" % service)
print(" Instances %s" % ",".join(instances.keys()))
if sys.stdin.isatty():
confirm = choice.Binary("Are you sure you want to continue?", False).ask()
else:
confirm = False
if not confirm:
print()
print("exiting")
return 1
if not all(
[
can_user_deploy_service(get_deploy_info(service, soa_dir), service)
for service in affected_services
]
):
print(PaastaColors.red("Exiting due to missing deploy permissions"))
return 1
invalid_deploy_groups = []
kubernetes_message_printed = False
affected_flinks = []
if args.clusters is None or args.instances is None:
if confirm_to_continue(pargs.items(), desired_state) is False:
print()
print("exiting")
return 1
for cluster, services_instances in pargs.items():
for service, instances in services_instances.items():
for instance in instances.keys():
service_config = get_instance_config(
service=service,
cluster=cluster,
instance=instance,
soa_dir=soa_dir,
load_deployments=False,
)
if isinstance(service_config, FlinkDeploymentConfig):
affected_flinks.append(service_config)
continue
try:
remote_refs = get_remote_refs(service, soa_dir)
except remote_git.LSRemoteException as e:
msg = (
"Error talking to the git server: %s\n"
"This PaaSTA command requires access to the git server to operate.\n"
"The git server may be down or not reachable from here.\n"
"Try again from somewhere where the git server can be reached, "
"like your developer environment."
) % str(e)
print(msg)
return 1
deploy_group = service_config.get_deploy_group()
(deploy_tag, _, _) = get_latest_deployment_tag(
remote_refs, deploy_group
)
if deploy_tag not in remote_refs:
invalid_deploy_groups.append(deploy_group)
else:
force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
if (
isinstance(service_config, KubernetesDeploymentConfig)
and not kubernetes_message_printed
):
print_kubernetes_message(desired_state)
kubernetes_message_printed = True
issue_state_change_for_service(
service_config=service_config,
force_bounce=force_bounce,
desired_state=desired_state,
)
return_val = 0
# TODO: Refactor to discover if set_state is available for given
# instance_type in API
if affected_flinks:
print_flink_message(desired_state)
system_paasta_config = load_system_paasta_config()
for service_config in affected_flinks:
cluster = service_config.cluster
service = service_config.service
instance = service_config.instance
is_eks = isinstance(service_config, FlinkEksDeploymentConfig)
client = get_paasta_oapi_client(
cluster=get_paasta_oapi_api_clustername(cluster=cluster, is_eks=is_eks),
system_paasta_config=system_paasta_config,
)
if not client:
print("Cannot get a paasta-api client")
exit(1)
try:
client.service.instance_set_state(
service=service,
instance=instance,
desired_state=desired_state,
)
except client.api_error as exc:
print(exc.reason)
return exc.status
return_val = 0
if invalid_deploy_groups:
print(f"No deploy tags found for {', '.join(invalid_deploy_groups)}.")
print(f"Has {service} been deployed there yet?")
return_val = 1
return return_val | Requests a change of state to start or stop given branches of a service. | paasta_start_or_stop | python | Yelp/paasta | paasta_tools/cli/cmds/start_stop_restart.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/start_stop_restart.py | Apache-2.0 |
def get_actual_deployments(
service: str, soa_dir: str
) -> Mapping[str, DeploymentVersion]:
"""Given a service, return a dict of instances->DeploymentVersions"""
config_loader = PaastaServiceConfigLoader(service=service, soa_dir=soa_dir)
clusters = list_clusters(service=service, soa_dir=soa_dir)
actual_deployments = {}
for cluster in clusters:
for instance_type in DEPLOYMENT_INSTANCE_CONFIG:
for instance_config in config_loader.instance_configs(
cluster=cluster, instance_type_class=instance_type
):
namespace = f"{cluster}.{instance_config.instance}"
actual_deployments[namespace] = get_deployment_version_from_dockerurl(
instance_config.get_docker_image()
)
if not actual_deployments:
print(
f"Warning: it looks like {service} has not been deployed anywhere yet!",
file=sys.stderr,
)
return actual_deployments | Given a service, return a dict of instances->DeploymentVersions | get_actual_deployments | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
def find_instance_types(status: Any) -> List[str]:
"""
find_instance_types finds the instance types from the status api response.
    It iterates over all instance types registered in `INSTANCE_TYPE_WRITERS`.
:param status: paasta api status object
:return: the list of matching instance types
"""
types: List[str] = []
for instance_type in INSTANCE_TYPE_WRITERS.keys():
if status.get(instance_type) is not None:
types.append(instance_type)
return types |
find_instance_types finds the instance types from the status api response.
    It iterates over all instance types registered in `INSTANCE_TYPE_WRITERS`.
:param status: paasta api status object
:return: the list of matching instance types
| find_instance_types | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
def report_status_for_cluster(
service: str,
cluster: str,
deploy_pipeline: Sequence[str],
actual_deployments: Mapping[str, DeploymentVersion],
instance_whitelist: Mapping[str, Type[InstanceConfig]],
system_paasta_config: SystemPaastaConfig,
lock: Lock,
verbose: int = 0,
new: bool = False,
all_namespaces: bool = False,
) -> Tuple[int, Sequence[str]]:
"""With a given service and cluster, prints the status of the instances
in that cluster"""
output = ["", "service: %s" % service, "cluster: %s" % cluster]
deployed_instances = []
instances = [
(instance, instance_config_class)
for instance, instance_config_class in instance_whitelist.items()
if instance_config_class in ALLOWED_INSTANCE_CONFIG
]
    # Tron instances are not present in the deploy pipeline, so treat them as
# seen by default to avoid error messages
seen_instances = [
instance
for instance, instance_config_class in instance_whitelist.items()
if instance_config_class == TronActionConfig
]
for namespace in deploy_pipeline:
cluster_in_pipeline, instance = namespace.split(".")
seen_instances.append(instance)
if cluster_in_pipeline != cluster:
continue
        # Compare against instance names, not the (name, class) tuples
        if instances and instance not in [i[0] for i in instances]:
continue
# Case: service deployed to cluster.instance
if namespace in actual_deployments:
deployed_instances.append(instance)
# Case: flink instances don't use `deployments.json`
elif instance_whitelist.get(instance) == FlinkDeploymentConfig:
deployed_instances.append(instance)
# Case: service NOT deployed to cluster.instance
else:
output.append(" instance: %s" % PaastaColors.red(instance))
output.append(" Git sha: None (not deployed yet)")
return_code = 0
return_codes = []
for deployed_instance, instance_config_class in instances:
return_codes.append(
paasta_status_on_api_endpoint(
cluster=cluster,
service=service,
instance=deployed_instance,
system_paasta_config=system_paasta_config,
lock=lock,
verbose=verbose,
new=new,
all_namespaces=all_namespaces,
is_eks=(instance_config_class in EKS_DEPLOYMENT_CONFIGS),
)
)
if any(return_codes):
return_code = 1
output.append(
report_invalid_whitelist_values(
whitelist=[instance[0] for instance in instances],
items=seen_instances,
item_type="instance",
)
)
return return_code, output | With a given service and cluster, prints the status of the instances
in that cluster | report_status_for_cluster | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
def report_invalid_whitelist_values(
whitelist: Iterable[str], items: Sequence[str], item_type: str
) -> str:
"""Warns the user if there are entries in ``whitelist`` which don't
correspond to any item in ``items``. Helps highlight typos.
"""
return_string = ""
bogus_entries = []
if whitelist is None:
return ""
for entry in whitelist:
if entry not in items:
bogus_entries.append(entry)
if len(bogus_entries) > 0:
return_string = (
"\n" "Warning: This service does not have any %s matching these names:\n%s"
) % (item_type, ",".join(bogus_entries))
return return_string | Warns the user if there are entries in ``whitelist`` which don't
correspond to any item in ``items``. Helps highlight typos.
| report_invalid_whitelist_values | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
def get_filters(
args,
) -> Sequence[Callable[[InstanceConfig], bool]]:
"""Figures out which filters to apply from an args object, and returns them
:param args: args object
    :returns: list of functions that take an instance config and return whether the instance config matches the filters
"""
filters = []
if args.service:
filters.append(lambda conf: conf.get_service() in args.service.split(","))
if args.clusters:
filters.append(lambda conf: conf.get_cluster() in args.clusters.split(","))
if args.instances:
filters.append(lambda conf: conf.get_instance() in args.instances.split(","))
if args.deploy_group:
filters.append(
lambda conf: conf.get_deploy_group() in args.deploy_group.split(",")
)
if args.registration:
normalized_regs = normalize_registrations(
service=args.service, registrations=args.registration.split(",")
)
filters.append(
lambda conf: any(
reg in normalized_regs
for reg in (
conf.get_registrations()
if hasattr(conf, "get_registrations")
else []
)
)
)
if args.owner:
owners = args.owner.split(",")
filters.append(
# If the instance owner is None, check the service owner, else check the instance owner
lambda conf: get_team(
overrides={}, service=conf.get_service(), soa_dir=args.soa_dir
)
in owners
if conf.get_team() is None
else conf.get_team() in owners
)
return filters | Figures out which filters to apply from an args object, and returns them
:param args: args object
    :returns: list of functions that take an instance config and return whether the instance config matches the filters
| get_filters | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
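The composition idea in miniature: each CLI flag contributes one predicate, and a config passes only if all of them hold (plain dicts stand in for InstanceConfig objects):

filters = [
    lambda conf: conf["service"] in {"svc_a", "svc_b"},
    lambda conf: conf["cluster"] in {"pnw-prod"},
]
conf = {"service": "svc_a", "cluster": "pnw-prod"}
assert all(f(conf) for f in filters)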
def apply_args_filters(
args,
) -> Mapping[str, Mapping[str, Mapping[str, Type[InstanceConfig]]]]:
"""
    Takes an args object and returns the dict of cluster:service:instances
Currently, will filter by clusters, instances, services, and deploy_groups
If no instances are found, will print a message and try to find matching instances
for each service
:param args: args object containing attributes to filter by
:returns: Dict of dicts, in format {cluster_name: {service_name: {instance1, instance2}}}
"""
clusters_services_instances: DefaultDict[
str, DefaultDict[str, Dict[str, Type[InstanceConfig]]]
] = defaultdict(lambda: defaultdict(dict))
if args.service_instance:
if args.service or args.instances:
print(
PaastaColors.red(
f"Invalid command. Do not include optional arguments -s or -i "
f"when using shorthand notation."
)
)
return clusters_services_instances
if "." in args.service_instance:
args.service, args.instances = args.service_instance.split(".", 1)
else:
print(PaastaColors.red(f'Use a "." to separate service and instance name'))
return clusters_services_instances
if args.service:
try:
validate_service_name(args.service, soa_dir=args.soa_dir)
except NoSuchService:
print(PaastaColors.red(f'The service "{args.service}" does not exist.'))
all_services = list_services(soa_dir=args.soa_dir)
suggestions = difflib.get_close_matches(
args.service, all_services, n=5, cutoff=0.5
)
if suggestions:
print(PaastaColors.red(f"Did you mean any of these?"))
for suggestion in suggestions:
print(PaastaColors.red(f" {suggestion}"))
return clusters_services_instances
all_services = [args.service]
else:
args.service = None
all_services = list_services(soa_dir=args.soa_dir)
if args.service is None and args.owner is None:
args.service = figure_out_service_name(args, soa_dir=args.soa_dir)
if args.clusters:
clusters = args.clusters.split(",")
else:
clusters = list_clusters()
if args.instances:
instances = args.instances.split(",")
else:
instances = None
filters = get_filters(args)
i_count = 0
for service in all_services:
if args.service and service != args.service:
continue
for instance_conf in get_instance_configs_for_service(
service, soa_dir=args.soa_dir, clusters=clusters, instances=instances
):
if all([f(instance_conf) for f in filters]):
cluster_service = clusters_services_instances[
instance_conf.get_cluster()
][service]
cluster_service[instance_conf.get_instance()] = instance_conf.__class__
i_count += 1
if i_count == 0 and args.service and args.instances:
for service in args.service.split(","):
verify_instances(args.instances, service, clusters)
return clusters_services_instances |
    Takes an args object and returns the dict of cluster:service:instances
Currently, will filter by clusters, instances, services, and deploy_groups
If no instances are found, will print a message and try to find matching instances
for each service
:param args: args object containing attributes to filter by
:returns: Dict of dicts, in format {cluster_name: {service_name: {instance1, instance2}}}
| apply_args_filters | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
def paasta_status(args) -> int:
"""Print the status of a Yelp service running on PaaSTA.
:param args: argparse.Namespace obj created from sys.args by cli"""
soa_dir = args.soa_dir
system_paasta_config = load_system_paasta_config()
return_codes = [0]
lock = Lock()
tasks = []
clusters_services_instances = apply_args_filters(args)
for cluster, service_instances in clusters_services_instances.items():
for service, instances in service_instances.items():
all_flink = all((i in FLINK_DEPLOYMENT_CONFIGS) for i in instances.values())
actual_deployments: Mapping[str, DeploymentVersion]
if all_flink:
actual_deployments = {}
else:
actual_deployments = get_actual_deployments(service, soa_dir)
if all_flink or actual_deployments:
deploy_pipeline = list(get_planned_deployments(service, soa_dir))
new = _use_new_paasta_status(args, system_paasta_config)
tasks.append(
(
report_status_for_cluster,
dict(
service=service,
cluster=cluster,
deploy_pipeline=deploy_pipeline,
actual_deployments=actual_deployments,
instance_whitelist=instances,
system_paasta_config=system_paasta_config,
lock=lock,
verbose=args.verbose,
new=new,
all_namespaces=args.all_namespaces,
),
)
)
else:
print(missing_deployments_message(service))
return_codes.append(1)
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
tasks = [executor.submit(t[0], **t[1]) for t in tasks] # type: ignore
try:
for future in concurrent.futures.as_completed(tasks): # type: ignore
return_code, output = future.result()
return_codes.append(return_code)
except KeyboardInterrupt:
# ideally we wouldn't need to reach into `ThreadPoolExecutor`
# internals, but so far this is the best way to stop all these
# threads until a public interface is added
executor._threads.clear() # type: ignore
concurrent.futures.thread._threads_queues.clear() # type: ignore
raise KeyboardInterrupt
return max(return_codes) | Print the status of a Yelp service running on PaaSTA.
:param args: argparse.Namespace obj created from sys.args by cli | paasta_status | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
def _backend_report(
normal_instance_count: int, up_backends: int, system_name: BackendType
) -> str:
"""Given that a service is in smartstack, this returns a human readable
report of the up backends"""
# TODO: Take into account a configurable threshold, PAASTA-1102
crit_threshold = 50
under_replicated, ratio = is_under_replicated(
num_available=up_backends,
expected_count=normal_instance_count,
crit_threshold=crit_threshold,
)
if under_replicated:
status = PaastaColors.red("Critical")
count = PaastaColors.red(
"(%d/%d, %d%%)" % (up_backends, normal_instance_count, ratio)
)
else:
status = PaastaColors.green("Healthy")
count = PaastaColors.green("(%d/%d)" % (up_backends, normal_instance_count))
up_string = PaastaColors.bold("UP")
return f"{status} - in {system_name} with {count} total backends {up_string} in this namespace." | Given that a service is in smartstack, this returns a human readable
report of the up backends | _backend_report | python | Yelp/paasta | paasta_tools/cli/cmds/status.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py | Apache-2.0 |
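The replication arithmetic behind the report, sketched under the assumption that is_under_replicated compares the available/expected percentage against crit_threshold:

up_backends, normal_instance_count, crit_threshold = 3, 10, 50
ratio = int(up_backends / normal_instance_count * 100)  # 30
under_replicated = ratio < crit_threshold  # True, so the report reads "Critical"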
def get_schema_validator(file_type: str) -> Draft4Validator:
"""Get the correct schema to use for validation
:param file_type: what schema type should we validate against
"""
schema_path = f"schemas/{file_type}_schema.json"
autoscaling_path = "schemas/autoscaling_schema.json"
schema = pkgutil.get_data("paasta_tools.cli", schema_path).decode()
autoscaling_ref = pkgutil.get_data("paasta_tools.cli", autoscaling_path).decode()
# This bit of code loads the base schemas and any relevant "referenced" schemas
# into a shared "store" -- so that you can reference the shared schema without
# having to find the exact right path on disk in your schema file. If you want
# to reference one schema from another, you still have to include a
# {"$ref": "<schema_id>#field"} section in your JsonSchema
#
# (see https://python-jsonschema.readthedocs.io/en/v2.6.0/references/ and this
# stack overflow answer https://stackoverflow.com/a/65150457 for details)
#
# Also note that this functionality has changed significantly in modern versions
# of python-jsonschema, so if we ever update we'll need to do some work here.
base_schema = json.loads(schema)
autoscaling_schema = json.loads(autoscaling_ref)
store = {
"base": base_schema,
autoscaling_schema["$id"]: json.loads(autoscaling_ref),
}
resolver = RefResolver.from_schema(base_schema, store=store)
return Draft4Validator(
json.loads(schema),
resolver=resolver,
format_checker=FormatChecker(),
) | Get the correct schema to use for validation
:param file_type: what schema type should we validate against
| get_schema_validator | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
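A reduced sketch of the shared-store $ref wiring the comments describe, using the same legacy RefResolver API (the schemas here are toys; the real ones live under paasta_tools/cli/schemas/):

from jsonschema import Draft4Validator, RefResolver

autoscaling = {"$id": "autoscaling", "type": "object"}
base = {"type": "object", "properties": {"autoscaling": {"$ref": "autoscaling#"}}}
resolver = RefResolver.from_schema(base, store={"autoscaling": autoscaling})
Draft4Validator(base, resolver=resolver).validate({"autoscaling": {}})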
def validate_rollback_bounds(
config: Dict[str, List[ConditionConfig]], file_loc: str
) -> bool:
"""
Ensure that at least one of upper_bound or lower_bound is set (and set to non-null values)
"""
errors = []
for source, queries in config.items():
for query in queries:
if not any(
(
query.get("lower_bound"),
query.get("upper_bound"),
),
):
errors.append(
f"{file_loc}:{source}: {query['query']} needs one of lower_bound OR upper_bound set."
)
for error in errors:
print(
failure(error, link=""), # TODO: point to actual docs once they exist
)
return len(errors) == 0 |
Ensure that at least one of upper_bound or lower_bound is set (and set to non-null values)
| validate_rollback_bounds | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
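Hypothetical inputs for the bound check above:

ok = {"splunk": [{"query": "errors", "lower_bound": None, "upper_bound": 10}]}
bad = {"splunk": [{"query": "errors", "lower_bound": None, "upper_bound": None}]}
# validate_rollback_bounds(ok, "rollback.yaml")   -> True
# validate_rollback_bounds(bad, "rollback.yaml")  -> False (and prints a failure)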
def validate_schema(file_path: str, file_type: str) -> bool:
"""Check if the specified config file has a valid schema
:param file_path: path to file to validate
:param file_type: what schema type should we validate against
"""
try:
validator = get_schema_validator(file_type)
except Exception as e:
print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
return False
basename = os.path.basename(file_path)
config_file_object = get_config_file_dict(file_path)
try:
validator.validate(config_file_object)
if file_type in K8S_TYPES and not validate_instance_names(
config_file_object, file_path
):
return False
if file_type == "rollback" and not validate_rollback_bounds(
config_file_object["conditions"],
file_path,
):
return False
except ValidationError:
print(f"{SCHEMA_INVALID}: {file_path}")
errors = validator.iter_errors(config_file_object)
print(" Validation Message: %s" % exceptions.best_match(errors).message)
return False
except Exception as e:
print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
return False
else:
print(f"{SCHEMA_VALID}: {basename}")
return True | Check if the specified config file has a valid schema
:param file_path: path to file to validate
:param file_type: what schema type should we validate against
| validate_schema | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def validate_all_schemas(service_path: str) -> bool:
"""Finds all recognized config files in service directory,
and validates their schema.
:param service_path: path to location of configuration files
"""
path = os.path.join(service_path, "**/*.yaml")
returncode = True
for file_name in glob(path, recursive=True):
if os.path.islink(file_name):
continue
filename_without_service_path = os.path.relpath(file_name, start=service_path)
for file_type in SCHEMA_TYPES:
if filename_without_service_path.startswith(file_type):
if not validate_schema(file_name, file_type):
returncode = False
return returncode | Finds all recognized config files in service directory,
and validates their schema.
:param service_path: path to location of configuration files
| validate_all_schemas | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def check_service_path(service_path):
"""Check that the specified path exists and has yaml files
:param service_path: Path to directory that should contain yaml files
"""
if not service_path or not os.path.isdir(service_path):
print(
failure(
"%s is not a directory" % service_path,
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
if not glob(os.path.join(service_path, "*.yaml")):
print(
failure(
"%s does not contain any .yaml files" % service_path,
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
return True | Check that the specified path exists and has yaml files
:param service_path: Path to directory that should contain yaml files
| check_service_path | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def get_service_path(service, soa_dir):
"""Determine the path of the directory containing the conf files
:param service: Name of service
:param soa_dir: Directory containing soa configs for all services
"""
if service:
service_path = os.path.join(soa_dir, service)
else:
if soa_dir == os.getcwd():
service_path = os.getcwd()
else:
print(UNKNOWN_SERVICE)
return None
return service_path | Determine the path of the directory containing the conf files
:param service: Name of service
:param soa_dir: Directory containing soa configs for all services
| get_service_path | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def path_to_soa_dir_service(service_path):
"""Split a service_path into its soa_dir and service name components"""
soa_dir = os.path.dirname(service_path)
service = os.path.basename(service_path)
return soa_dir, service | Split a service_path into its soa_dir and service name components | path_to_soa_dir_service | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def validate_unique_instance_names(service_path):
"""Check that the service does not use the same instance name more than once"""
soa_dir, service = path_to_soa_dir_service(service_path)
check_passed = True
for cluster in list_clusters(service, soa_dir):
service_instances = get_service_instance_list(
service=service, cluster=cluster, soa_dir=soa_dir
)
instance_names = [service_instance[1] for service_instance in service_instances]
instance_name_to_count = Counter(instance_names)
duplicate_instance_names = [
instance_name
for instance_name, count in instance_name_to_count.items()
if count > 1
]
if duplicate_instance_names:
check_passed = False
print(
duplicate_instance_names_message(
service, cluster, duplicate_instance_names
)
)
else:
print(no_duplicate_instance_names_message(service, cluster))
return check_passed | Check that the service does not use the same instance name more than once | validate_unique_instance_names | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
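The core of the duplicate check above is a Counter over instance names; a standalone sketch of that step, with made-up instance names:

from collections import Counter

instance_names = ["main", "canary", "main"]
duplicates = [name for name, count in Counter(instance_names).items() if count > 1]
assert duplicates == ["main"]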
def validate_autoscaling_configs(service_path: str) -> bool:
"""Validate new autoscaling configurations that are not validated by jsonschema for the service of interest.
:param service_path: Path to directory containing soa conf yaml files for service
"""
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
link = ""
skip_cpu_override_validation_list = (
load_system_paasta_config().get_skip_cpu_override_validation_services()
)
for cluster in list_clusters(service, soa_dir):
for instance, instance_config in load_all_instance_configs_for_service(
service=service, cluster=cluster, soa_dir=soa_dir
):
if instance_config.get_instance_type() not in K8S_TYPES:
continue
instance_config = cast(LongRunningServiceConfig, instance_config)
if (
# instance_config is an `InstanceConfig` object, which doesn't have an `is_autoscaling_enabled()`
# method, but by asserting that the type is in K8S_TYPES, we know we're dealing with either
# a KubernetesDeploymentConfig or an EksDeploymentConfig, so the cast is safe.
instance_config.is_autoscaling_enabled()
# we should eventually make the python templates add the override comment
                # to the corresponding YAML line, but until then we just opt these out of that validation
and __is_templated(
service,
soa_dir,
cluster,
workload=instance_config.get_instance_type(),
)
is False
):
autoscaling_params = instance_config.get_autoscaling_params()
should_skip_cpu_override_validation = (
service in skip_cpu_override_validation_list
)
seen_provider_types: Set[str] = set()
configured_provider_count = len(autoscaling_params["metrics_providers"])
for metrics_provider in autoscaling_params["metrics_providers"]:
try:
# Generic validation of the config
_validate_autoscaling_config(metrics_provider)
# Multi-metrics specific validation:
# 1. Bespoke policies cannot use multi-metrics scaling
# 2. Can't set the same metrics provider multiple times
if (
metrics_provider.get("decision_policy") == "bespoke"
and configured_provider_count > 1
):
raise AutoscalingValidationError(
f"cannot use bespoke autoscaling with HPA autoscaling"
)
if metrics_provider["type"] in seen_provider_types:
raise AutoscalingValidationError(
f"cannot set the same metrics provider multiple times: {metrics_provider['type']}"
)
seen_provider_types.add(metrics_provider["type"])
# Metrics-provider specific validations
if metrics_provider["type"] == METRICS_PROVIDER_ACTIVE_REQUESTS:
_validate_active_requests_autoscaling_configs(
instance_config, metrics_provider
)
elif metrics_provider["type"] == METRICS_PROVIDER_PROMQL:
_validate_arbitrary_promql_autoscaling_configs(
metrics_provider
)
elif (
metrics_provider["type"] == METRICS_PROVIDER_CPU
# to enable kew autoscaling we just set a decision policy of "bespoke", but
# the metrics_provider is (confusingly) left as "cpu"
and metrics_provider.get("decision_policy") != "bespoke"
and not should_skip_cpu_override_validation
):
# Do some extra validation below: we don't abstract that into the above function
# call because it needs a lot of extra information
# we need access to the comments, so we need to read the config with ruamel to be able
# to actually get them in a "nice" automated fashion
config = get_config_file_dict(
os.path.join(
soa_dir,
service,
f"{instance_config.get_instance_type()}-{cluster}.yaml",
),
use_ruamel=True,
)
if config[instance].get("cpus") is None:
# If we're using multiple scaling metrics and one of them is CPU, we must
# opt out of CPU autotuning
if configured_provider_count > 1:
link = "y/override-cpu-autotune"
raise AutoscalingValidationError(
"using CPU-based scaling with multiple scaling metrics requires explicit "
"'cpus' setting; see the following link for more info:"
)
# cpu autoscaled, but using autotuned values - can skip
continue
cpu_comment = _get_comments_for_key(
data=config[instance], key="cpus"
)
# we could probably have a separate error message if there's a comment that doesn't match
# the ack pattern, but that seems like overkill - especially for something that could cause
# a DAR if people aren't being careful.
if (
cpu_comment is None
or re.search(
pattern=OVERRIDE_CPU_AUTOTUNE_ACK_PATTERN,
string=cpu_comment,
)
is None
):
link = "y/override-cpu-autotune"
raise AutoscalingValidationError(
f"CPU override detected for a CPU-autoscaled instance; "
"see the following link for next steps:"
)
except AutoscalingValidationError as e:
returncode = False
print(
failure(
msg=f"Autoscaling validation failed for {service}.{instance} in {cluster}: {str(e)}",
link=link,
)
)
return returncode | Validate new autoscaling configurations that are not validated by jsonschema for the service of interest.
:param service_path: Path to directory containing soa conf yaml files for service
| validate_autoscaling_configs | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def paasta_validate_soa_configs(
service: str, service_path: str, verbose: bool = False
) -> bool:
"""Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
"""
if not check_service_path(service_path):
return False
if not validate_service_name(service):
return False
checks: List[Callable[[str], bool]] = [
validate_all_schemas,
partial(validate_tron, verbose=verbose),
validate_paasta_objects,
validate_unique_instance_names,
validate_autoscaling_configs,
validate_secrets,
validate_min_max_instances,
validate_cpu_burst,
]
# NOTE: we're explicitly passing a list comprehension to all()
# instead of a generator expression so that we run all checks
# no matter what
return all([check(service_path) for check in checks]) | Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
| paasta_validate_soa_configs | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def paasta_validate(args):
"""Generate a service_path from the provided args and call paasta_validate_soa_configs
:param args: argparse.Namespace obj created from sys.args by cli
"""
service_path = get_service_path(args.service, args.yelpsoa_config_root)
service = args.service or guess_service_name()
if not paasta_validate_soa_configs(service, service_path, args.verbose):
return 1 | Generate a service_path from the provided args and call paasta_validate_soa_configs
:param args: argparse.Namespace obj created from sys.args by cli
| paasta_validate | python | Yelp/paasta | paasta_tools/cli/cmds/validate.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py | Apache-2.0 |
def get_latest_marked_version(
git_url: str, deploy_group: str
) -> Optional[DeploymentVersion]:
"""Return the latest marked for deployment version or None"""
# TODO: correct this function for new tag format
refs = list_remote_refs(git_url)
_, sha, image_version = get_latest_deployment_tag(refs, deploy_group)
if sha:
return DeploymentVersion(sha=sha, image_version=image_version)
# We did not find a ref for this deploy group
    return None | Return the latest version marked for deployment, or None | get_latest_marked_version | python | Yelp/paasta | paasta_tools/cli/cmds/wait_for_deployment.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/wait_for_deployment.py | Apache-2.0
def validate_version_is_latest(
version: DeploymentVersion, git_url: str, deploy_group: str, service: str
):
"""Verify if the requested version is the latest marked for deployment.
Raise exception when the provided version is not the latest
marked for deployment in 'deploy_group' for 'service'.
"""
try:
marked_version = get_latest_marked_version(git_url, deploy_group)
except LSRemoteException as e:
print(
"Error talking to the git server: {}\n"
"It is not possible to verify that {} is marked for deployment in {}, "
"but I assume that it is marked and will continue waiting..".format(
e, version, deploy_group
)
)
return
if marked_version is None:
raise VersionError(
"ERROR: Nothing is marked for deployment "
"in {} for {}".format(deploy_group, service)
)
if version != marked_version:
raise VersionError(
"ERROR: The latest version marked for "
"deployment in {} is {}".format(deploy_group, marked_version)
) | Verify if the requested version is the latest marked for deployment.
Raise exception when the provided version is not the latest
marked for deployment in 'deploy_group' for 'service'.
| validate_version_is_latest | python | Yelp/paasta | paasta_tools/cli/cmds/wait_for_deployment.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/wait_for_deployment.py | Apache-2.0 |
def validate_deploy_group(deploy_group: str, service: str, soa_dir: str):
"""Validate deploy_group.
Raise exception if the specified deploy group is not used anywhere.
"""
in_use_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
_, invalid_deploy_groups = validate_given_deploy_groups(
in_use_deploy_groups, [deploy_group]
)
if len(invalid_deploy_groups) == 1:
raise DeployGroupError(
"ERROR: These deploy groups are not currently "
"used anywhere: {}.\n"
"You probably need one of these in-use deploy "
"groups?:\n {}".format(
",".join(invalid_deploy_groups), ",".join(in_use_deploy_groups)
)
) | Validate deploy_group.
Raise exception if the specified deploy group is not used anywhere.
| validate_deploy_group | python | Yelp/paasta | paasta_tools/cli/cmds/wait_for_deployment.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/wait_for_deployment.py | Apache-2.0 |
def _get_smartstack_proxy_ports_from_file(root, file):
"""Given a root and file (as from os.walk), attempt to return the highest
smartstack proxy port number (int) from that file. Returns 0 if there is no
smartstack proxy_port.
"""
ports = set()
with open(os.path.join(root, file)) as f:
data = yaml.safe_load(f)
if file.endswith("service.yaml") and "smartstack" in data:
# Specifying this in service.yaml is old and deprecated and doesn't
# support multiple namespaces.
ports = {int(data["smartstack"].get("proxy_port", 0))}
elif file.endswith("smartstack.yaml"):
for namespace in data.keys():
ports.add(data[namespace].get("proxy_port", 0))
    return ports | Given a root and file (as from os.walk), return the set of smartstack
proxy port numbers (ints) found in that file. The set may contain 0 when a
namespace has no proxy_port configured.
| _get_smartstack_proxy_ports_from_file | python | Yelp/paasta | paasta_tools/cli/fsm/autosuggest.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/fsm/autosuggest.py | Apache-2.0 |
def suggest_smartstack_proxy_port(
yelpsoa_config_root, range_min=19000, range_max=21000
):
"""Pick a random available port in the 19000-21000 block"""
available_proxy_ports = set(range(range_min, range_max + 1))
for root, dirs, files in os.walk(yelpsoa_config_root):
for f in files:
if f.endswith("smartstack.yaml"):
try:
used_ports = _get_smartstack_proxy_ports_from_file(root, f)
for used_port in used_ports:
available_proxy_ports.discard(used_port)
except Exception:
pass
available_proxy_ports.difference_update(get_inuse_ports_from_etc_services())
try:
return random.choice(list(available_proxy_ports))
except IndexError:
raise Exception(
f"There are no more ports available in the range [{range_min}, {range_max}]"
) | Pick a random available port in the 19000-21000 block | suggest_smartstack_proxy_port | python | Yelp/paasta | paasta_tools/cli/fsm/autosuggest.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/fsm/autosuggest.py | Apache-2.0 |
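A usage sketch (the config-root path is hypothetical): the function walks every smartstack.yaml under it, discards ports already claimed there or in /etc/services, and picks randomly from what remains.

port = suggest_smartstack_proxy_port("./yelpsoa-configs", range_min=19000, range_max=21000)
print("suggested proxy_port:", port)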
def get_deploy_durations_from_file(filename):
"""
filename: path to a file to be parsed for datetime data
    The expected input is a paasta service log of deploy events
The way I've been fetching them is by running 'internal logreader command' | grep deploy | grep event > filename
"""
    with open(filename, "r") as file_object:
        data = sorted(
            [json.loads(line.rstrip("\n")) for line in file_object],
            key=lambda x: get_datetime_from_ts(x["timestamp"]),
        )
timedeltas = defaultdict(list)
last_time = dict()
instance_bitvector = defaultdict(bool) # defaults to False
for datum in data:
time = get_datetime_from_ts(datum["timestamp"])
instance = datum["instance"]
if "in progress" in datum["message"] and not instance_bitvector[instance]:
instance_bitvector[instance] = True
last_time[instance] = time
elif "finishing" in datum["message"]:
instance_bitvector[instance] = False
timedeltas[instance].append(time - last_time[instance])
return timedeltas |
filename: path to a file to be parsed for datetime data
The expected input is a paasta service log of deploy events
The way I've been fetching them is by running 'internal logreader command' | grep deploy | grep event > filename
| get_deploy_durations_from_file | python | Yelp/paasta | paasta_tools/contrib/bounce_log_latency_parser.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/bounce_log_latency_parser.py | Apache-2.0 |
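A hedged sketch of the input this parser expects, one JSON object per line with at least timestamp, instance, and message fields (the values and filename below are illustrative):

# {"timestamp": "2021-06-01 12:00:00", "instance": "main", "message": "deploy event ... in progress"}
# {"timestamp": "2021-06-01 12:03:10", "instance": "main", "message": "deploy event ... finishing"}
timedeltas = get_deploy_durations_from_file("deploy_events.log")
for instance, durations in timedeltas.items():
    print(instance, max(durations))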
def host_to_ip(host: str, fallback: str) -> str:
"""Try to resolve a host to an IP with a fallback.
Because DNS resolution is relatively slow and can't be easily performed
using asyncio, we cheat a little and use a regex for well-formed hostnames
to try to guess the IP without doing real resolution.
A fallback is needed because in some cases the nerve registration does not
match an actual hostname (e.g. "prod-db15" or "prod-splunk-master").
"""
for match in (
re.match(r"^(\d+)-(\d+)-(\d+)-(\d+)-", host),
re.match(r"^ip-(\d+)-(\d+)-(\d+)-(\d+)", host),
):
if match:
return ".".join(match.groups())
else:
try:
return socket.gethostbyname(host)
except socket.gaierror:
return fallback | Try to resolve a host to an IP with a fallback.
Because DNS resolution is relatively slow and can't be easily performed
using asyncio, we cheat a little and use a regex for well-formed hostnames
to try to guess the IP without doing real resolution.
A fallback is needed because in some cases the nerve registration does not
match an actual hostname (e.g. "prod-db15" or "prod-splunk-master").
| host_to_ip | python | Yelp/paasta | paasta_tools/contrib/check_orphans.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/check_orphans.py | Apache-2.0 |
def get_container_type(container_name: str, instance_name: str) -> str:
"""
To differentiate between main service containers and sidecars
"""
if instance_name and container_name == kubernetes_tools.sanitise_kubernetes_name(
instance_name
):
return MAIN_CONTAINER_TYPE
else:
return container_name |
To differentiate between main service containers and sidecars
| get_container_type | python | Yelp/paasta | paasta_tools/contrib/get_running_task_allocation.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/get_running_task_allocation.py | Apache-2.0 |
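A quick sketch of the intended behavior, assuming sanitise_kubernetes_name leaves a simple name like "main" unchanged (the exact sanitisation rules live in kubernetes_tools):

assert get_container_type("main", "main") == MAIN_CONTAINER_TYPE
assert get_container_type("hacheck", "main") == "hacheck"  # sidecars keep their own name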
def get_report_from_splunk(creds, app, filename, criteria_filter):
"""Expect a table containing at least the following fields:
criteria (<service> kubernetes-<cluster_name> <instance>)
service_owner (Optional)
project (Required to create tickets)
estimated_monthly_savings (Optional)
search_time (Unix time)
one of the following pairs:
- current_cpus and suggested_cpus
- current_mem and suggested_mem
- current_disk and suggested_disk
- suggested_hacheck_cpus
- suggested_cpu_burst_add
- suggested_min_instances
- suggested_max_instances
"""
url = f"https://splunk-api.yelpcorp.com/servicesNS/nobody/{app}/search/jobs/export"
search = (
'| inputlookup {filename} | search criteria="{criteria_filter}"'
'| eval _time = search_time | where _time > relative_time(now(),"-7d")'
).format(filename=filename, criteria_filter=criteria_filter)
log.debug(f"Sending this query to Splunk: {search}\n")
data = {"output_mode": "json", "search": search}
creds = creds.split(":")
resp = requests.post(url, data=data, auth=(creds[0], creds[1]))
resp_text = resp.text.split("\n")
log.info("Found {} services to rightsize".format(len(resp_text) - 1))
resp_text = [x for x in resp_text if x]
resp_text = [json.loads(x) for x in resp_text]
services_to_update = {}
for d in resp_text:
if "result" not in d:
raise ValueError(f"Splunk request didn't return any results: {resp_text}")
criteria = d["result"]["criteria"]
serv = {
"cluster": criteria.split(" ")[1],
"date": d["result"]["_time"].split(" ")[0],
"instance": criteria.split(" ")[2],
"money": d["result"].get("estimated_monthly_savings", 0),
"owner": d["result"].get("service_owner", "Unavailable"),
"project": d["result"].get("project", "Unavailable"),
"service": criteria.split(" ")[0],
# only mergeable fields below
"cpu_burst_add": d["result"].get("suggested_cpu_burst_add"),
"cpus": d["result"].get("suggested_cpus"),
"disk": d["result"].get("suggested_disk"),
"hacheck_cpus": d["result"].get("suggested_hacheck_cpus"),
"max_instances": d["result"].get("suggested_max_instances"),
"mem": d["result"].get("suggested_mem"),
"min_instances": d["result"].get("suggested_min_instances"),
"old_cpus": d["result"].get("current_cpus"),
"old_disk": d["result"].get("current_disk"),
"old_mem": d["result"].get("current_mem"),
}
# the report we get is all strings, so we need to convert them to the right types
field_conversions = {
"current_cpus": float,
"suggested_cpu_burst_add": float,
"suggested_cpus": float,
"suggested_disk": int,
"suggested_hacheck_cpus": float,
"suggested_max_instances": int,
"suggested_mem": int,
"suggested_min_instances": int,
# not quite sure why these are floats...they're ints in soaconfigs
"current_disk": _force_str_to_int,
"current_mem": _force_str_to_int,
}
# merge results if we've already seen rows for this service
# NOTE: this is necessary since the Splunk search can return multiple rows
# for the same (service, cluster, instance) tuple as the autotune query
    # treats certain cpu allocation changes as if the tuple were entirely different.
# this is ostensibly due to a theory that if you update resource allocation, existing
# autotune data is potentially invalidated - but in practice this ends up hampering
# autotune for services with highly variable resource allocation - e.g., we have some services
# that have their cpu allocation tweaked by +/-.1 cpu pretty frequently, but then min/max autotune
# is never updated.
if criteria in services_to_update:
for key in serv:
# we probably don't want to merge any other fields since they're going to be strings :p
if key not in field_conversions:
continue
last_proposed_suggestion = services_to_update[criteria][key]
proposed_suggestion = serv[key]
# if both are non-null, take the max of the two
if (
last_proposed_suggestion is not None
and proposed_suggestion is not None
):
services_to_update[criteria][key] = max(
last_proposed_suggestion,
proposed_suggestion,
key=field_conversions[key],
)
# otherwise, if only one of these is non-null, use that one
elif last_proposed_suggestion is not None:
services_to_update[criteria][key] = last_proposed_suggestion
elif proposed_suggestion is not None:
services_to_update[criteria][key] = proposed_suggestion
# otherwise, if we didn't enter any of the above branches, we're essentially leaving in place the
# existing None
# otherwise, simply add the service to the final report
else:
services_to_update[criteria] = serv
return {
"search": search,
"results": services_to_update,
} | Expect a table containing at least the following fields:
criteria (<service> kubernetes-<cluster_name> <instance>)
service_owner (Optional)
project (Required to create tickets)
estimated_monthly_savings (Optional)
search_time (Unix time)
one of the following pairs:
- current_cpus and suggested_cpus
- current_mem and suggested_mem
- current_disk and suggested_disk
- suggested_hacheck_cpus
- suggested_cpu_burst_add
- suggested_min_instances
- suggested_max_instances
| get_report_from_splunk | python | Yelp/paasta | paasta_tools/contrib/paasta_update_soa_memcpu.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/paasta_update_soa_memcpu.py | Apache-2.0 |
def get_reviewers_in_group(group_name):
"""Using rbt's target-groups argument overrides our configured default review groups.
So we'll expand the group into usernames and pass those users in the group individually.
"""
rightsizer_reviewers = json.loads(
subprocess.check_output(
(
"rbt",
"api-get",
"--server",
"https://reviewboard.yelpcorp.com",
f"groups/{group_name}/users/",
)
).decode("UTF-8")
)
return [user.get("username", "") for user in rightsizer_reviewers.get("users", {})] | Using rbt's target-groups argument overrides our configured default review groups.
So we'll expand the group into usernames and pass those users in the group individually.
| get_reviewers_in_group | python | Yelp/paasta | paasta_tools/contrib/paasta_update_soa_memcpu.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/paasta_update_soa_memcpu.py | Apache-2.0 |
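For reference, a hedged sketch of the JSON shape the rbt api-get call returns and this function consumes (the group name and usernames are hypothetical):

# {"users": [{"username": "alice"}, {"username": "bob"}], ...}
reviewers = get_reviewers_in_group("paasta-rightsizer")  # -> ["alice", "bob"]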
def nested_inc(op, _, attr_val, attr_name, state, step=1):
"""Increments relevant counter by step from args array"""
oph = state.setdefault(op, {})
nameh = oph.setdefault(attr_name, {})
nameh.setdefault(attr_val, 0)
nameh[attr_val] += step
return state | Increments relevant counter by step from args array | nested_inc | python | Yelp/paasta | paasta_tools/frameworks/constraints.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/constraints.py | Apache-2.0 |
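A standalone example of the nested counter state this builds (the operator and attribute names are made up):

state = {}
nested_inc("MAX_PER", None, "us-west-1a", "habitat", state)
nested_inc("MAX_PER", None, "us-west-1a", "habitat", state)
nested_inc("MAX_PER", None, "us-west-1b", "habitat", state)
assert state == {"MAX_PER": {"habitat": {"us-west-1a": 2, "us-west-1b": 1}}}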
def check_offer_constraints(offer, constraints, state):
"""Returns True if all constraints are satisfied by offer's attributes,
returns False otherwise. Prints a error message and re-raises if an error
was thrown."""
for (attr, op, val) in constraints:
try:
offer_attr = next((x for x in offer.attributes if x.name == attr), None)
if offer_attr is None:
print("Attribute not found for a constraint: %s" % attr)
return False
elif not (CONS_OPS[op](val, offer_attr.text.value, offer_attr.name, state)):
print(
"Constraint not satisfied: [{} {} {}] for {} with {}".format(
attr, op, val, offer_attr.text.value, state
)
)
return False
except Exception as err:
print(
"Error while matching constraint: [{} {} {}] {}".format(
attr, op, val, str(err)
)
)
raise err
    return True | Returns True if all constraints are satisfied by the offer's attributes,
returns False otherwise. Prints an error message and re-raises if an error
was thrown. | check_offer_constraints | python | Yelp/paasta | paasta_tools/frameworks/constraints.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/constraints.py | Apache-2.0 |
def update_constraint_state(offer, constraints, state, step=1):
"""Mutates state for each offer attribute found in constraints by calling
relevant UPDATE_OP lambda"""
for (attr, op, val) in constraints:
for oa in offer.attributes:
if attr == oa.name:
UPDATE_OPS[op](val, oa.text.value, attr, state, step) | Mutates state for each offer attribute found in constraints by calling
relevant UPDATE_OP lambda | update_constraint_state | python | Yelp/paasta | paasta_tools/frameworks/constraints.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/constraints.py | Apache-2.0 |
def launch_tasks_for_offers(
self, driver: MesosSchedulerDriver, offers
) -> List[TaskInfo]:
"""For each offer tries to launch all tasks that can fit in there.
Declines offer if no fitting tasks found."""
launched_tasks: List[TaskInfo] = []
for offer in offers:
with self.constraint_state_lock:
try:
tasks, new_state = self.tasks_and_state_for_offer(
driver, offer, self.constraint_state
)
if tasks is not None and len(tasks) > 0:
driver.launchTasks([offer.id], tasks)
for task in tasks:
self.task_store.add_task_if_doesnt_exist(
task["task_id"]["value"],
health=None,
mesos_task_state=TASK_STAGING,
offer=offer,
resources=task["resources"],
)
launched_tasks.extend(tasks)
self.constraint_state = new_state
else:
driver.declineOffer(offer.id)
except ConstraintFailAllTasksError:
self.log("Offer failed constraints for every task, rejecting 60s")
filters = {"refuse_seconds": 60}
driver.declineOffer(offer.id, filters)
        return launched_tasks | For each offer, tries to launch all tasks that can fit in it.
Declines the offer if no fitting tasks are found. | launch_tasks_for_offers | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0
def task_fits(self, offer):
"""Checks whether the offer is big enough to fit the tasks"""
needed_resources = {
"cpus": self.service_config.get_cpus(),
"mem": self.service_config.get_mem(),
"disk": self.service_config.get_disk(),
}
for resource in offer.resources:
try:
if resource.scalar.value < needed_resources[resource.name]:
return False
except KeyError:
pass
return True | Checks whether the offer is big enough to fit the tasks | task_fits | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0 |
def need_more_tasks(self, name, existingTasks, scheduledTasks):
"""Returns whether we need to start more tasks."""
num_have = 0
for task, parameters in existingTasks.items():
if self.is_task_new(name, task) and (
parameters.mesos_task_state in LIVE_TASK_STATES
):
num_have += 1
for task in scheduledTasks:
if task["name"] == name:
num_have += 1
return num_have < self.service_config.get_desired_instances() | Returns whether we need to start more tasks. | need_more_tasks | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0 |
def tasks_and_state_for_offer(
self, driver: MesosSchedulerDriver, offer, state: ConstraintState
) -> Tuple[List[TaskInfo], ConstraintState]:
"""Returns collection of tasks that can fit inside an offer."""
tasks: List[TaskInfo] = []
offerCpus = 0.0
offerMem = 0.0
offerPorts: List[int] = []
for resource in offer.resources:
if resource.name == "cpus":
offerCpus += resource.scalar.value
elif resource.name == "mem":
offerMem += resource.scalar.value
elif resource.name == "ports":
for rg in resource.ranges.range:
# I believe mesos protobuf ranges are inclusive, but range() is exclusive
offerPorts += range(rg.begin, rg.end + 1)
remainingCpus = offerCpus
remainingMem = offerMem
remainingPorts = set(offerPorts)
base_task = self.service_config.base_task(self.system_paasta_config)
base_task["agent_id"]["value"] = offer["agent_id"]["value"]
task_mem = self.service_config.get_mem()
task_cpus = self.service_config.get_cpus()
# don't mutate existing state
new_constraint_state = copy.deepcopy(state)
total = 0
failed_constraints = 0
while self.need_more_tasks(
base_task["name"], self.task_store.get_all_tasks(), tasks
):
total += 1
if not (
remainingCpus >= task_cpus
and remainingMem >= task_mem
and self.offer_matches_pool(offer)
and len(remainingPorts) >= 1
):
break
if not (
check_offer_constraints(offer, self.constraints, new_constraint_state)
):
failed_constraints += 1
break
task_port = random.choice(list(remainingPorts))
task = copy.deepcopy(base_task)
task["task_id"] = {"value": "{}.{}".format(task["name"], uuid.uuid4().hex)}
task["container"]["docker"]["port_mappings"][0]["host_port"] = task_port
for resource in task["resources"]:
if resource["name"] == "ports":
resource["ranges"]["range"][0]["begin"] = task_port
resource["ranges"]["range"][0]["end"] = task_port
tasks.append(task)
remainingCpus -= task_cpus
remainingMem -= task_mem
remainingPorts -= {task_port}
update_constraint_state(offer, self.constraints, new_constraint_state)
# raise constraint error but only if no other tasks fit/fail the offer
if total > 0 and failed_constraints == total:
raise ConstraintFailAllTasksError
        return tasks, new_constraint_state | Returns a collection of tasks that can fit inside an offer. | tasks_and_state_for_offer | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0
def healthiness_score(task_id):
"""Return a tuple that can be used as a key for sorting, that expresses our desire to keep this task around.
Higher values (things that sort later) are more desirable."""
params = all_tasks_with_params[task_id]
state_score = {
TASK_KILLING: 0,
TASK_FINISHED: 0,
TASK_FAILED: 0,
TASK_KILLED: 0,
TASK_LOST: 0,
TASK_ERROR: 0,
TASK_STAGING: 1,
TASK_STARTING: 2,
TASK_RUNNING: 3,
}[params.mesos_task_state]
# unhealthy tasks < healthy
# staging < starting < running
# old < new
return (
params.is_healthy,
state_score,
self.is_task_new(base_task_name, task_id),
) | Return a tuple that can be used as a key for sorting, that expresses our desire to keep this task around.
Higher values (things that sort later) are more desirable. | healthiness_score | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0 |
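Because the tuple sorts ascending with the most desirable tasks last, a caller can trim surplus capacity by killing from the front of the sorted list; a hedged sketch (the variable names are hypothetical):

# unhealthy, not-yet-running, and older tasks sort first and get killed first
tasks_to_kill = sorted(candidate_task_ids, key=healthiness_score)[:num_surplus]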
def get_happy_tasks(self, tasks_with_params: Dict[str, MesosTaskParameters]):
"""Filter a dictionary of tasks->params to those that are running and not draining."""
happy_tasks = {}
for tid, params in tasks_with_params.items():
if params.mesos_task_state == TASK_RUNNING and not params.is_draining:
happy_tasks[tid] = params
return happy_tasks | Filter a dictionary of tasks->params to those that are running and not draining. | get_happy_tasks | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0 |
def make_drain_task(self, task_id: str):
"""Return a DrainTask object, which is suitable for passing to drain methods."""
ports = []
params = self.task_store.get_task(task_id)
for resource in params.resources:
if resource["name"] == "ports":
for rg in resource["ranges"]["range"]:
for port in range(rg["begin"], rg["end"] + 1):
ports.append(port)
return DrainTask(
id=task_id, host=params.offer["agent_id"]["value"], ports=ports
) | Return a DrainTask object, which is suitable for passing to drain methods. | make_drain_task | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0 |
def recreate_drain_method(self) -> None:
"""Re-instantiate self.drain_method. Should be called after self.service_config changes."""
self.drain_method = drain_lib.get_drain_method(
name=self.service_config.get_drain_method(
self.service_config.service_namespace_config
),
service=self.service_name,
instance=self.instance_name,
registrations=self.service_config.get_registrations(),
**self.service_config.get_drain_method_params(
self.service_config.service_namespace_config
),
) | Re-instantiate self.drain_method. Should be called after self.service_config changes. | recreate_drain_method | python | Yelp/paasta | paasta_tools/frameworks/native_scheduler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py | Apache-2.0 |
def base_task(
self, system_paasta_config: SystemPaastaConfig, portMappings=True
) -> TaskInfo:
"""Return a TaskInfo Dict with all the fields corresponding to the
configuration filled in.
        Does not include task["agent_id"] or task["task_id"]; those need to be
        computed separately.
"""
docker_volumes = self.get_volumes(
system_volumes=system_paasta_config.get_volumes(),
)
task: TaskInfo = {
"name": "",
"task_id": {"value": ""},
"agent_id": {"value": ""},
"container": {
"type": "DOCKER",
"docker": {
"image": self.get_docker_url(),
"parameters": [
{"key": param["key"], "value": param["value"]}
for param in self.format_docker_parameters()
],
"network": self.get_mesos_network_mode(),
"port_mappings": [],
},
"volumes": [
{
"container_path": volume["containerPath"],
"host_path": volume["hostPath"],
"mode": volume["mode"].upper(),
}
for volume in docker_volumes
],
},
"command": {
"value": str(self.get_cmd()),
"uris": [
{
"value": system_paasta_config.get_dockercfg_location(),
"extract": False,
}
],
},
"resources": [
{
"name": "cpus",
"type": "SCALAR",
"scalar": {"value": self.get_cpus()},
},
{"name": "mem", "type": "SCALAR", "scalar": {"value": self.get_mem()}},
],
}
if portMappings:
task["container"]["docker"]["port_mappings"] = [
{
"container_port": self.get_container_port(),
# filled by tasks_and_state_for_offer()
"host_port": 0,
"protocol": "tcp",
}
]
task["resources"].append(
{
"name": "ports",
"type": "RANGES",
"ranges": {
# filled by tasks_and_state_for_offer
"range": [{"begin": 0, "end": 0}]
},
}
)
task["name"] = self.task_name(task)
return task | Return a TaskInfo Dict with all the fields corresponding to the
configuration filled in.
Does not include task["agent_id"] or task["task_id"]; those need to be
computed separately.
| base_task | python | Yelp/paasta | paasta_tools/frameworks/native_service_config.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_service_config.py | Apache-2.0 |
def merge(self: _SelfT, **kwargs) -> "MesosTaskParameters":
"""Return a merged MesosTaskParameters object, where attributes in other take precedence over self."""
new_dict = copy.deepcopy(self.__dict__)
new_dict.update(kwargs)
        return MesosTaskParameters(**new_dict) | Return a merged MesosTaskParameters object, where the given keyword attributes take precedence over self's. | merge | python | Yelp/paasta | paasta_tools/frameworks/task_store.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/task_store.py | Apache-2.0
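A usage sketch of the copy-don't-mutate contract, assuming the constructor accepts arbitrary keyword fields (as merge() implies) and that TASK_RUNNING is in scope:

params = MesosTaskParameters(mesos_task_state=TASK_RUNNING, is_draining=False)
drained = params.merge(is_draining=True)
assert drained.is_draining and not params.is_draining  # original left untouched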
def add_task_if_doesnt_exist(self, task_id: str, **kwargs) -> None:
"""Add a task if it does not already exist. If it already exists, do nothing."""
if self.get_task(task_id) is not None:
return
else:
self.overwrite_task(task_id, MesosTaskParameters(**kwargs)) | Add a task if it does not already exist. If it already exists, do nothing. | add_task_if_doesnt_exist | python | Yelp/paasta | paasta_tools/frameworks/task_store.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/task_store.py | Apache-2.0 |
def _get_task(self, task_id: str) -> Tuple[MesosTaskParameters, ZnodeStat]:
"""Like get_task, but also returns the ZnodeStat that self.zk_client.get() returns"""
try:
data, stat = self.zk_client.get("/%s" % task_id)
return MesosTaskParameters.deserialize(data), stat
except NoNodeError:
return None, None
except json.decoder.JSONDecodeError:
_log(
service=self.service_name,
instance=self.instance_name,
level="debug",
component="deploy",
line=f"Warning: found non-json-decodable value in zookeeper for task {task_id}: {data}",
)
return None, None | Like get_task, but also returns the ZnodeStat that self.zk_client.get() returns | _get_task | python | Yelp/paasta | paasta_tools/frameworks/task_store.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/task_store.py | Apache-2.0 |
def _format_remote_run_job_name(
job: V1Job,
user: str,
) -> str:
"""Format name for remote run job
:param V1Job job: job definition
:param str user: the user requesting the remote-run
:return: job name
"""
return limit_size_with_hash(f"remote-run-{user}-{job.metadata.name}") | Format name for remote run job
:param V1Job job: job definition
:param str user: the user requesting the remote-run
:return: job name
| _format_remote_run_job_name | python | Yelp/paasta | paasta_tools/kubernetes/remote_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py | Apache-2.0 |
def remote_run_start(
service: str,
instance: str,
cluster: str,
user: str,
interactive: bool,
recreate: bool,
max_duration: int,
is_toolbox: bool,
) -> RemoteRunOutcome:
"""Trigger remote-run job
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
:param bool interactive: whether it is expected to access the remote-run job interactively
:param bool recreate: whether to recreate remote-run job if existing
    :param int max_duration: maximum allowed duration for the remote-run job
:param bool is_toolbox: requested job is for a toolbox container
:return: outcome of the operation, and resulting Kubernetes pod information
"""
kube_client = KubeClient()
# Load the service deployment settings
deployment_config = (
generate_toolbox_deployment(service, cluster, user)
if is_toolbox
else load_eks_service_config(service, instance, cluster)
)
# Set to interactive mode
if interactive and not is_toolbox:
deployment_config.config_dict["cmd"] = f"sleep {max_duration}"
# Create the app with a new name
formatted_job = deployment_config.format_kubernetes_job(
job_label=REMOTE_RUN_JOB_LABEL,
deadline_seconds=max_duration,
keep_routable_ip=is_toolbox,
)
job_name = _format_remote_run_job_name(formatted_job, user)
formatted_job.metadata.name = job_name
app_wrapper = get_application_wrapper(formatted_job)
app_wrapper.soa_config = deployment_config
# Launch pod
logger.info(f"Starting {job_name}")
try:
app_wrapper.create(kube_client)
except ApiException as e:
if e.status != 409:
raise
if recreate:
remote_run_stop(
service=service,
instance=instance,
cluster=cluster,
user=user,
is_toolbox=is_toolbox,
)
return remote_run_start(
service=service,
instance=instance,
cluster=cluster,
user=user,
interactive=interactive,
recreate=False,
max_duration=max_duration,
is_toolbox=is_toolbox,
)
return {
"status": 200,
"message": "Remote run sandbox started",
"job_name": job_name,
} | Trigger remote-run job
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
:param bool interactive: whether it is expected to access the remote-run job interactively
:param bool recreate: whether to recreate remote-run job if existing
:param int max_duration: maximum allowed duration for the remote-run job
:param bool is_toolbox: requested job is for a toolbox container
:return: outcome of the operation, and resulting Kubernetes pod information
| remote_run_start | python | Yelp/paasta | paasta_tools/kubernetes/remote_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py | Apache-2.0 |
def remote_run_ready(
service: str,
instance: str,
cluster: str,
job_name: str,
user: str,
is_toolbox: bool,
) -> RemoteRunOutcome:
"""Check if remote-run pod is ready
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str job_name: name of the remote-run job to check
:param bool is_toolbox: requested job is for a toolbox container
:return: job status, with pod info
"""
kube_client = KubeClient()
# Load the service deployment settings
deployment_config = (
generate_toolbox_deployment(service, cluster, user)
if is_toolbox
else load_eks_service_config(service, instance, cluster)
)
namespace = deployment_config.get_namespace()
pod = find_job_pod(kube_client, namespace, job_name)
if not pod:
return {"status": 404, "message": "No pod found"}
if pod.status.phase == "Running":
if pod.metadata.deletion_timestamp:
return {"status": 409, "message": "Pod is terminating"}
result: RemoteRunOutcome = {
"status": 200,
"message": "Pod ready",
"pod_name": pod.metadata.name,
"namespace": namespace,
}
if is_toolbox:
result["pod_address"] = pod.status.pod_ip
return result
return {
"status": 204,
"message": "Pod not ready",
} | Check if remote-run pod is ready
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str job_name: name of the remote-run job to check
:param bool is_toolbox: requested job is for a toolbox container
:return: job status, with pod info
| remote_run_ready | python | Yelp/paasta | paasta_tools/kubernetes/remote_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py | Apache-2.0 |
def remote_run_stop(
service: str,
instance: str,
cluster: str,
user: str,
is_toolbox: bool,
) -> RemoteRunOutcome:
"""Stop remote-run job
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
:param bool is_toolbox: requested job is for a toolbox container
:return: outcome of the operation
"""
kube_client = KubeClient()
# Load the service deployment settings
deployment_config = (
generate_toolbox_deployment(service, cluster, user)
if is_toolbox
else load_eks_service_config(service, instance, cluster)
)
# Rebuild the job metadata
formatted_job = deployment_config.format_kubernetes_job(
job_label=REMOTE_RUN_JOB_LABEL
)
job_name = _format_remote_run_job_name(formatted_job, user)
formatted_job.metadata.name = job_name
# Stop the job
logger.info(f"Stopping {job_name}")
app_wrapper = get_application_wrapper(formatted_job)
app_wrapper.soa_config = deployment_config
app_wrapper.deep_delete(kube_client)
return {"status": 200, "message": "Job successfully removed"} | Stop remote-run job
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
:param bool is_toolbox: requested job is for a toolbox container
:return: outcome of the operation
| remote_run_stop | python | Yelp/paasta | paasta_tools/kubernetes/remote_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py | Apache-2.0 |
def remote_run_token(
service: str,
instance: str,
cluster: str,
user: str,
) -> str:
"""Creates a short lived token for execing into a pod
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
"""
kube_client = KubeClient()
# Load the service deployment settings
deployment_config = load_eks_service_config(service, instance, cluster)
namespace = deployment_config.get_namespace()
# Rebuild the job metadata
formatted_job = deployment_config.format_kubernetes_job(
job_label=REMOTE_RUN_JOB_LABEL
)
job_name = _format_remote_run_job_name(formatted_job, user)
# Find pod and create exec token for it
pod = find_job_pod(kube_client, namespace, job_name)
if not pod:
raise RemoteRunError(f"Pod for {job_name} not found")
pod_name = pod.metadata.name
logger.info(f"Generating temporary service account token for {pod_name}")
service_account = create_remote_run_service_account(
kube_client, namespace, pod_name, user
)
role = create_pod_scoped_role(kube_client, namespace, pod_name, user)
bind_role_to_service_account(kube_client, namespace, service_account, role, user)
    return create_temp_exec_token(kube_client, namespace, service_account) | Creates a short-lived token for execing into a pod
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
| remote_run_token | python | Yelp/paasta | paasta_tools/kubernetes/remote_run.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py | Apache-2.0 |