code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def get_container_type(self) -> Optional[str]:
    """Determine which Mesos containerizer should run this instance.

    :returns: Mesos containerizer type, DOCKER or MESOS
    """
    # GPU support requires the MESOS containerizer; everything else
    # defaults to DOCKER.
    return "MESOS" if self.get_gpus() is not None else "DOCKER"
def get_env_dictionary(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
    """Build the environment variables to inject into the container.

    Standard PAASTA_* variables come first; user-supplied ``env`` entries
    from the instance config are merged in last and therefore win.
    All keys and values are coerced to strings.
    """
    environment = {
        "PAASTA_SERVICE": self.service,
        "PAASTA_INSTANCE": self.instance,
        "PAASTA_CLUSTER": self.cluster,
        "PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
        "PAASTA_DOCKER_IMAGE": self.get_docker_image(),
        "PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
        "PAASTA_RESOURCE_MEM": str(self.get_mem()),
        "PAASTA_RESOURCE_DISK": str(self.get_disk()),
    }
    if self.get_gpus() is not None:
        environment["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
    try:
        docker_url = self.get_docker_url(system_paasta_config=system_paasta_config)
        environment["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(docker_url)
    except Exception:
        # Best effort only: failing to derive a git sha must not break
        # environment construction.
        pass
    image_version = self.get_image_version()
    if image_version is not None:
        environment["PAASTA_IMAGE_VERSION"] = image_version
    monitoring_team = self.get_team()
    if monitoring_team:
        environment["PAASTA_MONITORING_TEAM"] = monitoring_team
    paasta_instance_type = self.get_instance_type()
    if paasta_instance_type:
        environment["PAASTA_INSTANCE_TYPE"] = paasta_instance_type
    # Our workloads interact with AWS quite a lot, so it comes handy to
    # propagate an "application ID" in the user-agent of API requests
    # for debugging purposes (max length is 50 chars from AWS docs).
    environment["AWS_SDK_UA_APP_ID"] = f"{self.service}.{self.instance}"[:50]
    environment.update(self.config_dict.get("env", {}))
    return {str(key): str(value) for key, value in environment.items()}
def get_env(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
    """Return the plain environment dictionary for this instance.

    Subclasses override this getter when they need more
    implementation-specific environment handling.
    """
    return self.get_env_dictionary(system_paasta_config=system_paasta_config)
def get_args(self) -> Optional[List[str]]:
    """Get the docker args specified in the service's configuration.

    Exactly one of ``cmd`` and ``args`` may be specified.

    :returns: the configured args; ``[]`` when neither args nor cmd is
        specified; ``None`` when cmd is specified but args is not
    :raises InvalidInstanceConfig: when both cmd and args are specified
    """
    if self.get_cmd() is None:
        # No cmd configured: args defaults to an empty list.
        return self.config_dict.get("args", [])
    configured_args = self.config_dict.get("args", None)
    if configured_args is None:
        return None
    # TODO validation stuff like this should be moved into a check_*
    raise InvalidInstanceConfig(
        "Instance configuration can specify cmd or args, but not both."
    )
def get_deploy_constraints(
    self,
    blacklist: DeployBlacklist,
    whitelist: DeployWhitelist,
    system_deploy_blacklist: DeployBlacklist,
    system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
    """Translate the service and system deploy black/whitelists into one
    flat list of scheduler constraints."""
    constraints: List[Constraint] = []
    constraints.extend(deploy_blacklist_to_constraints(blacklist))
    constraints.extend(deploy_whitelist_to_constraints(whitelist))
    constraints.extend(deploy_blacklist_to_constraints(system_deploy_blacklist))
    constraints.extend(deploy_whitelist_to_constraints(system_deploy_whitelist))
    return constraints
def get_docker_image(self) -> str:
    """Docker image name (with tag) for this service branch, read from
    the generated deployments.json data; empty string when no branch
    data is available."""
    if self.branch_dict is None:
        return ""
    return self.branch_dict["docker_image"]
def get_image_version(self) -> Optional[str]:
    """Additional identifying information for the Docker image from the
    generated deployments.json data, or None when absent."""
    if self.branch_dict is None:
        return None
    return self.branch_dict.get("image_version")
def get_desired_state(self) -> str:
    """Desired state ('start' or 'stop') for this service branch from the
    generated deployments.json data; defaults to 'start' when no branch
    data is available."""
    if self.branch_dict is None:
        return "start"
    return self.branch_dict["desired_state"]
def get_force_bounce(self) -> Optional[str]:
    """Force-bounce token for this service branch from the generated
    deployments.json data.

    Changing this token signals that the instance should be recreated
    and bounced even when nothing else changed. May be None or a string
    (generally a timestamp).
    """
    if self.branch_dict is None:
        return None
    return self.branch_dict["force_bounce"]
def get_dependencies(self) -> Optional[Dict]:
    """Look up the dependency list referenced by dependency_reference
    ('main' when no reference is set) in the configured dependencies
    mapping.

    :returns: the referenced list of dependency dicts, or None when the
        config has no dependencies or the reference is missing
    """
    configured = self.config_dict.get("dependencies")
    if not configured:
        return None
    reference = self.get_dependencies_reference() or "main"
    return configured.get(reference)
def get_outbound_firewall(self) -> Optional[str]:
    """The security->outbound_firewall setting ('block' or 'monitor'),
    or None when not configured."""
    security_config = self.config_dict.get("security")
    return security_config.get("outbound_firewall") if security_config else None
def color_text(color: str, text: str) -> str:
    """Wrap *text* in the given ANSI color code.

    Honors the NO_COLOR environment variable: when it is "1", the text
    is returned unchanged.

    :param color: ANSI color code
    :param text: a string
    :return: a string with ANSI color encoding
    """
    if os.getenv("NO_COLOR", "0") == "1":
        return text
    # Re-apply our color every time the text resets to the default color,
    # then wrap the whole string in color/default.
    recolored = text.replace(PaastaColors.DEFAULT, PaastaColors.DEFAULT + color)
    return f"{color}{recolored}{PaastaColors.DEFAULT}"
def get_git_url(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> str:
    """Get the git url for a service.

    When the service config does not define git_url, falls back to the
    conventional location: the service's repo matches its name and lives
    under services- (e.g. 'test' -> [email protected]:services/test).

    :param service: The service name to get a URL for
    :returns: A git url to the service's repository
    """
    service_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    # TODO: PAASTA-16927: get this from system config `.git_config`
    fallback = format_git_url("git", "github.yelpcorp.com", f"services/{service}")
    return service_config.get("git_url", fallback)
def register_log_writer(name: str) -> Callable[[_LogWriterTypeT], _LogWriterTypeT]:
    """Decorator factory: register a log writer class under *name* so
    get_log_writer_class can find it later."""
    def register(log_writer_class: _LogWriterTypeT) -> _LogWriterTypeT:
        _log_writer_classes[name] = log_writer_class
        return log_writer_class
    return register
def configure_log() -> None:
    """Point the module-level log writer at the yocalhost-bound scribe,
    using the driver and options from the system paasta config."""
    global _log_writer
    writer_config = load_system_paasta_config().get_log_writer()
    writer_cls = get_log_writer_class(writer_config["driver"])
    _log_writer = writer_cls(**writer_config.get("options", {}))
def format_log_line(
    level: str,
    cluster: str,
    service: str,
    instance: str,
    component: str,
    line: str,
    timestamp: Optional[str] = None,  # was `str = None`: implicit Optional (PEP 484)
) -> str:
    """Serialize a log line plus its metadata to a JSON string.

    :param level: log level string, e.g. 'event' or 'debug'
    :param component: log component; validated via validate_log_component,
        which raises for unknown components
    :param line: the raw message; stripped of ANSI escape sequences
    :param timestamp: optional preformatted timestamp; defaults to now
    :returns: a JSON document (sorted keys) containing 'line' and metadata
    """
    validate_log_component(component)
    if not timestamp:
        timestamp = _now()
    line = remove_ansi_escape_sequences(line.strip())
    return json.dumps(
        {
            "timestamp": timestamp,
            "level": level,
            "cluster": cluster,
            "service": service,
            "instance": instance,
            "component": component,
            "message": line,
        },
        sort_keys=True,
    )
def format_audit_log_line(
    cluster: str,
    instance: str,
    user: str,
    host: str,
    action: str,
    action_details: Optional[dict] = None,  # was `dict = None`: implicit Optional (PEP 484)
    service: Optional[str] = None,
    timestamp: Optional[str] = None,
) -> str:
    """Serialize an audit event to a JSON string.

    :param user: the user that initiated the action
    :param host: the server where the user initiated the action
    :param action: an action performed by paasta_tools
    :param action_details: optional extra information about the action
    :param timestamp: optional preformatted timestamp; defaults to now
    :returns: a JSON document (sorted keys) describing the action
        performed on a service/instance
    """
    if not timestamp:
        timestamp = _now()
    if not action_details:
        action_details = {}
    return json.dumps(
        {
            "timestamp": timestamp,
            "cluster": cluster,
            "service": service,
            "instance": instance,
            "user": user,
            "host": host,
            "action": action,
            "action_details": action_details,
        },
        sort_keys=True,
    )
def log(
    self,
    service: str,
    line: str,
    component: str,
    level: str = DEFAULT_LOGLEVEL,
    cluster: str = ANY_CLUSTER,
    instance: str = ANY_INSTANCE,
) -> None:
    """Echo the line to the console and forward it to scribe via clog.

    Expects someone (currently the paasta cli main()) to have already
    configured the clog object; this just writes to it.

    :raises NoSuchLogLevel: for levels other than 'event' or 'debug'
    """
    console_line = f"[service {service}] {line}"
    if level == "event":
        print(console_line, file=sys.stdout)
    elif level == "debug":
        print(console_line, file=sys.stderr)
    else:
        raise NoSuchLogLevel
    clog.log_line(
        get_log_name_for_service(service),
        format_log_line(level, cluster, service, instance, component, line),
    )
def timed_flock(fd: _AnyIO, seconds: int = 1) -> Iterator[None]:
    """Grab an exclusive flock on *fd*, raising TimeoutError (via Timeout)
    if it cannot be obtained within *seconds*.

    Only the flock acquisition runs under the timeout; the caller's code
    inside the ``with`` block does not.
    """
    lock_ctx = flock(fd)
    with Timeout(seconds=seconds):
        lock_ctx.__enter__()
    try:
        yield
    finally:
        # Mirror the current exception state into the lock's __exit__,
        # exactly as a ``with`` statement would.
        lock_ctx.__exit__(*sys.exc_info())
def _timeout(process: Popen) -> None:
"""Helper function for _run. It terminates the process.
Doesn't raise OSError, if we try to terminate a non-existing
process as there can be a very small window between poll() and kill()
"""
if process.poll() is None:
try:
# sending SIGKILL to the process
process.kill()
except OSError as e:
# No such process error
# The process could have been terminated meanwhile
if e.errno != errno.ESRCH:
raise | Helper function for _run. It terminates the process.
Doesn't raise OSError, if we try to terminate a non-existing
process as there can be a very small window between poll() and kill()
| _timeout | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_readable_files_in_glob(glob: str, path: str) -> List[str]:
    """Recursively walk *path* and return a sorted list of readable files
    whose full path matches *glob*."""
    candidates = [
        os.path.join(root, name)
        for root, _, files in os.walk(path)
        for name in files
    ]
    return sorted(
        fn
        for fn in candidates
        if os.path.isfile(fn) and os.access(fn, os.R_OK) and fnmatch(fn, glob)
    )
def load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """Read every *.json config under *path* in lexicographical order and
    deep-merge them (last file wins) into one SystemPaastaConfig.

    :raises PaastaNotConfiguredError: when the directory is missing or
        unreadable, or when a config file cannot be read
    """
    if not os.path.isdir(path):
        raise PaastaNotConfiguredError(
            "Could not find system paasta configuration directory: %s" % path
        )
    if not os.access(path, os.R_OK):
        raise PaastaNotConfiguredError(
            "Could not read from system paasta configuration directory: %s" % path
        )
    try:
        readable = get_readable_files_in_glob(glob="*.json", path=path)
        file_stats = frozenset((fn, os.stat(fn)) for fn in readable)
        return parse_system_paasta_config(file_stats, path)
    except IOError as e:
        raise PaastaNotConfiguredError(
            f"Could not load system paasta config file {e.filename}: {e.strerror}"
        )
def optionally_load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """Best-effort variant of load_system_paasta_config: returns an empty
    configuration instead of raising when none is available."""
    try:
        return load_system_paasta_config(path=path)
    except PaastaNotConfiguredError:
        return SystemPaastaConfig({}, "")
def parse_system_paasta_config(
    file_stats: FrozenSet[Tuple[str, os.stat_result]], path: str
) -> "SystemPaastaConfig":
    """Deep-merge the JSON contents of the given (filename, stat) pairs
    into a single SystemPaastaConfig rooted at *path*."""
    merged: SystemPaastaConfigDict = {}
    for config_file, _ in file_stats:
        with open(config_file) as fp:
            merged = deep_merge_dictionaries(
                json.load(fp), merged, allow_duplicate_keys=False
            )
    return SystemPaastaConfig(merged, path)
def get_default_spark_driver_pool_override(self) -> str:
    """The pool Spark drivers should run in: the configured
    default_spark_driver_pool_override if defined, otherwise the default
    Spark driver pool.

    :returns: The default_spark_driver_pool_override specified in the
        paasta configuration
    """
    if "default_spark_driver_pool_override" in self.config_dict:
        return self.config_dict["default_spark_driver_pool_override"]
    return DEFAULT_SPARK_DRIVER_POOL
def get_zk_hosts(self) -> str:
    """Zookeeper connection string from this host's cluster config, with
    any leading zk:// scheme stripped for use with Kazoo.

    :returns: The zk_hosts specified in the paasta configuration
    :raises PaastaNotConfiguredError: when no zookeeper entry exists
    """
    if "zookeeper" not in self.config_dict:
        raise PaastaNotConfiguredError(
            "Could not find zookeeper connection string in configuration directory: %s"
            % self.directory
        )
    hosts = self.config_dict["zookeeper"]
    # Strip the scheme prefix, if present.
    return hosts[len("zk://"):] if hosts.startswith("zk://") else hosts
def get_system_docker_registry(self) -> str:
    """The docker_registry from this host's cluster config.

    :returns: The docker_registry specified in the paasta configuration
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "docker_registry" in self.config_dict:
        return self.config_dict["docker_registry"]
    raise PaastaNotConfiguredError(
        "Could not find docker registry in configuration directory: %s"
        % self.directory
    )
def get_hacheck_sidecar_volumes(self) -> List[DockerVolume]:
    """Volumes for the hacheck sidecar from the system config, passed
    through _reorder_docker_volumes.

    :returns: The list of volumes specified in the paasta configuration
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "hacheck_sidecar_volumes" not in self.config_dict:
        raise PaastaNotConfiguredError(
            "Could not find hacheck_sidecar_volumes in configuration directory: %s"
            % self.directory
        )
    return _reorder_docker_volumes(list(self.config_dict["hacheck_sidecar_volumes"]))
def get_volumes(self) -> Sequence[DockerVolume]:
    """Volumes from this host's volumes config.

    :returns: The list of volumes specified in the paasta configuration
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "volumes" in self.config_dict:
        return self.config_dict["volumes"]
    raise PaastaNotConfiguredError(
        "Could not find volumes in configuration directory: %s" % self.directory
    )
def get_cluster(self) -> str:
    """The cluster name from this host's cluster config.

    :returns: The name of the cluster defined in the paasta configuration
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "cluster" in self.config_dict:
        return self.config_dict["cluster"]
    raise PaastaNotConfiguredError(
        "Could not find cluster in configuration directory: %s" % self.directory
    )
def get_log_writer(self) -> LogWriterConfig:
    """The log_writer configuration from the global paasta config.

    :returns: The log_writer dictionary.
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "log_writer" in self.config_dict:
        return self.config_dict["log_writer"]
    raise PaastaNotConfiguredError(
        "Could not find log_writer in configuration directory: %s"
        % self.directory
    )
def get_log_reader(self) -> LogReaderConfig:
    """The log_reader configuration from the global paasta config.

    :returns: the log_reader dictionary.
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "log_reader" in self.config_dict:
        return self.config_dict["log_reader"]
    raise PaastaNotConfiguredError(
        "Could not find log_reader in configuration directory: %s"
        % self.directory
    )
def get_log_readers(self) -> List[LogReaderConfig]:
    """The log_readers configuration from the global paasta config.

    :returns: the log_readers list of dicts.
    :raises PaastaNotConfiguredError: when the key is missing
    """
    if "log_readers" in self.config_dict:
        return self.config_dict["log_readers"]
    raise PaastaNotConfiguredError(
        "Could not find log_readers in configuration directory: %s"
        % self.directory
    )
def get_metrics_provider(self) -> Optional[str]:
    """Identifier of the metrics provider, preferring the
    deployd_metrics_provider key over metrics_provider when both exist.

    :returns: A string identifying the metrics_provider, or None
    """
    # Note: an explicit None under deployd_metrics_provider falls through
    # to metrics_provider, matching the original is-not-None check.
    legacy = self.config_dict.get("deployd_metrics_provider")
    if legacy is not None:
        return legacy
    return self.config_dict.get("metrics_provider")
def get_synapse_haproxy_url_format(self) -> str:
    """Format string for the haproxy-synapse status URL, taking two
    keyword arguments (host and port). Defaults to
    "http://{host:s}:{port:d}/;csv;norefresh".

    :returns: A format string for constructing the URL of
        haproxy-synapse's status page.
    """
    if "synapse_haproxy_url_format" in self.config_dict:
        return self.config_dict["synapse_haproxy_url_format"]
    return DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT
def get_envoy_admin_endpoint_format(self) -> str:
    """Format string (host, port, endpoint) for URLs on Envoy's admin
    interface."""
    default_format = "http://{host:s}:{port:d}/{endpoint:s}"
    return self.config_dict.get("envoy_admin_endpoint_format", default_format)
def get_envoy_admin_port(self) -> int:
    """Resolve the port Envoy's admin interface listens on by looking up
    the configured service name (default "envoy-admin") in /etc/services."""
    service_name = self.config_dict.get("envoy_admin_domain_name", "envoy-admin")
    return socket.getservbyname(service_name)
def get_git_config(self) -> Dict:
    """Git configuration: the git user plus repo names and their
    git/deploy servers, with a yelpsoa-configs default when unset.

    :returns: the git config dict
    """
    if "git_config" in self.config_dict:
        return self.config_dict["git_config"]
    return {
        "git_user": "git",
        "repos": {
            "yelpsoa-configs": {
                "repo_name": "yelpsoa-configs",
                "git_server": DEFAULT_SOA_CONFIGS_GIT_URL,
                "deploy_server": DEFAULT_SOA_CONFIGS_GIT_URL,
            },
        },
    }
def get_gunicorn_exporter_sidecar_image_url(self) -> str:
    """Docker image URL for the gunicorn_exporter sidecar container."""
    if "gunicorn_exporter_sidecar_image_url" in self.config_dict:
        return self.config_dict["gunicorn_exporter_sidecar_image_url"]
    return "docker-paasta.yelpcorp.com:443/gunicorn_exporter-k8s-sidecar:v0.24.0-yelp0"
def get_readiness_check_prefix_template(self) -> List[str]:
    """Command prefix prepended to readiness check commands (e.g. flock
    and timeout).

    Background (PAASTA-17673): in k8s 1.18 the probe timeout was not
    respected at all, and after upgrading to 1.20 it was only partially
    respected — k8s stopped waiting but did not kill the command inside
    the container (dockershim CRI). flock prevents multiple readiness
    probes from running at once and eating CPU; the generous timeout
    lets slow probes finish while guaranteeing a truly-stuck command is
    eventually killed so another attempt can run. Once off dockershim,
    the probe timeout likely needs raising and this wrapper can go away.
    """
    if "readiness_check_prefix_template" in self.config_dict:
        return self.config_dict["readiness_check_prefix_template"]
    return ["flock", "-n", "/readiness_check_lock", "timeout", "120"]
def _run(
command: Union[str, List[str]],
env: Mapping[str, str] = os.environ,
timeout: float = None,
log: bool = False,
stream: bool = False,
stdin: Any = None,
stdin_interrupt: bool = False,
popen_kwargs: Dict = {},
**kwargs: Any,
) -> Tuple[int, str]:
"""Given a command, run it. Return a tuple of the return code and any
output.
:param timeout: If specified, the command will be terminated after timeout
seconds.
:param log: If True, the _log will be handled by _run. If set, it is mandatory
to pass at least a :service: and a :component: parameter. Optionally you
can pass :cluster:, :instance: and :loglevel: parameters for logging.
We wanted to use plumbum instead of rolling our own thing with
subprocess.Popen but were blocked by
https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC
magic.
"""
output: List[str] = []
if log:
service = kwargs["service"]
component = kwargs["component"]
cluster = kwargs.get("cluster", ANY_CLUSTER)
instance = kwargs.get("instance", ANY_INSTANCE)
loglevel = kwargs.get("loglevel", DEFAULT_LOGLEVEL)
try:
if not isinstance(command, list):
command = shlex.split(command)
popen_kwargs["stdout"] = PIPE
popen_kwargs["stderr"] = STDOUT
popen_kwargs["stdin"] = stdin
popen_kwargs["env"] = env
process = Popen(command, **popen_kwargs)
if stdin_interrupt:
def signal_handler(signum: int, frame: FrameType) -> None:
process.stdin.write("\n".encode("utf-8"))
process.stdin.flush()
process.wait()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# start the timer if we specified a timeout
if timeout:
proctimer = threading.Timer(timeout, _timeout, [process])
proctimer.start()
outfn: Any = print if stream else output.append
for linebytes in iter(process.stdout.readline, b""):
line = linebytes.decode("utf-8", errors="replace").rstrip("\n")
outfn(line)
if log:
_log(
service=service,
line=line,
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
# when finished, get the exit code
process.wait()
returncode = process.returncode
except OSError as e:
if log:
_log(
service=service,
line=e.strerror.rstrip("\n"),
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
output.append(e.strerror.rstrip("\n"))
returncode = e.errno
except (KeyboardInterrupt, SystemExit):
# need to clean up the timing thread here
if timeout:
proctimer.cancel()
raise
else:
# Stop the timer
if timeout:
proctimer.cancel()
if returncode == -9:
output.append(f"Command '{command}' timed out (longer than {timeout}s)")
return returncode, "\n".join(output) | Given a command, run it. Return a tuple of the return code and any
output.
:param timeout: If specified, the command will be terminated after timeout
seconds.
:param log: If True, the _log will be handled by _run. If set, it is mandatory
to pass at least a :service: and a :component: parameter. Optionally you
can pass :cluster:, :instance: and :loglevel: parameters for logging.
We wanted to use plumbum instead of rolling our own thing with
subprocess.Popen but were blocked by
https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC
magic.
| _run | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_umask() -> int:
"""Get the current umask for this process. NOT THREAD SAFE."""
old_umask = os.umask(0o0022)
os.umask(old_umask)
return old_umask | Get the current umask for this process. NOT THREAD SAFE. | get_umask | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def compose_job_id(
name: str,
instance: str,
git_hash: Optional[str] = None,
config_hash: Optional[str] = None,
spacer: str = SPACER,
) -> str:
"""Compose a job/app id by concatenating its name, instance, git hash, and config hash.
:param name: The name of the service
:param instance: The instance of the service
:param git_hash: The git_hash portion of the job_id. If git_hash is set,
config_hash must also be set.
:param config_hash: The config_hash portion of the job_id. If config_hash
is set, git_hash must also be set.
:returns: <name><SPACER><instance> if no tag, or <name><SPACER><instance><SPACER><hashes>...
if extra hash inputs are provided.
"""
composed = f"{name}{spacer}{instance}"
if git_hash and config_hash:
composed = f"{composed}{spacer}{git_hash}{spacer}{config_hash}"
elif git_hash or config_hash:
raise InvalidJobNameError(
"invalid job id because git_hash (%s) and config_hash (%s) must "
"both be defined or neither can be defined" % (git_hash, config_hash)
)
return composed | Compose a job/app id by concatenating its name, instance, git hash, and config hash.
:param name: The name of the service
:param instance: The instance of the service
:param git_hash: The git_hash portion of the job_id. If git_hash is set,
config_hash must also be set.
:param config_hash: The config_hash portion of the job_id. If config_hash
is set, git_hash must also be set.
:returns: <name><SPACER><instance> if no tag, or <name><SPACER><instance><SPACER><hashes>...
if extra hash inputs are provided.
| compose_job_id | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def decompose_job_id(job_id: str, spacer: str = SPACER) -> Tuple[str, str, str, str]:
"""Break a composed job id into its constituent (service name, instance,
git hash, config hash) by splitting with ``spacer``.
:param job_id: The composed id of the job/app
:returns: A tuple (service name, instance, git hash, config hash) that
comprise the job_id
"""
decomposed = job_id.split(spacer)
if len(decomposed) == 2:
git_hash = None
config_hash = None
elif len(decomposed) == 4:
git_hash = decomposed[2]
config_hash = decomposed[3]
else:
raise InvalidJobNameError("invalid job id %s" % job_id)
return (decomposed[0], decomposed[1], git_hash, config_hash) | Break a composed job id into its constituent (service name, instance,
git hash, config hash) by splitting with ``spacer``.
:param job_id: The composed id of the job/app
:returns: A tuple (service name, instance, git hash, config hash) that
comprise the job_id
| decompose_job_id | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def build_docker_image_name(service: str) -> str:
"""docker-paasta.yelpcorp.com:443 is the URL for the Registry where PaaSTA
will look for your images.
:returns: a sanitized-for-Jenkins (s,/,-,g) version of the
service's path in git. E.g. For github.yelpcorp.com:services/foo the
docker image name is docker_registry/services-foo.
"""
docker_registry_url = get_service_docker_registry(service)
name = f"{docker_registry_url}/services-{service}"
return name | docker-paasta.yelpcorp.com:443 is the URL for the Registry where PaaSTA
will look for your images.
:returns: a sanitized-for-Jenkins (s,/,-,g) version of the
service's path in git. E.g. For github.yelpcorp.com:services/foo the
docker image name is docker_registry/services-foo.
| build_docker_image_name | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def check_docker_image(
service: str,
commit: str,
image_version: Optional[str] = None,
) -> bool:
"""Checks whether the given image for :service: with :tag: exists.
:raises: ValueError if more than one docker image with :tag: found.
:returns: True if there is exactly one matching image found.
"""
docker_client = get_docker_client()
image_name = build_docker_image_name(service)
docker_tag = build_docker_tag(service, commit, image_version)
images = docker_client.images(name=image_name)
# image['RepoTags'] may be None
# Fixed upstream but only in docker-py 2.
# https://github.com/docker/docker-py/issues/1401
result = [image for image in images if docker_tag in (image["RepoTags"] or [])]
if len(result) > 1:
raise ValueError(
f"More than one docker image found with tag {docker_tag}\n{result}"
)
return len(result) == 1 | Checks whether the given image for :service: with :tag: exists.
:raises: ValueError if more than one docker image with :tag: found.
:returns: True if there is exactly one matching image found.
| check_docker_image | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_files_of_type_in_dir(
file_type: str,
service: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[str]:
"""Recursively search path if type of file exists.
:param file_type: a string of a type of a file (kubernetes, slo, etc.)
:param service: a string of a service
:param soa_dir: a string of a path to a soa_configs directory
:return: a list
"""
# TODO: Only use INSTANCE_TYPES as input by making file_type Literal
service = "**" if service is None else service
soa_dir = DEFAULT_SOA_DIR if soa_dir is None else soa_dir
file_type += "-*.yaml"
return [
file_path
for file_path in glob.glob(
os.path.join(soa_dir, service, file_type),
recursive=True,
)
] | Recursively search path if type of file exists.
:param file_type: a string of a type of a file (kubernetes, slo, etc.)
:param service: a string of a service
:param soa_dir: a string of a path to a soa_configs directory
:return: a list
| get_files_of_type_in_dir | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def list_clusters(
service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> List[str]:
"""Returns a sorted list of clusters a service is configured to deploy to,
or all clusters if ``service`` is not specified.
Includes every cluster that has a ``kubernetes-*.yaml`` or ``tron-*.yaml`` file associated with it.
:param service: The service name. If unspecified, clusters running any service will be included.
:returns: A sorted list of cluster names
"""
clusters = set()
for cluster, _ in get_soa_cluster_deploy_files(
service=service, soa_dir=soa_dir, instance_type=instance_type
):
clusters.add(cluster)
return sorted(clusters) | Returns a sorted list of clusters a service is configured to deploy to,
or all clusters if ``service`` is not specified.
Includes every cluster that has a ``kubernetes-*.yaml`` or ``tron-*.yaml`` file associated with it.
:param service: The service name. If unspecified, clusters running any service will be included.
:returns: A sorted list of cluster names
| list_clusters | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_service_instance_list_no_cache(
service: str,
cluster: Optional[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
"""Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
"""
instance_types: Tuple[str, ...]
if not cluster:
cluster = load_system_paasta_config().get_cluster()
if instance_type in INSTANCE_TYPES:
instance_types = (instance_type,)
else:
instance_types = INSTANCE_TYPES
instance_list: List[Tuple[str, str]] = []
for srv_instance_type in instance_types:
instance_list.extend(
read_service_instance_names(
service=service,
instance_type=srv_instance_type,
cluster=cluster,
soa_dir=soa_dir,
)
)
log.debug("Enumerated the following instances: %s", instance_list)
return instance_list | Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
| get_service_instance_list_no_cache | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_service_instance_list(
service: str,
cluster: Optional[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
"""Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
"""
return get_service_instance_list_no_cache(
service=service, cluster=cluster, instance_type=instance_type, soa_dir=soa_dir
) | Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
| get_service_instance_list | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_services_for_cluster(
cluster: str = None, instance_type: str = None, soa_dir: str = DEFAULT_SOA_DIR
) -> List[Tuple[str, str]]:
"""Retrieve all services and instances defined to run in a cluster.
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (service, instance)
"""
if not cluster:
cluster = load_system_paasta_config().get_cluster()
rootdir = os.path.abspath(soa_dir)
log.debug(
"Retrieving all service instance names from %s for cluster %s", rootdir, cluster
)
instance_list: List[Tuple[str, str]] = []
for srv_dir in os.listdir(rootdir):
instance_list.extend(
get_service_instance_list(srv_dir, cluster, instance_type, soa_dir)
)
return instance_list | Retrieve all services and instances defined to run in a cluster.
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'kubernetes', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (service, instance)
| get_services_for_cluster | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_latest_deployment_tag(
refs: Dict[str, str], deploy_group: str
) -> Tuple[str, str, Optional[str]]:
"""Gets the latest deployment tag and sha for the specified deploy_group
:param refs: A dictionary mapping git refs to shas
:param deploy_group: The deployment group to return a deploy tag for
:returns: A tuple of the form (ref, sha, image_version) where ref is the
actual deployment tag (with the most recent timestamp), sha is
the sha it points at and image_version provides additional
version information about the image
"""
most_recent_dtime = None
most_recent_ref = None
most_recent_sha = None
most_recent_image_version = None
pattern = re.compile(
r"^refs/tags/paasta-%s(?:\+(?P<image_version>.*)){0,1}-(?P<dtime>\d{8}T\d{6})-deploy$"
% deploy_group
)
for ref_name, sha in refs.items():
match = pattern.match(ref_name)
if match:
gd = match.groupdict()
dtime = gd["dtime"]
if most_recent_dtime is None or dtime > most_recent_dtime:
most_recent_dtime = dtime
most_recent_ref = ref_name
most_recent_sha = sha
most_recent_image_version = gd["image_version"]
return most_recent_ref, most_recent_sha, most_recent_image_version | Gets the latest deployment tag and sha for the specified deploy_group
:param refs: A dictionary mapping git refs to shas
:param deploy_group: The deployment group to return a deploy tag for
:returns: A tuple of the form (ref, sha, image_version) where ref is the
actual deployment tag (with the most recent timestamp), sha is
the sha it points at and image_version provides additional
version information about the image
| get_latest_deployment_tag | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_config_hash(config: Any, force_bounce: str = None) -> str:
"""Create an MD5 hash of the configuration dictionary to be sent to
Kubernetes. Or anything really, so long as str(config) works. Returns
the first 8 characters so things are not really long.
:param config: The configuration to hash
:param force_bounce: a timestamp (in the form of a string) that is appended before hashing
that can be used to force a hash change
:returns: A MD5 hash of str(config)
"""
hasher = hashlib.md5()
hasher.update(
json.dumps(config, sort_keys=True).encode("UTF-8")
+ (force_bounce or "").encode("UTF-8")
)
return "config%s" % hasher.hexdigest()[:8] | Create an MD5 hash of the configuration dictionary to be sent to
Kubernetes. Or anything really, so long as str(config) works. Returns
the first 8 characters so things are not really long.
:param config: The configuration to hash
:param force_bounce: a timestamp (in the form of a string) that is appended before hashing
that can be used to force a hash change
:returns: A MD5 hash of str(config)
| get_config_hash | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_git_sha_from_dockerurl(docker_url: str, long: bool = False) -> str:
"""We encode the sha of the code that built a docker image *in* the docker
url. This function takes that url as input and outputs the sha.
"""
if ":paasta-" in docker_url:
deployment_version = get_deployment_version_from_dockerurl(docker_url)
git_sha = deployment_version.sha if deployment_version else ""
# Fall back to the old behavior if the docker_url does not follow the
# expected pattern
else:
parts = docker_url.split("/")
parts = parts[-1].split("-")
git_sha = parts[-1]
# Further ensure to only grab the image label in case not using paasta images
git_sha = git_sha.split(":")[-1]
return git_sha if long else git_sha[:8] | We encode the sha of the code that built a docker image *in* the docker
url. This function takes that url as input and outputs the sha.
| get_git_sha_from_dockerurl | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_image_version_from_dockerurl(docker_url: str) -> Optional[str]:
"""We can optionally encode additional metadata about the docker image *in*
the docker url. This function takes that url as input and outputs the sha.
"""
deployment_version = get_deployment_version_from_dockerurl(docker_url)
return deployment_version.image_version if deployment_version else None | We can optionally encode additional metadata about the docker image *in*
the docker url. This function takes that url as input and outputs the sha.
| get_image_version_from_dockerurl | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def is_under_replicated(
num_available: int, expected_count: int, crit_threshold: int
) -> Tuple[bool, float]:
"""Calculates if something is under replicated
:param num_available: How many things are up
:param expected_count: How many things you think should be up
:param crit_threshold: Int from 0-100
:returns: Tuple of (bool, ratio)
"""
if expected_count == 0:
ratio = 100.0
else:
ratio = (num_available / float(expected_count)) * 100
if ratio < int(crit_threshold):
return (True, ratio)
else:
return (False, ratio) | Calculates if something is under replicated
:param num_available: How many things are up
:param expected_count: How many things you think should be up
:param crit_threshold: Int from 0-100
:returns: Tuple of (bool, ratio)
| is_under_replicated | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def deploy_blacklist_to_constraints(
deploy_blacklist: DeployBlacklist,
) -> List[Constraint]:
"""Converts a blacklist of locations into tron appropriate constraints.
:param blacklist: List of lists of locations to blacklist
:returns: List of lists of constraints
"""
constraints: List[Constraint] = []
for blacklisted_location in deploy_blacklist:
constraints.append([blacklisted_location[0], "UNLIKE", blacklisted_location[1]])
return constraints | Converts a blacklist of locations into tron appropriate constraints.
:param blacklist: List of lists of locations to blacklist
:returns: List of lists of constraints
| deploy_blacklist_to_constraints | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def deploy_whitelist_to_constraints(
deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Converts a whitelist of locations into tron appropriate constraints
:param deploy_whitelist: List of lists of locations to whitelist
:returns: List of lists of constraints
"""
if deploy_whitelist is not None:
(region_type, regions) = deploy_whitelist
regionstr = "|".join(regions)
return [[region_type, "LIKE", regionstr]]
return [] | Converts a whitelist of locations into tron appropriate constraints
:param deploy_whitelist: List of lists of locations to whitelist
:returns: List of lists of constraints
| deploy_whitelist_to_constraints | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def format_table(
rows: Iterable[Union[str, Sequence[str]]], min_spacing: int = 2
) -> List[str]:
"""Formats a table for use on the command line.
:param rows: List of rows, each of which can either be a tuple of strings containing the row's values, or a string
to be inserted verbatim. Each row (except literal strings) should be the same number of elements as
all the others.
:returns: A string containing rows formatted as a table.
"""
list_rows = [r for r in rows if not isinstance(r, str)]
# If all of the rows are strings, we have nothing to do, so short-circuit.
if not list_rows:
return cast(List[str], rows)
widths = []
for i in range(len(list_rows[0])):
widths.append(max(terminal_len(r[i]) for r in list_rows))
expanded_rows = []
for row in rows:
if isinstance(row, str):
expanded_rows.append([row])
else:
expanded_row = []
for i, cell in enumerate(row):
if i == len(row) - 1:
padding = ""
else:
padding = " " * (widths[i] - terminal_len(cell))
expanded_row.append(cell + padding)
expanded_rows.append(expanded_row)
return [(" " * min_spacing).join(r) for r in expanded_rows] | Formats a table for use on the command line.
:param rows: List of rows, each of which can either be a tuple of strings containing the row's values, or a string
to be inserted verbatim. Each row (except literal strings) should be the same number of elements as
all the others.
:returns: A string containing rows formatted as a table.
| format_table | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def is_deploy_step(step: str) -> bool:
"""
Returns true if the given step deploys to an instancename
Returns false if the step is a predefined step-type, e.g. itest or command-*
"""
return not (
(step in DEPLOY_PIPELINE_NON_DEPLOY_STEPS) or (step.startswith("command-"))
) |
Returns true if the given step deploys to an instancename
Returns false if the step is a predefined step-type, e.g. itest or command-*
| is_deploy_step | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def ldap_user_search(
cn: str,
search_base: str,
search_ou: str,
ldap_host: str,
username: str,
password: str,
) -> Set[str]:
"""Connects to LDAP and raises a subclass of LDAPOperationResult when it fails"""
tls_config = ldap3.Tls(
validate=ssl.CERT_REQUIRED, ca_certs_file="/etc/ssl/certs/ca-certificates.crt"
)
server = ldap3.Server(ldap_host, use_ssl=True, tls=tls_config)
conn = ldap3.Connection(
server, user=username, password=password, raise_exceptions=True
)
conn.bind()
search_filter = f"(&(memberOf=CN={cn},{search_ou})(!(userAccountControl=514)))"
entries = conn.extend.standard.paged_search(
search_base=search_base,
search_scope=ldap3.SUBTREE,
search_filter=search_filter,
attributes=["sAMAccountName"],
paged_size=1000,
time_limit=10,
)
return {entry["attributes"]["sAMAccountName"] for entry in entries} | Connects to LDAP and raises a subclass of LDAPOperationResult when it fails | ldap_user_search | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def get_k8s_url_for_cluster(cluster: str) -> Optional[str]:
"""
Annoyingly, there's two layers of aliases: one to figure out what
k8s server url to use (this one) and another to figure out what
soaconfigs filename to use ;_;
This exists so that we can map something like `--cluster pnw-devc`
into spark-pnw-devc's k8s apiserver url without needing to update
any soaconfigs/alter folk's muscle memory.
Ideally we can get rid of this entirely once spark-run reads soaconfigs
in a manner more closely aligned to what we do with other paasta workloads
(i.e., have it automatically determine where to run based on soaconfigs
filenames - and not rely on explicit config)
"""
realized_cluster = (
load_system_paasta_config().get_eks_cluster_aliases().get(cluster, cluster)
)
return (
load_system_paasta_config()
.get_kube_clusters()
.get(realized_cluster, {})
.get("server")
) |
Annoyingly, there's two layers of aliases: one to figure out what
k8s server url to use (this one) and another to figure out what
soaconfigs filename to use ;_;
This exists so that we can map something like `--cluster pnw-devc`
into spark-pnw-devc's k8s apiserver url without needing to update
any soaconfigs/alter folk's muscle memory.
Ideally we can get rid of this entirely once spark-run reads soaconfigs
in a manner more closely aligned to what we do with other paasta workloads
(i.e., have it automatically determine where to run based on soaconfigs
filenames - and not rely on explicit config)
| get_k8s_url_for_cluster | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def maybe_load_previous_config(
filename: str, config_loader: Callable[[TextIO], dict]
) -> Optional[dict]:
"""Try to load configuration file
:param str filename: path to load from
:param Callable[[TextIO], dict] config_loader: parser for the configuration
:return: configuration data, None if loading fails
"""
try:
with open(filename, "r") as fp:
previous_config = config_loader(fp)
return previous_config
except Exception:
pass
return None | Try to load configuration file
:param str filename: path to load from
:param Callable[[TextIO], dict] config_loader: parser for the configuration
:return: configuration data, None if loading fails
| maybe_load_previous_config | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def write_json_configuration_file(filename: str, configuration: dict) -> None:
"""Atomically write configuration to JSON file
:param str filename: path to write to
:param dict configuration: configuration data
"""
with atomic_file_write(filename) as fp:
json.dump(
obj=configuration,
fp=fp,
indent=2,
sort_keys=True,
separators=(",", ": "),
) | Atomically write configuration to JSON file
:param str filename: path to write to
:param dict configuration: configuration data
| write_json_configuration_file | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def write_yaml_configuration_file(
filename: str, configuration: dict, check_existing: bool = True
) -> None:
"""Atomically write configuration to YAML file
:param str filename: path to write to
:param dict configuration: configuration data
:param bool check_existing: if existing file already matches config, do not overwrite
"""
if check_existing:
previous_config = maybe_load_previous_config(filename, yaml.safe_load)
if previous_config and previous_config == configuration:
return
with atomic_file_write(filename) as fp:
fp.write(
"# This file is automatically generated by paasta_tools.\n"
"# It was automatically generated at {now} on {host}.\n".format(
host=socket.getfqdn(), now=datetime.datetime.now().isoformat()
)
)
yaml.safe_dump(
configuration,
fp,
indent=2,
explicit_start=True,
default_flow_style=False,
allow_unicode=False,
) | Atomically write configuration to YAML file
:param str filename: path to write to
:param dict configuration: configuration data
:param bool check_existing: if existing file already matches config, do not overwrite
| write_yaml_configuration_file | python | Yelp/paasta | paasta_tools/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/utils.py | Apache-2.0 |
def __call__(self, request: Request) -> Response:
"""
Extracts relevant metadata from request, and checks if it is authorized
"""
token = request.headers.get("Authorization", "").strip()
token = token.split()[-1] if token else "" # removes "Bearer" prefix
auth_outcome = self.is_request_authorized(
request.path,
token,
request.method,
request.swagger_data.get("service", None),
)
if self.enforce and not auth_outcome.authorized:
return HTTPForbidden(
body=json.dumps({"reason": auth_outcome.reason}),
headers={"X-Auth-Failure-Reason": auth_outcome.reason},
content_type="application/json",
charset="utf-8",
)
return self.handler(request) |
Extracts relevant metadata from request, and checks if it is authorized
| __call__ | python | Yelp/paasta | paasta_tools/api/tweens/auth.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/api/tweens/auth.py | Apache-2.0 |
def is_request_authorized(
self,
path: str,
token: str,
method: str,
service: Optional[str],
) -> AuthorizationOutcome:
"""Check if API request is authorized
:param str path: API path
:param str token: authentication token
:param str method: http method
:return: auth outcome
"""
try:
response = self.session.post(
url=self.endpoint,
json={
"input": {
"path": path,
"backend": "paasta",
"token": token,
"method": method,
"service": service,
},
},
timeout=2,
).json()
except Exception as e:
logger.exception(f"Issue communicating with auth endpoint: {e}")
return AuthorizationOutcome(False, "Auth backend error")
auth_result_allowed = response.get("result", {}).get("allowed")
if auth_result_allowed is None:
return AuthorizationOutcome(False, "Malformed auth response")
if not auth_result_allowed:
reason = response["result"].get("reason", "Denied")
return AuthorizationOutcome(False, reason)
reason = response["result"].get("reason", "Ok")
return AuthorizationOutcome(True, reason) | Check if API request is authorized
:param str path: API path
:param str token: authentication token
:param str method: http method
:return: auth outcome
| is_request_authorized | python | Yelp/paasta | paasta_tools/api/tweens/auth.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/api/tweens/auth.py | Apache-2.0 |
def cprofile_tween_factory(handler, registry):
"""Tween for profiling API requests and sending them to scribe.
yelp_profiling does define a tween, but it is designed more for PaaSTA
services. So, we need to define our own.
"""
def cprofile_tween(request):
if yelp_profiling is None:
return handler(request)
config = PaastaCProfileConfig(registry.settings)
processor = YelpSOARequestProcessor(config, registry)
context_manager = CProfileContextManager(config, processor)
# uses the config and processor to decide whether or not to cprofile
# the request
with context_manager(request):
processor.begin_request(request)
status_code = 500
try:
response = handler(request)
status_code = response.status_code
return response
finally:
processor.end_request(request, status_code)
return cprofile_tween | Tween for profiling API requests and sending them to scribe.
yelp_profiling does define a tween, but it is designed more for PaaSTA
services. So, we need to define our own.
| cprofile_tween_factory | python | Yelp/paasta | paasta_tools/api/tweens/profiling.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/api/tweens/profiling.py | Apache-2.0 |
def set_autoscaling_override(request):
"""Set a temporary autoscaling override for a service/instance.
This endpoint creates or updates a ConfigMap entry with override information
including expiration time. The override will be applied by the autoscaler.
Required parameters:
- service: The service name
- instance: The instance name
- min_instances: The minimum number of instances to enforce
- expires_after: unix timestamp after which the override is no longer valid
"""
service = request.swagger_data.get("service")
instance = request.swagger_data.get("instance")
cluster = settings.cluster
soa_dir = settings.soa_dir
instance_config = get_instance_config(
service, instance, cluster, soa_dir, load_deployments=False
)
if not isinstance(instance_config, KubernetesDeploymentConfig):
error_message = (
f"Autoscaling is not supported for {service}.{instance} because instance type is not "
f"kubernetes."
)
raise ApiFailure(error_message, 501)
json_body = request.swagger_data.get("json_body", {})
min_instances_override = json_body.get("min_instances")
expire_after = json_body.get("expire_after")
if not isinstance(min_instances_override, int) or min_instances_override < 1:
raise ApiFailure("min_instances must be a positive integer", 400)
if not expire_after:
raise ApiFailure("expire_after is required", 400)
max_instances = instance_config.get_max_instances()
if max_instances is None:
raise ApiFailure(f"Autoscaling is not enabled for {service}.{instance}", 400)
if max_instances < min_instances_override:
raise ApiFailure(
f"min_instances ({min_instances_override}) cannot be greater than max_instances ({max_instances})",
400,
)
configmap, created = get_or_create_autoscaling_overrides_configmap()
if created:
log.info("Created new autoscaling overrides ConfigMap")
# i dunno why this is necessary, but a newly created configmap doesn't have a data field
# even when we set it in the create call
if not configmap.data:
configmap.data = {}
override_data = {
"min_instances": min_instances_override,
"created_at": datetime.now(timezone.utc).isoformat(),
# NOTE: we may want to also allow setting a max_instances override in the future, but if we do that
# we'd probably want to force folks to either set one or both and share the same expiration time
"expire_after": expire_after,
}
service_instance = f"{service}.{instance}"
existing_overrides = (
json.loads(configmap.data[service_instance])
if service_instance in configmap.data
else {}
)
merged_overrides = {**existing_overrides, **override_data}
serialized_overrides = json.dumps(merged_overrides)
patch_namespaced_configmap(
name=AUTOSCALING_OVERRIDES_CONFIGMAP_NAME,
namespace=AUTOSCALING_OVERRIDES_CONFIGMAP_NAMESPACE,
# this should only update the single entry for the $service.$instance key
# ain't k8s grand?
body={"data": {service_instance: serialized_overrides}},
kube_client=settings.kubernetes_client,
)
response_body = {
"service": service,
"instance": instance,
"cluster": cluster,
"min_instances": min_instances_override,
"expire_after": expire_after,
"status": "SUCCESS",
}
# NOTE: this is an HTTP 202 since actually updating the HPA happens asynchronously
# through setup_kubernetes_job
# XXX: should we try to patch things here as well?
return Response(json_body=response_body, status_code=202) | Set a temporary autoscaling override for a service/instance.
This endpoint creates or updates a ConfigMap entry with override information
including expiration time. The override will be applied by the autoscaler.
Required parameters:
- service: The service name
- instance: The instance name
- min_instances: The minimum number of instances to enforce
- expires_after: unix timestamp after which the override is no longer valid
| set_autoscaling_override | python | Yelp/paasta | paasta_tools/api/views/autoscaler.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/api/views/autoscaler.py | Apache-2.0 |
def api_failure_response(exc, request):
"""Construct an HTTP response with an error status code. This happens when
the API service has to stop on a 'hard' error. In contrast, the API service
continues to produce results on a 'soft' error. It will place a 'message'
field in the output. Multiple 'soft' errors are concatenated in the same
'message' field when errors happen in the same hierarchy.
"""
log.error(exc.msg)
response = Response("ERROR: %s" % exc.msg)
response.status_int = exc.err
return response | Construct an HTTP response with an error status code. This happens when
the API service has to stop on a 'hard' error. In contrast, the API service
continues to produce results on a 'soft' error. It will place a 'message'
field in the output. Multiple 'soft' errors are concatenated in the same
'message' field when errors happen in the same hierarchy.
| api_failure_response | python | Yelp/paasta | paasta_tools/api/views/exception.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/api/views/exception.py | Apache-2.0 |
def window_historical_load(historical_load, window_begin, window_end):
"""Filter historical_load down to just the datapoints lying between times window_begin and window_end, inclusive."""
filtered = []
for timestamp, value in historical_load:
if timestamp >= window_begin and timestamp <= window_end:
filtered.append((timestamp, value))
return filtered | Filter historical_load down to just the datapoints lying between times window_begin and window_end, inclusive. | window_historical_load | python | Yelp/paasta | paasta_tools/autoscaling/forecasting.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/autoscaling/forecasting.py | Apache-2.0 |
def moving_average_forecast_policy(
historical_load,
moving_average_window_seconds=DEFAULT_UWSGI_AUTOSCALING_MOVING_AVERAGE_WINDOW,
**kwargs,
):
"""Does a simple average of all historical load data points within the moving average window. Weights all data
points within the window equally."""
windowed_data = trailing_window_historical_load(
historical_load, moving_average_window_seconds
)
windowed_values = [value for timestamp, value in windowed_data]
return sum(windowed_values) / len(windowed_values) | Does a simple average of all historical load data points within the moving average window. Weights all data
points within the window equally. | moving_average_forecast_policy | python | Yelp/paasta | paasta_tools/autoscaling/forecasting.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/autoscaling/forecasting.py | Apache-2.0 |
def linreg_forecast_policy(
historical_load,
linreg_window_seconds,
linreg_extrapolation_seconds,
linreg_default_slope=0,
**kwargs,
):
"""Does a linear regression on the load data within the last linreg_window_seconds. For every time delta in
linreg_extrapolation_seconds, forecasts the value at that time delta from now, and returns the maximum of these
predicted values. (With linear extrapolation, it doesn't make sense to forecast at more than two points, as the max
load will always be at the first or last time delta.)
:param linreg_window_seconds: Consider all data from this many seconds ago until now.
:param linreg_extrapolation_seconds: A list of floats representing a number of seconds in the future at which to
predict the load. The highest prediction will be returned.
:param linreg_default_slope: If there is only one data point within the window, the equation for slope is undefined,
so we use this value (expressed in load/second) for prediction instead. Default is
0.
"""
window = trailing_window_historical_load(historical_load, linreg_window_seconds)
loads = [load for timestamp, load in window]
times = [timestamp for timestamp, load in window]
mean_time = sum(times) / len(times)
mean_load = sum(loads) / len(loads)
if len(window) > 1:
slope = sum((t - mean_time) * (l - mean_load) for t, l in window) / sum(
(t - mean_time) ** 2 for t in times
)
else:
slope = linreg_default_slope
intercept = mean_load - slope * mean_time
def predict(timestamp):
return slope * timestamp + intercept
if isinstance(linreg_extrapolation_seconds, (int, float)):
linreg_extrapolation_seconds = [linreg_extrapolation_seconds]
now, _ = historical_load[-1]
forecasted_values = [predict(now + delta) for delta in linreg_extrapolation_seconds]
return max(forecasted_values) | Does a linear regression on the load data within the last linreg_window_seconds. For every time delta in
linreg_extrapolation_seconds, forecasts the value at that time delta from now, and returns the maximum of these
predicted values. (With linear extrapolation, it doesn't make sense to forecast at more than two points, as the max
load will always be at the first or last time delta.)
:param linreg_window_seconds: Consider all data from this many seconds ago until now.
:param linreg_extrapolation_seconds: A list of floats representing a number of seconds in the future at which to
predict the load. The highest prediction will be returned.
:param linreg_default_slope: If there is only one data point within the window, the equation for slope is undefined,
so we use this value (expressed in load/second) for prediction instead. Default is
0.
| linreg_forecast_policy | python | Yelp/paasta | paasta_tools/autoscaling/forecasting.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/autoscaling/forecasting.py | Apache-2.0 |
def get_sso_auth_token(paasta_apis: bool = False) -> str:
"""Generate an authentication token for the calling user from the Single Sign On provider
:param bool paasta_apis: authenticate for PaaSTA APIs
"""
system_config = load_system_paasta_config()
client_id = (
system_config.get_api_auth_sso_oidc_client_id()
if paasta_apis
else system_config.get_service_auth_sso_oidc_client_id()
)
return get_and_cache_jwt_default(client_id) | Generate an authentication token for the calling user from the Single Sign On provider
:param bool paasta_apis: authenticate for PaaSTA APIs
| get_sso_auth_token | python | Yelp/paasta | paasta_tools/cli/authentication.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/authentication.py | Apache-2.0 |
def load_method(module_name, method_name):
"""Return a function given a module and method name.
:param module_name: a string
:param method_name: a string
:return: a function
"""
module = __import__(module_name, fromlist=[method_name])
method = getattr(module, method_name)
return method | Return a function given a module and method name.
:param module_name: a string
:param method_name: a string
:return: a function
| load_method | python | Yelp/paasta | paasta_tools/cli/cli.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py | Apache-2.0 |
def add_subparser(command, subparsers):
"""Given a command name, paasta_cmd, execute the add_subparser method
implemented in paasta_cmd.py.
Each paasta client command must implement a method called add_subparser.
This allows the client to dynamically add subparsers to its subparser, which
provides the benefits of argcomplete/argparse but gets it done in a modular
fashion.
:param command: a simple string - e.g. 'list'
:param subparsers: an ArgumentParser object"""
module_name = "paasta_tools.cli.cmds.%s" % command
add_subparser_fn = load_method(module_name, "add_subparser")
add_subparser_fn(subparsers) | Given a command name, paasta_cmd, execute the add_subparser method
implemented in paasta_cmd.py.
Each paasta client command must implement a method called add_subparser.
This allows the client to dynamically add subparsers to its subparser, which
provides the benefits of argcomplete/argparse but gets it done in a modular
fashion.
:param command: a simple string - e.g. 'list'
:param subparsers: an ArgumentParser object | add_subparser | python | Yelp/paasta | paasta_tools/cli/cli.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py | Apache-2.0 |
def get_argparser(commands=None):
"""Create and return argument parser for a set of subcommands.
:param commands: Union[None, List[str]] If `commands` argument is `None`,
add full parsers for all subcommands, if `commands` is empty list -
add thin parsers for all subcommands, otherwise - add full parsers for
subcommands in the argument.
"""
parser = PrintsHelpOnErrorArgumentParser(
description=(
"The PaaSTA command line tool. The 'paasta' command is the entry point "
"to multiple subcommands, see below.\n\n"
"You can see more help for individual commands by appending them with '--help', "
"for example, 'paasta status --help' or see the man page with 'man paasta status'."
),
epilog=(
"The 'paasta' command line tool is designed to be used by humans, and therefore has "
"command line completion for almost all options and uses pretty formatting when "
"possible."
),
# Suppressing usage prevents it from being printed twice upon print_help
usage=argparse.SUPPRESS,
)
# http://stackoverflow.com/a/8521644/812183
parser.add_argument(
"-V",
"--version",
action="version",
version=f"paasta-tools {paasta_tools.__version__}",
)
subparsers = parser.add_subparsers(dest="command", metavar="")
subparsers.required = True
# Adding a separate help subparser allows us to respond to "help" without --help
help_parser = subparsers.add_parser(
"help", help=f"run `paasta <subcommand> -h` for help"
)
help_parser.set_defaults(command=None)
# Build a list of subcommands to add them in alphabetical order later
command_choices: List[Tuple[str, Any]] = []
if commands is None:
for command in sorted(modules_in_pkg(cmds)):
command_choices.append(
(command, (add_subparser, [command, subparsers], {}))
)
elif commands:
for command in commands:
if command not in PAASTA_SUBCOMMANDS:
# could be external subcommand
continue
command_choices.append(
(
command,
(add_subparser, [PAASTA_SUBCOMMANDS[command], subparsers], {}),
)
)
else:
for command in PAASTA_SUBCOMMANDS.keys():
command_choices.append(
(
command,
(subparsers.add_parser, [command], dict(help="", add_help=False)),
)
)
for command in list_external_commands():
command_choices.append(
(command, (subparsers.add_parser, [command], dict(help="")))
)
for (_, (fn, args, kwds)) in sorted(command_choices, key=lambda e: e[0]):
fn(*args, **kwds)
return parser | Create and return argument parser for a set of subcommands.
:param commands: Union[None, List[str]] If `commands` argument is `None`,
add full parsers for all subcommands, if `commands` is empty list -
add thin parsers for all subcommands, otherwise - add full parsers for
subcommands in the argument.
| get_argparser | python | Yelp/paasta | paasta_tools/cli/cli.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py | Apache-2.0 |
def parse_args(argv):
"""Initialize autocompletion and configure the argument parser.
:return: an argparse.Namespace object mapping parameter names to the inputs
from sys.argv
"""
parser = get_argparser(commands=[])
argcomplete.autocomplete(parser)
args, _ = parser.parse_known_args(argv)
if args.command:
parser = get_argparser(commands=[args.command])
argcomplete.autocomplete(parser)
return parser.parse_args(argv), parser | Initialize autocompletion and configure the argument parser.
:return: an argparse.Namespace object mapping parameter names to the inputs
from sys.argv
| parse_args | python | Yelp/paasta | paasta_tools/cli/cli.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py | Apache-2.0 |
def main(argv=None):
"""Perform a paasta call. Read args from sys.argv and pass parsed args onto
appropriate command in paasta_cli/cmds directory.
Ensure we kill any child pids before we quit
"""
logging.basicConfig()
warnings.filterwarnings("ignore", category=DeprecationWarning)
# if we are an external command, we need to exec out early.
# The reason we exec out early is so we don't bother trying to parse
# "foreign" arguments, which would cause a stack trace.
if calling_external_command():
exec_subcommand(sys.argv)
try:
args, parser = parse_args(argv)
if args.command is None:
parser.print_help()
return_code = 0
else:
return_code = args.command(args)
except KeyboardInterrupt:
return_code = 130
sys.exit(return_code) | Perform a paasta call. Read args from sys.argv and pass parsed args onto
appropriate command in paasta_cli/cmds directory.
Ensure we kill any child pids before we quit
| main | python | Yelp/paasta | paasta_tools/cli/cli.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py | Apache-2.0 |
def make_copyfile_symlink_aware():
"""The reasoning behind this monkeypatch is that cookiecutter doesn't
respect symlinks at all, and at Yelp we use symlinks to reduce duplication
in the soa configs. Maybe cookie-cutter will accept a symlink-aware PR?
"""
orig_copyfile = shutil.copyfile
orig_copymode = shutil.copymode
def symlink_aware_copyfile(*args, **kwargs):
kwargs.setdefault("follow_symlinks", False)
orig_copyfile(*args, **kwargs)
def symlink_aware_copymode(*args, **kwargs):
kwargs.setdefault("follow_symlinks", False)
orig_copymode(*args, **kwargs)
shutil.copyfile = symlink_aware_copyfile
shutil.copymode = symlink_aware_copymode
try:
yield
finally:
shutil.copyfile = orig_copyfile
shutil.copymode = orig_copymode | The reasoning behind this monkeypatch is that cookiecutter doesn't
respect symlinks at all, and at Yelp we use symlinks to reduce duplication
in the soa configs. Maybe cookie-cutter will accept a symlink-aware PR?
| make_copyfile_symlink_aware | python | Yelp/paasta | paasta_tools/cli/fsm_cmd.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/fsm_cmd.py | Apache-2.0 |
def is_file_in_dir(file_name, path):
"""Recursively search path for file_name.
:param file_name: a string of a file name to find
:param path: a string path
:param file_ext: a string of a file extension
:return: a boolean
"""
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if fnmatch.fnmatch(filename, file_name):
return os.path.join(root, filename)
return False | Recursively search path for file_name.
:param file_name: a string of a file name to find
:param path: a string path
:param file_ext: a string of a file extension
:return: a boolean
| is_file_in_dir | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def validate_service_name(service, soa_dir=DEFAULT_SOA_DIR):
"""Determine whether directory named service exists in the provided soa_dir
:param service: a string of the name of the service you wish to check exists
:param soa_dir: directory to look for service names
:return : boolean True
:raises: NoSuchService exception
"""
if not service or not os.path.isdir(os.path.join(soa_dir, service)):
raise NoSuchService(service)
return True | Determine whether directory named service exists in the provided soa_dir
:param service: a string of the name of the service you wish to check exists
:param soa_dir: directory to look for service names
:return : boolean True
:raises: NoSuchService exception
| validate_service_name | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def list_paasta_services(soa_dir: str = DEFAULT_SOA_DIR):
"""Returns a sorted list of services that happen to have at
least one service.instance, which indicates it is on PaaSTA
"""
the_list = []
for service in list_services(soa_dir):
if list_all_instances_for_service(service, soa_dir=soa_dir):
the_list.append(service)
return the_list | Returns a sorted list of services that happen to have at
least one service.instance, which indicates it is on PaaSTA
| list_paasta_services | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def list_service_instances(soa_dir: str = DEFAULT_SOA_DIR):
"""Returns a sorted list of service<SPACER>instance names"""
the_list = []
for service in list_services(soa_dir):
for instance in list_all_instances_for_service(
service=service, soa_dir=soa_dir
):
the_list.append(compose_job_id(service, instance))
return the_list | Returns a sorted list of service<SPACER>instance names | list_service_instances | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def list_instances(**kwargs):
"""Returns a sorted list of all possible instance names
for tab completion. We try to guess what service you might be
operating on, otherwise we just provide *all* of them
"""
all_instances: Set[str] = set()
service = guess_service_name()
try:
validate_service_name(service)
all_instances = set(list_all_instances_for_service(service))
except NoSuchService:
for service in list_services():
for instance in list_all_instances_for_service(service):
all_instances.add(instance)
return sorted(all_instances) | Returns a sorted list of all possible instance names
for tab completion. We try to guess what service you might be
operating on, otherwise we just provide *all* of them
| list_instances | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def calculate_remote_masters(
cluster: str, system_paasta_config: SystemPaastaConfig
) -> Tuple[List[str], str]:
"""Given a cluster, do a DNS lookup of that cluster (which
happens to point, eventually, to the Mesos masters in that cluster).
Return IPs of those Mesos masters.
"""
cluster_fqdn = system_paasta_config.get_cluster_fqdn_format().format(
cluster=cluster
)
try:
_, _, ips = socket.gethostbyname_ex(cluster_fqdn)
output = None
except socket.gaierror as e:
output = f"ERROR while doing DNS lookup of {cluster_fqdn}:\n{e.strerror}\n "
ips = []
return (ips, output) | Given a cluster, do a DNS lookup of that cluster (which
happens to point, eventually, to the Mesos masters in that cluster).
Return IPs of those Mesos masters.
| calculate_remote_masters | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def check_ssh_on_master(master, timeout=10):
"""Given a master, attempt to ssh to the master and run a simple command
with sudo to verify that ssh and sudo work properly. Return a tuple of the
success status (True or False) and any output from attempting the check.
"""
check_command = "ssh -A -n -o StrictHostKeyChecking=no %s /bin/true" % master
rc, output = _run(check_command, timeout=timeout)
if rc == 0:
return (True, None)
if rc == 255: # ssh error
reason = "Return code was %d which probably means an ssh failure." % rc
hint = "HINT: Are you allowed to ssh to this machine %s?" % master
if rc == 1: # sudo error
reason = "Return code was %d which probably means a sudo failure." % rc
hint = "HINT: Is your ssh agent forwarded? (ssh-add -l)"
if rc == -9: # timeout error
reason = (
"Return code was %d which probably means ssh took too long and timed out."
% rc
)
hint = "HINT: Is there network latency? Try running somewhere closer to the cluster."
else: # unknown error
reason = "Return code was %d which is an unknown failure." % rc
hint = "HINT: Talk to #paasta and pastebin this output"
output = (
"ERROR cannot run check command %(check_command)s\n"
"%(reason)s\n"
"%(hint)s\n"
"Output from check command: %(output)s"
% {
"check_command": check_command,
"reason": reason,
"hint": hint,
"output": output,
}
)
return (False, output) | Given a master, attempt to ssh to the master and run a simple command
with sudo to verify that ssh and sudo work properly. Return a tuple of the
success status (True or False) and any output from attempting the check.
| check_ssh_on_master | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def run_on_master(
cluster,
system_paasta_config,
cmd_parts,
timeout=None,
err_code=-1,
graceful_exit=False,
stdin=None,
):
"""Find connectable master for :cluster: and :system_paasta_config: args and
invoke command from :cmd_parts:, wrapping it in ssh call.
:returns (exit code, output)
:param cluster: cluster to find master in
:param system_paasta_config: system configuration to lookup master data
:param cmd_parts: passed into paasta_tools.utils._run as command along with
ssh bits
:param timeout: see paasta_tools.utils._run documentation (default: None)
:param err_code: code to return along with error message when something goes
wrong (default: -1)
:param graceful_exit: wrap command in a bash script that waits for input and
kills the original command; trap SIGINT and send newline into stdin
"""
try:
master = connectable_master(cluster, system_paasta_config)
except NoMasterError as e:
return (err_code, str(e))
if graceful_exit:
# Signals don't travel over ssh, kill process when anything lands on stdin instead
# The procedure here is:
# 1. send process to background and capture it's pid
# 2. wait for stdin with timeout in a loop, exit when original process finished
# 3. kill original process if loop finished (something on stdin)
cmd_parts.append(
"& p=$!; "
+ "while ! read -t1; do ! kill -0 $p 2>/dev/null && kill $$; done; "
+ "kill $p; wait"
)
stdin = subprocess.PIPE
stdin_interrupt = True
popen_kwargs = {"preexec_fn": os.setsid}
else:
stdin_interrupt = False
popen_kwargs = {}
cmd_parts = [
"ssh",
"-q",
"-t",
"-t",
"-A",
master,
"sudo /bin/bash -c %s" % quote(" ".join(cmd_parts)),
]
log.debug("Running %s" % " ".join(cmd_parts))
return _run(
cmd_parts,
timeout=timeout,
stream=True,
stdin=stdin,
stdin_interrupt=stdin_interrupt,
popen_kwargs=popen_kwargs,
) | Find connectable master for :cluster: and :system_paasta_config: args and
invoke command from :cmd_parts:, wrapping it in ssh call.
:returns (exit code, output)
:param cluster: cluster to find master in
:param system_paasta_config: system configuration to lookup master data
:param cmd_parts: passed into paasta_tools.utils._run as command along with
ssh bits
:param timeout: see paasta_tools.utils._run documentation (default: None)
:param err_code: code to return along with error message when something goes
wrong (default: -1)
:param graceful_exit: wrap command in a bash script that waits for input and
kills the original command; trap SIGINT and send newline into stdin
| run_on_master | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def figure_out_service_name(args, soa_dir=DEFAULT_SOA_DIR):
"""Figures out and validates the input service name"""
service = args.service or guess_service_name()
try:
validate_service_name(service, soa_dir=soa_dir)
except NoSuchService as service_not_found:
print(service_not_found)
exit(1)
return service | Figures out and validates the input service name | figure_out_service_name | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def get_jenkins_build_output_url():
"""Returns the URL for Jenkins job's output.
Returns None if it's not available.
"""
build_output = os.environ.get("BUILD_URL")
if build_output:
build_output = build_output + "console"
return build_output | Returns the URL for Jenkins job's output.
Returns None if it's not available.
| get_jenkins_build_output_url | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def get_instance_config(
service: str,
instance: str,
cluster: str,
soa_dir: str = DEFAULT_SOA_DIR,
load_deployments: bool = False,
instance_type: Optional[str] = None,
) -> InstanceConfig:
"""Returns the InstanceConfig object for whatever type of instance
it is. (kubernetes)"""
if instance_type is None:
instance_type = validate_service_instance(
service=service, instance=instance, cluster=cluster, soa_dir=soa_dir
)
instance_config_loader = INSTANCE_TYPE_HANDLERS[instance_type].loader
if instance_config_loader is None:
raise NotImplementedError(
"instance is %s of type %s which is not supported by paasta"
% (instance, instance_type)
)
return instance_config_loader(
service=service,
instance=instance,
cluster=cluster,
load_deployments=load_deployments,
soa_dir=soa_dir,
) | Returns the InstanceConfig object for whatever type of instance
it is. (kubernetes) | get_instance_config | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def extract_tags(paasta_tag: str) -> Mapping[str, str]:
"""Returns a dictionary containing information from a git tag"""
regex = r"^refs/tags/(?:paasta-){1,2}(?P<deploy_group>[a-zA-Z0-9._-]+)(?:\+(?P<image_version>.*)){0,1}-(?P<tstamp>\d{8}T\d{6})-(?P<tag>.*?)$"
regex_match = re.match(regex, paasta_tag)
return regex_match.groupdict() if regex_match else {} | Returns a dictionary containing information from a git tag | extract_tags | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def validate_given_deploy_groups(
all_deploy_groups: Collection[str], args_deploy_groups: Collection[str]
) -> Tuple[Set[str], Set[str]]:
"""Given two lists of deploy groups, return the intersection and difference between them.
:param all_deploy_groups: instances actually belonging to a service
:param args_deploy_groups: the desired instances
:returns: a tuple with (common, difference) indicating deploy groups common in both
lists and those only in args_deploy_groups
"""
invalid_deploy_groups: Set[str]
valid_deploy_groups = set(args_deploy_groups).intersection(all_deploy_groups)
invalid_deploy_groups = set(args_deploy_groups).difference(all_deploy_groups)
return valid_deploy_groups, invalid_deploy_groups | Given two lists of deploy groups, return the intersection and difference between them.
:param all_deploy_groups: instances actually belonging to a service
:param args_deploy_groups: the desired instances
:returns: a tuple with (common, difference) indicating deploy groups common in both
lists and those only in args_deploy_groups
| validate_given_deploy_groups | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def pick_random_port(service_name):
"""Return a random port.
Tries to return the same port for the same service each time, when
possible.
"""
hash_key = f"{service_name},{getpass.getuser()}".encode("utf8")
hash_number = int(hashlib.sha1(hash_key).hexdigest(), 16)
preferred_port = 33000 + (hash_number % 25000)
return ephemeral_port_reserve.reserve("0.0.0.0", preferred_port) | Return a random port.
Tries to return the same port for the same service each time, when
possible.
| pick_random_port | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def trigger_deploys(
service: str,
system_config: Optional["SystemPaastaConfig"] = None,
) -> None:
"""Connects to the deploymentsd watcher on sysgit, which is an extremely simple
service that listens for a service string and then generates a service deployment"""
logline = f"Notifying soa-configs primary to generate a deployment for {service}"
_log(service=service, line=logline, component="deploy", level="event")
if not system_config:
system_config = load_system_paasta_config()
server = system_config.get_git_repo_config("yelpsoa-configs").get(
"deploy_server",
DEFAULT_SOA_CONFIGS_GIT_URL,
)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect((server, 5049))
client.send(f"{service}\n".encode("utf-8"))
finally:
client.close() | Connects to the deploymentsd watcher on sysgit, which is an extremely simple
service that listens for a service string and then generates a service deployment | trigger_deploys | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def verify_instances(
args_instances: str,
service: str,
clusters: Sequence[str],
soa_dir: str = DEFAULT_SOA_DIR,
) -> Sequence[str]:
"""Verify that a list of instances specified by user is correct for this service.
:param args_instances: a list of instances.
:param service: the service name
:param cluster: a list of clusters
:returns: a list of instances specified in args_instances without any exclusions.
"""
unverified_instances = args_instances.split(",")
service_instances: Set[str] = list_all_instances_for_service(
service, clusters=clusters, soa_dir=soa_dir
)
misspelled_instances: Sequence[str] = [
i for i in unverified_instances if i not in service_instances
]
if len(misspelled_instances) == 0:
return misspelled_instances
# Check for instances with suffixes other than Tron instances (i.e. Flink instances)
instances_without_suffixes = [x.split(".")[0] for x in unverified_instances]
misspelled_instances = [
i for i in instances_without_suffixes if i not in service_instances
]
if misspelled_instances:
suggestions: List[str] = []
for instance in misspelled_instances:
matches = difflib.get_close_matches(
instance, service_instances, n=5, cutoff=0.5
)
suggestions.extend(matches) # type: ignore
suggestions = list(set(suggestions))
if clusters:
message = "{} doesn't have any instances matching {} on {}.".format(
service,
", ".join(sorted(misspelled_instances)),
", ".join(sorted(clusters)),
)
else:
message = "{} doesn't have any instances matching {}.".format(
service, ", ".join(sorted(misspelled_instances))
)
print(PaastaColors.red(message))
if suggestions:
print("Did you mean any of these?")
for instance in sorted(suggestions):
print(" %s" % instance)
return misspelled_instances | Verify that a list of instances specified by user is correct for this service.
:param args_instances: a list of instances.
:param service: the service name
:param cluster: a list of clusters
:returns: a list of instances specified in args_instances without any exclusions.
| verify_instances | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def run_interactive_cli(
cmd: str, shell: str = "/bin/bash", term: str = "xterm-256color"
):
"""Runs interactive command in a pseudo terminal, handling terminal size management
:param str cmd: shell command
:param str shell: shell utility to use as wrapper
:param str term: terminal type
"""
cols, rows = shutil.get_terminal_size()
if not os.path.isabs(shell):
shell = shutil.which(shell)
wrapped_cmd = (
f"export SHELL={shell};"
f"export TERM={term};"
f"stty columns {cols} rows {rows};"
f"exec {cmd}"
)
pty.spawn([shell, "-c", wrapped_cmd]) | Runs interactive command in a pseudo terminal, handling terminal size management
:param str cmd: shell command
:param str shell: shell utility to use as wrapper
:param str term: terminal type
| run_interactive_cli | python | Yelp/paasta | paasta_tools/cli/utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py | Apache-2.0 |
def parse_duration_to_seconds(duration: str) -> Optional[int]:
"""Parse a duration string like '3h' or '30m' into seconds.
Args:
duration_str: A string representing a duration (e.g., "3h", "30m", "1d")
Returns:
The duration in seconds, or None if parsing failed
"""
if not duration:
return None
seconds = timeparse(duration)
return seconds | Parse a duration string like '3h' or '30m' into seconds.
Args:
duration_str: A string representing a duration (e.g., "3h", "30m", "1d")
Returns:
The duration in seconds, or None if parsing failed
| parse_duration_to_seconds | python | Yelp/paasta | paasta_tools/cli/cmds/autoscale.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/autoscale.py | Apache-2.0 |
def deploy_check(service_path):
"""Check whether deploy.yaml exists in service directory. Prints success or
error message.
:param service_path: path to a directory containing deploy.yaml"""
if is_file_in_dir("deploy.yaml", service_path):
print(PaastaCheckMessages.DEPLOY_YAML_FOUND)
else:
print(PaastaCheckMessages.DEPLOY_YAML_MISSING) | Check whether deploy.yaml exists in service directory. Prints success or
error message.
:param service_path: path to a directory containing deploy.yaml | deploy_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0 |
def docker_check():
"""Check whether Dockerfile exists in service directory, and is valid.
Prints suitable message depending on outcome"""
docker_file_path = is_file_in_dir("Dockerfile", os.getcwd())
if docker_file_path:
print(PaastaCheckMessages.DOCKERFILE_FOUND)
else:
print(PaastaCheckMessages.DOCKERFILE_MISSING) | Check whether Dockerfile exists in service directory, and is valid.
Prints suitable message depending on outcome | docker_check | python | Yelp/paasta | paasta_tools/cli/cmds/check.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.