index | package | name | docstring | code | signature
---|---|---|---|---|---|
725,544 | syne_tune.stopping_criterion | __call__ | null | def __call__(self, status: TuningStatus) -> bool:
if (
self.max_wallclock_time is not None
and status.wallclock_time > self.max_wallclock_time
):
logger.info(
f"reaching max wallclock time ({self.max_wallclock_time}), stopping there."
)
return True
if (
self.max_num_trials_started is not None
and status.num_trials_started > self.max_num_trials_started
):
logger.info(
f"reaching max number of trials started ({self.max_num_trials_started + 1}), stopping there."
)
return True
if (
self.max_num_trials_completed is not None
and status.num_trials_completed > self.max_num_trials_completed
):
logger.info(
f"reaching max number of trials completed ({self.max_num_trials_completed + 1}), stopping there."
)
return True
if (
self.max_num_trials_finished is not None
and status.num_trials_finished > self.max_num_trials_finished
):
logger.info(
f"reaching max number of trials finished ({self.max_num_trials_finished + 1}), stopping there."
)
return True
if self.max_cost is not None and status.cost > self.max_cost:
logger.info(f"reaching max cost ({self.max_cost}), stopping there.")
return True
if (
self.max_num_evaluations is not None
and status.overall_metric_statistics.count > self.max_num_evaluations
):
logger.info(
f"reaching {status.overall_metric_statistics.count + 1} evaluations, stopping there. "
)
return True
if (
self.max_metric_value is not None
and status.overall_metric_statistics.count > 0
):
max_metrics_observed = status.overall_metric_statistics.max_metrics
for metric, max_metric_accepted in self.max_metric_value.items():
if (
metric in max_metrics_observed
and max_metrics_observed[metric] > max_metric_accepted
):
logger.info(
f"found {metric} with value ({max_metrics_observed[metric]}), "
f"above the provided threshold {max_metric_accepted} stopping there."
)
return True
if (
self.min_metric_value is not None
and status.overall_metric_statistics.count > 0
):
min_metrics_observed = status.overall_metric_statistics.min_metrics
for metric, min_metric_accepted in self.min_metric_value.items():
if (
metric in min_metrics_observed
and min_metrics_observed[metric] < min_metric_accepted
):
logger.info(
f"found {metric} with value ({min_metrics_observed[metric]}), "
f"below the provided threshold {min_metric_accepted} stopping there."
)
return True
return False
| (self, status: syne_tune.tuning_status.TuningStatus) -> bool |
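The criterion above is evaluated by the tuning loop on each iteration and returns ``True`` as soon as any configured budget is exceeded. A minimal usage sketch, with arbitrary budget values:

```python
from syne_tune import StoppingCriterion

# Construct the criterion with one or more budgets and hand it to a Tuner,
# which calls it on every loop iteration with the current TuningStatus.
stop_criterion = StoppingCriterion(
    max_wallclock_time=3600,       # stop after one hour of tuning
    max_num_trials_completed=100,  # or once more than 100 trials completed
)
# Inside the tuning loop this reduces to: stop_criterion(tuning_status) -> bool
```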
725,545 | syne_tune.stopping_criterion | __eq__ | null | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import numpy as np
from dataclasses import dataclass
from typing import Optional, Dict
from syne_tune.tuning_status import TuningStatus
logger = logging.getLogger(__name__)
@dataclass
class StoppingCriterion:
"""
Stopping criterion that can be used in a Tuner, for instance
:code:`Tuner(stop_criterion=StoppingCriterion(max_wallclock_time=3600), ...)`.
If several arguments are used, the combined criterion is true whenever
one of the atomic criteria is true.
In principle, ``stop_criterion`` for ``Tuner`` can be any lambda function, but
this class should be used with remote launching in order to ensure
proper serialization.
:param max_wallclock_time: Stop once this wallclock time is reached
:param max_num_evaluations: Stop once more than this number of metric
records have been reported
:param max_num_trials_started: Stop once more than this number of trials
have been started
:param max_num_trials_completed: Stop once more than this number of trials
have been completed. This does not include trials which were stopped
or failed
:param max_cost: Stop once the total cost of evaluations is larger than this value
:param max_num_trials_finished: Stop once more than this number of trials
have finished (i.e., completed, stopped, failed, or stopping)
:param min_metric_value: Dictionary with thresholds for selected metrics.
Stop once an evaluation reports a metric value below a threshold
:param max_metric_value: Dictionary with thresholds for selected metrics.
Stop once an evaluation reports a metric value above a threshold
"""
max_wallclock_time: Optional[float] = None
max_num_evaluations: Optional[int] = None
max_num_trials_started: Optional[int] = None
max_num_trials_completed: Optional[int] = None
max_cost: Optional[float] = None
max_num_trials_finished: Optional[int] = None
# minimum value for metrics: any value below one of these thresholds triggers a stop
min_metric_value: Optional[Dict[str, float]] = None
# maximum value for metrics: any value above one of these thresholds triggers a stop
max_metric_value: Optional[Dict[str, float]] = None
# TODO: we should have unit tests for all these cases.
def __call__(self, status: TuningStatus) -> bool:
if (
self.max_wallclock_time is not None
and status.wallclock_time > self.max_wallclock_time
):
logger.info(
f"reaching max wallclock time ({self.max_wallclock_time}), stopping there."
)
return True
if (
self.max_num_trials_started is not None
and status.num_trials_started > self.max_num_trials_started
):
logger.info(
f"reaching max number of trials started ({self.max_num_trials_started + 1}), stopping there."
)
return True
if (
self.max_num_trials_completed is not None
and status.num_trials_completed > self.max_num_trials_completed
):
logger.info(
f"reaching max number of trials completed ({self.max_num_trials_completed + 1}), stopping there."
)
return True
if (
self.max_num_trials_finished is not None
and status.num_trials_finished > self.max_num_trials_finished
):
logger.info(
f"reaching max number of trials finished ({self.max_num_trials_finished + 1}), stopping there."
)
return True
if self.max_cost is not None and status.cost > self.max_cost:
logger.info(f"reaching max cost ({self.max_cost}), stopping there.")
return True
if (
self.max_num_evaluations is not None
and status.overall_metric_statistics.count > self.max_num_evaluations
):
logger.info(
f"reaching {status.overall_metric_statistics.count + 1} evaluations, stopping there. "
)
return True
if (
self.max_metric_value is not None
and status.overall_metric_statistics.count > 0
):
max_metrics_observed = status.overall_metric_statistics.max_metrics
for metric, max_metric_accepted in self.max_metric_value.items():
if (
metric in max_metrics_observed
and max_metrics_observed[metric] > max_metric_accepted
):
logger.info(
f"found {metric} with value ({max_metrics_observed[metric]}), "
f"above the provided threshold {max_metric_accepted} stopping there."
)
return True
if (
self.min_metric_value is not None
and status.overall_metric_statistics.count > 0
):
min_metrics_observed = status.overall_metric_statistics.min_metrics
for metric, min_metric_accepted in self.min_metric_value.items():
if (
metric in min_metrics_observed
and min_metrics_observed[metric] < min_metric_accepted
):
logger.info(
f"found {metric} with value ({min_metrics_observed[metric]}), "
f"below the provided threshold {min_metric_accepted} stopping there."
)
return True
return False
| (self, other) |
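The metric-based criteria compare against per-metric thresholds. A hedged sketch, where ``accuracy`` and ``loss`` stand in for whatever metric names the training script actually reports:

```python
from syne_tune import StoppingCriterion

# Stop as soon as any evaluation reports "accuracy" above 0.95, or "loss"
# below 0.05. Both metric names are placeholders; they must match the keys
# reported by the training script.
stop_criterion = StoppingCriterion(
    max_metric_value={"accuracy": 0.95},
    min_metric_value={"loss": 0.05},
)
```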
725,548 | syne_tune.tuner | Tuner |
Controller of the tuning loop, which manages the interplay between scheduler
and trial backend. The stopping criterion and the number of workers are also
maintained here.
:param trial_backend: Backend for trial evaluations
:param scheduler: Tuning algorithm for making decisions about which
trials to start, stop, pause, or resume
:param stop_criterion: Tuning stops when this predicate returns ``True``.
Called in each iteration with the current tuning status. It is
recommended to use :class:`StoppingCriterion`.
:param n_workers: Number of workers used here. Note that the backend
needs to support running (at least) this number of workers in
parallel
:param sleep_time: Time to sleep when all workers are busy. Defaults to
:const:`~syne_tune.constants.DEFAULT_SLEEP_TIME`
:param results_update_interval: Frequency at which results are updated and
stored (in seconds). Defaults to 10.
:param print_update_interval: Frequency at which result table is printed.
Defaults to 30.
:param max_failures: This many trial execution failures are allowed before
the tuning loop is aborted. Defaults to 1
:param tuner_name: Name associated with the tuning experiment, defaults to
the name of the entrypoint. Must consist of alphanumeric characters,
possibly separated by '-'. A postfix with a date time-stamp is added
to ensure uniqueness.
:param asynchronous_scheduling: Whether to use asynchronous scheduling
when scheduling new trials. If ``True``, trials are scheduled as soon as
a worker is available. If ``False``, the tuner waits until all trials
have finished before scheduling a new batch of size ``n_workers``.
Defaults to ``True``.
:param wait_trial_completion_when_stopping: How to deal with running
trials when stopping criterion is met. If ``True``, the tuner waits
until all trials are finished. If ``False``, all trials are terminated.
Defaults to ``False``.
:param callbacks: Called at certain times in the tuning loop, for example
when a result is seen. The default callback stores results every
``results_update_interval``.
:param metadata: Dictionary of user-metadata that will be persisted in
``{tuner_path}/{ST_METADATA_FILENAME}``, alongside entries added by the
tuner itself. ``ST_TUNER_CREATION_TIMESTAMP`` is always included, which
records the time-stamp when the tuner was created.
:param suffix_tuner_name: If ``True``, a timestamp is appended to the
provided ``tuner_name`` that ensures uniqueness, otherwise the name is
left unchanged and is expected to be unique. Defaults to ``True``.
:param save_tuner: If ``True``, the :class:`Tuner` object is serialized at
the end of tuning, including its dependencies (e.g., scheduler). This
allows all details of the experiment to be recovered. Defaults to
``True``.
:param start_jobs_without_delay: Defaults to ``True``. If this is ``True``, the tuner
starts new jobs depending on scheduler decisions communicated to the
backend. For example, if a trial has just been stopped (by calling
``backend.stop_trial``), the tuner may start a new one immediately, even
if the SageMaker training job is still busy due to stopping delays.
This can lead to faster experiment runtime, because the backend is
temporarily going over its budget.
If set to ``False``, the tuner always asks the backend for the number of
busy workers, which guarantees that we never go over the ``n_workers``
budget. This makes a difference for backends where stopping or pausing
trials is not immediate (e.g., :class:`SageMakerBackend`). Not going
over budget means that ``n_workers`` can be set up to the available quota,
without running the risk of an exception due to the quota being
exceeded. If you get such exceptions, we recommend using
``start_jobs_without_delay=False``. Also, if the SageMaker warm pool
feature is used, it is recommended to set
``start_jobs_without_delay=False``, since otherwise more than ``n_workers``
warm pools will be started, because existing ones are busy with
stopping when they should be reassigned.
:param trial_backend_path: If this is given, the path of ``trial_backend``
(where logs and checkpoints of trials are stored) is set to this.
Otherwise, it is set to ``self.tuner_path``, so that per-trial
information is written to the same path as tuning results.
If the backend is :class:`~syne_tune.backend.LocalBackend` and the
experiment is run remotely, we recommend setting this, since otherwise
checkpoints and logs are synced to S3, along with tuning results, which
is costly and error-prone.
| class Tuner:
"""
Controller of the tuning loop, which manages the interplay between scheduler
and trial backend. The stopping criterion and the number of workers are also
maintained here.
:param trial_backend: Backend for trial evaluations
:param scheduler: Tuning algorithm for making decisions about which
trials to start, stop, pause, or resume
:param stop_criterion: Tuning stops when this predicate returns ``True``.
Called in each iteration with the current tuning status. It is
recommended to use :class:`StoppingCriterion`.
:param n_workers: Number of workers used here. Note that the backend
needs to support running (at least) this number of workers in
parallel
:param sleep_time: Time to sleep when all workers are busy. Defaults to
:const:`~syne_tune.constants.DEFAULT_SLEEP_TIME`
:param results_update_interval: Frequency at which results are updated and
stored (in seconds). Defaults to 10.
:param print_update_interval: Frequency at which result table is printed.
Defaults to 30.
:param max_failures: This many trial execution failures are allowed before
the tuning loop is aborted. Defaults to 1
:param tuner_name: Name associated with the tuning experiment, defaults to
the name of the entrypoint. Must consist of alphanumeric characters,
possibly separated by '-'. A postfix with a date time-stamp is added
to ensure uniqueness.
:param asynchronous_scheduling: Whether to use asynchronous scheduling
when scheduling new trials. If ``True``, trials are scheduled as soon as
a worker is available. If ``False``, the tuner waits until all trials
have finished before scheduling a new batch of size ``n_workers``.
Defaults to ``True``.
:param wait_trial_completion_when_stopping: How to deal with running
trials when stopping criterion is met. If ``True``, the tuner waits
until all trials are finished. If ``False``, all trials are terminated.
Defaults to ``False``.
:param callbacks: Called at certain times in the tuning loop, for example
when a result is seen. The default callback stores results every
``results_update_interval``.
:param metadata: Dictionary of user-metadata that will be persisted in
``{tuner_path}/{ST_METADATA_FILENAME}``, alongside entries added by the
tuner itself. ``ST_TUNER_CREATION_TIMESTAMP`` is always included, which
records the time-stamp when the tuner was created.
:param suffix_tuner_name: If ``True``, a timestamp is appended to the
provided ``tuner_name`` that ensures uniqueness, otherwise the name is
left unchanged and is expected to be unique. Defaults to ``True``.
:param save_tuner: If ``True``, the :class:`Tuner` object is serialized at
the end of tuning, including its dependencies (e.g., scheduler). This
allows all details of the experiment to be recovered. Defaults to
``True``.
:param start_jobs_without_delay: Defaults to ``True``. If this is ``True``, the tuner
starts new jobs depending on scheduler decisions communicated to the
backend. For example, if a trial has just been stopped (by calling
``backend.stop_trial``), the tuner may start a new one immediately, even
if the SageMaker training job is still busy due to stopping delays.
This can lead to faster experiment runtime, because the backend is
temporarily going over its budget.
If set to ``False``, the tuner always asks the backend for the number of
busy workers, which guarantees that we never go over the ``n_workers``
budget. This makes a difference for backends where stopping or pausing
trials is not immediate (e.g., :class:`SageMakerBackend`). Not going
over budget means that ``n_workers`` can be set up to the available quota,
without running the risk of an exception due to the quota being
exceeded. If you get such exceptions, we recommend using
``start_jobs_without_delay=False``. Also, if the SageMaker warm pool
feature is used, it is recommended to set
``start_jobs_without_delay=False``, since otherwise more than ``n_workers``
warm pools will be started, because existing ones are busy with
stopping when they should be reassigned.
:param trial_backend_path: If this is given, the path of ``trial_backend``
(where logs and checkpoints of trials are stored) is set to this.
Otherwise, it is set to ``self.tuner_path``, so that per-trial
information is written to the same path as tuning results.
If the backend is :class:`~syne_tune.backend.LocalBackend` and the
experiment is run remotely, we recommend setting this, since otherwise
checkpoints and logs are synced to S3, along with tuning results, which
is costly and error-prone.
"""
def __init__(
self,
trial_backend: TrialBackend,
scheduler: TrialScheduler,
stop_criterion: Callable[[TuningStatus], bool],
n_workers: int,
sleep_time: float = TUNER_DEFAULT_SLEEP_TIME,
results_update_interval: float = 10.0,
print_update_interval: float = 30.0,
max_failures: int = 1,
tuner_name: Optional[str] = None,
asynchronous_scheduling: bool = True,
wait_trial_completion_when_stopping: bool = False,
callbacks: Optional[List[TunerCallback]] = None,
metadata: Optional[dict] = None,
suffix_tuner_name: bool = True,
save_tuner: bool = True,
start_jobs_without_delay: bool = True,
trial_backend_path: Optional[str] = None,
):
self.trial_backend = trial_backend
self.scheduler = scheduler
self.n_workers = n_workers
self.sleep_time = sleep_time
self.results_update_interval = results_update_interval
self.stop_criterion = stop_criterion
self.asynchronous_scheduling = asynchronous_scheduling
self.wait_trial_completion_when_stopping = wait_trial_completion_when_stopping
self.metadata = self._enrich_metadata(metadata)
self.save_tuner = save_tuner
self.start_jobs_without_delay = start_jobs_without_delay
self.max_failures = max_failures
self.print_update_interval = print_update_interval
if tuner_name is not None:
check_valid_sagemaker_name(tuner_name)
else:
tuner_name = Path(self.trial_backend.entrypoint_path()).stem.replace(
"_", "-"
)
if suffix_tuner_name or tuner_name is None:
self.name = name_from_base(tuner_name, default="st-tuner")
else:
self.name = tuner_name
# we keep track of the last result seen to send it to schedulers when trials complete.
self.last_seen_result_per_trial = {}
self.trials_scheduler_stopped = set()
self.tuner_path = Path(experiment_path(tuner_name=self.name))
# inform the backend of the Tuner's folder. This allows the local backend
# to store trial logs and tuner results in the same folder.
self.trial_backend.set_path(
results_root=str(self.tuner_path)
if trial_backend_path is None
else trial_backend_path,
tuner_name=self.name,
)
self._init_callbacks(callbacks)
self.tuning_status = None
self.tuner_saver = None
self.status_printer = None
self._initialize_early_checkpoint_removal()
def _init_callbacks(self, callbacks: Optional[List[TunerCallback]]):
if callbacks is None:
callbacks = [self._default_callback()]
else:
if not any(
isinstance(callback, StoreResultsCallback) for callback in callbacks
):
logger.warning(
"None of the callbacks provided are of type StoreResultsCallback. "
"This means no tuning results will be written."
)
self.callbacks: List[TunerCallback] = callbacks
def _initialize_early_checkpoint_removal(self):
"""
If the scheduler supports early checkpoint removal, the specific callback
for this is created here and appended to ``self.callbacks``.
"""
if self.trial_backend.delete_checkpoints:
callback = (
self.scheduler.callback_for_checkpoint_removal(self.stop_criterion)
if isinstance(self.scheduler, RemoveCheckpointsSchedulerMixin)
else None
)
if callback is not None:
self.callbacks.append(callback)
def run(self):
"""Launches the tuning."""
done_trials_statuses = OrderedDict()
try:
logger.info(f"results of trials will be saved on {self.tuner_path}")
if self.tuning_status is None:
self.tuning_status = TuningStatus(
metric_names=self.scheduler.metric_names()
)
# prints the status every ``print_update_interval`` seconds
self.status_printer = RegularCallback(
callback=lambda tuning_status: logger.info(
"tuning status (last metric is reported)\n" + str(tuning_status)
),
call_seconds_frequency=self.print_update_interval,
)
# saves the tuner every ``results_update_interval`` seconds
if self.save_tuner:
self.tuner_saver = RegularCallback(
callback=lambda tuner: tuner.save(),
call_seconds_frequency=self.results_update_interval,
)
self.metadata[ST_TUNER_START_TIMESTAMP] = time.time()
for callback in self.callbacks:
callback.on_tuning_start(self)
self.tuner_path.mkdir(exist_ok=True, parents=True)
self._save_metadata()
# ``running_trial_ids`` contains the ids of all trials currently running,
# whether they were started from scratch or resumed from a paused
# state
running_trials_ids = set()
config_space_exhausted = False
stop_condition_reached = self._stop_condition()
while (
# we stop when either the stop condition is reached
not stop_condition_reached
# or when all trials are done, if wait_trial_completion_when_stopping is activated
or self.wait_trial_completion_when_stopping
and len(running_trials_ids) > 0
):
for callback in self.callbacks:
callback.on_loop_start()
new_done_trial_statuses, new_results = self._process_new_results(
running_trials_ids=running_trials_ids,
)
if new_results and self.save_tuner:
# Save tuner state only if there have been new results
self.tuner_saver(tuner=self)
# update the list of done trials and remove those from ``running_trials_ids``
# Note: It is important to update ``running_trials_ids`` before
# calling ``_schedule_new_tasks``.
# Otherwise, a trial can be registered as paused in
# ``_process_new_results``, and immediately be resumed in
# ``_schedule_new_tasks``. If ``new_done_trial_statuses`` is subtracted from
# ``running_trials_ids`` afterwards only, this trial is removed from
# ``running_trials_ids`` even though it is running. Also, its status remains
# paused, because the next call of ``_process_new_results`` only considers
# trials in ``running_trials_ids``.
done_trials_statuses.update(new_done_trial_statuses)
running_trials_ids.difference_update(new_done_trial_statuses.keys())
if (
config_space_exhausted
or self.wait_trial_completion_when_stopping
and stop_condition_reached
):
# if the search space is exhausted, we loop until the running trials are done or until the
# stop condition is reached
if len(running_trials_ids) > 0:
if config_space_exhausted:
logger.debug(
f"Configuration space exhausted, waiting for completion of running trials "
f"{running_trials_ids}"
)
else:
logger.debug(
f"Stopping criterion reached, waiting for completion of running trials "
f"{running_trials_ids}"
)
self._sleep()
else:
break
else:
try:
self._schedule_new_tasks(running_trials_ids=running_trials_ids)
except StopIteration:
logger.info(
"Tuning is finishing as the whole configuration space got exhausted."
)
config_space_exhausted = True
print(
"Tuning is finishing as the whole configuration space got exhausted."
)
self.status_printer(self.tuning_status)
for callback in self.callbacks:
callback.on_loop_end()
stop_condition_reached = self._stop_condition()
except Exception as e:
logger.error(
"An error happened during the tuning, cleaning up resources and logging final resources "
"before throwing the exception."
)
raise e
finally:
# graceful termination block called when the tuner reached its stop condition, when an error happened or
# when the job got interrupted (can happen in spot-instances or when sending a SIGINT signal with ctrl+C).
# the block displays the best configuration found and stops trials that may still be running.
print_best_metric_found(
tuning_status=self.tuning_status,
metric_names=self.scheduler.metric_names(),
mode=self.scheduler.metric_mode(),
)
# Callbacks (typically includes writing final results)
for callback in self.callbacks:
callback.on_tuning_end()
# Serialize Tuner object
if self.save_tuner:
self.save()
logger.info("Stopping trials that may still be running.")
self.trial_backend.stop_all()
# notify tuning status that jobs were stopped without having to query their status in the backend since
# we know that all trials were stopped
self.tuning_status.mark_running_job_as_stopped()
# in case too many errors were triggered, show the log of a failed job and terminate with an error
if self.tuning_status.num_trials_failed > self.max_failures:
self._handle_failure(done_trials_statuses=done_trials_statuses)
logger.info(
f"Tuning finished, results of trials can be found on {self.tuner_path}"
)
def _sleep(self):
time.sleep(self.sleep_time)
for callback in self.callbacks:
callback.on_tuning_sleep(self.sleep_time)
@staticmethod
def _set_metadata(metadata: Dict[str, Any], name: str, value):
if name in metadata:
logger.warning(
f"Entry {name} in metadata is used, but will be overwritten:\n"
f"Old value: {metadata[name]}\n"
f"Overwrite: {value}\n"
)
metadata[name] = value
def _enrich_metadata(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
"""
:param metadata: Original metadata
:return: ``metadata`` enriched by default entries
"""
res = metadata if metadata is not None else dict()
self._set_metadata(res, ST_TUNER_CREATION_TIMESTAMP, time.time())
self._set_metadata(res, "entrypoint", self.trial_backend.entrypoint_path().stem)
self._set_metadata(res, "backend", str(type(self.trial_backend).__name__))
for key, value in self.scheduler.metadata().items():
self._set_metadata(res, key, value)
return res
def _save_metadata(self):
dump_json_with_numpy(self.metadata, self.tuner_path / ST_METADATA_FILENAME)
def _stop_condition(self) -> bool:
return (
self.stop_criterion(self.tuning_status)
or self.tuning_status.num_trials_failed > self.max_failures
)
def _process_new_results(
self, running_trials_ids: Set[int]
) -> (TrialAndStatusInformation, TrialIdAndResultList):
"""Communicates new results from the backend to the scheduler
Returns dictionary of trials which are not running, along with their
status, in ``done_trials_statuses``, and list of new results (tuples
``(trial_id, result)``), observed since the previous call, in
``new_results``.
:param running_trials_ids: Trials currently running
:return: ``(done_trials_statuses, new_results)``
"""
# fetch new results
trial_status_dict, new_results = self.trial_backend.fetch_status_results(
trial_ids=list(running_trials_ids)
)
for callback in self.callbacks:
callback.on_fetch_status_results(
trial_status_dict=trial_status_dict, new_results=new_results
)
assert len(running_trials_ids) <= self.n_workers
# Gets list of trials that are done with the new results.
# The trials can be finished for different reasons:
# - they completed,
# - they were stopped independently of the scheduler, e.g. due to a
# timeout argument or a manual interruption
# - scheduler decided to interrupt them.
# Note: ``done_trials`` includes trials which are paused.
done_trials_statuses = self._update_running_trials(
trial_status_dict, new_results
)
trial_status_dict.update(done_trials_statuses)
# update status with new results and all done trials
self.tuning_status.update(
trial_status_dict=trial_status_dict, new_results=new_results
)
return done_trials_statuses, new_results
def _schedule_new_tasks(self, running_trials_ids: Set[int]):
"""Schedules new tasks if resources are available or sleep.
Note: If ``start_jobs_without_delay`` is False, we ask the backend for
the number of busy workers, instead of trusting ``running_trials_ids``.
The latter does not contain trials which have been stopped or completed,
but the underlying job is still not completely done.
:param running_trials_ids: set of trial-ids currently running; gets
updated if new trials are scheduled.
"""
running_trials_threshold = self.n_workers if self.asynchronous_scheduling else 1
if self.start_jobs_without_delay:
# Assume that only the trials in ``running_trial_ids`` are busy (which
# is an underestimate for certain backends)
busy_trial_ids = None
num_busy_workers = len(running_trials_ids)
else:
# Ask backend how many workers are really busy
busy_trial_ids = self.trial_backend.busy_trial_ids()
num_busy_workers = len(busy_trial_ids)
if num_busy_workers >= running_trials_threshold:
# Note: For synchronous scheduling, we need to sleep here if at
# least one worker is busy
logger.debug(
f"{num_busy_workers} of {self.n_workers} workers are "
f"busy, wait for {self.sleep_time} seconds"
)
self._sleep()
else:
if not self.start_jobs_without_delay and num_busy_workers < len(
running_trials_ids
):
# In this case, the information from the backend is more recent
running_trials_ids = set(x[0] for x in busy_trial_ids)
# Schedule as many trials as we have free workers
for _ in range(self.n_workers - num_busy_workers):
trial = self._schedule_new_task()
trial_id = trial.trial_id
running_trials_ids.add(trial_id)
# Update tuning status
self.tuning_status.update(
trial_status_dict={trial_id: (trial, Status.in_progress)},
new_results=[],
)
def _schedule_new_task(self) -> Optional[TrialResult]:
"""Schedules a new task according to scheduler suggestion.
:return: Information for the trial suggested; raises ``StopIteration`` if the
scheduler does not suggest a new configuration (this can happen if its
configuration space is exhausted)
"""
suggestion = self.scheduler.suggest(trial_id=self.trial_backend.new_trial_id())
if suggestion is None:
logger.info("Searcher ran out of candidates, tuning job is stopping.")
raise StopIteration
elif suggestion.spawn_new_trial_id:
# we schedule a new trial, possibly using the checkpoint of ``checkpoint_trial_id``
# if given.
trial = self.trial_backend.start_trial(
config=suggestion.config.copy(),
checkpoint_trial_id=suggestion.checkpoint_trial_id,
)
self.scheduler.on_trial_add(trial=trial)
for callback in self.callbacks:
callback.on_start_trial(trial)
logger.info(f"(trial {trial.trial_id}) - scheduled {suggestion}")
return trial
else:
# suggestion is a trial_id to resume, with possibly a new configuration
log_msg = f"Resuming trial {suggestion.checkpoint_trial_id}"
if suggestion.config is not None:
log_msg += f" with new_config = {suggestion.config}"
logger.info(log_msg)
trial = self.trial_backend.resume_trial(
trial_id=suggestion.checkpoint_trial_id, new_config=suggestion.config
)
for callback in self.callbacks:
callback.on_resume_trial(trial)
return trial
def _handle_failure(self, done_trials_statuses: Dict[int, Tuple[Trial, str]]):
logger.error(f"Stopped as {self.max_failures} failures were reached")
for trial_id, (_, status) in done_trials_statuses.items():
if status == Status.failed:
logger.error(f"showing log of first failure")
stdout = "".join(self.trial_backend.stdout(trial_id))
stderr = "".join(self.trial_backend.stderr(trial_id))
logger.error(stdout)
logger.error(stderr)
raise ValueError(f"Trial - {trial_id} failed")
def save(self, folder: Optional[str] = None):
if folder is None:
tuner_serialized_path = self.tuner_path / ST_TUNER_DILL_FILENAME
else:
tuner_serialized_path = Path(folder) / ST_TUNER_DILL_FILENAME
with open(tuner_serialized_path, "wb") as f:
logger.debug(f"saving tuner in {tuner_serialized_path}")
dill.dump(self, f)
self.trial_backend.on_tuner_save() # callback
@staticmethod
def load(tuner_path: Optional[str]):
with open(Path(tuner_path) / ST_TUNER_DILL_FILENAME, "rb") as f:
tuner = dill.load(f)
tuner.tuner_path = Path(experiment_path(tuner_name=tuner.name))
return tuner
def _update_running_trials(
self,
trial_status_dict: TrialAndStatusInformation,
new_results: TrialIdAndResultList,
) -> TrialAndStatusInformation:
"""
Updates schedulers with new results and sends decision to stop/pause
trials to the backend. Trials can be finished because:
* the scheduler decided to stop or pause.
* the trial failed.
* the trial was stopped independently of the scheduler, e.g. due to a
timeout argument or a manual interruption.
* the trial completed.
:param trial_status_dict: Information on trials from
``trial_backend.fetch_status_results``
:param new_results: New results from ``trial_backend.fetch_status_results``
:return: Dictionary mapping trial-ids that are finished to status
"""
# gets the list of running trials that are done
done_trials = dict()
for trial_id, result in new_results:
if trial_id not in done_trials:
trial, status = trial_status_dict[trial_id]
# communicate new result to the searcher and the scheduler
self.last_seen_result_per_trial[trial_id] = result
decision = self.scheduler.on_trial_result(trial=trial, result=result)
for callback in self.callbacks:
callback.on_trial_result(
trial=trial,
status=status,
result=result,
decision=decision,
)
if decision == SchedulerDecision.STOP:
if status != Status.completed:
# we override the status immediately; this avoids querying the backend
# status again after the change, which may be expensive
status = Status.stopped
self.trial_backend.stop_trial(trial_id=trial_id, result=result)
self.scheduler.on_trial_remove(trial=trial)
done_trials[trial_id] = (trial, status)
self.trials_scheduler_stopped.add(trial_id)
elif decision == SchedulerDecision.PAUSE:
status = Status.paused
self.trial_backend.pause_trial(trial_id=trial_id, result=result)
self.scheduler.on_trial_remove(trial=trial)
done_trials[trial_id] = (trial, status)
for trial_id, (trial, status) in trial_status_dict.items():
# Status "completed", "stopped" and "failed" are signaled to scheduler.
# Status "in_progress" and "stopping" are not signaled, although the first one could be added
# to notify the scheduler of pending runtimes (even in the absence of new results).
if status == Status.completed:
# since the code above updates ``trial_status_dict[trial_id]`` after a pause/stop scheduling decision
# this callback is never called after a pause/stop scheduler decision.
if (
trial_id not in done_trials
or done_trials[trial_id][1] != Status.paused
):
logger.info(f"Trial trial_id {trial_id} completed.")
# If scheduler marks trial as ``Status.paused``, this overrides
# ``Status.completed`` (which was assigned because the job
# completed)
done_trial = done_trials.get(trial_id)
if done_trial is not None and done_trial[1] == Status.paused:
status = Status.paused
if trial_id not in self.last_seen_result_per_trial:
logger.error(
f"trial {trial_id} completed and no metrics got observed, corresponding log:"
)
stdout = "".join(self.trial_backend.stdout(trial_id))
stderr = "".join(self.trial_backend.stderr(trial_id))
logger.error(stdout)
logger.error(stderr)
raise ValueError(
f"trial {trial_id} completed and no metrics got observed"
)
last_result = self.last_seen_result_per_trial[trial_id]
if trial_id not in done_trials:
self.scheduler.on_trial_complete(trial, last_result)
if status == Status.completed:
for callback in self.callbacks:
callback.on_trial_complete(trial, last_result)
done_trials[trial_id] = (trial, status)
if status == Status.failed:
logger.info(f"Trial trial_id {trial_id} failed.")
self.scheduler.on_trial_error(trial)
done_trials[trial_id] = (trial, status)
# For the case when the trial is stopped independently of the scheduler, we choose to use
# scheduler.on_trial_error(...) since it was not the scheduler's decision to stop the trial.
if (
status == Status.stopped
and trial_id not in self.trials_scheduler_stopped
):
logger.info(
f"Trial trial_id {trial_id} was stopped independently of the scheduler."
)
self.scheduler.on_trial_error(trial)
done_trials[trial_id] = (trial, status)
return done_trials
@staticmethod
def _default_callback():
"""
:return: Default callback to store results
"""
return StoreResultsCallback()
def best_config(
self, metric: Optional[Union[str, int]] = 0
) -> Tuple[int, Dict[str, Any]]:
"""
:param metric: Indicates which metric to use; can be the index or name of the metric.
Defaults to 0, i.e., the first metric defined in the scheduler
:return: the trial-id and best configuration found during tuning for the given metric
"""
metric_name, metric_mode = metric_name_mode(
metric_names=self.scheduler.metric_names(),
metric_mode=self.scheduler.metric_mode(),
metric=metric,
)
trial_id, best_metric = print_best_metric_found(
self.tuning_status, metric_names=[metric_name], mode=metric_mode
)
config = self.trial_backend._trial_dict[trial_id].config
logger.info(
f"If you want to retrain the best configuration found, you can run: \n"
f"```tuner.trial_backend.start_trial(config={config})``` to start training from scratch\n"
f"or\n"
f"```tuner.trial_backend.start_trial(config={config}, checkpoint_trial_id={trial_id})``` to start from "
f"last checkpoint (your script should have stored a checkpoint)"
)
return trial_id, config
| (trial_backend: syne_tune.backend.trial_backend.TrialBackend, scheduler: syne_tune.optimizer.scheduler.TrialScheduler, stop_criterion: Callable[[syne_tune.tuning_status.TuningStatus], bool], n_workers: int, sleep_time: float = 5.0, results_update_interval: float = 10.0, print_update_interval: float = 30.0, max_failures: int = 1, tuner_name: Optional[str] = None, asynchronous_scheduling: bool = True, wait_trial_completion_when_stopping: bool = False, callbacks: Optional[List[syne_tune.tuner_callback.TunerCallback]] = None, metadata: Optional[dict] = None, suffix_tuner_name: bool = True, save_tuner: bool = True, start_jobs_without_delay: bool = True, trial_backend_path: Optional[str] = None) |
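For orientation, a minimal end-to-end sketch of constructing and running a ``Tuner``. The entrypoint ``train.py``, its config-space keys, and the reported metric name ``loss`` are all hypothetical; import paths follow the usual Syne Tune layout but may differ between versions:

```python
from syne_tune import Tuner, StoppingCriterion
from syne_tune.backend import LocalBackend
from syne_tune.config_space import loguniform, randint
from syne_tune.optimizer.baselines import RandomSearch

# Hypothetical search space for a training script "train.py" that reports
# a metric called "loss".
config_space = {
    "lr": loguniform(1e-5, 1e-1),
    "batch_size": randint(8, 128),
    "epochs": 10,
}
tuner = Tuner(
    trial_backend=LocalBackend(entry_point="train.py"),
    scheduler=RandomSearch(config_space, metric="loss", mode="min"),
    stop_criterion=StoppingCriterion(max_wallclock_time=600),
    n_workers=4,
)
tuner.run()
```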
725,549 | syne_tune.tuner | __init__ | null | def __init__(
self,
trial_backend: TrialBackend,
scheduler: TrialScheduler,
stop_criterion: Callable[[TuningStatus], bool],
n_workers: int,
sleep_time: float = TUNER_DEFAULT_SLEEP_TIME,
results_update_interval: float = 10.0,
print_update_interval: float = 30.0,
max_failures: int = 1,
tuner_name: Optional[str] = None,
asynchronous_scheduling: bool = True,
wait_trial_completion_when_stopping: bool = False,
callbacks: Optional[List[TunerCallback]] = None,
metadata: Optional[dict] = None,
suffix_tuner_name: bool = True,
save_tuner: bool = True,
start_jobs_without_delay: bool = True,
trial_backend_path: Optional[str] = None,
):
self.trial_backend = trial_backend
self.scheduler = scheduler
self.n_workers = n_workers
self.sleep_time = sleep_time
self.results_update_interval = results_update_interval
self.stop_criterion = stop_criterion
self.asynchronous_scheduling = asynchronous_scheduling
self.wait_trial_completion_when_stopping = wait_trial_completion_when_stopping
self.metadata = self._enrich_metadata(metadata)
self.save_tuner = save_tuner
self.start_jobs_without_delay = start_jobs_without_delay
self.max_failures = max_failures
self.print_update_interval = print_update_interval
if tuner_name is not None:
check_valid_sagemaker_name(tuner_name)
else:
tuner_name = Path(self.trial_backend.entrypoint_path()).stem.replace(
"_", "-"
)
if suffix_tuner_name or tuner_name is None:
self.name = name_from_base(tuner_name, default="st-tuner")
else:
self.name = tuner_name
# we keep track of the last result seen to send it to schedulers when trials complete.
self.last_seen_result_per_trial = {}
self.trials_scheduler_stopped = set()
self.tuner_path = Path(experiment_path(tuner_name=self.name))
# inform the backend of the Tuner's folder. This allows the local backend
# to store trial logs and tuner results in the same folder.
self.trial_backend.set_path(
results_root=str(self.tuner_path)
if trial_backend_path is None
else trial_backend_path,
tuner_name=self.name,
)
self._init_callbacks(callbacks)
self.tuning_status = None
self.tuner_saver = None
self.status_printer = None
self._initialize_early_checkpoint_removal()
| (self, trial_backend: syne_tune.backend.trial_backend.TrialBackend, scheduler: syne_tune.optimizer.scheduler.TrialScheduler, stop_criterion: Callable[[syne_tune.tuning_status.TuningStatus], bool], n_workers: int, sleep_time: float = 5.0, results_update_interval: float = 10.0, print_update_interval: float = 30.0, max_failures: int = 1, tuner_name: Optional[str] = None, asynchronous_scheduling: bool = True, wait_trial_completion_when_stopping: bool = False, callbacks: Optional[List[syne_tune.tuner_callback.TunerCallback]] = None, metadata: Optional[dict] = None, suffix_tuner_name: bool = True, save_tuner: bool = True, start_jobs_without_delay: bool = True, trial_backend_path: Optional[str] = None) |
725,550 | syne_tune.tuner | _default_callback |
:return: Default callback to store results
| @staticmethod
def _default_callback():
"""
:return: Default callback to store results
"""
return StoreResultsCallback()
| () |
725,551 | syne_tune.tuner | _enrich_metadata |
:param metadata: Original metadata
:return: ``metadata`` enriched by default entries
| def _enrich_metadata(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
"""
:param metadata: Original metadata
:return: ``metadata`` enriched by default entries
"""
res = metadata if metadata is not None else dict()
self._set_metadata(res, ST_TUNER_CREATION_TIMESTAMP, time.time())
self._set_metadata(res, "entrypoint", self.trial_backend.entrypoint_path().stem)
self._set_metadata(res, "backend", str(type(self.trial_backend).__name__))
for key, value in self.scheduler.metadata().items():
self._set_metadata(res, key, value)
return res
| (self, metadata: Dict[str, Any]) -> Dict[str, Any] |
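A sketch of the enrichment performed above, with hypothetical user keys:

```python
# Hypothetical user-provided metadata. _enrich_metadata() adds, on top of it:
# the creation time-stamp (ST_TUNER_CREATION_TIMESTAMP), the entrypoint stem,
# the backend class name, and everything returned by scheduler.metadata().
user_metadata = {"seed": 0, "dataset": "cifar10"}
# Tuner(..., metadata=user_metadata) then persists the enriched dictionary to
# {tuner_path}/{ST_METADATA_FILENAME} via _save_metadata().
```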
725,552 | syne_tune.tuner | _handle_failure | null | def _handle_failure(self, done_trials_statuses: Dict[int, Tuple[Trial, str]]):
logger.error(f"Stopped as {self.max_failures} failures were reached")
for trial_id, (_, status) in done_trials_statuses.items():
if status == Status.failed:
logger.error(f"showing log of first failure")
stdout = "".join(self.trial_backend.stdout(trial_id))
stderr = "".join(self.trial_backend.stderr(trial_id))
logger.error(stdout)
logger.error(stderr)
raise ValueError(f"Trial - {trial_id} failed")
| (self, done_trials_statuses: Dict[int, Tuple[syne_tune.backend.trial_status.Trial, str]]) |
725,553 | syne_tune.tuner | _init_callbacks | null | def _init_callbacks(self, callbacks: Optional[List[TunerCallback]]):
if callbacks is None:
callbacks = [self._default_callback()]
else:
if not any(
isinstance(callback, StoreResultsCallback) for callback in callbacks
):
logger.warning(
"None of the callbacks provided are of type StoreResultsCallback. "
"This means no tuning results will be written."
)
self.callbacks: List[TunerCallback] = callbacks
| (self, callbacks: Optional[List[syne_tune.tuner_callback.TunerCallback]]) |
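Note that a user-supplied ``callbacks`` list replaces the default rather than extending it, hence the warning above. A sketch of keeping result storage alongside custom callbacks (the ``StoreResultsCallback`` import path varies across Syne Tune versions):

```python
# Assumed import path; older releases expose StoreResultsCallback from
# syne_tune.tuner_callback instead.
from syne_tune.results_callback import StoreResultsCallback

# Include StoreResultsCallback explicitly, otherwise no results are written.
callbacks = [StoreResultsCallback()]  # plus any custom TunerCallback instances
# then: Tuner(..., callbacks=callbacks)
```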
725,554 | syne_tune.tuner | _initialize_early_checkpoint_removal |
If the scheduler supports early checkpoint removal, the specific callback
for this is created here and appended to ``self.callbacks``.
| def _initialize_early_checkpoint_removal(self):
"""
If the scheduler supports early checkpoint removal, the specific callback
for this is created here and appended to ``self.callbacks``.
"""
if self.trial_backend.delete_checkpoints:
callback = (
self.scheduler.callback_for_checkpoint_removal(self.stop_criterion)
if isinstance(self.scheduler, RemoveCheckpointsSchedulerMixin)
else None
)
if callback is not None:
self.callbacks.append(callback)
| (self) |
725,555 | syne_tune.tuner | _process_new_results | Communicates new results from the backend to the scheduler
Returns dictionary of trials which are not running, along with their
status, in ``done_trials_statuses``, and list of new results (tuples
``(trial_id, result)``), observed since the previous call, in
``new_results``.
:param running_trials_ids: Trials currently running
:return: ``(done_trials_statuses, new_results)``
| def _process_new_results(
self, running_trials_ids: Set[int]
) -> (TrialAndStatusInformation, TrialIdAndResultList):
"""Communicates new results from the backend to the scheduler
Returns dictionary of trials which are not running, along with their
status, in ``done_trials_statuses``, and list of new results (tuples
``(trial_id, result)``), observed since the previous call, in
``new_results``.
:param running_trials_ids: Trials currently running
:return: ``(done_trials_statuses, new_results)``
"""
# fetch new results
trial_status_dict, new_results = self.trial_backend.fetch_status_results(
trial_ids=list(running_trials_ids)
)
for callback in self.callbacks:
callback.on_fetch_status_results(
trial_status_dict=trial_status_dict, new_results=new_results
)
assert len(running_trials_ids) <= self.n_workers
# Gets list of trials that are done with the new results.
# The trials can be finished for different reasons:
# - they completed,
# - they were stopped independently of the scheduler, e.g. due to a
# timeout argument or a manual interruption
# - scheduler decided to interrupt them.
# Note: ``done_trials`` includes trials which are paused.
done_trials_statuses = self._update_running_trials(
trial_status_dict, new_results
)
trial_status_dict.update(done_trials_statuses)
# update status with new results and all done trials
self.tuning_status.update(
trial_status_dict=trial_status_dict, new_results=new_results
)
return done_trials_statuses, new_results
| (self, running_trials_ids: Set[int]) -> (typing.Dict[int, typing.Tuple[syne_tune.backend.trial_status.Trial, str]], typing.List[typing.Tuple[int, dict]]) |
725,556 | syne_tune.tuner | _save_metadata | null | def _save_metadata(self):
dump_json_with_numpy(self.metadata, self.tuner_path / ST_METADATA_FILENAME)
| (self) |
725,557 | syne_tune.tuner | _schedule_new_task | Schedules a new task according to scheduler suggestion.
:return: Information for the trial suggested; raises ``StopIteration`` if the
scheduler does not suggest a new configuration (this can happen if its
configuration space is exhausted)
| def _schedule_new_task(self) -> Optional[TrialResult]:
"""Schedules a new task according to scheduler suggestion.
:return: Information for the trial suggested; raises ``StopIteration`` if the
scheduler does not suggest a new configuration (this can happen if its
configuration space is exhausted)
"""
suggestion = self.scheduler.suggest(trial_id=self.trial_backend.new_trial_id())
if suggestion is None:
logger.info("Searcher ran out of candidates, tuning job is stopping.")
raise StopIteration
elif suggestion.spawn_new_trial_id:
# we schedule a new trial, possibly using the checkpoint of ``checkpoint_trial_id``
# if given.
trial = self.trial_backend.start_trial(
config=suggestion.config.copy(),
checkpoint_trial_id=suggestion.checkpoint_trial_id,
)
self.scheduler.on_trial_add(trial=trial)
for callback in self.callbacks:
callback.on_start_trial(trial)
logger.info(f"(trial {trial.trial_id}) - scheduled {suggestion}")
return trial
else:
# suggestion is a trial_id to resume, with possibly a new configuration
log_msg = f"Resuming trial {suggestion.checkpoint_trial_id}"
if suggestion.config is not None:
log_msg += f" with new_config = {suggestion.config}"
logger.info(log_msg)
trial = self.trial_backend.resume_trial(
trial_id=suggestion.checkpoint_trial_id, new_config=suggestion.config
)
for callback in self.callbacks:
callback.on_resume_trial(trial)
return trial
| (self) -> Optional[syne_tune.backend.trial_status.TrialResult] |
725,558 | syne_tune.tuner | _schedule_new_tasks | Schedules new tasks if resources are available, otherwise sleeps.
Note: If ``start_jobs_without_delay`` is False, we ask the backend for
the number of busy workers, instead of trusting ``running_trials_ids``.
The latter does not contain trials which have been stopped or completed,
but the underlying job is still not completely done.
:param running_trials_ids: set of trial-ids currently running; gets
updated if new trials are scheduled.
| def _schedule_new_tasks(self, running_trials_ids: Set[int]):
"""Schedules new tasks if resources are available or sleep.
Note: If ``start_jobs_without_delay`` is False, we ask the backend for
the number of busy workers, instead of trusting ``running_trials_ids``.
The latter does not contain trials which have been stopped or completed,
but the underlying job is still not completely done.
:param running_trials_ids: set of trial-ids currently running; gets
updated if new trials are scheduled.
"""
running_trials_threshold = self.n_workers if self.asynchronous_scheduling else 1
if self.start_jobs_without_delay:
# Assume that only the trials in ``running_trial_ids`` are busy (which
# is an underestimate for certain backends)
busy_trial_ids = None
num_busy_workers = len(running_trials_ids)
else:
# Ask backend how many workers are really busy
busy_trial_ids = self.trial_backend.busy_trial_ids()
num_busy_workers = len(busy_trial_ids)
if num_busy_workers >= running_trials_threshold:
# Note: For synchronous scheduling, we need to sleep here if at
# least one worker is busy
logger.debug(
f"{num_busy_workers} of {self.n_workers} workers are "
f"busy, wait for {self.sleep_time} seconds"
)
self._sleep()
else:
if not self.start_jobs_without_delay and num_busy_workers < len(
running_trials_ids
):
# In this case, the information from the backend is more recent
running_trials_ids = set(x[0] for x in busy_trial_ids)
# Schedule as many trials as we have free workers
for _ in range(self.n_workers - num_busy_workers):
trial = self._schedule_new_task()
trial_id = trial.trial_id
running_trials_ids.add(trial_id)
# Update tuning status
self.tuning_status.update(
trial_status_dict={trial_id: (trial, Status.in_progress)},
new_results=[],
)
| (self, running_trials_ids: Set[int]) |
725,559 | syne_tune.tuner | _set_metadata | null | @staticmethod
def _set_metadata(metadata: Dict[str, Any], name: str, value):
if name in metadata:
logger.warning(
f"Entry {name} in metadata is used, but will be overwritten:\n"
f"Old value: {metadata[name]}\n"
f"Overwrite: {value}\n"
)
metadata[name] = value
| (metadata: Dict[str, Any], name: str, value) |
725,560 | syne_tune.tuner | _sleep | null | def _sleep(self):
time.sleep(self.sleep_time)
for callback in self.callbacks:
callback.on_tuning_sleep(self.sleep_time)
| (self) |
725,561 | syne_tune.tuner | _stop_condition | null | def _stop_condition(self) -> bool:
return (
self.stop_criterion(self.tuning_status)
or self.tuning_status.num_trials_failed > self.max_failures
)
| (self) -> bool |
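Since ``stop_criterion`` is any ``Callable[[TuningStatus], bool]``, a plain function also works; ``StoppingCriterion`` is mainly preferred because it serializes cleanly for remote launching. A hedged sketch of a custom predicate:

```python
from syne_tune.tuning_status import TuningStatus


def my_stop_criterion(status: TuningStatus) -> bool:
    # Hypothetical rule: stop after 50 completed trials or two hours.
    return status.num_trials_completed >= 50 or status.wallclock_time > 7200


# then: Tuner(..., stop_criterion=my_stop_criterion)
```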
725,562 | syne_tune.tuner | _update_running_trials |
Updates schedulers with new results and sends decision to stop/pause
trials to the backend. Trials can be finished because:
* the scheduler decided to stop or pause.
* the trial failed.
* the trial was stopped independently of the scheduler, e.g. due to a
timeout argument or a manual interruption.
* the trial completed.
:param trial_status_dict: Information on trials from
``trial_backend.fetch_status_results``
:param new_results: New results from ``trial_backend.fetch_status_results``
:return: Dictionary mapping trial-ids that are finished to status
| def _update_running_trials(
self,
trial_status_dict: TrialAndStatusInformation,
new_results: TrialIdAndResultList,
) -> TrialAndStatusInformation:
"""
Updates schedulers with new results and sends decision to stop/pause
trials to the backend. Trials can be finished because:
* the scheduler decided to stop or pause.
* the trial failed.
* the trial was stopped independently of the scheduler, e.g. due to a
timeout argument or a manual interruption.
* the trial completed.
:param trial_status_dict: Information on trials from
``trial_backend.fetch_status_results``
:param new_results: New results from ``trial_backend.fetch_status_results``
:return: Dictionary mapping trial-ids that are finished to status
"""
# gets the list of running trials that are done
done_trials = dict()
for trial_id, result in new_results:
if trial_id not in done_trials:
trial, status = trial_status_dict[trial_id]
# communicate new result to the searcher and the scheduler
self.last_seen_result_per_trial[trial_id] = result
decision = self.scheduler.on_trial_result(trial=trial, result=result)
for callback in self.callbacks:
callback.on_trial_result(
trial=trial,
status=status,
result=result,
decision=decision,
)
if decision == SchedulerDecision.STOP:
if status != Status.completed:
# we override the status immediately; this avoids querying the backend
# status again after the change, which may be expensive
status = Status.stopped
self.trial_backend.stop_trial(trial_id=trial_id, result=result)
self.scheduler.on_trial_remove(trial=trial)
done_trials[trial_id] = (trial, status)
self.trials_scheduler_stopped.add(trial_id)
elif decision == SchedulerDecision.PAUSE:
status = Status.paused
self.trial_backend.pause_trial(trial_id=trial_id, result=result)
self.scheduler.on_trial_remove(trial=trial)
done_trials[trial_id] = (trial, status)
for trial_id, (trial, status) in trial_status_dict.items():
# Status "completed", "stopped" and "failed" are signaled to scheduler.
# Status "in_progress" and "stopping" are not signaled, although the first one could be added
# to notify the scheduler of pending runtimes (even in the absence of new results).
if status == Status.completed:
# since the code above updates ``trial_status_dict[trial_id]`` after a pause/stop scheduling decision
# this callback is never called after a pause/stop scheduler decision.
if (
trial_id not in done_trials
or done_trials[trial_id][1] != Status.paused
):
logger.info(f"Trial trial_id {trial_id} completed.")
# If scheduler marks trial as ``Status.paused``, this overrides
# ``Status.completed`` (which was assigned because the job
# completed)
done_trial = done_trials.get(trial_id)
if done_trial is not None and done_trial[1] == Status.paused:
status = Status.paused
if trial_id not in self.last_seen_result_per_trial:
logger.error(
f"trial {trial_id} completed and no metrics got observed, corresponding log:"
)
stdout = "".join(self.trial_backend.stdout(trial_id))
stderr = "".join(self.trial_backend.stderr(trial_id))
logger.error(stdout)
logger.error(stderr)
raise ValueError(
f"trial {trial_id} completed and no metrics got observed"
)
last_result = self.last_seen_result_per_trial[trial_id]
if trial_id not in done_trials:
self.scheduler.on_trial_complete(trial, last_result)
if status == Status.completed:
for callback in self.callbacks:
callback.on_trial_complete(trial, last_result)
done_trials[trial_id] = (trial, status)
if status == Status.failed:
logger.info(f"Trial trial_id {trial_id} failed.")
self.scheduler.on_trial_error(trial)
done_trials[trial_id] = (trial, status)
# For the case when the trial is stopped independently of the scheduler, we choose to use
# scheduler.on_trial_error(...) since it was not the scheduler's decision to stop the trial.
if (
status == Status.stopped
and trial_id not in self.trials_scheduler_stopped
):
logger.info(
f"Trial trial_id {trial_id} was stopped independently of the scheduler."
)
self.scheduler.on_trial_error(trial)
done_trials[trial_id] = (trial, status)
return done_trials
| (self, trial_status_dict: Dict[int, Tuple[syne_tune.backend.trial_status.Trial, str]], new_results: List[Tuple[int, dict]]) -> Dict[int, Tuple[syne_tune.backend.trial_status.Trial, str]] |
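The ``on_trial_result`` hook invoked above receives the scheduler's decision for each new result. A minimal sketch of a custom callback using it (the class name is hypothetical):

```python
from syne_tune.tuner_callback import TunerCallback


class LogDecisionsCallback(TunerCallback):
    """Hypothetical callback that prints every scheduler decision, as passed
    to the on_trial_result hook by _update_running_trials."""

    def on_trial_result(self, trial, status, result, decision):
        print(f"trial {trial.trial_id}: status={status}, decision={decision}")
```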
725,563 | syne_tune.tuner | best_config |
:param metric: Indicates which metric to use; can be the index or name of the metric.
Defaults to 0, i.e., the first metric defined in the scheduler
:return: the trial-id and best configuration found during tuning for the given metric
| def best_config(
self, metric: Optional[Union[str, int]] = 0
) -> Tuple[int, Dict[str, Any]]:
"""
:param metric: Indicates which metric to use; can be the index or name of the metric.
Defaults to 0, i.e., the first metric defined in the scheduler
:return: the trial-id and best configuration found during tuning for the given metric
"""
metric_name, metric_mode = metric_name_mode(
metric_names=self.scheduler.metric_names(),
metric_mode=self.scheduler.metric_mode(),
metric=metric,
)
trial_id, best_metric = print_best_metric_found(
self.tuning_status, metric_names=[metric_name], mode=metric_mode
)
config = self.trial_backend._trial_dict[trial_id].config
logger.info(
f"If you want to retrain the best configuration found, you can run: \n"
f"```tuner.trial_backend.start_trial(config={config})``` to start training from scratch\n"
f"or\n"
f"```tuner.trial_backend.start_trial(config={config}, checkpoint_trial_id={trial_id})``` to start from "
f"last checkpoint (your script should have stored a checkpoint)"
)
return trial_id, config
| (self, metric: Union[str, int, NoneType] = 0) -> Tuple[int, Dict[str, Any]] |
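Typical usage once tuning has finished (continuing the Tuner sketch above; the metric name "loss" is a placeholder):

```python
# After tuner.run() returns:
trial_id, config = tuner.best_config()  # metric=0 selects the scheduler's first metric
print(f"best trial {trial_id}: {config}")
# For multi-metric schedulers, select by name or index, e.g.:
# trial_id, config = tuner.best_config(metric="loss")
```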
725,564 | syne_tune.tuner | load | null | @staticmethod
def load(tuner_path: Optional[str]):
with open(Path(tuner_path) / ST_TUNER_DILL_FILENAME, "rb") as f:
tuner = dill.load(f)
tuner.tuner_path = Path(experiment_path(tuner_name=tuner.name))
return tuner
| (tuner_path: Optional[str]) |
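A sketch of reloading a serialized tuner, e.g. to inspect a finished experiment; the path is a placeholder:

```python
from syne_tune import Tuner

# "/path/to/experiments/my-tuner" stands in for a directory containing the
# dill file written by Tuner.save().
tuner = Tuner.load("/path/to/experiments/my-tuner")
trial_id, config = tuner.best_config()
```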
725,565 | syne_tune.tuner | run | Launches the tuning. | def run(self):
"""Launches the tuning."""
done_trials_statuses = OrderedDict()
try:
logger.info(f"results of trials will be saved on {self.tuner_path}")
if self.tuning_status is None:
self.tuning_status = TuningStatus(
metric_names=self.scheduler.metric_names()
)
# prints the status every ``print_update_interval`` seconds
self.status_printer = RegularCallback(
callback=lambda tuning_status: logger.info(
"tuning status (last metric is reported)\n" + str(tuning_status)
),
call_seconds_frequency=self.print_update_interval,
)
# saves the tuner every ``results_update_interval`` seconds
if self.save_tuner:
self.tuner_saver = RegularCallback(
callback=lambda tuner: tuner.save(),
call_seconds_frequency=self.results_update_interval,
)
self.metadata[ST_TUNER_START_TIMESTAMP] = time.time()
for callback in self.callbacks:
callback.on_tuning_start(self)
self.tuner_path.mkdir(exist_ok=True, parents=True)
self._save_metadata()
            # ``running_trials_ids`` contains the ids of all trials currently running,
            # whether they were started from scratch or resumed from a paused
            # state
running_trials_ids = set()
config_space_exhausted = False
stop_condition_reached = self._stop_condition()
while (
# we stop when either the stop condition is reached
not stop_condition_reached
# or when all trials are done if the wait_trial_completion is activated
or self.wait_trial_completion_when_stopping
and len(running_trials_ids) > 0
):
for callback in self.callbacks:
callback.on_loop_start()
new_done_trial_statuses, new_results = self._process_new_results(
running_trials_ids=running_trials_ids,
)
if new_results and self.save_tuner:
# Save tuner state only if there have been new results
self.tuner_saver(tuner=self)
# update the list of done trials and remove those from ``running_trials_ids``
# Note: It is important to update ``running_trials_ids`` before
# calling ``_schedule_new_tasks``.
# Otherwise, a trial can be registered as paused in
# ``_process_new_results``, and immediately be resumed in
# ``_schedule_new_tasks``. If ``new_done_trial_statuses`` is subtracted from
# ``running_trials_ids`` afterwards only, this trial is removed from
# ``running_trials_ids`` even though it is running. Also, its status remains
# paused, because the next call of ``_process_new_results`` only considers
# trials in ``running_trials_ids``.
done_trials_statuses.update(new_done_trial_statuses)
running_trials_ids.difference_update(new_done_trial_statuses.keys())
if (
config_space_exhausted
or self.wait_trial_completion_when_stopping
and stop_condition_reached
):
# if the search space is exhausted, we loop until the running trials are done or until the
# stop condition is reached
if len(running_trials_ids) > 0:
if config_space_exhausted:
logger.debug(
f"Configuration space exhausted, waiting for completion of running trials "
f"{running_trials_ids}"
)
else:
logger.debug(
f"Stopping criterion reached, waiting for completion of running trials "
f"{running_trials_ids}"
)
self._sleep()
else:
break
else:
try:
self._schedule_new_tasks(running_trials_ids=running_trials_ids)
except StopIteration:
logger.info(
"Tuning is finishing as the whole configuration space got exhausted."
)
config_space_exhausted = True
self.status_printer(self.tuning_status)
for callback in self.callbacks:
callback.on_loop_end()
stop_condition_reached = self._stop_condition()
except Exception as e:
logger.error(
"An error happened during the tuning, cleaning up resources and logging final resources "
"before throwing the exception."
)
raise e
finally:
# graceful termination block called when the tuner reached its stop condition, when an error happened or
# when the job got interrupted (can happen in spot-instances or when sending a SIGINT signal with ctrl+C).
# the block displays the best configuration found and stops trials that may still be running.
print_best_metric_found(
tuning_status=self.tuning_status,
metric_names=self.scheduler.metric_names(),
mode=self.scheduler.metric_mode(),
)
# Callbacks (typically includes writing final results)
for callback in self.callbacks:
callback.on_tuning_end()
# Serialize Tuner object
if self.save_tuner:
self.save()
logger.info("Stopping trials that may still be running.")
self.trial_backend.stop_all()
# notify tuning status that jobs were stopped without having to query their status in the backend since
# we know that all trials were stopped
self.tuning_status.mark_running_job_as_stopped()
# in case too many errors were triggered, show log of last failed job and terminates with an error
if self.tuning_status.num_trials_failed > self.max_failures:
self._handle_failure(done_trials_statuses=done_trials_statuses)
logger.info(
f"Tuning finished, results of trials can be found on {self.tuner_path}"
)
| (self) |
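A minimal launch sketch for ``run`` (``backend`` and ``scheduler`` are placeholders for an already-configured trial backend and scheduler, not defined in this snippet):

```python
# Hedged sketch: wire a backend and scheduler into a Tuner and launch it.
from syne_tune import Tuner, StoppingCriterion

tuner = Tuner(
    trial_backend=backend,   # placeholder: a configured trial backend
    scheduler=scheduler,     # placeholder: a configured scheduler
    stop_criterion=StoppingCriterion(max_wallclock_time=600),
    n_workers=4,
)
tuner.run()  # blocks until the stop criterion fires or the space is exhausted
```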
725,566 | syne_tune.tuner | save | null | def save(self, folder: Optional[str] = None):
if folder is None:
tuner_serialized_path = self.tuner_path / ST_TUNER_DILL_FILENAME
else:
tuner_serialized_path = Path(folder) / ST_TUNER_DILL_FILENAME
with open(tuner_serialized_path, "wb") as f:
logger.debug(f"saving tuner in {tuner_serialized_path}")
dill.dump(self, f)
self.trial_backend.on_tuner_save() # callback
| (self, folder: Optional[str] = None) |
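A quick sketch of ``save`` (the backup folder below is a placeholder); by default the tuner is serialized into ``self.tuner_path``:

```python
# Hedged sketch: serialize the tuner, either in place or to another folder.
tuner.save()                      # writes the dill file under tuner.tuner_path
tuner.save(folder="/tmp/backup")  # placeholder folder; same dill filename
```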
725,573 | syne_tune | read_version | null | def read_version():
with open(Path(__file__).parent / "version", "r") as f:
return f.readline().strip().replace('"', "")
| () |
725,582 | awpy.parser.demoparser | DemoParser | DemoParser can parse, load and clean data from a CSGO demofile. Can be instantiated without a specified demofile.
Attributes:
demofile (string): A string denoting the path to the demo file, which ends in .dem
Defaults to ''
outpath (string): Path where to save the outputfile to. Default is current directory
demo_id (string): A unique demo name/game id. Default is inferred from demofile name
output_file (str): The output file name. Default is 'demoid'+".json"
log (bool): A boolean indicating if the log should print to stdout. Default is False
parse_rate (int, optional): One of 128, 64, 32, 16, 8, 4, 2, or 1.
The lower the value, the more frames are collected. Indicates spacing between parsed demo frames in ticks. Default is 128.
parse_frames (bool): Flag if you want to parse frames (trajectory data) or not. Default is True
parse_kill_frames (bool): Flag if you want to parse frames on kills. Default is False
trade_time (int, optional): Length of the window for a trade (in seconds). Default is 5.
dmg_rolled (bool): Boolean if you want damages rolled up
(since multiple damages for a player can happen in 1 tick from the same weapon.)
Default is False
parse_chat (bool): Flag if you want to parse chat messages. Default is False
buy_style (string): Buy style string, one of "hltv" or "csgo"
Default is "hltv"
json_indentation (bool): Whether the json file should be pretty printed
with indentation (larger, more readable) or not (smaller, less human readable)
Default is False
json (dict): Dictionary containing the parsed json file
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.18
| class DemoParser:
"""DemoParser can parse, load and clean data from a CSGO demofile. Can be instantiated without a specified demofile.
Attributes:
demofile (string): A string denoting the path to the demo file, which ends in .dem
Defaults to ''
outpath (string): Path where to save the outputfile to. Default is current directory
demo_id (string): A unique demo name/game id. Default is inferred from demofile name
output_file (str): The output file name. Default is 'demoid'+".json"
log (bool): A boolean indicating if the log should print to stdout. Default is False
parse_rate (int, optional): One of 128, 64, 32, 16, 8, 4, 2, or 1.
The lower the value, the more frames are collected. Indicates spacing between parsed demo frames in ticks. Default is 128.
parse_frames (bool): Flag if you want to parse frames (trajectory data) or not. Default is True
parse_kill_frames (bool): Flag if you want to parse frames on kills. Default is False
trade_time (int, optional): Length of the window for a trade (in seconds). Default is 5.
dmg_rolled (bool): Boolean if you want damages rolled up
(since multiple damages for a player can happen in 1 tick from the same weapon.)
Default is False
parse_chat (bool): Flag if you want to parse chat messages. Default is False
buy_style (string): Buy style string, one of "hltv" or "csgo"
Default is "hltv"
json_indentation (bool): Whether the json file should be pretty printed
with indentation (larger, more readable) or not (smaller, less human readable)
Default is False
json (dict): Dictionary containing the parsed json file
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.18
"""
def __init__(
self,
demofile: str = "",
outpath: Optional[str] = None,
demo_id: Optional[str] = None,
log: bool = False,
parse_rate: int = 128,
parse_frames: bool = True,
parse_kill_frames: bool = False,
trade_time: int = 5,
dmg_rolled: bool = False,
parse_chat: bool = False,
buy_style: str = "hltv",
json_indentation: bool = False,
):
# Set up logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
)
self.logger = logging.getLogger("awpy")
self.logger.propagate = log
# Handle demofile and demo_id name. Only take the file name and remove the last extension.
self.demofile = os.path.abspath(demofile)
self.logger.info("Initialized awpy DemoParser with demofile %s", self.demofile)
if not demo_id: # (demo_id is None) | (demo_id == "")
self.demo_id = os.path.splitext(
os.path.basename(demofile.replace("\\", "/"))
)[0]
else:
self.demo_id = demo_id
self.logger.info("Setting demo id to %s", self.demo_id)
self.output_file = self.demo_id + ".json"
if outpath is None:
self.outpath = os.path.abspath(os.getcwd())
else:
self.outpath = os.path.abspath(outpath)
# Handle parse rate. If the parse rate is less than 64, likely to be slow
        if not isinstance(parse_rate, int) or parse_rate < 1:
            self.logger.warning(
                "Parse rate of %s not acceptable! Parse rate must be an integer greater than 0.",
                str(parse_rate),
            )
            parse_rate = 128
        self.parse_rate = parse_rate
        if 1 < parse_rate < 64:
            self.logger.warning(
                "A parse rate lower than 64 may be slow depending on the tickrate of the demo, which is usually 64 for MM and 128 for pro demos."
            )
        elif parse_rate >= 256:
            self.logger.warning(
                "A high parse rate means very few frames. Only use for testing purposes."
            )
        self.logger.info("Setting parse rate to %s", str(self.parse_rate))
# Handle trade time
self.trade_time = trade_time
if trade_time <= 0:
self.logger.warning(
"Trade time can't be negative, setting to default value of 5 seconds."
)
self.trade_time = 5
elif trade_time > 7:
self.logger.warning(
"Trade time of %s is rather long. Consider a value between 4-7.",
str(trade_time),
)
self.logger.info("Setting trade time to %s", str(self.trade_time))
# Handle buy style
if buy_style not in ["hltv", "csgo"]:
self.logger.warning(
"Buy style specified is not one of hltv, csgo, will be set to hltv by default"
)
self.buy_style = "hltv"
else:
self.buy_style = buy_style
self.logger.info("Setting buy style to %s", str(self.buy_style))
self.dmg_rolled = dmg_rolled
self.parse_chat = parse_chat
self.parse_frames = parse_frames
self.parse_kill_frames = parse_kill_frames
self.json_indentation = json_indentation
self.logger.info("Rollup damages set to %s", str(self.dmg_rolled))
self.logger.info("Parse chat set to %s", str(self.parse_chat))
self.logger.info("Parse frames set to %s", str(self.parse_frames))
self.logger.info("Parse kill frames set to %s", str(self.parse_kill_frames))
self.logger.info(
"Output json indentation set to %s", str(self.json_indentation)
)
# Set parse error to False
self.parse_error = False
# Initialize json attribute as None
self.json: Optional[Game] = None
def parse_demo(self) -> None:
"""Parse a demofile using the Go script parse_demo.go -- this function needs the .demofile to be set in the class, and the file needs to exist.
Returns:
Outputs a JSON file to current working directory.
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.18
FileNotFoundError: Raises a FileNotFoundError if the demofile path does not exist.
"""
# Check if Golang version is compatible
acceptable_go = check_go_version()
if not acceptable_go:
error_message = "Error calling Go. Check if Go is installed using 'go version'. Need at least v1.18.0."
self.logger.error(
error_message
)
raise ValueError(
error_message
)
else:
self.logger.info("Go version>=1.18.0")
# Check if demofile exists
if not os.path.exists(os.path.abspath(self.demofile)):
self.logger.error("Demofile path does not exist!")
raise FileNotFoundError("Demofile path does not exist!")
path = os.path.join(os.path.dirname(__file__), "")
self.logger.info("Running Golang parser from %s", path)
self.logger.info("Looking for file at %s", self.demofile)
self.parser_cmd = [
"go",
"run",
"parse_demo.go",
"-demo",
self.demofile,
"-parserate",
str(self.parse_rate),
"-tradetime",
str(self.trade_time),
"-buystyle",
str(self.buy_style),
"-demoid",
str(self.demo_id),
"-out",
self.outpath,
]
if self.dmg_rolled:
self.parser_cmd.append("--dmgrolled")
if self.parse_frames:
self.parser_cmd.append("--parseframes")
if self.parse_kill_frames:
self.parser_cmd.append("--parsekillframes")
if self.json_indentation:
self.parser_cmd.append("--jsonindentation")
if self.parse_chat:
self.parser_cmd.append("--parsechat")
proc = subprocess.Popen(
self.parser_cmd,
stdout=subprocess.PIPE,
cwd=path,
)
stdout = proc.stdout.read().splitlines() if proc.stdout is not None else None
self.output_file = self.demo_id + ".json"
if os.path.isfile(self.outpath + "/" + self.output_file):
self.logger.info("Wrote demo parse output to %s", self.output_file)
self.parse_error = False
else:
self.parse_error = True
self.logger.error("No file produced, error in calling Golang")
self.logger.error(stdout)
def read_json(self, json_path: str):
"""Reads the JSON file given a JSON path. Can be used to read in already processed demofiles.
Args:
json_path (string): Path to JSON file
Returns:
JSON in Python dictionary form
Raises:
FileNotFoundError: Raises a FileNotFoundError if the JSON path doesn't exist
"""
# Check if JSON exists
if not os.path.exists(json_path):
self.logger.error("JSON path does not exist!")
raise FileNotFoundError("JSON path does not exist!")
# Read in json to .json attribute
with open(json_path, encoding="utf8") as f:
demo_data: Game = json.load(f)
self.json = demo_data
self.logger.info(
"JSON data loaded, available in the `json` attribute to parser"
)
return demo_data
def parse(
self, return_type: str = "json", clean: bool = True
) -> Union[Game, dict[str, Any]]:
"""Wrapper for parse_demo() and read_json(). Use to parse a demo.
Args:
return_type (string, optional): Either "json" or "df". Default is "json"
clean (bool, optional): True to run clean_rounds, otherwise, uncleaned data is returned. Defaults to True.
Returns:
A dictionary of output (which is parsed to a JSON file in the working directory)
Raises:
ValueError: Raises a ValueError if the return_type is not "json" or "df"
AttributeError: Raises an AttributeError if the .json attribute is None
"""
self.parse_demo()
self.read_json(json_path=self.outpath + "/" + self.output_file)
if clean:
clean_data = self.clean_rounds()
if self.json:
self.logger.info("JSON output found")
if return_type == "json":
return self.json
elif return_type == "df":
demo_data = self.parse_json_to_df()
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error("Parse return_type must be either 'json' or 'df'")
raise ValueError("return_type must be either 'json' or 'df'")
else:
self.logger.error("JSON couldn't be returned")
raise AttributeError("No JSON parsed! Error in producing JSON.")
def parse_json_to_df(self) -> dict[str, Any]:
"""Returns JSON into dictionary where keys correspond to data frames
Returns:
A dictionary of output
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
demo_data: dict[str, Any] = {}
demo_data["matchID"] = self.json["matchID"]
demo_data["clientName"] = self.json["clientName"]
demo_data["mapName"] = self.json["mapName"]
demo_data["tickRate"] = self.json["tickRate"]
demo_data["playbackTicks"] = self.json["playbackTicks"]
# Rounds
demo_data["rounds"] = self._parse_rounds()
# Kills
demo_data["kills"] = self._parse_kills()
demo_data["kills"]["attackerSteamID"] = demo_data["kills"][
"attackerSteamID"
].astype(pd.Int64Dtype())
demo_data["kills"]["victimSteamID"] = demo_data["kills"][
"victimSteamID"
].astype(pd.Int64Dtype())
demo_data["kills"]["assisterSteamID"] = demo_data["kills"][
"assisterSteamID"
].astype(pd.Int64Dtype())
demo_data["kills"]["flashThrowerSteamID"] = demo_data["kills"][
"flashThrowerSteamID"
].astype(pd.Int64Dtype())
# Damages
demo_data["damages"] = self._parse_damages()
demo_data["damages"]["attackerSteamID"] = demo_data["damages"][
"attackerSteamID"
].astype(pd.Int64Dtype())
demo_data["damages"]["victimSteamID"] = demo_data["damages"][
"victimSteamID"
].astype(pd.Int64Dtype())
# Grenades
demo_data["grenades"] = self._parse_grenades()
demo_data["grenades"]["throwerSteamID"] = demo_data["grenades"][
"throwerSteamID"
].astype(pd.Int64Dtype())
# Flashes
demo_data["flashes"] = self._parse_flashes()
demo_data["flashes"]["attackerSteamID"] = demo_data["flashes"][
"attackerSteamID"
].astype(pd.Int64Dtype())
demo_data["flashes"]["playerSteamID"] = demo_data["flashes"][
"playerSteamID"
].astype(pd.Int64Dtype())
# Weapon Fires
demo_data["weaponFires"] = self._parse_weapon_fires()
demo_data["weaponFires"]["playerSteamID"] = demo_data["weaponFires"][
"playerSteamID"
].astype(pd.Int64Dtype())
# Bomb Events
demo_data["bombEvents"] = self._parse_bomb_events()
demo_data["bombEvents"]["playerSteamID"] = demo_data["bombEvents"][
"playerSteamID"
].astype(pd.Int64Dtype())
# Frames
demo_data["frames"] = self._parse_frames()
# Player Frames
demo_data["playerFrames"] = self._parse_player_frames()
demo_data["playerFrames"]["steamID"] = demo_data["playerFrames"][
"steamID"
].astype(pd.Int64Dtype())
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_frames(self) -> pd.DataFrame:
"""Returns frames as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a frame (game state) in the demo, which is a discrete point of time.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
frames_dataframes = []
for r in self.json["gameRounds"] or []:
for frame in r["frames"] or []:
frame_item: dict[str, Any] = {}
frame_item["roundNum"] = r["roundNum"]
for k in ("tick", "seconds"):
# Currently there is no better way:
# https://github.com/python/mypy/issues/9230
k = cast(Literal["tick", "seconds"], k)
frame_item[k] = frame[k]
frame_item["ctTeamName"] = frame["ct"]["teamName"]
frame_item["ctEqVal"] = frame["ct"]["teamEqVal"]
frame_item["ctAlivePlayers"] = frame["ct"]["alivePlayers"]
frame_item["ctUtility"] = frame["ct"]["totalUtility"]
frame_item["tTeamName"] = frame["t"]["teamName"]
frame_item["tEqVal"] = frame["t"]["teamEqVal"]
frame_item["tAlivePlayers"] = frame["t"]["alivePlayers"]
frame_item["tUtility"] = frame["t"]["totalUtility"]
frames_dataframes.append(frame_item)
            frames_df = pd.DataFrame(frames_dataframes)
            frames_df["matchID"] = self.json["matchID"]
            frames_df["mapName"] = self.json["mapName"]
            return frames_df
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_player_frames(self) -> pd.DataFrame:
"""Returns player frames as a Pandas dataframe.
Returns:
A Pandas dataframe where each row is a player's attributes at a given frame (game state).
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
player_frames = []
for r in self.json["gameRounds"] or []:
for frame in r["frames"] or []:
for side in ["ct", "t"]:
# Currently there is no better way:
# https://github.com/python/mypy/issues/9230
side = cast(Literal["ct", "t"], side)
if frame[side]["players"] is not None and (
                        # The or [] should be unnecessary but mypy cannot handle this
len(frame[side]["players"] or [])
> 0 # Used to be == 5, to ensure the sides were equal.
):
# Same here
for player in frame[side]["players"] or []:
player_item: dict[str, Any] = {}
player_item["roundNum"] = r["roundNum"]
player_item["tick"] = frame["tick"]
player_item["seconds"] = frame["seconds"]
player_item["side"] = side
player_item["teamName"] = frame[side]["teamName"]
for col, val in player.items():
if col != "inventory":
player_item[col] = val
player_frames.append(player_item)
            player_frames_df = pd.DataFrame(player_frames)
            player_frames_df["matchID"] = self.json["matchID"]
            player_frames_df["mapName"] = self.json["mapName"]
            return player_frames_df
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_rounds(self) -> pd.DataFrame:
"""Returns rounds as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a round
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
rounds = []
# There is currently no better way than this monstrosity...
# https://github.com/python/mypy/issues/9230
# https://stackoverflow.com/a/64522240/7895542
cols_type = Literal[
"roundNum",
"startTick",
"freezeTimeEndTick",
"endTick",
"endOfficialTick",
"tScore",
"ctScore",
"endTScore",
"endCTScore",
"tTeam",
"ctTeam",
"winningSide",
"winningTeam",
"losingTeam",
"roundEndReason",
"ctFreezeTimeEndEqVal",
"ctRoundStartEqVal",
"ctRoundSpendMoney",
"ctBuyType",
"tFreezeTimeEndEqVal",
"tRoundStartEqVal",
"tRoundSpendMoney",
"tBuyType",
]
cols: list[cols_type] = list(get_args(cols_type))
for r in self.json["gameRounds"] or []:
round_item: dict[str, Any] = {}
for k in cols:
round_item[k] = r[k]
round_item["matchID"] = self.json["matchID"]
round_item["mapName"] = self.json["mapName"]
rounds.append(round_item)
return pd.DataFrame(rounds)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_kills(self) -> pd.DataFrame:
"""Returns kills as either a Pandas dataframe
Returns:
A Pandas dataframe where each row is a kill
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
kills = []
for r in self.json["gameRounds"] or []:
if r["kills"] is not None:
for k in r["kills"]:
new_k: dict[str, Any] = dict(k)
new_k["roundNum"] = r["roundNum"]
new_k["matchID"] = self.json["matchID"]
new_k["mapName"] = self.json["mapName"]
kills.append(new_k)
return pd.DataFrame(kills)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_weapon_fires(self) -> pd.DataFrame:
"""Returns weapon fires as either a list or Pandas dataframe
Returns:
A Pandas dataframe where each row is a weapon fire event
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
shots = []
for r in self.json["gameRounds"] or []:
if r["weaponFires"] is not None:
for wf in r["weaponFires"]:
new_wf: dict[str, Any] = dict(wf)
new_wf["roundNum"] = r["roundNum"]
new_wf["matchID"] = self.json["matchID"]
new_wf["mapName"] = self.json["mapName"]
shots.append(new_wf)
return pd.DataFrame(shots)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_damages(self) -> pd.DataFrame:
"""Returns damages as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a damage event.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
damages = []
for r in self.json["gameRounds"] or []:
if r["damages"] is not None:
for d in r["damages"]:
new_d: dict[str, Any] = dict(d)
new_d["roundNum"] = r["roundNum"]
new_d["matchID"] = self.json["matchID"]
new_d["mapName"] = self.json["mapName"]
damages.append(new_d)
return pd.DataFrame(damages)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_grenades(self) -> pd.DataFrame:
"""Returns grenades as a Pandas dataframe
Returns:
            A Pandas dataframe where each row is a grenade throw
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
grenades = []
for r in self.json["gameRounds"] or []:
if r["grenades"] is not None:
for g in r["grenades"]:
new_g: dict[str, Any] = dict(g)
new_g["roundNum"] = r["roundNum"]
new_g["matchID"] = self.json["matchID"]
new_g["mapName"] = self.json["mapName"]
grenades.append(new_g)
return pd.DataFrame(grenades)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_bomb_events(self) -> pd.DataFrame:
"""Returns bomb events as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a bomb event (defuse, plant, etc.)
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
bomb_events = []
for r in self.json["gameRounds"] or []:
if r["bombEvents"] is not None:
for b in r["bombEvents"]:
new_b: dict[str, Any] = dict(b)
new_b["roundNum"] = r["roundNum"]
new_b["matchID"] = self.json["matchID"]
new_b["mapName"] = self.json["mapName"]
bomb_events.append(new_b)
return pd.DataFrame(bomb_events)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_flashes(self) -> pd.DataFrame:
"""Returns flashes as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a flash event.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
flashes = []
for r in self.json["gameRounds"] or []:
if r["flashes"] is not None:
for f in r["flashes"]:
new_f: dict[str, Any] = dict(f)
new_f["roundNum"] = r["roundNum"]
new_f["matchId"] = self.json["matchID"]
new_f["mapName"] = self.json["mapName"]
flashes.append(new_f)
return pd.DataFrame(flashes)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def clean_rounds(
self,
remove_no_frames: bool = True,
remove_warmups: bool = True,
remove_knifes: bool = True,
remove_bad_timings: bool = True,
remove_excess_players: bool = True,
remove_excess_kills: bool = True,
remove_bad_endings: bool = True,
remove_bad_scoring: bool = True,
return_type: str = "json",
save_to_json: bool = True,
) -> Union[Game, dict[str, Any]]:
"""Cleans a parsed demofile JSON.
Args:
        remove_no_frames (bool, optional): Remove rounds where there are no frames. Defaults to True.
remove_warmups (bool, optional): Remove warmup rounds. Defaults to True.
remove_knifes (bool, optional): Remove knife rounds. Defaults to True.
remove_bad_timings (bool, optional): Remove bad timings. Defaults to True.
remove_excess_players (bool, optional): Remove rounds with more than 5 players. Defaults to True.
remove_excess_kills (bool, optional): Remove rounds with more than 10 kills. Defaults to True.
remove_bad_endings (bool, optional): Remove rounds with bad round end reasons. Defaults to True.
        remove_bad_scoring (bool, optional): Remove rounds where the scoring is off (like scores going below the previous round's). Defaults to True.
return_type (str, optional): Return JSON or DataFrame. Defaults to "json".
save_to_json (bool, optional): Whether to write the JSON to a file. Defaults to True.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
ValueError: Raises a ValueError if the return type is neither 'json' nor 'df'
Returns:
dict: A dictionary of the cleaned demo.
"""
if self.json:
if remove_no_frames:
self.remove_rounds_with_no_frames()
if remove_warmups:
self.remove_warmups()
if remove_knifes:
self.remove_knife_rounds()
if remove_bad_timings:
self.remove_time_rounds()
if remove_excess_players:
self.remove_excess_players()
if remove_excess_kills:
self.remove_excess_kill_rounds()
if remove_bad_endings:
self.remove_end_round()
if remove_bad_scoring:
self.remove_bad_scoring()
self.renumber_rounds()
# self.rescore_rounds() -- Need to edit to take into account half switches
if save_to_json:
self.write_json()
if return_type == "json":
return self.json
elif return_type == "df":
demo_data = self.parse_json_to_df()
self.logger.info("Returned cleaned dataframe output")
return demo_data
raise ValueError(
f"Invalid return_type of {return_type}. Use 'json' or 'df' instead!"
)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def write_json(self) -> None:
"""Rewrite the JSON file"""
with open(self.outpath + "/" + self.output_file, "w", encoding="utf8") as fp:
json.dump(self.json, fp, indent=(1 if self.json_indentation else None))
def renumber_rounds(self) -> None:
"""Renumbers the rounds.
Raises:
            AttributeError: Raises an AttributeError if the .json attribute is None or has no "gameRounds" key.
"""
if self.json and self.json["gameRounds"]:
for i, r in enumerate(self.json["gameRounds"]):
self.json["gameRounds"][i]["roundNum"] = i + 1
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def rescore_rounds(self) -> None:
"""Rescore the rounds based on round end reason.
Raises:
            AttributeError: Raises an AttributeError if the .json attribute is None or has no "gameRounds" key.
"""
if self.json and self.json["gameRounds"]:
for i, r in enumerate(self.json["gameRounds"]):
if i == 0:
self.json["gameRounds"][i]["tScore"] = 0
self.json["gameRounds"][i]["ctScore"] = 0
if self.json["gameRounds"][i]["winningSide"] == "ct":
self.json["gameRounds"][i]["endCTScore"] = 1
self.json["gameRounds"][i]["endTScore"] = 0
if self.json["gameRounds"][i]["winningSide"] == "t":
self.json["gameRounds"][i]["endCTScore"] = 0
self.json["gameRounds"][i]["endTScore"] = 1
elif i > 0:
self.json["gameRounds"][i]["tScore"] = self.json["gameRounds"][
i - 1
]["endTScore"]
self.json["gameRounds"][i]["ctScore"] = self.json["gameRounds"][
i - 1
]["endCTScore"]
if self.json["gameRounds"][i]["winningSide"] == "ct":
self.json["gameRounds"][i]["endCTScore"] = (
self.json["gameRounds"][i]["ctScore"] + 1
)
self.json["gameRounds"][i]["endTScore"] = self.json[
"gameRounds"
][i]["tScore"]
if self.json["gameRounds"][i]["winningSide"] == "t":
self.json["gameRounds"][i]["endCTScore"] = self.json[
"gameRounds"
][i]["ctScore"]
self.json["gameRounds"][i]["endTScore"] = (
self.json["gameRounds"][i]["tScore"] + 1
)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_bad_scoring(self) -> None:
"""Removes rounds where the scoring is bad.
We loop through the rounds:
        If the round ahead has an equal or lower total score, we do not add the current round.
        If the round ahead has a higher total score, we add the current round.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for i, r in enumerate(self.json["gameRounds"] or []):
current_round_total = (
r["tScore"] + r["endTScore"] + r["ctScore"] + r["endCTScore"]
)
if i < len(self.json["gameRounds"]) - 1: # type: ignore[arg-type]
# Non-OT rounds
lookahead_round = self.json["gameRounds"][i + 1] # type: ignore[index]
lookahead_round_total = (
lookahead_round["tScore"]
+ lookahead_round["endTScore"]
+ lookahead_round["ctScore"]
+ lookahead_round["endCTScore"]
)
if lookahead_round_total > current_round_total:
cleaned_rounds.append(r)
elif (r["endTScore"] == 16) & (r["endCTScore"] <= 14):
cleaned_rounds.append(r)
elif (r["endCTScore"] == 16) & (r["endTScore"] <= 14):
cleaned_rounds.append(r)
else:
# OT win scores are of the type:
                    # 15 + 4N with N a natural number (1, 2, 3, ...)
                    # So 19, 23, 27, ...
                    # So if you subtract 15 from an OT winning score, the result is divisible by 4
# OT_Scores = [19, 23, 27, 31, 35, 39, 43, 47]
if (
(r["endCTScore"] - 15) % 4 == 0
and r["endTScore"] < r["endCTScore"]
) or (
(r["endTScore"] - 15) % 4 == 0
and r["endCTScore"] < r["endTScore"]
):
cleaned_rounds.append(r)
# for s in OT_Scores:
# if (r["endCTScore"] == s) & (r["endTScore"] < s - 1):
# cleaned_rounds.append(r)
# elif (r["endTScore"] == s) & (r["endCTScore"] < s - 1):
# cleaned_rounds.append(r)
else:
lookback_round = self.json["gameRounds"][i - 1] # type: ignore[index]
lookback_round_total = (
lookback_round["tScore"]
+ lookback_round["endTScore"]
+ lookback_round["ctScore"]
+ lookback_round["endCTScore"]
)
if current_round_total > lookback_round_total:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_rounds_with_no_frames(self) -> None:
"""Removes rounds with no frames
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
if not self.parse_frames:
self.logger.warning(
"parse_frames is set to False, must be true for remove_no_frames to work. Skipping remove_no_frames."
)
else:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if len(r["frames"] or []) > 0:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_excess_players(self) -> None:
"""Removes rounds where there are more than 5 players on a side.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
if not self.parse_frames:
self.logger.warning(
"parse_frames is set to False, must be true for remove_excess_players to work. Skipping remove_excess_players."
)
else:
cleaned_rounds = []
# Remove rounds where the number of players is too large
for r in self.json["gameRounds"] or []:
if len(r["frames"] or []) > 0:
f = r["frames"][0] # type: ignore[index]
if f["ct"]["players"] is None:
if f["t"]["players"] is None:
pass
elif len(f["t"]["players"]) <= 5:
cleaned_rounds.append(r)
elif len(f["ct"]["players"]) <= 5:
if (f["t"]["players"] is None) or (
len(f["t"]["players"]) <= 5
):
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_warmups(self) -> None:
"""Removes warmup rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
# Remove warmups where the demo may have started recording in the middle of a warmup round
if "warmupChanged" in self.json["matchPhases"]:
if len(self.json["matchPhases"]["warmupChanged"] or []) > 1:
last_warmup_changed = self.json["matchPhases"]["warmupChanged"][1] # type: ignore[index]
for r in self.json["gameRounds"] or []:
if (r["startTick"] > last_warmup_changed) and (
not r["isWarmup"]
):
cleaned_rounds.append(r)
if r["startTick"] == last_warmup_changed:
cleaned_rounds.append(r)
else:
for r in self.json["gameRounds"] or []:
if not r["isWarmup"]:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_end_round(self, bad_endings: Optional[list[str]] = None) -> None:
"""Removes rounds with bad end reason.
Args:
bad_endings (list, optional): List of bad round end reasons. Defaults to ["Draw", "Unknown", ""].
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if bad_endings is None:
bad_endings = ["Draw", "Unknown", ""]
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if r["roundEndReason"] not in bad_endings:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_knife_rounds(self) -> None:
"""Removes knife rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if not r["isWarmup"]:
total_kills = len(r["kills"] or [])
total_knife_kills = 0
if total_kills > 0:
                        # We know this is safe because the len call gives 0
                        # and this branch is never entered if r["kills"] is None
# but mypy does not know this
for k in r["kills"]: # type: ignore[union-attr]
if k["weapon"] == "Knife":
total_knife_kills += 1
if (total_knife_kills != total_kills) | (total_knife_kills == 0):
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_excess_kill_rounds(self) -> None:
"""Removes rounds with more than 10 kills.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if not r["isWarmup"]:
if len(r["kills"] or []) <= 10:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def remove_time_rounds(self) -> None:
"""Remove rounds with odd round timings.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if (
(r["startTick"] <= r["endTick"])
and (r["startTick"] <= r["endOfficialTick"])
and (r["startTick"] <= r["freezeTimeEndTick"])
):
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (demofile: str = '', outpath: Optional[str] = None, demo_id: Optional[str] = None, log: bool = False, parse_rate: int = 128, parse_frames: bool = True, parse_kill_frames: bool = False, trade_time: int = 5, dmg_rolled: bool = False, parse_chat: bool = False, buy_style: str = 'hltv', json_indentation: bool = False) |
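A hedged end-to-end sketch of the class (the ``.dem`` path is a placeholder; ``parse_demo()`` shells out to ``parse_demo.go``, so Go >= 1.18 must be on the PATH):

```python
# Hedged usage sketch for DemoParser; the demo path is a placeholder.
from awpy.parser import DemoParser

parser = DemoParser(demofile="match.dem", parse_rate=128)
game = parser.parse()            # runs the Go parser, returns the JSON dict
dfs = parser.parse_json_to_df()  # reuse the loaded JSON as DataFrames
print(dfs["kills"].head())
```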
725,583 | awpy.parser.demoparser | __init__ | null | def __init__(
self,
demofile: str = "",
outpath: Optional[str] = None,
demo_id: Optional[str] = None,
log: bool = False,
parse_rate: int = 128,
parse_frames: bool = True,
parse_kill_frames: bool = False,
trade_time: int = 5,
dmg_rolled: bool = False,
parse_chat: bool = False,
buy_style: str = "hltv",
json_indentation: bool = False,
):
# Set up logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
)
self.logger = logging.getLogger("awpy")
self.logger.propagate = log
# Handle demofile and demo_id name. Only take the file name and remove the last extension.
self.demofile = os.path.abspath(demofile)
self.logger.info("Initialized awpy DemoParser with demofile %s", self.demofile)
if not demo_id: # (demo_id is None) | (demo_id == "")
self.demo_id = os.path.splitext(
os.path.basename(demofile.replace("\\", "/"))
)[0]
else:
self.demo_id = demo_id
self.logger.info("Setting demo id to %s", self.demo_id)
self.output_file = self.demo_id + ".json"
if outpath is None:
self.outpath = os.path.abspath(os.getcwd())
else:
self.outpath = os.path.abspath(outpath)
# Handle parse rate. If the parse rate is less than 64, likely to be slow
        if not isinstance(parse_rate, int) or parse_rate < 1:
            self.logger.warning(
                "Parse rate of %s not acceptable! Parse rate must be an integer greater than 0.",
                str(parse_rate),
            )
            parse_rate = 128
        self.parse_rate = parse_rate
        if 1 < parse_rate < 64:
            self.logger.warning(
                "A parse rate lower than 64 may be slow depending on the tickrate of the demo, which is usually 64 for MM and 128 for pro demos."
            )
        elif parse_rate >= 256:
            self.logger.warning(
                "A high parse rate means very few frames. Only use for testing purposes."
            )
        self.logger.info("Setting parse rate to %s", str(self.parse_rate))
# Handle trade time
self.trade_time = trade_time
if trade_time <= 0:
self.logger.warning(
"Trade time can't be negative, setting to default value of 5 seconds."
)
self.trade_time = 5
elif trade_time > 7:
self.logger.warning(
"Trade time of %s is rather long. Consider a value between 4-7.",
str(trade_time),
)
self.logger.info("Setting trade time to %s", str(self.trade_time))
# Handle buy style
if buy_style not in ["hltv", "csgo"]:
self.logger.warning(
"Buy style specified is not one of hltv, csgo, will be set to hltv by default"
)
self.buy_style = "hltv"
else:
self.buy_style = buy_style
self.logger.info("Setting buy style to %s", str(self.buy_style))
self.dmg_rolled = dmg_rolled
self.parse_chat = parse_chat
self.parse_frames = parse_frames
self.parse_kill_frames = parse_kill_frames
self.json_indentation = json_indentation
self.logger.info("Rollup damages set to %s", str(self.dmg_rolled))
self.logger.info("Parse chat set to %s", str(self.parse_chat))
self.logger.info("Parse frames set to %s", str(self.parse_frames))
self.logger.info("Parse kill frames set to %s", str(self.parse_kill_frames))
self.logger.info(
"Output json indentation set to %s", str(self.json_indentation)
)
# Set parse error to False
self.parse_error = False
# Initialize json attribute as None
self.json: Optional[Game] = None
| (self, demofile: str = '', outpath: Optional[str] = None, demo_id: Optional[str] = None, log: bool = False, parse_rate: int = 128, parse_frames: bool = True, parse_kill_frames: bool = False, trade_time: int = 5, dmg_rolled: bool = False, parse_chat: bool = False, buy_style: str = 'hltv', json_indentation: bool = False) |
725,584 | awpy.parser.demoparser | _parse_bomb_events | Returns bomb events as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a bomb event (defuse, plant, etc.)
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_bomb_events(self) -> pd.DataFrame:
"""Returns bomb events as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a bomb event (defuse, plant, etc.)
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
bomb_events = []
for r in self.json["gameRounds"] or []:
if r["bombEvents"] is not None:
for b in r["bombEvents"]:
new_b: dict[str, Any] = dict(b)
new_b["roundNum"] = r["roundNum"]
new_b["matchID"] = self.json["matchID"]
new_b["mapName"] = self.json["mapName"]
bomb_events.append(new_b)
return pd.DataFrame(bomb_events)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,585 | awpy.parser.demoparser | _parse_damages | Returns damages as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a damage event.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_damages(self) -> pd.DataFrame:
"""Returns damages as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a damage event.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
damages = []
for r in self.json["gameRounds"] or []:
if r["damages"] is not None:
for d in r["damages"]:
new_d: dict[str, Any] = dict(d)
new_d["roundNum"] = r["roundNum"]
new_d["matchID"] = self.json["matchID"]
new_d["mapName"] = self.json["mapName"]
damages.append(new_d)
return pd.DataFrame(damages)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,586 | awpy.parser.demoparser | _parse_flashes | Returns flashes as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a flash event.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_flashes(self) -> pd.DataFrame:
"""Returns flashes as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a flash event.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
flashes = []
for r in self.json["gameRounds"] or []:
if r["flashes"] is not None:
for f in r["flashes"]:
new_f: dict[str, Any] = dict(f)
new_f["roundNum"] = r["roundNum"]
new_f["matchId"] = self.json["matchID"]
new_f["mapName"] = self.json["mapName"]
flashes.append(new_f)
return pd.DataFrame(flashes)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,587 | awpy.parser.demoparser | _parse_frames | Returns frames as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a frame (game state) in the demo, which is a discrete point of time.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_frames(self) -> pd.DataFrame:
"""Returns frames as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a frame (game state) in the demo, which is a discrete point of time.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
frames_dataframes = []
for r in self.json["gameRounds"] or []:
for frame in r["frames"] or []:
frame_item: dict[str, Any] = {}
frame_item["roundNum"] = r["roundNum"]
for k in ("tick", "seconds"):
# Currently there is no better way:
# https://github.com/python/mypy/issues/9230
k = cast(Literal["tick", "seconds"], k)
frame_item[k] = frame[k]
frame_item["ctTeamName"] = frame["ct"]["teamName"]
frame_item["ctEqVal"] = frame["ct"]["teamEqVal"]
frame_item["ctAlivePlayers"] = frame["ct"]["alivePlayers"]
frame_item["ctUtility"] = frame["ct"]["totalUtility"]
frame_item["tTeamName"] = frame["t"]["teamName"]
frame_item["tEqVal"] = frame["t"]["teamEqVal"]
frame_item["tAlivePlayers"] = frame["t"]["alivePlayers"]
frame_item["tUtility"] = frame["t"]["totalUtility"]
frames_dataframes.append(frame_item)
            frames_df = pd.DataFrame(frames_dataframes)
            frames_df["matchID"] = self.json["matchID"]
            frames_df["mapName"] = self.json["mapName"]
            return frames_df
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,588 | awpy.parser.demoparser | _parse_grenades | Returns grenades as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a grenade throw
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_grenades(self) -> pd.DataFrame:
"""Returns grenades as a Pandas dataframe
Returns:
            A Pandas dataframe where each row is a grenade throw
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
grenades = []
for r in self.json["gameRounds"] or []:
if r["grenades"] is not None:
for g in r["grenades"]:
new_g: dict[str, Any] = dict(g)
new_g["roundNum"] = r["roundNum"]
new_g["matchID"] = self.json["matchID"]
new_g["mapName"] = self.json["mapName"]
grenades.append(new_g)
return pd.DataFrame(grenades)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,589 | awpy.parser.demoparser | _parse_kills | Returns kills as either a Pandas dataframe
Returns:
A Pandas dataframe where each row is a kill
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_kills(self) -> pd.DataFrame:
"""Returns kills as either a Pandas dataframe
Returns:
A Pandas dataframe where each row is a kill
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
kills = []
for r in self.json["gameRounds"] or []:
if r["kills"] is not None:
for k in r["kills"]:
new_k: dict[str, Any] = dict(k)
new_k["roundNum"] = r["roundNum"]
new_k["matchID"] = self.json["matchID"]
new_k["mapName"] = self.json["mapName"]
kills.append(new_k)
return pd.DataFrame(kills)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,590 | awpy.parser.demoparser | _parse_player_frames | Returns player frames as a Pandas dataframe.
Returns:
A Pandas dataframe where each row is a player's attributes at a given frame (game state).
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_player_frames(self) -> pd.DataFrame:
"""Returns player frames as a Pandas dataframe.
Returns:
A Pandas dataframe where each row is a player's attributes at a given frame (game state).
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
player_frames = []
for r in self.json["gameRounds"] or []:
for frame in r["frames"] or []:
for side in ["ct", "t"]:
# Currently there is no better way:
# https://github.com/python/mypy/issues/9230
side = cast(Literal["ct", "t"], side)
if frame[side]["players"] is not None and (
                        # The or [] should be unnecessary but mypy cannot handle this
len(frame[side]["players"] or [])
> 0 # Used to be == 5, to ensure the sides were equal.
):
# Same here
for player in frame[side]["players"] or []:
player_item: dict[str, Any] = {}
player_item["roundNum"] = r["roundNum"]
player_item["tick"] = frame["tick"]
player_item["seconds"] = frame["seconds"]
player_item["side"] = side
player_item["teamName"] = frame[side]["teamName"]
for col, val in player.items():
if col != "inventory":
player_item[col] = val
player_frames.append(player_item)
            player_frames_df = pd.DataFrame(player_frames)
            player_frames_df["matchID"] = self.json["matchID"]
            player_frames_df["mapName"] = self.json["mapName"]
            return player_frames_df
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,591 | awpy.parser.demoparser | _parse_rounds | Returns rounds as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a round
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_rounds(self) -> pd.DataFrame:
"""Returns rounds as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a round
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
rounds = []
# There is currently no better way than this monstrosity...
# https://github.com/python/mypy/issues/9230
# https://stackoverflow.com/a/64522240/7895542
cols_type = Literal[
"roundNum",
"startTick",
"freezeTimeEndTick",
"endTick",
"endOfficialTick",
"tScore",
"ctScore",
"endTScore",
"endCTScore",
"tTeam",
"ctTeam",
"winningSide",
"winningTeam",
"losingTeam",
"roundEndReason",
"ctFreezeTimeEndEqVal",
"ctRoundStartEqVal",
"ctRoundSpendMoney",
"ctBuyType",
"tFreezeTimeEndEqVal",
"tRoundStartEqVal",
"tRoundSpendMoney",
"tBuyType",
]
cols: list[cols_type] = list(get_args(cols_type))
for r in self.json["gameRounds"] or []:
round_item: dict[str, Any] = {}
for k in cols:
round_item[k] = r[k]
round_item["matchID"] = self.json["matchID"]
round_item["mapName"] = self.json["mapName"]
rounds.append(round_item)
return pd.DataFrame(rounds)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
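The ``Literal``/``get_args`` trick above lets one definition serve as both the static type of the dict keys and the runtime list of columns to copy. A minimal standalone sketch of the pattern:

```python
# Minimal sketch of the Literal/get_args pattern used by _parse_rounds.
from typing import Literal, get_args

ColName = Literal["roundNum", "startTick", "endTick"]
cols: list[ColName] = list(get_args(ColName))  # runtime list from the type

row = {"roundNum": 1, "startTick": 100, "endTick": 9000}
subset = {k: row[k] for k in cols}  # mypy can check each key access
print(subset)  # {'roundNum': 1, 'startTick': 100, 'endTick': 9000}
```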
725,592 | awpy.parser.demoparser | _parse_weapon_fires | Returns weapon fires as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a weapon fire event
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def _parse_weapon_fires(self) -> pd.DataFrame:
"""Returns weapon fires as either a list or Pandas dataframe
Returns:
A Pandas dataframe where each row is a weapon fire event
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
shots = []
for r in self.json["gameRounds"] or []:
if r["weaponFires"] is not None:
for wf in r["weaponFires"]:
new_wf: dict[str, Any] = dict(wf)
new_wf["roundNum"] = r["roundNum"]
new_wf["matchID"] = self.json["matchID"]
new_wf["mapName"] = self.json["mapName"]
shots.append(new_wf)
return pd.DataFrame(shots)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> pandas.core.frame.DataFrame |
725,593 | awpy.parser.demoparser | clean_rounds | Cleans a parsed demofile JSON.
Args:
remove_no_frames (bool, optional): Remove rounds where there are no frames. Defaults to True.
remove_warmups (bool, optional): Remove warmup rounds. Defaults to True.
remove_knifes (bool, optional): Remove knife rounds. Defaults to True.
remove_bad_timings (bool, optional): Remove bad timings. Defaults to True.
remove_excess_players (bool, optional): Remove rounds with more than 5 players. Defaults to True.
remove_excess_kills (bool, optional): Remove rounds with more than 10 kills. Defaults to True.
remove_bad_endings (bool, optional): Remove rounds with bad round end reasons. Defaults to True.
remove_bad_scoring (bool, optional): Remove rounds where the scoring is off (like scores going below the previous round's). Defaults to True.
return_type (str, optional): Return JSON or DataFrame. Defaults to "json".
save_to_json (bool, optional): Whether to write the JSON to a file. Defaults to True.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
ValueError: Raises a ValueError if the return type is neither 'json' nor 'df'
Returns:
dict: A dictionary of the cleaned demo.
| def clean_rounds(
self,
remove_no_frames: bool = True,
remove_warmups: bool = True,
remove_knifes: bool = True,
remove_bad_timings: bool = True,
remove_excess_players: bool = True,
remove_excess_kills: bool = True,
remove_bad_endings: bool = True,
remove_bad_scoring: bool = True,
return_type: str = "json",
save_to_json: bool = True,
) -> Union[Game, dict[str, Any]]:
"""Cleans a parsed demofile JSON.
Args:
        remove_no_frames (bool, optional): Remove rounds where there are no frames. Defaults to True.
remove_warmups (bool, optional): Remove warmup rounds. Defaults to True.
remove_knifes (bool, optional): Remove knife rounds. Defaults to True.
remove_bad_timings (bool, optional): Remove bad timings. Defaults to True.
remove_excess_players (bool, optional): Remove rounds with more than 5 players. Defaults to True.
remove_excess_kills (bool, optional): Remove rounds with more than 10 kills. Defaults to True.
remove_bad_endings (bool, optional): Remove rounds with bad round end reasons. Defaults to True.
        remove_bad_scoring (bool, optional): Remove rounds where the scoring is off (like scores going below the previous round's). Defaults to True.
return_type (str, optional): Return JSON or DataFrame. Defaults to "json".
save_to_json (bool, optional): Whether to write the JSON to a file. Defaults to True.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
ValueError: Raises a ValueError if the return type is neither 'json' nor 'df'
Returns:
dict: A dictionary of the cleaned demo.
"""
if self.json:
if remove_no_frames:
self.remove_rounds_with_no_frames()
if remove_warmups:
self.remove_warmups()
if remove_knifes:
self.remove_knife_rounds()
if remove_bad_timings:
self.remove_time_rounds()
if remove_excess_players:
self.remove_excess_players()
if remove_excess_kills:
self.remove_excess_kill_rounds()
if remove_bad_endings:
self.remove_end_round()
if remove_bad_scoring:
self.remove_bad_scoring()
self.renumber_rounds()
# self.rescore_rounds() -- Need to edit to take into account half switches
if save_to_json:
self.write_json()
if return_type == "json":
return self.json
elif return_type == "df":
demo_data = self.parse_json_to_df()
self.logger.info("Returned cleaned dataframe output")
return demo_data
raise ValueError(
f"Invalid return_type of {return_type}. Use 'json' or 'df' instead!"
)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self, remove_no_frames: bool = True, remove_warmups: bool = True, remove_knifes: bool = True, remove_bad_timings: bool = True, remove_excess_players: bool = True, remove_excess_kills: bool = True, remove_bad_endings: bool = True, remove_bad_scoring: bool = True, return_type: str = 'json', save_to_json: bool = True) -> Union[awpy.types.Game, dict[str, Any]] |
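Usage note: a minimal sketch, assuming parser is a DemoParser whose .json attribute is already populated; the keyword values simply mirror the defaults documented above.

# Clean in place and return the cleaned JSON dict without rewriting the file on disk.
cleaned = parser.clean_rounds(
    remove_knifes=True,
    remove_bad_scoring=True,
    return_type="json",
    save_to_json=False,
)
print(len(cleaned["gameRounds"] or []))  # rounds remaining after cleaning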
725,594 | awpy.parser.demoparser | parse | Wrapper for parse_demo() and read_json(). Use to parse a demo.
Args:
return_type (string, optional): Either "json" or "df". Default is "json"
clean (bool, optional): True to run clean_rounds, otherwise, uncleaned data is returned. Defaults to True.
Returns:
A dictionary of output (which is parsed to a JSON file in the working directory)
Raises:
ValueError: Raises a ValueError if the return_type is not "json" or "df"
AttributeError: Raises an AttributeError if the .json attribute is None
| def parse(
self, return_type: str = "json", clean: bool = True
) -> Union[Game, dict[str, Any]]:
"""Wrapper for parse_demo() and read_json(). Use to parse a demo.
Args:
return_type (string, optional): Either "json" or "df". Default is "json"
clean (bool, optional): True to run clean_rounds, otherwise, uncleaned data is returned. Defaults to True.
Returns:
A dictionary of output (which is parsed to a JSON file in the working directory)
Raises:
ValueError: Raises a ValueError if the return_type is not "json" or "df"
AttributeError: Raises an AttributeError if the .json attribute is None
"""
self.parse_demo()
self.read_json(json_path=self.outpath + "/" + self.output_file)
if clean:
clean_data = self.clean_rounds()
if self.json:
self.logger.info("JSON output found")
if return_type == "json":
return self.json
elif return_type == "df":
demo_data = self.parse_json_to_df()
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error("Parse return_type must be either 'json' or 'df'")
raise ValueError("return_type must be either 'json' or 'df'")
else:
self.logger.error("JSON couldn't be returned")
raise AttributeError("No JSON parsed! Error in producing JSON.")
| (self, return_type: str = 'json', clean: bool = True) -> Union[awpy.types.Game, dict[str, Any]] |
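Usage note: a minimal sketch; the demofile path and demo_id are hypothetical, and the constructor keywords are inferred from the attributes referenced in parse_demo below.

from awpy.parser.demoparser import DemoParser

parser = DemoParser(demofile="match.dem", demo_id="demo1", parse_rate=128)
data = parser.parse(return_type="df", clean=True)
kills = data["kills"]  # with return_type="df", parse returns a dict of DataFrames keyed by event type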
725,595 | awpy.parser.demoparser | parse_demo | Parse a demofile using the Go script parse_demo.go -- this function needs the .demofile to be set in the class, and the file needs to exist.
Returns:
Outputs a JSON file to current working directory.
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.18
FileNotFoundError: Raises a FileNotFoundError if the demofile path does not exist.
| def parse_demo(self) -> None:
"""Parse a demofile using the Go script parse_demo.go -- this function needs the .demofile to be set in the class, and the file needs to exist.
Returns:
Outputs a JSON file to current working directory.
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.18
FileNotFoundError: Raises a FileNotFoundError if the demofile path does not exist.
"""
# Check if Golang version is compatible
acceptable_go = check_go_version()
if not acceptable_go:
error_message = "Error calling Go. Check if Go is installed using 'go version'. Need at least v1.18.0."
self.logger.error(error_message)
raise ValueError(error_message)
else:
self.logger.info("Go version>=1.18.0")
# Check if demofile exists
if not os.path.exists(os.path.abspath(self.demofile)):
self.logger.error("Demofile path does not exist!")
raise FileNotFoundError("Demofile path does not exist!")
path = os.path.join(os.path.dirname(__file__), "")
self.logger.info("Running Golang parser from %s", path)
self.logger.info("Looking for file at %s", self.demofile)
self.parser_cmd = [
"go",
"run",
"parse_demo.go",
"-demo",
self.demofile,
"-parserate",
str(self.parse_rate),
"-tradetime",
str(self.trade_time),
"-buystyle",
str(self.buy_style),
"-demoid",
str(self.demo_id),
"-out",
self.outpath,
]
if self.dmg_rolled:
self.parser_cmd.append("--dmgrolled")
if self.parse_frames:
self.parser_cmd.append("--parseframes")
if self.parse_kill_frames:
self.parser_cmd.append("--parsekillframes")
if self.json_indentation:
self.parser_cmd.append("--jsonindentation")
if self.parse_chat:
self.parser_cmd.append("--parsechat")
proc = subprocess.Popen(
self.parser_cmd,
stdout=subprocess.PIPE,
cwd=path,
)
stdout = proc.stdout.read().splitlines() if proc.stdout is not None else None
self.output_file = self.demo_id + ".json"
if os.path.isfile(self.outpath + "/" + self.output_file):
self.logger.info("Wrote demo parse output to %s", self.output_file)
self.parse_error = False
else:
self.parse_error = True
self.logger.error("No file produced, error in calling Golang")
self.logger.error(stdout)
| (self) -> NoneType |
725,596 | awpy.parser.demoparser | parse_json_to_df | Returns JSON into dictionary where keys correspond to data frames
Returns:
A dictionary of output
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def parse_json_to_df(self) -> dict[str, Any]:
"""Returns JSON into dictionary where keys correspond to data frames
Returns:
A dictionary of output
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
demo_data: dict[str, Any] = {}
demo_data["matchID"] = self.json["matchID"]
demo_data["clientName"] = self.json["clientName"]
demo_data["mapName"] = self.json["mapName"]
demo_data["tickRate"] = self.json["tickRate"]
demo_data["playbackTicks"] = self.json["playbackTicks"]
# Rounds
demo_data["rounds"] = self._parse_rounds()
# Kills
demo_data["kills"] = self._parse_kills()
demo_data["kills"]["attackerSteamID"] = demo_data["kills"][
"attackerSteamID"
].astype(pd.Int64Dtype())
demo_data["kills"]["victimSteamID"] = demo_data["kills"][
"victimSteamID"
].astype(pd.Int64Dtype())
demo_data["kills"]["assisterSteamID"] = demo_data["kills"][
"assisterSteamID"
].astype(pd.Int64Dtype())
demo_data["kills"]["flashThrowerSteamID"] = demo_data["kills"][
"flashThrowerSteamID"
].astype(pd.Int64Dtype())
# Damages
demo_data["damages"] = self._parse_damages()
demo_data["damages"]["attackerSteamID"] = demo_data["damages"][
"attackerSteamID"
].astype(pd.Int64Dtype())
demo_data["damages"]["victimSteamID"] = demo_data["damages"][
"victimSteamID"
].astype(pd.Int64Dtype())
# Grenades
demo_data["grenades"] = self._parse_grenades()
demo_data["grenades"]["throwerSteamID"] = demo_data["grenades"][
"throwerSteamID"
].astype(pd.Int64Dtype())
# Flashes
demo_data["flashes"] = self._parse_flashes()
demo_data["flashes"]["attackerSteamID"] = demo_data["flashes"][
"attackerSteamID"
].astype(pd.Int64Dtype())
demo_data["flashes"]["playerSteamID"] = demo_data["flashes"][
"playerSteamID"
].astype(pd.Int64Dtype())
# Weapon Fires
demo_data["weaponFires"] = self._parse_weapon_fires()
demo_data["weaponFires"]["playerSteamID"] = demo_data["weaponFires"][
"playerSteamID"
].astype(pd.Int64Dtype())
# Bomb Events
demo_data["bombEvents"] = self._parse_bomb_events()
demo_data["bombEvents"]["playerSteamID"] = demo_data["bombEvents"][
"playerSteamID"
].astype(pd.Int64Dtype())
# Frames
demo_data["frames"] = self._parse_frames()
# Player Frames
demo_data["playerFrames"] = self._parse_player_frames()
demo_data["playerFrames"]["steamID"] = demo_data["playerFrames"][
"steamID"
].astype(pd.Int64Dtype())
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> dict[str, typing.Any] |
725,597 | awpy.parser.demoparser | read_json | Reads the JSON file given a JSON path. Can be used to read in already processed demofiles.
Args:
json_path (string): Path to JSON file
Returns:
JSON in Python dictionary form
Raises:
FileNotFoundError: Raises a FileNotFoundError if the JSON path doesn't exist
| def read_json(self, json_path: str):
"""Reads the JSON file given a JSON path. Can be used to read in already processed demofiles.
Args:
json_path (string): Path to JSON file
Returns:
JSON in Python dictionary form
Raises:
FileNotFoundError: Raises a FileNotFoundError if the JSON path doesn't exist
"""
# Check if JSON exists
if not os.path.exists(json_path):
self.logger.error("JSON path does not exist!")
raise FileNotFoundError("JSON path does not exist!")
# Read in json to .json attribute
with open(json_path, encoding="utf8") as f:
demo_data: Game = json.load(f)
self.json = demo_data
self.logger.info(
"JSON data loaded, available in the `json` attribute to parser"
)
return demo_data
| (self, json_path: str) |
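Usage note: a minimal sketch for re-loading an already-processed demo without re-running the Go parser; the file names are hypothetical.

parser = DemoParser(demofile="match.dem", demo_id="demo1")
game = parser.read_json(json_path="demo1.json")  # also stored on parser.json
print(game["mapName"], len(game["gameRounds"] or []))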
725,598 | awpy.parser.demoparser | remove_bad_scoring | Removes rounds where the scoring is bad.
We loop through the rounds:
If the round ahead has an equal or lower total score, we do not add the current round.
If the round ahead has a higher total score, we add the current round.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_bad_scoring(self) -> None:
"""Removes rounds where the scoring is bad.
We loop through the rounds:
If the round ahead has an equal or lower total score, we do not add the current round.
If the round ahead has a higher total score, we add the current round.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for i, r in enumerate(self.json["gameRounds"] or []):
current_round_total = (
r["tScore"] + r["endTScore"] + r["ctScore"] + r["endCTScore"]
)
if i < len(self.json["gameRounds"]) - 1: # type: ignore[arg-type]
# Non-OT rounds
lookahead_round = self.json["gameRounds"][i + 1] # type: ignore[index]
lookahead_round_total = (
lookahead_round["tScore"]
+ lookahead_round["endTScore"]
+ lookahead_round["ctScore"]
+ lookahead_round["endCTScore"]
)
if lookahead_round_total > current_round_total:
cleaned_rounds.append(r)
elif (r["endTScore"] == 16) & (r["endCTScore"] <= 14):
cleaned_rounds.append(r)
elif (r["endCTScore"] == 16) & (r["endTScore"] <= 14):
cleaned_rounds.append(r)
else:
# OT win scores are of the type:
# 15 + (4xN) with N a natural number (1, 2, 3, ...)
# So 19, 23, 27, ...
# So if you subtract 15 from an OT winning round score, the number is divisible by 4
# OT_Scores = [19, 23, 27, 31, 35, 39, 43, 47]
if (
(r["endCTScore"] - 15) % 4 == 0
and r["endTScore"] < r["endCTScore"]
) or (
(r["endTScore"] - 15) % 4 == 0
and r["endCTScore"] < r["endTScore"]
):
cleaned_rounds.append(r)
# for s in OT_Scores:
# if (r["endCTScore"] == s) & (r["endTScore"] < s - 1):
# cleaned_rounds.append(r)
# elif (r["endTScore"] == s) & (r["endCTScore"] < s - 1):
# cleaned_rounds.append(r)
else:
lookback_round = self.json["gameRounds"][i - 1] # type: ignore[index]
lookback_round_total = (
lookback_round["tScore"]
+ lookback_round["endTScore"]
+ lookback_round["ctScore"]
+ lookback_round["endCTScore"]
)
if current_round_total > lookback_round_total:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
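To make the overtime divisibility rule in the comments above concrete: valid OT winning scores satisfy (score - 15) % 4 == 0, so 19, 23 and 27 pass while 20 does not.

for score in (19, 23, 27, 20):
    print(score, (score - 15) % 4 == 0)  # True, True, True, False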
725,599 | awpy.parser.demoparser | remove_end_round | Removes rounds with bad end reason.
Args:
bad_endings (list, optional): List of bad round end reasons. Defaults to ["Draw", "Unknown", ""].
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_end_round(self, bad_endings: Optional[list[str]] = None) -> None:
"""Removes rounds with bad end reason.
Args:
bad_endings (list, optional): List of bad round end reasons. Defaults to ["Draw", "Unknown", ""].
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if bad_endings is None:
bad_endings = ["Draw", "Unknown", ""]
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if r["roundEndReason"] not in bad_endings:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self, bad_endings: Optional[list[str]] = None) -> NoneType |
725,600 | awpy.parser.demoparser | remove_excess_kill_rounds | Removes rounds with more than 10 kills.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_excess_kill_rounds(self) -> None:
"""Removes rounds with more than 10 kills.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if not r["isWarmup"]:
if len(r["kills"] or []) <= 10:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,601 | awpy.parser.demoparser | remove_excess_players | Removes rounds where there are more than 5 players on a side.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_excess_players(self) -> None:
"""Removes rounds where there are more than 5 players on a side.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
if not self.parse_frames:
self.logger.warning(
"parse_frames is set to False, must be true for remove_excess_players to work. Skipping remove_excess_players."
)
else:
cleaned_rounds = []
# Remove rounds where the number of players is too large
for r in self.json["gameRounds"] or []:
if len(r["frames"] or []) > 0:
f = r["frames"][0] # type: ignore[index]
if f["ct"]["players"] is None:
if f["t"]["players"] is None:
pass
elif len(f["t"]["players"]) <= 5:
cleaned_rounds.append(r)
elif len(f["ct"]["players"]) <= 5:
if (f["t"]["players"] is None) or (
len(f["t"]["players"]) <= 5
):
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,602 | awpy.parser.demoparser | remove_knife_rounds | Removes knife rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_knife_rounds(self) -> None:
"""Removes knife rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if not r["isWarmup"]:
total_kills = len(r["kills"] or [])
total_knife_kills = 0
if total_kills > 0:
# We know this is safe because the len call gives 0
# and this branch is never entered if r["kills"] is None
# but mypy does not know this
for k in r["kills"]: # type: ignore[union-attr]
if k["weapon"] == "Knife":
total_knife_kills += 1
if (total_knife_kills != total_kills) | (total_knife_kills == 0):
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,603 | awpy.parser.demoparser | remove_rounds_with_no_frames | Removes rounds with no frames
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_rounds_with_no_frames(self) -> None:
"""Removes rounds with no frames
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
if not self.parse_frames:
self.logger.warning(
"parse_frames is set to False, must be true for remove_no_frames to work. Skipping remove_no_frames."
)
else:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if len(r["frames"] or []) > 0:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,604 | awpy.parser.demoparser | remove_time_rounds | Remove rounds with odd round timings.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_time_rounds(self) -> None:
"""Remove rounds with odd round timings.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
for r in self.json["gameRounds"] or []:
if (
(r["startTick"] <= r["endTick"])
and (r["startTick"] <= r["endOfficialTick"])
and (r["startTick"] <= r["freezeTimeEndTick"])
):
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,605 | awpy.parser.demoparser | remove_warmups | Removes warmup rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
| def remove_warmups(self) -> None:
"""Removes warmup rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
cleaned_rounds = []
# Remove warmups where the demo may have started recording in the middle of a warmup round
if "warmupChanged" in self.json["matchPhases"]:
if len(self.json["matchPhases"]["warmupChanged"] or []) > 1:
last_warmup_changed = self.json["matchPhases"]["warmupChanged"][1] # type: ignore[index]
for r in self.json["gameRounds"] or []:
if (r["startTick"] > last_warmup_changed) and (
not r["isWarmup"]
):
cleaned_rounds.append(r)
if r["startTick"] == last_warmup_changed:
cleaned_rounds.append(r)
else:
for r in self.json["gameRounds"] or []:
if not r["isWarmup"]:
cleaned_rounds.append(r)
self.json["gameRounds"] = cleaned_rounds
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,606 | awpy.parser.demoparser | renumber_rounds | Renumbers the rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None or has no "gameRounds" key.
| def renumber_rounds(self) -> None:
"""Renumbers the rounds.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None or has no "gameRounds" key.
"""
if self.json and self.json["gameRounds"]:
for i, r in enumerate(self.json["gameRounds"]):
self.json["gameRounds"][i]["roundNum"] = i + 1
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,607 | awpy.parser.demoparser | rescore_rounds | Rescore the rounds based on round end reason.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None or has no "gameRounds" key.
| def rescore_rounds(self) -> None:
"""Rescore the rounds based on round end reason.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None or has no "gameRounds" key.
"""
if self.json and self.json["gameRounds"]:
for i, r in enumerate(self.json["gameRounds"]):
if i == 0:
self.json["gameRounds"][i]["tScore"] = 0
self.json["gameRounds"][i]["ctScore"] = 0
if self.json["gameRounds"][i]["winningSide"] == "ct":
self.json["gameRounds"][i]["endCTScore"] = 1
self.json["gameRounds"][i]["endTScore"] = 0
if self.json["gameRounds"][i]["winningSide"] == "t":
self.json["gameRounds"][i]["endCTScore"] = 0
self.json["gameRounds"][i]["endTScore"] = 1
elif i > 0:
self.json["gameRounds"][i]["tScore"] = self.json["gameRounds"][
i - 1
]["endTScore"]
self.json["gameRounds"][i]["ctScore"] = self.json["gameRounds"][
i - 1
]["endCTScore"]
if self.json["gameRounds"][i]["winningSide"] == "ct":
self.json["gameRounds"][i]["endCTScore"] = (
self.json["gameRounds"][i]["ctScore"] + 1
)
self.json["gameRounds"][i]["endTScore"] = self.json[
"gameRounds"
][i]["tScore"]
if self.json["gameRounds"][i]["winningSide"] == "t":
self.json["gameRounds"][i]["endCTScore"] = self.json[
"gameRounds"
][i]["ctScore"]
self.json["gameRounds"][i]["endTScore"] = (
self.json["gameRounds"][i]["tScore"] + 1
)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
| (self) -> NoneType |
725,608 | awpy.parser.demoparser | write_json | Rewrite the JSON file | def write_json(self) -> None:
"""Rewrite the JSON file"""
with open(self.outpath + "/" + self.output_file, "w", encoding="utf8") as fp:
json.dump(self.json, fp, indent=(1 if self.json_indentation else None))
| (self) -> NoneType |
725,612 | flask_redis.client | FlaskRedis | null | class FlaskRedis(object):
def __init__(self, app=None, strict=True, config_prefix="REDIS", **kwargs):
self._redis_client = None
self.provider_class = redis.StrictRedis if strict else redis.Redis
self.provider_kwargs = kwargs
self.config_prefix = config_prefix
if app is not None:
self.init_app(app)
@classmethod
def from_custom_provider(cls, provider, app=None, **kwargs):
assert provider is not None, "your custom provider is None, come on"
# We never pass the app parameter here, so we can call init_app
# ourselves later, after the provider class has been set
instance = cls(**kwargs)
instance.provider_class = provider
if app is not None:
instance.init_app(app)
return instance
def init_app(self, app, **kwargs):
redis_url = app.config.get(
"{0}_URL".format(self.config_prefix), "redis://localhost:6379/0"
)
self.provider_kwargs.update(kwargs)
self._redis_client = self.provider_class.from_url(
redis_url, **self.provider_kwargs
)
if not hasattr(app, "extensions"):
app.extensions = {}
app.extensions[self.config_prefix.lower()] = self
def __getattr__(self, name):
return getattr(self._redis_client, name)
def __getitem__(self, name):
return self._redis_client[name]
def __setitem__(self, name, value):
self._redis_client[name] = value
def __delitem__(self, name):
del self._redis_client[name]
| (app=None, strict=True, config_prefix='REDIS', **kwargs) |
725,613 | flask_redis.client | __delitem__ | null | def __delitem__(self, name):
del self._redis_client[name]
| (self, name) |
725,614 | flask_redis.client | __getattr__ | null | def __getattr__(self, name):
return getattr(self._redis_client, name)
| (self, name) |
725,615 | flask_redis.client | __getitem__ | null | def __getitem__(self, name):
return self._redis_client[name]
| (self, name) |
725,616 | flask_redis.client | __init__ | null | def __init__(self, app=None, strict=True, config_prefix="REDIS", **kwargs):
self._redis_client = None
self.provider_class = redis.StrictRedis if strict else redis.Redis
self.provider_kwargs = kwargs
self.config_prefix = config_prefix
if app is not None:
self.init_app(app)
| (self, app=None, strict=True, config_prefix='REDIS', **kwargs) |
725,617 | flask_redis.client | __setitem__ | null | def __setitem__(self, name, value):
self._redis_client[name] = value
| (self, name, value) |
725,618 | flask_redis.client | init_app | null | def init_app(self, app, **kwargs):
redis_url = app.config.get(
"{0}_URL".format(self.config_prefix), "redis://localhost:6379/0"
)
self.provider_kwargs.update(kwargs)
self._redis_client = self.provider_class.from_url(
redis_url, **self.provider_kwargs
)
if not hasattr(app, "extensions"):
app.extensions = {}
app.extensions[self.config_prefix.lower()] = self
| (self, app, **kwargs) |
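Usage note: a minimal sketch of the two initialization styles, assuming a Redis server reachable at the documented default URL.

from flask import Flask
from flask_redis import FlaskRedis

app = Flask(__name__)
app.config["REDIS_URL"] = "redis://localhost:6379/0"
redis_client = FlaskRedis(app)  # or: redis_client = FlaskRedis(); redis_client.init_app(app)
redis_client.set("greeting", "hello")  # attribute access proxies to the underlying redis client
print(redis_client.get("greeting"))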
725,620 | zep_python.exceptions | APIError |
Raised when the API response format is unexpected.
Inherits from ZepClientError.
| class APIError(ZepClientError):
"""
Raised when the API response format is unexpected.
Inherits from ZepClientError.
"""
def __init__(
self, response: Union[httpx.Response, None] = None, message: str = "API error"
) -> None:
if response:
response_data = {
"status_code": response.status_code,
"message": response.text,
}
else:
response_data = None
super().__init__(message=message, response_data=response_data)
| (response: 'Union[httpx.Response, None]' = None, message: 'str' = 'API error') -> 'None' |
725,621 | zep_python.exceptions | __init__ | null | def __init__(
self, response: Union[httpx.Response, None] = None, message: str = "API error"
) -> None:
if response:
response_data = {
"status_code": response.status_code,
"message": response.text,
}
else:
response_data = None
super().__init__(message=message, response_data=response_data)
| (self, response: Optional[httpx.Response] = None, message: str = 'API error') -> NoneType |
725,622 | zep_python.exceptions | __str__ | null | def __str__(self):
return f"{self.message}: {self.response_data}"
| (self) |
725,623 | zep_python.memory.models | Memory |
Represents a memory object with messages, metadata, and other attributes.
Attributes
----------
messages : Optional[List[Dict[str, Any]]]
A list of message objects, where each message contains a role and content.
metadata : Optional[Dict[str, Any]]
A dictionary containing metadata associated with the memory.
summary : Optional[Summary]
A Summary object.
uuid : Optional[str]
A unique identifier for the memory.
created_at : Optional[str]
The timestamp when the memory was created.
token_count : Optional[int]
The token count of the memory.
Methods
-------
to_dict() -> Dict[str, Any]:
Returns a dictionary representation of the message.
| class Memory(BaseModel):
"""
Represents a memory object with messages, metadata, and other attributes.
Attributes
----------
messages : Optional[List[Dict[str, Any]]]
A list of message objects, where each message contains a role and content.
metadata : Optional[Dict[str, Any]]
A dictionary containing metadata associated with the memory.
summary : Optional[Summary]
A Summary object.
uuid : Optional[str]
A unique identifier for the memory.
created_at : Optional[str]
The timestamp when the memory was created.
token_count : Optional[int]
The token count of the memory.
Methods
-------
to_dict() -> Dict[str, Any]:
Returns a dictionary representation of the message.
"""
messages: List[Message] = Field(
default=[], description="A List of Messages or empty List is required"
)
metadata: Optional[Dict[str, Any]] = Field(optional=True, default=None)
summary: Optional[Summary] = Field(optional=True, default=None)
uuid: Optional[str] = Field(optional=True, default=None)
created_at: Optional[str] = Field(optional=True, default=None)
token_count: Optional[int] = Field(optional=True, default=None)
def to_dict(self) -> Dict[str, Any]:
return self.dict()
| (*, messages: List[zep_python.message.models.Message] = [], metadata: Optional[Dict[str, Any]] = None, summary: Optional[zep_python.memory.models.Summary] = None, uuid: Optional[str] = None, created_at: Optional[str] = None, token_count: Optional[int] = None) -> None |
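Usage note: a minimal sketch built only from the fields above; the message contents are hypothetical.

from zep_python.memory.models import Memory
from zep_python.message.models import Message

memory = Memory(
    messages=[
        Message(role="user", content="Who was Octavia Butler?"),
        Message(role="assistant", content="An American science fiction author."),
    ]
)
print(memory.to_dict()["messages"][0]["role"])  # -> "user"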
725,645 | zep_python.memory.models | to_dict | null | def to_dict(self) -> Dict[str, Any]:
return self.dict()
| (self) -> Dict[str, Any] |
725,646 | zep_python.memory.models | MemorySearchPayload |
Represents a search payload for querying memory.
Attributes
----------
metadata : Dict[str, Any]
Metadata associated with the search query.
text : str
The text of the search query.
search_scope : Optional[str]
Search over messages or summaries. Defaults to "messages".
Must be one of "messages" or "summary".
search_type : Optional[str]
The type of search to perform. Defaults to "similarity".
Must be one of "similarity" or "mmr".
mmr_lambda : Optional[float]
The lambda parameter for the MMR Reranking Algorithm.
| class MemorySearchPayload(BaseModel):
"""
Represents a search payload for querying memory.
Attributes
----------
metadata : Dict[str, Any]
Metadata associated with the search query.
text : str
The text of the search query.
search_scope : Optional[str]
Search over messages or summaries. Defaults to "messages".
Must be one of "messages" or "summary".
search_type : Optional[str]
The type of search to perform. Defaults to "similarity".
Must be one of "similarity" or "mmr".
mmr_lambda : Optional[float]
The lambda parameter for the MMR Reranking Algorithm.
"""
text: Optional[str] = Field(default=None)
metadata: Optional[Dict[str, Any]] = Field(default=None)
search_scope: Optional[str] = Field(default="messages")
search_type: Optional[str] = Field(default="similarity")
mmr_lambda: Optional[float] = Field(default=None)
| (*, text: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None, search_scope: Optional[str] = 'messages', search_type: Optional[str] = 'similarity', mmr_lambda: Optional[float] = None) -> None |
725,668 | zep_python.memory.models | MemorySearchResult |
Represents a search result from querying memory.
Attributes
----------
message : Optional[Dict[str, Any]]
The message matched by search.
summary : Optional[Summary]
The summary matched by search.
metadata : Optional[Dict[str, Any]]
Metadata associated with the search result.
dist : Optional[float]
The distance metric of the search result.
| class MemorySearchResult(BaseModel):
"""
Represents a search result from querying memory.
Attributes
----------
message : Optional[Dict[str, Any]]
The message matched by search.
summary : Optional[Summary]
The summary matched by search.
metadata : Optional[Dict[str, Any]]
Metadata associated with the search result.
dist : Optional[float]
The distance metric of the search result.
"""
# TODO: Legacy bug. message should be a Message object.
message: Optional[Dict[str, Any]] = None
summary: Optional[Summary] = None
metadata: Optional[Dict[str, Any]] = None
dist: Optional[float] = None
| (*, message: Optional[Dict[str, Any]] = None, summary: Optional[zep_python.memory.models.Summary] = None, metadata: Optional[Dict[str, Any]] = None, dist: Optional[float] = None) -> None |
725,690 | zep_python.message.models | Message |
Represents a message in a conversation.
Attributes
----------
uuid : str, optional
The unique identifier of the message.
created_at : str, optional
The timestamp of when the message was created.
role : str
The role of the sender of the message (e.g., "user", "assistant").
content : str
The content of the message.
token_count : int, optional
The number of tokens in the message.
Methods
-------
to_dict() -> Dict[str, Any]:
Returns a dictionary representation of the message.
| class Message(BaseModel):
"""
Represents a message in a conversation.
Attributes
----------
uuid : str, optional
The unique identifier of the message.
created_at : str, optional
The timestamp of when the message was created.
role : str
The role of the sender of the message (e.g., "user", "assistant").
content : str
The content of the message.
token_count : int, optional
The number of tokens in the message.
Methods
-------
to_dict() -> Dict[str, Any]:
Returns a dictionary representation of the message.
"""
role: str = Field("A role is required")
content: str = Field("Content is required")
uuid: Optional[str] = Field(optional=True, default=None)
created_at: Optional[str] = Field(optional=True, default=None)
token_count: Optional[int] = Field(optional=True, default=None)
metadata: Optional[Dict[str, Any]] = Field(optional=True, default=None)
def to_dict(self) -> Dict[str, Any]:
"""
Returns a dictionary representation of the message.
Returns
-------
Dict[str, Any]
A dictionary containing the attributes of the message.
"""
return self.dict()
| (*, role: str = 'A role is required', content: str = 'Content is required', uuid: Optional[str] = None, created_at: Optional[str] = None, token_count: Optional[int] = None, metadata: Optional[Dict[str, Any]] = None) -> None |
725,712 | zep_python.message.models | to_dict |
Returns a dictionary representation of the message.
Returns
-------
Dict[str, Any]
A dictionary containing the attributes of the message.
| def to_dict(self) -> Dict[str, Any]:
"""
Returns a dictionary representation of the message.
Returns
-------
Dict[str, Any]
A dictionary containing the attributes of the message.
"""
return self.dict()
| (self) -> Dict[str, Any] |
725,713 | zep_python.exceptions | NotFoundError |
Raised when the API response contains no results.
Inherits from ZepClientError.
| class NotFoundError(ZepClientError):
"""
Raised when the API response contains no results.
Inherits from ZepClientError.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
| (message: 'str') -> 'None' |
725,716 | zep_python.memory.models | Session |
Represents a session object with a unique identifier, metadata,
and other attributes.
Attributes
----------
uuid : Optional[str]
A unique identifier for the session.
This is generated server-side and is not expected to be present on creation.
created_at : str
The timestamp when the session was created.
Generated by the server.
updated_at : str
The timestamp when the session was last updated.
Generated by the server.
deleted_at : Optional[datetime]
The timestamp when the session was deleted.
Generated by the server.
session_id : str
The unique identifier of the session.
metadata : Dict[str, Any]
The metadata associated with the session.
| class Session(BaseModel):
"""
Represents a session object with a unique identifier, metadata,
and other attributes.
Attributes
----------
uuid : Optional[str]
A unique identifier for the session.
This is generated server-side and is not expected to be present on creation.
created_at : str
The timestamp when the session was created.
Generated by the server.
updated_at : str
The timestamp when the session was last updated.
Generated by the server.
deleted_at : Optional[datetime]
The timestamp when the session was deleted.
Generated by the server.
session_id : str
The unique identifier of the session.
metadata : Dict[str, Any]
The metadata associated with the session.
"""
uuid: Optional[str] = None
id: Optional[int] = None
created_at: Optional[str] = None
updated_at: Optional[str] = None
deleted_at: Optional[str] = None
session_id: str
user_id: Optional[str] = None
metadata: Optional[Dict[str, Any]] = None
| (*, uuid: Optional[str] = None, id: Optional[int] = None, created_at: Optional[str] = None, updated_at: Optional[str] = None, deleted_at: Optional[str] = None, session_id: str, user_id: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> None |
725,738 | zep_python.memory.models | Summary |
Represents a summary of a conversation.
Attributes
----------
uuid : str
The unique identifier of the summary.
created_at : str
The timestamp of when the summary was created.
content : str
The content of the summary.
recent_message_uuid : str
The unique identifier of the most recent message in the conversation.
token_count : int
The number of tokens in the summary.
Methods
-------
to_dict() -> Dict[str, Any]:
Returns a dictionary representation of the summary.
| class Summary(BaseModel):
"""
Represents a summary of a conversation.
Attributes
----------
uuid : str
The unique identifier of the summary.
created_at : str
The timestamp of when the summary was created.
content : str
The content of the summary.
recent_message_uuid : str
The unique identifier of the most recent message in the conversation.
token_count : int
The number of tokens in the summary.
Methods
-------
to_dict() -> Dict[str, Any]:
Returns a dictionary representation of the summary.
"""
uuid: str = Field("A uuid is required")
created_at: str = Field("A created_at is required")
content: str = Field("Content is required")
recent_message_uuid: str = Field("A recent_message_uuid is required")
token_count: int = Field("A token_count is required")
def to_dict(self) -> Dict[str, Any]:
"""
Returns a dictionary representation of the summary.
Returns
-------
Dict[str, Any]
A dictionary containing the attributes of the summary.
"""
return self.dict()
| (*, uuid: str = 'A uuid is required', created_at: str = 'A created_at is required', content: str = 'Content is required', recent_message_uuid: str = 'A recent_message_uuid is required', token_count: int = 'A token_count is required') -> None |
725,760 | zep_python.memory.models | to_dict |
Returns a dictionary representation of the summary.
Returns
-------
Dict[str, Any]
A dictionary containing the attributes of the summary.
| def to_dict(self) -> Dict[str, Any]:
"""
Returns a dictionary representation of the summary.
Returns
-------
Dict[str, Any]
A dictionary containing the attributes of the summary.
"""
return self.dict()
| (self) -> Dict[str, Any] |
725,761 | zep_python.zep_client | ZepClient |
ZepClient class implementation.
Attributes
----------
base_url : str
The base URL of the API.
memory : MemoryClient
The client used for making Memory API requests.
document : DocumentClient
The client used for making Document API requests.
Methods
-------
get_memory(session_id: str, lastn: Optional[int] = None) -> List[Memory]:
Retrieve memory for the specified session. (Deprecated)
add_memory(session_id: str, memory_messages: Memory) -> str:
Add memory to the specified session. (Deprecated)
delete_memory(session_id: str) -> str:
Delete memory for the specified session. (Deprecated)
search_memory(session_id: str, search_payload: SearchPayload,
limit: Optional[int] = None) -> List[SearchResult]:
Search memory for the specified session. (Deprecated)
close() -> None:
Close the HTTP client.
| class ZepClient:
"""
ZepClient class implementation.
Attributes
----------
base_url : str
The base URL of the API.
memory : MemoryClient
The client used for making Memory API requests.
document : DocumentClient
The client used for making Document API requests.
Methods
-------
get_memory(session_id: str, lastn: Optional[int] = None) -> List[Memory]:
Retrieve memory for the specified session. (Deprecated)
add_memory(session_id: str, memory_messages: Memory) -> str:
Add memory to the specified session. (Deprecated)
delete_memory(session_id: str) -> str:
Delete memory for the specified session. (Deprecated)
search_memory(session_id: str, search_payload: SearchPayload,
limit: Optional[int] = None) -> List[SearchResult]:
Search memory for the specified session. (Deprecated)
close() -> None:
Close the HTTP client.
"""
base_url: str
memory: MemoryClient
document: DocumentClient
user: UserClient
def __init__(self, base_url: str, api_key: Optional[str] = None) -> None:
"""
Initialize the ZepClient with the specified base URL.
Parameters
----------
base_url : str
The base URL of the API.
api_key : Optional[str]
The API key to use for authentication. (optional)
"""
headers: Dict[str, str] = {}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
self.base_url = concat_url(base_url, API_BASE_PATH)
self.aclient = httpx.AsyncClient(
base_url=self.base_url, headers=headers, timeout=API_TIMEOUT
)
self.client = httpx.Client(
base_url=self.base_url, headers=headers, timeout=API_TIMEOUT
)
self._healthcheck(base_url)
self.memory = MemoryClient(self.aclient, self.client)
self.message = MessageClient(self.aclient, self.client)
self.document = DocumentClient(self.aclient, self.client)
self.user = UserClient(self.aclient, self.client)
def _healthcheck(self, base_url: str) -> None:
"""
Check that the Zep server is running, the API URL is correct,
and that the server version is compatible with this client.
Raises
------
ConnectionError
If the server is not running or the API URL is incorrect.
"""
url = concat_url(base_url, "/healthz")
error_msg = """Failed to connect to Zep server. Please check that:
- the server is running
- the API URL is correct
- No other process is using the same port
"""
try:
response = httpx.get(url)
if response.status_code != 200 or response.text != ".":
raise APIError(response, error_msg)
zep_server_version_str = response.headers.get("X-Zep-Version")
if zep_server_version_str:
if "dev" in zep_server_version_str:
return
zep_server_version = parse_version_string(zep_server_version_str)
else:
zep_server_version = Version("0.0.0")
if zep_server_version < Version(MINIMUM_SERVER_VERSION):
warnings.warn(
(
"You are using an incompatible Zep server version. Please"
f" upgrade to {MINIMUM_SERVER_VERSION} or later."
),
Warning,
stacklevel=2,
)
except (httpx.ConnectError, httpx.NetworkError, httpx.TimeoutException) as e:
raise APIError(None, error_msg) from e
async def __aenter__(self) -> "ZepClient":
"""Asynchronous context manager entry point"""
return self
async def __aexit__(
self,
exc_type: Type[Exception],
exc_val: Exception,
exc_tb: TracebackType,
) -> None:
"""Asynchronous context manager exit point"""
await self.aclose()
def __enter__(self) -> "ZepClient":
"""Sync context manager entry point"""
return self
def __exit__(
self,
exc_type: Type[Exception],
exc_val: Exception,
exc_tb: TracebackType,
) -> None:
"""Sync context manager exit point"""
self.close()
# Facade methods for Memory API
def get_session(self, session_id: str) -> Session:
deprecated_warning(self.get_session)
return self.memory.get_session(session_id)
async def aget_session(self, session_id: str) -> Session:
deprecated_warning(self.aget_session)
return await self.memory.aget_session(session_id)
def add_session(self, session: Session) -> Session:
deprecated_warning(self.add_session)
return self.memory.add_session(session)
async def aadd_session(self, session: Session) -> Session:
deprecated_warning(self.aadd_session)
return await self.memory.aadd_session(session)
def get_memory(self, session_id: str, lastn: Optional[int] = None) -> Memory:
deprecated_warning(self.get_memory)
return self.memory.get_memory(session_id, lastn)
async def aget_memory(self, session_id: str, lastn: Optional[int] = None) -> Memory:
deprecated_warning(self.aget_memory)
return await self.memory.aget_memory(session_id, lastn)
def add_memory(self, session_id: str, memory_messages: Memory) -> str:
deprecated_warning(self.add_memory)
return self.memory.add_memory(session_id, memory_messages)
async def aadd_memory(self, session_id: str, memory_messages: Memory) -> str:
deprecated_warning(self.aadd_memory)
return await self.memory.aadd_memory(session_id, memory_messages)
def delete_memory(self, session_id: str) -> str:
deprecated_warning(self.delete_memory)
return self.memory.delete_memory(session_id)
async def adelete_memory(self, session_id: str) -> str:
deprecated_warning(self.adelete_memory)
return await self.memory.adelete_memory(session_id)
def search_memory(
self,
session_id: str,
search_payload: MemorySearchPayload,
limit: Optional[int] = None,
) -> List[MemorySearchResult]:
deprecated_warning(self.search_memory)
return self.memory.search_memory(session_id, search_payload, limit)
async def asearch_memory(
self,
session_id: str,
search_payload: MemorySearchPayload,
limit: Optional[int] = None,
) -> List[MemorySearchResult]:
deprecated_warning(self.asearch_memory)
return await self.memory.asearch_memory(session_id, search_payload, limit)
# Close the HTTP client
async def aclose(self) -> None:
"""
Asynchronously close the HTTP client.
[Optional] This method may be called when the ZepClient is no longer needed to
release resources.
"""
await self.aclient.aclose()
def close(self) -> None:
"""
Close the HTTP client.
[Optional] This method may be called when the ZepClient is no longer needed to
release resources.
"""
self.client.close()
| (base_url: str, api_key: 'Optional[str]' = None) -> 'None' |
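Usage note: a minimal sketch, assuming a Zep server running at a hypothetical local URL (the constructor performs a healthcheck, so it raises APIError if nothing is listening); the namespaced client is used instead of the deprecated facade methods.

from zep_python.zep_client import ZepClient
from zep_python.memory.models import MemorySearchPayload

with ZepClient(base_url="http://localhost:8000", api_key=None) as client:
    results = client.memory.search_memory(
        "session-123",  # hypothetical session id
        MemorySearchPayload(text="science fiction"),
        limit=5,
    )
    for r in results:
        print(r.dist, r.message)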
725,762 | zep_python.zep_client | __aenter__ | Asynchronous context manager entry point | async def __aenter__(self) -> "ZepClient":
"""Asynchronous context manager entry point"""
return self
| (self) -> zep_python.zep_client.ZepClient |
725,763 | zep_python.zep_client | __aexit__ | Asynchronous context manager exit point | async def __aexit__(
self,
exc_type: Type[Exception],
exc_val: Exception,
exc_tb: TracebackType,
) -> None:
"""Asynchronous context manager exit point"""
await self.aclose()
| (self, exc_type: Type[Exception], exc_val: Exception, exc_tb: traceback) -> NoneType |
725,764 | zep_python.zep_client | __enter__ | Sync context manager entry point | def __enter__(self) -> "ZepClient":
"""Sync context manager entry point"""
return self
| (self) -> zep_python.zep_client.ZepClient |
725,765 | zep_python.zep_client | __exit__ | Sync context manager exit point | def __exit__(
self,
exc_type: Type[Exception],
exc_val: Exception,
exc_tb: TracebackType,
) -> None:
"""Sync context manager exit point"""
self.close()
| (self, exc_type: Type[Exception], exc_val: Exception, exc_tb: traceback) -> NoneType |
725,766 | zep_python.zep_client | __init__ |
Initialize the ZepClient with the specified base URL.
Parameters
----------
base_url : str
The base URL of the API.
api_key : Optional[str]
The API key to use for authentication. (optional)
| def __init__(self, base_url: str, api_key: Optional[str] = None) -> None:
"""
Initialize the ZepClient with the specified base URL.
Parameters
----------
base_url : str
The base URL of the API.
api_key : Optional[str]
The API key to use for authentication. (optional)
"""
headers: Dict[str, str] = {}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
self.base_url = concat_url(base_url, API_BASE_PATH)
self.aclient = httpx.AsyncClient(
base_url=self.base_url, headers=headers, timeout=API_TIMEOUT
)
self.client = httpx.Client(
base_url=self.base_url, headers=headers, timeout=API_TIMEOUT
)
self._healthcheck(base_url)
self.memory = MemoryClient(self.aclient, self.client)
self.message = MessageClient(self.aclient, self.client)
self.document = DocumentClient(self.aclient, self.client)
self.user = UserClient(self.aclient, self.client)
| (self, base_url: str, api_key: Optional[str] = None) -> NoneType |
725,767 | zep_python.zep_client | _healthcheck |
Check that the Zep server is running, the API URL is correct,
and that the server version is compatible with this client.
Raises
------
ConnectionError
If the server is not running or the API URL is incorrect.
| def _healthcheck(self, base_url: str) -> None:
"""
Check that the Zep server is running, the API URL is correct,
and that the server version is compatible with this client.
Raises
------
ConnectionError
If the server is not running or the API URL is incorrect.
"""
url = concat_url(base_url, "/healthz")
error_msg = """Failed to connect to Zep server. Please check that:
- the server is running
- the API URL is correct
- No other process is using the same port
"""
try:
response = httpx.get(url)
if response.status_code != 200 or response.text != ".":
raise APIError(response, error_msg)
zep_server_version_str = response.headers.get("X-Zep-Version")
if zep_server_version_str:
if "dev" in zep_server_version_str:
return
zep_server_version = parse_version_string(zep_server_version_str)
else:
zep_server_version = Version("0.0.0")
if zep_server_version < Version(MINIMUM_SERVER_VERSION):
warnings.warn(
(
"You are using an incompatible Zep server version. Please"
f" upgrade to {MINIMUM_SERVER_VERSION} or later."
),
Warning,
stacklevel=2,
)
except (httpx.ConnectError, httpx.NetworkError, httpx.TimeoutException) as e:
raise APIError(None, error_msg) from e
| (self, base_url: str) -> NoneType |
725,768 | zep_python.zep_client | aadd_memory | null | async def aadd_memory(self, session_id: str, memory_messages: Memory) -> str:
deprecated_warning(self.aadd_memory)
return await self.memory.aadd_memory(session_id, memory_messages)
| (self, session_id: str, memory_messages: zep_python.memory.models.Memory) -> str |
725,769 | zep_python.zep_client | aadd_session | null | async def aadd_session(self, session: Session) -> Session:
deprecated_warning(self.aadd_session)
return await self.memory.aadd_session(session)
| (self, session: zep_python.memory.models.Session) -> zep_python.memory.models.Session |
725,770 | zep_python.zep_client | aclose |
Asynchronously close the HTTP client.
[Optional] This method may be called when the ZepClient is no longer needed to
release resources.
| async def aclose(self) -> None:
"""
Asynchronously close the HTTP client.
[Optional] This method may be called when the ZepClient is no longer needed to
release resources.
"""
await self.aclient.aclose()
| (self) -> NoneType |
725,773 | zep_python.zep_client | adelete_memory | null | async def adelete_memory(self, session_id: str) -> str:
deprecated_warning(self.adelete_memory)
return await self.memory.adelete_memory(session_id)
| (self, session_id: str) -> str |
725,774 | zep_python.zep_client | aget_memory | null | async def aget_memory(self, session_id: str, lastn: Optional[int] = None) -> Memory:
deprecated_warning(self.aget_memory)
return await self.memory.aget_memory(session_id, lastn)
| (self, session_id: str, lastn: Optional[int] = None) -> zep_python.memory.models.Memory |
725,775 | zep_python.zep_client | aget_session | null | async def aget_session(self, session_id: str) -> Session:
deprecated_warning(self.aget_session)
return await self.memory.aget_session(session_id)
| (self, session_id: str) -> zep_python.memory.models.Session |
725,776 | zep_python.zep_client | asearch_memory | null | async def asearch_memory(
self,
session_id: str,
search_payload: MemorySearchPayload,
limit: Optional[int] = None,
) -> List[MemorySearchResult]:
deprecated_warning(self.asearch_memory)
return await self.memory.asearch_memory(session_id, search_payload, limit)
| (self, session_id: str, search_payload: zep_python.memory.models.MemorySearchPayload, limit: Optional[int] = None) -> List[zep_python.memory.models.MemorySearchResult] |
725,777 | zep_python.zep_client | close |
Close the HTTP client.
[Optional] This method may be called when the ZepClient is no longer needed to
release resources.
| def close(self) -> None:
"""
Close the HTTP client.
[Optional] This method may be called when the ZepClient is no longer needed to
release resources.
"""
self.client.close()
| (self) -> NoneType |
725,782 | zep_python | deprecated_import | null | def deprecated_import():
warnings.warn(
(
"Importing memory classes from the base client path is deprecated, "
"please import from zep_python.memory instead."
),
DeprecationWarning,
stacklevel=2,
)
from zep_python.memory.models import (
Memory,
MemorySearchPayload,
MemorySearchResult,
Message,
Session,
Summary,
)
return Memory, MemorySearchPayload, MemorySearchResult, Message, Session, Summary
| () |
725,791 | databricks_api.databricks | DatabricksAPI | null | class DatabricksAPI:
def __init__(self, **kwargs):
warnings.warn(
"""
==================================================================
Please switch to the official Databricks SDK for Python by running
`pip install databricks-sdk`. See more information and sources at
https://github.com/databricks/databricks-sdk-py
==================================================================""",
DeprecationWarning,
)
if "host" in kwargs:
if not kwargs["host"].startswith("https://"):
kwargs["host"] = "https://" + kwargs["host"]
self.client = ApiClient(**kwargs)
for _, camel_name, service in _get_services():
setattr(self, camel_name, service(self.client))
| (**kwargs) |
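Usage note: a minimal sketch; the host and token are hypothetical placeholders, and the service attribute name follows the camelCase services attached in __init__ (e.g. .cluster per the databricks-api README).

from databricks_api import DatabricksAPI

# The constructor prepends https:// to the host if it is missing.
db = DatabricksAPI(host="example.cloud.databricks.com", token="<personal-access-token>")
db.cluster.list_clusters()  # each underlying databricks-cli service is exposed as an attribute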
725,792 | databricks_api.databricks | __init__ | null | def __init__(self, **kwargs):
warnings.warn(
"""
==================================================================
Please switch to the official Databricks SDK for Python by running
`pip install databricks-sdk`. See more information and sources at
https://github.com/databricks/databricks-sdk-py
==================================================================""",
DeprecationWarning,
)
if "host" in kwargs:
if not kwargs["host"].startswith("https://"):
kwargs["host"] = "https://" + kwargs["host"]
self.client = ApiClient(**kwargs)
for _, camel_name, service in _get_services():
setattr(self, camel_name, service(self.client))
| (self, **kwargs) |
725,795 | flask_admin.base | Admin |
Collection of the admin views. Also manages menu structure.
| class Admin(object):
"""
Collection of the admin views. Also manages menu structure.
"""
def __init__(self, app=None, name=None,
url=None, subdomain=None,
index_view=None,
translations_path=None,
endpoint=None,
static_url_path=None,
base_template=None,
template_mode=None,
category_icon_classes=None):
"""
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
:param template_mode:
Base template path. Defaults to `bootstrap2`. If you want to use
Bootstrap 3 or 4 integration, change it to `bootstrap3` or `bootstrap4`.
:param category_icon_classes:
A dict of category names as keys and html classes as values to be added to menu category icons.
Example: {'Favorites': 'glyphicon glyphicon-star'}
"""
self.app = app
self.translations_path = translations_path
self._views = []
self._menu = []
self._menu_categories = dict()
self._menu_links = []
if name is None:
name = 'Admin'
self.name = name
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
self.static_url_path = static_url_path
self.subdomain = subdomain
self.base_template = base_template or 'admin/base.html'
self.template_mode = template_mode or 'bootstrap2'
self.category_icon_classes = category_icon_classes or dict()
# Add index view
self._set_admin_index_view(index_view=index_view, endpoint=endpoint, url=url)
# Register with application
if app is not None:
self._init_extension()
def add_view(self, view):
"""
Add a view to the collection.
:param view:
View to add.
"""
# Add to views
self._views.append(view)
# If app was provided in constructor, register view with Flask app
if self.app is not None:
self.app.register_blueprint(view.create_blueprint(self))
self._add_view_to_menu(view)
def _set_admin_index_view(self, index_view=None,
endpoint=None, url=None):
"""
Add the admin index view.
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param url:
Base URL
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
"""
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
# Add predefined index view
# assume index view is always the first element of views.
if len(self._views) > 0:
self._views[0] = self.index_view
self._menu[0] = MenuView(self.index_view.name, self.index_view)
else:
self.add_view(self.index_view)
def add_views(self, *args):
"""
Add one or more views to the collection.
Examples::
admin.add_views(view1)
admin.add_views(view1, view2, view3, view4)
admin.add_views(*my_list)
:param args:
Argument list including the views to add.
"""
for view in args:
self.add_view(view)
def add_category(self, name, class_name=None, icon_type=None, icon_value=None):
"""
Add a category of a given name
:param name:
The name of the new menu category.
:param class_name:
The class name for the new menu category.
:param icon_type:
The icon name for the new menu category.
:param icon_value:
The icon value for the new menu category.
"""
cat_text = as_unicode(name)
category = self.get_category_menu_item(name)
if category:
return
category = MenuCategory(name, class_name=class_name, icon_type=icon_type, icon_value=icon_value)
self._menu_categories[cat_text] = category
self._menu.append(category)
def add_sub_category(self, name, parent_name):
"""
Add a category with the given name underneath
the category named by parent_name.
:param name:
The name of the new menu category.
:param parent_name:
The name of the parent category.
"""
name_text = as_unicode(name)
parent_name_text = as_unicode(parent_name)
category = self.get_category_menu_item(name_text)
parent = self.get_category_menu_item(parent_name_text)
if category is None and parent is not None:
category = SubMenuCategory(name)
self._menu_categories[name_text] = category
parent.add_child(category)
def add_link(self, link):
"""
Add link to menu links collection.
:param link:
Link to add.
"""
if link.category:
self.add_menu_item(link, link.category)
else:
self._menu_links.append(link)
def add_links(self, *args):
"""
Add one or more links to the menu links collection.
Examples::
admin.add_links(link1)
admin.add_links(link1, link2, link3, link4)
admin.add_links(*my_list)
:param args:
Argument list including the links to add.
"""
for link in args:
self.add_link(link)
def add_menu_item(self, menu_item, target_category=None):
"""
Add menu item to menu tree hierarchy.
:param menu_item:
MenuItem class instance
:param target_category:
Target category name
"""
if target_category:
cat_text = as_unicode(target_category)
category = self._menu_categories.get(cat_text)
# create a new menu category if one does not exist already
if category is None:
category = MenuCategory(target_category)
category.class_name = self.category_icon_classes.get(cat_text)
self._menu_categories[cat_text] = category
self._menu.append(category)
category.add_child(menu_item)
else:
self._menu.append(menu_item)
def _add_menu_item(self, menu_item, target_category):
warnings.warn('Admin._add_menu_item is obsolete - use Admin.add_menu_item instead.')
return self.add_menu_item(menu_item, target_category)
def _add_view_to_menu(self, view):
"""
Add a view to the menu tree
:param view:
View to add
"""
self.add_menu_item(MenuView(view.name, view), view.category)
def get_category_menu_item(self, name):
return self._menu_categories.get(name)
def init_app(self, app, index_view=None,
endpoint=None, url=None):
"""
Register all views with the Flask application.
:param app:
Flask application instance
:param index_view:
Home page view to use, replacing the one set in the constructor.
:param endpoint:
Base endpoint name for the index view.
:param url:
Base URL for the index view.
"""
self.app = app
self._init_extension()
# Register Index view
if index_view is not None:
self._set_admin_index_view(
index_view=index_view,
endpoint=endpoint,
url=url
)
# Register views
for view in self._views:
app.register_blueprint(view.create_blueprint(self))
def _init_extension(self):
if not hasattr(self.app, 'extensions'):
self.app.extensions = dict()
admins = self.app.extensions.get('admin', [])
for p in admins:
if p.endpoint == self.endpoint:
raise Exception(u'Cannot have two Admin() instances with same'
u' endpoint name.')
if p.url == self.url and p.subdomain == self.subdomain:
raise Exception(u'Cannot assign two Admin() instances with same'
u' URL and subdomain to the same application.')
admins.append(self)
self.app.extensions['admin'] = admins
def menu(self):
"""
Return the menu hierarchy.
"""
return self._menu
def menu_links(self):
"""
Return menu links.
"""
return self._menu_links
| (app=None, name=None, url=None, subdomain=None, index_view=None, translations_path=None, endpoint=None, static_url_path=None, base_template=None, template_mode=None, category_icon_classes=None) |
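A minimal setup sketch for the Admin class above (standard Flask-Admin usage; the application name and template mode are arbitrary choices):
from flask import Flask
from flask_admin import Admin

app = Flask(__name__)
# Passing app here makes the constructor call _init_extension() immediately;
# name feeds the menu and page title, template_mode picks the bundled
# Bootstrap templates.
admin = Admin(app, name='My App', template_mode='bootstrap3')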
725,796 | flask_admin.base | __init__ |
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
:param template_mode:
Base template path. Defaults to `bootstrap2`. If you want to use
Bootstrap 3 or 4 integration, change it to `bootstrap3` or `bootstrap4`.
:param category_icon_classes:
A dict of category names as keys and html classes as values to be added to menu category icons.
Example: {'Favorites': 'glyphicon glyphicon-star'}
| def __init__(self, app=None, name=None,
url=None, subdomain=None,
index_view=None,
translations_path=None,
endpoint=None,
static_url_path=None,
base_template=None,
template_mode=None,
category_icon_classes=None):
"""
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
:param template_mode:
Base template path. Defaults to `bootstrap2`. If you want to use
Bootstrap 3 or 4 integration, change it to `bootstrap3` or `bootstrap4`.
:param category_icon_classes:
A dict of category names as keys and html classes as values to be added to menu category icons.
Example: {'Favorites': 'glyphicon glyphicon-star'}
"""
self.app = app
self.translations_path = translations_path
self._views = []
self._menu = []
self._menu_categories = dict()
self._menu_links = []
if name is None:
name = 'Admin'
self.name = name
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
self.static_url_path = static_url_path
self.subdomain = subdomain
self.base_template = base_template or 'admin/base.html'
self.template_mode = template_mode or 'bootstrap2'
self.category_icon_classes = category_icon_classes or dict()
# Add index view
self._set_admin_index_view(index_view=index_view, endpoint=endpoint, url=url)
# Register with application
if app is not None:
self._init_extension()
| (self, app=None, name=None, url=None, subdomain=None, index_view=None, translations_path=None, endpoint=None, static_url_path=None, base_template=None, template_mode=None, category_icon_classes=None) |
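Because app is optional, the constructor also supports the application-factory pattern: build Admin without an app and call init_app later. A sketch (create_app is an assumed factory name):
from flask import Flask
from flask_admin import Admin

admin = Admin(name='My App')   # no app yet: nothing is registered

def create_app():
    app = Flask(__name__)
    admin.init_app(app)        # registers the index view and any views added so far
    return app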
725,797 | flask_admin.base | _add_menu_item | null | def _add_menu_item(self, menu_item, target_category):
warnings.warn('Admin._add_menu_item is obsolete - use Admin.add_menu_item instead.')
return self.add_menu_item(menu_item, target_category)
| (self, menu_item, target_category) |
725,798 | flask_admin.base | _add_view_to_menu |
Add a view to the menu tree
:param view:
View to add
| def _add_view_to_menu(self, view):
"""
Add a view to the menu tree
:param view:
View to add
"""
self.add_menu_item(MenuView(view.name, view), view.category)
| (self, view) |
725,799 | flask_admin.base | _init_extension | null | def _init_extension(self):
if not hasattr(self.app, 'extensions'):
self.app.extensions = dict()
admins = self.app.extensions.get('admin', [])
for p in admins:
if p.endpoint == self.endpoint:
raise Exception(u'Cannot have two Admin() instances with same'
u' endpoint name.')
if p.url == self.url and p.subdomain == self.subdomain:
raise Exception(u'Cannot assign two Admin() instances with same'
u' URL and subdomain to the same application.')
admins.append(self)
self.app.extensions['admin'] = admins
| (self) |
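_init_extension enforces that multiple Admin instances on one app differ in endpoint and in url/subdomain. A sketch of a valid second instance (the endpoint and url values are arbitrary):
from flask import Flask
from flask_admin import Admin

app = Flask(__name__)
admin_main = Admin(app, name='Main')                            # defaults: endpoint 'admin', url '/admin'
admin_ops = Admin(app, name='Ops', endpoint='ops', url='/ops')  # unique endpoint and URL, so no Exception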
725,800 | flask_admin.base | _set_admin_index_view |
Add the admin index view.
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param url:
Base URL
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
| def _set_admin_index_view(self, index_view=None,
endpoint=None, url=None):
"""
Add the admin index view.
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param url:
Base URL
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
"""
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
# Add predefined index view
# assume index view is always the first element of views.
if len(self._views) > 0:
self._views[0] = self.index_view
self._menu[0] = MenuView(self.index_view.name, self.index_view)
else:
self.add_view(self.index_view)
| (self, index_view=None, endpoint=None, url=None) |
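Since _set_admin_index_view either replaces slot 0 of _views or adds the index view fresh, a custom home page can be supplied at construction time. A sketch with a subclassed index view (HomeView and its template path are assumptions):
from flask import Flask
from flask_admin import Admin, AdminIndexView, expose

class HomeView(AdminIndexView):
    @expose('/')
    def index(self):
        return self.render('admin/home.html')  # assumed template path

app = Flask(__name__)
admin = Admin(app, index_view=HomeView(name='Home'))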
725,801 | flask_admin.base | add_category |
Add a category of a given name
:param name:
The name of the new menu category.
:param class_name:
The class name for the new menu category.
:param icon_type:
The icon name for the new menu category.
:param icon_value:
The icon value for the new menu category.
| def add_category(self, name, class_name=None, icon_type=None, icon_value=None):
"""
Add a category of a given name
:param name:
The name of the new menu category.
:param class_name:
The class name for the new menu category.
:param icon_type:
The icon name for the new menu category.
:param icon_value:
The icon value for the new menu category.
"""
cat_text = as_unicode(name)
category = self.get_category_menu_item(name)
if category:
return
category = MenuCategory(name, class_name=class_name, icon_type=icon_type, icon_value=icon_value)
self._menu_categories[cat_text] = category
self._menu.append(category)
| (self, name, class_name=None, icon_type=None, icon_value=None) |
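A usage sketch for add_category, assuming an admin instance built as in the earlier sketches (the 'glyph' icon_type value is an assumption based on the bundled Bootstrap templates):
# No-op if the category already exists (get_category_menu_item returns it).
admin.add_category(name='Reports', class_name='nav-reports',
                   icon_type='glyph', icon_value='glyphicon-stats')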
725,802 | flask_admin.base | add_link |
Add link to menu links collection.
:param link:
Link to add.
| def add_link(self, link):
"""
Add link to menu links collection.
:param link:
Link to add.
"""
if link.category:
self.add_menu_item(link, link.category)
else:
self._menu_links.append(link)
| (self, link) |
725,803 | flask_admin.base | add_links |
Add one or more links to the menu links collection.
Examples::
admin.add_links(link1)
admin.add_links(link1, link2, link3, link4)
admin.add_links(*my_list)
:param args:
Argument list including the links to add.
| def add_links(self, *args):
"""
Add one or more links to the menu links collection.
Examples::
admin.add_links(link1)
admin.add_links(link1, link2, link3, link4)
admin.add_links(*my_list)
:param args:
Argument list including the links to add.
"""
for link in args:
self.add_link(link)
| (self, *args) |
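Links are plain menu entries rather than views: uncategorized ones land in _menu_links, categorized ones are routed through add_menu_item. A sketch using MenuLink from flask_admin.menu, reusing the admin instance from the earlier sketches:
from flask_admin.menu import MenuLink

admin.add_link(MenuLink(name='Back to site', url='/'))  # no category: stored in _menu_links
admin.add_links(MenuLink(name='Docs', url='https://flask-admin.readthedocs.io/'),
                MenuLink(name='Logout', url='/logout', category='Account'))  # categorized: joins the menu tree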
725,804 | flask_admin.base | add_menu_item |
Add menu item to menu tree hierarchy.
:param menu_item:
MenuItem class instance
:param target_category:
Target category name
| def add_menu_item(self, menu_item, target_category=None):
"""
Add menu item to menu tree hierarchy.
:param menu_item:
MenuItem class instance
:param target_category:
Target category name
"""
if target_category:
cat_text = as_unicode(target_category)
category = self._menu_categories.get(cat_text)
# create a new menu category if one does not exist already
if category is None:
category = MenuCategory(target_category)
category.class_name = self.category_icon_classes.get(cat_text)
self._menu_categories[cat_text] = category
self._menu.append(category)
category.add_child(menu_item)
else:
self._menu.append(menu_item)
| (self, menu_item, target_category=None) |
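add_menu_item is the low-level hook the helpers above funnel into; it auto-creates missing categories and applies category_icon_classes. A direct-use sketch (the 'Marketing' category and link are illustrative):
from flask_admin.menu import MenuLink

# 'Marketing' is created on the fly if absent, picking up any class name
# registered in category_icon_classes.
admin.add_menu_item(MenuLink(name='Pricing', url='/pricing'), target_category='Marketing')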
725,805 | flask_admin.base | add_sub_category |
Add a category with the given name underneath
the category named by parent_name.
:param name:
The name of the new menu category.
:param parent_name:
The name of the parent category.
| def add_sub_category(self, name, parent_name):
"""
Add a category with the given name underneath
the category named by parent_name.
:param name:
The name of the new menu category.
:param parent_name:
The name of the parent category.
"""
name_text = as_unicode(name)
parent_name_text = as_unicode(parent_name)
category = self.get_category_menu_item(name_text)
parent = self.get_category_menu_item(parent_name_text)
if category is None and parent is not None:
category = SubMenuCategory(name)
self._menu_categories[name_text] = category
parent.add_child(category)
| (self, name, parent_name) |
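Note the guard in the body above: the parent must already exist and the child must not, otherwise the call silently does nothing. A sketch:
admin.add_category('Reports')                   # create the parent first
admin.add_sub_category('Quarterly', 'Reports')  # nested under Reports
admin.add_sub_category('Weekly', 'Missing')     # silently ignored: parent not found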
725,806 | flask_admin.base | add_view |
Add a view to the collection.
:param view:
View to add.
| def add_view(self, view):
"""
Add a view to the collection.
:param view:
View to add.
"""
# Add to views
self._views.append(view)
# If app was provided in constructor, register view with Flask app
if self.app is not None:
self.app.register_blueprint(view.create_blueprint(self))
self._add_view_to_menu(view)
| (self, view) |
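A sketch of adding a custom view with add_view (AnalyticsView and its template are assumptions; BaseView and expose are standard Flask-Admin primitives, and admin is assumed to be built as in the earlier sketches):
from flask_admin import BaseView, expose

class AnalyticsView(BaseView):
    @expose('/')
    def index(self):
        return self.render('analytics.html')  # assumed template

# Registers the blueprint immediately because admin was constructed with an app.
admin.add_view(AnalyticsView(name='Analytics', category='Reports'))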
725,807 | flask_admin.base | add_views |
Add one or more views to the collection.
Examples::
admin.add_views(view1)
admin.add_views(view1, view2, view3, view4)
admin.add_views(*my_list)
:param args:
Argument list including the views to add.
| def add_views(self, *args):
"""
Add one or more views to the collection.
Examples::
admin.add_views(view1)
admin.add_views(view1, view2, view3, view4)
admin.add_views(*my_list)
:param args:
Argument list including the views to add.
"""
for view in args:
self.add_view(view)
| (self, *args) |
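add_views is a thin loop over add_view. A sketch with SQLAlchemy model views (User, Post, and db.session are assumed to come from the host application; ModelView lives in flask_admin.contrib.sqla):
from flask_admin.contrib.sqla import ModelView

admin.add_views(
    ModelView(User, db.session, category='Data'),  # User, Post, and db are app-side assumptions
    ModelView(Post, db.session, category='Data'),
)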