response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Compute Pi to the required precision. Adapted from https://docs.python.org/3/library/decimal.html
def pi(precision: int) -> list[int]: """Compute Pi to the required precision. Adapted from https://docs.python.org/3/library/decimal.html """ saved_precision = getcontext().prec # Save precision getcontext().prec = precision three = Decimal(3) # substitute "three=3.0" for regular floats lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24 while s != lasts: lasts = s n, na = n + na, na + 8 d, da = d + da, da + 32 t = (t * n) / d s += t # type: ignore[assignment] digits = [] while s != 0: integral = int(s) digits.append(integral) s = (s - integral) * 10 getcontext().prec = saved_precision return digits
Run one or multiple Taipy services. A Taipy service is an instance of a class that runs code as a Web application. Parameters: *services (Union[`Gui^`, `Rest^`, `Core^`]): Services to run, as separate arguments.<br/> If several services are provided, all the services run simultaneously.<br/> If this is empty or set to None, this method does nothing. **kwargs: Other parameters to provide to the services.
def _run(*services: _AppType, **kwargs) -> t.Optional[Flask]: """Run one or multiple Taipy services. A Taipy service is an instance of a class that runs code as a Web application. Parameters: *services (Union[`Gui^`, `Rest^`, `Core^`]): Services to run, as separate arguments.<br/> If several services are provided, all the services run simultaneously.<br/> If this is empty or set to None, this method does nothing. **kwargs: Other parameters to provide to the services. """ gui = __get_app(services, Gui) rest = __get_app(services, Rest) core = __get_app(services, Core) if gui and core: from taipy.core._core_cli import _CoreCLI from taipy.gui._gui_cli import _GuiCLI _CoreCLI.create_parser() _GuiCLI.create_parser() if rest or core: if not core: core = Core() core.run() if not rest and not gui: return None if gui and rest: gui._set_flask(rest._app) # type: ignore[union-attr] return gui.run(**kwargs) else: app = rest or gui assert app is not None # Avoid pyright typing error return app.run(**kwargs)
Save or update an entity. This function allows you to save or update an entity in Taipy. Parameters: entity (Union[DataNode^, Task^, Sequence^, Scenario^, Cycle^, Submission^]): The entity to save or update.
def set(entity: Union[DataNode, Task, Sequence, Scenario, Cycle]): """Save or update an entity. This function allows you to save or update an entity in Taipy. Parameters: entity (Union[DataNode^, Task^, Sequence^, Scenario^, Cycle^, Submission^]): The entity to save or update. """ if isinstance(entity, Cycle): return _CycleManagerFactory._build_manager()._set(entity) if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._set(entity) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._set(entity) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._set(entity) if isinstance(entity, DataNode): return _DataManagerFactory._build_manager()._set(entity) if isinstance(entity, Submission): return _SubmissionManagerFactory._build_manager()._set(entity)
Indicate if an entity can be submitted. This function checks if the given entity can be submitted for execution. Returns: True if the given entity can be submitted. False otherwise.
def is_submittable(entity: Union[Scenario, ScenarioId, Sequence, SequenceId, Task, TaskId, str]) -> bool: """Indicate if an entity can be submitted. This function checks if the given entity can be submitted for execution. Returns: True if the given entity can be submitted. False otherwise. """ if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._is_submittable(entity) if isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._is_submittable(ScenarioId(entity)) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._is_submittable(entity) if isinstance(entity, str) and entity.startswith(Sequence._ID_PREFIX): return _SequenceManagerFactory._build_manager()._is_submittable(SequenceId(entity)) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._is_submittable(entity) if isinstance(entity, str) and entity.startswith(Task._ID_PREFIX): return _TaskManagerFactory._build_manager()._is_submittable(TaskId(entity)) return False
Indicate if an entity can be edited. This function checks if the given entity can be edited. Returns: True if the given entity can be edited. False otherwise.
def is_editable( entity: Union[ DataNode, Task, Job, Sequence, Scenario, Cycle, Submission, DataNodeId, TaskId, JobId, SequenceId, ScenarioId, CycleId, SubmissionId, ], ) -> bool: """Indicate if an entity can be edited. This function checks if the given entity can be edited. Returns: True if the given entity can be edited. False otherwise. """ if isinstance(entity, Cycle): return _CycleManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(Cycle._ID_PREFIX): return _CycleManagerFactory._build_manager()._is_editable(CycleId(entity)) if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._is_editable(ScenarioId(entity)) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(Sequence._ID_PREFIX): return _SequenceManagerFactory._build_manager()._is_editable(SequenceId(entity)) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(Task._ID_PREFIX): return _TaskManagerFactory._build_manager()._is_editable(TaskId(entity)) if isinstance(entity, Job): return _JobManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(Job._ID_PREFIX): return _JobManagerFactory._build_manager()._is_editable(JobId(entity)) if isinstance(entity, DataNode): return _DataManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(DataNode._ID_PREFIX): return _DataManagerFactory._build_manager()._is_editable(DataNodeId(entity)) if isinstance(entity, Submission): return _SubmissionManagerFactory._build_manager()._is_editable(entity) if isinstance(entity, str) and entity.startswith(Submission._ID_PREFIX): return _SubmissionManagerFactory._build_manager()._is_editable(SequenceId(entity)) return False
Indicate if an entity can be read. This function checks if the given entity can be read. Returns: True if the given entity can be read. False otherwise.
def is_readable( entity: Union[ DataNode, Task, Job, Sequence, Scenario, Cycle, Submission, DataNodeId, TaskId, JobId, SequenceId, ScenarioId, CycleId, SubmissionId, ], ) -> bool: """Indicate if an entity can be read. This function checks if the given entity can be read. Returns: True if the given entity can be read. False otherwise. """ if isinstance(entity, Cycle): return _CycleManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(Cycle._ID_PREFIX): return _CycleManagerFactory._build_manager()._is_readable(CycleId(entity)) if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._is_readable(ScenarioId(entity)) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(Sequence._ID_PREFIX): return _SequenceManagerFactory._build_manager()._is_readable(SequenceId(entity)) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(Task._ID_PREFIX): return _TaskManagerFactory._build_manager()._is_readable(TaskId(entity)) if isinstance(entity, Job): return _JobManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(Job._ID_PREFIX): return _JobManagerFactory._build_manager()._is_readable(JobId(entity)) if isinstance(entity, DataNode): return _DataManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(DataNode._ID_PREFIX): return _DataManagerFactory._build_manager()._is_readable(DataNodeId(entity)) if isinstance(entity, Submission): return _SubmissionManagerFactory._build_manager()._is_readable(entity) if isinstance(entity, str) and entity.startswith(Submission._ID_PREFIX): return _SubmissionManagerFactory._build_manager()._is_readable(SequenceId(entity)) return False
Submit a scenario, sequence or task entity for execution. This function submits the given entity for execution and returns the created job(s). If the entity is a sequence or a scenario, all the tasks of the entity are submitted for execution. Parameters: entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to submit. force (bool): If True, the execution is forced even if for skippable tasks. wait (bool): Wait for the orchestrated jobs created from the submission to be finished in asynchronous mode. timeout (Union[float, int]): The optional maximum number of seconds to wait for the jobs to be finished before returning. **properties (dict[str, any]): A keyworded variable length list of user additional arguments that will be stored within the `Submission^`. It can be accessed via `Submission.properties^`. Returns: The created `Submission^` containing the information about the submission.
def submit( entity: Union[Scenario, Sequence, Task], force: bool = False, wait: bool = False, timeout: Optional[Union[float, int]] = None, **properties, ) -> Submission: """Submit a scenario, sequence or task entity for execution. This function submits the given entity for execution and returns the created job(s). If the entity is a sequence or a scenario, all the tasks of the entity are submitted for execution. Parameters: entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to submit. force (bool): If True, the execution is forced even if for skippable tasks. wait (bool): Wait for the orchestrated jobs created from the submission to be finished in asynchronous mode. timeout (Union[float, int]): The optional maximum number of seconds to wait for the jobs to be finished before returning. **properties (dict[str, any]): A keyworded variable length list of user additional arguments that will be stored within the `Submission^`. It can be accessed via `Submission.properties^`. Returns: The created `Submission^` containing the information about the submission. """ if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._submit( entity, force=force, wait=wait, timeout=timeout, **properties ) if isinstance(entity, Sequence): return _SequenceManagerFactory._build_manager()._submit( entity, force=force, wait=wait, timeout=timeout, **properties ) if isinstance(entity, Task): return _TaskManagerFactory._build_manager()._submit( entity, force=force, wait=wait, timeout=timeout, **properties ) return None
Check if an entity with the specified identifier exists. This function checks if an entity with the given identifier exists. It supports various types of entity identifiers, including `TaskId^`, `DataNodeId^`, `SequenceId^`, `ScenarioId^`, `JobId^`, `CycleId^`, `SubmissionId^`, and string representations. Parameters: entity_id (Union[DataNodeId^, TaskId^, SequenceId^, ScenarioId^, JobId^, CycleId^, SubmissionId^, str]): The identifier of the entity to check for existence. Returns: True if the given entity exists. False otherwise. Raises: ModelNotFound: If the entity's type cannot be determined. Note: The function performs checks for various entity types (`Job^`, `Cycle^`, `Scenario^`, `Sequence^`, `Task^`, `DataNode^`, `Submission^`) based on their respective identifier prefixes.
def exists(entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, SubmissionId, str]) -> bool: """Check if an entity with the specified identifier exists. This function checks if an entity with the given identifier exists. It supports various types of entity identifiers, including `TaskId^`, `DataNodeId^`, `SequenceId^`, `ScenarioId^`, `JobId^`, `CycleId^`, `SubmissionId^`, and string representations. Parameters: entity_id (Union[DataNodeId^, TaskId^, SequenceId^, ScenarioId^, JobId^, CycleId^, SubmissionId^, str]): The identifier of the entity to check for existence. Returns: True if the given entity exists. False otherwise. Raises: ModelNotFound: If the entity's type cannot be determined. Note: The function performs checks for various entity types (`Job^`, `Cycle^`, `Scenario^`, `Sequence^`, `Task^`, `DataNode^`, `Submission^`) based on their respective identifier prefixes. """ if _is_job(entity_id): return _JobManagerFactory._build_manager()._exists(JobId(entity_id)) if _is_cycle(entity_id): return _CycleManagerFactory._build_manager()._exists(CycleId(entity_id)) if _is_scenario(entity_id): return _ScenarioManagerFactory._build_manager()._exists(ScenarioId(entity_id)) if _is_sequence(entity_id): return _SequenceManagerFactory._build_manager()._exists(SequenceId(entity_id)) if _is_task(entity_id): return _TaskManagerFactory._build_manager()._exists(TaskId(entity_id)) if _is_data_node(entity_id): return _DataManagerFactory._build_manager()._exists(DataNodeId(entity_id)) if _is_submission(entity_id): return _SubmissionManagerFactory._build_manager()._exists(SubmissionId(entity_id)) raise ModelNotFound("NOT_DETERMINED", entity_id)
Retrieve an entity by its unique identifier. This function allows you to retrieve an entity by specifying its identifier. The identifier must match the pattern of one of the supported entity types: Task^, DataNode^, Sequence^, Job^, Cycle^, Submission^, or Scenario^. Parameters: entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, str]): The identifier of the entity to retrieve.<br/> It should conform to the identifier pattern of one of the entities (`Task^`, `DataNode^`, `Sequence^`, `Job^`, `Cycle^` or `Scenario^`). Returns: The entity that corresponds to the provided identifier. Returns None if no matching entity is found. Raises: ModelNotFound^: If the provided *entity_id* does not match any known entity pattern.
def get( entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, SubmissionId, str], ) -> Union[Task, DataNode, Sequence, Scenario, Job, Cycle, Submission]: """Retrieve an entity by its unique identifier. This function allows you to retrieve an entity by specifying its identifier. The identifier must match the pattern of one of the supported entity types: Task^, DataNode^, Sequence^, Job^, Cycle^, Submission^, or Scenario^. Parameters: entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, str]): The identifier of the entity to retrieve.<br/> It should conform to the identifier pattern of one of the entities (`Task^`, `DataNode^`, `Sequence^`, `Job^`, `Cycle^` or `Scenario^`). Returns: The entity that corresponds to the provided identifier. Returns None if no matching entity is found. Raises: ModelNotFound^: If the provided *entity_id* does not match any known entity pattern. """ if _is_job(entity_id): return _JobManagerFactory._build_manager()._get(JobId(entity_id)) if _is_cycle(entity_id): return _CycleManagerFactory._build_manager()._get(CycleId(entity_id)) if _is_scenario(entity_id): return _ScenarioManagerFactory._build_manager()._get(ScenarioId(entity_id)) if _is_sequence(entity_id): return _SequenceManagerFactory._build_manager()._get(SequenceId(entity_id)) if _is_task(entity_id): return _TaskManagerFactory._build_manager()._get(TaskId(entity_id)) if _is_data_node(entity_id): return _DataManagerFactory._build_manager()._get(DataNodeId(entity_id)) if _is_submission(entity_id): return _SubmissionManagerFactory._build_manager()._get(SubmissionId(entity_id)) raise ModelNotFound("NOT_DETERMINED", entity_id)
Retrieve a list of all existing tasks. This function returns a list of all tasks that currently exist. Returns: A list containing all the tasks.
def get_tasks() -> List[Task]: """Retrieve a list of all existing tasks. This function returns a list of all tasks that currently exist. Returns: A list containing all the tasks. """ return _TaskManagerFactory._build_manager()._get_all()
Check if a `Scenario^`, a `Job^` or a `Submission^` can be deleted. This function determines whether a scenario or a job can be safely deleted without causing conflicts or issues. Parameters: entity (Union[Scenario, Job, Submission, ScenarioId, JobId, SubmissionId]): The scenario, job or submission to check. Returns: True if the given scenario, job or submission can be deleted. False otherwise.
def is_deletable(entity: Union[Scenario, Job, Submission, ScenarioId, JobId, SubmissionId]) -> bool: """Check if a `Scenario^`, a `Job^` or a `Submission^` can be deleted. This function determines whether a scenario or a job can be safely deleted without causing conflicts or issues. Parameters: entity (Union[Scenario, Job, Submission, ScenarioId, JobId, SubmissionId]): The scenario, job or submission to check. Returns: True if the given scenario, job or submission can be deleted. False otherwise. """ if isinstance(entity, Job): return _JobManagerFactory._build_manager()._is_deletable(entity) if isinstance(entity, str) and entity.startswith(Job._ID_PREFIX): return _JobManagerFactory._build_manager()._is_deletable(JobId(entity)) if isinstance(entity, Scenario): return _ScenarioManagerFactory._build_manager()._is_deletable(entity) if isinstance(entity, str) and entity.startswith(Scenario._ID_PREFIX): return _ScenarioManagerFactory._build_manager()._is_deletable(ScenarioId(entity)) if isinstance(entity, Submission): return _SubmissionManagerFactory._build_manager()._is_deletable(entity) if isinstance(entity, str) and entity.startswith(Submission._ID_PREFIX): return _SubmissionManagerFactory._build_manager()._is_deletable(SubmissionId(entity)) return True
Delete an entity and its nested entities. This function deletes the specified entity and recursively deletes all its nested entities. The behavior varies depending on the type of entity provided: - If a `CycleId` is provided, the nested scenarios, tasks, data nodes, and jobs are deleted. - If a `ScenarioId` is provided, the nested sequences, tasks, data nodes, submissions and jobs are deleted. If the scenario is primary, it can only be deleted if it is the only scenario in the cycle. In that case, its cycle is also deleted. Use the `is_deletable()^` function to check if the scenario can be deleted. - If a `SequenceId` is provided, the related jobs are deleted. - If a `TaskId` is provided, the related data nodes, and jobs are deleted. - If a `DataNodeId` is provided, the data node is deleted. - If a `SubmissionId^` is provided, the related jobs are deleted. The submission can only be deleted if the execution has been finished. - If a `JobId^` is provided, the job entity can only be deleted if the execution has been finished. Parameters: entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, SubmissionId, JobId, CycleId]): The identifier of the entity to delete. Raises: ModelNotFound: No entity corresponds to the specified *entity_id*.
def delete(entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, SubmissionId]): """Delete an entity and its nested entities. This function deletes the specified entity and recursively deletes all its nested entities. The behavior varies depending on the type of entity provided: - If a `CycleId` is provided, the nested scenarios, tasks, data nodes, and jobs are deleted. - If a `ScenarioId` is provided, the nested sequences, tasks, data nodes, submissions and jobs are deleted. If the scenario is primary, it can only be deleted if it is the only scenario in the cycle. In that case, its cycle is also deleted. Use the `is_deletable()^` function to check if the scenario can be deleted. - If a `SequenceId` is provided, the related jobs are deleted. - If a `TaskId` is provided, the related data nodes, and jobs are deleted. - If a `DataNodeId` is provided, the data node is deleted. - If a `SubmissionId^` is provided, the related jobs are deleted. The submission can only be deleted if the execution has been finished. - If a `JobId^` is provided, the job entity can only be deleted if the execution has been finished. Parameters: entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, SubmissionId, JobId, CycleId]): The identifier of the entity to delete. Raises: ModelNotFound: No entity corresponds to the specified *entity_id*. """ if _is_job(entity_id): job_manager = _JobManagerFactory._build_manager() return job_manager._delete(job_manager._get(JobId(entity_id))) if _is_cycle(entity_id): return _CycleManagerFactory._build_manager()._hard_delete(CycleId(entity_id)) if _is_scenario(entity_id): return _ScenarioManagerFactory._build_manager()._hard_delete(ScenarioId(entity_id)) if _is_sequence(entity_id): return _SequenceManagerFactory._build_manager()._hard_delete(SequenceId(entity_id)) if _is_task(entity_id): return _TaskManagerFactory._build_manager()._hard_delete(TaskId(entity_id)) if _is_data_node(entity_id): return _DataManagerFactory._build_manager()._delete(DataNodeId(entity_id)) if _is_submission(entity_id): return _SubmissionManagerFactory._build_manager()._hard_delete(SubmissionId(entity_id)) raise ModelNotFound("NOT_DETERMINED", entity_id)
Retrieve a list of existing scenarios filtered by cycle or tag. This function allows you to retrieve a list of scenarios based on optional filtering criteria. If both a _cycle_ and a _tag_ are provided, the returned list contains scenarios that belong to the specified _cycle_ **and** also have the specified _tag_. Parameters: cycle (Optional[Cycle^]): The optional `Cycle^` to filter scenarios by. tag (Optional[str]): The optional tag to filter scenarios by. is_sorted (bool): The option to sort scenarios. The default sorting key is name. descending (bool): The option to sort scenarios on the sorting key in descending order. sort_key (Literal["name", "id", "creation_date", "tags"]): The optiononal sort_key to decide upon what key scenarios are sorted. The sorting is in increasing order for dates, in alphabetical order for name and id, in lexographical order for tags. Returns: The list of scenarios filtered by cycle or tag and optionally sorted by name, id, creation_date or tags. If no filtering criterion is provided, this method returns all existing scenarios. If is_sorted is set to true, the scenarios are sorted by sort_key. The scenarios are sorted by name if an incorrect or no sort_key is provided.
def get_scenarios( cycle: Optional[Cycle] = None, tag: Optional[str] = None, is_sorted: bool = False, descending: bool = False, sort_key: Literal["name", "id", "config_id", "creation_date", "tags"] = "name", ) -> List[Scenario]: """Retrieve a list of existing scenarios filtered by cycle or tag. This function allows you to retrieve a list of scenarios based on optional filtering criteria. If both a _cycle_ and a _tag_ are provided, the returned list contains scenarios that belong to the specified _cycle_ **and** also have the specified _tag_. Parameters: cycle (Optional[Cycle^]): The optional `Cycle^` to filter scenarios by. tag (Optional[str]): The optional tag to filter scenarios by. is_sorted (bool): The option to sort scenarios. The default sorting key is name. descending (bool): The option to sort scenarios on the sorting key in descending order. sort_key (Literal["name", "id", "creation_date", "tags"]): The optiononal sort_key to decide upon what key scenarios are sorted. The sorting is in increasing order for dates, in alphabetical order for name and id, in lexographical order for tags. Returns: The list of scenarios filtered by cycle or tag and optionally sorted by name, id, creation_date or tags. If no filtering criterion is provided, this method returns all existing scenarios. If is_sorted is set to true, the scenarios are sorted by sort_key. The scenarios are sorted by name if an incorrect or no sort_key is provided. """ scenario_manager = _ScenarioManagerFactory._build_manager() if not cycle and not tag: scenarios = scenario_manager._get_all() elif cycle and not tag: scenarios = scenario_manager._get_all_by_cycle(cycle) elif not cycle and tag: scenarios = scenario_manager._get_all_by_tag(tag) elif cycle and tag: cycles_scenarios = scenario_manager._get_all_by_cycle(cycle) scenarios = [scenario for scenario in cycles_scenarios if scenario.has_tag(tag)] else: scenarios = [] if is_sorted: scenario_manager._sort_scenarios(scenarios, descending, sort_key) return scenarios
Retrieve the primary scenario associated with a cycle. Parameters: cycle (Cycle^): The cycle for which to retrieve the primary scenario. Returns: The primary scenario of the given _cycle_. If the cycle has no primary scenario, this method returns None.
def get_primary(cycle: Cycle) -> Optional[Scenario]: """Retrieve the primary scenario associated with a cycle. Parameters: cycle (Cycle^): The cycle for which to retrieve the primary scenario. Returns: The primary scenario of the given _cycle_. If the cycle has no primary scenario, this method returns None. """ return _ScenarioManagerFactory._build_manager()._get_primary(cycle)
Retrieve a list of all primary scenarios. Parameters: is_sorted (bool): The option to sort scenarios. The default sorting key is name. descending (bool): The option to sort scenarios on the sorting key in descending order. sort_key (Literal["name", "id", "creation_date", "tags"]): The optiononal sort_key to decide upon what key scenarios are sorted. The sorting is in increasing order for dates, in alphabetical order for name and id, in lexographical order for tags. Returns: The list containing all primary scenarios, optionally sorted by name, id, creation_date or tags. The sorting is in increasing order for dates, in alphabetical order for name and id, and in lexicographical order for tags. If sorted is set to true, but if an incorrect or no sort_key is provided, the scenarios are sorted by name.
def get_primary_scenarios( is_sorted: bool = False, descending: bool = False, sort_key: Literal["name", "id", "config_id", "creation_date", "tags"] = "name", ) -> List[Scenario]: """Retrieve a list of all primary scenarios. Parameters: is_sorted (bool): The option to sort scenarios. The default sorting key is name. descending (bool): The option to sort scenarios on the sorting key in descending order. sort_key (Literal["name", "id", "creation_date", "tags"]): The optiononal sort_key to decide upon what key scenarios are sorted. The sorting is in increasing order for dates, in alphabetical order for name and id, in lexographical order for tags. Returns: The list containing all primary scenarios, optionally sorted by name, id, creation_date or tags. The sorting is in increasing order for dates, in alphabetical order for name and id, and in lexicographical order for tags. If sorted is set to true, but if an incorrect or no sort_key is provided, the scenarios are sorted by name. """ scenario_manager = _ScenarioManagerFactory._build_manager() scenarios = scenario_manager._get_primary_scenarios() if is_sorted: scenario_manager._sort_scenarios(scenarios, descending, sort_key) return scenarios
Determine if a scenario can be promoted to become a primary scenario. This function checks whether the given scenario is eligible to be promoted as a primary scenario. Parameters: scenario (Union[Scenario, ScenarioId]): The scenario to be evaluated for promotability. Returns: True if the given scenario can be promoted to be a primary scenario. False otherwise.
def is_promotable(scenario: Union[Scenario, ScenarioId]) -> bool: """Determine if a scenario can be promoted to become a primary scenario. This function checks whether the given scenario is eligible to be promoted as a primary scenario. Parameters: scenario (Union[Scenario, ScenarioId]): The scenario to be evaluated for promotability. Returns: True if the given scenario can be promoted to be a primary scenario. False otherwise. """ return _ScenarioManagerFactory._build_manager()._is_promotable_to_primary(scenario)
Promote a scenario as the primary scenario of its cycle. This function promotes the given scenario as the primary scenario of its associated cycle. If the cycle already has a primary scenario, that scenario is demoted and is no longer considered the primary scenario for its cycle. Parameters: scenario (Scenario^): The scenario to promote as the new _primary_ scenario.
def set_primary(scenario: Scenario): """Promote a scenario as the primary scenario of its cycle. This function promotes the given scenario as the primary scenario of its associated cycle. If the cycle already has a primary scenario, that scenario is demoted and is no longer considered the primary scenario for its cycle. Parameters: scenario (Scenario^): The scenario to promote as the new _primary_ scenario. """ return _ScenarioManagerFactory._build_manager()._set_primary(scenario)
Add a tag to a scenario. This function adds a user-defined tag to the specified scenario. If another scenario within the same cycle already has the same tag applied, the previous scenario is untagged. Parameters: scenario (Scenario^): The scenario to which the tag will be added. tag (str): The tag to apply to the scenario.
def tag(scenario: Scenario, tag: str): """Add a tag to a scenario. This function adds a user-defined tag to the specified scenario. If another scenario within the same cycle already has the same tag applied, the previous scenario is untagged. Parameters: scenario (Scenario^): The scenario to which the tag will be added. tag (str): The tag to apply to the scenario. """ return _ScenarioManagerFactory._build_manager()._tag(scenario, tag)
Remove a tag from a scenario. This function removes a specified tag from the given scenario. If the scenario does not have the specified tag, it has no effect. Parameters: scenario (Scenario^): The scenario from which the tag will be removed. tag (str): The tag to remove from the scenario.
def untag(scenario: Scenario, tag: str): """Remove a tag from a scenario. This function removes a specified tag from the given scenario. If the scenario does not have the specified tag, it has no effect. Parameters: scenario (Scenario^): The scenario from which the tag will be removed. tag (str): The tag to remove from the scenario. """ return _ScenarioManagerFactory._build_manager()._untag(scenario, tag)
Compare the data nodes of several scenarios. You can specify which data node config identifier should the comparison be performed on. Parameters: *scenarios (*Scenario^): The list of the scenarios to compare. data_node_config_id (Optional[str]): The config identifier of the DataNode to perform the comparison on. <br/> If _data_node_config_id_ is not provided, the scenarios are compared on all defined comparators.<br/> Returns: The comparison results. The key is the data node config identifier used for comparison. Raises: InsufficientScenarioToCompare^: Raised when only one or no scenario for comparison is provided. NonExistingComparator^: Raised when the scenario comparator does not exist. DifferentScenarioConfigs^: Raised when the provided scenarios do not share the same scenario config. NonExistingScenarioConfig^: Raised when the scenario config of the provided scenarios could not be found.
def compare_scenarios(*scenarios: Scenario, data_node_config_id: Optional[str] = None) -> Dict[str, Any]: """Compare the data nodes of several scenarios. You can specify which data node config identifier should the comparison be performed on. Parameters: *scenarios (*Scenario^): The list of the scenarios to compare. data_node_config_id (Optional[str]): The config identifier of the DataNode to perform the comparison on. <br/> If _data_node_config_id_ is not provided, the scenarios are compared on all defined comparators.<br/> Returns: The comparison results. The key is the data node config identifier used for comparison. Raises: InsufficientScenarioToCompare^: Raised when only one or no scenario for comparison is provided. NonExistingComparator^: Raised when the scenario comparator does not exist. DifferentScenarioConfigs^: Raised when the provided scenarios do not share the same scenario config. NonExistingScenarioConfig^: Raised when the scenario config of the provided scenarios could not be found. """ return _ScenarioManagerFactory._build_manager()._compare(*scenarios, data_node_config_id=data_node_config_id)
Subscribe a function to be called on job status change. The subscription is applied to all jobs created for the execution of _scenario_. If no scenario is provided, the subscription applies to all scenarios. Parameters: callback (Callable[[Scenario^, Job^], None]): The function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. scenario (Optional[Scenario^]): The scenario to which the callback is applied. If None, the subscription is registered for all scenarios. Note: Notifications are applied only for jobs created **after** this subscription.
def subscribe_scenario( callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, scenario: Optional[Scenario] = None, ): """Subscribe a function to be called on job status change. The subscription is applied to all jobs created for the execution of _scenario_. If no scenario is provided, the subscription applies to all scenarios. Parameters: callback (Callable[[Scenario^, Job^], None]): The function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. scenario (Optional[Scenario^]): The scenario to which the callback is applied. If None, the subscription is registered for all scenarios. Note: Notifications are applied only for jobs created **after** this subscription. """ params = [] if params is None else params return _ScenarioManagerFactory._build_manager()._subscribe(callback, params, scenario)
Unsubscribe a function that is called when the status of a `Job^` changes. If no scenario is provided, the subscription is removed for all scenarios. Parameters: callback (Callable[[Scenario^, Job^], None]): The function to unsubscribe from. params (Optional[List[Any]]): The parameters to be passed to the callback. scenario (Optional[Scenario]): The scenario to unsubscribe from. If None, it applies to all scenarios. Note: The callback function will continue to be called for ongoing jobs.
def unsubscribe_scenario( callback: Callable[[Scenario, Job], None], params: Optional[List[Any]] = None, scenario: Optional[Scenario] = None ): """Unsubscribe a function that is called when the status of a `Job^` changes. If no scenario is provided, the subscription is removed for all scenarios. Parameters: callback (Callable[[Scenario^, Job^], None]): The function to unsubscribe from. params (Optional[List[Any]]): The parameters to be passed to the callback. scenario (Optional[Scenario]): The scenario to unsubscribe from. If None, it applies to all scenarios. Note: The callback function will continue to be called for ongoing jobs. """ return _ScenarioManagerFactory._build_manager()._unsubscribe(callback, params, scenario)
Subscribe a function to be called on job status change. The subscription is applied to all jobs created for the execution of _sequence_. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. sequence (Optional[Sequence^]): The sequence to subscribe on. If None, the subscription is applied to all sequences. Note: Notifications are applied only for jobs created **after** this subscription.
def subscribe_sequence( callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, sequence: Optional[Sequence] = None ): """Subscribe a function to be called on job status change. The subscription is applied to all jobs created for the execution of _sequence_. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. sequence (Optional[Sequence^]): The sequence to subscribe on. If None, the subscription is applied to all sequences. Note: Notifications are applied only for jobs created **after** this subscription. """ return _SequenceManagerFactory._build_manager()._subscribe(callback, params, sequence)
Unsubscribe a function that is called when the status of a Job changes. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. sequence (Optional[Sequence^]): The sequence to unsubscribe to. If None, it applies to all sequences. Note: The function will continue to be called for ongoing jobs.
def unsubscribe_sequence( callback: Callable[[Sequence, Job], None], params: Optional[List[Any]] = None, sequence: Optional[Sequence] = None ): """Unsubscribe a function that is called when the status of a Job changes. Parameters: callback (Callable[[Sequence^, Job^], None]): The callable function to be called on status change. params (Optional[List[Any]]): The parameters to be passed to the _callback_. sequence (Optional[Sequence^]): The sequence to unsubscribe to. If None, it applies to all sequences. Note: The function will continue to be called for ongoing jobs. """ return _SequenceManagerFactory._build_manager()._unsubscribe(callback, params, sequence)
Return all existing sequences. Returns: The list of all sequences.
def get_sequences() -> List[Sequence]: """Return all existing sequences. Returns: The list of all sequences. """ return _SequenceManagerFactory._build_manager()._get_all()
Return all the existing jobs. Returns: The list of all jobs.
def get_jobs() -> List[Job]: """Return all the existing jobs. Returns: The list of all jobs. """ return _JobManagerFactory._build_manager()._get_all()
Return all the existing submissions. Returns: The list of all submissions.
def get_submissions() -> List[Submission]: """Return all the existing submissions. Returns: The list of all submissions. """ return _SubmissionManagerFactory._build_manager()._get_all()
Delete a job. This function deletes the specified job. If the job is not completed and *force* is not set to True, a `JobNotDeletedException^` may be raised. Parameters: job (Job^): The job to delete. force (Optional[bool]): If True, forces the deletion of _job_, even if it is not completed yet. Raises: JobNotDeletedException^: If the job is not finished.
def delete_job(job: Job, force: Optional[bool] = False): """Delete a job. This function deletes the specified job. If the job is not completed and *force* is not set to True, a `JobNotDeletedException^` may be raised. Parameters: job (Job^): The job to delete. force (Optional[bool]): If True, forces the deletion of _job_, even if it is not completed yet. Raises: JobNotDeletedException^: If the job is not finished. """ return _JobManagerFactory._build_manager()._delete(job, force)
Delete all jobs.
def delete_jobs(): """Delete all jobs.""" return _JobManagerFactory._build_manager()._delete_all()
Cancel a job and set the status of the subsequent jobs to ABANDONED. This function cancels the specified job and sets the status of any subsequent jobs to ABANDONED. Parameters: job (Job^): The job to cancel.
def cancel_job(job: Union[str, Job]): """Cancel a job and set the status of the subsequent jobs to ABANDONED. This function cancels the specified job and sets the status of any subsequent jobs to ABANDONED. Parameters: job (Job^): The job to cancel. """ _JobManagerFactory._build_manager()._cancel(job)
Return the latest job of a task. This function retrieves the latest job associated with a task. Parameters: task (Task^): The task to retrieve the latest job from. Returns: The latest job created from _task_, or None if no job has been created from _task_.
def get_latest_job(task: Task) -> Optional[Job]: """Return the latest job of a task. This function retrieves the latest job associated with a task. Parameters: task (Task^): The task to retrieve the latest job from. Returns: The latest job created from _task_, or None if no job has been created from _task_. """ return _JobManagerFactory._build_manager()._get_latest(task)
Return the latest submission of a scenario, sequence or task. This function retrieves the latest submission associated with a scenario, sequence or task. Parameters: entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to retrieve the latest submission from. Returns: The latest submission created from _scenario_, _sequence_ and _task_, or None if no submission has been created from _scenario_, _sequence_ and _task_.
def get_latest_submission(entity: Union[Scenario, Sequence, Task]) -> Optional[Submission]: """Return the latest submission of a scenario, sequence or task. This function retrieves the latest submission associated with a scenario, sequence or task. Parameters: entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to retrieve the latest submission from. Returns: The latest submission created from _scenario_, _sequence_ and _task_, or None if no submission has been created from _scenario_, _sequence_ and _task_. """ return _SubmissionManagerFactory._build_manager()._get_latest(entity)
Return all the existing data nodes. Returns: The list of all data nodes.
def get_data_nodes() -> List[DataNode]: """Return all the existing data nodes. Returns: The list of all data nodes. """ return _DataManagerFactory._build_manager()._get_all()
Return the list of all existing cycles. Returns: The list of all cycles.
def get_cycles() -> List[Cycle]: """Return the list of all existing cycles. Returns: The list of all cycles. """ return _CycleManagerFactory._build_manager()._get_all()
Create and return a new scenario based on a scenario configuration. This function checks and locks the configuration, manages application's version, and creates a new scenario from the scenario configuration provided. If the scenario belongs to a cycle, the cycle (corresponding to the _creation_date_ and the configuration frequency attribute) is created if it does not exist yet. Parameters: config (ScenarioConfig^): The scenario configuration used to create a new scenario. creation_date (Optional[datetime.datetime]): The creation date of the scenario. If None, the current date time is used. name (Optional[str]): The displayable name of the scenario. Returns: The new scenario. Raises: SystemExit: If the configuration check returns some errors.
def create_scenario( config: ScenarioConfig, creation_date: Optional[datetime] = None, name: Optional[str] = None, ) -> Scenario: """Create and return a new scenario based on a scenario configuration. This function checks and locks the configuration, manages application's version, and creates a new scenario from the scenario configuration provided. If the scenario belongs to a cycle, the cycle (corresponding to the _creation_date_ and the configuration frequency attribute) is created if it does not exist yet. Parameters: config (ScenarioConfig^): The scenario configuration used to create a new scenario. creation_date (Optional[datetime.datetime]): The creation date of the scenario. If None, the current date time is used. name (Optional[str]): The displayable name of the scenario. Returns: The new scenario. Raises: SystemExit: If the configuration check returns some errors. """ Core._manage_version_and_block_config() return _ScenarioManagerFactory._build_manager()._create(config, creation_date, name)
Create and return a new GLOBAL data node from a data node configuration. This function checks and locks the configuration, manages application's version, and creates the new data node from the data node configuration provided. Parameters: config (DataNodeConfig^): The data node configuration. It must have a `GLOBAL` scope. Returns: The new global data node. Raises: DataNodeConfigIsNotGlobal^: If the data node configuration does not have GLOBAL scope. SystemExit: If the configuration check returns some errors.
def create_global_data_node(config: DataNodeConfig) -> DataNode: """Create and return a new GLOBAL data node from a data node configuration. This function checks and locks the configuration, manages application's version, and creates the new data node from the data node configuration provided. Parameters: config (DataNodeConfig^): The data node configuration. It must have a `GLOBAL` scope. Returns: The new global data node. Raises: DataNodeConfigIsNotGlobal^: If the data node configuration does not have GLOBAL scope. SystemExit: If the configuration check returns some errors. """ # Check if the data node config has GLOBAL scope if config.scope is not Scope.GLOBAL: raise DataNodeConfigIsNotGlobal(config.id) Core._manage_version_and_block_config() if dns := _DataManagerFactory._build_manager()._get_by_config_id(config.id): return dns[0] return _DataManagerFactory._build_manager()._create_and_set(config, None, None)
Deprecated. Use `clean_all_entities` function instead.
def clean_all_entities_by_version(version_number=None) -> bool: """Deprecated. Use `clean_all_entities` function instead.""" _warn_deprecated("'clean_all_entities_by_version'", suggest="the 'clean_all_entities' function") return clean_all_entities(version_number)
Deletes all entities associated with the specified version. Parameters: version_number (str): The version number of the entities to be deleted. The version_number should not be a production version. Returns: True if the operation succeeded, False otherwise. Notes: - If the specified version does not exist, the operation will be aborted, and False will be returned. - If the specified version is a production version, the operation will be aborted, and False will be returned. - This function cleans all entities, including jobs, submissions, scenarios, cycles, sequences, tasks, and data nodes.
def clean_all_entities(version_number: str) -> bool: """Deletes all entities associated with the specified version. Parameters: version_number (str): The version number of the entities to be deleted. The version_number should not be a production version. Returns: True if the operation succeeded, False otherwise. Notes: - If the specified version does not exist, the operation will be aborted, and False will be returned. - If the specified version is a production version, the operation will be aborted, and False will be returned. - This function cleans all entities, including jobs, submissions, scenarios, cycles, sequences, tasks, and data nodes. """ version_manager = _VersionManagerFactory._build_manager() try: version_number = version_manager._replace_version_number(version_number) except NonExistingVersion as e: __logger.warning(f"{e.message} Abort cleaning the entities of version '{version_number}'.") return False if version_number in version_manager._get_production_versions(): __logger.warning( f"Abort cleaning the entities of version '{version_number}'. A production version can not be deleted." ) return False _JobManagerFactory._build_manager()._delete_by_version(version_number) _SubmissionManagerFactory._build_manager()._delete_by_version(version_number) _ScenarioManagerFactory._build_manager()._delete_by_version(version_number) _SequenceManagerFactory._build_manager()._delete_by_version(version_number) _TaskManagerFactory._build_manager()._delete_by_version(version_number) _DataManagerFactory._build_manager()._delete_by_version(version_number) version_manager._delete(version_number) try: version_manager._delete_production_version(version_number) except VersionIsNotProductionVersion: pass return True
Export all related entities of a scenario to a folder. This function exports all related entities of the specified scenario to the specified folder. Parameters: scenario_id (ScenarioId): The ID of the scenario to export. folder_path (Union[str, pathlib.Path]): The folder path to export the scenario to. If the path exists and the override parameter is False, an exception is raised. override (bool): If True, the existing folder will be overridden. Default is False. include_data (bool): If True, the file-based data nodes are exported as well. This includes Pickle, CSV, Excel, Parquet, and JSON data nodes. If the scenario has a data node that is not file-based, a warning will be logged, and the data node will not be exported. The default value is False. Raises: ExportFolderAlreadyExist^: If the `folder_path` already exists and the override parameter is False.
def export_scenario( scenario_id: ScenarioId, folder_path: Union[str, pathlib.Path], override: bool = False, include_data: bool = False, ): """Export all related entities of a scenario to a folder. This function exports all related entities of the specified scenario to the specified folder. Parameters: scenario_id (ScenarioId): The ID of the scenario to export. folder_path (Union[str, pathlib.Path]): The folder path to export the scenario to. If the path exists and the override parameter is False, an exception is raised. override (bool): If True, the existing folder will be overridden. Default is False. include_data (bool): If True, the file-based data nodes are exported as well. This includes Pickle, CSV, Excel, Parquet, and JSON data nodes. If the scenario has a data node that is not file-based, a warning will be logged, and the data node will not be exported. The default value is False. Raises: ExportFolderAlreadyExist^: If the `folder_path` already exists and the override parameter is False. """ manager = _ScenarioManagerFactory._build_manager() scenario = manager._get(scenario_id) entity_ids = manager._get_children_entity_ids(scenario) entity_ids.scenario_ids = {scenario_id} if scenario.cycle: entity_ids.cycle_ids = {scenario.cycle.id} if folder_path == Config.core.taipy_storage_folder: raise InvalidExportPath("The export folder must not be the storage folder.") if os.path.exists(folder_path): if override: __logger.warning(f"Override the existing folder '{folder_path}'") shutil.rmtree(folder_path, ignore_errors=True) else: raise ExportFolderAlreadyExists(str(folder_path), scenario_id) for data_node_id in entity_ids.data_node_ids: _DataManagerFactory._build_manager()._export(data_node_id, folder_path, include_data=include_data) for task_id in entity_ids.task_ids: _TaskManagerFactory._build_manager()._export(task_id, folder_path) for sequence_id in entity_ids.sequence_ids: _SequenceManagerFactory._build_manager()._export(sequence_id, folder_path) for cycle_id in entity_ids.cycle_ids: _CycleManagerFactory._build_manager()._export(cycle_id, folder_path) for scenario_id in entity_ids.scenario_ids: _ScenarioManagerFactory._build_manager()._export(scenario_id, folder_path) for job_id in entity_ids.job_ids: _JobManagerFactory._build_manager()._export(job_id, folder_path) for submission_id in entity_ids.submission_ids: _SubmissionManagerFactory._build_manager()._export(submission_id, folder_path) _VersionManagerFactory._build_manager()._export(scenario.version, folder_path)
Get the parents of an entity from itself or its identifier. Parameters: entity (Union[TaskId, DataNodeId, SequenceId, Task, DataNode, Sequence]): The entity or its identifier to get the parents. Returns: The dictionary of all parent entities. They are grouped by their type (Scenario^, Sequences^, or tasks^) so each key corresponds to a level of the parents and the value is a set of the parent entities. An empty dictionary is returned if the entity does not have parents.<br/> Example: The following instruction returns all the scenarios that include the datanode identified by "my_datanode_id". `taipy.get_parents("id_of_my_datanode")["scenario"]` Raises: ModelNotFound^: If _entity_ does not match a correct entity pattern.
def get_parents( entity: Union[TaskId, DataNodeId, SequenceId, Task, DataNode, Sequence], parent_dict=None ) -> Dict[str, Set[_Entity]]: """Get the parents of an entity from itself or its identifier. Parameters: entity (Union[TaskId, DataNodeId, SequenceId, Task, DataNode, Sequence]): The entity or its identifier to get the parents. Returns: The dictionary of all parent entities. They are grouped by their type (Scenario^, Sequences^, or tasks^) so each key corresponds to a level of the parents and the value is a set of the parent entities. An empty dictionary is returned if the entity does not have parents.<br/> Example: The following instruction returns all the scenarios that include the datanode identified by "my_datanode_id". `taipy.get_parents("id_of_my_datanode")["scenario"]` Raises: ModelNotFound^: If _entity_ does not match a correct entity pattern. """ def update_parent_dict(parents_set, parent_dict): for k, value in parents_set.items(): if k in parent_dict.keys(): parent_dict[k].update(value) else: parent_dict[k] = value if isinstance(entity, str): entity = get(entity) parent_dict = parent_dict or {} if isinstance(entity, (Scenario, Cycle)): return parent_dict current_parent_dict: Dict[str, Set] = {} for parent in entity.parent_ids: parent_entity = get(parent) if parent_entity._MANAGER_NAME in current_parent_dict.keys(): current_parent_dict[parent_entity._MANAGER_NAME].add(parent_entity) else: current_parent_dict[parent_entity._MANAGER_NAME] = {parent_entity} if isinstance(entity, Sequence): update_parent_dict(current_parent_dict, parent_dict) if isinstance(entity, Task): parent_entity_key_to_search_next = "scenario" update_parent_dict(current_parent_dict, parent_dict) for parent in parent_dict.get(parent_entity_key_to_search_next, []): get_parents(parent, parent_dict) if isinstance(entity, DataNode): parent_entity_key_to_search_next = "task" update_parent_dict(current_parent_dict, parent_dict) for parent in parent_dict.get(parent_entity_key_to_search_next, []): get_parents(parent, parent_dict) return parent_dict
Get the scenarios grouped by cycles. Returns: The dictionary of all cycles and their corresponding scenarios.
def get_cycles_scenarios() -> Dict[Optional[Cycle], List[Scenario]]: """Get the scenarios grouped by cycles. Returns: The dictionary of all cycles and their corresponding scenarios. """ cycles_scenarios: Dict[Optional[Cycle], List[Scenario]] = {} for scenario in get_scenarios(): if scenario.cycle in cycles_scenarios.keys(): cycles_scenarios[scenario.cycle].append(scenario) else: cycles_scenarios[scenario.cycle] = [scenario] return cycles_scenarios
Get the entities by its config id. Parameters: config_id (str): The config id of the entities Returns: The list of all entities by the config id.
def get_entities_by_config_id( config_id: str, ) -> Union[List, List[Task], List[DataNode], List[Sequence], List[Scenario]]: """Get the entities by its config id. Parameters: config_id (str): The config id of the entities Returns: The list of all entities by the config id. """ entities: List = [] if entities := _ScenarioManagerFactory._build_manager()._get_by_config_id(config_id): return entities if entities := _TaskManagerFactory._build_manager()._get_by_config_id(config_id): return entities if entities := _DataManagerFactory._build_manager()._get_by_config_id(config_id): return entities return entities
Check if a package is installed. Args: module_name: Name of the taipy module importing the package. package_name: Name of the package. .
def _check_dependency_is_installed(module_name: str, package_name: str) -> None: """ Check if a package is installed. Args: module_name: Name of the taipy module importing the package. package_name: Name of the package. . """ extras = { "boto3": "s3", "pymongo": "mongo", } if not util.find_spec(package_name): raise RuntimeError( f"Cannot use {module_name} as {package_name} package is not installed. Please install it " f"using `pip install taipy[{extras.get(package_name)}]`." )
Create a connection to a Mongo database. The `"mongodb_extra_args"` passed by the user is originally a dictionary, but since `@lru_cache` wrapper only accepts hashable parameters, the `"mongodb_extra_args"` should be converted into a frozenset beforehand. Parameters: db_host (str): the database host. db_port (int): the database port. db_username (str): the database username. db_password (str): the database password. db_extra_args (frozenset): A frozenset converted from a dictionary of additional arguments to be passed into database connection string. Returns: pymongo.MongoClient
def _connect_mongodb( db_host: str, db_port: int, db_username: str, db_password: str, db_extra_args: frozenset, db_driver: str ) -> pymongo.MongoClient: """Create a connection to a Mongo database. The `"mongodb_extra_args"` passed by the user is originally a dictionary, but since `@lru_cache` wrapper only accepts hashable parameters, the `"mongodb_extra_args"` should be converted into a frozenset beforehand. Parameters: db_host (str): the database host. db_port (int): the database port. db_username (str): the database username. db_password (str): the database password. db_extra_args (frozenset): A frozenset converted from a dictionary of additional arguments to be passed into database connection string. Returns: pymongo.MongoClient """ auth_str = "" if db_username and db_password: auth_str = f"{db_username}:{db_password}@" extra_args_str = "&".join(f"{k}={str(v)}" for k, v in db_extra_args) if extra_args_str: extra_args_str = "/?" + extra_args_str driver = "mongodb" if db_driver: driver = f"{driver}+{db_driver}" connection_string = f"{driver}://{auth_str}{db_host}" connection_string = connection_string if db_driver else f"{connection_string}:{db_port}" connection_string += extra_args_str return pymongo.MongoClient(connection_string)
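Because `@lru_cache` only accepts hashable arguments, the call site converts the extra-arguments dictionary into a frozenset of items first. A hedged sketch; the host, credentials, and extra arguments are made up:

# Hypothetical call site; all values are placeholders.
extra_args = {"retryWrites": "true", "w": "majority"}

client = _connect_mongodb(
    db_host="localhost",
    db_port=27017,
    db_username="taipy",
    db_password="secret",
    db_extra_args=frozenset(extra_args.items()),  # dict -> hashable frozenset of (key, value) pairs
    db_driver="",  # empty driver keeps the plain "mongodb://host:port" form
)
db = client["taipy"]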
Retries the wrapped function/method if the exceptions listed in ``exceptions`` are thrown. The number of retries is defined by Config.core.read_entity_retry. Parameters: exceptions (tuple): Tuple of exceptions that trigger a retry attempt. sleep_time (float): Time to sleep between retries.
def _retry_repository_operation(exceptions: Tuple, sleep_time: float = 0.2): """ Retries the wrapped function/method if the exceptions listed in ``exceptions`` are thrown. The number of retries is defined by Config.core.read_entity_retry. Parameters: exceptions (tuple): Tuple of exceptions that trigger a retry attempt. sleep_time (float): Time to sleep between retries. """ def decorator(func): def newfn(*args, **kwargs): for _ in range(Config.core.read_entity_retry): try: return func(*args, **kwargs) except exceptions: time.sleep(sleep_time) return func(*args, **kwargs) return newfn return decorator
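A sketch of decorating a repository read with the retry helper; the `_read_entity` function and the chosen exceptions are illustrative, not taken from the codebase:

import json

@_retry_repository_operation((OSError, json.JSONDecodeError), sleep_time=0.1)
def _read_entity(path: str) -> dict:
    # Retried up to Config.core.read_entity_retry times before the final attempt.
    with open(path, encoding="utf-8") as f:
        return json.load(f)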
Helper function to make an event for this entity with the given `EventOperation^` type. In case of `EventOperation.UPDATE^` events, an attribute name and value must be given. Parameters: entity (Any): The entity object to generate an event for. operation (EventOperation^): The operation of the event. The possible values are: <ul> <li>CREATION</li> <li>UPDATE</li> <li>DELETION</li> <li>SUBMISSION</li> </ul> attribute_name (Optional[str]): The name of the updated attribute for a `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. attribute_value (Optional[Any]): The value of the updated attribute for a `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. **kwargs (dict[str, any]): Any extra information that would be passed to the metadata event. Note: you should pass only simple types: str, float, double as values.
def _make_event( entity: Any, operation: EventOperation, /, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ) -> Event: """Helper function to make an event for this entity with the given `EventOperation^` type. In case of `EventOperation.UPDATE^` events, an attribute name and value must be given. Parameters: entity (Any): The entity object to generate an event for. operation (EventOperation^): The operation of the event. The possible values are: <ul> <li>CREATION</li> <li>UPDATE</li> <li>DELETION</li> <li>SUBMISSION</li> </ul> attribute_name (Optional[str]): The name of the updated attribute for a `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. attribute_value (Optional[Any]): The value of the updated attribute for a `EventOperation.UPDATE`. This argument is always given in case of an UPDATE. **kwargs (dict[str, any]): Any extra information that would be passed to the metadata event. Note: you should pass only simple types: str, float, double as values.""" raise Exception(f"Unexpected entity type: {type(entity)}")
Internal helper function to send events.

It creates an event corresponding to the given arguments and sends it using `Notifier.publish(event)`.

Parameters:
    entity_type (EventEntityType^)
    operation (EventOperation^)
    entity_id (Optional[str])
    attribute_name (Optional[str])
    attribute_value (Optional[Any])
    **kwargs
def _publish_event( entity_type: EventEntityType, operation: EventOperation, /, entity_id: Optional[str] = None, attribute_name: Optional[str] = None, attribute_value: Optional[Any] = None, **kwargs, ): """Internal helper function to send events. It basically creates an event corresponding to the given arguments and send it using `Notifier.publish(event)` Parameters: entity_type (EventEntityType^) operation (EventOperation^) entity_id (Optional[str]) attribute_name (Optional[str]) attribute_value (Optional[Any]) **kwargs """ event = Event( entity_id=entity_id, entity_type=entity_type, operation=operation, attribute_name=attribute_name, attribute_value=attribute_value, metadata=kwargs, ) Notifier.publish(event)
Migrate entities from filesystem to the current version. Args: path (str): The path to the folder containing the entities. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise.
def _migrate_fs_entities(path: str, backup: bool = True) -> bool: """Migrate entities from filesystem to the current version. Args: path (str): The path to the folder containing the entities. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. """ if not os.path.isdir(path): __logger.error(f"Folder '{path}' does not exist.") return False if backup: backup_path = f"{path}_backup" try: shutil.copytree(path, backup_path) except FileExistsError: __logger.warning(f"The backup folder '{backup_path}' already exists. Migration canceled.") return False else: __logger.info(f"Backed up entities from '{path}' to '{backup_path}' folder before migration.") __logger.info(f"Starting entity migration from '{path}' folder.") entities = _load_all_entities_from_fs(path) entities, _ = _migrate(entities) __write_entities_to_fs(entities, path) __logger.info("Migration finished") return True
Migrate entities from mongodb to the current version. Args: hostname (str, optional): The hostname of the mongodb. Defaults to "localhost". port (int, optional): The port of the mongodb. Defaults to 27017. user (str, optional): The username of the mongodb. Defaults to "". password (str, optional): The password of the mongodb. Defaults to "". backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise.
def _migrate_mongo_entities( hostname: str = "localhost", port: int = 27017, user: str = "", password: str = "", backup: bool = True, ) -> bool: """Migrate entities from mongodb to the current version. Args: hostname (str, optional): The hostname of the mongodb. Defaults to "localhost". port (int, optional): The port of the mongodb. Defaults to 27017. user (str, optional): The username of the mongodb. Defaults to "". password (str, optional): The password of the mongodb. Defaults to "". backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. """ if backup: _backup_mongo_entities(hostname=hostname, port=port, user=user, password=password) __logger.info(f"Starting entity migration from MongoDB {hostname}:{port}") entities = __load_all_entities_from_mongo(hostname, port, user, password) entities, _ = _migrate(entities) __write_entities_to_mongo(entities, hostname, port, user, password) __logger.info("Migration finished") return True
Migrate entities from sqlite database to the current version. Args: path (str): The path to the sqlite database. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise.
def _migrate_sql_entities(path: str, backup: bool = True) -> bool: """Migrate entities from sqlite database to the current version. Args: path (str): The path to the sqlite database. backup (bool, optional): Whether to backup the entities before migrating. Defaults to True. Returns: bool: True if the migration was successful, False otherwise. """ if not path: __logger.error("Missing the required sqlite path.") return False if not os.path.exists(path): __logger.error(f"File '{path}' does not exist.") return False if backup: file_name, file_extension = path.rsplit(".", 1) shutil.copyfile(path, f"{file_name}_backup.{file_extension}") __logger.info(f"Backed up entities from '{path}' to '{file_name}_backup.{file_extension}' before migration.") __logger.info(f"Starting entity migration from sqlite database '{path}'") entities, versions = _load_all_entities_from_sql(path) entities, versions = _migrate(entities, versions) __write_entities_to_sql(entities, versions, path) __logger.info("Migration finished") return True
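These three helpers can also be called directly; a hedged sketch with placeholder repository locations and connection settings:

# Placeholders: adjust the repository location to your application.
ok_fs = _migrate_fs_entities(".taipy", backup=True)                   # filesystem repository
ok_sql = _migrate_sql_entities("taipy.sqlite3", backup=True)          # sqlite repository
ok_mongo = _migrate_mongo_entities(hostname="localhost", port=27017)  # mongodb repository
print(ok_fs, ok_sql, ok_mongo)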
Rename scheduler to orchestrator in old jobs. Used to migrate from version <=2.2 to >=2.3.
def __migrate_subscriber(fct_module, fct_name):
    """Rename scheduler to orchestrator in old jobs. Used to migrate from version <=2.2 to >=2.3."""
    if fct_module == "taipy.core._scheduler._scheduler":
        fct_module = fct_module.replace("_scheduler", "_orchestrator")
        fct_name = fct_name.replace("_Scheduler", "_Orchestrator")
    return fct_module, fct_name
Check the Windows Registry to see if VT code handling has been enabled by default, see https://superuser.com/a/1300251/447564.
def _vt_codes_enabled_in_windows_registry(): """ Check the Windows Registry to see if VT code handling has been enabled by default, see https://superuser.com/a/1300251/447564. """ try: # winreg is only available on Windows. import winreg except ImportError: return False else: try: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Console") reg_key_value, _ = winreg.QueryValueEx(reg_key, "VirtualTerminalLevel") except FileNotFoundError: return False else: return reg_key_value == 1
Return True if the running system's terminal supports color, and False otherwise.
def _is_color_supported(): """ Return True if the running system's terminal supports color, and False otherwise. """ is_a_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty() return is_a_tty and ( sys.platform != "win32" or "ANSICON" in os.environ or "WT_SESSION" in os.environ # Windows Terminal supports VT codes. or os.environ.get("TERM_PROGRAM") == "vscode" # VSCode's built-in terminal supports colors. or _vt_codes_enabled_in_windows_registry() )
Download content to the client. Arguments: state (State^): The current user state as received in any callback. content: File path or file content. See below. name: File name for the content on the client browser (defaults to content name). on_action: Callback function (or callback name) to call when the download ends. See below. ## Notes: - *content*: this parameter can hold several values depending on your use case: - a string: the value must be an existing path name to the file that gets downloaded or the URL to the resource you want to download. - a buffer (such as a `bytes` object): if the size of the buffer is smaller than the [*data_url_max_size*](../gui/configuration.md#p-data_url_max_size) configuration setting, then the [`python-magic`](https://pypi.org/project/python-magic/) package is used to determine the [MIME type](https://en.wikipedia.org/wiki/Media_type) of the buffer content, and the download is performed using a generated "data:" URL with the relevant type, and a base64-encoded version of the buffer content.<br/> If the buffer is too large, its content is transferred after saving it in a temporary server file. - *on_action*: this callback is triggered when the transfer of the content is achieved.</br> In this function, you can perform any clean-up operation that could be required after the download is completed.<br/> This callback can use three optional parameters: - *state*: the `State^` instance of the caller. - *id* (optional): a string representing the identifier of the caller. If this function is called directly, this will always be "Gui.download". Some controls may also trigger download actions, and then *id* would reflect the identifier of those controls. - *payload* (optional): an optional payload from the caller.<br/> This is a dictionary with the following keys: - *action*: the name of the callback; - *args*: an array of two strings. The first element reflects the *name* parameter, and the second element reflects the server-side URL where the file is located.
def download( state: State, content: t.Any, name: t.Optional[str] = "", on_action: t.Optional[t.Union[str, t.Callable]] = "" ): """Download content to the client. Arguments: state (State^): The current user state as received in any callback. content: File path or file content. See below. name: File name for the content on the client browser (defaults to content name). on_action: Callback function (or callback name) to call when the download ends. See below. ## Notes: - *content*: this parameter can hold several values depending on your use case: - a string: the value must be an existing path name to the file that gets downloaded or the URL to the resource you want to download. - a buffer (such as a `bytes` object): if the size of the buffer is smaller than the [*data_url_max_size*](../gui/configuration.md#p-data_url_max_size) configuration setting, then the [`python-magic`](https://pypi.org/project/python-magic/) package is used to determine the [MIME type](https://en.wikipedia.org/wiki/Media_type) of the buffer content, and the download is performed using a generated "data:" URL with the relevant type, and a base64-encoded version of the buffer content.<br/> If the buffer is too large, its content is transferred after saving it in a temporary server file. - *on_action*: this callback is triggered when the transfer of the content is achieved.</br> In this function, you can perform any clean-up operation that could be required after the download is completed.<br/> This callback can use three optional parameters: - *state*: the `State^` instance of the caller. - *id* (optional): a string representing the identifier of the caller. If this function is called directly, this will always be "Gui.download". Some controls may also trigger download actions, and then *id* would reflect the identifier of those controls. - *payload* (optional): an optional payload from the caller.<br/> This is a dictionary with the following keys: - *action*: the name of the callback; - *args*: an array of two strings. The first element reflects the *name* parameter, and the second element reflects the server-side URL where the file is located. """ if state and isinstance(state._gui, Gui): state._gui._download(content, name, on_action) else: _warn("'download()' must be called in the context of a callback.")
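A minimal sketch of triggering `download()` from a button callback; the page, the "report.csv" path, and the `clean_up` callback are illustrative:

from taipy.gui import Gui, Markdown, State, download

def clean_up(state: State, id: str, payload: dict):
    # Called once the browser has received the file.
    print("download finished:", payload.get("args"))

def on_export(state: State):
    # "report.csv" is a placeholder path that must exist on the server.
    download(state, content="report.csv", name="report.csv", on_action=clean_up)

page = Markdown("<|Export|button|on_action=on_export|>")
# Gui(page).run()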
Send a notification to the user interface.

Arguments:
    state (State^): The current user state as received in any callback.
    notification_type: The notification type. This can be one of "success", "info", "warning", or "error".<br/>
        To remove the last notification, set this parameter to the empty string.
    message: The text message to display.
    system_notification: If True, the system will also show the notification.<br/>
        If not specified or set to None, this parameter will use the value of
        *configuration[system_notification]*.
    duration: The time, in milliseconds, during which the notification is shown.
        If not specified or set to None, this parameter will use the value of
        *configuration[notification_duration]*.

Note that you can also call this function with *notification_type* set to the first letter of the alert
type (i.e. setting *notification_type* to "i" is equivalent to setting it to "info").

If *system_notification* is set to True, then the browser requests the system to display a notification
as well. They usually appear in small windows that fly out of the system tray.<br/>
The first time your browser is requested to show such a system notification for Taipy applications, you
may be prompted to authorize the browser to do so. Please refer to your browser documentation for details
on how to allow or prevent this feature.
def notify( state: State, notification_type: str = "I", message: str = "", system_notification: t.Optional[bool] = None, duration: t.Optional[int] = None, ): """Send a notification to the user interface. Arguments: state (State^): The current user state as received in any callback. notification_type: The notification type. This can be one of "success", "info", "warning", or "error".<br/> To remove the last notification, set this parameter to the empty string. message: The text message to display. system_notification: If True, the system will also show the notification.<br/> If not specified or set to None, this parameter will use the value of *configuration[system_notification]*. duration: The time, in milliseconds, during which the notification is shown. If not specified or set to None, this parameter will use the value of *configuration[notification_duration]*. Note that you can also call this function with *notification_type* set to the first letter or the alert type (i.e. setting *notification_type* to "i" is equivalent to setting it to "info"). If *system_notification* is set to True, then the browser requests the system to display a notification as well. They usually appear in small windows that fly out of the system tray.<br/> The first time your browser is requested to show such a system notification for Taipy applications, you may be prompted to authorize the browser to do so. Please refer to your browser documentation for details on how to allow or prevent this feature. """ if state and isinstance(state._gui, Gui): state._gui._notify(notification_type, message, system_notification, duration) else: _warn("'notify()' must be called in the context of a callback.")
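For instance, a callback might confirm a successful action with a short toast plus a system notification; a hedged sketch:

from taipy.gui import State, notify

def on_save(state: State):
    # "s" is shorthand for "success"; duration is in milliseconds.
    notify(state, "s", "Settings saved.", system_notification=True, duration=3000)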
Hold the User Interface actions. When the User Interface is held, users cannot interact with visual elements.<br/> The application must call `resume_control()^` so that users can interact again with the visual elements. You can set a callback function (or the name of a function) in the *callback* parameter. Then, a "Cancel" button will be displayed so the user can cancel whatever is happening in the application. When pressed, the callback is invoked. Arguments: state (State^): The current user state received in any callback. callback (Optional[Union[str, Callable]]): The function to be called if the user chooses to cancel.<br/> If empty or None, no cancel action is provided to the user.<br/> The signature of this function is: - state (State^): The user state; - id (str): the id of the button that triggered the callback. That will always be "UIBlocker" since it is created and managed internally; message: The message to show. The default value is the string "Work in Progress...".
def hold_control( state: State, callback: t.Optional[t.Union[str, t.Callable]] = None, message: t.Optional[str] = "Work in Progress...", ): """Hold the User Interface actions. When the User Interface is held, users cannot interact with visual elements.<br/> The application must call `resume_control()^` so that users can interact again with the visual elements. You can set a callback function (or the name of a function) in the *callback* parameter. Then, a "Cancel" button will be displayed so the user can cancel whatever is happening in the application. When pressed, the callback is invoked. Arguments: state (State^): The current user state received in any callback. callback (Optional[Union[str, Callable]]): The function to be called if the user chooses to cancel.<br/> If empty or None, no cancel action is provided to the user.<br/> The signature of this function is: - state (State^): The user state; - id (str): the id of the button that triggered the callback. That will always be "UIBlocker" since it is created and managed internally; message: The message to show. The default value is the string "Work in Progress...". """ if state and isinstance(state._gui, Gui): state._gui._hold_actions(callback, message) else: _warn("'hold_actions()' must be called in the context of a callback.")
Resume the User Interface actions. This function must be called after `hold_control()^` was invoked, when interaction must be allowed again for the user. Arguments: state (State^): The current user state as received in any callback.
def resume_control(state: State): """Resume the User Interface actions. This function must be called after `hold_control()^` was invoked, when interaction must be allowed again for the user. Arguments: state (State^): The current user state as received in any callback. """ if state and isinstance(state._gui, Gui): state._gui._resume_actions() else: _warn("'resume_actions()' must be called in the context of a callback.")
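The two functions are typically paired around a slow operation; a sketch in which the `long_task` body and the cancel handler are made up:

import time
from taipy.gui import State, hold_control, resume_control

def long_task():
    time.sleep(5)  # stand-in for the actual slow work

def on_cancel(state: State, id: str):
    # Invoked if the user presses the "Cancel" button of the blocking overlay.
    print("User cancelled while the UI was held.")

def on_heavy_action(state: State):
    hold_control(state, callback=on_cancel, message="Crunching numbers...")
    try:
        long_task()
    finally:
        resume_control(state)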
Navigate to a page.

Arguments:
    state (State^): The current user state as received in any callback.
    to: The name of the page to navigate to. This can be a page identifier (as created by
        `Gui.add_page()^` with no leading '/') or a URL.<br/>
        If omitted, the application navigates to the root page.
    params: A dictionary of query parameters.
    tab: When navigating to a page that is not a known page, the page is opened in a tab identified by
        *tab* (as in [window.open](https://developer.mozilla.org/en-US/docs/Web/API/Window/open)).<br/>
        The default value creates a new tab for the page (which is equivalent to setting *tab* to "_blank").
    force: When navigating to a known page, the content is refreshed even if the page is already shown.
def navigate( state: State, to: t.Optional[str] = "", params: t.Optional[t.Dict[str, str]] = None, tab: t.Optional[str] = None, force: t.Optional[bool] = False, ): """Navigate to a page. Arguments: state (State^): The current user state as received in any callback. to: The name of the page to navigate to. This can be a page identifier (as created by `Gui.add_page()^` with no leading '/') or an URL.<br/> If omitted, the application navigates to the root page. params: A dictionary of query parameters. tab: When navigating to a page that is not a known page, the page is opened in a tab identified by *tab* (as in [window.open](https://developer.mozilla.org/en-US/docs/Web/API/Window/open)).<br/> The default value creates a new tab for the page (which is equivalent to setting *tab* to "_blank"). force: When navigating to a known page, the content is refreshed even it the page is already shown. """ if state and isinstance(state._gui, Gui): state._gui._navigate(to, params, tab, force) else: _warn("'navigate()' must be called in the context of a callback.")
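A sketch of navigating from a callback; the "dashboard" page name and the documentation URL target are illustrative:

from taipy.gui import State, navigate

def go_to_dashboard(state: State):
    # Navigate to the page registered as "dashboard", refreshing it even if already shown.
    navigate(state, to="dashboard", params={"tab": "sales"}, force=True)

def open_docs(state: State):
    # An unknown page name is treated as an external URL and opened in a new browser tab.
    navigate(state, to="https://docs.taipy.io", tab="_blank")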
Get a user content URL. This function can be used if you need to deliver dynamic content to your page: you can create a path at run-time that, when queried, will deliver some user-defined content defined in the *on_user_content* callback (see the description of the `Gui^` class for more information). Arguments: state (State^): The current user state as received in any callback. path: An optional additional path to the URL. params: An optional dictionary sent to the *on_user_content* callback.<br/> These arguments are added as query parameters to the generated URL and converted into strings. Returns: An URL that, when queried, triggers the *on_user_content* callback.
def get_user_content_url( state: State, path: t.Optional[str] = None, params: t.Optional[t.Dict[str, str]] = None ) -> t.Optional[str]: """Get a user content URL. This function can be used if you need to deliver dynamic content to your page: you can create a path at run-time that, when queried, will deliver some user-defined content defined in the *on_user_content* callback (see the description of the `Gui^` class for more information). Arguments: state (State^): The current user state as received in any callback. path: An optional additional path to the URL. params: An optional dictionary sent to the *on_user_content* callback.<br/> These arguments are added as query parameters to the generated URL and converted into strings. Returns: An URL that, when queried, triggers the *on_user_content* callback. """ if state and isinstance(state._gui, Gui): return state._gui._get_user_content_url(path, params) _warn("'get_user_content_url()' must be called in the context of a callback.") return None
Get the identifier of a state. The state identifier is a string generated by Taipy GUI for a given `State^` that is used to serialize callbacks. See the [User Manual section on Long Running Callbacks](../gui/callbacks.md#long-running-callbacks) for details on when and how this function can be used. Arguments: state (State^): The current user state as received in any callback. Returns: A string that uniquely identifies the state.<br/> If None, then **state** was not handled by a `Gui^` instance.
def get_state_id(state: State) -> t.Optional[str]: """Get the identifier of a state. The state identifier is a string generated by Taipy GUI for a given `State^` that is used to serialize callbacks. See the [User Manual section on Long Running Callbacks](../gui/callbacks.md#long-running-callbacks) for details on when and how this function can be used. Arguments: state (State^): The current user state as received in any callback. Returns: A string that uniquely identifies the state.<br/> If None, then **state** was not handled by a `Gui^` instance. """ if state and isinstance(state._gui, Gui): return state._gui._get_client_id() return None
Get the name of the module currently in use when using page scopes.

Arguments:
    state (State^): The current user state as received in any callback.

Returns:
    The name of the current module.
def get_module_context(state: State) -> t.Optional[str]:
    """Get the name of the module currently in use when using page scopes.

    Arguments:
        state (State^): The current user state as received in any callback.

    Returns:
        The name of the current module.
    """
    if state and isinstance(state._gui, Gui):
        return state._gui._get_locals_context()
    return None
Get the module name that triggered a callback. Pages can be defined in different modules yet refer to callback functions declared elsewhere (typically, the application's main module). This function returns the name of the module where the page that holds the control that triggered the callback was declared. This lets applications implement different behaviors depending on what page is involved. This function must be called only in the body of a callback function. Arguments: state (State^): The `State^` instance, as received in any callback. Returns: The name of the module that holds the definition of the page containing the control that triggered the callback that was provided the *state* object.
def get_module_name_from_state(state: State) -> t.Optional[str]: """Get the module name that triggered a callback. Pages can be defined in different modules yet refer to callback functions declared elsewhere (typically, the application's main module). This function returns the name of the module where the page that holds the control that triggered the callback was declared. This lets applications implement different behaviors depending on what page is involved. This function must be called only in the body of a callback function. Arguments: state (State^): The `State^` instance, as received in any callback. Returns: The name of the module that holds the definition of the page containing the control that triggered the callback that was provided the *state* object. """ if state and isinstance(state._gui, Gui): return state._gui._get_locals_context() return None
Invoke a user callback for a given state. See the [User Manual section on Long Running Callbacks in a Thread](../gui/callbacks.md#long-running-callbacks-in-a-thread) for details on when and how this function can be used. Arguments: gui (Gui^): The current Gui instance. state_id: The identifier of the state to use, as returned by `get_state_id()^`. callback (Callable[[State^, ...], None]): The user-defined function that is invoked.<br/> The first parameter of this function **must** be a `State^`. args (Union[Tuple, List]): The remaining arguments, as a List or a Tuple. module_context (Optional[str]): the name of the module that will be used.
def invoke_callback( gui: Gui, state_id: str, callback: t.Callable, args: t.Union[t.Tuple, t.List], module_context: t.Optional[str] = None, ) -> t.Any: """Invoke a user callback for a given state. See the [User Manual section on Long Running Callbacks in a Thread](../gui/callbacks.md#long-running-callbacks-in-a-thread) for details on when and how this function can be used. Arguments: gui (Gui^): The current Gui instance. state_id: The identifier of the state to use, as returned by `get_state_id()^`. callback (Callable[[State^, ...], None]): The user-defined function that is invoked.<br/> The first parameter of this function **must** be a `State^`. args (Union[Tuple, List]): The remaining arguments, as a List or a Tuple. module_context (Optional[str]): the name of the module that will be used. """ if isinstance(gui, Gui): return gui._call_user_callback(state_id, callback, list(args), module_context) _warn("'invoke_callback()' must be called with a valid Gui instance.")
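A sketch of the documented thread pattern: capture the state identifier in the callback, then push an update back to that specific client from the worker thread. The `progress` variable and the thread body are illustrative:

import threading
from taipy.gui import Gui, Markdown, State, get_state_id, invoke_callback

progress = 0
gui = Gui(Markdown("<|{progress}|>"))

def set_progress(state: State, value: int):
    state.progress = value  # reflected in this client's page

def on_start(state: State):
    state_id = get_state_id(state)

    def work():
        # Heavy work would happen here, then the result is pushed to the right client.
        invoke_callback(gui, state_id, set_progress, [100])

    threading.Thread(target=work).start()

# gui.run()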
Invoke a callback for every client. This callback gets invoked for every client connected to the application with the appropriate `State^` instance. You can then perform client-specific tasks, such as updating the state variable reflected in the user interface. Arguments: gui (Gui^): The current Gui instance. callback: The user-defined function to be invoked.<br/> The first parameter of this function must be a `State^` object representing the client for which it is invoked.<br/> The other parameters should reflect the ones provided in the *args* collection. args: The parameters to send to *callback*, if any.
def broadcast_callback( gui: Gui, callback: t.Callable, args: t.Optional[t.Union[t.Tuple, t.List]] = None, module_context: t.Optional[str] = None, ) -> t.Any: """Invoke a callback for every client. This callback gets invoked for every client connected to the application with the appropriate `State^` instance. You can then perform client-specific tasks, such as updating the state variable reflected in the user interface. Arguments: gui (Gui^): The current Gui instance. callback: The user-defined function to be invoked.<br/> The first parameter of this function must be a `State^` object representing the client for which it is invoked.<br/> The other parameters should reflect the ones provided in the *args* collection. args: The parameters to send to *callback*, if any. """ if isinstance(gui, Gui): return gui._call_broadcast_callback(callback, list(args) if args else [], module_context) _warn("'broadcast_callback()' must be called with a valid Gui instance.")
Invoke a long running user callback.

Long-running callbacks are run in a separate thread to not block the application itself.

This function expects to be provided a function to run in the background (in *user_function*).<br/>
A *status function* can also be specified; it is called when the operation performed by *user_function*
is finished (successfully or not), or periodically (using the *period* parameter).

See the [User Manual section on Long Running Callbacks](../gui/callbacks.md#long-running-callbacks)
for details on when and how this function can be used.

Arguments:
    state (State^): The `State^` instance, as received in any callback.
    user_function (Callable[[...], None]): The function that will be run independently of Taipy GUI.
        Note that this function must not use *state*, which is not persisted across threads.
    user_function_args (Optional[List|Tuple]): The arguments to send to *user_function*.
    user_status_function (Optional(Callable[[State^, bool, ...], None])): The optional user-defined
        status function that is invoked at the end of and possibly during the runtime of *user_function*:

        - The first parameter of this function is set to a `State^` instance.
        - The second parameter of this function is set to a bool or an int, depending on the conditions
          under which it is called:

            - If this parameter is set to a bool value, then:

                - If True, this indicates that *user_function* has finished properly. The last argument
                  passed will be the result of the *user_function* call.
                - If False, this indicates that *user_function* failed.

            - If this parameter is set to an int value, then this value indicates how many periods
              (as lengthy as indicated in *period*) have elapsed since *user_function* was started.
    user_status_function_args (Optional[List|Tuple]): The remaining arguments of the user status function.
    period (int): The interval, in milliseconds, at which *user_status_function* is called.<br/>
        The default value is 0, meaning no call to *user_status_function* will happen until
        *user_function* terminates (then the second parameter of that call will be a bool indicating
        success).<br/>
        When set to a value smaller than 500, *user_status_function* is called only when *user_function*
        terminates (as if *period* was set to 0).
def invoke_long_callback( state: State, user_function: t.Callable, user_function_args: t.Union[t.Tuple, t.List] = None, user_status_function: t.Optional[t.Callable] = None, user_status_function_args: t.Union[t.Tuple, t.List] = None, period=0, ): """Invoke a long running user callback. Long-running callbacks are run in a separate thread to not block the application itself. This function expects to be provided a function to run in the background (in *user_function*).<br/> It can also be specified a *status function* that is called when the operation performed by *user_function* is finished (successfully or not), or periodically (using the *period* parameter). See the [User Manual section on Long Running Callbacks](../gui/callbacks.md#long-running-callbacks) for details on when and how this function can be used. Arguments: state (State^): The `State^` instance, as received in any callback. user_function (Callable[[...], None]): The function that will be run independently of Taipy GUI. Note that this function must not use *state*, which is not persisted across threads. user_function_args (Optional[List|Tuple]): The arguments to send to *user_function*. user_status_function (Optional(Callable[[State^, bool, ...], None])): The optional user-defined status function that is invoked at the end of and possibly during the runtime of *user_function*: - The first parameter of this function is set to a `State^` instance. - The second parameter of this function is set to a bool or an int, depending on the conditions under which it is called: - If this parameter is set to a bool value, then: - If True, this indicates that *user_function* has finished properly. The last argument passed will be the result of the user_function call. - If False, this indicates that *user_function* failed. - If this parameter is set to an int value, then this value indicates how many periods (as lengthy as indicated in *period*) have elapsed since *user_function* was started. user_status_function_args (Optional[List|Tuple]): The remaining arguments of the user status function. period (int): The interval, in milliseconds, at which *user_status_function* is called.<br/> The default value is 0, meaning no call to *user_status_function* will happen until *user_function* terminates (then the second parameter of that call will be ).</br> When set to a value smaller than 500, *user_status_function* is called only when *user_function* terminates (as if *period* was set to 0). 
""" if not state or not isinstance(state._gui, Gui): _warn("'invoke_long_callback()' must be called in the context of a callback.") if user_status_function_args is None: user_status_function_args = [] if user_function_args is None: user_function_args = [] state_id = get_state_id(state) module_context = get_module_context(state) if not isinstance(state_id, str) or not isinstance(module_context, str): return this_gui = state._gui def callback_on_exception(state: State, function_name: str, e: Exception): if not this_gui._call_on_exception(function_name, e): _warn(f"invoke_long_callback(): Exception raised in function {function_name}()", e) def callback_on_status( status: t.Union[int, bool], e: t.Optional[Exception] = None, function_name: t.Optional[str] = None, function_result: t.Optional[t.Any] = None, ): if callable(user_status_function): invoke_callback( this_gui, str(state_id), user_status_function, [status] + list(user_status_function_args) + [function_result], # type: ignore str(module_context), ) if e: invoke_callback( this_gui, str(state_id), callback_on_exception, ( str(function_name), e, ), str(module_context), ) def user_function_in_thread(*uf_args): try: res = user_function(*uf_args) callback_on_status(True, function_result=res) except Exception as e: callback_on_status(False, e, user_function.__name__) def thread_status(name: str, period_s: float, count: int): active_thread = next((t for t in threading.enumerate() if t.name == name), None) if active_thread: callback_on_status(count) threading.Timer(period_s, thread_status, (name, period_s, count + 1)).start() thread = threading.Thread(target=user_function_in_thread, args=user_function_args) thread.start() if isinstance(period, int) and period >= 500 and callable(user_status_function): thread_status(thread.name, period / 1000.0, 0)
NOT DOCUMENTED
def is_debugging() -> bool: """NOT DOCUMENTED""" return hasattr(sys, "gettrace") and sys.gettrace() is not None
Application factory, used to create the application.
def create_app(testing=False, flask_env=None, secret_key=None) -> Flask: """Application factory, used to create application""" app = Flask(__name__) app.config.update( ENV=os.getenv("FLASK_ENV", flask_env), TESTING=os.getenv("TESTING", testing), SECRET_KEY=os.getenv("SECRET_KEY", secret_key), ) app.url_map.strict_slashes = False app.config["RESTFUL_JSON"] = {"cls": _CustomEncoder} configure_apispec(app) register_blueprints(app) with app.app_context(): api.views.register_views() return app
Configure APISpec for swagger support
def configure_apispec(app): """Configure APISpec for swagger support""" apispec.init_app(app) apispec.spec.components.schema( "PaginatedResult", { "properties": { "total": {"type": "integer"}, "pages": {"type": "integer"}, "next": {"type": "string"}, "prev": {"type": "string"}, } }, )
Register all blueprints for application
def register_blueprints(app): """Register all blueprints for application""" app.register_blueprint(api.views.blueprint)
Return json error for marshmallow validation errors. This will avoid having to try/catch ValidationErrors in all endpoints, returning correct JSON response with associated HTTP 400 Status (https://tools.ietf.org/html/rfc7231#section-6.5.1)
def handle_marshmallow_error(e): """Return json error for marshmallow validation errors. This will avoid having to try/catch ValidationErrors in all endpoints, returning correct JSON response with associated HTTP 400 Status (https://tools.ietf.org/html/rfc7231#section-6.5.1) """ return jsonify(e.messages), 400
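For this handler to take effect it has to be registered on the Flask app; a sketch of that wiring (whether the application factory does exactly this is an assumption):

from marshmallow import ValidationError

def register_error_handlers(app):
    # Route every marshmallow ValidationError through the JSON 400 handler above.
    app.register_error_handler(ValidationError, handle_marshmallow_error)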
Load enterprise resources.
def load_enterprise_resources(api: Api): """ Load enterprise resources. """ if not _using_enterprise(): return load_resources = _load_fct("taipy.enterprise.rest.api.views", "_load_resources") load_resources(api)
Remove a subparser from argparse.
def remove_subparser(name: str): """Remove a subparser from argparse.""" _TaipyParser._sub_taipyparsers.pop(name, None) if _TaipyParser._subparser_action: _TaipyParser._subparser_action._name_parser_map.pop(name, None) for action in _TaipyParser._subparser_action._choices_actions: if action.dest == name: _TaipyParser._subparser_action._choices_actions.remove(action)
Check if the task instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config
def _check_tasks_instance(task_id, scenario_id): """Check if the task instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config """ task_config_applied_instance = Config.tasks[task_id] task_config_instance_via_scenario = None for task in Config.scenarios[scenario_id].tasks: if task.id == task_id: task_config_instance_via_scenario = task task_config_python_instance = None if Config._python_config._sections.get("TASK", None): task_config_python_instance = Config._python_config._sections["TASK"][task_id] task_config_file_instance = None if Config._file_config._sections.get("TASK", None): task_config_file_instance = Config._file_config._sections["TASK"][task_id] assert task_config_python_instance is not task_config_applied_instance assert task_config_python_instance is not task_config_instance_via_scenario assert task_config_file_instance is not task_config_applied_instance assert task_config_file_instance is not task_config_instance_via_scenario assert task_config_instance_via_scenario is task_config_applied_instance
Check if the data node instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config
def _check_data_nodes_instance(dn_id, task_id): """Check if the data node instance in the task config correctly points to the Config._applied_config, not the Config._python_config or the Config._file_config """ dn_config_applied_instance = Config.data_nodes[dn_id] for dn in Config.tasks[task_id].inputs: if dn.id == dn_id: dn_config_instance_via_task = dn for dn in Config.tasks[task_id].outputs: if dn.id == dn_id: dn_config_instance_via_task = dn dn_config_python_instance = None if Config._python_config._sections.get("DATA_NODE", None): dn_config_python_instance = Config._python_config._sections["DATA_NODE"][dn_id] dn_config_file_instance = None if Config._file_config._sections.get("DATA_NODE", None): dn_config_file_instance = Config._file_config._sections["DATA_NODE"][dn_id] if dn_config_python_instance: assert dn_config_python_instance.scope is None assert dn_config_python_instance is not dn_config_applied_instance assert dn_config_python_instance is not dn_config_instance_via_task if dn_config_file_instance: assert dn_config_file_instance.scope is None assert dn_config_file_instance is not dn_config_applied_instance assert dn_config_file_instance is not dn_config_instance_via_task assert dn_config_applied_instance.scope == DataNodeConfig._DEFAULT_SCOPE assert dn_config_instance_via_task is dn_config_applied_instance
Tests the binding of a few variables and a function
def test_variable_binding(helpers): """ Tests the binding of a few variables and a function """ def another_function(gui): pass x = 10 y = 20 z = "button label" gui = Gui() gui.add_page("test", Markdown("<|{x}|> | <|{y}|> | <|{z}|button|on_action=another_function|>")) gui.run(run_server=False, single_client=True) client = gui._server.test_client() jsx = client.get("/taipy-jsx/test").json["jsx"] for expected in ["<Button", 'defaultLabel="button label"', "label={tpec_TpExPr_z_TPMDL_0}"]: assert expected in jsx assert gui._bindings().x == x assert gui._bindings().y == y assert gui._bindings().z == z with gui.get_flask_app().app_context(): assert callable(gui._get_user_function("another_function")) helpers.test_cleanup()
Tests the binding of a dictionary property
def test_dict_binding(helpers): """ Tests the binding of a dictionary property """ d = {"k": "test"} # noqa: F841 gui = Gui("<|{d.k}|>") gui.run(run_server=False) client = gui._server.test_client() jsx = client.get("/taipy-jsx/TaiPy_root_page").json["jsx"] for expected in ["<Field", 'defaultValue="test"']: assert expected in jsx helpers.test_cleanup()
Run the templates on a subprocess and get stdout after timeout
def _run_template(taipy_path, cwd, main_path, time_out=30): """Run the templates on a subprocess and get stdout after timeout""" env = {"PYTHONPATH": taipy_path} if platform.system() == "Windows": env.update(os.environ) with subprocess.Popen( [sys.executable, main_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env, ) as proc: try: stdout, stderr = proc.communicate(timeout=time_out) except subprocess.TimeoutExpired: proc.kill() stdout, stderr = proc.communicate() # Print the error if there is any (for debugging) if stderr := _bytes_to_str(stderr): print(stderr) # noqa: T201 return _bytes_to_str(stdout)
Import the taipy package and check that the gui, Scenario, and rest attributes exist.
def test_import_taipy_packages() -> bool: """ Import taipy package and call gui, Scenario and rest attributes. """ import taipy as tp valid_install = True if not hasattr(tp, "gui"): logging.error("Taipy installation has no attribute gui") valid_install = False if not hasattr(tp, "Scenario"): logging.error("Taipy installation has no attribute Scenario") valid_install = False if not hasattr(tp, "rest"): logging.error("Taipy installation has no attribute rest") valid_install = False return valid_install
Extract the installation markers of a package from a requirements line. ex: "pandas>=1.0.0,<2.0.0;python_version<'3.9'" -> "python_version<'3.9'"
def extract_installation_markers(package: str) -> str: """ Extract the installation markers of a package from a requirements line. ex: "pandas>=1.0.0,<2.0.0;python_version<'3.9'" -> "python_version<'3.9'" """ if ";" not in package: return "" return package.split(";")[1]
Extract the min version of a package from a requirements line. ex: "pandas>=1.0.0,<2.0.0;python_version<'3.9'" -> "1.0.0"
def extract_min_version(package: str) -> str: """ Extract the min version of a package from a requirements line. ex: "pandas>=1.0.0,<2.0.0;python_version<'3.9'" -> "1.0.0" """ # The max version is the defined version if it is a fixed version. if "==" in package: version = package.split("==")[1] if ";" in version: # Remove installation markers. version = version.split(";")[0] return version return package.split(">=")[1].split(",")[0]
Extract the max version of a package from a requirements line. Ex: - pandas==1.0.0 -> 1.0.0 - pandas>=1.0.0,<=2.0.0 -> 2.0.0 - pandas==1.0.0;python_version<'3.9' -> 1.0.0 - pandas>=1.0.0,<2.0.0;python_version<'3.9' -> 2.0.0
def extract_max_version(package: str) -> str: """ Extract the max version of a package from a requirements line. Ex: - pandas==1.0.0 -> 1.0.0 - pandas>=1.0.0,<=2.0.0 -> 2.0.0 - pandas==1.0.0;python_version<'3.9' -> 1.0.0 - pandas>=1.0.0,<2.0.0;python_version<'3.9' -> 2.0.0 """ # The max version is the defined version if it is a fixed version. if "==" in package: version = package.split("==")[1] if ";" in version: # Remove installation markers. version = version.split(";")[0] return version version = None if ",<=" in package: version = package.split(",<=")[1] else: version = package.split(",<")[1] if ";" in version: # Remove installation markers. version = version.split(";")[0] return version
Extract the name of a package from a requirements line. Ex: - pandas==1.0.0 -> pandas - pandas>=1.0.0,<2.0.0 -> pandas - pandas==1.0.0;python_version<'3.9' -> pandas - pandas>=1.0.0,<2.0.0;python_version<'3.9' -> pandas
def extract_name(package: str) -> str: """ Extract the name of a package from a requirements line. Ex: - pandas==1.0.0 -> pandas - pandas>=1.0.0,<2.0.0 -> pandas - pandas==1.0.0;python_version<'3.9' -> pandas - pandas>=1.0.0,<2.0.0;python_version<'3.9' -> pandas """ if "==" in package: return package.split("==")[0] name = package.split(">=")[0] # Remove optional dependencies. # Ex: "pandas[sql]" -> "pandas" if "[" in name: name = name.split("[")[0] return name
Extract the extras dependencies of a package from a requirements line. Ex: - pymongo[srv]>=4.2.0,<=4.6.1 -> ["srv"]
def extract_extras_dependencies(package: str) -> List[str]: """ Extract the extras dependencies of a package from a requirements line. Ex: - pymongo[srv]>=4.2.0,<=4.6.1 -> ["srv"] """ if "[" not in package: return [] return package.split("[")[1].split("]")[0].split(",")
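Taken together, the parsing helpers slice a requirements line as shown in this small, self-checking example (the line itself is made up):

line = "pymongo[srv]>=4.2.0,<=4.6.1;python_version<'3.12'"

assert extract_name(line) == "pymongo"
assert extract_extras_dependencies(line) == ["srv"]
assert extract_min_version(line) == "4.2.0"
assert extract_max_version(line) == "4.6.1"
assert extract_installation_markers(line) == "python_version<'3.12'"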
Load and concat dependencies from requirements files.
def load_dependencies(requirements_filenames: List[str], enforce_format: bool) -> Dict[str, Package]: """ Load and concat dependencies from requirements files. """ # Extracted dependencies from requirements files. dependencies = {} for filename in requirements_filenames: file_dependencies = Path(filename).read_text("UTF-8").split("\n") for package_requirements in file_dependencies: # Ignore empty lines. if not package_requirements: continue # Ensure the package is correctly formatted with born min and max. if enforce_format: Package.check_format(package_requirements) package = Package.from_requirements(package_requirements, filename) # dependencies may be present multiple times in different files. # In that case, do not load the releases again but ensure versions are the same. if package.name in dependencies: existing_package = dependencies[package.name] if ( not existing_package.min_version == package.min_version or not existing_package.max_version == package.max_version ): raise Exception( f"Inconsistent version of '{package.name}' between '{filename}' and {','.join(package.files)}." ) # Add the file as dependency of the package. existing_package.files.append(filename) # Stop processing, package is already extracted. continue dependencies[package.name] = package return dependencies
Display dependencies information.
def display_dependencies_versions(dependencies: Dict[str, Package]): """ Display dependencies information. """ to_print = [] for package_name, package in dependencies.items(): if package.is_taipy: continue # Load the latest releases of the package. package.load_releases() to_print.append( ( package_name, f'{package.min_version} ({package.min_release.upload_date if package.min_release else "N.A."})', f'{package.max_version} ({package.max_release.upload_date if package.max_release else "N.C."})', f"{package.releases[0].version} ({package.releases[0].upload_date})", len(list(itertools.takewhile(lambda x: x.version != package.max_version, package.releases))), # noqa: B023 ) ) to_print.sort(key=lambda x: x[0]) h = ["name", "version-min", "version-max", "current-version", "nb-releases-behind"] print(tabulate.tabulate(to_print, headers=h, tablefmt="pretty"))
Display and update dependencies.
def update_dependencies( # Dependencies installed in the environment. dependencies_installed: Dict[str, Package], # Dependencies set in requirements files. dependencies_set: Dict[str, Package], # Requirements files to update. requirements_filenames: List[str], ): """ Display and updates dependencies. """ to_print = [] for name, ds in dependencies_set.items(): if ds.is_taipy: continue # Find the package in use. di = dependencies_installed.get(name) # Some package as 'gitignore-parser' becomes 'gitignore_parser' during the installation. if not di: di = dependencies_installed.get(name.replace("-", "_")) if di: if di.max_version != ds.max_version: to_print.append((name, di.max_version, ",".join(f.split("/")[0] for f in ds.files))) # Save the new dependency version. ds.max_version = di.max_version # Print the dependencies to update. to_print.sort(key=lambda x: x[0]) print(tabulate.tabulate(to_print, headers=["name", "version", "files"], tablefmt="pretty")) # noqa: T201 # Update requirements files. for fd in requirements_filenames: requirements = "\n".join( d.as_requirements_line() for d in sorted(dependencies_set.values(), key=lambda d: d.name) if fd in d.files ) # Add a new line at the end of the file. requirements += "\n" Path(fd).write_text(requirements, "UTF-8")
Print the dependencies as requirements lines without version.
def generate_raw_requirements_txt(dependencies: Dict[str, Package]): """ Print the dependencies as requirements lines without version. """ for package in dependencies.values(): if not package.is_taipy: print(package.as_requirements_line(with_version=False))
Update in place dependencies version of a Pipfile. Warning: Dependencies are loaded from requirements files without extras or markers. The Pipfile contains extras and markers information.
def update_pipfile(pipfile: str, dependencies_version: Dict[str, Package]): """ Update in place dependencies version of a Pipfile. Warning: Dependencies are loaded from requirements files without extras or markers. The Pipfile contains extras and markers information. """ dependencies_str = "" pipfile_obj = toml.load(pipfile) packages = pipfile_obj.pop("packages") for name, dep in packages.items(): # Find the package in use. rp = dependencies_version.get(name) # Some package as 'gitignore-parser' becomes 'gitignore_parser' during the installation. if not rp: rp = dependencies_version.get(name.replace("-", "_")) if rp: # Change for the real name of the package. rp.name = name if not rp: # Package not found. Can be due to python version. # Ex: backports.zoneinfo if isinstance(dep, dict): new_dep = "" # Format as a Pipfile line. new_dep = f'version="{dep["version"]}"' if dep.get("markers"): new_dep += f', markers="{dep["markers"]}"' if dep.get("extras"): new_dep += f', extras={dep["extras"]}' dep = f"{{{new_dep}}}" dependencies_str += f'"{name}" = {dep}\n' else: if isinstance(dep, dict): # Requirements does not have installation markers and extras. rp.installation_markers = dep.get("markers", "") rp.extras_dependencies = [dep.get("extras")[0]] if dep.get("extras") else [] dependencies_str += f"{rp.as_pipfile_line()}\n" toml_str = toml.dumps(pipfile_obj) Path(pipfile).write_text(f"{toml_str}\n\n[packages]\n{dependencies_str}", "UTF-8")
Load version.json file from base path.
def __load_version_from_path(base_path: str) -> Version: """Load version.json file from base path.""" with open(os.path.join(base_path, "version.json")) as version_file: data = json.load(version_file) return Version(**data)
Load the version.json file from the base path and return the corresponding Version object.
def extract_version(base_path: str) -> Version:
    """
    Load the version.json file from the base path and return the corresponding Version object.
    """
    return __load_version_from_path(base_path)
Split a string into chunks with overlap :param s: the input string :param separator: the separator to split the string :param num_tokens: the number of tokens in each chunk :param step_tokens: the number of tokens to step forward :param encoding: the encoding to encode the string
def chunk_str_overlap(
    s: str,
    separator: str = "\n",
    num_tokens: int = 64,
    step_tokens: int = 64,
    encoding: tiktoken.Encoding = None,
) -> List[str]:
    """
    Split a string into chunks with overlap
    :param s: the input string
    :param separator: the separator to split the string
    :param num_tokens: the number of tokens in each chunk
    :param step_tokens: the number of tokens to step forward
    :param encoding: the encoding to encode the string
    """
    assert step_tokens <= num_tokens, (
        f"The number of tokens {num_tokens} in each chunk "
        f"should be at least as large as the step size {step_tokens}."
    )
    lines = s.split(separator)
    chunks = dict()
    final_chunks = []

    if len(lines) == 0:
        return []

    first_line = lines[0]
    first_line_size = len(encoding.encode(first_line))

    chunks[0] = [first_line, first_line_size]

    this_step_size = first_line_size

    for i in range(1, len(lines)):
        line = lines[i]
        line_size = len(encoding.encode(line))

        to_pop = []
        for key in chunks:
            if chunks[key][1] + line_size > num_tokens:
                to_pop.append(key)
            else:
                chunks[key][0] += f"{separator}{line}"
                chunks[key][1] += line_size
        final_chunks += [chunks.pop(key)[0] for key in to_pop]

        if this_step_size + line_size > step_tokens:
            chunks[i] = [line, line_size]
            this_step_size = 0
        this_step_size += line_size

    max_remained_chunk = ""
    max_remained_chunk_size = 0
    for key in chunks:
        if chunks[key][1] > max_remained_chunk_size:
            max_remained_chunk_size = chunks[key][1]
            max_remained_chunk = chunks[key][0]
    if max_remained_chunk_size > 0:
        final_chunks.append(max_remained_chunk)

    return final_chunks
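A short sketch of chunking a text with a tiktoken encoding; the model name, sizes, and sample text are illustrative:

import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
text = "\n".join(f"line {i}: some content about topic {i % 7}" for i in range(200))

chunks = chunk_str_overlap(text, separator="\n", num_tokens=64, step_tokens=32, encoding=enc)
# Consecutive chunks overlap by roughly num_tokens - step_tokens tokens.
print(len(chunks), repr(chunks[0][:80]))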
Get the title of a file :param file_name: the file name :param prop: the property to get the title
def get_title( file_name: str, prop="title: ", ) -> str: """ Get the title of a file :param file_name: the file name :param prop: the property to get the title """ with open(file_name, encoding="utf-8", errors="ignore") as f_in: for line in f_in: line = line.strip() if line and (line.startswith(prop) or any([c.isalnum() for c in line])): return line return ""
Extract text from a file in pdf, docx, csv or pptx format :param file: the file path :param file_type: the extension of the file
def extract_text_from_file(
    file: str,
    file_type: Literal["pdf", "docx", "csv", "pptx"],
) -> Tuple[str, str]:
    """
    Extract text from a file in pdf, docx, csv or pptx format
    :param file: the file path
    :param file_type: the extension of the file
    """
    if file_type == "pdf":
        try:
            from pypdf import PdfReader
        except ImportError:
            raise ImportError("Please install pypdf first.")
        # Extract text from pdf using pypdf
        reader = PdfReader(file)
        extracted_text = " ".join([page.extract_text() for page in reader.pages])
        title = extracted_text.split("\n")[0]
    elif file_type == "docx":
        try:
            import docx2txt
        except ImportError:
            raise ImportError("Please install docx2txt first.")
        # Extract text from docx using docx2txt
        extracted_text = docx2txt.process(file)
        title = extracted_text.split("\n")[0]
    elif file_type == "csv":
        # Extract text from csv using the csv module.
        # csv.reader expects an iterable of lines, so the file must be opened first.
        extracted_text = ""
        title = ""
        with open(file, newline="", encoding="utf-8", errors="ignore") as csv_file:
            reader = csv.reader(csv_file)
            for row in reader:
                extracted_text += " ".join(row) + "\n"
    elif file_type == "pptx":
        try:
            import pptx
        except ImportError:
            raise ImportError("Please install python-pptx first.")
        extracted_text = ""
        no_title = True
        title = ""
        presentation = pptx.Presentation(file)
        for slide in presentation.slides:
            for shape in slide.shapes:
                if shape.has_text_frame:
                    for paragraph in shape.text_frame.paragraphs:
                        for run in paragraph.runs:
                            extracted_text += run.text + " "
                            if no_title and len(run.text) > 10:
                                title = run.text
                                no_title = False
                    extracted_text += "\n"
    else:
        # Unsupported file type
        raise ValueError(f"Unsupported file type: {file_type}")

    return title[:100], extracted_text
Parse a file with a supported extension and return its title and cleaned text
    :param read_file: the input file with a given extension
    :return: the title and the parsed text of the file
def text_parser(
    read_file: str,
) -> Tuple[str, str]:
    """
    Parse a file with a supported extension and return its title and cleaned text
    :param read_file: the input file with a given extension
    :return: the title and the parsed text of the file
    """
    filename, extension = os.path.splitext(read_file)
    extension = extension.lstrip(".")
    title = filename
    supported_extensions = ["md", "markdown", "html", "htm", "txt", "json", "jsonl"]
    other_extensions = ["docx", "pptx", "pdf", "csv"]

    # utf-8-sig will treat BOM header as a metadata of a file not a part of the file content
    default_encoding = "utf-8-sig"

    if extension in ("md", "markdown", "txt"):
        title = get_title(read_file)
        with open(read_file, "r", encoding=default_encoding, errors="ignore") as f:
            text = f.read()
    elif extension in ("html", "htm"):
        from bs4 import BeautifulSoup

        with open(read_file, "r", encoding=default_encoding, errors="ignore") as f:
            soup = BeautifulSoup(f, "html.parser")
        title = next(soup.stripped_strings)[:100]
        text = soup.get_text("\n")
    # read json/jsonl file in and convert each json to a row of string
    elif extension in ("json", "jsonl"):
        try:
            with open(read_file, "r", encoding=default_encoding, errors="ignore") as f:
                data = json.load(f) if extension == "json" else [json.loads(line) for line in f]
        except Exception:
            # json file cannot be decoded, skip this file
            return title, ""
        if isinstance(data, dict):
            text = json.dumps(data)
        elif isinstance(data, list):
            content_list = [json.dumps(each_json) for each_json in data]
            text = "\n".join(content_list)
            title = filename
        else:
            # valid JSON scalar (string, number, bool, null)
            text = json.dumps(data)
    elif extension in other_extensions:
        title, text = extract_text_from_file(read_file, extension)
    else:
        # no support for other formats
        print(
            f"No support for file with extension: {extension}. "
            f"The supported extensions are {supported_extensions}",
        )
        return title, ""

    output_text = re.sub(r"\n{3,}", "\n\n", text)  # keep whitespaces for formatting
    output_text = re.sub(r"-{3,}", "---", output_text)
    output_text = re.sub(r"\*{3,}", "***", output_text)
    output_text = re.sub(r"_{3,}", "___", output_text)

    return title, output_text
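An illustrative call to text_parser on a markdown file (the path is an assumption); an empty text value signals an unsupported or unreadable file.

# Illustrative only: parse a markdown file and inspect its title and cleaned text.
title, text = text_parser("docs/README.md")
if text:
    print(f"Title: {title}")
    print(text[:200])
else:
    print("File could not be parsed or its format is unsupported.")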
Split documents into chunks
    :param doc_path: the path of the documents
    :param chunk_size: the size of the chunk
    :param chunk_step: the step size of the chunk
def chunk_document(
    doc_path: str,
    chunk_size: int,
    chunk_step: int,
) -> Tuple[int, List[str], List[Dict[str, str]], Dict[str, int]]:
    """
    Split documents into chunks
    :param doc_path: the path of the documents
    :param chunk_size: the size of the chunk
    :param chunk_step: the step size of the chunk
    """
    texts = []
    metadata_list = []
    file_count = 0
    chunk_id_to_index = dict()

    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

    # traverse all files under dir
    print("Split documents into chunks...")
    for root, dirs, files in os.walk(doc_path):
        for name in files:
            f = os.path.join(root, name)
            print(f"Reading {f}")
            try:
                title, content = text_parser(f)
                file_count += 1
                if file_count % 100 == 0:
                    print(f"{file_count} files read.")

                if len(content) == 0:
                    continue

                chunks = chunk_str_overlap(
                    content.strip(),
                    num_tokens=chunk_size,
                    step_tokens=chunk_step,
                    separator="\n",
                    encoding=enc,
                )
                # drop the first four path components so the source is stored relative to the doc root
                source = os.path.sep.join(f.split(os.path.sep)[4:])
                for i in range(len(chunks)):
                    # custom metadata if needed
                    metadata = {
                        "source": source,
                        "title": title,
                        "chunk_id": i,
                    }
                    chunk_id_to_index[f"{source}_{i}"] = len(texts) + i
                    metadata_list.append(metadata)
                texts.extend(chunks)
            except Exception as e:
                print(f"Error encountered when reading {f}: {traceback.format_exc()} {e}")
    return file_count, texts, metadata_list, chunk_id_to_index
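A usage sketch under assumed paths and sizes: chunk every file under a documents directory, then use chunk_id_to_index to locate a particular chunk inside texts.

# Illustrative only: "./documents", 256 and 128 are assumed values.
file_count, texts, metadata_list, chunk_id_to_index = chunk_document(
    doc_path="./documents",
    chunk_size=256,
    chunk_step=128,
)
print(f"Read {file_count} files into {len(texts)} chunks.")
# Look up the position of the first chunk of a given source file, if present.
index = chunk_id_to_index.get("some/relative/path.md_0")
if index is not None:
    print(metadata_list[index]["title"], texts[index][:100])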
Get a random id with the given length and prefix.
def get_id(length: int = 6, prefix: Optional[str] = None) -> str:
    """Get a random id with the given length and prefix."""
    id = secrets.token_hex(length)
    if prefix is not None:
        return f"{prefix}-{id}"
    return id
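A quick sketch of get_id; note that secrets.token_hex(length) yields 2 * length hexadecimal characters, so the default length of 6 produces a 12-character id (the example values in the comments are made up).

session_id = get_id(prefix="session")  # e.g. "session-3f9a1c0b7d2e" (12 hex chars after the prefix)
plain_id = get_id(length=4)            # e.g. "a1b2c3d4" (8 hex chars)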
Chat with TaskWeaver in the command line
def chat(ctx: click.Context):
    """
    Chat with TaskWeaver in the command line
    """
    ctx_obj: CliContext = ctx.obj

    from taskweaver.chat.console import chat_taskweaver

    click.echo(get_ascii_banner())
    chat_taskweaver(ctx_obj.workspace)
Initialize TaskWeaver environment
def init(
    ctx: click.Context,
    project: str,
):
    """Initialize TaskWeaver environment"""
    click.echo(
        f"Initializing TaskWeaver in directory {project}...",
    )
    if not os.path.exists(project):
        os.mkdir(project)

    def get_dir(*dir: str):
        return os.path.join(project, *dir)

    dir_list = [
        "codeinterpreter_examples",
        "planner_examples",
        "plugins",
        "config",
        "workspace",
    ]
    for dir in dir_list:
        dir_path = get_dir(dir)
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)

    init_temp_dir = get_dir("init")

    import zipfile
    from pathlib import Path

    tpl_dir = os.path.join(init_temp_dir, "template")
    ext_zip_file = Path(__file__).parent / "taskweaver-ext.zip"
    if os.path.exists(ext_zip_file):
        with zipfile.ZipFile(ext_zip_file, "r") as zip_ref:
            # Extract all template files into the temporary template directory
            zip_ref.extractall(tpl_dir)

    tpl_planner_example_dir = os.path.join(tpl_dir, "taskweaver-ext", "planner_examples")
    tpl_ci_example_dir = os.path.join(tpl_dir, "taskweaver-ext", "codeinterpreter_examples")
    tpl_plugin_dir = os.path.join(tpl_dir, "taskweaver-ext", "plugins")
    tpl_config_dir = os.path.join(tpl_dir, "taskweaver-ext")

    planner_example_dir = get_dir("planner_examples")
    ci_example_dir = get_dir("codeinterpreter_examples")
    plugin_dir = get_dir("plugins")

    copy_files(tpl_planner_example_dir, planner_example_dir)
    copy_files(tpl_ci_example_dir, ci_example_dir)
    copy_files(tpl_plugin_dir, plugin_dir)
    copy_file(tpl_config_dir, "taskweaver_config.json", get_dir(""))

    try:
        shutil.rmtree(init_temp_dir)
    except Exception:
        click.secho("Failed to remove temporary directory", fg="yellow")

    click.secho(
        f"TaskWeaver project has been initialized successfully at {click.format_filename(project)}.",
        fg="green",
    )
Start TaskWeaver web server
def web(host: str, port: int, debug: bool, open: bool):
    """Start TaskWeaver web server"""
    from taskweaver.chat.web import start_web_service

    if not debug:
        # debug mode will restart app iteratively, skip the plugin listing
        # display_enabled_examples_plugins()
        pass

    def post_app_start():
        if open:
            click.secho("launching web browser...", fg="green")
            open_url = f"http://{'localhost' if host == '0.0.0.0' else host}:{port}"
            click.launch(open_url)

    start_web_service(
        host,
        port,
        is_debug=debug,
        post_app_start=post_app_start if open else None,
    )
Load all the examples from a folder.

    Args:
        folder: the folder path.
        role_set: the roles that should be included in the examples.
def load_examples(
    folder: str,
    role_set: Optional[Set[str]] = None,
) -> List[Conversation]:
    """
    Load all the examples from a folder.

    Args:
        folder: the folder path.
        role_set: the roles that should be included in the examples.
    """
    example_file_list: List[str] = glob.glob(path.join(folder, "*.yaml"))
    example_conv_pool: List[Conversation] = []
    for yaml_path in example_file_list:
        conversation = Conversation.from_yaml(yaml_path)
        if conversation.enabled:
            if not role_set:
                example_conv_pool.append(conversation)
            else:
                roles = conversation.roles
                if set(roles).issubset(role_set):
                    example_conv_pool.append(conversation)

    return example_conv_pool
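An illustrative call to load_examples (the folder path and role names are assumptions): only enabled conversations whose roles form a subset of the given role_set are returned.

# Illustrative only: folder path and role names are placeholders.
examples = load_examples(
    folder="project/planner_examples",
    role_set={"Planner", "User"},
)
print(f"Loaded {len(examples)} example conversations.")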