Register a plugin. The plugin can be either a Plugin subclass or a callable function. :param func: the plugin class or a callable function
def register_plugin(func: Union[Callable[..., Any], Type[Plugin]]): """ register a plugin, the plugin could be a class or a callable function :param func: the plugin class or a callable function """ global register_plugin_inner if "register_plugin_inner" not in globals() or register_plugin_inner is None: print("no registry for loading plugin") elif isinstance(func, type) and issubclass(func, Plugin): register_plugin_inner(func) elif callable(func): func_name = func.__name__ def callable_func(self: Plugin, *args: List[Any], **kwargs: Dict[str, Any]): self.log("info", "calling function " + func_name) result = func(*args, **kwargs) return result wrapper_cls = type( f"FuncPlugin_{func_name}", (Plugin,), { "__call__": callable_func, }, ) register_plugin_inner(wrapper_cls) else: raise Exception( "only callable function or plugin class could be registered as Plugin", ) return func
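The interesting part of `register_plugin` is the wrapper class it builds with `type()` so that a plain function can be treated as a `Plugin`. Below is a minimal, self-contained sketch of that pattern; `BasePlugin` and `add` are stand-ins invented for illustration, not part of the original registry code.

```python
# Stand-ins for illustration only; the real code uses the Plugin base class
# and register_plugin_inner from the plugin registry.
class BasePlugin:
    def log(self, level: str, message: str) -> None:
        print(f"[{level}] {message}")

def add(a: int, b: int) -> int:
    return a + b

def _call(self, *args, **kwargs):
    self.log("info", "calling function add")
    return add(*args, **kwargs)

# type(name, bases, namespace) creates a new class at runtime, exactly like
# the FuncPlugin_{func_name} wrapper built in register_plugin.
FuncPluginAdd = type("FuncPlugin_add", (BasePlugin,), {"__call__": _call})

print(FuncPluginAdd()(1, 2))  # prints "[info] calling function add" then 3
```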
register a plugin test
def test_plugin(name: Optional[str] = None, description: Optional[str] = None): """ register a plugin test """ def inner(func: Callable[..., Any]): global register_plugin_test_inner if "register_plugin_test_inner" not in globals() or register_plugin_test_inner is None: print("no registry for loading plugin") elif callable(func): test_name: str = func.__name__ if name is None else name test_description: str = func.__doc__ or "" if description is None else description register_plugin_test_inner(test_name, test_description, func) return func return inner
Make an API call to a given host and endpoint
import json
from typing import Any, Dict, Optional, Union
from urllib.parse import urljoin

import requests


def make_api_call(
    host: str = "",
    endpoint: str = "",
    method: str = "GET",
    headers: Optional[Dict[str, str]] = None,
    query_params: Union[Dict[str, Any], str, None] = None,
    body: str = "",
    timeout_secs: int = 60,
) -> str:
    """Make an API call to a given host and endpoint"""
    if not (
        isinstance(host, str)
        and isinstance(endpoint, str)
        and isinstance(method, str)
        and isinstance(body, str)
    ):
        raise ValueError("host, endpoint, method, and body must be strings")

    allowed_methods = ["GET", "POST", "PUT", "DELETE"]
    if method not in allowed_methods:
        raise ValueError(f"method must be one of {allowed_methods}")

    # Avoid mutable default arguments.
    if headers is None:
        headers = {"Content-Type": "application/json"}

    # Accept query parameters as a dict or a JSON string.
    if not query_params:
        query_params = {}
    elif isinstance(query_params, str):
        try:
            query_params = json.loads(query_params)
        except json.JSONDecodeError:
            raise ValueError("query_params must be a dictionary or a JSON string")
    elif not isinstance(query_params, dict):
        raise ValueError("query_params must be a dictionary or a JSON string")

    # Default to https:// when no scheme is given.
    if not host.startswith(("http://", "https://")):
        normalized_host = f"https://{host}"
    else:
        normalized_host = host
    url = urljoin(normalized_host, endpoint)

    try:
        response = requests.request(
            method=method,
            url=url,
            headers=headers,
            params=query_params,  # previously parsed but never passed to the request
            data=body,            # send the raw body string as-is
            timeout=timeout_secs,
        )
        result = {
            "status": "success",
            "status_code": response.status_code,
            "response": response.text,
        }
    except requests.exceptions.RequestException as e:
        result = {
            "status": "error",
            "status_code": 500,
            "response": str(e),
        }
    return json.dumps(result)
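A hypothetical usage sketch of the function above; `httpbin.org` is only an example host and the call needs network access.

```python
# Hypothetical call; host, endpoint and query parameters are examples only.
raw = make_api_call(
    host="httpbin.org",          # "https://" is prepended automatically
    endpoint="/get",
    method="GET",
    query_params={"q": "hello"},
)
result = json.loads(raw)
print(result["status"], result["status_code"])
```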
Builds a toy dataset with two features. Args: n: Number of timestamps. data_prefix: Optional prefix in the feature names. data2_is_categorical_integer: If true, the second feature is categorical. If false (default), the second feature is numerical. Returns: An EventSet containing the toy dataset.
def _build_toy_dataset( n: int, data_prefix="", data2_is_categorical_integer=False, num_indexes: int = 10, ) -> tp.EventSet: """Builds a toy dataset with two features. Args: n: Number of timestamps. data_prefix: Optional prefix in the feature names. data2_is_categorical_integer: If true, the second feature is categorical. If false (default), the second feature is numerical. Returns: An EventSet containing the toy dataset. """ np.random.seed(0) index_values = list(range(int(num_indexes))) timestamps = np.sort(np.random.randn(n) * n) index_1 = np.random.choice(index_values, n) index_2 = np.random.choice(index_values, n) data_1 = np.random.randn(n) if data2_is_categorical_integer: data_2 = np.random.choice(list(range(int(10))), n) else: data_2 = np.random.randn(n) return tp.from_pandas( pd.DataFrame( { "timestamp": timestamps, "index_1": index_1, "index_2": index_2, data_prefix + "data_1": data_1, data_prefix + "data_2": data_2, } ), indexes=["index_1", "index_2"], )
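A small usage sketch of the helper above, assuming Temporian (`tp`), numpy and pandas are installed as in the function itself:

```python
# Build 100 events spread over up to 3x3 (index_1, index_2) groups.
evset = _build_toy_dataset(n=100, data_prefix="toy_", num_indexes=3)
print(evset.schema)  # features toy_data_1 / toy_data_2, indexes index_1 / index_2
print(evset)         # preview of timestamps and feature values per index group
```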
Runs a single-input, single-output Temporian graph in Beam. Usage example: ```python import temporian as tp import temporian.beam as tpb # Create a graph. input_node = tp.input_node([("a", tp.str_), ("b", tp.float32)]) output_node = input_node["b"].moving_sum(4) with beam.Pipeline() as pipeline: (pipeline | "Read input" >> tpb.from_csv(input_path, input_node.schema) | "Process data" >> tpb.run(input=input_node, output=output_node) | "Save result" >> tpb.to_csv(output_path, output_node.schema) ) pipeline.run() ``` If you graph contains more than one input or output nodes, use `run_multi_io` instead. Args: pipe: A Beam PCollection containing the input event set. Use `tpb.from_csv` to read data from csv files, or use `tpb.to_event_set` to import an event set from a dictionary of key/values such as the output of Beam IO connectors (https://beam.apache.org/documentation/io/connectors/). input: Input node of a Temporian graph. output: Output node of a Temporian graph. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: A Beam PCollection containing the output event set.
def run( pipe: BeamEventSet, input: EventSetNode, output: EventSetNode, verbose: int = 0, ) -> BeamEventSet: """Runs a single-input, single-output Temporian graph in Beam. Usage example: ```python import temporian as tp import temporian.beam as tpb # Create a graph. input_node = tp.input_node([("a", tp.str_), ("b", tp.float32)]) output_node = input_node["b"].moving_sum(4) with beam.Pipeline() as pipeline: (pipeline | "Read input" >> tpb.from_csv(input_path, input_node.schema) | "Process data" >> tpb.run(input=input_node, output=output_node) | "Save result" >> tpb.to_csv(output_path, output_node.schema) ) pipeline.run() ``` If you graph contains more than one input or output nodes, use `run_multi_io` instead. Args: pipe: A Beam PCollection containing the input event set. Use `tpb.from_csv` to read data from csv files, or use `tpb.to_event_set` to import an event set from a dictionary of key/values such as the output of Beam IO connectors (https://beam.apache.org/documentation/io/connectors/). input: Input node of a Temporian graph. output: Output node of a Temporian graph. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: A Beam PCollection containing the output event set. """ output_pipe = run_multi_io( inputs={input: pipe}, outputs=[output], verbose=verbose ) return output_pipe[output]
Runs a multi-input, multi-output Temporian graph in Beam. Usage example: ```python import temporian as tp import temporian.beam as tpb # Create a graph. input_node_1 = tp.input_node([("a", tp.float32)]) input_node_2 = tp.input_node([("b", tp.float32)]) output_node_1 = input_node_1.moving_sum(4) output_node_2 = input_node_2.moving_sum(4) with beam.Pipeline() as p: input_beam_1 = p | tpb.from_csv(input_path_1, input_node_1.schema) input_beam_2 = p | tpb.from_csv(input_path_2, input_node_2.schema) outputs = tpb.run_multi_io( inputs={ input_node_1: input_beam_1, input_node_2: input_beam_2, }, outputs=[output_node_1, output_node_2], ) outputs[output_node_1] | tpb.to_csv( output_path_1, output_node_1.schema, shard_name_template="" ) outputs[output_node_2] | tpb.to_csv( output_path_2, output_node_2.schema, shard_name_template="" ) pipeline.run() ``` If you graph contains a single input and output node, use `run` instead. Args: inputs: EventSetNode indexed dictionary of input Beam event-sets for all the inputs of the Temporian graph. outputs: List of output nodes to compute. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: A output node indexed dictionary of output beam event-sets. Each item in `outputs` becomes one item in the returned dictionary.
def run_multi_io( inputs: Dict[EventSetNode, BeamEventSet], outputs: List[EventSetNode], verbose: int = 0, ) -> Dict[EventSetNode, BeamEventSet]: """Runs a multi-input, multi-output Temporian graph in Beam. Usage example: ```python import temporian as tp import temporian.beam as tpb # Create a graph. input_node_1 = tp.input_node([("a", tp.float32)]) input_node_2 = tp.input_node([("b", tp.float32)]) output_node_1 = input_node_1.moving_sum(4) output_node_2 = input_node_2.moving_sum(4) with beam.Pipeline() as p: input_beam_1 = p | tpb.from_csv(input_path_1, input_node_1.schema) input_beam_2 = p | tpb.from_csv(input_path_2, input_node_2.schema) outputs = tpb.run_multi_io( inputs={ input_node_1: input_beam_1, input_node_2: input_beam_2, }, outputs=[output_node_1, output_node_2], ) outputs[output_node_1] | tpb.to_csv( output_path_1, output_node_1.schema, shard_name_template="" ) outputs[output_node_2] | tpb.to_csv( output_path_2, output_node_2.schema, shard_name_template="" ) pipeline.run() ``` If you graph contains a single input and output node, use `run` instead. Args: inputs: EventSetNode indexed dictionary of input Beam event-sets for all the inputs of the Temporian graph. outputs: List of output nodes to compute. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: A output node indexed dictionary of output beam event-sets. Each item in `outputs` becomes one item in the returned dictionary. """ schedule = build_schedule( inputs=set(inputs.keys()), outputs=set(outputs), verbose=verbose ) data = {**inputs} # Check that operators implementations are available needed_operators = set() for step in schedule.steps: needed_operators.add(step.op.definition.key) implementation_lib.check_operators_implementations_are_available( needed_operators ) num_steps = len(schedule.steps) for step_idx, step in enumerate(schedule.steps): operator_def = step.op.definition if verbose > 0: print("=============================", file=sys.stderr) print( f"{step_idx+1} / {num_steps}: Run {step.op}", file=sys.stderr, ) # Construct operator inputs operator_inputs = { input_key: data[input_node] for input_key, input_node in step.op.inputs.items() } # Get Beam implementation implementation_cls = implementation_lib.get_implementation_class( operator_def.key ) implementation = implementation_cls(step.op) # Add implementation to Beam pipeline operator_outputs = implementation(**operator_inputs) # Collect outputs for output_key, output_node in step.op.outputs.items(): data[output_node] = operator_outputs[output_key] return {output: data[output] for output in outputs}
Checks if operator implementations are available.
def check_operators_implementations_are_available(needed: Set[str]):
    """Checks if operator implementations are available."""
    missing = set(needed) - set(_OPERATOR_IMPLEMENTATIONS.keys())
    if missing:
        raise ValueError(
            f"Unknown operator implementations '{missing}' for Beam backend. It"
            " seems this operator is only available for the in-process"
            " Temporian backend. Available Beam operator implementations are:"
            f" {list(_OPERATOR_IMPLEMENTATIONS.keys())}."
        )
Registers an operator implementation.
def register_operator_implementation(
    operator_class, operator_implementation_class
):
    """Registers an operator implementation."""
    op_key = operator_class.operator_key()
    if op_key in _OPERATOR_IMPLEMENTATIONS:
        raise ValueError("Operator implementation already registered")
    _OPERATOR_IMPLEMENTATIONS[op_key] = operator_implementation_class
Gets an operator implementation class from a registered key.
def get_implementation_class(key: str):
    """Gets an operator implementation class from a registered key."""
    if key not in _OPERATOR_IMPLEMENTATIONS:
        raise ValueError(
            f"Unknown operator implementation '{key}' for Beam backend. It"
            " seems this operator is only available for the in-process"
            " Temporian backend. Available Beam operator implementations are:"
            f" {list(_OPERATOR_IMPLEMENTATIONS.keys())}."
        )
    return _OPERATOR_IMPLEMENTATIONS[key]
Lists the registered operator implementations.
def registered_implementations() -> Dict[str, Any]:
    """Lists the registered operator implementations."""
    return _OPERATOR_IMPLEMENTATIONS
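The four helpers above form a plain dictionary registry keyed by operator key. A self-contained sketch of the same pattern (names are illustrative, not the real `_OPERATOR_IMPLEMENTATIONS` module):

```python
from typing import Any, Dict

_REGISTRY: Dict[str, Any] = {}  # stand-in for _OPERATOR_IMPLEMENTATIONS

def register(key: str, implementation: Any) -> None:
    # Mirrors register_operator_implementation: refuse duplicate keys.
    if key in _REGISTRY:
        raise ValueError(f"Implementation already registered for '{key}'")
    _REGISTRY[key] = implementation

def get(key: str) -> Any:
    # Mirrors get_implementation_class: fail with the list of known keys.
    if key not in _REGISTRY:
        raise ValueError(f"Unknown key '{key}'. Available: {list(_REGISTRY)}")
    return _REGISTRY[key]

register("MOVING_SUM", object)
print(get("MOVING_SUM"))  # <class 'object'>
print(list(_REGISTRY))    # ['MOVING_SUM']
```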
Parse a csv file into dictionary of key -> value.
def _parse_csv_file(
    file: beam.io.filesystem.FileMetadata,
) -> Iterator[Dict[str, str]]:
    """Parse a csv file into dictionary of key -> value."""
    with beam.io.filesystems.FileSystems.open(file.path) as byte_stream:
        string_stream = (x.decode("utf-8") for x in byte_stream)
        for row in csv.DictReader(string_stream):
            yield row
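The decode-then-`csv.DictReader` pattern above can be tried in isolation, with an in-memory byte stream standing in for the Beam `FileSystems` handle:

```python
import csv
import io

# io.BytesIO stands in for beam.io.filesystems.FileSystems.open(...).
byte_stream = io.BytesIO(b"timestamp,f1\n1.0,10\n2.0,20\n")
string_stream = (line.decode("utf-8") for line in byte_stream)
for row in csv.DictReader(string_stream):
    print(row)  # {'timestamp': '1.0', 'f1': '10'}, then {'timestamp': '2.0', 'f1': '20'}
```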
Reads a file or set of csv files into a PCollection of key->values. This format is similar to the output of the official Beam IO connectors: https://beam.apache.org/documentation/io/connectors/ CSV values are always strings, so the output of `from_csv_raw` is always a dictionary of string to string. Use `to_event_set` (or better, use `from_csv` instead of `from_csv_raw`) to cast values to the expected pipeline input dtype. Args: pipe: The beginning of a Beam pipeline. file_pattern: Path or path matching expression compatible with `MatchFiles`. Returns: A PCollection of dictionaries of key:value.
def from_csv_raw(pipe, file_pattern: str) -> beam.PCollection[Dict[str, str]]: """Reads a file or set of csv files into a PCollection of key->values. This format is similar to output of the official beam IO connectors: https://beam.apache.org/documentation/io/connectors/ CSV values are always string, so the output of `from_csv_raw` is always a dictionary of string to string. Use `to_event_set` (or better, use `from_csv` instead of `from_csv_raw`) to cast values to the expected pipeline input dtype. Args: pipe: A begin Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. Returns: A PCollection of dictionary of key:value. """ return ( pipe | "List files" >> MatchFiles(file_pattern) | "Shuffle" >> beam.Reshuffle() | "Parse file" >> beam.FlatMap(_parse_csv_file) )
Reads a file or set of csv files into a Beam EventSet. Limitation: Timestamps have to be numerical values. See documentation of `to_event_set` for more details. Usage example: ``` input_node: tp.EventSetNode = ... p | tpb.from_csv("/tmp/path.csv", input_node.schema) | ... ``` `from_csv` is equivalent to `from_csv_raw + to_event_set`. Args: pipe: Begin Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. Returns: A PCollection of event-set compatible with tpb.run.
def from_csv( pipe, file_pattern: str, schema: Schema, timestamp_key: str = "timestamp" ) -> BeamEventSet: """Reads a file or set of csv files into a Beam EventSet. Limitation: Timestamps have to be numerical values. See documentation of `to_event_set` for more details. Usage example: ``` input_node: tp.EventSetNode = ... p | tpb.from_csv("/tmp/path.csv", input_node.schema) | ... ``` `from_csv` is equivalent to `from_csv_raw + to_event_set`. Args: pipe: Begin Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. Returns: A PCollection of event-set compatible with tpb.run. """ return ( pipe | "Read csv" >> from_csv_raw(file_pattern) | "Convert to Event Set" >> to_event_set(schema, timestamp_key, format="single_events") )
Writes a Beam EventSet to a file or set of csv files. Limitation: Timestamps are always stored as numerical values. TODO: Support datetime timestamps. Usage example: ``` input_node: tp.EventSetNode = ... ( p | tpb.from_csv("/input.csv", input_node.schema) | ... # processing | tpb.to_csv("/output.csv", output_node.schema) ) ``` Args: pipe: Beam pipe containing an EventSet. file_path_prefix: Path or path matching expression compatible with WriteToText. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. **wargs: Arguments passed to `beam.io.textio.WriteToText`.
def to_csv( pipe: BeamEventSet, file_path_prefix: str, schema: Schema, timestamp_key: str = "timestamp", **wargs, ): """Writes a Beam EventSet to a file or set of csv files. Limitation: Timestamps are always stored as numerical values. TODO: Support datetime timestamps. Usage example: ``` input_node: tp.EventSetNode = ... ( p | tpb.from_csv("/input.csv", input_node.schema) | ... # processing | tpb.to_csv("/output.csv", output_node.schema) ) ``` Args: pipe: Beam pipe containing an EventSet. file_path_prefix: Path or path matching expression compatible with WriteToText. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. **wargs: Arguments passed to `beam.io.textio.WriteToText`. """ header_values = ( [timestamp_key] + schema.index_names() + schema.feature_names() ) header_string = io.StringIO() header_writer = csv.writer(header_string) header_writer.writerow(header_values) return ( add_feature_idx_and_flatten(pipe) | "Group by features" >> beam.GroupByKey() | "Convert to csv" >> beam.Map(_convert_to_csv) | "Write csv" >> beam.io.textio.WriteToText( file_path_prefix=file_path_prefix, header=header_string.getvalue(), append_trailing_newlines=False, **wargs, ) )
Convert a user feature value to the internal representation.
def _cast_feature_value(value: Any, dtype: DType) -> SingleFeatureValue:
    """Convert a user feature value to the internal representation."""
    py_type = tp_dtype_to_py_type(dtype)
    if py_type is bytes and isinstance(value, str):
        return bytes(value, encoding="utf-8")
    return py_type(value)
Convert a user index value to the internal representation.
def _cast_index_value(value: Any, dtype: DType) -> BeamIndexKeyItem:
    """Convert a user index value to the internal representation."""
    return _cast_feature_value(value, dtype)
Transforms a dict of key:value to a StructuredRow. In essence, this function replaces the string index feature and index values with an integer index (based on a schema). Example: row = {"timestamps": 10, "f1": 11, "f2": 12, "i1": 13} schema: features = [f1, f2], indexes = [i1] timestamp_key: timestamps Output: ((13,), (10, (11, 12))) This function is used during the conversion of key:value features fed by the user into BeamEventSet, the working format used by Temporian.
def _parse_and_index( row: Dict[str, Any], schema: Schema, timestamp_key: str ) -> StructuredRow: """Transforms a dict of key:value to a StructuredRow. In essence, this function replaces the string index feature and index values with a integer index (based on a schema). Example: row = {"timestamps": 10, "f1": 11, "f2": 12, "i1": 13} schema: features = [f1, f2], indexes = [i1] timestamp_key: timestamps Output (13, ), (10, (11, 12)) This function is used during the conversion of key:value features feed by the user into BeamEventSet, the working format used by Temporian. """ index_values = tuple( _cast_index_value(row[index.name], index.dtype) for index in schema.indexes ) feature_values = tuple( _cast_feature_value(row[feature.name], feature.dtype) for feature in schema.features ) timestamp = float(row[timestamp_key]) return index_values, (timestamp, feature_values)
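The restructuring performed by `_parse_and_index` can be reproduced without a `Schema` object; this pure-Python sketch uses the example from the docstring:

```python
# Plain lists stand in for schema.indexes / schema.features.
row = {"timestamps": 10, "f1": 11, "f2": 12, "i1": 13}
index_names, feature_names, timestamp_key = ["i1"], ["f1", "f2"], "timestamps"

index_values = tuple(row[name] for name in index_names)
feature_values = tuple(row[name] for name in feature_names)
structured_row = (index_values, (float(row[timestamp_key]), feature_values))

print(structured_row)  # ((13,), (10.0, (11, 12)))
```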
Same as _MergeTimestamps, but when there are no features.
def _merge_timestamps_no_features(
    item: Tuple[BeamIndexKey, Sequence[StructuredRowValue]],
) -> FeatureItem:
    """Same as _MergeTimestamps, but when there are no features."""
    index, feat_and_ts = item
    timestamps = np.fromiter(
        (v[0] for v in feat_and_ts), dtype=np.float64, count=len(feat_and_ts)
    )
    return index, (timestamps, None)
Converts a `indexedEvents` event-set into an internal event-set. Example: Input Schema features=[("f1", DType.INT64), ("f2", DType.STRING)] indexes=[("i1", DType.INT64), ("i2", DType.STRING)] input (one item): { "timestamp": [100.0, 101.0, 102.0], "f1": [1, 2, 3], "f2": [b"a", b"b", b"c"], "i1": 10, "i2": b"x", } Output (two items) # Feature "f1" ((10, b"x"), 0, ([100.0, 101.0, 102.0], [1, 2, 3]) # Feature "f2" ((10, b"x"), 1, ([100.0, 101.0, 102.0], [b"a", b"b", b"c"])
def _event_set_dict_to_event_set( input: Dict[str, Any], schema: Schema, timestamp_key: str ) -> Iterator[FeatureItemWithIdx]: """Converts a `indexedEvents` event-set into an internal event-set. Example: Input Schema features=[("f1", DType.INT64), ("f2", DType.STRING)] indexes=[("i1", DType.INT64), ("i2", DType.STRING)] input (one item): { "timestamp": [100.0, 101.0, 102.0], "f1": [1, 2, 3], "f2": [b"a", b"b", b"c"], "i1": 10, "i2": b"x", } Output (two items) # Feature "f1" ((10, b"x"), 0, ([100.0, 101.0, 102.0], [1, 2, 3]) # Feature "f2" ((10, b"x"), 1, ([100.0, 101.0, 102.0], [b"a", b"b", b"c"]) """ timestamps = input[timestamp_key] if ( not isinstance(timestamps, np.ndarray) or timestamps.dtype.type != np.float64 ): raise ValueError( f"Timestamp with value {timestamps} is expected to be np.float64" f" numpy array, but has dtype {type(timestamps)} instead." ) index = [] for index_schema in schema.indexes: expected_type = tp_dtype_to_py_type(index_schema.dtype) src_value = input[index_schema.name] if not isinstance(src_value, expected_type): raise ValueError( f'Index "{index_schema.name}" with value "{src_value}" is' f" expected to be of type {expected_type} (since the Temporian " f" dtype is {index_schema.dtype}) but type" f" {type(src_value)} was found." ) index.append(src_value) index_tuple = tuple(index) for feature_idx, feature_schema in enumerate(schema.features): expected_dtype = tp_dtype_to_np_dtype(feature_schema.dtype) src_value = input[feature_schema.name] if ( not isinstance(src_value, np.ndarray) or src_value.dtype.type != expected_dtype ): if isinstance(src_value, np.ndarray): effective_type = src_value.dtype.type else: effective_type = type(src_value) raise ValueError( f'Feature "{feature_schema.name}" with value "{src_value}" is' " expected to be a numpy array with dtype" f" {expected_dtype} (since the Temporian dtype is" f" {feature_schema.dtype}) but numpy dtype" f" {effective_type} was found." ) yield index_tuple, (timestamps, src_value, feature_idx)
Same as _event_set_dict_to_event_set, but without features
def _event_set_dict_to_event_set_no_features( input: Dict[str, Any], schema: Schema, timestamp_key: str ) -> FeatureItem: """Same as _event_set_dict_to_event_set, but without features""" timestamps = input[timestamp_key] if ( not isinstance(timestamps, np.ndarray) or timestamps.dtype.type != np.float64 ): raise ValueError( f"Timestamp with value {timestamps} is expected to be np.float64" f" numpy array, but has dtype {type(timestamps)} instead." ) index = [] for index_schema in schema.indexes: expected_type = tp_dtype_to_py_type(index_schema.dtype) src_value = input[index_schema.name] if not isinstance(src_value, expected_type): raise ValueError( f'Index "{index_schema.name}" with value "{src_value}" is' f" expected to be of type {expected_type} (since the Temporian " f" dtype is {index_schema.dtype}) but type" f" {type(src_value)} was found." ) index.append(src_value) index_tuple = tuple(index) return index_tuple, (timestamps, None)
Converts a PCollection of key:value to a Beam EventSet. This method is compatible with the output of `from_csv_raw` and the official Beam IO connectors. When importing data from csv files, use `from_csv` to convert csv files directly into EventSets. Unlike the Temporian in-process EventSet import method ([tp.event_set][temporian.event_set]), this method (`tpb.to_event_set`) requires timestamps to be numerical values. Args: pipe: Beam pipe of key values. schema: Schema of the data. Note: The schema of a Temporian node is available with `node.schema`. timestamp_key: Key containing the timestamps. format: Format of the events inside the received dictionary. See [DictEventSetFormat][temporian.io.format.DictEventSetFormat] for more. Returns: Beam EventSet.
def to_event_set( pipe: beam.PCollection[Dict[str, Any]], schema: Schema, timestamp_key: str = "timestamp", format: DictEventSetFormatChoices = DictEventSetFormat.GROUPED_BY_INDEX, ) -> BeamEventSet: """Converts a PCollection of key:value to a Beam EventSet. This method is compatible with the output of `from_csv_raw` and the Official Beam IO connectors. When importing data from csv files, use `from_csv` to convert csv files directly into EventSets. Unlike Temporian in-process EventSet import method ( [tp.event_set][temporian.event_set])), this method (`tpb.to_event_set`) requires for timestamps to be numerical values. Args: pipe: Beam pipe of key values. schema: Schema of the data. Note: The schema of a Temporian node is available with `node.schema`. timestamp_key: Key containing the timestamps. format: Format of the events inside the received dictionary. See [DictEventSetFormat][temporian.io.format.DictEventSetFormat] for more. Returns: Beam EventSet. """ # TODO: Add support for datetime timestamps. num_features = len(schema.features) if format == DictEventSetFormat.GROUPED_BY_INDEX: if num_features != 0: return partition_by_feature_idx( pipe | "Parse dict" >> beam.FlatMap( _event_set_dict_to_event_set, schema, timestamp_key ), num_features=num_features, reshuffle=True, ) else: return _reshuffle_item_in_tuples( ( pipe | "Parse dict" >> beam.Map( _event_set_dict_to_event_set_no_features, schema, timestamp_key, ), ) ) elif format == DictEventSetFormat.SINGLE_EVENTS: indexed = ( pipe | "Parse and index" >> beam.Map(_parse_and_index, schema, timestamp_key) # Group by index values and feature index | "Aggregate" >> beam.GroupByKey() # Build feature and timestamps arrays. ) if num_features != 0: return partition_by_feature_idx( indexed # Build feature and timestamps arrays. | "Merge by timestamps" >> beam.ParDo(_MergeTimestamps(schema.features)), num_features=num_features, reshuffle=True, ) else: return _reshuffle_item_in_tuples( ( indexed # Build feature and timestamps arrays. | "Merge by timestamps" >> beam.Map(_merge_timestamps_no_features), ) ) else: raise ValueError(f"Unknown format {format}")
Groups a tuple of PCollection of features into a single PCollection.
def add_feature_idx_and_flatten( pipe: BeamEventSet, ) -> beam.PCollection[FeatureItemWithIdx]: """Groups a tuple of PCollection of features into a single PCollection.""" with_idx = [] for feature_idx, feature in enumerate(pipe): with_idx.append( feature | f"Add feature idx {feature_idx}" >> beam.Map(_add_feature_idx, feature_idx) ) return tuple(with_idx) | "Flatten features" >> beam.Flatten()
Splits a PCollection of features into a tuple of PCollections. The inverse of add_feature_idx_and_flatten.
def partition_by_feature_idx( pipe: beam.PCollection[FeatureItemWithIdx], num_features: int, reshuffle: bool, ) -> BeamEventSet: """Splits a PCollection of features into a tuple of PCollections. The inverse of add_feature_idx_and_flatten. """ partitions = pipe | "Partition by features" >> beam.Partition( _extract_feature_idx, num_features ) without_idx = [] for feature_idx, feature in enumerate(partitions): item = feature | f"Remove feature idx {feature_idx}" >> beam.Map( _remove_feature_idx ) if reshuffle: item = item | f"Shuffle feature {feature_idx}" >> beam.Reshuffle() without_idx.append(item) return tuple(without_idx)
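`add_feature_idx_and_flatten` and `partition_by_feature_idx` are inverses: one tags each per-feature PCollection with its index and flattens them, the other routes elements back by that index. A self-contained Beam sketch of the round trip, with small tuples standing in for real feature items:

```python
import apache_beam as beam

with beam.Pipeline() as p:
    # Two per-feature PCollections; the first tuple item plays the feature idx.
    feature_0 = p | "Create f0" >> beam.Create([(0, "a"), (0, "b")])
    feature_1 = p | "Create f1" >> beam.Create([(1, "c")])

    # Flatten into a single PCollection (add_feature_idx_and_flatten).
    merged = (feature_0, feature_1) | "Flatten" >> beam.Flatten()

    # Route each element back to its feature (partition_by_feature_idx).
    parts = merged | "Partition" >> beam.Partition(lambda kv, n: kv[0], 2)

    parts[0] | "Print f0" >> beam.Map(print)  # (0, 'a') and (0, 'b')
    parts[1] | "Print f1" >> beam.Map(print)  # (1, 'c')
```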
Converts a Beam EventSet to PCollection of key->value. This method is compatible with the output of `from_csv_raw` and the Official Beam IO connectors. This method is the inverse of `to_event_set`. Args: pipe: PCollection of Beam EventSet. schema: Schema of the data. timestamp_key: Key containing the timestamps. format: Format of the events inside the output dictionary. See [DictEventSetFormat][temporian.io.format.DictEventSetFormat] for more. Returns: Beam pipe of key values.
def to_dict( pipe: BeamEventSet, schema: Schema, timestamp_key: str = "timestamp", format: DictEventSetFormatChoices = DictEventSetFormat.GROUPED_BY_INDEX, ) -> beam.PCollection[Dict[str, Any]]: """Converts a Beam EventSet to PCollection of key->value. This method is compatible with the output of `from_csv_raw` and the Official Beam IO connectors. This method is the inverse of `to_event_set`. Args: pipe: PCollection of Beam EventSet. schema: Schema of the data. timestamp_key: Key containing the timestamps. format: Format of the events inside the output dictionary. See [DictEventSetFormat][temporian.io.format.DictEventSetFormat] for more. Returns: Beam pipe of key values. """ # TODO: Add support for datetime timestamps. grouped_by_features = ( add_feature_idx_and_flatten(pipe) | "Group by index " >> beam.GroupByKey() ) if format == DictEventSetFormat.GROUPED_BY_INDEX: return grouped_by_features | "Convert to dict" >> beam.Map( _convert_to_dict_event_set_key_value, schema, timestamp_key ) elif format == DictEventSetFormat.SINGLE_EVENTS: return grouped_by_features | "Convert to dict" >> beam.FlatMap( _convert_to_dict_event_key_value, schema, timestamp_key ) else: raise ValueError(f"Unknown format {format}")
Imports an EventSet from a TF.Records of TF.Examples. TF.Records of TF.Examples is one of the standard solution to store data for TensorFlow models. https://www.tensorflow.org/tutorials/load_data/tfrecord The GZIP compression is used. Usage example: ``` input_node: tp.EventSetNode = ... ( p | tpb.from_tensorflow_record("/input.tfr.gzip", input_node.schema) | ... # processing | tpb.to_tensorflow_record("/output.tfr.gzip", output_node.schema) ) ``` Args: pipe: Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. format: Format of the events inside the received record. At the moment only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See [TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat] for more. Returns: A PCollection of event-set compatible with tpb.run.
def from_tensorflow_record( pipe, file_pattern: str, schema: Schema, timestamp_key: str = "timestamp", format: TFRecordEventSetFormatChoices = TFRecordEventSetFormat.GROUPED_BY_INDEX, ) -> BeamEventSet: """Imports an EventSet from a TF.Records of TF.Examples. TF.Records of TF.Examples is one of the standard solution to store data for TensorFlow models. https://www.tensorflow.org/tutorials/load_data/tfrecord The GZIP compression is used. Usage example: ``` input_node: tp.EventSetNode = ... ( p | tpb.from_tensorflow_record("/input.tfr.gzip", input_node.schema) | ... # processing | tpb.to_tensorflow_record("/output.tfr.gzip", output_node.schema) ) ``` Args: pipe: Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. format: Format of the events inside the received record. At the moment only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See [TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat] for more. Returns: A PCollection of event-set compatible with tpb.run. """ if format == TFRecordEventSetFormat.SINGLE_EVENTS: raise ValueError( "format=TFRecordEventSetFormat.SINGLE_EVENTS is not implemented" ) if format != TFRecordEventSetFormat.GROUPED_BY_INDEX: raise ValueError(f"Unknown format {format}") tf = import_tf() return ( pipe | "Read tf.record" >> beam.io.tfrecordio.ReadFromTFRecord( file_pattern=file_pattern, coder=beam.coders.ProtoCoder(tf.train.Example), compression_type=beam.io.filesystem.CompressionTypes.GZIP, ) | "Tf.record to dict" >> beam.ParDo( _TFExampleToDict( schema=schema, timestamp_key=timestamp_key, ) ) | "Dict to event set" >> to_event_set( schema=schema, timestamp_key=timestamp_key, format=format, ) )
Export an EventSet to a TF.Records of TF.Examples. TF.Records of TF.Examples is one of the standard solution to store data for TensorFlow models. https://www.tensorflow.org/tutorials/load_data/tfrecord The GZIP compression is used. Usage example: ``` input_node: tp.EventSetNode = ... ( p | tpb.from_tensorflow_record("/input.tfr.gzip", input_node.schema) | ... # processing | tpb.to_tensorflow_record("/output.tfr.gzip", output_node.schema) ) ``` Args: pipe: Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. format: Format of the events inside the received record. At the moment only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See [TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat] for more.
def to_tensorflow_record( pipe: BeamEventSet, file_path_prefix: str, schema: Schema, timestamp_key: str = "timestamp", format: TFRecordEventSetFormatChoices = TFRecordEventSetFormat.GROUPED_BY_INDEX, **kwargs, ): """Export an EventSet to a TF.Records of TF.Examples. TF.Records of TF.Examples is one of the standard solution to store data for TensorFlow models. https://www.tensorflow.org/tutorials/load_data/tfrecord The GZIP compression is used. Usage example: ``` input_node: tp.EventSetNode = ... ( p | tpb.from_tensorflow_record("/input.tfr.gzip", input_node.schema) | ... # processing | tpb.to_tensorflow_record("/output.tfr.gzip", output_node.schema) ) ``` Args: pipe: Beam pipe. file_pattern: Path or path matching expression compatible with `MatchFiles`. schema: Schema of the data. If you have a Temporian node, the schema is available with `node.schema`. timestamp_key: Key containing the timestamps. format: Format of the events inside the received record. At the moment only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See [TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat] for more. """ if format == TFRecordEventSetFormat.SINGLE_EVENTS: raise ValueError( "format=TFRecordEventSetFormat.SINGLE_EVENTS is not implemented" ) if format != TFRecordEventSetFormat.GROUPED_BY_INDEX: raise ValueError(f"Unknown format {format}") tf = import_tf() return ( pipe | "Event set to dict" >> to_dict(schema=schema, timestamp_key=timestamp_key, format=format) | "Dict to Tf.record" >> beam.ParDo( _DictToTFExample(schema=schema, timestamp_key=timestamp_key) ) | "Write tf.record" >> beam.io.tfrecordio.WriteToTFRecord( file_path_prefix=file_path_prefix, coder=beam.coders.ProtoCoder(tf.train.Example), compression_type=beam.io.filesystem.CompressionTypes.GZIP, **kwargs, ) )
Recursively converts numpy arrays to Python lists inside a structure of dicts and lists. Args: data: The structure to convert. Returns: The same structure with every numpy array replaced by a list.
def structure_np_to_list(data):
    """Recursively converts numpy arrays to Python lists inside a structure of
    dicts and lists.

    Args:
        data: The structure to convert.

    Returns:
        The same structure with every numpy array replaced by a list.
    """
    if isinstance(data, np.ndarray):
        return data.tolist()
    if isinstance(data, dict):
        return {key: structure_np_to_list(value) for key, value in data.items()}
    if isinstance(data, list):
        return [structure_np_to_list(item) for item in data]
    if isinstance(data, (int, float, str, bytes)):
        return data
    raise ValueError(f"Unsupported type {type(data)}")
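A quick usage example of the helper above:

```python
import numpy as np

data = {
    "timestamps": np.array([1.0, 2.0]),
    "features": [np.array([10, 20]), "name"],
}
print(structure_np_to_list(data))
# {'timestamps': [1.0, 2.0], 'features': [[10, 20], 'name']}
```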
Adds the new index values to all remaining feature items.
def _add_index_to_feature( items: Tuple[ BeamIndexKey, Tuple[Iterable[FeatureItemValue], ...], ], ) -> Iterator[FeatureItem]: """Adds the new index values to all remaining feature items.""" old_index, mess = items # Note: "mess" contains exactly one value in each "Iterable". feature = next(iter(mess[0])) indexes_values = [next(iter(item))[POS_FEATURE_VALUES] for item in mess[1:]] timestamps, feature_values = feature assert feature_values is not None # Compute the example idxs for each unique index value. # # Note: This solution is very slow. This is the same used in the in-process # implementation. new_index_to_value_idxs = defaultdict(list) for event_idx, new_index in enumerate(zip(*indexes_values)): new_index = tuple([v.item() for v in new_index]) new_index_to_value_idxs[new_index].append(event_idx) for new_index, example_idxs in new_index_to_value_idxs.items(): # Note: The new index is added after the existing index items. dst_indexes = old_index + new_index assert isinstance(dst_indexes, tuple) # This is the "BeamEventSet" format. yield dst_indexes, ( timestamps[example_idxs], feature_values[example_idxs], )
Applies a function on each feature of a Beam eventset.
def beam_eventset_map( src: BeamEventSet, name: str, fn: Callable[[FeatureItem, int], Iterable[FeatureItem]], ) -> BeamEventSet: """Applies a function on each feature of a Beam eventset.""" def apply(idx, item): return item | f"Map on feature #{idx} {name}" >> beam.Map(fn, idx) return tuple([apply(idx, item) for idx, item in enumerate(src)])
Applies a function on each feature of a Beam eventset.
def beam_eventset_flatmap( src: BeamEventSet, name: str, fn: Callable[[FeatureItem, int], Iterable[FeatureItem]], ) -> BeamEventSet: """Applies a function on each feature of a Beam eventset.""" def apply(idx, item): return item | f"Map on feature #{idx} {name}" >> beam.FlatMap(fn, idx) return tuple([apply(idx, item) for idx, item in enumerate(src)])
Applies a function on each feature of a Beam eventset.
def beam_eventset_map_with_sampling( input: BeamEventSet, sampling: BeamEventSet, name: str, fn: Callable[ [BeamIndexKey, Optional[FeatureItemValue], FeatureItemValue, int], FeatureItem, ], ) -> BeamEventSet: """Applies a function on each feature of a Beam eventset.""" assert len(sampling) >= 1 def fn_on_cogroup( item: Tuple[ BeamIndexKey, Tuple[Iterable[FeatureItemValue], Iterable[FeatureItemValue]], ], idx: int, ) -> Iterator[FeatureItem]: index, (it_feature, it_sampling) = item feature = extract_from_iterable(it_feature) sampling = extract_from_iterable(it_sampling) if sampling is not None: yield fn(index, feature, sampling, idx) def apply(idx, item): return ( (item, sampling[0]) | f"Join feature and sampling on feature #{idx} {name}" >> beam.CoGroupByKey() | f"Map on feature #{idx} {name}" >> beam.FlatMap(fn_on_cogroup, idx) ) return tuple([apply(idx, item) for idx, item in enumerate(input)])
Checks the result of the Numpy backend against the Beam backend. Args: test: The absl test case. input_data: An EventSet (or list of EventSets) to feed to the graph. output_node: Output of the graph. cast: DType to cast beam's output to after loading it back. Useful for comparing outputs that are expected to be int32 for example, since when written to CSV those will be loaded back up as int64.
def check_beam_implementation( test: absltest.TestCase, input_data: Union[EventSet, List[EventSet]], output_node: EventSetNode, cast: Optional[DType] = None, ): """Checks the result of the Numpy backend against the Beam backend. Args: test: The absl's test. input_data: An event set to feed to a graph. output_node: Output of the graph. input_node: Input of the graph. If not set, uses input_data.node() instead. cast: DType to cast beam's output to after loading it from csv. Useful for comparing outputs that are expected to be int32 for example, since when written to CSV those will be loaded back up as int64. """ if isinstance(input_data, EventSet): input_data = [input_data] tmp_dir = tempfile.mkdtemp() output_path = os.path.join(tmp_dir, "output.csv") input_paths = [] # Export input data to csv for input_idx, input_evset in enumerate(input_data): input_path = os.path.join(tmp_dir, f"input_{input_idx}.csv") input_paths.append(input_path) to_tensorflow_record(input_evset, path=input_path) # Run the Temporian program using the Beam backend with TestPipeline() as p: input_pcollection = {} for input_path, input_evset in zip(input_paths, input_data): input_pcollection[ input_evset.node() ] = p | beam_from_tensorflow_record( input_path, input_evset.node().schema ) output_pcollection = run_multi_io( inputs=input_pcollection, outputs=[output_node] ) assert len(output_pcollection) == 1 output = output_pcollection[output_node] | beam_to_tensorflow_record( output_path, output_node.schema, shard_name_template="" ) assert_that( output, equal_to([output_path]), ) beam_output = from_tensorflow_record(output_path, output_node.schema) if cast: beam_output = beam_output.cast(cast) # Run the Temporian program using the numpy backend expected_output = output_node.run(input_data) assertEqualEventSet(test, beam_output, expected_output)
Compiles a Temporian function. A Temporian function is a function that takes [`EventSetOrNodes`][temporian.types.EventSetOrNode] as arguments and returns [`EventSetOrNodes`][temporian.types.EventSetOrNode] as outputs. Compiling a function allows Temporian to optimize the underlying graph defined by the operators used inside the function, making it run on [`EventSets`][temporian.EventSet] more efficiently than if it weren't compiled, both in terms of memory and speed. Compiling a function is a necessary step before saving it to a file with [`tp.save()`][temporian.save]. The output can be a single EventSetOrNode, a list of EventSetOrNodes, or a dictionary of names to EventSetOrNodes. Example usage: ```python >>> @tp.compile ... def f(x: tp.EventSetNode, y: tp.EventSetNode) -> tp.EventSetNode: ... return x.prefix("pre_").cumsum() + y >>> evset = tp.event_set( ... timestamps=[1, 2, 3], ... features={"value": [10, 20, 30]}, ... ) >>> result = f(evset, evset) >>> isinstance(result, tp.EventSet) True ``` Example usage with arguments: ```python >>> @tp.compile(verbose=1) ... def f(x: tp.EventSetNode) -> tp.EventSetNode: ... return x.prefix("pre_") ``` Args: fn: The function to compile. The function must take EventSetNodes as arguments (and may have other arguments of arbitrary types) and return EventSetNodes as outputs. verbose: If >0, prints details about the execution on the standard error output when the wrapped function is applied eagerly on EventSets. The larger the number, the more information is displayed. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds. Returns: The compiled function.
def compile( fn: Optional[F] = None, *, verbose: int = 0, force_garbage_collector_interval: Optional[float] = 10, ) -> F: """Compiles a Temporian function. A Temporian function is a function that takes [`EventSetOrNodes`][temporian.types.EventSetOrNode] as arguments and returns [`EventSetOrNodes`][temporian.types.EventSetOrNode] as outputs. Compiling a function allows Temporian to optimize the underlying graph defined by the operators used inside the function, making it run on [`EventSets`][temporian.EventSet] more efficiently than if it weren't compiled, both in terms of memory and speed. Compiling a function is a necessary step before saving it to a file with [`tp.save()`][temporian.save]. The output can be a single EventSetOrNode, a list of EventSetOrNodes, or a dictionary of names to EventSetOrNodes. Example usage: ```python >>> @tp.compile ... def f(x: tp.EventSetNode, y: tp.EventSetNode) -> tp.EventSetNode: ... return x.prefix("pre_").cumsum() + y >>> evset = tp.event_set( ... timestamps=[1, 2, 3], ... features={"value": [10, 20, 30]}, ... ) >>> result = f(evset, evset) >>> isinstance(result, tp.EventSet) True ``` Example usage with arguments: ```python >>> @tp.compile(verbose=1) ... def f(x: tp.EventSetNode) -> tp.EventSetNode: ... return x.prefix("pre_") ``` Args: fn: The function to compile. The function must take EventSetNodes as arguments (and may have other arguments of arbitrary types) and return EventSetNodes as outputs. verbose: If >0, prints details about the execution on the standard error output when the wrapped function is applied eagerly on EventSets. The larger the number, the more information is displayed. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds. Returns: The compiled function. """ def _compile(fn): if hasattr(fn, "_typecheck"): raise ValueError( "Apply @compile before @typecheck if using both (i.e. if using" " them as decorators, place @compile just below @typecheck in" " the code)." ) @wraps(fn) def wrapper(*args, **kwargs): is_eager = None args = list(args) # EventSetNode -> EventSet mapping for eager evaluation inputs_map = {} for i, arg in enumerate(args): args[i], is_eager = _process_argument( arg, is_eager=is_eager, inputs_map=inputs_map ) for k, arg in kwargs.items(): kwargs[k], is_eager = _process_argument( arg, is_eager=is_eager, inputs_map=inputs_map ) outputs = fn(*args, **kwargs) if is_eager is None: raise ValueError( "Cannot compile a function without EventSet or EventSetNode" " argument." ) elif is_eager: from temporian.core.evaluation import run return run( query=outputs, input=inputs_map, verbose=verbose, check_execution=True, force_garbage_collector_interval=force_garbage_collector_interval, ) return outputs setattr(wrapper, "is_tp_compiled", True) return wrapper # Function is being called as a decorator if fn is not None: return _compile(fn) # Else the function is being called as a function, so we return a decorator # that will receive the function to compile. return _compile
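`compile` accepts `fn=None` so that it works both as a bare decorator (`@tp.compile`) and as a decorator factory (`@tp.compile(verbose=1)`). A standalone sketch of that dual-use pattern, using an illustrative `trace` decorator rather than the real `compile`:

```python
from functools import wraps
from typing import Callable, Optional

def trace(fn: Optional[Callable] = None, *, verbose: int = 0):
    def _decorate(fn: Callable) -> Callable:
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if verbose > 0:
                print(f"calling {fn.__name__}")
            return fn(*args, **kwargs)
        return wrapper

    if fn is not None:
        return _decorate(fn)  # used bare: @trace
    return _decorate          # used with arguments: @trace(verbose=1)

@trace
def f(x):
    return x + 1

@trace(verbose=1)
def g(x):
    return x * 2

print(f(1), g(2))  # prints "calling g" then "2 4"
```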
Processes arguments to an operator by checking if its being used in eager mode and converting EventSets to EventSetNodes if so. Also checks that all arguments are of the same type (EventSet or EventSetNode), by checking that is_eager is consistent with the type of obj, and raising if not. Note that the inputs_map is modified in-place by this function.
def _process_argument( obj: Any, is_eager: Optional[bool], inputs_map: Dict[EventSetNode, EventSet], ) -> Tuple[Any, Optional[bool]]: """Processes arguments to an operator by checking if its being used in eager mode and converting EventSets to EventSetNodes if so. Also checks that all arguments are of the same type (EventSet or EventSetNode), by checking that is_eager is consistent with the type of obj, and raising if not. Note that the inputs_map is modified in-place by this function. """ if isinstance(obj, tuple): obj = list(obj) for i, v in enumerate(obj): obj[i], is_eager = _process_argument(v, is_eager, inputs_map) return tuple(obj), is_eager if isinstance(obj, list): obj = copy(obj) for i, v in enumerate(obj): obj[i], is_eager = _process_argument(v, is_eager, inputs_map) return obj, is_eager if isinstance(obj, dict): obj = copy(obj) for k, v in obj.items(): obj[k], is_eager = _process_argument(v, is_eager, inputs_map) return obj, is_eager err = ( "Cannot mix EventSets and EventSetNodes as inputs to an operator." " Either get the node corresponding to each EventSet with .node(), or" " pass EventSets only." ) if isinstance(obj, EventSet): if is_eager is None: is_eager = True elif not is_eager: # If an EventSetNode had been received and we receive an EventSet, raise raise ValueError(err) node = obj.node() # Its fine to overwrite the same node since the corresponding EventSet # is guaranteed to be the same one. inputs_map[node] = obj obj = node elif isinstance(obj, EventSetNode): if is_eager is None: is_eager = False elif is_eager: # If an EventSet had been received and we receive an EventSetNode, raise raise ValueError(err) return obj, is_eager
Evaluates [`EventSetNodes`][temporian.EventSetNode] on [`EventSets`][temporian.EventSet]. Performs all computation defined by the graph between the `query` EventSetNodes and the `input` EventSets. The result is returned in the same format as the `query` argument. Single input output example: ```python >>> input_evset = tp.event_set(timestamps=[1, 2, 3], features={"f": [0, 4, 10]}) >>> input_node = input_evset.node() >>> output_node = input_node.moving_sum(5) >>> output_evset = tp.run(output_node, input_evset) >>> # Equivalent >>> output_evset = output_node.run(input_evset) >>> # Also equivalent >>> output_evset = tp.run(output_node, {input_node: input_evset}) ``` Multiple inputs and outputs example: ```python >>> evset_1 = tp.event_set(timestamps=[1, 2, 3], features={"f1": [0.1, 42, 10]}) >>> evset_2 = tp.event_set(timestamps=[1, 2, 3], ... features={"f2": [-1.5, 50, 30]}, ... same_sampling_as=evset_1 ... ) >>> # Graph with 2 inputs and 2 steps >>> input_1 = evset_1.node() >>> input_2 = evset_2.node() >>> step_1 = input_1 + input_2 >>> step_2 = step_1.simple_moving_average(2) >>> # Get step_1 and step_2 at once >>> evset_step_1, evset_step_2 = tp.run([step_1, step_2], ... {input_1: evset_1, input_2: evset_2} ... ) >>> # Equivalent evset_step_1, evset_step_2 = tp.run( ... [step_1, step_2], ... [evset_1, evset_2], ... ) >>> # Also equivalent. EventSets are mapped by their .node(), not by position. >>> evset_step_1, evset_step_2 = tp.run( ... [step_1, step_2], ... [evset_2, evset_1], ... ) ``` Args: query: EventSetNodes to compute. Supports EventSetNode, dict of EventSetNodes and list of EventSetNodes. input: Event sets to be used for the computation. Supports EventSet, list of EventSets, dict of EventSetNodes to EventSets, and dict of EventSetNode names to EventSet. If a single EventSet or list of EventSet, they must be named and will be used as input for the EventSetNodes with the same name. If a dict of EventSetNode names to EventSet, they will be used as input for the EventSetNodes with those names. If a dict of EventSetNodes to event sets, they will be used as input for those EventSetNodes. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. check_execution: If true, the input and output of the op implementation are validated to check any bug in the library internal code. If false, checks are skipped. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds. Returns: An object with the same structure as `query` containing the results. If `query` is a dictionary of EventSetNodes, the returned object will be a dictionary of EventSet. If `query` is a list of EventSetNodes, the returned value will be a list of EventSet with the same order.
def run( query: EventSetNodeCollection, input: NodeToEventSetMapping, verbose: int = 0, check_execution: bool = True, force_garbage_collector_interval: Optional[float] = 10, ) -> EventSetCollection: """Evaluates [`EventSetNodes`][temporian.EventSetNode] on [`EventSets`][temporian.EventSet]. Performs all computation defined by the graph between the `query` EventSetNodes and the `input` EventSets. The result is returned in the same format as the `query` argument. Single input output example: ```python >>> input_evset = tp.event_set(timestamps=[1, 2, 3], features={"f": [0, 4, 10]}) >>> input_node = input_evset.node() >>> output_node = input_node.moving_sum(5) >>> output_evset = tp.run(output_node, input_evset) >>> # Equivalent >>> output_evset = output_node.run(input_evset) >>> # Also equivalent >>> output_evset = tp.run(output_node, {input_node: input_evset}) ``` Multiple inputs and outputs example: ```python >>> evset_1 = tp.event_set(timestamps=[1, 2, 3], features={"f1": [0.1, 42, 10]}) >>> evset_2 = tp.event_set(timestamps=[1, 2, 3], ... features={"f2": [-1.5, 50, 30]}, ... same_sampling_as=evset_1 ... ) >>> # Graph with 2 inputs and 2 steps >>> input_1 = evset_1.node() >>> input_2 = evset_2.node() >>> step_1 = input_1 + input_2 >>> step_2 = step_1.simple_moving_average(2) >>> # Get step_1 and step_2 at once >>> evset_step_1, evset_step_2 = tp.run([step_1, step_2], ... {input_1: evset_1, input_2: evset_2} ... ) >>> # Equivalent evset_step_1, evset_step_2 = tp.run( ... [step_1, step_2], ... [evset_1, evset_2], ... ) >>> # Also equivalent. EventSets are mapped by their .node(), not by position. >>> evset_step_1, evset_step_2 = tp.run( ... [step_1, step_2], ... [evset_2, evset_1], ... ) ``` Args: query: EventSetNodes to compute. Supports EventSetNode, dict of EventSetNodes and list of EventSetNodes. input: Event sets to be used for the computation. Supports EventSet, list of EventSets, dict of EventSetNodes to EventSets, and dict of EventSetNode names to EventSet. If a single EventSet or list of EventSet, they must be named and will be used as input for the EventSetNodes with the same name. If a dict of EventSetNode names to EventSet, they will be used as input for the EventSetNodes with those names. If a dict of EventSetNodes to event sets, they will be used as input for those EventSetNodes. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. check_execution: If true, the input and output of the op implementation are validated to check any bug in the library internal code. If false, checks are skipped. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds. Returns: An object with the same structure as `query` containing the results. If `query` is a dictionary of EventSetNodes, the returned object will be a dictionary of EventSet. If `query` is a list of EventSetNodes, the returned value will be a list of EventSet with the same order. """ # TODO: Create an internal configuration object for options such as # `check_execution`. 
begin_time = time.perf_counter() input = _normalize_input(input) normalized_query = _normalize_query(query) if verbose >= 1: print("Build schedule", file=sys.stderr, flush=True) # Schedule execution assert isinstance(normalized_query, set) input_nodes = set(input.keys()) schedule = build_schedule( inputs=input_nodes, outputs=normalized_query, verbose=verbose ) if verbose == 1: print( f"Run {len(schedule.steps)} operators", file=sys.stderr, ) elif verbose >= 2: print("Schedule:\n", schedule, file=sys.stderr) # Evaluate schedule # # Note: "outputs" is a dictionary of event (including the query events) to # event data. outputs = np_eval.run_schedule( input, schedule, verbose=verbose, check_execution=check_execution, force_garbage_collector_interval=force_garbage_collector_interval, ) end_time = time.perf_counter() if verbose == 1: print(f"Execution in {end_time - begin_time:.5f} s", file=sys.stderr) return _denormalize_outputs(outputs, query)
Calculates which operators need to be executed in which order to compute a set of output EventSetNodes given a set of input EventSetNodes. This implementation is based on Kahn's algorithm. Args: inputs: Input EventSetNodes. outputs: Output EventSetNodes. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: A Schedule whose steps list the operators in a valid execution order, such that the first operator should be computed before the second, the second before the third, etc.
def build_schedule( inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode], verbose: int = 0, ) -> Schedule: """Calculates which operators need to be executed in which order to compute a set of output EventSetNodes given a set of input EventSetNodes. This implementation is based on Kahn's algorithm. Args: inputs: Input EventSetNodes. outputs: Output EventSetNodes. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: Tuple of: - Ordered list of operators, such that the first operator should be computed before the second, second before the third, etc. - Mapping of EventSetNode name inputs to EventSetNodes. The keys are the string values in the `inputs` argument, and the values are the EventSetNodes corresponding to each one. If a value was already an EventSetNode, it won't be present in the returned dictionary. """ # List all EventSetNodes and operators in between inputs and outputs. # # Fails if the outputs cannot be computed from the inputs e.g. some inputs # are missing. graph = infer_graph(inputs, outputs) schedule = Schedule(input_nodes=graph.inputs) if verbose >= 2: print("Graph:\n", graph, file=sys.stderr) # Operators ready to be computed (i.e. ready to be added to "planned_ops") # as all their inputs are already computed by "planned_ops" or specified by # "inputs". ready_ops: List[Operator] = [] ready_ops_set: Set[Operator] = set() # "node_to_op[e]" is the list of operators with node "e" as input. node_to_op: Dict[EventSetNode, List[Operator]] = defaultdict(lambda: []) # "op_to_num_pending_inputs[op]" is the number of "not yet scheduled" inputs # of operator "op". Operators in "op_to_num_pending_inputs" have not yet # scheduled. op_to_num_pending_inputs: Dict[Operator, int] = defaultdict(lambda: 0) # Compute "node_to_op" and "op_to_num_pending_inputs". for op in graph.operators: num_pending_inputs = 0 for input_node in op.inputs.values(): node_to_op[input_node].append(op) if input_node in graph.inputs: # This input is already available continue num_pending_inputs += 1 if num_pending_inputs == 0: # Ready to be scheduled ready_ops.append(op) ready_ops_set.add(op) else: # Some of the inputs are missing. op_to_num_pending_inputs[op] = num_pending_inputs # Make evaluation order deterministic. # # Execute the op with smallest internal ordered id first. ready_ops.sort(key=lambda op: op._internal_ordered_id, reverse=True) # Compute the schedule while ready_ops: # Get an op ready to be scheduled op = ready_ops.pop() ready_ops_set.remove(op) # Nodes released after the op is executed released_nodes = [] for input in op.inputs.values(): if input in outputs: continue if input not in node_to_op: continue # The list of ops that depends on this input (including the current # op "op"). input_usage = node_to_op[input] input_usage.remove(op) if not input_usage: released_nodes.append(input) del node_to_op[input] # Schedule the op schedule.steps.append( ScheduleStep(op=op, released_nodes=released_nodes) ) # Update all the ops that depends on "op". Enlist the ones that are # ready to be computed for output in op.outputs.values(): if output not in node_to_op: continue for new_op in node_to_op[output]: # "new_op" depends on the result of "op". 
assert new_op in op_to_num_pending_inputs num_missing_inputs = op_to_num_pending_inputs[new_op] - 1 op_to_num_pending_inputs[new_op] = num_missing_inputs assert num_missing_inputs >= 0 if num_missing_inputs == 0: # "new_op" can be computed ready_ops.append(new_op) ready_ops_set.add(new_op) del op_to_num_pending_inputs[new_op] assert not op_to_num_pending_inputs return schedule
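The scheduling loop above follows Kahn's algorithm: operators whose inputs are all available are queued, and executing one may unlock its consumers. Below is a minimal, self-contained sketch of the same idea on a toy dependency graph; the names and data structures are illustrative and not part of Temporian.

```python
from collections import defaultdict
from typing import Dict, List, Set


def kahn_order(deps: Dict[str, Set[str]]) -> List[str]:
    """Orders ops so that each op appears after all of its dependencies.

    `deps` maps an op name to the set of op names it depends on.
    """
    pending = {op: set(d) for op, d in deps.items()}
    # Reverse mapping: op -> ops that consume its output.
    consumers: Dict[str, List[str]] = defaultdict(list)
    for op, d in deps.items():
        for dep in d:
            consumers[dep].append(op)

    ready = sorted(op for op, d in pending.items() if not d)
    order: List[str] = []
    while ready:
        op = ready.pop()
        order.append(op)
        for consumer in consumers[op]:
            pending[consumer].remove(op)
            if not pending[consumer]:
                ready.append(consumer)

    if any(pending.values()):
        raise ValueError("The dependency graph contains a cycle")
    return order


# "sum" needs "a" and "b"; "scale" needs "sum".
print(kahn_order({"a": set(), "b": set(), "sum": {"a", "b"}, "scale": {"sum"}}))
# ['b', 'a', 'sum', 'scale']
```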
Tests if a node depends on a leak operator. Tests if an [`EventSetNode`][temporian.EventSetNode] or collection of nodes depends on the only operator that can introduce future leakage: [`EventSet.leak()`][temporian.EventSet.leak]. Single input output example: ```python >>> a = tp.input_node([("f", tp.float32)]) >>> b = a.moving_sum(5) >>> c = b.leak(6) >>> d = c.prefix("my_prefix_") >>> e = d.moving_sum(7) >>> # The computation of "e" contains a leak. >>> tp.has_leak(e) True >>> # The computation of "e" given "d" does not contain a leak. >>> tp.has_leak(e, d) False ``` Args: output: Nodes to compute. Supports Node, dict of Nodes and list of Nodes. input: Optional input nodes. Supports Node, dict of Nodes and list of Nodes. If not specified, assumes the input nodes to be the raw data inputs, e.g. [`tp.input_node()`][temporian.input_node] and [`tp.event_set()`][temporian.event_set]. Returns: True if and only if the computation of `output` from `input` depends on an [`EventSet.leak()`][temporian.EventSet.leak] operator.
def has_leak( output: EventSetNodeCollection, input: Optional[EventSetNodeCollection] = None, ) -> bool: """Tests if a node depends on a leak operator. Tests if a [`EventSetNode`][temporian.EventSetNode] or collection of nodes depends on the only operator that can introduce future leakage: [`EventSet.leak()`][temporian.EventSet.leak]. Single input output example: ```python >>> a = tp.input_node([("f", tp.float32)]) >>> b = a.moving_sum(5) >>> c = b.leak(6) >>> d = c.prefix("my_prefix_") >>> e = d.moving_sum(7) >>> # The computation of "e" contains a leak. >>> tp.has_leak(e) True >>> # The computation of "e" given "d" does not contain a leak. >>> tp.has_leak(e, d) False ``` Args: output: Nodes to compute. Supports Node, dict of Nodes and list of Nodes. input: Optional input nodes. Supports Node, dict of Nodes and list of Nodes. If not specified, assumes for the input nodes to be the the raw data inputs e.g. [`tp.input_node()`][temporian.input_node] and [`tp.event_set()`][temporian.event_set]. Returns: True if and only if the computation of `output` from `inputs` depends on a [`EventSet.leak()`][temporian.EventSet.leak] operator. """ if input is None: normalized_input = None else: normalized_input = _normalize_query(input) normalized_output = _normalize_query(output) graph = infer_graph(inputs=normalized_input, outputs=normalized_output) leak_key = LeakOperator.operator_key() for operator in graph.operators: if operator.operator_key() == leak_key: return True return False
Normalizes an input into a dictionary of node to evsets.
def _normalize_input( input: NodeToEventSetMapping, ) -> Dict[EventSetNode, EventSet]: """Normalizes an input into a dictionary of node to evsets.""" if isinstance(input, dict): keys_are_node = all([isinstance(x, EventSetNode) for x in input.keys()]) values_are_node = all([isinstance(x, EventSet) for x in input.values()]) if not keys_are_node or not values_are_node: raise ValueError( "Invalid input argument. Dictionary input argument should be a" " dictionary of EventSetNode to EventSet. Instead, got" f" {input!r}" ) return input if isinstance(input, EventSet): return {input.node(): input} if isinstance(input, list): return {evset.node(): evset for evset in input} raise TypeError( "Evaluate input argument must be an EventSet, list of EventSet, or a" f" dictionary of EventSetNode to EventSets. Received {input!r} instead." )
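As a usage note, these three accepted forms mirror how data is typically fed to an evaluation call. A small sketch, assuming the public `tp.run()` entry point routes its `input` argument through this normalization (feature names are illustrative):

```python
import temporian as tp

evset = tp.event_set(timestamps=[1.0, 2.0, 3.0], features={"f": [1.0, 2.0, 3.0]})
node = evset.node()
result = node.moving_sum(window_length=2.0)

# All three forms should end up normalized to the same {EventSetNode: EventSet} mapping.
out_a = tp.run(result, evset)          # a single EventSet
out_b = tp.run(result, [evset])        # a list of EventSets
out_c = tp.run(result, {node: evset})  # an explicit mapping
```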
Normalizes a query into a set of query EventSetNodes.
def _normalize_query(query: EventSetNodeCollection) -> Set[EventSetNode]: """Normalizes a query into a list of query EventSetNodes.""" if isinstance(query, EventSetNode): return {query} if isinstance(query, set): return query if isinstance(query, list): return set(query) if isinstance(query, dict): return set(query.values()) raise TypeError( f"Evaluate query argument must be one of {EventSetNodeCollection}." f" Received {type(query)} instead." )
Converts outputs into the same format as the query.
def _denormalize_outputs( outputs: Dict[EventSetNode, EventSet], query: EventSetNodeCollection ) -> EventSetCollection: """Converts outputs into the same format as the query.""" if isinstance(query, EventSetNode): return outputs[query] if isinstance(query, list): return [outputs[k] for k in query] if isinstance(query, dict): return { query_key: outputs[query_evt] for query_key, query_evt in query.items() } raise RuntimeError("Unexpected case")
Extracts the nodes in between the output and input nodes. Unlike infer_graph, infer_graph_named_nodes requires the input and output nodes to be named.
def infer_graph_named_nodes( inputs: Optional[NamedEventSetNodes], outputs: NamedEventSetNodes ) -> Graph: """Extracts the nodes in between the output and input nodes. Unlike infer_graph, infer_graph_named_nodes requires for the input and output nodes to be named. """ normalized_inputs: Optional[Dict[str, EventSetNode]] = None input_nodes = None if inputs is not None: normalized_inputs = normalize_named_nodes(inputs) input_nodes = set(normalized_inputs.values()) normalized_outputs = normalize_named_nodes(outputs) output_nodes = set(normalized_outputs.values()) g = infer_graph(inputs=input_nodes, outputs=output_nodes) if normalized_inputs is None: normalized_inputs = normalize_named_nodes(list(g.inputs)) g.set_input_node_names(normalized_inputs) g.set_output_node_names(normalized_outputs) return g
Extracts the nodes in between the output and input nodes. If inputs is set, fails if outputs cannot be computed from `inputs`. If inputs is not set, infers the required set of inputs. Args: inputs: Set of available input nodes. If None, inputs are inferred. outputs: Set of expected output nodes. Returns: The inferred graph. Raises: ValueError: If there are repeated nodes in the `inputs`; an unexpected type of input is provided; an unnamed node is inferred as input; or some nodes are required but not provided.
def infer_graph( inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode] ) -> Graph: """Extracts the nodes in between the output and input nodes. If inputs is set, fails if outputs cannot be computed from `inputs`. If inputs is not set, infers the required set of inputs. Args: inputs: Set of available input nodes. If None, inputs are inferred. outputs: Set of expected output nodes. Returns: The inferred graph. Raises: ValueError: If there are repeated nodes in the `inputs`; an unexpected type of input is provided; an unnamed node is inferred as input; or some nodes are required but not provided. """ # The following algorithm lists all the nodes between the output and # input nodes. Informally, the algorithm works as follow: # # pending_node <= use outputs # done_node <= empty # # While pending node not empty: # Extract a node from pending_node # if node is a provided input node # continue # if node has no creator # record this node for future error / input inference # continue # Adds all the input nodes of node's creator op to the pending list # # Extract the names # outputs_set = outputs if isinstance(outputs, set) else # set(outputs.values()) # if inputs is None: # input_set = None # else: # input_set = inputs if isinstance(inputs, set) else # set(inputs.values()) graph = Graph() graph.outputs.update(outputs) # The next nodes to process. EventSetNodes are processed from the outputs to # the inputs. pending_nodes: Set[EventSetNode] = outputs.copy() # Features already processed. done_nodes: Set[EventSetNode] = set() # List of the missing nodes. Used to create an error message. missing_nodes: Set[EventSetNode] = set() while pending_nodes: # Select a node to process. node = next(iter(pending_nodes)) pending_nodes.remove(node) assert node not in done_nodes graph.add_node(node) if inputs is not None and node in inputs: # The feature is provided by the user. graph.inputs.add(node) continue if node.creator is None: # The node does not have a source. if inputs is not None: missing_nodes.add(node) else: graph.inputs.add(node) continue # Record the operator op. graph.add_operator(node.creator) # Add the parent nodes to the pending list. for input_node in node.creator.inputs.values(): if input_node in done_nodes: # Already processed. continue pending_nodes.add(input_node) # Record the operator outputs. While the user did not request # them, they will be created (and so, we need to track them). for output_node in node.creator.outputs.values(): graph.add_node(output_node) if missing_nodes: # Fail if not all nodes are sourced. raise ValueError( "The following input nodes are required but not provided as" f" input:\n{missing_nodes}" ) # Record all the features and samplings. for e in graph.nodes: graph.add_sampling(e.sampling_node) for f in e.feature_nodes: graph.add_feature(f) return graph
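One way to picture the traversal: start from the outputs and walk each node's creator backwards, recording operators and stopping at provided inputs or source nodes. A self-contained sketch with toy node/operator objects (these are illustrative stand-ins, not the library's classes):

```python
from dataclasses import dataclass
from typing import List, Optional, Set


@dataclass
class ToyOp:
    inputs: List["ToyNode"]


@dataclass
class ToyNode:
    name: str
    creator: Optional[ToyOp] = None


def infer_required_inputs(outputs: List[ToyNode]) -> Set[str]:
    """Returns the names of the source nodes needed to compute `outputs`."""
    required: Set[str] = set()
    pending = list(outputs)
    seen: Set[int] = set()
    while pending:
        node = pending.pop()
        if id(node) in seen:
            continue
        seen.add(id(node))
        if node.creator is None:
            required.add(node.name)  # Leaf node: must be provided by the user.
            continue
        pending.extend(node.creator.inputs)
    return required


a = ToyNode("a")
b = ToyNode("b")
c = ToyNode("c", creator=ToyOp(inputs=[a, b]))
d = ToyNode("d", creator=ToyOp(inputs=[c]))
print(infer_required_inputs([d]))  # {'a', 'b'}
```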
Normalizes a node or list of nodes into a dictionary of nodes.
def normalize_named_nodes(src: NamedEventSetNodes) -> Dict[str, EventSetNode]: """Normalizes a node or list of nodes into a dictionary of nodes.""" save_src = src if isinstance(src, EventSetNode): # Will be further processed after. src = [src] if isinstance(src, set): src = list(src) if isinstance(src, list): new_src = {} for node in src: if node.name is None: raise ValueError( "Input / output node or list nodes need to be named " 'with "node.name = ...". Alternatively, provide a ' "dictionary of nodes." ) new_src[node.name] = node src = new_src if not isinstance(src, dict): raise ValueError( f'Unexpected node(s) "{save_src}". Expecting dict of nodes, ' "list of nodes, or a single node." ) return src
Registers an operator.
def register_operator(operator_class: Type[Operator]): """Registers an operator.""" op_key = operator_class.operator_key() if op_key in _OPERATORS: raise ValueError("Operator already registered") _OPERATORS[op_key] = operator_class
Gets an operator class from a registered key.
def get_operator_class(key: str): """Gets an operator class from a registered key.""" if key not in _OPERATORS: raise ValueError( f"Unknown operator '{key}'. " f"Available operators are: {list(_OPERATORS.keys())}." ) return _OPERATORS[key]
Lists the registered operators.
def registered_operators() -> Dict[str, Any]: """Lists the registered operators.""" return _OPERATORS
(For test operators purposes only) Unregisters an operator.
def _unregister_operator(operator_class: Type[Operator]): """(For test operators purposes only) Unregisters an operator.""" op_key = operator_class.operator_key() if op_key in _OPERATORS: _OPERATORS.pop(op_key)
Saves a compiled Temporian function to a file. The saved function must only take [`EventSetOrNodes`][temporian.types.EventSetOrNode] as arguments, return a dictionary of names to EventSetOrNodes, and be decorated with [`@tp.compile`][temporian.compile]. Temporian saves the graph built between the function's input and output nodes, not the function itself. Any arbitrary code that is executed in the function will not be run when loading it back up and executing it. If you need to save a function that additionally takes other types of arguments, try using `functools.partial` to create a new function that takes only EventSetNodes, and save that instead. Note that the partial function needs to be compiled too, with `tp.compile(partial(...))`. Args: fn: The function to save. path: The path to save the function to. args: Positional arguments to pass to the function to trace it. The arguments can be either EventSets, EventSetNodes, or raw Schemas. In all cases, the values will be converted to EventSetNodes before being passed to the function to trace it. kwargs: Keyword arguments to pass to the function to trace it. Same restrictions as for `args`. Raises: ValueError: If the received function is not compiled. ValueError: If any of the received inputs is not of the specified types. ValueError: If the function doesn't return the specified type.
def save( fn: Callable[..., Dict[str, EventSetOrNode]], path: str, *args: Union[EventSetNode, EventSet, Schema], **kwargs: Union[EventSetNode, EventSet, Schema], ) -> None: """Saves a compiled Temporian function to a file. The saved function must only take [`EventSetOrNodes`][temporian.types.EventSetOrNode] as arguments, return a dictionary of names to EventSetOrNodes, and be decorated with [`@tp.compile`][temporian.compile]. Temporian saves the graph built between the function's input and output nodes, not the function itself. Any arbitrary code that is executed in the function will not be ran when loading it back up and executing it. If you need to save a function that additionally takes other types of arguments, try using `functools.partial` to create a new function that takes only EventSetNodes, and save that instead. Note that the partial function needs to be compiled too, with `tp.compile(partial(...))`. Args: fn: The function to save. path: The path to save the function to. args: Positional arguments to pass to the function to trace it. The arguments can be either EventSets, EventSetNodes, or raw Schemas. In all cases, the values will be converted to EventSetNodes before being passed to the function to trace it. kwargs: Keyword arguments to pass to the function to trace it. Same restrictions as for `args`. Raises: ValueError: If the received function is not compiled. ValueError: If any of the received inputs is not of the specified types. ValueError: If the function doesn't return the specified type. """ if not hasattr(fn, "is_tp_compiled"): raise ValueError( "Can only save a function that has been compiled with" " `@tp.compile`." ) merged_kwargs = _kwargs_from_args_and_kwargs( list(inspect.signature(fn).parameters.keys()), args, kwargs ) node_kwargs = {k: _process_fn_input(v) for k, v in merged_kwargs.items()} outputs = fn(**node_kwargs) _check_fn_outputs(outputs) save_graph(inputs=node_kwargs, outputs=outputs, path=path)
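The `functools.partial` workaround mentioned in the docstring above can look like the following sketch. The function, feature names, and file path are illustrative, and it assumes `tp.compile` accepts the partial object as described:

```python
from functools import partial

import temporian as tp


def preprocess(data, window_length):
    # `window_length` is not an EventSetNode, so the function cannot be saved as-is.
    return {"output": data.moving_sum(window_length)}


# Bind the extra argument, then compile the resulting single-argument function.
fn = tp.compile(partial(preprocess, window_length=10.0))

# Trace the function with an example input and save the resulting graph.
evset = tp.event_set(timestamps=[1.0, 2.0, 3.0], features={"f": [1.0, 2.0, 3.0]})
tp.save(fn, "/tmp/preprocess.tem", evset)
```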
Loads a compiled Temporian function from a file. The loaded function receives the same positional and keyword arguments and applies the same operator graph to its inputs as when it was saved. Args: path: The path to load the function from. Returns: The loaded function.
def load( path: str, ) -> Callable[..., Dict[str, EventSetNode]]: """Loads a compiled Temporian function from a file. The loaded function receives the same positional and keyword arguments and applies the same operator graph to its inputs as when it was saved. Args: path: The path to load the function from. Returns: The loaded function. """ g = _load_graph(path) inputs = g.named_inputs assert inputs is not None input_names = list(inputs.keys()) @compile def fn( *args: EventSetNode, **kwargs: EventSetNode, ) -> Dict[str, EventSetNode]: kwargs = _kwargs_from_args_and_kwargs(input_names, args, kwargs) return g.apply_on_inputs(named_inputs=kwargs) fn.__signature__ = inspect.signature(fn).replace( parameters=[ inspect.Parameter( name=k, annotation=EventSetNode, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, ) for k in inputs ] ) return fn
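Continuing the sketch above, the saved function can be restored and applied to new data. This assumes the compiled wrapper returned by `tp.load` can be called directly on EventSets (the path and names are the same illustrative ones used earlier):

```python
import temporian as tp

loaded = tp.load("/tmp/preprocess.tem")

new_data = tp.event_set(timestamps=[4.0, 5.0], features={"f": [4.0, 5.0]})
outputs = loaded(new_data)  # Same positional/keyword interface as when saved.
print(outputs["output"])
```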
Saves the graph between the `inputs` and `outputs` [`EventSetNodes`][temporian.EventSetNode] to a file. Usage example: ```python >>> evset = tp.event_set( ... timestamps=[1, 2, 3], ... features={"input_feature": [0, 42, 10]} ... ) >>> # Create a graph >>> a = evset.node() >>> b = a.moving_sum(2) >>> b = b.rename("result_feature") >>> # Check evaluation >>> b.run({a: evset}) indexes: [] features: [('result_feature', int64)] events: (3 events): timestamps: [1. 2. 3.] 'result_feature': [ 0 42 52] ... >>> # Save the graph >>> file_path = tmp_dir / "graph.tem" >>> tp.save_graph( ... inputs={"input_node": a}, ... outputs={"output_node": b}, ... path=file_path, ... ) >>> # Load the graph >>> inputs, outputs = tp.load_graph(path=file_path) >>> # Evaluate reloaded graph >>> a_reloaded = inputs["input_node"] >>> b_reloaded = outputs["output_node"] >>> b_reloaded.run({a_reloaded: evset}) indexes: [] features: [('result_feature', int64)] events: (3 events): timestamps: [1. 2. 3.] 'result_feature': [ 0 42 52] ... ``` Args: inputs: Input EventSetNodes. If None, the inputs is inferred. In this case, input EventSetNodes have to be named. outputs: Output EventSetNodes. path: File path to save to.
def save_graph( inputs: Optional[graph.NamedEventSetNodes], outputs: graph.NamedEventSetNodes, path: str, ) -> None: """Saves the graph between the `inputs` and `outputs` [`EventSetNodes`][temporian.EventSetNode] to a file. Usage example: ```python >>> evset = tp.event_set( ... timestamps=[1, 2, 3], ... features={"input_feature": [0, 42, 10]} ... ) >>> # Create a graph >>> a = evset.node() >>> b = a.moving_sum(2) >>> b = b.rename("result_feature") >>> # Check evaluation >>> b.run({a: evset}) indexes: [] features: [('result_feature', int64)] events: (3 events): timestamps: [1. 2. 3.] 'result_feature': [ 0 42 52] ... >>> # Save the graph >>> file_path = tmp_dir / "graph.tem" >>> tp.save_graph( ... inputs={"input_node": a}, ... outputs={"output_node": b}, ... path=file_path, ... ) >>> # Load the graph >>> inputs, outputs = tp.load_graph(path=file_path) >>> # Evaluate reloaded graph >>> a_reloaded = inputs["input_node"] >>> b_reloaded = outputs["output_node"] >>> b_reloaded.run({a_reloaded: evset}) indexes: [] features: [('result_feature', int64)] events: (3 events): timestamps: [1. 2. 3.] 'result_feature': [ 0 42 52] ... ``` Args: inputs: Input EventSetNodes. If None, the inputs is inferred. In this case, input EventSetNodes have to be named. outputs: Output EventSetNodes. path: File path to save to. """ # TODO: Add support for compressed / binary serialization. g = graph.infer_graph_named_nodes(inputs=inputs, outputs=outputs) proto = _serialize(g) with open(path, "wb") as f: f.write(text_format.MessageToBytes(proto))
Loads a Temporian graph from a file. See [`tp.save()`][temporian.save] and [`tp.save_graph()`][temporian.save_graph] for usage examples. Args: path: File path to load from. squeeze: If true, and if the input/output contains a single EventSetNode, returns an EventSetNode (instead of a dictionary of EventSetNodes). Returns: Input and output EventSetNodes.
def load_graph( path: str, squeeze: bool = False ) -> Tuple[ Union[EventSetNode, Dict[str, EventSetNode]], Union[EventSetNode, Dict[str, EventSetNode]], ]: """Loads a Temporian graph from a file. See [`tp.save()`][temporian.save] and [`tp.save_graph()`][temporian.save_graph] for usage examples. Args: path: File path to load from. squeeze: If true, and if the input/output contains a single EventSetNode, returns an EventSetNode (instead of a dictionary of EventSetNodes). Returns: Input and output EventSetNodes. """ g = _load_graph(path=path) inputs = g.named_inputs outputs = g.named_outputs assert inputs is not None assert outputs is not None if squeeze and len(inputs) == 1: inputs = list(inputs.values())[0] if squeeze and len(outputs) == 1: outputs = list(outputs.values())[0] return inputs, outputs
Merges args and kwargs into a single name->value param dict.
def _kwargs_from_args_and_kwargs( param_names: List[str], args: Tuple[Any, ...], kwargs: Dict[str, Any], ) -> Dict[str, Any]: """Merges args and kwargs into a single name->value param dict.""" if len(args) > len(param_names): raise ValueError( f"The function takes {len(param_names)} arguments, but" f" {len(args)} positional arguments were received." ) # zip stops at the shortest iterable, extra param names are ignored arg_kwargs = {k: v for k, v in zip(param_names, args)} for k in arg_kwargs: if k in kwargs: raise ValueError( f"The function received multiple values for the argument {k}." ) return {**arg_kwargs, **kwargs}
Serializes a graph into a protobuffer.
def _serialize(src: graph.Graph) -> pb.Graph: """Serializes a graph into a protobuffer.""" if src.named_inputs is None: raise ValueError( "Cannot serialize a graph without named input EventSetNodes" ) if src.named_outputs is None: raise ValueError( "Cannot serialize a graph without named output EventSetNodes" ) return pb.Graph( operators=[_serialize_operator(o) for o in src.operators], nodes=[_serialize_node(e, src.operators) for e in src.nodes], features=[_serialize_feature(f, src.operators) for f in src.features], samplings=[ _serialize_sampling(s, src.operators) for s in src.samplings ], inputs=[ _serialize_io_signature(k, e) for k, e in src.named_inputs.items() ], outputs=[ _serialize_io_signature(k, e) for k, e in src.named_outputs.items() ], )
Unserializes a protobuffer into a graph.
def _unserialize(src: pb.Graph) -> graph.Graph: """Unserializes a protobuffer into a graph.""" # Decode the components. # All the fields except for the "creator" ones are set. samplings = {s.id: _unserialize_sampling(s) for s in src.samplings} features = {f.id: _unserialize_feature(f) for f in src.features} nodes = {e.id: _unserialize_node(e, samplings, features) for e in src.nodes} operators = {o.id: _unserialize_operator(o, nodes) for o in src.operators} # Set the creator fields. def get_creator(op_id: str) -> base.Operator: if op_id not in operators: logging.info(operators) raise ValueError(f"Non existing creator operator {op_id}") return operators[op_id] for src_node in src.nodes: if src_node.creator_operator_id: nodes[src_node.id].creator = get_creator( src_node.creator_operator_id ) for src_feature in src.features: if src_feature.creator_operator_id: features[src_feature.id].creator = get_creator( src_feature.creator_operator_id ) for src_sampling in src.samplings: if src_sampling.creator_operator_id: samplings[src_sampling.id].creator = get_creator( src_sampling.creator_operator_id ) # Copy extracted items. g = graph.Graph() for sampling in samplings.values(): g.samplings.add(sampling) for node in nodes.values(): g.nodes.add(node) for feature in features.values(): g.features.add(feature) for operator in operators.values(): g.operators.add(operator) # IO Signature def get_node(node_id: str) -> EventSetNode: if node_id not in nodes: raise ValueError(f"Non existing node {node_id}") return nodes[node_id] g.named_inputs = {} g.named_outputs = {} for item in src.inputs: node = get_node(item.node_id) g.inputs.add(node) g.named_inputs[item.key] = node for item in src.outputs: node = get_node(item.node_id) g.outputs.add(get_node(item.node_id)) g.named_outputs[item.key] = node return g
Creates a unique identifier for an object within a graph.
def _identifier(item: Any) -> str: """Creates a unique identifier for an object within a graph.""" if item is None: raise ValueError("Cannot get id of None") return str(id(item))
Creates a unique identifier for an object within a graph. Returns None if the object is None or, when `options` is provided, if the object is not in `options`.
def _identifier_or_none( item: Any, options: Optional[Set[Any]] = None ) -> Optional[str]: """Creates a unique identifier for an object within a graph.""" if item is None: return None if options is not None and item not in options: return None return str(id(item))
Builds the set of identifiers of a collection of nodes/features/...
def _all_identifiers(collection: Any) -> Set[str]: """Builds the set of identifiers of a collection of nodes/features/...""" return {_identifier(x) for x in collection}
Converts input value from milliseconds to a `Duration` in seconds. Example: ```python >>> duration = tp.duration.milliseconds(250) >>> duration 0.25 >>> # Usage in a window operation >>> a = tp.event_set( ... timestamps=[0.5, 1.0, 1.2], ... features={"f1": [1, 5, -5]} ... ) >>> a.moving_sum(window_length=duration) indexes: ... timestamps: [0.5 1. 1.2] 'f1': [1 5 0] ... ``` Args: value: Number of milliseconds. Returns: Equivalent number of seconds.
def milliseconds(value: Union[int, float]) -> Duration: """Converts input value from milliseconds to a `Duration` in seconds. Example: ```python >>> duration = tp.duration.milliseconds(250) >>> duration 0.25 >>> # Usage in a window operation >>> a = tp.event_set( ... timestamps=[0.5, 1.0, 1.2], ... features={"f1": [1, 5, -5]} ... ) >>> a.moving_sum(window_length=duration) indexes: ... timestamps: [0.5 1. 1.2] 'f1': [1 5 0] ... ``` Args: value: Number of milliseconds. Returns: Equivalent number of seconds. """ return float(value / 1000)
Converts input value from seconds to a `Duration` in seconds. Since the `Duration` object is equivalent to a `float` value in seconds, this method does nothing else than casting the input to `float`. It may be used in order to make the code more explicit. Explicit time units: ```python >>> duration = tp.duration.seconds(3) >>> duration 3.0 >>> # Usage in a window operation >>> a = tp.event_set( ... timestamps=[1, 2, 6], ... features={"f1": [1, 5, -5]}, ... ) >>> a.moving_sum(window_length=duration) indexes: ... timestamps: [1. 2. 6.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of seconds. Returns: Same number of seconds.
def seconds(value: Union[int, float]) -> Duration: """Converts input value from seconds to a `Duration` in seconds. Since the `Duration` object is equivalent to a `float` value in seconds, this method does nothing else than casting the input to `float`. It may be used in order to make the code more explicit. Explicit time units: ```python >>> duration = tp.duration.seconds(3) >>> duration 3.0 >>> # Usage in a window operation >>> a = tp.event_set( ... timestamps=[1, 2, 6], ... features={"f1": [1, 5, -5]}, ... ) >>> a.moving_sum(window_length=duration) indexes: ... timestamps: [1. 2. 6.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of seconds. Returns: Same number of seconds. """ return float(value)
Converts input value from minutes to a `Duration` in seconds. Example: ```python >>> timestamps = [tp.duration.minutes(i) for i in [5, 10, 30]] >>> timestamps [300.0, 600.0, 1800.0] >>> # Usage in a window operation >>> a = tp.event_set(timestamps=timestamps, features={"f1": [1, 5, -5]}) >>> a.moving_sum(window_length=tp.duration.minutes(6)) indexes: ... timestamps: [ 300. 600. 1800.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of minutes. Returns: Equivalent number of seconds.
def minutes(value: Union[int, float]) -> Duration: """Converts input value from minutes to a `Duration` in seconds. Example: ```python >>> timestamps = [tp.duration.minutes(i) for i in [5, 10, 30]] >>> timestamps [300.0, 600.0, 1800.0] >>> # Usage in a window operation >>> a = tp.event_set(timestamps=timestamps, features={"f1": [1, 5, -5]}) >>> a.moving_sum(window_length=tp.duration.minutes(6)) indexes: ... timestamps: [ 300. 600. 1800.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of minutes. Returns: Equivalent number of seconds. """ return float(value * 60)
Converts input value from hours to a `Duration` in seconds. Example: ```python >>> timestamps = [tp.duration.hours(i) for i in [1, 2, 10]] >>> timestamps [3600.0, 7200.0, 36000.0] >>> # Usage in a window operation >>> a = tp.event_set(timestamps=timestamps, features={"f1": [1, 5, -5]}) >>> a.moving_sum(window_length=tp.duration.hours(2)) indexes: ... timestamps: [ 3600. 7200. 36000.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of hours. Returns: Equivalent number of seconds.
def hours(value: Union[int, float]) -> Duration: """Converts input value from hours to a `Duration` in seconds. Example: ```python >>> timestamps = [tp.duration.hours(i) for i in [1, 2, 10]] >>> timestamps [3600.0, 7200.0, 36000.0] >>> # Usage in a window operation >>> a = tp.event_set(timestamps=timestamps, features={"f1": [1, 5, -5]}) >>> a.moving_sum(window_length=tp.duration.hours(2)) indexes: ... timestamps: [ 3600. 7200. 36000.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of hours. Returns: Equivalent number of seconds. """ return float(value * 60 * 60)
Converts input value from number of days to a `Duration` in seconds. Example: ```python >>> a = tp.event_set( ... # Dates are converted to unix timestamps ... timestamps=["2020-01-01", "2020-01-02", "2020-01-31"], ... features={"f1": [1, 5, -5]} ... ) >>> a.moving_sum(window_length=tp.duration.days(2)) indexes: ... timestamps: ['2020-01-01T00:00:00' '2020-01-02T00:00:00' '2020-01-31T00:00:00'] 'f1': [ 1 6 -5] ... ``` Args: value: number of days. Returns: Equivalent number of seconds.
def days(value: Union[int, float]) -> Duration: """Converts input value from number of days to a `Duration` in seconds. Example: ```python >>> a = tp.event_set( ... # Dates are converted to unix timestamps ... timestamps=["2020-01-01", "2020-01-02", "2020-01-31"], ... features={"f1": [1, 5, -5]} ... ) >>> a.moving_sum(window_length=tp.duration.days(2)) indexes: ... timestamps: ['2020-01-01T00:00:00' '2020-01-02T00:00:00' '2020-01-31T00:00:00'] 'f1': [ 1 6 -5] ... ``` Args: value: number of days. Returns: Equivalent number of seconds. """ return float(value * 60 * 60 * 24)
Converts input value from number of weeks to a `Duration` in seconds. ```python >>> a = tp.event_set( ... # Dates are converted to unix timestamps ... timestamps=["2020-01-01", "2020-01-07", "2020-01-31"], ... features={"f1": [1, 5, -5]} ... ) >>> a.moving_sum(window_length=tp.duration.weeks(2)) indexes: ... timestamps: ['2020-01-01T00:00:00' '2020-01-07T00:00:00' '2020-01-31T00:00:00'] 'f1': [ 1 6 -5] ... ``` Args: value: Number of weeks. Returns: Equivalent number of seconds.
def weeks(value: Union[int, float]) -> Duration: """Converts input value from number of weeks to a `Duration` in seconds. ```python >>> a = tp.event_set( ... # Dates are converted to unix timestamps ... timestamps=["2020-01-01", "2020-01-07", "2020-01-31"], ... features={"f1": [1, 5, -5]} ... ) >>> a.moving_sum(window_length=tp.duration.weeks(2)) indexes: ... timestamps: ['2020-01-01T00:00:00' '2020-01-07T00:00:00' '2020-01-31T00:00:00'] 'f1': [ 1 6 -5] ... ``` Args: value: Number of weeks. Returns: Equivalent number of seconds. """ return float(value * 60 * 60 * 24 * 7)
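Since every converter above returns a plain `float` number of seconds, durations compose with ordinary arithmetic before being passed to an operator. A short illustration (feature values are arbitrary):

```python
import temporian as tp

# One week plus two days, expressed in seconds.
window = tp.duration.weeks(1) + tp.duration.days(2)
print(window)  # 777600.0

evset = tp.event_set(
    timestamps=["2020-01-01", "2020-01-05", "2020-01-20"],
    features={"f": [1, 2, 3]},
)
result = evset.moving_sum(window_length=window)
```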
Converts a unix timestamp in seconds to datetime (UTC). Args: timestamp: Single timestamp in seconds. Returns: Single UTC datetime.
def convert_timestamp_to_datetime(timestamp: Timestamp) -> datetime.datetime: """Converts a unix timestamp in seconds to datetime (UTC). Args: timestamp: Single timestamp in seconds. Returns: Single UTC datetime. """ norm_timestamp = normalize_timestamp(timestamp) return datetime.datetime.fromtimestamp( norm_timestamp, tz=datetime.timezone.utc )
Converts unix timestamps in seconds to a list of datetimes (UTC). Example: ```python >>> convert_timestamps_to_datetimes([0, 1689791856]) [datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), datetime.datetime(2023, 7, 19, 18, 37, 36, tzinfo=datetime.timezone.utc)] ``` Args: ts: Iterable of timestamps, in seconds. Returns: List of UTC datetimes.
def convert_timestamps_to_datetimes( ts: Iterable[Timestamp], ) -> List[datetime.datetime]: """Converts unix timestamps in seconds to a list of datetimes (UTC). Example: ```python >>> convert_timestamps_to_datetimes([0, 1689791856]) [datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), datetime.datetime(2023, 7, 19, 18, 37, 36, tzinfo=datetime.timezone.utc)] ``` Args: ts: Iterable of timestamps, in seconds. Returns: List of UTC datetimes. """ return [convert_timestamp_to_datetime(t) for t in ts]
Converts a date value to a number representing the Unix timestamp. If a float or int, it is returned as float. If a date, it is converted to a Unix timestamp (number of seconds from Unix epoch). Args: date: Date to convert. Returns: Unix timestamp (seconds elapsed from unix epoch). Raises: TypeError: unsupported type. Supported types are: - float and int - np.datetime64 - datetime.datetime - datetime.date
def convert_date_to_duration(date: Timestamp) -> NormalizedDuration: """Converts date value to a number representing the Unix timestamp. If a float or int, it is returned as float. If a date, it is converted to a Unix timestamp (number of seconds from Unix epoch). Args: date: Date to convert. Returns: Unix timestamp (seconds elapsed from unix epoch). Raises: TypeError: unsupported type. Supported types are: - np.datetime64 - datetime.datetime """ # if it is already a number, return it as float if isinstance(date, float): return date if isinstance(date, int): return float(date) # if it is a date, convert it to unix timestamp if isinstance(date, np.datetime64): return convert_numpy_datetime64_to_duration(date) if isinstance(date, datetime.datetime): return convert_datetime_to_duration(date) if isinstance(date, datetime.date): return convert_datetime_date_to_duration(date) raise TypeError(f"Unsupported type: {type(date)}")
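For reference, the three date-like branches above all resolve to the same UTC Unix timestamp. A quick worked check using only the standard library and NumPy (independent of Temporian's internal module layout):

```python
import datetime

import numpy as np

# 2020-01-01T00:00:00 UTC corresponds to the Unix timestamp 1577836800.
dt = datetime.datetime(2020, 1, 1)
print(dt.replace(tzinfo=datetime.timezone.utc).timestamp())  # 1577836800.0

d64 = np.datetime64("2020-01-01")
print(float(d64.astype("datetime64[s]").astype("float64")))  # 1577836800.0

day = datetime.date(2020, 1, 1)
midnight = datetime.datetime.combine(day, datetime.time(0, 0))
print(midnight.replace(tzinfo=datetime.timezone.utc).timestamp())  # 1577836800.0
```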
Convert numpy datetime64 to duration epoch UTC.
def convert_numpy_datetime64_to_duration( date: np.datetime64, ) -> NormalizedDuration: """Convert numpy datetime64 to duration epoch UTC.""" return float(date.astype("datetime64[s]").astype("float64"))
Convert datetime to duration epoch UTC.
def convert_datetime_to_duration(date: datetime.datetime) -> NormalizedDuration: """Convert datetime to duration epoch UTC.""" return float(date.replace(tzinfo=datetime.timezone.utc).timestamp())
Convert date to duration epoch UTC.
def convert_datetime_date_to_duration( date: datetime.date, ) -> NormalizedDuration: """Convert date to duration epoch UTC.""" return convert_datetime_to_duration( datetime.datetime.combine(date, datetime.time(0, 0)) )
Returns the abbreviation for a duration. Args: duration: Duration in seconds. cutoff: Cutoff for the abbreviation. For example, if cutoff is "day", the smallest unit will be days. Possible options are "week", "day", "hour", "minute", "seconds" and "milliseconds". Default is "milliseconds". Returns: Abbreviation for the duration.
def duration_abbreviation( duration: Duration, cutoff: Union[str, TimeUnit] = "milliseconds" ) -> str: """Returns the abbreviation for a duration. Args: duration: Duration in seconds. cutoff: Cutoff for the abbreviation. For example, if cutoff is "day", the smallest unit will be days. Possible options are "week", "day", "hour" and "minute", "seconds" and "milliseconds". Default is "milliseconds". Returns: Abbreviation for the duration. """ # check cutoff is a TimeUnit or if its a string that is a valid TimeUnit if not TimeUnit.is_valid(cutoff): raise ValueError( f"Invalid cutoff: {cutoff}. Possible options are: {list(TimeUnit)}" ) duration_str = "" if duration < 0: duration = -duration if duration >= weeks(1): duration_str += f"{int(duration / weeks(1))}w" if cutoff == "week": return duration_str duration = duration % weeks(1) if duration >= days(1): duration_str += f"{int(duration / days(1))}d" if cutoff == "day": return duration_str duration = duration % days(1) if duration >= hours(1): duration_str += f"{int(duration / hours(1))}h" if cutoff == "hour": return duration_str duration = duration % hours(1) if duration >= minutes(1): duration_str += f"{int(duration / minutes(1))}min" if cutoff == "minute": return duration_str duration = duration % minutes(1) if duration >= seconds(1): duration_str += f"{int(duration / seconds(1))}s" if cutoff == "seconds": return duration_str duration = duration % seconds(1) if duration >= milliseconds(1): duration_str += f"{int(duration / milliseconds(1))}ms" return duration_str return duration_str
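Worked examples of what the logic above produces, assuming `duration_abbreviation` (defined above) is in scope; the values follow directly from the unit thresholds in the code:

```python
>>> duration_abbreviation(90)
'1min30s'
>>> duration_abbreviation(90, cutoff="minute")
'1min'
>>> duration_abbreviation(60 * 60 * 36)
'1d12h'
>>> duration_abbreviation(-45.5)
'45s500ms'
```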
Creates an input [`EventSetNode`][temporian.EventSetNode]. An input EventSetNode can be used to feed data into a graph. Usage example: ```python >>> # Without index >>> a = tp.input_node(features=[("f1", tp.float64), ("f2", tp.str_)]) >>> # With an index >>> a = tp.input_node( ... features=[("f1", tp.float64), ("f2", tp.str_)], ... indexes=["f2"], ... ) >>> # Two nodes with the same sampling >>> a = tp.input_node(features=[("f1", tp.float64)]) >>> b = tp.input_node(features=[("f2", tp.float64)], same_sampling_as=a) ``` Args: features: List of names and dtypes of the features. indexes: List of names and dtypes of the index. If empty, the data is assumed not indexed. is_unix_timestamp: If true, the timestamps are interpreted as unix timestamps in seconds. same_sampling_as: If set, the created EventSetNode is guaranteed to have the same sampling as same_sampling_as`. In this case, `indexes` and `is_unix_timestamp` should not be provided. Some operators require for input EventSetNodes to have the same sampling. name: Name for the EventSetNode. Returns: EventSetNode with the given specifications.
def input_node( features: Union[List[FeatureSchema], List[Tuple[str, DType]]], indexes: Optional[ Union[List[IndexSchema], List[Tuple[str, IndexDType]]] ] = None, is_unix_timestamp: bool = False, same_sampling_as: Optional[EventSetNode] = None, name: Optional[str] = None, ) -> EventSetNode: """Creates an input [`EventSetNode`][temporian.EventSetNode]. An input EventSetNode can be used to feed data into a graph. Usage example: ```python >>> # Without index >>> a = tp.input_node(features=[("f1", tp.float64), ("f2", tp.str_)]) >>> # With an index >>> a = tp.input_node( ... features=[("f1", tp.float64), ("f2", tp.str_)], ... indexes=["f2"], ... ) >>> # Two nodes with the same sampling >>> a = tp.input_node(features=[("f1", tp.float64)]) >>> b = tp.input_node(features=[("f2", tp.float64)], same_sampling_as=a) ``` Args: features: List of names and dtypes of the features. indexes: List of names and dtypes of the index. If empty, the data is assumed not indexed. is_unix_timestamp: If true, the timestamps are interpreted as unix timestamps in seconds. same_sampling_as: If set, the created EventSetNode is guaranteed to have the same sampling as same_sampling_as`. In this case, `indexes` and `is_unix_timestamp` should not be provided. Some operators require for input EventSetNodes to have the same sampling. name: Name for the EventSetNode. Returns: EventSetNode with the given specifications. """ if same_sampling_as is not None: if indexes is not None: raise ValueError( "indexes cannot be provided with same_sampling_as=True" ) return create_node_new_features_existing_sampling( features=features, sampling_node=same_sampling_as, name=name, creator=None, ) else: if indexes is None: indexes = [] return create_node_new_features_new_sampling( features=features, indexes=indexes, is_unix_timestamp=is_unix_timestamp, name=name, creator=None, )
Creates an input [`EventSetNode`][temporian.EventSetNode] from a schema. Usage example: ```python >>> # Create two nodes with the same schema. >>> a = tp.input_node(features=[("f1", tp.float64), ("f2", tp.str_)]) >>> b = tp.input_node_from_schema(a.schema) ``` Args: schema: Schema of the node. same_sampling_as: If set, the created EventSetNode is guaranteed to have the same sampling as `same_sampling_as`. Some operators require their input EventSetNodes to have the same sampling. name: Name for the EventSetNode. Returns: EventSetNode with the given specifications.
def input_node_from_schema( schema: Schema, same_sampling_as: Optional[EventSetNode] = None, name: Optional[str] = None, ) -> EventSetNode: """Creates an input [`EventSetNode`][temporian.EventSetNode] from a schema. Usage example: ```python >>> # Create two nodes with the same schema. >>> a = tp.input_node(features=[("f1", tp.float64), ("f2", tp.str_)]) >>> b = tp.input_node_from_schema(a.schema) ``` Args: schema: Schema of the node. same_sampling_as: If set, the created EventSetNode is guaranteed to have the same sampling as same_sampling_as`. In this case, `indexes` and `is_unix_timestamp` should not be provided. Some operators require for input EventSetNodes to have the same sampling. name: Name for the EventSetNode. Returns: EventSetNode with the given specifications. """ return input_node( features=schema.features, indexes=schema.indexes, is_unix_timestamp=schema.is_unix_timestamp, same_sampling_as=same_sampling_as, name=name, )
Creates an EventSetNode with an existing sampling and new features. When possible, this is the EventSetNode creation function to use.
def create_node_new_features_existing_sampling( features: Union[List[FeatureSchema], List[Tuple[str, DType]]], sampling_node: EventSetNode, creator: Optional[Operator], name: Optional[str] = None, ) -> EventSetNode: """Creates an EventSetNode with an existing sampling and new features. When possible, this is the EventSetNode creation function to use. """ # TODO: Use better way assert sampling_node is not None assert features is not None assert isinstance(sampling_node, EventSetNode) assert isinstance(features, List) assert ( len(features) == 0 or isinstance(features[0], FeatureSchema) or isinstance(features[0], tuple) ) return EventSetNode( schema=Schema( features=features, # The indexes and is_unix_timestamp are defined by the sampling. indexes=sampling_node.schema.indexes, is_unix_timestamp=sampling_node.schema.is_unix_timestamp, ), # Making use to use the same sampling reference. sampling=sampling_node.sampling_node, # New features. features=[Feature(creator=creator) for _ in features], name=name, creator=creator, )
Creates an EventSetNode with a new sampling and new features.
def create_node_new_features_new_sampling( features: Union[List[FeatureSchema], List[Tuple[str, DType]]], indexes: Union[List[IndexSchema], List[Tuple[str, IndexDType]]], is_unix_timestamp: bool, creator: Optional[Operator], name: Optional[str] = None, ) -> EventSetNode: """Creates an EventSetNode with a new sampling and new features.""" # TODO: Use better way assert isinstance(features, List) assert ( len(features) == 0 or isinstance(features[0], FeatureSchema) or isinstance(features[0], tuple) ) return EventSetNode( schema=Schema( features=features, indexes=indexes, is_unix_timestamp=is_unix_timestamp, ), # New sampling sampling=Sampling(creator=creator), # New features. features=[Feature(creator=creator) for _ in features], name=name, creator=creator, )
Creates an EventSetNode with NEW features and NEW sampling. If sampling is not specified, a new sampling is created. Similarly, if features is not specified, new features are created.
def create_node_with_new_reference( schema: Schema, sampling: Optional[Sampling] = None, features: Optional[List[Feature]] = None, name: Optional[str] = None, creator: Optional[Operator] = None, ) -> EventSetNode: """Creates an EventSetNode with NEW features and NEW sampling. If sampling is not specified, a new sampling is created. Similarly, if features is not specified, new features are created. """ if sampling is None: sampling = Sampling(creator=creator) if features is None: features = [Feature(creator=creator) for _ in schema.features] assert len(features) == len(schema.features) return EventSetNode( schema=schema, sampling=sampling, features=features, name=name, creator=creator, )
Combines events from multiple [`EventSets`][temporian.EventSet] together. Input events must have the same features (i.e. same feature names and dtypes) and index schemas (i.e. same index names and dtypes). Combine is different from `glue` and `join`, since those append together different features. Args: *inputs: EventSets to combine their events. how: Whether to use the indexes from "outer" (union of all inputs' index values), "inner" (only those present in all inputs) or "left" (only use index values from the first input). Basic example: ```python >>> a = tp.event_set(timestamps=[0, 1, 3], ... features={"A": [0, 10, 30], "B": [0, -10, -30]} ... ) >>> b = tp.event_set(timestamps=[1, 4], ... features={"A": [10, 40], "B": [-10, -40]} ... ) >>> # Inputs a and b have some duplicated timestamps >>> c = tp.combine(a, b) >>> c indexes: [] features: [('A', int64), ('B', int64)] events: (5 events): timestamps: [0. 1. 1. 3. 4.] 'A': [ 0 10 10 30 40] 'B': [ 0 -10 -10 -30 -40] ... >>> # Events with duplicated timestamps can be unified afterwards >>> unique_t = c.unique_timestamps() >>> d = c.moving_sum(window_length=tp.duration.shortest, sampling=unique_t) >>> d indexes: [] features: [('A', int64), ('B', int64)] events: (4 events): timestamps: [0. 1. 3. 4.] 'A': [ 0 20 30 40] 'B': [ 0 -20 -30 -40] ... ``` Example with different index values ```python # Index "idx=a" is only in a, "idx=b" in both, "idx=c" only in b >>> a = tp.event_set(timestamps=[0, 1, 3], ... features={"A": [0, 10, 30], ... "idx": ["a", "a", "b"]}, ... indexes=["idx"], ... ) >>> b = tp.event_set(timestamps=[1.5, 4.5, 5.5], ... features={"A": [15, 45, 55], ... "idx": ["b", "c", "c"]}, ... indexes=["idx"] ... ) >>> # By default, "outer" uses index values from all inputs >>> c = tp.combine(a, b) >>> c indexes: [('idx', str_)] features: [('A', int64)] events: idx=b'a' (2 events): timestamps: [0. 1.] 'A': [ 0 10] idx=b'b' (2 events): timestamps: [1.5 3. ] 'A': [15 30] idx=b'c' (2 events): timestamps: [4.5 5.5] 'A': [45 55] ... >>> # Use "left" to use only index values from the first input a >>> c = tp.combine(a, b, how="left") >>> c indexes: [('idx', str_)] features: [('A', int64)] events: idx=b'a' (2 events): timestamps: [0. 1.] 'A': [ 0 10] idx=b'b' (2 events): timestamps: [1.5 3. ] 'A': [15 30] ... >>> # Use "inner" to use only index values that are present in all inputs >>> c = tp.combine(a, b, how="inner") >>> c indexes: [('idx', str_)] features: [('A', int64)] events: idx=b'b' (2 events): timestamps: [1.5 3. ] 'A': [15 30] ... ``` Returns: An EventSet with events from all inputs combined.
def combine( *inputs: EventSetOrNode, how: Union[str, How] = How.outer, ) -> EventSetOrNode: """ Combines events from multiple [`EventSets`][temporian.EventSet] together. Input events must have the same features (i.e. same feature names and dtypes) and index schemas (i.e. same index names and dtypes). Combine is different from `glue` and `join`, since those append together different features. Args: *inputs: EventSets to combine their events. how: Whether to use the indexes from "outer" (union of all inputs' index values), "inner" (only those present in all inputs) or "left" (only use index values from the first input). Basic example: ```python >>> a = tp.event_set(timestamps=[0, 1, 3], ... features={"A": [0, 10, 30], "B": [0, -10, -30]} ... ) >>> b = tp.event_set(timestamps=[1, 4], ... features={"A": [10, 40], "B": [-10, -40]} ... ) >>> # Inputs a and b have some duplicated timestamps >>> c = tp.combine(a, b) >>> c indexes: [] features: [('A', int64), ('B', int64)] events: (5 events): timestamps: [0. 1. 1. 3. 4.] 'A': [ 0 10 10 30 40] 'B': [ 0 -10 -10 -30 -40] ... >>> # Events with duplicated timestamps can be unified afterwards >>> unique_t = c.unique_timestamps() >>> d = c.moving_sum(window_length=tp.duration.shortest, sampling=unique_t) >>> d indexes: [] features: [('A', int64), ('B', int64)] events: (4 events): timestamps: [0. 1. 3. 4.] 'A': [ 0 20 30 40] 'B': [ 0 -20 -30 -40] ... ``` Example with different index values ```python # Index "idx=a" is only in a, "idx=b" in both, "idx=c" only in b >>> a = tp.event_set(timestamps=[0, 1, 3], ... features={"A": [0, 10, 30], ... "idx": ["a", "a", "b"]}, ... indexes=["idx"], ... ) >>> b = tp.event_set(timestamps=[1.5, 4.5, 5.5], ... features={"A": [15, 45, 55], ... "idx": ["b", "c", "c"]}, ... indexes=["idx"] ... ) >>> # By default, "outer" uses index values from all inputs >>> c = tp.combine(a, b) >>> c indexes: [('idx', str_)] features: [('A', int64)] events: idx=b'a' (2 events): timestamps: [0. 1.] 'A': [ 0 10] idx=b'b' (2 events): timestamps: [1.5 3. ] 'A': [15 30] idx=b'c' (2 events): timestamps: [4.5 5.5] 'A': [45 55] ... >>> # Use "left" to use only index values from the first input a >>> c = tp.combine(a, b, how="left") >>> c indexes: [('idx', str_)] features: [('A', int64)] events: idx=b'a' (2 events): timestamps: [0. 1.] 'A': [ 0 10] idx=b'b' (2 events): timestamps: [1.5 3. ] 'A': [15 30] ... >>> # Use "inner" to use only index values that are present in all inputs >>> c = tp.combine(a, b, how="inner") >>> c indexes: [('idx', str_)] features: [('A', int64)] events: idx=b'b' (2 events): timestamps: [1.5 3. ] 'A': [15 30] ... ``` Returns: An EventSet with events from all inputs combined. """ if not How.is_valid(how): raise ValueError(f"Invalid argument: {how=}. Options are {list(How)}") how = How[how] if len(inputs) == 1 and isinstance(inputs[0], EventSetNode): return inputs[0] # NOTE: input name must match op. definition name inputs_dict = { f"{_INPUT_KEY_PREFIX}{idx}": input for idx, input in enumerate(inputs) } return Combine(how=how, **inputs_dict).outputs["output"]
Concatenates features from [`EventSets`][temporian.EventSet] with the same sampling. Feature names cannot be duplicated across EventSets. See the examples below for workarounds on gluing EventSets with duplicated feature names or different samplings. Example: ```python >>> a = tp.event_set( ... timestamps=[0, 1, 5], ... features={"M": [0, 10, 50], "N": [50, 100, 500]}, ... ) >>> b = a["M"] + a["N"] >>> c = a["M"] - a["N"] # Glue all features from a,b,c >>> d = tp.glue(a, b.rename("plus_N"), c.rename("minus_N")) >>> d indexes: [] features: [('M', int64), ('N', int64), ('plus_N', int64), ('minus_N', int64)] events: (3 events): timestamps: [0. 1. 5.] 'M': [ 0 10 50] 'N': [ 50 100 500] 'plus_N': [ 50 110 550] 'minus_N': [ -50 -90 -450] ... ``` To glue EventSets with duplicated feature names, add a prefix or rename them before. Example with duplicated names: ```python >>> a = tp.event_set( ... timestamps=[0, 1, 5], ... features={"M": [0, 10, 50], "N": [50, 100, 500]}, ... ) # Same feature names as a >>> b = 3 * a # Add a prefix before glue >>> output = tp.glue(a, b.prefix("3x")) >>> output.schema.features [('M', int64), ('N', int64), ('3xM', int64), ('3xN', int64)] # Or rename before glue >>> output = tp.glue(a["M"], b["M"].rename("M_new")) >>> output.schema.features [('M', int64), ('M_new', int64)] ``` To concatenate EventSets with different samplings, use [`EventSet.resample()`][temporian.EventSet.resample] first. Example with different samplings: ```python >>> a = tp.event_set(timestamps=[0, 2], features={"A": [0, 20]}) >>> b = tp.event_set(timestamps=[0, 2], features={"B": [1, 21]}) >>> c = tp.event_set(timestamps=[1, 4], features={"C": [10, 40]}) >>> output = tp.glue(a, b.resample(a), c.resample(a)) >>> output indexes: [] features: [('A', int64), ('B', int64), ('C', int64)] events: (2 events): timestamps: [0. 2.] 'A': [ 0 20] 'B': [ 1 21] 'C': [ 0 10] ... ``` Args: *inputs: EventSets to concatenate the features of. Returns: EventSet with concatenated features.
def glue( *inputs: EventSetOrNode, ) -> EventSetOrNode: """Concatenates features from [`EventSets`][temporian.EventSet] with the same sampling. Feature names cannot be duplicated across EventSets. See the examples below for workarounds on gluing EventSets with duplicated feature names or different samplings. Example: ```python >>> a = tp.event_set( ... timestamps=[0, 1, 5], ... features={"M": [0, 10, 50], "N": [50, 100, 500]}, ... ) >>> b = a["M"] + a["N"] >>> c = a["M"] - a["N"] # Glue all features from a,b,c >>> d = tp.glue(a, b.rename("plus_N"), c.rename("minus_N")) >>> d indexes: [] features: [('M', int64), ('N', int64), ('plus_N', int64), ('minus_N', int64)] events: (3 events): timestamps: [0. 1. 5.] 'M': [ 0 10 50] 'N': [ 50 100 500] 'plus_N': [ 50 110 550] 'minus_N': [ -50 -90 -450] ... ``` To glue EventSets with duplicated feature names, add a prefix or rename them before. Example with duplicated names: ```python >>> a = tp.event_set( ... timestamps=[0, 1, 5], ... features={"M": [0, 10, 50], "N": [50, 100, 500]}, ... ) # Same feature names as a >>> b = 3 * a # Add a prefix before glue >>> output = tp.glue(a, b.prefix("3x")) >>> output.schema.features [('M', int64), ('N', int64), ('3xM', int64), ('3xN', int64)] # Or rename before glue >>> output = tp.glue(a["M"], b["M"].rename("M_new")) >>> output.schema.features [('M', int64), ('M_new', int64)] ``` To concatenate EventSets with different samplings, use [`EventSet.resample()`][temporian.EventSet.resample] first. Example with different samplings: ```python >>> a = tp.event_set(timestamps=[0, 2], features={"A": [0, 20]}) >>> b = tp.event_set(timestamps=[0, 2], features={"B": [1, 21]}) >>> c = tp.event_set(timestamps=[1, 4], features={"C": [10, 40]}) >>> output = tp.glue(a, b.resample(a), c.resample(a)) >>> output indexes: [] features: [('A', int64), ('B', int64), ('C', int64)] events: (2 events): timestamps: [0. 2.] 'A': [ 0 20] 'B': [ 1 21] 'C': [ 0 10] ... ``` Args: *inputs: EventSets to concatenate the features of. Returns: EventSet with concatenated features. """ if len(inputs) == 1 and isinstance(inputs[0], EventSetNode): return inputs[0] inputs_dict = { f"{_INPUT_KEY_PREFIX}{idx}": input for idx, input in enumerate(inputs) } return GlueOperator(**inputs_dict).outputs["output"]
Evaluates a schedule on a dictionary of input [`EventSets`][temporian.EventSet]. Args: inputs: Mapping of EventSetNodes to materialized EventSets. schedule: Sequence of operators to apply on the data. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. check_execution: If `True`, data of the intermediate results of the operators is checked against its expected structure and raises if it differs. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds.
def run_schedule( inputs: Dict[EventSetNode, EventSet], schedule: Schedule, verbose: int, check_execution: bool, force_garbage_collector_interval: Optional[float] = 10, ) -> Dict[EventSetNode, EventSet]: """Evaluates a schedule on a dictionary of input [`EventSets`][temporian.EventSet]. Args: inputs: Mapping of EventSetNodes to materialized EventSets. schedule: Sequence of operators to apply on the data. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. check_execution: If `True`, data of the intermediate results of the operators is checked against its expected structure and raises if it differs. force_garbage_collector_interval: If set, triggers the garbage collection every "force_garbage_collector_interval" seconds. """ data = {**inputs} gc_begin_time = time.time() num_steps = len(schedule.steps) for step_idx, step in enumerate(schedule.steps): operator_def = step.op.definition # Get implementation implementation_cls = implementation_lib.get_implementation_class( operator_def.key ) # Instantiate implementation implementation = implementation_cls(step.op) if verbose == 1: print( f" {step_idx+1} / {num_steps}: {step.op.operator_key()}", file=sys.stderr, end="", flush=True, ) elif verbose >= 2: print("=============================", file=sys.stderr) print( f"{step_idx+1} / {num_steps}: Run {step.op}", file=sys.stderr, flush=True, ) # Construct operator inputs operator_inputs = { input_key: data[input_node] for input_key, input_node in step.op.inputs.items() } if verbose >= 2: print( f"Inputs:\n{operator_inputs}\n", file=sys.stderr, flush=True, ) # Compute output begin_time = time.perf_counter() if check_execution: operator_outputs = implementation.call(**operator_inputs) else: operator_outputs = implementation(**operator_inputs) end_time = time.perf_counter() if verbose == 1: print( f" [{end_time - begin_time:.5f} s]", file=sys.stderr, flush=True, ) elif verbose >= 2: print(f"Outputs:\n{operator_outputs}\n", file=sys.stderr) print( f"Duration: {end_time - begin_time} s", file=sys.stderr, flush=True, ) # materialize data in output nodes for output_key, output_node in step.op.outputs.items(): output_evset = operator_outputs[output_key] output_evset._internal_node = output_node data[output_node] = output_evset # Release unused memory for node in step.released_nodes: assert node in data del data[node] if ( force_garbage_collector_interval is not None and (time.time() - gc_begin_time) >= force_garbage_collector_interval ): begin_gc = time.time() if verbose >= 2: print("Garbage collection", file=sys.stderr, flush=True, end="") gc.collect() gc_begin_time = time.time() if verbose >= 2: print( f" [{gc_begin_time - begin_gc:.5f} s]", file=sys.stderr, flush=True, ) return data
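Putting the pieces together, a sketch of how a schedule is built and then executed directly; this mirrors what the public evaluation entry point does internally. `build_schedule` and `run_schedule` are assumed to be imported from their defining modules (the exact paths depend on the internal layout, so they are not shown):

```python
import temporian as tp

# build_schedule and run_schedule are assumed to be imported from their
# defining modules (import paths omitted on purpose).
evset = tp.event_set(timestamps=[1.0, 2.0, 3.0], features={"f": [1.0, 2.0, 3.0]})
a = evset.node()
b = a.moving_sum(window_length=2.0)

# Plan the operators needed to compute `b` from `a`, then execute the plan.
schedule = build_schedule(inputs={a}, outputs={b})
results = run_schedule({a: evset}, schedule, verbose=0, check_execution=True)
print(results[b])
```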
Registers an operator implementation.
def register_operator_implementation( operator_class, operator_implementation_class ): """Registers an operator implementation.""" op_key = operator_class.operator_key() if op_key in _OPERATOR_IMPLEMENTATIONS: raise ValueError("Operator implementation already registered") _OPERATOR_IMPLEMENTATIONS[op_key] = operator_implementation_class
Gets an operator implementation class from a registered key.
def get_implementation_class(key: str): """Gets an operator implementation class from a registered key.""" if key not in _OPERATOR_IMPLEMENTATIONS: raise ValueError( f"Unknown operator implementation '{key}'. Available operator " f"implementations are: {list(_OPERATOR_IMPLEMENTATIONS.keys())}." ) return _OPERATOR_IMPLEMENTATIONS[key]
Lists the registered operator implementations.
def registered_implementations() -> Dict[str, Any]: """Lists the registered operator implementations.""" return _OPERATOR_IMPLEMENTATIONS
HTML representation, mainly for IPython notebooks.
def display_html(evset: EventSet) -> str: """HTML representation, mainly for IPython notebooks.""" # Create DOM impl = minidom.getDOMImplementation() assert impl is not None dom = impl.createDocument(None, "div", None) top = dom.documentElement # Other configs convert_datetime = evset.schema.is_unix_timestamp feature_schemas = evset.schema.features all_index_keys = evset.get_index_keys(sort=True) num_indexes = len(all_index_keys) num_features = len(evset.schema.features) # If limit=0 or None, set limit=len max_indexes = config.display_max_indexes or num_indexes max_features = config.display_max_features or num_features has_hidden_feats = num_features > max_features visible_feats = feature_schemas[:max_features] # Header with features and indexes. top.appendChild(display_html_header(dom, evset)) # Create one table and header per index value for index_key in all_index_keys[:max_indexes]: index_data = evset.data[index_key] num_timestamps = len(index_data.timestamps) max_timestamps = ( min(config.display_max_events, num_timestamps) if config.display_max_events != None else num_timestamps ) if max_timestamps == 1: display_timestamps = index_data.timestamps[ :1 ] # Just take the first timestamp display_features = [ values[:1] for values in index_data.features ] # And the corresponding features else: # Slices timestamps and features if there are more than 'max_timestamps' events half_max_timestamps = max_timestamps // 2 display_timestamps = ( np.concatenate( ( index_data.timestamps[:half_max_timestamps], index_data.timestamps[-half_max_timestamps:], ) ) if num_timestamps > max_timestamps else index_data.timestamps ) display_features = ( [ np.concatenate( ( values[:half_max_timestamps], values[-half_max_timestamps:], ) ) for values in index_data.features ] if num_timestamps > max_timestamps else index_data.features ) # Display index values html_index_value = html_div(dom) top.appendChild(html_index_value) html_index_value.appendChild(html_style_bold(dom, "index")) html_index_value.appendChild(html_text(dom, " (")) last_index_key_idx = len(index_key) - 1 for idx, (item_value, item_schema) in enumerate( zip(index_key, evset.schema.indexes) ): html_index_value.appendChild( html_style( dom, f"{item_schema.name}: ", _HTML_STYLE_INDEX_KEY, bold=True, ) ) if isinstance(item_value, bytes): item_value = item_value.decode() html_index_value.appendChild( html_style( dom, str(item_value), _HTML_STYLE_INDEX_VALUE, ) ) if idx != last_index_key_idx: html_index_value.appendChild(html_text(dom, ", ")) html_index_value.appendChild( html_text(dom, f") with {num_timestamps} events") ) # Table with column names table = html_style(dom, dom.createElement("table"), _HTML_STYLE_TABLE) col_names = ["timestamp"] + [ html_style(dom, feature.name, _HTML_STYLE_FEATURE_KEY) for feature in visible_feats ] if has_hidden_feats: col_names += [ELLIPSIS] table.appendChild(html_table_row(dom, col_names, header=True)) # Rows with events for timestamp_idx, timestamp in enumerate(display_timestamps): row = [] # Timestamp column timestamp_repr = ( convert_timestamp_to_datetime(timestamp) if convert_datetime else repr_float_html(timestamp) ) row.append(f"{timestamp_repr}") # Feature values for feature_idx, feature_schema in enumerate(visible_feats): row.append( repr_value_html( display_features[feature_idx][timestamp_idx], feature_schema.dtype, ) ) # Add ... 
            # column on the right, when some features are hidden.
            if has_hidden_feats:
                row.append(ELLIPSIS)

            table.appendChild(html_table_row(dom, row))

            # Create an ellipsis row between the first and last halves when
            # there are more than 'max_timestamps' events.
            if (
                timestamp_idx == ((max_timestamps // 2) - 1)
                and num_timestamps > max_timestamps
            ) or (max_timestamps == 1):
                ellipsis_row = [ELLIPSIS] * (
                    1 + len(visible_feats) + int(has_hidden_feats)
                )
                table.appendChild(html_table_row(dom, ellipsis_row))

        top.appendChild(table)

    # If there are hidden indexes, show how many
    if num_indexes > max_indexes:
        hidden_indexes = num_indexes - max_indexes
        top.appendChild(
            html_text(
                dom, f"{ELLIPSIS} ({hidden_indexes} more indexes not shown)"
            )
        )

    return top.toprettyxml(indent="  ")
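A small usage sketch: `display_html` takes a materialized EventSet and returns the HTML markup as a string, which is what notebook-style displays typically consume. The `event_set` helper used here is the one defined later in this file.

evset = event_set(
    timestamps=[1, 2, 3],
    features={"f1": [1.0, 2.0, 3.0], "f2": ["a", "b", "c"]},
)
html = display_html(evset)  # "<div>..." markup built with xml.dom.minidom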
Repr for a list of features.
def repr_features_text(evset: EventSet, features: List[np.ndarray]) -> str: """Repr for a list of features.""" max_features = config.print_max_features # 0 will print all feature_repr = [] for idx, (feature_schema, feature_data) in enumerate( zip(evset.schema.features, features) ): if max_features and (idx + 1) > max_features: feature_repr.append("...") break feature_repr.append(f"'{feature_schema.name}': {feature_data}") return "\n".join(feature_repr)
Converts a numpy dtype into a temporian dtype.
def numpy_dtype_to_tp_dtype(feature_name: str, numpy_dtype) -> DType: """Converts a numpy dtype into a temporian dtype.""" if numpy_dtype not in _DTYPE_MAPPING: raise ValueError( f"Features {feature_name!r} with dtype {numpy_dtype} cannot be" " imported in Temporian. Supported" f" dtypes={list(_DTYPE_MAPPING.keys())}." ) return _DTYPE_MAPPING[numpy_dtype]
Gets the matching temporian dtype of a numpy array.
def numpy_array_to_tp_dtype( feature_name: str, numpy_array: np.ndarray ) -> DType: """Gets the matching temporian dtype of a numpy array.""" return numpy_dtype_to_tp_dtype(feature_name, numpy_array.dtype.type)
Normalizes a list of feature values to temporian format. Keep this function in sync with the documentation of "io.event_set". `normalize_features` should match `_DTYPE_MAPPING`.
def normalize_features( feature_values: Any, name: str, ) -> np.ndarray: """Normalizes a list of feature values to temporian format. Keep this function in sync with the documentation of "io.event_set". `normalize_features` should match `_DTYPE_MAPPING`. """ logging.debug("Normalizing feature %s", name) def _str_to_bytes(feat_array: np.ndarray) -> np.ndarray: """Encode string/object/bytes to np.bytes, using UTF-8 encoding""" return np.char.encode(feat_array, "UTF-8") # Convert pandas, list, tuples -> np.ndarray if str(type(feature_values)) == "<class 'pandas.core.series.Series'>": logging.debug("From pandas.Series") if feature_values.dtype == "object": feature_values = feature_values.fillna("") feature_values = feature_values.to_numpy(copy=True) elif isinstance(feature_values, (tuple, list)): logging.debug("From list") # Convert list/tuple to array feature_values = np.array(feature_values) elif not isinstance(feature_values, np.ndarray): raise ValueError( "Feature values should be provided in a tuple, list, numpy array or" f" pandas Series. Got type {type(feature_values)} instead." ) # Next steps: Assume np.ndarray, normalize dtype assert isinstance(feature_values, np.ndarray) array_dtype = feature_values.dtype.type # Convert np.datetime -> np.float64 if array_dtype == np.datetime64: logging.debug("From np.datetime64") # nanosecond resolution as in timestamps feature_values = datetime64_array_to_float64(feature_values) # Convert np.object_, np.str_ -> np.bytes_ elif array_dtype == np.str_: logging.debug("From np.str_") feature_values = _str_to_bytes(feature_values) elif array_dtype == np.object_: logging.debug("From np.object_") logging.warning( ( 'Feature "%s" is an array of numpy.object_ and will be' " casted to numpy.string_ (Note: numpy.string_ is" " equivalent to numpy.bytes_)." ), name, ) feature_values = _str_to_bytes(feature_values.astype(str, copy=False)) return feature_values
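A few concrete cases following the branches above; outputs are always numpy arrays, and string-like inputs end up as UTF-8 encoded `np.bytes_`:

import numpy as np

normalize_features([1, 2, 3], "f")             # list -> integer numpy array
normalize_features(np.array(["a", "b"]), "f")  # np.str_ -> np.bytes_ array
# Object arrays are cast to str (with a warning) and then encoded to bytes.
normalize_features(np.array(["a", 1], dtype=object), "f")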
Normalizes timestamps to temporian format. Keep this function in sync with the documentation of "io.event_set". Returns: Normalized timestamps (numpy float64 of unix epoch in seconds) and if the raw timestamps look like a unix epoch.
def normalize_timestamps( values: Any, ) -> Tuple[np.ndarray, bool]: """Normalizes timestamps to temporian format. Keep this function in sync with the documentation of "io.event_set". Returns: Normalized timestamps (numpy float64 of unix epoch in seconds) and if the raw timestamps look like a unix epoch. """ # Convert to numpy array if not isinstance(values, np.ndarray): values = np.array(values) # values is represented as a number. Copy and cast to float64. if np.issubdtype(values.dtype, np.integer) or np.issubdtype( values.dtype, np.floating ): values = values.astype(np.float64, copy=True) if values.dtype.type == np.float64: # Check NaN if np.isnan(values).any(): raise ValueError("Timestamps contain NaN values.") return values, False if values.dtype.type in [np.str_, np.bytes_, np.object_]: # Raises ValueError if cannot parse a value values = values.astype("datetime64[ns]") if values.dtype.type == np.datetime64: # values is a date. Cast to unix epoch in float64 seconds. values = datetime64_array_to_float64(values) return values, True raise ValueError( f"Invalid timestamps array dtype={values.dtype}." " Supported types are: integers, floating point, strings or objects." )
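An illustration of the two return modes: numeric inputs are kept as float64 values and flagged as non-unix, while date-like inputs are converted to unix epoch seconds and flagged as unix:

import numpy as np

values, is_unix = normalize_timestamps([1, 2, 3])
# values -> array([1., 2., 3.]), is_unix -> False

values, is_unix = normalize_timestamps(np.array(["2023-01-01", "2023-01-02"]))
# values -> float64 unix epoch seconds, is_unix -> True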
Normalizes a list of index keys. If `indexes` is None, returns `available_indexes` if it is provided, or an empty list otherwise.
def normalize_index_key_list(
    indexes: Optional[IndexKeyList],
    available_indexes: Optional[List[IndexKey]] = None,
) -> List[NormalizedIndexKey]:
    """Normalizes a list of index keys.

    If `indexes` is None, returns `available_indexes` if it is provided, or
    an empty list otherwise."""

    if indexes is None:
        if available_indexes is not None:
            # All available indexes
            normalized_indexes = available_indexes
        else:
            normalized_indexes = []

    elif isinstance(indexes, list):
        # e.g. indexes=["a", ("b",)]
        normalized_indexes = [
            v if isinstance(v, tuple) else (v,) for v in indexes
        ]

    elif isinstance(indexes, tuple):
        # e.g. indexes=("a",)
        normalized_indexes = [indexes]

    else:
        # e.g. indexes="a"
        normalized_indexes = [(indexes,)]

    normalized_indexes = [normalize_index_key(x) for x in normalized_indexes]

    return normalized_indexes
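How the accepted shapes are normalized; each entry always ends up as a tuple, and the individual items are further normalized by `normalize_index_key` (not shown in this file):

normalize_index_key_list("a")                # one key: ("a",)
normalize_index_key_list(["a", ("b", "c")])  # two keys: ("a",) and ("b", "c")
normalize_index_key_list(None, available_indexes=[("a",)])  # the available keys
normalize_index_key_list(None)               # -> []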
Normalizes a tp DType or python type to a tp DType.
def normalize_dtype(x: Any) -> DType: """Normalizes a tp DType or python type to a tp DType.""" if isinstance(x, DType): return x elif x in PY_TYPE_TO_DTYPE: return PY_TYPE_TO_DTYPE[x] raise ValueError(f"Cannot normalize {x!r} as a DType.")
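For example, python types map to their Temporian counterparts through `PY_TYPE_TO_DTYPE`, while DType values pass through unchanged:

normalize_dtype(DType.FLOAT64)  # -> DType.FLOAT64 (unchanged)
normalize_dtype(float)          # -> DType.FLOAT64
normalize_dtype("float64")      # -> raises ValueError (strings not accepted)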
Normalizes a TargetDtypes object to either a single DType, a dictionary of feature names to DTypes, or a dictionary of DTypes to DTypes. Only one of the three items in the output tuple will not be None.
def normalize_target_dtypes( input: EventSetNode, target: TargetDtypes, ) -> Tuple[ Optional[DType], Optional[Dict[str, DType]], Optional[Dict[DType, DType]], ]: """Normalizes a TargetDtypes object to either a single DType, a dictionary of feature names to DTypes, or a dictionary of DTypes to DTypes. Only one of the three items in the output tuple will not be None.""" # Convert 'target' to one of these: dtype: Optional[DType] = None feature_name_to_dtype: Optional[Dict[str, DType]] = None dtype_to_dtype: Optional[Dict[DType, DType]] = None if isinstance(target, dict): keys_are_strs = all(isinstance(v, str) for v in target.keys()) keys_are_dtypes = all( isinstance(v, (DType, type)) for v in target.keys() ) values_are_dtypes = all( isinstance(v, (DType, type)) for v in target.values() ) if keys_are_strs and values_are_dtypes: feature_name_to_dtype = { key: normalize_dtype(value) for key, value in target.items() } input_feature_names = input.schema.feature_names() for feature_name in feature_name_to_dtype.keys(): if feature_name not in input_feature_names: raise ValueError(f"Unknown feature {feature_name!r}") elif keys_are_dtypes and values_are_dtypes: dtype_to_dtype = { normalize_dtype(key): normalize_dtype(value) for key, value in target.items() } elif isinstance(target, DType) or target in [float, int, str, bool]: dtype = normalize_dtype(target) if ( dtype is None and feature_name_to_dtype is None and dtype_to_dtype is None ): raise ValueError( "`target` should be one of the following: (1) a Temporian dtype" " e.g. tp.float64, (2) a dictionary of feature name (str) to" " temporian dtype, or (3) a dictionary of temporian dtype to" " temporian dtype. Alternatively, Temporian dtypes can be replaced" " with python type. For example cast(..., target=float) is" " equivalent to cast(..., target=tp.float64).\nInstead got," f" `target` = {target!r}." ) return dtype, feature_name_to_dtype, dtype_to_dtype
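The three accepted shapes and the slot of the returned triple they populate; exactly one slot is non-None. Here `node` stands for any EventSetNode whose schema has a feature named "sales" (a hypothetical example):

normalize_target_dtypes(node, float)
# -> (DType.FLOAT64, None, None)

normalize_target_dtypes(node, {"sales": float})
# -> (None, {"sales": DType.FLOAT64}, None)

normalize_target_dtypes(node, {DType.INT64: DType.FLOAT64})
# -> (None, None, {DType.INT64: DType.FLOAT64})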
Builds a list of output dtypes for the input based on the output of normalize_target_dtypes.
def build_dtypes_list_from_target_dtypes( input: EventSetNode, dtype: Optional[DType] = None, dtype_to_dtype: Optional[Dict[DType, DType]] = None, feature_name_to_dtype: Optional[Dict[str, DType]] = None, ) -> List[DType]: """Builds a list of output dtypes for the input based on the output of normalize_target_dtypes.""" if dtype is not None: return [dtype] * len(input.schema.features) if feature_name_to_dtype is not None: return [ feature_name_to_dtype.get(f.name, f.dtype) for f in input.schema.features ] if dtype_to_dtype is not None: return [ dtype_to_dtype.get(f.dtype, f.dtype) for f in input.schema.features ] # If none were set, return the input dtypes return [f.dtype for f in input.schema.features]
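Continuing with a hypothetical input schema of two features, ("sales", INT64) and ("store", STRING), the helper expands whichever form was produced into one output dtype per input feature:

build_dtypes_list_from_target_dtypes(node, dtype=DType.FLOAT64)
# -> [DType.FLOAT64, DType.FLOAT64]

build_dtypes_list_from_target_dtypes(
    node, feature_name_to_dtype={"sales": DType.FLOAT64}
)
# -> [DType.FLOAT64, DType.STRING]  (unmentioned features keep their dtype)

build_dtypes_list_from_target_dtypes(
    node, dtype_to_dtype={DType.INT64: DType.FLOAT64}
)
# -> [DType.FLOAT64, DType.STRING]  (only matching dtypes are replaced)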
Creates an [`EventSet`][temporian.EventSet] from arrays (lists, NumPy arrays, Pandas Series.) Usage examples: ```python >>> # Creates an EventSet with 4 timestamps and 3 features. >>> evset = tp.event_set( ... timestamps=[1, 2, 3, 4], ... features={ ... "feature_1": [0.5, 0.6, np.nan, 0.9], ... "feature_2": ["red", "blue", "red", "blue"], ... "feature_3": [10, -1, 5, 5], ... }, ... ) >>> # Creates an EventSet with an index. >>> evset = tp.event_set( ... timestamps=[1, 2, 3, 4], ... features={ ... "feature_1": [0.5, 0.6, np.nan, 0.9], ... "feature_2": ["red", "blue", "red", "blue"], ... }, ... indexes=["feature_2"], ... ) >>> # Create an EventSet with datetimes. >>> from datetime import datetime >>> evset = tp.event_set( ... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)], ... features={ ... "feature_1": [0.5, 0.6], ... "feature_2": ["red", "blue"], ... }, ... indexes=["feature_2"], ... ) ``` Supported values for `timestamps`: - List of int, float, str, bytes and datetime. - Numpy arrays of int{32, 64}, float{32, 64}, str_, string_ / bytes_, Numpy datetime64, and object containing "str". - Pandas series of int{32, 64}, float{32, 64}, Pandas Timestamp. String timestamps are interpreted as ISO 8601 datetime. Supported values for `features`: - List of int, float, str, bytes, bool, and datetime. - Numpy arrays of int{32, 64}, float{32, 64}, str_, string_ / bytes_, Numpy datetime64, or object containing "str". - Pandas series of int{32, 64}, float{32, 64}, Pandas Timestamp. Date / datetime features are converted to int64 unix times. NaN for float-like features are interpreted as missing values. Args: timestamps: Array of timestamps values. features: Dictionary of feature names to feature values. Feature and timestamp arrays must be of the same length. indexes: Names of the features to use as indexes. If empty (default), the data is not indexed. Only integer and string features can be used as indexes. name: Optional name of the EventSet. Used for debugging, and graph serialization. is_unix_timestamp: Whether the timestamps correspond to unix time. Unix times are required for calendar operators. If `None` (default), timestamps are interpreted as unix times if the `timestamps` argument is an array of date or date-like object. same_sampling_as: If set, the new EventSet is checked and tagged as having the same sampling as `same_sampling_as`. Some operators, such as [`EventSet.filter()`][temporian.EventSet.filter], require their inputs to have the same sampling. Returns: An EventSet.
def event_set( timestamps: DataArray, features: Optional[Dict[str, DataArray]] = None, indexes: Optional[List[str]] = None, name: Optional[str] = None, is_unix_timestamp: Optional[bool] = None, same_sampling_as: Optional[EventSet] = None, ) -> EventSet: """Creates an [`EventSet`][temporian.EventSet] from arrays (lists, NumPy arrays, Pandas Series.) Usage examples: ```python >>> # Creates an EventSet with 4 timestamps and 3 features. >>> evset = tp.event_set( ... timestamps=[1, 2, 3, 4], ... features={ ... "feature_1": [0.5, 0.6, np.nan, 0.9], ... "feature_2": ["red", "blue", "red", "blue"], ... "feature_3": [10, -1, 5, 5], ... }, ... ) >>> # Creates an EventSet with an index. >>> evset = tp.event_set( ... timestamps=[1, 2, 3, 4], ... features={ ... "feature_1": [0.5, 0.6, np.nan, 0.9], ... "feature_2": ["red", "blue", "red", "blue"], ... }, ... indexes=["feature_2"], ... ) >>> # Create an EventSet with datetimes. >>> from datetime import datetime >>> evset = tp.event_set( ... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)], ... features={ ... "feature_1": [0.5, 0.6], ... "feature_2": ["red", "blue"], ... }, ... indexes=["feature_2"], ... ) ``` Supported values for `timestamps`: - List of int, float, str, bytes and datetime. - Numpy arrays of int{32, 64}, float{32, 64}, str_, string_ / bytes_, Numpy datetime64, and object containing "str". - Pandas series of int{32, 64}, float{32, 64}, Pandas Timestamp. String timestamps are interpreted as ISO 8601 datetime. Supported values for `features`: - List of int, float, str, bytes, bool, and datetime. - Numpy arrays of int{32, 64}, float{32, 64}, str_, string_ / bytes_, Numpy datetime64, or object containing "str". - Pandas series of int{32, 64}, float{32, 64}, Pandas Timestamp. Date / datetime features are converted to int64 unix times. NaN for float-like features are interpreted as missing values. Args: timestamps: Array of timestamps values. features: Dictionary of feature names to feature values. Feature and timestamp arrays must be of the same length. indexes: Names of the features to use as indexes. If empty (default), the data is not indexed. Only integer and string features can be used as indexes. name: Optional name of the EventSet. Used for debugging, and graph serialization. is_unix_timestamp: Whether the timestamps correspond to unix time. Unix times are required for calendar operators. If `None` (default), timestamps are interpreted as unix times if the `timestamps` argument is an array of date or date-like object. same_sampling_as: If set, the new EventSet is checked and tagged as having the same sampling as `same_sampling_as`. Some operators, such as [`EventSet.filter()`][temporian.EventSet.filter], require their inputs to have the same sampling. Returns: An EventSet. """ if features is None: features = {} logging.debug("Normalizing features") features = { name: normalize_features(value, name) for name, value in features.items() } # Check timestamps and all features are of same length if not all(len(f) == len(timestamps) for f in features.values()): raise ValueError( "Timestamps and all features must have the same length." ) # Convert timestamps to expected type. 
logging.debug("Normalizing timestamps") timestamps, auto_is_unix_timestamp = normalize_timestamps(timestamps) if not np.all(timestamps[:-1] <= timestamps[1:]): logging.debug("Sorting timestamps") order = np.argsort(timestamps, kind="mergesort") timestamps = timestamps[order] features = {name: value[order] for name, value in features.items()} if is_unix_timestamp is None: is_unix_timestamp = auto_is_unix_timestamp assert isinstance(is_unix_timestamp, bool) # Infer the schema logging.debug("Assembling schema") schema = Schema( features=[ (feature_key, numpy_array_to_tp_dtype(feature_key, feature_data)) for feature_key, feature_data in features.items() ], indexes=[], is_unix_timestamp=is_unix_timestamp, ) # Shallow copy the data to temporian format logging.debug("Assembling data") index_data = IndexData( features=[ features[feature_name] for feature_name in schema.feature_names() ], timestamps=timestamps, schema=schema, ) evset = EventSet( schema=schema, data={(): index_data}, ) if indexes: # Index the data logging.debug("Indexing events") input_node = evset.node() output_node = add_index(input_node, indexes=indexes) evset = run(output_node, {input_node: evset}) assert isinstance(evset, EventSet) evset.name = name if same_sampling_as is not None: logging.debug("Setting same sampling") evset.schema.check_compatible_index(same_sampling_as.schema) if evset.data.keys() != same_sampling_as.data.keys(): raise ValueError( "The new EventSet and `same_sampling_as` have the same" " indexes, but different index keys. They should have the" " same index keys to have the same sampling." ) for key, same_sampling_as_value in same_sampling_as.data.items(): if not np.all( evset.data[key].timestamps == same_sampling_as_value.timestamps ): raise ValueError( "The new EventSet and `same_sampling_as` have different" f" timestamps values for the index={key!r}. The timestamps" " should be equal for both to have the same sampling." ) # Discard the new timestamps arrays. evset.data[key].timestamps = same_sampling_as_value.timestamps evset.node()._sampling = same_sampling_as.node().sampling_node return evset
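One case the docstring lists but does not exemplify is `same_sampling_as`. A short sketch: the index keys and timestamps of the new EventSet must match exactly, otherwise a ValueError is raised.

a = event_set(timestamps=[1, 2, 3], features={"f1": [10, 20, 30]})
b = event_set(
    timestamps=[1, 2, 3],
    features={"f2": [1.0, 2.0, 3.0]},
    same_sampling_as=a,
)
# `b` now shares its sampling with `a`, as required by operators such as
# EventSet.filter().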
Creates an [`EventSet`][temporian.EventSet] from indexed data.

Unlike `event_set`, `from_struct` expects the data to already be split by
index value. Supported values for timestamps, indexes, and features are
similar to those of `event_set`.

Usage examples:

    ```python
    >>> evset = tp.from_struct(
    ...     [
    ...         (
    ...             {"i1": 1, "i2": "A"},
    ...             {"timestamp": [1, 2], "f1": [10, 11], "f2": ["X", "Y"]},
    ...         ),
    ...         (
    ...             {"i1": 1, "i2": "B"},
    ...             {"timestamp": [3, 4], "f1": [12, 13], "f2": ["X", "X"]},
    ...         ),
    ...     ])
    ```

Args:
    data: Indexed data.
    timestamps: Name of the feature to be used as timestamps for the
        EventSet.
    is_unix_timestamp: Whether the timestamps correspond to unix time. Unix
        times are required for calendar operators. Defaults to `False`.

Returns:
    An EventSet.
def from_struct(
    data: List[Tuple[Dict[str, Any], Dict[str, DataArray]]],
    timestamps: str = "timestamp",
    is_unix_timestamp: bool = False,
) -> EventSet:
    """Creates an [`EventSet`][temporian.EventSet] from indexed data.

    Unlike `event_set`, `from_struct` expects the data to already be split by
    index value. Supported values for timestamps, indexes, and features are
    similar to those of `event_set`.

    Usage examples:

        ```python
        >>> evset = tp.from_struct(
        ...     [
        ...         (
        ...             {"i1": 1, "i2": "A"},
        ...             {"timestamp": [1, 2], "f1": [10, 11], "f2": ["X", "Y"]},
        ...         ),
        ...         (
        ...             {"i1": 1, "i2": "B"},
        ...             {"timestamp": [3, 4], "f1": [12, 13], "f2": ["X", "X"]},
        ...         ),
        ...     ])
        ```

    Args:
        data: Indexed data.
        timestamps: Name of the feature to be used as timestamps for the
            EventSet.
        is_unix_timestamp: Whether the timestamps correspond to unix time.
            Unix times are required for calendar operators. Defaults to
            `False`.

    Returns:
        An EventSet.
    """

    if not isinstance(data, list):
        raise ValueError("data is expected to be a list of two-item tuples")
    if len(data) == 0:
        raise ValueError("Cannot create an EventSet without any values")
    if not isinstance(data[0], tuple):
        raise ValueError("data is expected to be a list of two-item tuples")

    first_index_value = data[0][0]
    index_schema = []
    for k, v in first_index_value.items():
        index_schema.append(
            IndexSchema(name=k, dtype=DType.from_python_value(v))
        )

    first_feature_values = data[0][1]
    if timestamps not in first_feature_values:
        raise ValueError(f"No value with name timestamps={timestamps!r}")

    # Build schema
    features_schema = []
    for k, v in first_feature_values.items():
        if k == timestamps:
            continue
        if isinstance(v, np.ndarray):
            tp_dtype = numpy_dtype_to_tp_dtype(k, v.dtype.type)
        else:
            if not isinstance(v, list):
                raise ValueError(
                    "Feature values are expected to be numpy arrays or lists."
                    f" Instead feature {k} has type {type(v)}"
                )
            if len(v) == 0:
                raise ValueError(f"Feature {k} has zero observations.")
            tp_dtype = DType.from_python_value(v[0])
        features_schema.append(FeatureSchema(name=k, dtype=tp_dtype))

    schema = Schema(
        features=features_schema,
        indexes=index_schema,
        is_unix_timestamp=is_unix_timestamp,
    )

    # Build content
    evtset_data = {}
    for src_index_value, src_feature_value in data:
        dst_timestamps, _ = normalize_timestamps(src_feature_value[timestamps])
        dst_index_value = tuple(
            normalize_index_item(src_index_value[k.name]) for k in index_schema
        )
        dst_feature_value = [
            normalize_features(src_feature_value[k.name], k.name)
            for k in features_schema
        ]
        evtset_data[dst_index_value] = IndexData(
            features=dst_feature_value,
            timestamps=dst_timestamps,
            schema=schema,
        )

    return EventSet(
        schema=schema,
        data=evtset_data,
    )
Sort user inputs into groups of features to plot together.
def build_groups( evsets: InputEventSet, features: Optional[Set[str]], allow_list: bool = True, ) -> Groups: """Sort user inputs into groups of features to plot together.""" if isinstance(evsets, EventSet): # Plot each feature individually groups = [] for feature_idx, feature in enumerate(evsets.schema.features): if features is not None and feature.name not in features: continue groups.append(Group([GroupItem(evsets, feature_idx)])) if len(groups) == 0: # Plot the timestamps groups.append(Group([GroupItem(evsets, -1, name=evsets.name)])) return groups if isinstance(evsets, tuple): # Plot all the event sets and their features together group_items = [] for evset in evsets: if not isinstance(evset, EventSet): raise ValueError( f"Expecting tuple of EventSets. Got {type(evset)} instead." ) plot_for_current_evset = False for feature_idx, feature in enumerate(evset.schema.features): if features is not None and feature.name not in features: continue group_items.append(GroupItem(evset, feature_idx)) plot_for_current_evset = True if not plot_for_current_evset: group_items.append(GroupItem(evset, -1, name=evset.name)) return [Group(group_items)] if allow_list and isinstance(evsets, list): groups = [] for x in evsets: groups.extend(build_groups(x, features, allow_list=False)) return groups raise ValueError("Non supported evsets input")
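How the grouping rules play out, using the `event_set` helper defined above (`features=None` selects every feature):

evset = event_set(
    timestamps=[1, 2, 3], features={"f1": [1, 2, 3], "f2": [4, 5, 6]}
)

build_groups(evset, None)           # EventSet: one group per feature -> 2
build_groups((evset, evset), None)  # tuple: everything merged -> 1 group
build_groups([evset, (evset, evset)], None)  # list: 2 + 1 -> 3 groups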
Normalizes the "features" argument of plot.
def normalize_features(features: InputFeatures) -> Optional[Set[str]]: """Normalizes the "features" argument of plot.""" if features is None: return None if isinstance(features, str): return {features} if isinstance(features, list): return set(features) if isinstance(features, set): return features raise ValueError(f"Non supported feature type {features}")
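Accepted forms for the plot `features` argument:

normalize_features(None)          # -> None (plot all features)
normalize_features("f1")          # -> {"f1"}
normalize_features(["f1", "f2"])  # -> {"f1", "f2"}
normalize_features({"f1", "f2"})  # -> {"f1", "f2"} (sets pass through)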
Returns the list of all the event sets.
def _unroll_evsets(evsets: InputEventSet) -> List[EventSet]: """Returns the list of all the event sets.""" if isinstance(evsets, EventSet): return [evsets] if isinstance(evsets, (list, tuple)): return sum((_unroll_evsets(x) for x in evsets), []) raise ValueError("Non supported evsets input")
Lists all the index values to plot.
def _list_index_values( indexes: Optional[IndexKeyList], evsets: InputEventSet, max_values: int ) -> List[NormalizedIndexKey]: """Lists all the index values to plot.""" flat_indexes = set(normalize_index_key_list(indexes, None)) index_values = set() for evtset in _unroll_evsets(evsets): for index_value in evtset.data: if indexes is None or index_value in flat_indexes: index_values.add(index_value) if len(index_values) >= max_values: return list(index_values) return list(index_values)
Plots one or several [`EventSets`][temporian.EventSet].

If multiple EventSets are provided, they should all have the same index.
The time axis (i.e., horizontal axis) is shared among all the plots.
Different features can be plotted independently or on the same plots.
Plotting an EventSet without features plots timestamps instead.

When plotting a single EventSet, this function is equivalent to
[`EventSet.plot()`][temporian.EventSet.plot].

Feature names are used as a legend. When plotting an EventSet without
features, the legend is set to be "[sampling]", or to the `name` of the
EventSet, if set.

Examples:
    ```python
    >>> evset = tp.event_set(timestamps=[1, 2, 4],
    ...     features={"f1": [0, 42, 10], "f2": [10, -10, 20]})

    # Plot each feature individually
    >>> tp.plot(evset)

    # Plots multiple features in the same sub-plot
    >>> tp.plot(evset, merge=True)

    # Equivalent
    >>> evset_2 = tp.event_set([5, 6])
    >>> tp.plot([evset, evset_2], merge=True)
    >>> tp.plot((evset, evset_2))

    # Make the plot interactive
    >>> tp.plot(evset, interactive=True)

    # Save figure to file
    >>> fig = tp.plot(evset, return_fig=True)
    >>> fig.savefig("/tmp/fig.png")

    # Change drawing style
    >>> tp.plot(evset, style="line")
    ```

Args:
    evsets: Single or list of EventSets to plot. Also, tuples can be used to
        group multiple EventSets in the same sub-plot. Otherwise, all
        EventSets and features are plotted in separate sub-plots.
    indexes: The index key or list of index keys to plot. If indexes=None,
        plots all the available indexes. Indexes should be provided as a
        single value (e.g. a string) or a tuple of values. Example:
        indexes="a", indexes=("a",), indexes=("a", "b",), indexes=["a", "b"],
        indexes=[("a", "b"), ("a", "c")].
    features: Feature names of the event(s) to plot. Use
        'evset.feature_names' for the list of available names. If a feature
        doesn't exist in an event, it's silently skipped. If None, plots all
        features of all events.
    width_px: Width of the figure in pixels.
    height_per_plot_px: Height of each sub-plot (one per feature) in pixels.
    max_points: Maximum number of points to plot.
    min_time: If set, only plot events after it.
    max_time: If set, only plot events before it.
    max_num_plots: Maximum number of plots to display. If more plots are
        available, only plot the first `max_num_plots` ones and print a
        warning.
    style: A `Style` or equivalent string like: `line`, `marker` or `vline`.
    return_fig: If true, returns the figure object. The figure object depends
        on the backend.
    interactive: If true, creates an interactive plot. interactive=True
        effectively selects a backend that supports interactive plotting.
        Ignored if "backend" is set.
    backend: Plotting library to use. Possible values are: matplotlib, bokeh,
        and bokeh_webgl. If set, overrides the "interactive" argument.
    merge: If true, plots all features in the same sub-plot. If false, plots
        features in separate sub-plots. merge=True on event-sets [e1, e2] is
        equivalent to plotting (e1, e2).
    font_scale: Scaling factor for all the fonts.
def plot(
    evsets: InputEventSet,
    indexes: Optional[IndexKeyList] = None,
    features: InputFeatures = None,
    width_px: int = 1024,
    height_per_plot_px: int = 150,
    max_points: Optional[int] = None,
    min_time: Optional[duration_utils.Timestamp] = None,
    max_time: Optional[duration_utils.Timestamp] = None,
    max_num_plots: int = 20,
    style: Union[Style, str] = Style.auto,
    return_fig: bool = False,
    interactive: bool = False,
    backend: Optional[str] = None,
    merge: bool = False,
    font_scale: float = 1,
):
    """Plots one or several [`EventSets`][temporian.EventSet].

    If multiple EventSets are provided, they should all have the same index.
    The time axis (i.e., horizontal axis) is shared among all the plots.
    Different features can be plotted independently or on the same plots.
    Plotting an EventSet without features plots timestamps instead.

    When plotting a single EventSet, this function is equivalent to
    [`EventSet.plot()`][temporian.EventSet.plot].

    Feature names are used as a legend. When plotting an EventSet without
    features, the legend is set to be "[sampling]", or to the `name` of the
    EventSet, if set.

    Examples:
        ```python
        >>> evset = tp.event_set(timestamps=[1, 2, 4],
        ...     features={"f1": [0, 42, 10], "f2": [10, -10, 20]})

        # Plot each feature individually
        >>> tp.plot(evset)

        # Plots multiple features in the same sub-plot
        >>> tp.plot(evset, merge=True)

        # Equivalent
        >>> evset_2 = tp.event_set([5, 6])
        >>> tp.plot([evset, evset_2], merge=True)
        >>> tp.plot((evset, evset_2))

        # Make the plot interactive
        >>> tp.plot(evset, interactive=True)

        # Save figure to file
        >>> fig = tp.plot(evset, return_fig=True)
        >>> fig.savefig("/tmp/fig.png")

        # Change drawing style
        >>> tp.plot(evset, style="line")
        ```

    Args:
        evsets: Single or list of EventSets to plot. Also, tuples can be used
            to group multiple EventSets in the same sub-plot. Otherwise, all
            EventSets and features are plotted in separate sub-plots.
        indexes: The index key or list of index keys to plot. If
            indexes=None, plots all the available indexes. Indexes should be
            provided as a single value (e.g. a string) or a tuple of values.
            Example: indexes="a", indexes=("a",), indexes=("a", "b",),
            indexes=["a", "b"], indexes=[("a", "b"), ("a", "c")].
        features: Feature names of the event(s) to plot. Use
            'evset.feature_names' for the list of available names. If a
            feature doesn't exist in an event, it's silently skipped. If
            None, plots all features of all events.
        width_px: Width of the figure in pixels.
        height_per_plot_px: Height of each sub-plot (one per feature) in
            pixels.
        max_points: Maximum number of points to plot.
        min_time: If set, only plot events after it.
        max_time: If set, only plot events before it.
        max_num_plots: Maximum number of plots to display. If more plots are
            available, only plot the first `max_num_plots` ones and print a
            warning.
        style: A `Style` or equivalent string like: `line`, `marker` or
            `vline`.
        return_fig: If true, returns the figure object. The figure object
            depends on the backend.
        interactive: If true, creates an interactive plot. interactive=True
            effectively selects a backend that supports interactive plotting.
            Ignored if "backend" is set.
        backend: Plotting library to use. Possible values are: matplotlib,
            bokeh, and bokeh_webgl. If set, overrides the "interactive"
            argument.
        merge: If true, plots all features in the same sub-plot. If false,
            plots features in separate sub-plots. merge=True on event-sets
            [e1, e2] is equivalent to plotting (e1, e2).
        font_scale: Scaling factor for all the fonts.
    """

    if merge:
        if isinstance(evsets, EventSet):
            evsets = (evsets,)
        elif isinstance(evsets, list):
            evsets = tuple(evsets)
        else:
            raise ValueError(
                "If merge=True, 'evsets' should be an EventSet or a list of"
                f" EventSets. Got {type(evsets)} instead."
            )

    normalized_features = normalize_features(features)
    groups = build_groups(evsets, normalized_features)
    normalized_indexes = _list_index_values(indexes, evsets, max_num_plots)

    if len(groups) == 0:
        raise ValueError("There are no input EventSets to plot.")

    if isinstance(style, str):
        style = Style[style]
    assert isinstance(style, Style)

    options = Options(
        interactive=interactive,
        backend=backend,
        width_px=width_px,
        height_per_plot_px=height_per_plot_px,
        max_points=max_points,
        min_time=(
            duration_utils.normalize_timestamp(min_time)
            if min_time is not None
            else None
        ),
        max_time=(
            duration_utils.normalize_timestamp(max_time)
            if max_time is not None
            else None
        ),
        max_num_plots=max_num_plots,
        style=style,
        font_scale=font_scale,
    )

    if backend is None:
        backend = "bokeh" if interactive else "matplotlib"

    if backend not in BACKENDS:
        raise ValueError(
            f"Unknown plotting backend {backend}. Available "
            f"backends: {BACKENDS}"
        )

    try:
        plotter_class = BACKENDS[backend]()
        fig = plot_with_plotter(
            plotter_class=plotter_class,
            groups=groups,
            indexes=normalized_indexes,
            options=options,
        )
    except ImportError:
        print(error_message_import_backend(backend))
        raise

    return fig if return_fig else None
Computes the number of sub-plots.
def get_num_plots(
    groups: Groups,
    indexes: List[tuple],
    options: Options,
):
    """Computes the number of sub-plots."""

    num_plots = len(indexes) * len(groups)
    if num_plots == 0:
        raise ValueError("There is nothing to plot.")

    if num_plots > options.max_num_plots:
        print(
            f"The number of plots ({num_plots}) is larger than "
            f'"options.max_num_plots={options.max_num_plots}". Only the first '
            f"{options.max_num_plots} plots will be displayed."
        )
        num_plots = options.max_num_plots

    return num_plots
Finds the best plotting style.
def auto_style(uniform: bool, xs, ys) -> Style:
    """Finds the best plotting style."""

    if len(ys) <= 1:
        return Style.marker

    # At this point len(ys) >= 2, so ys[0] is always defined.
    all_ys_are_equal = np.all(ys == ys[0])

    if not uniform and (len(xs) <= 1000 or all_ys_are_equal):
        return Style.marker
    else:
        return Style.line
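A couple of concrete cases:

import numpy as np

auto_style(uniform=False, xs=np.arange(2), ys=np.array([1.0, 2.0]))
# -> Style.marker (few points on a non-uniform sampling)

auto_style(uniform=True, xs=np.arange(5000), ys=np.arange(5000.0))
# -> Style.line (uniform sampling with many distinct values)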