response | instruction |
---|---|
Checks if timestamps are uniformly sampled. | def is_uniform(xs) -> bool:
"""Checks if timestamps are uniformly sampled."""
diff = np.diff(xs)
if len(diff) == 0:
return True
return np.allclose(diff, diff[0]) |
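As a quick illustration of the helper above (a minimal sketch; assumes `numpy` is imported as `np`, as in the function body):

```python
import numpy as np

# Evenly spaced timestamps: all consecutive differences are equal.
print(is_uniform(np.array([0.0, 1.0, 2.0, 3.0])))  # True

# Irregular spacing: the differences differ, so sampling is not uniform.
print(is_uniform(np.array([0.0, 1.0, 2.5, 3.0])))  # False

# A single timestamp has no differences to compare, so it counts as uniform.
print(is_uniform(np.array([42.0])))  # True
```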
Plots "(xs, ys)" on the axis "ax". | def _matplotlib_sub_plot(
ax,
xs,
ys,
options: Options,
color,
name: Optional[str],
is_unix_timestamp: bool,
style: Style,
legend: Optional[str] = None,
**wargs,
):
"""Plots "(xs, ys)" on the axis "ax"."""
if style == Style.line:
mat_style = {} # Default
elif style == Style.marker:
mat_style = {"marker": "2", "linestyle": "None"}
elif style == Style.vline:
mat_style = {"marker": "|", "linestyle": "None"}
else:
raise ValueError("Non implemented style")
if legend is not None:
wargs["label"] = legend
ax.plot(xs, ys, lw=0.5, color=color, **mat_style, **wargs)
if options.min_time is not None or options.max_time is not None:
args = {}
if options.min_time is not None:
args["left"] = (
convert_timestamp_to_datetime(options.min_time)
if is_unix_timestamp
else options.min_time
)
if options.max_time is not None:
args["right"] = (
convert_timestamp_to_datetime(options.max_time)
if is_unix_timestamp
else options.max_time
)
ax.set_xlim(**args)
ax.xaxis.set_tick_params(labelsize=8)
ax.xaxis.set_major_locator(ticker.MaxNLocator(10))
ax.xaxis.set_minor_locator(ticker.NullLocator())
if name is not None:
ax.set_ylabel(name, size=8)
ax.yaxis.set_tick_params(labelsize=8)
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_minor_locator(ticker.NullLocator())
for item in (
[ax.xaxis.label, ax.yaxis.label]
+ ax.get_xticklabels()
+ ax.get_yticklabels()
):
item.set_fontsize(_font_size(options))
ax.grid(lw=0.4, ls="--", axis="x") |
Checks if EventSets are matching the expected schema. | def _check_value_to_schema(
values: Dict[str, EventSet],
nodes: Dict[str, EventSetNode],
label: str,
) -> None:
"""Checks if EventSets are matching the expected schema."""
for key, node in nodes.items():
value = values[key]
if value.schema != node.schema:
raise RuntimeError(
"Unexpected EventSet schema.\n"
f"Actual schema:\n{value.schema}\n"
f"Expected schema:\n{node.schema}"
)
index_data = value.get_arbitrary_index_data()
if index_data is not None:
if len(index_data.features) != len(value.schema.features):
raise RuntimeError(
"Invalid internal number of input features for argument"
f" {label!r}.\nExpected {len(value.schema.features)}, but"
f" got {len(index_data.features)}.\nSchema:\n{value.schema}"
)
for feature_value, feature_schema in zip(
index_data.features, value.schema.features
):
expected_dtype = numpy_array_to_tp_dtype(
feature_schema.name, feature_value
)
if feature_schema.dtype != expected_dtype:
raise RuntimeError(
f"Feature dtypes in {label} don't match the expected"
f" ones. Expected dtype {expected_dtype} for feature"
f" {feature_schema.name}, but got"
f" {feature_schema.dtype} instead."
)
if len(index_data.timestamps) != len(feature_value):
raise RuntimeError(
"Number of timestamps does not match the number of"
f" values in feature {feature_schema.name}."
) |
Checks if the input of an operator matches its definition. | def _check_input(
inputs: Dict[str, EventSet],
operator: Operator,
) -> None:
"""Checks if the input/output of an operator matches its definition."""
with OperatorExceptionDecorator(operator):
# Check input schema
effective_input_keys = set(inputs.keys())
expected_input_keys = set(operator.inputs.keys())
if effective_input_keys != expected_input_keys:
raise RuntimeError(
"Input keys do not match the expected ones. "
f"Received: {effective_input_keys}. "
f"Expected: {expected_input_keys}."
)
_check_value_to_schema(inputs, nodes=operator.inputs, label="input") |
Checks if the input/output of an operator matches its definition. | def _check_output(
inputs: Dict[str, EventSet],
outputs: Dict[str, EventSet],
operator: Operator,
) -> None:
"""Checks if the input/output of an operator matches its definition."""
with OperatorExceptionDecorator(operator):
# Check output schema
effective_output_keys = set(outputs.keys())
expected_output_keys = set(operator.outputs.keys())
if effective_output_keys != expected_output_keys:
raise RuntimeError(
"Output keys do not match the expected ones. "
f"Received: {effective_output_keys}. "
f"Expected: {expected_output_keys}."
)
_check_value_to_schema(outputs, nodes=operator.outputs, label="outputs")
# Check for unnecessary memory copy.
for output_key in operator.outputs.keys():
output = outputs[output_key]
# TODO: Check copy or referencing of feature data.
matching_samplings = set(operator.list_matching_io_samplings())
for input_key in operator.inputs.keys():
input = inputs[input_key]
expected_matching_sampling = (
input_key,
output_key,
) in matching_samplings
is_same, reason = _is_same_sampling(output, input)
if expected_matching_sampling and not is_same:
raise RuntimeError(
f"The sampling of the input argument '{input_key}' and"
f" output '{output_key}' are expected to have THE SAME"
" sampling. However, a different sampling was"
f" generated during the op execution ({input} vs"
f" {output}). Reason: {reason}"
) |
Tests if a cast should be tested for overflow.
Don't check overflow for BOOLEAN or STRING:
- boolean: makes no sense, everybody knows what to expect.
- string: on src_dtype, too costly to convert to numeric dtype
and compare to the limit. On dst_dtype, there's no limit. | def _can_overflow(origin_dtype: DType, dst_dtype: DType) -> bool:
"""Tests if a cast should be tested for overflow.
Don't check overflow for BOOLEAN or STRING:
- boolean: makes no sense, everybody knows what to expect.
- string: on src_dtype, too costly to convert to numeric dtype
and compare to the limit. On dst_dtype, there's no limit.
"""
if origin_dtype in _NO_CHECK_TYPES or dst_dtype in _NO_CHECK_TYPES:
return False
return _DTYPE_LIMITS[origin_dtype].max > _DTYPE_LIMITS[dst_dtype].max |
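For context, a hedged sketch of what the module-level tables used above could look like; `_NO_CHECK_TYPES` and `_DTYPE_LIMITS` are not shown in this snippet, so the definitions below are assumptions built on numpy's dtype limits:

```python
import numpy as np

# Assumed: dtypes for which overflow checking is skipped entirely.
_NO_CHECK_TYPES = {DType.BOOLEAN, DType.STRING}

# Assumed: numeric limits per Temporian dtype, backed by numpy's dtype info.
_DTYPE_LIMITS = {
    DType.INT32: np.iinfo(np.int32),
    DType.INT64: np.iinfo(np.int64),
    DType.FLOAT32: np.finfo(np.float32),
    DType.FLOAT64: np.finfo(np.float64),
}

# With these tables, an INT64 -> INT32 cast can overflow, but not the reverse.
assert _can_overflow(DType.INT64, DType.INT32)
assert not _can_overflow(DType.INT32, DType.INT64)
```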
Reads an [`EventSet`][temporian.EventSet] from a CSV file.
Example:
```python
>>> # Example CSV
>>> temp_file = str(tmp_dir / "temporal_data.csv")
>>> _ = open(temp_file, "w").write(
... "date,feature_1,feature_2
"
... "2023-01-01,10.0,3.0
"
... "2023-01-02,20.0,4.0
"
... "2023-02-01,30.0,5.0"
... )
>>> # Load CSV
>>> evset = tp.from_csv(temp_file, timestamps="date")
>>> evset
indexes: []
features: [('feature_1', float64), ('feature_2', float64)]
events:
(3 events):
timestamps: ['2023-01-01T00:00:00' '2023-01-02T00:00:00'
'2023-02-01T00:00:00']
'feature_1': [10. 20. 30.]
'feature_2': [3. 4. 5.]
...
```
Args:
path: Path to the file.
timestamps: Name of the column to be used as timestamps for the
EventSet.
indexes: Names of the columns to be used as indexes for the EventSet.
If None, a flat EventSet will be created.
sep: Separator to use.
Returns:
EventSet read from file.
| def from_csv(
path: str,
timestamps: str = "timestamp",
indexes: Optional[List[str]] = None,
sep: str = ",",
) -> EventSet:
"""Reads an [`EventSet`][temporian.EventSet] from a CSV file.
Example:
```python
>>> # Example CSV
>>> temp_file = str(tmp_dir / "temporal_data.csv")
>>> _ = open(temp_file, "w").write(
... "date,feature_1,feature_2\n"
... "2023-01-01,10.0,3.0\n"
... "2023-01-02,20.0,4.0\n"
... "2023-02-01,30.0,5.0"
... )
>>> # Load CSV
>>> evset = tp.from_csv(temp_file, timestamps="date")
>>> evset
indexes: []
features: [('feature_1', float64), ('feature_2', float64)]
events:
(3 events):
timestamps: ['2023-01-01T00:00:00' '2023-01-02T00:00:00'
'2023-02-01T00:00:00']
'feature_1': [10. 20. 30.]
'feature_2': [3. 4. 5.]
...
```
Args:
path: Path to the file.
timestamps: Name of the column to be used as timestamps for the
EventSet.
indexes: Names of the columns to be used as indexes for the EventSet.
If None, a flat EventSet will be created.
sep: Separator to use.
Returns:
EventSet read from file.
"""
import pandas as pd
if indexes is None:
indexes = []
df = pd.read_csv(path, sep=sep)
return from_pandas(df, indexes=indexes, timestamps=timestamps) |
Saves an [`EventSet`][temporian.EventSet] to a CSV file.
Example:
```python
>>> output_path = str(tmp_dir / "output_data.csv")
>>> evset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_csv(evset, output_path)
```
Args:
evset: EventSet to save.
path: Path to the file.
sep: Separator to use.
na_rep: Representation to use for missing values.
columns: Columns to save. If `None`, saves all columns. | def to_csv(
evset: EventSet,
path: str,
sep: str = ",",
na_rep: Optional[str] = None,
columns: Optional[List[str]] = None,
):
"""Saves an [`EventSet`][temporian.EventSet] to a CSV file.
Example:
```python
>>> output_path = str(tmp_dir / "output_data.csv")
>>> evset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_csv(evset, output_path)
```
Args:
evset: EventSet to save.
path: Path to the file.
sep: Separator to use.
na_rep: Representation to use for missing values.
columns: Columns to save. If `None`, saves all columns.
"""
df = to_pandas(evset)
df.to_csv(path, index=False, sep=sep, na_rep=na_rep, columns=columns) |
Converts an [`EventSet`][temporian.EventSet] to a flattened dictionary with
numpy arrays.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=['2023-11-08T17:14:38', '2023-11-29T21:44:46'],
... features={
... "store": ['STORE_1', 'STORE_2'],
... "revenue": [1571, 6101]
... },
... indexes=["store"],
... )
# Timestamps are exported as datetime64[s] if they were created as datetimes,
# otherwise they are floats
>>> res = tp.to_numpy(evset)
>>> res
{'store': array([b'STORE_2', b'STORE_1'], dtype='|S7'), 'revenue': array([6101, 1571]),
'timestamp': array(['2023-11-29T21:44:46', '2023-11-08T17:14:38'], dtype='datetime64[s]')}
```
Args:
evset: input event set.
timestamp_to_datetime: If true, cast Temporian timestamps to datetime64
when is_unix_timestamp is set to True.
timestamps: If true, the timestamps are included as a column.
Returns:
object with numpy arrays created from EventSet. | def to_numpy(
evset: EventSet,
timestamp_to_datetime: bool = True,
timestamps: bool = True,
) -> Dict[str, ndarray]:
"""Converts an [`EventSet`][temporian.EventSet] to a flattened dictionary with
numpy arrays.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=['2023-11-08T17:14:38', '2023-11-29T21:44:46'],
... features={
... "store": ['STORE_1', 'STORE_2'],
... "revenue": [1571, 6101]
... },
... indexes=["store"],
... )
# Timestamps are exported as datetime64[s] if they were created as datetimes,
# otherwise they are floats
>>> res = tp.to_numpy(evset)
>>> res
{'store': array([b'STORE_2', b'STORE_1'], dtype='|S7'), 'revenue': array([6101, 1571]),
'timestamp': array(['2023-11-29T21:44:46', '2023-11-08T17:14:38'], dtype='datetime64[s]')}
```
Args:
evset: input event set.
timestamp_to_datetime: If true, cast Temporian timestamps to datetime64
when is_unix_timestamp is set to True.
timestamps: If true, the timestamps are included as a column.
Returns:
object with numpy arrays created from EventSet.
"""
timestamp_key = "timestamp"
index_names = evset.schema.index_names()
feature_names = evset.schema.feature_names()
column_names = index_names + feature_names
if timestamps:
column_names += [timestamp_key]
dst = {column_name: [] for column_name in column_names}
for index, data in evset.data.items():
assert isinstance(index, tuple)
if timestamps:
# Timestamps
if evset.schema.is_unix_timestamp and timestamp_to_datetime:
dst[timestamp_key].append(
data.timestamps.astype("datetime64[s]")
)
else:
dst[timestamp_key].append(data.timestamps)
# Features
for feature_name, feature in zip(feature_names, data.features):
dst[feature_name].append(feature)
# Indexes
num_timestamps = len(data.timestamps)
for index_name, index_item in zip(index_names, index):
dst[index_name].append(np.repeat(index_item, num_timestamps))
dst = {k: np.concatenate(v) for k, v in dst.items()}
return dst |
Converts a Pandas DataFrame into an [`EventSet`][temporian.EventSet].
The column `timestamps` (defaults to "timestamp") contains the
timestamps. The columns in `indexes` (defaults to `None`, equivalent to `[]`)
contain the indexes. The remaining columns are converted into features.
See [`tp.event_set()`][temporian.event_set] for the list of supported
timestamp and feature types.
Usage example:
```python
>>> df = pd.DataFrame(
... data=[
... [1.0, 5, "A"],
... [2.0, 6, "A"],
... [3.0, 7, "B"],
... ],
... columns=["timestamp", "feature_1", "feature_2"],
... )
>>> evset = tp.from_pandas(df, indexes=["feature_2"])
```
Args:
df: A non indexed Pandas dataframe.
indexes: Names of the columns to use as indexes. If empty
(default), the data is not indexed. Only integer and string columns
can be used as indexes.
timestamps: Name of the column containing the timestamps. See
[`tp.event_set()`][temporian.event_set] for the list of supported
timestamp types.
name: Optional name of the EventSet. Used for debugging, and
graph serialization.
same_sampling_as: If set, the new EventSet is checked and tagged as
having the same sampling as `same_sampling_as`. Some operators,
such as [`EventSet.filter()`][temporian.EventSet.filter], require
their inputs to have the same sampling.
Returns:
An EventSet.
Raises:
ValueError: If `indexes` or `timestamps` are not in `df`'s
columns.
ValueError: If a column has an unsupported dtype. | def from_pandas(
df: "pandas.DataFrame",
indexes: Optional[List[str]] = None,
timestamps: str = "timestamp",
name: Optional[str] = None,
same_sampling_as: Optional[EventSet] = None,
) -> EventSet:
"""Converts a Pandas DataFrame into an [`EventSet`][temporian.EventSet].
The column `timestamps` (defaults to "timestamp") contains the
timestamps. The columns in `indexes` (defaults to `None`, equivalent to `[]`)
contain the indexes. The remaining columns are converted into features.
See [`tp.event_set()`][temporian.event_set] for the list of supported
timestamp and feature types.
Usage example:
```python
>>> df = pd.DataFrame(
... data=[
... [1.0, 5, "A"],
... [2.0, 6, "A"],
... [3.0, 7, "B"],
... ],
... columns=["timestamp", "feature_1", "feature_2"],
... )
>>> evset = tp.from_pandas(df, indexes=["feature_2"])
```
Args:
df: A non indexed Pandas dataframe.
indexes: Names of the columns to use as indexes. If empty
(default), the data is not indexed. Only integer and string columns
can be used as indexes.
timestamps: Name of the column containing the timestamps. See
[`tp.event_set()`][temporian.event_set] for the list of supported
timestamp types.
name: Optional name of the EventSet. Used for debugging, and
graph serialization.
same_sampling_as: If set, the new EventSet is checked and tagged as
having the same sampling as `same_sampling_as`. Some operators,
such as [`EventSet.filter()`][temporian.EventSet.filter], require
their inputs to have the same sampling.
Returns:
An EventSet.
Raises:
ValueError: If `indexes` or `timestamps` are not in `df`'s
columns.
ValueError: If a column has an unsupported dtype.
"""
feature_dict = df.drop(columns=timestamps).to_dict("series")
return event_set(
timestamps=df[timestamps].to_numpy(),
features={k: v.to_numpy() for k, v in feature_dict.items()},
indexes=indexes,
name=name,
same_sampling_as=same_sampling_as,
) |
Converts an [`EventSet`][temporian.EventSet] to a pandas DataFrame.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)],
... features={
... "feature_1": [0.5, 0.6],
... "my_index": ["red", "red"],
... },
... indexes=["my_index"],
... )
# Indices are not set as dataframe's indices. Timestamps are exported as
# datetime64[s] if they were created as datetimes, otherwise they are
# floats
>>> df = tp.to_pandas(evset)
>>> df
my_index feature_1 timestamp
0 red 0.5 2015-01-01
1 red 0.6 2015-01-02
# Set index/date manually in pandas
>>> df.set_index("my_index")
feature_1 timestamp
my_index
red 0.5 2015-01-01
red 0.6 2015-01-02
```
Args:
evset: Input EventSet.
tp_string_to_pd_string: If true, cast Temporian strings (equivalent to
np.string_ or np.bytes) to Pandas strings (equivalent to np.str_).
timestamp_to_datetime: If true, cast Temporian timestamps to datetime64
when is_unix_timestamp is set to True.
timestamps: If true, the timestamps are included as a column.
Returns:
DataFrame created from EventSet. | def to_pandas(
evset: EventSet,
tp_string_to_pd_string: bool = True,
timestamp_to_datetime: bool = True,
timestamps: bool = True,
) -> "pandas.DataFrame":
"""Converts an [`EventSet`][temporian.EventSet] to a pandas DataFrame.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)],
... features={
... "feature_1": [0.5, 0.6],
... "my_index": ["red", "red"],
... },
... indexes=["my_index"],
... )
# Indices are not set as dataframe's indices. Timestamps are exported as
# datetime64[s] if they were created as datetimes, otherwise they are
# floats
>>> df = tp.to_pandas(evset)
>>> df
my_index feature_1 timestamp
0 red 0.5 2015-01-01
1 red 0.6 2015-01-02
# Set index/date manually in pandas
>>> df.set_index("my_index")
feature_1 timestamp
my_index
red 0.5 2015-01-01
red 0.6 2015-01-02
```
Args:
evset: Input EventSet.
tp_string_to_pd_string: If true, cast Temporian strings (equivalent to
np.string_ or np.bytes) to Pandas strings (equivalent to np.str_).
timestamp_to_datetime: If true, cast Temporian timestamps to datetime64
when is_unix_timestamp is set to True.
timestamps: If true, the timestamps are included as a column.
Returns:
DataFrame created from EventSet.
"""
import pandas as pd
timestamp_key = "timestamp"
index_names = evset.schema.index_names()
feature_names = evset.schema.feature_names()
column_names = index_names + feature_names
if timestamps:
column_names += [timestamp_key]
dst = {column_name: [] for column_name in column_names}
for index, data in evset.data.items():
assert isinstance(index, tuple)
if timestamps:
# Timestamps
if evset.schema.is_unix_timestamp and timestamp_to_datetime:
dst[timestamp_key].append(
data.timestamps.astype("datetime64[s]")
)
else:
dst[timestamp_key].append(data.timestamps)
# Features
for feature_name, feature in zip(feature_names, data.features):
dst[feature_name].append(feature)
# Indexes
num_timestamps = len(data.timestamps)
for index_name, index_item in zip(index_names, index):
dst[index_name].append(np.repeat(index_item, num_timestamps))
dst = {k: np.concatenate(v) for k, v in dst.items()}
if tp_string_to_pd_string:
for feature in evset.schema.features:
if feature.dtype == DType.STRING:
dst[feature.name] = dst[feature.name].astype(str)
for index in evset.schema.indexes:
if index.dtype == DType.STRING:
dst[index.name] = dst[index.name].astype(str)
return pd.DataFrame(dst) |
Reads an [`EventSet`][temporian.EventSet] from a parquet file.
Example:
```python
>>> temp_file = str(tmp_dir / "temporal_data.parquet")
>>> og_eventset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_parquet(og_eventset, temp_file)
>>> evset = tp.from_parquet(temp_file)
>>> evset
indexes: []
features: [('f1', float64)]
events:
(1 events):
timestamps: [1.]
'f1': [0.1]
...
```
Args:
path: Path to the file.
timestamps: Name of the column to be used as timestamps for the
EventSet.
indexes: Names of the columns to be used as indexes for the EventSet.
If None, a flat EventSet will be created.
Returns:
EventSet read from file. | def from_parquet(
path: str,
timestamps: str = "timestamp",
indexes: Optional[List[str]] = None,
**kwargs,
) -> EventSet:
"""Reads an [`EventSet`][temporian.EventSet] from a parquet file.
Example:
```python
>>> temp_file = str(tmp_dir / "temporal_data.parquet")
>>> og_eventset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_parquet(og_eventset, temp_file)
>>> evset = tp.from_parquet(temp_file)
>>> evset
indexes: []
features: [('f1', float64)]
events:
(1 events):
timestamps: [1.]
'f1': [0.1]
...
```
Args:
path: Path to the file.
timestamps: Name of the column to be used as timestamps for the
EventSet.
indexes: Names of the columns to be used as indexes for the EventSet.
If None, a flat EventSet will be created.
Returns:
EventSet read from file.
"""
import pandas as pd
if indexes is None:
indexes = []
df = pd.read_parquet(path, **kwargs)
return from_pandas(df, indexes=indexes, timestamps=timestamps) |
Saves an [`EventSet`][temporian.EventSet] to a Parquet file.
Example:
```python
>>> output_path = str(tmp_dir / "output_data.parquet")
>>> evset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_parquet(evset, output_path)
```
Args:
evset: EventSet to save.
path: Path to the file. | def to_parquet(evset: EventSet, path: str, **kwargs):
"""Saves an [`EventSet`][temporian.EventSet] to a CSV file.
Example:
```python
>>> output_path = str(tmp_dir / "output_data.parquet")
>>> evset = tp.event_set(timestamps=[1,], features={"f1": [0.1]})
>>> tp.to_parquet(evset, output_path)
```
Args:
evset: EventSet to save.
path: Path to the file.
"""
# TODO: include indexes in the file's metadata
# so that they can be recovered automatically
df = to_pandas(evset)
df.to_parquet(path, **kwargs) |
Converts a Polars DataFrame into an EventSet.
See [`tp.event_set()`][temporian.event_set] for the list of supported
timestamp and feature types.
The `allow_copy` parameter is passed directly to Polars' `to_numpy` method.
If set to `False`, the conversion process may fail if Polars is unable to
perform a zero-copy conversion. Users are encouraged to refer to the Polars
documentation on `to_numpy` for detailed information on when a
non-zero-copy conversion might be required.
Note:
The function attempts to minimize data copying but will copy if required for compatibility.
Usage example:
```python
>>> import polars as pl
>>> df = pl.DataFrame(
... {
... "product_id": [666964, 666964, 574016, 574016],
... "timestamp": [1.0, 2.0, 3.0, 4.0],
... "costs": [740.0, 508.0, 573.0, None],
... }
... )
>>> evset = tp.from_polars(df, indexes=["product_id"])
>>> df1 = pl.DataFrame(
... {
... "timestamp": [1, 2, 3, 4],
... "id": [1, 2, 3, None],
... "category": [10, 20, 30, 40]
... }
... )
# allow_copy=False will result in zero_copy error for the below event set.
>>> evset1 = tp.from_polars(df1, indexes=["category"], allow_copy=True)
```
Args:
df: A non indexed Polars dataframe.
indexes: Names of the columns to use as indexes. If empty
(default), the data is not indexed. Only integer and string columns
can be used as indexes.
timestamps: Name of the column containing the timestamps. See
[`tp.event_set()`][temporian.event_set] for the list of supported
timestamp types.
name: Optional name of the EventSet. Used for debugging, and
graph serialization.
same_sampling_as: If set, the new EventSet is checked and tagged as
having the same sampling as `same_sampling_as`. Some operators,
such as [`EventSet.filter()`][temporian.EventSet.filter], require
their inputs to have the same sampling.
allow_copy: Allow memory to be copied to perform the conversion. If set
to False, causes conversions that are not zero-copy to fail.
Returns:
An EventSet. | def from_polars(
df: "polars.DataFrame",
indexes: Optional[List[str]] = None,
timestamps: str = "timestamp",
name: Optional[str] = None,
same_sampling_as: Optional[EventSet] = None,
allow_copy: bool = True,
) -> EventSet:
"""Converts a Polars DataFrame into an EventSet.
See [`tp.event_set()`][temporian.event_set] for the list of supported
timestamp and feature types.
The `allow_copy` parameter is passed directly to Polars' `to_numpy` method.
If set to `False`, the conversion process may fail if Polars is unable to
perform a zero-copy conversion. Users are encouraged to refer to the Polars
documentation on `to_numpy` for detailed information on when a
non-zero-copy conversion might be required.
Note:
The function attempts to minimize data copying but will copy if required for compatibility.
Usage example:
```python
>>> import polars as pl
>>> df = pl.DataFrame(
... {
... "product_id": [666964, 666964, 574016, 574016],
... "timestamp": [1.0, 2.0, 3.0, 4.0],
... "costs": [740.0, 508.0, 573.0, None],
... }
... )
>>> evset = tp.from_polars(df, indexes=["product_id"])
>>> df1 = pl.DataFrame(
... {
... "timestamp": [1, 2, 3, 4],
... "id": [1, 2, 3, None],
... "category": [10, 20, 30, 40]
... }
... )
# allow_copy=False will result in zero_copy error for the below event set.
>>> evset1 = tp.from_polars(df1, indexes=["category"], allow_copy=True)
```
Args:
df: A non indexed Polars dataframe.
indexes: Names of the columns to use as indexes. If empty
(default), the data is not indexed. Only integer and string columns
can be used as indexes.
timestamps: Name of the column containing the timestamps. See
[`tp.event_set()`][temporian.event_set] for the list of supported
timestamp types.
name: Optional name of the EventSet. Used for debugging, and
graph serialization.
same_sampling_as: If set, the new EventSet is checked and tagged as
having the same sampling as `same_sampling_as`. Some operators,
such as [`EventSet.filter()`][temporian.EventSet.filter], require
their inputs to have the same sampling.
allow_copy: Allow memory to be copied to perform the conversion. If set
to False, causes conversions that are not zero-copy to fail.
Returns:
An EventSet.
"""
if timestamps not in df.columns:
raise ValueError(
f"Timestamp column '{timestamps}' not found in the DataFrame."
)
# Extract timestamps, allowing copy if necessary for compatibility
timestamps_array = df.get_column(timestamps).to_numpy(allow_copy=allow_copy)
# Prepare features, allowing copy if necessary
feature_columns = [col for col in df.columns if col != timestamps]
feature_dict = {
col: df.get_column(col).to_numpy(allow_copy=allow_copy)
for col in feature_columns
}
return event_set(
timestamps=timestamps_array,
features=feature_dict,
indexes=indexes,
name=name,
same_sampling_as=same_sampling_as,
) |
Converts an [`EventSet`][temporian.EventSet] to a Polars DataFrame.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)],
... features={
... "feature_1": [0.5, 0.6],
... "my_index": ["red", "yellow"],
... },
... indexes=["my_index"],
... )
>>> df = tp.to_polars(evset)
```
Args:
evset: Input EventSet.
timestamp_to_datetime: If true, convert epoch timestamps to Polars Datetime objects.
timestamps: If true, include the timestamps as a column in the DataFrame.
tp_string_to_pl_string: If true, cast Temporian strings to Polars Utf8 strings.
Returns:
A Polars DataFrame created from the EventSet. | def to_polars(
evset: EventSet,
tp_string_to_pl_string: bool = True,
timestamp_to_datetime: bool = True,
timestamps: bool = True,
) -> "pl.DataFrame":
"""Converts an [`EventSet`][temporian.EventSet] to a Polars DataFrame.
Usage example:
```python
>>> from datetime import datetime
>>> evset = tp.event_set(
... timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)],
... features={
... "feature_1": [0.5, 0.6],
... "my_index": ["red", "yellow"],
... },
... indexes=["my_index"],
... )
>>> df = tp.to_polars(evset)
```
Args:
evset: Input EventSet.
timestamp_to_datetime: If true, convert epoch timestamps to Polars Datetime objects.
timestamps: If true, include the timestamps as a column in the DataFrame.
tp_string_to_pl_string: If true, cast Temporian strings to Polars Utf8 strings.
Returns:
A Polars DataFrame created from the EventSet.
"""
pl = import_pl()
timestamp_key = "timestamp"
index_names = evset.schema.index_names()
feature_names = evset.schema.feature_names()
column_names = index_names + feature_names
if timestamps:
column_names += [timestamp_key]
# Initialize an empty dictionary to hold column data
data_dict = {column_name: [] for column_name in column_names}
for index, data in evset.data.items():
assert isinstance(index, tuple)
if timestamps:
timestamps_data = data.timestamps
if evset.schema.is_unix_timestamp and timestamp_to_datetime:
# Convert Unix timestamps to Polars datetime objects
# Assuming timestamps_data is a list of integers representing Unix timestamps in seconds
datetime_series = pl.from_epoch(
pl.Series(timestamps_data), time_unit="s"
)
data_dict[timestamp_key].extend(datetime_series)
else:
data_dict[timestamp_key].extend(timestamps_data)
# Features
for feature_name, feature in zip(feature_names, data.features):
data_dict[feature_name].extend(feature)
# Indexes
num_timestamps = len(data.timestamps)
for index_name, index_item in zip(index_names, index):
data_dict[index_name].extend([index_item] * num_timestamps)
# Concatenate lists of values for each column
for col_name, col_data in data_dict.items():
data_dict[col_name] = pl.Series(col_data)
if tp_string_to_pl_string:
for feature in evset.schema.features:
if feature.dtype == DType.STRING:
data_dict[feature.name] = data_dict[feature.name].cast(pl.Utf8)
for index in evset.schema.indexes:
if index.dtype == DType.STRING:
data_dict[index.name] = data_dict[index.name].cast(pl.Utf8)
return pl.DataFrame(data_dict) |
Converts an [`EventSet`][temporian.EventSet] to a tensorflow Dataset.
Usage example:
```python
evset = event_set(
timestamps=[1, 2, 3, 4],
features={
"f1": [10, 11, 12, 13],
"f2": [b"a", b"b", b"c", b"d"],
"label": [0, 1, 0, 1],
},
)
tf_dataset = tp.to_tensorflow_dataset(evset)
def extract_label(example):
label = example.pop("label")
return example, label
tf_dataset = tf_dataset.map(extract_label).batch(100)
model = ... # A Keras model
model.fit(tf_dataset)
```
Args:
evset: Input event set.
timestamps: Output key containing the timestamps.
Returns:
TensorFlow dataset created from EventSet. | def to_tensorflow_dataset(
evset: EventSet, timestamps: str = "timestamp"
) -> "tensorflow.data.Dataset":
"""Converts an [`EventSet`][temporian.EventSet] to a tensorflow Dataset.
Usage example:
```python
evset = event_set(
timestamps=[1, 2, 3, 4],
features={
"f1": [10, 11, 12, 13],
"f2": [b"a", b"b", b"c", b"d"],
"label": [0, 1, 0, 1],
},
)
tf_dataset = tp.to_tensorflow_dataset(evset)
def extract_label(example):
label = example.pop("label")
return example, label
tf_dataset = tf_dataset.map(extract_label).batch(100)
model = ... # A Keras model
model.fit(tf_dataset)
```
Args:
evset: Input event set.
timestamps: Output key containing the timestamps.
Returns:
TensorFlow dataset created from EventSet.
"""
tf = import_tf()
if len(evset.schema.indexes) != 0:
evset = drop_index(evset)
data = evset.get_arbitrary_index_data()
dict_data = {timestamps: data.timestamps}
for feature_idx, feature in enumerate(evset.schema.features):
dict_data[feature.name] = data.features[feature_idx]
return tf.data.Dataset.from_tensor_slices(dict_data) |
Exports an EventSet into TF.Records of TF.Examples.
TF.Records of TF.Examples is one of the standard solutions to store data
for TensorFlow models.
https://www.tensorflow.org/tutorials/load_data/tfrecord
The GZIP compression is used.
Args:
evset: Event set to export.
path: Path to output TF.Record.
timestamps: Name of the output column containing timestamps.
format: Format of the events inside the received record. At the moment
only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See
[TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat]
for more. | def to_tensorflow_record(
evset: EventSet,
path: str,
timestamps: str = "timestamp",
format: TFRecordEventSetFormatChoices = TFRecordEventSetFormat.GROUPED_BY_INDEX,
):
"""Exports an EventSet into TF.Records of TF.Examples.
TF.Records of TF.Examples is one of the standard solutions to store data
for TensorFlow models.
https://www.tensorflow.org/tutorials/load_data/tfrecord
The GZIP compression is used.
Args:
evset: Event set to export.
path: Path to output TF.Record.
timestamps: Name of the output column containing timestamps.
format: Format of the events inside the received record. At the moment
only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See
[TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat]
for more.
"""
if format == TFRecordEventSetFormat.SINGLE_EVENTS:
raise ValueError(
"format=TFRecordEventSetFormat.SINGLE_EVENTS is not implemented"
)
if format != TFRecordEventSetFormat.GROUPED_BY_INDEX:
raise ValueError(f"Unknown format {format}")
tf = import_tf()
with tf.io.TFRecordWriter(path, options="GZIP") as file_writer:
def f(example: tf.train.Example, key: str):
return example.features.feature[key]
for index_key, index_value in evset.data.items():
ex = tf.train.Example()
# Timestamps
f(ex, timestamps).float_list.value[:] = index_value.timestamps
# Features
for feature_idx, feature_schema in enumerate(evset.schema.features):
if feature_schema.dtype in [
DType.BOOLEAN,
DType.INT32,
DType.INT64,
]:
f(ex, feature_schema.name).int64_list.value[
:
] = index_value.features[feature_idx]
elif feature_schema.dtype in [
DType.FLOAT32,
DType.FLOAT64,
]:
f(ex, feature_schema.name).float_list.value[
:
] = index_value.features[feature_idx]
elif feature_schema.dtype == DType.STRING:
f(ex, feature_schema.name).bytes_list.value[
:
] = index_value.features[feature_idx]
else:
raise ValueError("Non supported feature dtype")
# Indexes
for index_value, index_schema in zip(
index_key, evset.schema.indexes
):
if index_schema.dtype in [
DType.BOOLEAN,
DType.INT32,
DType.INT64,
]:
f(ex, index_schema.name).int64_list.value.append(
index_value
)
elif index_schema.dtype in [
DType.FLOAT32,
DType.FLOAT64,
]:
f(ex, index_schema.name).float_list.value.append(
index_value
)
elif index_schema.dtype == DType.STRING:
f(ex, index_schema.name).bytes_list.value.append(
index_value
)
else:
raise ValueError("Non supported index dtype")
file_writer.write(ex.SerializeToString()) |
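A minimal usage sketch for the export above; the EventSet construction follows the `tp.event_set` examples earlier in this section, and the output path is illustrative:

```python
import temporian as tp

evset = tp.event_set(
    timestamps=[1.0, 2.0, 3.0],
    features={"f1": [10.0, 11.0, 12.0], "label": [0, 1, 0]},
)

# One GZIP-compressed TF.Example is written per index value
# (a single example here, since the EventSet has no index).
to_tensorflow_record(evset, path="/tmp/example.tfrecord")
```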
Imports an EventSet from TF.Records of TF.Examples.
TF.Records of TF.Examples is one of the standard solutions to store data
for TensorFlow models.
https://www.tensorflow.org/tutorials/load_data/tfrecord
The GZIP compression is used.
Args:
path: Path to a TF.Record file, or list of paths to TF.Record files.
schema: Schema of the EventSet to import.
timestamps: Name of the output column containing timestamps.
format: Format of the events inside the received record. At the moment
only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See
[TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat]
for more.
num_parallel_reads: Number of files to read in parallel. Only used if
`path` is a list of files. If not set, files are read sequentially.
buffer_size: Number of bytes in the buffer. If not set, use a sensible
default value.
Returns:
Imported EventSet. | def from_tensorflow_record(
path: Union[str, List[str]],
schema: Schema,
timestamps: str = "timestamp",
format: TFRecordEventSetFormatChoices = TFRecordEventSetFormat.GROUPED_BY_INDEX,
num_parallel_reads: Optional[int] = None,
buffer_size: Optional[int] = None,
) -> EventSet:
"""Imports an EventSet from a TF.Records of TF.Examples.
TF.Records of TF.Examples is one of the standard solution to store data
for TensorFlow models.
https://www.tensorflow.org/tutorials/load_data/tfrecord
The GZIP compression is used.
Args:
path: Path to a TF.Record file, or list of paths to TF.Record files.
schema: Schema of the EventSet to import.
timestamps: Name of the output column containing timestamps.
format: Format of the events inside the received record. At the moment
only TFRecordEventSetFormat.GROUPED_BY_INDEX is supported. See
[TFRecordEventSetFormat][temporian.io.format.TFRecordEventSetFormat]
for more.
num_parallel_reads: Number of files to read in parallel. Only used if
`path` is a list of files. If not set, files are read sequentially.
buffer_size: Number of bytes in the buffer. If not set, use a sensible
default value.
Returns:
Imported EventSet.
"""
# TODO(gbm): Automatic schema
if format == TFRecordEventSetFormat.SINGLE_EVENTS:
raise ValueError(
"format=TFRecordEventSetFormat.SINGLE_EVENTS is not implemented"
)
if format != TFRecordEventSetFormat.GROUPED_BY_INDEX:
raise ValueError(f"Unknown format {format}")
tf = import_tf()
evset = EventSet(data={}, schema=deepcopy(schema))
def get_value(example: tf.train.Example, key: str):
if key not in example.features.feature:
raise ValueError(f"Missing feature {key}")
feature = example.features.feature[key]
if feature.HasField("int64_list"):
return feature.int64_list.value
elif feature.HasField("float_list"):
return feature.float_list.value
elif feature.HasField("bytes_list"):
return feature.bytes_list.value
else:
raise ValueError("Non supported type")
tf_dataset = tf.data.TFRecordDataset(
path,
compression_type="GZIP",
num_parallel_reads=num_parallel_reads,
buffer_size=buffer_size,
)
for serialized_example in tf_dataset:
example = tf.train.Example()
example.ParseFromString(serialized_example.numpy())
# Timestamps
timestamp_values = np.array(get_value(example, timestamps), np.float64)
num_timestamps = len(timestamp_values)
if not np.all(np.diff(timestamp_values) >= 0):
print("timestamp_values:", timestamp_values)
raise ValueError("The timestamps are not sorted")
# Indexes
indexes = []
for index_schema in schema.indexes:
value = get_value(example, index_schema.name)
if len(value) != 1:
raise ValueError(
"Index value is expected to have exactly one value."
f" Instead got {value}"
)
py_type = tp_dtype_to_py_type(index_schema.dtype)
indexes.append(py_type(value[0]))
# Features
features = []
for feature_schema in schema.features:
value = get_value(example, feature_schema.name)
if len(value) != num_timestamps:
raise ValueError(
f"Timestamp '{timestamp_values}' and feature"
f" '{feature_schema.name}' should contain the same number"
f" of values. Timestamp '{timestamp_values}' contains"
f" {num_timestamps} values and feature"
f" '{feature_schema.name}' contains {len(value)} values."
)
np_type = tp_dtype_to_np_dtype(feature_schema.dtype)
features.append(np.array(value, dtype=np_type))
evset.data[tuple(indexes)] = IndexData(
timestamps=timestamp_values, features=features
)
return evset |
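Continuing the export sketch above, a round-trip read; the schema must be passed explicitly (here reused from the original EventSet), since automatic schema inference is still a TODO in the code:

```python
# Read the records back with the schema of the EventSet that was written.
recovered = from_tensorflow_record(
    path="/tmp/example.tfrecord",
    schema=evset.schema,
)
print(recovered.schema.feature_names())  # ['f1', 'label']
```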
Returns the path to a test data file relative to the project's root, e.g.
temporian/test/test_data/io/input.csv.
Necessary when accessing these files in Bazel-ran tests. | def get_test_data_path(path: str) -> str:
"""Returns the path to a test data file relative to the project's root, e.g.
temporian/test/test_data/io/input.csv.
Necessary when accessing these files in Bazel-ran tests."""
dir = flags.FLAGS.test_srcdir
# If test_srcdir is not set, we are not running in Bazel, return the path.
if dir == "":
return path
return os.path.join(flags.FLAGS.test_srcdir, "temporian", path) |
Tests that the output of an operator is the expected one, and performs
tests that are common to all operators.
Extend with more checks as needed.
Currently tests:
- The result is the same as the expected output.
- The result has the same sampling as the expected output.
- Serialization / unserialization of the graph. | def assertOperatorResult(
test: absltest.TestCase,
result: EventSet,
expected: EventSet,
check_sampling=True,
):
"""Tests that the output of an operator is the expected one, and performs
tests that are common to all operators.
Extend with more checks as needed.
Currently tests:
- The result is the same as the expected output.
- The result has the same sampling as the expected output.
- Serialization / unserialization of the graph.
"""
assertEqualEventSet(test, result, expected)
if check_sampling:
result.check_same_sampling(expected)
# Graph can be serialized and deserialized
if result.creator is None:
raise ValueError("EventSet has no creator.")
op = result.creator
if op.definition.is_serializable:
serialized_op = serialization._serialize_operator(op)
nodes = {}
for node in op.inputs.values():
nodes[serialization._identifier(node)] = node
for node in op.outputs.values():
nodes[serialization._identifier(node)] = node
_ = serialization._unserialize_operator(serialized_op, nodes) |
Indents a string. | def indent(text: str, num_spaces: int = 4) -> str:
"""Indents a string."""
block = " " * num_spaces
return block + block.join(text.splitlines(True)) |
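For example, a small sketch of the expected behavior:

```python
text = "first line\nsecond line\n"
print(indent(text, num_spaces=2))
# Output:
#   first line
#   second line
```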
Converts a number of bytes into a human-readable form.
Example: 1500 -> "1.5 kB". | def pretty_num_bytes(nbytes: int) -> str:
"""Converts a number of bytes in a human readable form.
Example: 1500 -> "1.5 GB".
"""
if nbytes > 5e8:
return f"{(nbytes / 1e9):.1f} GB"
elif nbytes > 5e5:
return f"{(nbytes / 1e6):.1f} MB"
elif nbytes > 5e2:
return f"{(nbytes / 1e3):.1f} kB"
else:
return f"{nbytes} B" |
Sets the behavior when RT Check finds an issue.
Args:
value: If True, a RT check error will raise an exception. If False, a RT
check error will print a warning. | def runtime_check_raise_exception(value: bool) -> None:
"""Sets the behavior when RT Check finds an issue.
Args:
value: If True, a RT check error will raise an exception. If False, a RT
check error will print a warning.
"""
global _ERROR_RAISES_EXCEPTION
_ERROR_RAISES_EXCEPTION = value |
Text of a type mismatch error. | def _base_error(value, annotation):
"""Text of a type mismatch error."""
return (
f"Expecting value of type {annotation} but received value of type"
f' {type(value)}. The value is "{value}".'
) |
Checks recursively that "value" is compatible with "annotation". | def _check_annotation(trace: _Trace, is_compiled: bool, value, annotation):
"""Checks recursively that "value" is compatible with "annotation"."""
if _DEBUG:
logging.info(
"Checking %s (%s) against %s (%s)",
value,
type(value),
annotation,
type(annotation),
)
if isinstance(annotation, str):
# Unfold annotation
try:
annotation = typing.get_type_hints(annotation)
except ValueError:
logging.warning("Cannot unfold annotation %s", annotation)
if annotation in (
EventSet,
EventSetNode,
EventSetOperations,
) and isinstance(value, (EventSet, EventSetNode, EventSetOperations)):
return
if annotation in [inspect._empty, Any, Optional]:
# No annotation information
return None
annotation_args = typing.get_args(annotation)
if isinstance(annotation, typing._GenericAlias):
# The annotation is a possibly composed type e.g. List, List[int]
origin = typing.get_origin(annotation)
assert origin is not None
# Literal values check (e.g: Literal['*'])
if origin is Literal:
# Check param value in the allowed literal values
if value not in typing.get_args(annotation):
trace.exception(_base_error(value, annotation))
return
if origin is not Union:
if not isinstance(value, origin):
# The origin (e.g. "list" in "List[int]") is wrong.
trace.exception(_base_error(value, annotation))
# Check the sub-argument in composed types.
if origin in [List, Set, list, set]:
_check_annotation_list_or_set_or_uniform_tuple(
trace, is_compiled, value, annotation_args
)
elif origin in [dict, Dict]:
_check_annotation_dict(trace, is_compiled, value, annotation_args)
elif origin is Union:
_check_annotation_union(trace, is_compiled, value, annotation_args)
elif origin in [tuple, Tuple]:
_check_annotation_tuple(trace, is_compiled, value, annotation_args)
else:
if _DEBUG:
logging.warning(
"Unknown generic alias annotation %s (%s) with origin=%s",
annotation,
type(annotation),
origin,
)
else:
try:
is_instance_result = isinstance(value, annotation)
except TypeError:
if _DEBUG:
logging.warning(
"Cannot check %s (%s) against %s (%s)",
value,
type(value),
annotation,
type(annotation),
)
return None
if not is_instance_result:
trace.exception(_base_error(value, annotation))
return None |
Annotation that checks the arguments and outputs of a function at runtime.
@typecheck checks, at runtime, that the type hints of the arguments and output
of a function are satisfied.
Usage example:
```python
@typecheck
def f(a, b: int, c: str = "aze") -> List[str]:
return ["hello", "world"]
f(1, 2, "a") # Ok
f(1, 2, 3) # Fails
```
If combined with @compile, @typecheck should be applied after @compile (i.e.
place @compile just below @typecheck in the code).
This code only supports what is required by the Temporian API.
Does not support typing.GenericTypeAlias e.g. list[int]. Use List[int]
instead.
Args:
fn: Function to instrument.
Returns:
Instrumented function. | def typecheck(fn):
"""Annotation that check the arguments and outputs of a function at runtime.
@typecheck checks, at runtime, that the type hints of the arguments and output
of a function are satisfied.
Usage example:
```python
@typecheck
def f(a, b: int, c: str = "aze") -> List[str]:
return ["hello", "world"]
f(1, 2, "a") # Ok
f(1, 2, 3) # Fails
```
If combined with @compile, @typecheck should be applied after @compile (i.e.
place @compile just below @typecheck in the code).
This code only supports what is required by the Temporian API.
Does not support typing.GenericTypeAlias e.g. list[int]. Use List[int]
instead.
Args:
fn: Function to instrument.
Returns:
Instrumented function.
"""
is_compiled = False
if hasattr(fn, "__wrapped__"):
signature_fn = fn.__wrapped__
if hasattr(signature_fn, "is_tp_compiled"):
is_compiled = getattr(signature_fn, "is_tp_compiled")
else:
signature_fn = fn
signature = inspect.signature(signature_fn)
@wraps(fn)
def wrapper(*args, **kwargs):
try:
# Check inputs
all_args = signature.bind(*args, **kwargs)
for arg_key, arg_value in all_args.arguments.items():
trace = _Trace().add_context(
f'When checking the argument "{arg_key}" of function'
f' "{fn.__name__}".'
)
if arg_key not in signature.parameters:
raise ValueError(f'Unexpected argument "{arg_key}"')
param = signature.parameters[arg_key]
if param.kind in [
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
]:
_check_annotation(
trace,
is_compiled,
arg_value,
param.annotation,
)
elif param.kind is inspect.Parameter.VAR_POSITIONAL:
_check_annotation_list_or_set_or_uniform_tuple(
trace,
is_compiled,
arg_value,
[param.annotation],
)
elif param.kind is inspect.Parameter.VAR_KEYWORD:
for sub_key, sub_value in arg_value.items():
_check_annotation(
_Trace().add_context(
f'When checking the key "{sub_key}" of argument'
f' "{arg_key}" of function "{fn.__name__}".'
),
is_compiled,
sub_value,
param.annotation,
)
except ValueError as e:
if _ERROR_RAISES_EXCEPTION:
# Reset the stack trace of the exception.
e.__traceback__ = None
raise e
else:
logging.warning("%s", str(e))
output = fn(*args, **kwargs)
try:
# Check outputs
trace = _Trace().add_context(
f'When checking the returned value of function "{fn.__name__}".'
)
_check_annotation(
trace,
is_compiled,
output,
signature.return_annotation,
)
except ValueError as e:
if _ERROR_RAISES_EXCEPTION:
# Reset the stack trace of the exception.
e.__traceback__ = None
raise e
else:
logging.warning("%s", str(e))
return output
setattr(wrapper, "_typecheck", True)
return wrapper |
Copies the files matching a pattern from the src to the dst directory. | def rec_glob_copy(src_dir: str, dst_dir: str, pattern: str):
"""Copies the files matching a pattern from the src to the dst directory."""
# TODO: Use "root_dir=src_dir" argument when >=python3.10
os.makedirs(dst_dir, exist_ok=True)
for fall in glob.glob(f"{src_dir}/{pattern}", recursive=True):
frel = os.path.relpath(fall, src_dir)
dst = f"{dst_dir}/{frel}"
os.makedirs(os.path.dirname(dst), exist_ok=True)
s.copy(f"{src_dir}/{frel}", dst) |
Extracts the value of a keyword from a call. | def get_keyword(call: ast.Call, key: str) -> Any:
"""Extracts the value of a keyword from a call."""
for keyword in call.keywords:
if keyword.arg == key:
assert isinstance(keyword.value, ast.Constant)
return keyword.value.value
raise ValueError(f"Cannot find {key} in {ast.dump(call, indent=4)}") |
Extracts the value of a keyword from a call.
The value is expected to be a list.
Returns [] if the keyword does not exist. | def get_keyword_list_or_empty(call: ast.Call, key: str) -> List[Any]:
"""Extracts the value of a keyword from a call.
The value is expected to be a list.
Returns [] if the keyword does not exist.
"""
for keyword in call.keywords:
if keyword.arg == key:
assert isinstance(keyword.value, ast.List)
ret = []
for v in keyword.value.elts:
assert isinstance(v, ast.Constant)
ret.append(v.value)
return ret
return [] |
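A small sketch showing both keyword helpers on a parsed BUILD-style call; the rule text is illustrative:

```python
import ast

# Parse a single build-rule-like call expression.
tree = ast.parse('py_library(name="io", srcs=["io.py"], deps=[])', mode="eval")
call = tree.body
assert isinstance(call, ast.Call)

print(get_keyword(call, "name"))                      # io
print(get_keyword_list_or_empty(call, "srcs"))        # ['io.py']
print(get_keyword_list_or_empty(call, "visibility"))  # [] (keyword absent)
```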
Lists the imports in a .py file. | def source_imports(path: str) -> List[str]:
"""Lists the imports in a .py file."""
file_ast = ast.parse(open(path, encoding="utf-8").read())
imports = []
for item in file_ast.body:
if isinstance(item, ast.Import):
for name in item.names:
imports.append(name.name)
if isinstance(item, ast.ImportFrom):
for name in item.names:
assert item.module is not None
imports.append(item.module + "." + name.name)
return imports |
List the build rules in a BUILD file. | def list_build_rules(path: str) -> List[BuildRule]:
"""List the build rules in a BUILD file."""
build_content = open(path, encoding="utf-8").read()
build_content = re.sub(
THIRD_PARTY_RULE_PREFIX + r"(\S+)",
r'"//\1",',
build_content,
)
file_ast = ast.parse(build_content)
rules: List[BuildRule] = []
for item in file_ast.body:
if not isinstance(item, ast.Expr):
continue
if not isinstance(item.value, ast.Call):
continue
if not isinstance(item.value.func, ast.Name):
continue
if item.value.func.id not in ALLOWED_BUILD_RULES:
continue
name = get_keyword(item.value, "name")
deps = get_keyword_list_or_empty(item.value, "deps")
srcs = get_keyword_list_or_empty(item.value, "srcs")
assert isinstance(name, str)
if not srcs:
# Skip rules without sources.
continue
rules.append(
BuildRule(name=name, deps=deps, srcs=srcs, rule=item.value.func.id)
)
return rules |
Expands completely a bazel dep. | def expand_dep(dep: str, rule_dir: str) -> Tuple[str, ...]:
"""Expands completely a bazel dep."""
if dep[0] == ":":
dep = rule_dir + dep
elif dep[:2] == "//":
dep = dep[2:]
else:
raise ValueError(f"Wrong dep format: {dep}")
items = dep.split("/")
final_split = items[-1].split(":")
if len(final_split) == 1:
items.append(items[-1])
elif len(final_split) == 2:
items.pop()
items.extend(final_split)
else:
assert False
return tuple(items) |
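A few worked examples of the expansion (paths are illustrative):

```python
# A local ":target" reference is prefixed with the rule's directory.
print(expand_dep(":io", "temporian/core"))    # ('temporian', 'core', 'io')

# An absolute "//pkg:target" reference keeps its own path.
print(expand_dep("//temporian/io:csv", ""))   # ('temporian', 'io', 'csv')

# Without an explicit target, the last package name is repeated.
print(expand_dep("//temporian/io", ""))       # ('temporian', 'io', 'io')
```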
List the possible source file of a given import. | def list_possible_source_of_import(
imp_items: Tuple[str, ...],
) -> List[Tuple[str, ...]]:
"""List the possible source file of a given import."""
assert len(imp_items) >= 1
if imp_items[0] in THIRD_PARTY_MODULES:
return [(imp_items[0], "__init__.py")]
# Example of situations with import a.b.c (3)
srcs = []
# a/b.py
if len(imp_items) >= 2:
srcs.append(imp_items[:-2] + (imp_items[-2] + ".py",))
# a/b/c.py
srcs.append(imp_items[:-1] + (imp_items[-1] + ".py",))
# a/b/c/__init__.py
srcs.append(imp_items + ("__init__.py",))
# a/b/__init__.py
srcs.append(imp_items[:-1] + ("__init__.py",))
return srcs |
Decomposes a path into individual dirname.
For example "a/b/c" becomes ["a", "b", "c"]. | def extract_dirname_from_path(path: str) -> List[str]:
"""Decomposes a path into individual dirname.
For example "a/b/c" becomes ["a", "b", "c"].
"""
dirnames = []
while True:
path, dirname = os.path.split(path)
if dirname and dirname != ".":
dirnames.append(dirname)
else:
if path and path != ".":
dirnames.append(path)
break
dirnames.reverse()
return dirnames |
List all the BUILD files.
Returns:
The list of (directory, filename) of all BUILD files. | def find_all_build_files(dir: str) -> List[Tuple[str, str]]:
"""List all the BUILD files.
Returns:
The list of (directory, filename) of all BUILD files.
"""
build_file_dirs = []
for root, _, files in os.walk(dir):
for file in files:
if file in BUILD_FILENAMES:
root = root.strip("./")
build_file_dirs.append((root, file))
return build_file_dirs |
Mapping from source file to build rules. | def find_source_to_rules(
build_file_dirs: List[Tuple[str, str]],
) -> SourceToRule:
"""Mapping from source file to build rules."""
source_to_rules = defaultdict(lambda: [])
for build_file_dir, build_file_name in build_file_dirs:
rule_base = tuple(extract_dirname_from_path(build_file_dir))
file_rules = list_build_rules(
os.path.join(build_file_dir, build_file_name)
)
for rule in file_rules:
for src in rule.srcs:
source_to_rules[rule_base + (src,)].append(
rule_base + (rule.name,)
)
return source_to_rules |
Computes the operation on the deps to support all the imports.
Args:
deps: Dependencies of the rule.
imports: Imports of the rule.
rule_dir: Path of the rule relative to the repo root.
source_to_rules: Mapping from all available source files to rules. | def compute_delta(
deps: List[str],
imports: List[str],
rule_dir: str,
source_to_rules: SourceToRule,
rule_name: str,
) -> Optional[DepsDelta]:
"""Computes the operation on the deps to support all the imports.
Args:
deps: Dependencies of the rule.
imports: Imports of the rule.
rule_dir: Path of the rule relative to the repo root.
source_to_rules: Mapping from all available source files to rules.
"""
issues = []
adds = set()
subs = set()
# The current dependencies of the rule.
expanded_deps = set([expand_dep(dep, rule_dir) for dep in deps])
# Dependencies effectively used by this rule.
used_deps = set()
for imp in imports:
imp_items = tuple(imp.split("."))
# This import does not need a build rule.
if imp_items[0] in BUILT_IN_MODULES or imp in BUILT_IN_MODULES:
continue
# The source files that might solve this import.
possible_srcs = list_possible_source_of_import(imp_items)
matching_possible_src = None
for possible_src in possible_srcs:
if possible_src in source_to_rules:
matching_possible_src = possible_src
break
if matching_possible_src is None:
issues.append(
f'Cannot infer dependency for "{imp}". Possible source files:'
f" {possible_srcs}."
)
continue
possible_deps = source_to_rules[matching_possible_src]
if len(possible_deps) > 1:
issues.append(f'Multiple possible rules for "{imp}"')
# This module is part of the same rule as its user.
if possible_deps[0] == expand_dep(":" + rule_name, rule_dir):
continue
if possible_deps[0] not in expanded_deps:
adds.add(possible_deps[0])
else:
used_deps.add(possible_deps[0])
for dep in expanded_deps:
if dep in used_deps:
continue
subs.add(dep)
if adds or subs or issues:
return DepsDelta(adds=list(adds), subs=list(subs), issues=issues)
else:
return None |
Converts a tuple rule to a user rule.
Example: ("a", "b", "c") => "//a/b:c". | def to_user_rule(normalized_rule: Tuple[str, ...]) -> str:
"""Converts a tuple rule to a user rule.
Example: ("a", "b", "c") => "//a/b:c".
"""
if (
len(normalized_rule) == 2
and normalized_rule[0] == normalized_rule[1]
and normalized_rule[0] in THIRD_PARTY_MODULES
):
return THIRD_PARTY_RULE_PREFIX + normalized_rule[0]
if len(normalized_rule) >= 2 and normalized_rule[-1] == normalized_rule[-2]:
return '"//' + "/".join(normalized_rule[:-1]) + '",'
else:
return (
'"//'
+ "/".join(normalized_rule[:-1])
+ ":"
+ normalized_rule[-1]
+ '",'
) |
Clean a BUILD file. | def clean_build_rule(
rule: BuildRule, rule_dir: str, source_to_rules: SourceToRule
):
"""Clean a BUILD file."""
imports = []
for src in rule.srcs:
src_path = os.path.join(rule_dir, src)
imports.extend(source_imports(src_path))
return compute_delta(
rule.deps, imports, rule_dir, source_to_rules, rule.name
) |
Google license. | def license_content():
"""Google license."""
return """# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" |
Download the data from Yann's website, unless it's already here. | def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath |
Extract the images into a 4D uint8 numpy array [index, y, x, depth]. | def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data |
Convert class labels from scalars to one-hot vectors. | def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot |
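A small worked example of the flat-index scatter used above (assumes `import numpy`, as in the surrounding code):

```python
labels = numpy.array([0, 2, 1])
print(dense_to_one_hot(labels, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```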
Extract the labels into a 1D uint8 numpy array [index]. | def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels |
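A hedged end-to-end sketch tying the helpers above together. The filenames are the standard MNIST archive names, SOURCE_URL is assumed to be defined elsewhere in this module, and the download needs network access:

train_images_path = maybe_download('train-images-idx3-ubyte.gz', '/tmp/mnist')
train_labels_path = maybe_download('train-labels-idx1-ubyte.gz', '/tmp/mnist')
images = extract_images(train_images_path)                 # (60000, 28, 28, 1), uint8
labels = extract_labels(train_labels_path, one_hot=True)   # (60000, 10)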
Prepare a symmetric MPO.
Args:
Jz, Jxy, Bz: Hamiltonian parameters.
dtype: data type.
Returns:
`tn.FiniteMPO`: The mpo of the XXZ Heisenberg model with U(1) symmetry. | def blocksparse_XXZ_mpo(Jz: np.ndarray,
Jxy: np.ndarray,
Bz: np.ndarray,
dtype: Type[np.number] = np.float64) -> tn.FiniteMPO:
"""
Prepare a symmetric MPO.
Args:
Jz, Jxy, Bz: Hamiltonian parameters.
dtype: data type.
Returns:
`tn.FiniteMPO`: The mpo of the XXZ Heisenberg model with U(1) symmetry.
"""
dense_mpo = tn.FiniteXXZ(Jz, Jxy, Bz, dtype=dtype).tensors
ileft = tn.Index(tn.U1Charge(np.array([0])), False)
iright = ileft.flip_flow()
i1 = tn.Index(tn.U1Charge(np.array([0, -1, 1, 0, 0])), False)
i2 = tn.Index(tn.U1Charge(np.array([0, -1, 1, 0, 0])), True)
i3 = tn.Index(tn.U1Charge(np.array([0, 1])), False)
i4 = tn.Index(tn.U1Charge(np.array([0, 1])), True)
mpotensors = [tn.BlockSparseTensor.fromdense(
[ileft, i2, i3, i4], dense_mpo[0])] + [
tn.BlockSparseTensor.fromdense([i1, i2, i3, i4], tensor)
for tensor in dense_mpo[1:-1]
] + [tn.BlockSparseTensor.fromdense([i1, iright, i3, i4], dense_mpo[-1])]
return tn.FiniteMPO(mpotensors, backend='symmetric') |
Prepare a U(1) symmetric spin 1/2 MPS at zero total magnetization.
Args:
N: Number of spins.
D: The bond dimension.
B: The number of symmetry sectors on each ancillary link.
dtype: The data type of the MPS.
Returns:
`tn.FiniteMPS`: A U(1) symmetric spin 1/2 mps at zero total magnetization. | def blocksparse_halffilled_spin_mps(N: int,
D: int,
B: int = 5,
dtype: Type[np.number] = np.float64):
"""
Prepare a U(1) symmetric spin 1/2 MPS at zero total magnetization.
Args:
N: Number of spins.
D: The bond dimension.
B: The number of symmetry sectors on each ancillary link.
dtype: The data type of the MPS.
Returns:
`tn.FiniteMPS`: A U(1) symmetric spin 1/2 mps at zero total magnetization.
"""
auxcharges = [tn.U1Charge([0])] + [
tn.U1Charge.random(D, n // 2, n // 2 + B) for n in range(N - 1)
] + [tn.U1Charge([N // 2])]
tensors = [
tn.BlockSparseTensor.random([
tn.Index(auxcharges[n], False),
tn.Index(tn.U1Charge([0, 1]), False),
tn.Index(auxcharges[n + 1], True)
],
dtype=dtype) for n in range(N)
]
return tn.FiniteMPS(tensors, canonicalize=True, backend='symmetric') |
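A hedged usage sketch of the constructor above: build a 16-site, bond-dimension-32 symmetric MPS. Because every tensor carries explicit U(1) charges, later operations stay in the zero-magnetization sector:

mps = blocksparse_halffilled_spin_mps(N=16, D=32, B=5, dtype=np.float64)
print(len(mps.tensors))  # 16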
Helper function to initialize an MPS for a given backend.
Args:
N: Number of spins.
D: The bond dimension.
dtype: The data type of the MPS.
Returns:
`tn.FiniteMPS`: A spin 1/2 mps for the corresponding backend. | def initialize_spin_mps(N: int, D: int, dtype: Type[np.number], backend: Text):
"""
Helper function to initialize an MPS for a given backend.
Args:
N: Number of spins.
D: The bond dimension.
dtype: The data type of the MPS.
Returns:
`tn.FiniteMPS`: A spin 1/2 mps for the corresponding backend.
"""
if backend == 'symmetric':
return blocksparse_halffilled_spin_mps(N=N, D=D, B=5, dtype=dtype)
return tn.FiniteMPS.random([2] * N, [D] * (N - 1), dtype=dtype, backend=backend) |
Helper function to initialize the XXZ Heisenberg MPO
for a given backend.
Args:
Jz, Jxy, Bz: Hamiltonian parameters.
dtype: data type.
backend: The backend.
Returns:
    `tn.FiniteMPO`: The XXZ Heisenberg MPO for the corresponding backend. | def initialize_XXZ_mpo(Jz: np.ndarray, Jxy: np.ndarray, Bz: np.ndarray,
dtype: Type[np.number], backend: Text):
"""
Helper function to initialize the XXZ Heisenberg MPO
for a given backend.
Args:
Jz, Jxy, Bz: Hamiltonian parameters.
dtype: data type.
backend: The backend.
Returns:
    `tn.FiniteMPO`: The XXZ Heisenberg MPO for the corresponding backend.
"""
if backend == 'symmetric':
return blocksparse_XXZ_mpo(Jz=Jz, Jxy=Jxy, Bz=Bz, dtype=dtype)
return tn.FiniteXXZ(Jz, Jxy, Bz, dtype=dtype, backend=backend) |
Run two-site dmrg for the XXZ Heisenberg model using a given backend.
Args:
N: Number of spins.
D: The bond dimension.
dtype: The data type of the MPS.
Jz, Jxy, Bz: Hamiltonian parameters.
num_sweeps: Number of DMRG sweeps to perform.
backend: The backend.
Returns:
float/complex: The energy upon termination of DMRG. | def run_twosite_dmrg(N: int, D: int, dtype: Type[np.number], Jz: np.ndarray,
Jxy: np.ndarray, Bz: np.ndarray, num_sweeps: int,
backend: Text):
"""
Run two-site dmrg for the XXZ Heisenberg model using a given backend.
Args:
N: Number of spins.
D: The bond dimension.
dtype: The data type of the MPS.
Jz, Jxy, Bz: Hamiltonian parameters.
num_sweeps: Number of DMRG sweeps to perform.
backend: The backend.
Returns:
float/complex: The energy upon termination of DMRG.
"""
mps = initialize_spin_mps(N, 32, dtype, backend)
mpo = initialize_XXZ_mpo(Jz, Jxy, Bz, dtype, backend)
dmrg = tn.FiniteDMRG(mps, mpo)
return dmrg.run_two_site(
max_bond_dim=D, num_sweeps=num_sweeps, num_krylov_vecs=10, verbose=1) |
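A hedged usage sketch: ground-state search for a 20-site XXZ chain with the block-sparse backend. The couplings and fields are plain numpy arrays of length N-1 and N:

N = 20
energy = run_twosite_dmrg(
    N=N,
    D=64,
    dtype=np.float64,
    Jz=np.ones(N - 1),
    Jxy=np.ones(N - 1),
    Bz=np.zeros(N),
    num_sweeps=4,
    backend='symmetric')
print('ground-state energy estimate:', energy)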
Creates output node axes corresponding to the Fourier transform of inputs.
Uses the Cooley-Tukey FFT algorithm. All axes are expected to have length 2. The
input axes must be (and output axes will be) binary.
Args:
inputs: The node axes to act upon.
Returns:
A list of `Edges` containing the result. | def add_fft(inputs: List[tn.Edge],) -> List[tn.Edge]:
"""Creates output node axes corresponding to the Fourier transform of inputs.
  Uses the Cooley-Tukey FFT algorithm. All axes are expected to have length 2. The
input axes must be (and output axes will be) binary.
Args:
inputs: The node axes to act upon.
Returns:
A list of `Edges` containing the result.
"""
if not all(e.is_dangling() for e in inputs):
raise ValueError("Inputs must be dangling edges.")
hadamard = np.array([[1, 1], [1, -1]], dtype=np.complex128) / np.sqrt(2)
def cz(p: int) -> np.ndarray:
result = np.eye(4, dtype=np.complex128)
result[3, 3] = np.exp(-1j * np.pi / 2**p)
return result.reshape((2,) * 4)
def inline_stitch(targets: List[int], tensor: np.ndarray, name: str):
"""Applies an operation to the targeted axis indices."""
op_node = tn.Node(tensor, name)
for k, t in enumerate(targets):
incoming_state = state[t]
receiving_port = op_node[k]
output_port = op_node[k + len(targets)]
incoming_state ^ receiving_port
state[t] = output_port
state = list(inputs)
# Mix "n twiddle.
n = len(state)
for i in range(n):
for j in range(1, i + 1):
inline_stitch([i - j, i], cz(j), "TWIDDLE_{}_{}".format(j, i))
inline_stitch([i], hadamard, "MIX_{}".format(i))
# FFT reverses bit order.
return state[::-1] |
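A hedged usage sketch: apply the circuit above to the 3-qubit basis state |000> and contract the network with the standard TensorNetwork helpers (tn.reachable and tn.contractors.greedy are assumed to be available, as in the library):

qubits = [tn.Node(np.array([1.0, 0.0], dtype=np.complex128)) for _ in range(3)]
out_edges = add_fft([q[0] for q in qubits])
result = tn.contractors.greedy(tn.reachable(qubits[0]), output_edge_order=out_edges)
# For |000> every output amplitude equals 1/sqrt(8).
print(result.tensor)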
Create a 3SAT TensorNetwork of the given 3SAT clauses.
After full contraction, this network will be a tensor of size (2, 2, ..., 2)
with the rank being the same as the number of variables. Each element of the
final tensor represents whether the given assignment satisfies all of the
clauses. For example, if final_node.get_tensor()[0][1][1] == 1, then the
assignment (False, True, True) satisfies all clauses.
Args:
clauses: A list of 3 int tuples. Each element in the tuple corresponds to a
variable in the clause. If that int is negative, that variable is negated
in the clause.
Returns:
    var_edges: The edges for the given variables.
Raises:
ValueError: If any of the clauses have a 0 in them. | def sat_tn(clauses: List[Tuple[int, int, int]]) -> List[tn.Edge]:
"""Create a 3SAT TensorNetwork of the given 3SAT clauses.
After full contraction, this network will be a tensor of size (2, 2, ..., 2)
with the rank being the same as the number of variables. Each element of the
final tensor represents whether the given assignment satisfies all of the
clauses. For example, if final_node.get_tensor()[0][1][1] == 1, then the
  assignment (False, True, True) satisfies all clauses.
Args:
clauses: A list of 3 int tuples. Each element in the tuple corresponds to a
variable in the clause. If that int is negative, that variable is negated
in the clause.
Returns:
    var_edges: The edges for the given variables.
Raises:
ValueError: If any of the clauses have a 0 in them.
"""
for clause in clauses:
if 0 in clause:
raise ValueError("0's are not allowed in the clauses.")
var_set = set()
for clause in clauses:
var_set |= {abs(x) for x in clause}
num_vars = max(var_set)
var_nodes = []
var_edges = []
# Prepare the variable nodes.
for _ in range(num_vars):
new_node = tn.Node(np.ones(2, dtype=np.int32))
var_nodes.append(new_node)
var_edges.append(new_node[0])
# Create the nodes for each clause
for clause in clauses:
    a, b, c = clause
clause_tensor = np.ones((2, 2, 2), dtype=np.int32)
clause_tensor[(-np.sign(a) + 1) // 2, (-np.sign(b) + 1) // 2,
(-np.sign(c) + 1) // 2] = 0
clause_node = tn.Node(clause_tensor)
# Connect the variable to the clause through a copy tensor.
for i, var in enumerate(clause):
copy_tensor_node = tn.CopyNode(3, 2)
clause_node[i] ^ copy_tensor_node[0]
var_edges[abs(var) - 1] ^ copy_tensor_node[1]
var_edges[abs(var) - 1] = copy_tensor_node[2]
return var_edges |
Create a 3SAT Count TensorNetwork.
After full contraction, the final node will be the count of all possible
solutions to the given 3SAT problem.
Args:
clauses: A list of 3 int tuples. Each element in the tuple corresponds to a
variable in the clause. If that int is negative, that variable is negated
in the clause.
Returns:
nodes: The set of nodes | def sat_count_tn(clauses: List[Tuple[int, int, int]]) -> Set[tn.AbstractNode]:
"""Create a 3SAT Count TensorNetwork.
After full contraction, the final node will be the count of all possible
solutions to the given 3SAT problem.
Args:
clauses: A list of 3 int tuples. Each element in the tuple corresponds to a
variable in the clause. If that int is negative, that variable is negated
in the clause.
Returns:
nodes: The set of nodes
"""
var_edges1 = sat_tn(clauses)
var_edges2 = sat_tn(clauses)
for edge1, edge2 in zip(var_edges1, var_edges2):
edge1 ^ edge2
  # TODO(chaseriley): Support disconnected SAT graphs.
return tn.reachable(var_edges1[0].node1) |
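A hedged usage sketch: the single clause (x1 or x2 or x3) excludes only the all-False assignment, so contracting the counting network should yield 7:

nodes = sat_count_tn([(1, 2, 3)])
count = tn.contractors.greedy(nodes).tensor
print(count)  # expected: 7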
Computes the energy using a layer of uniform binary MERA.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom of the
MERA layer.
state: The 3-site reduced state (rank-6 tensor) defined at the top of the
MERA layer.
isometry: The isometry tensor (rank 3) of the binary MERA.
disentangler: The disentangler tensor (rank 4) of the binary MERA.
Returns:
The energy. | def binary_mera_energy(hamiltonian, state, isometry, disentangler):
"""Computes the energy using a layer of uniform binary MERA.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom of the
MERA layer.
state: The 3-site reduced state (rank-6 tensor) defined at the top of the
MERA layer.
isometry: The isometry tensor (rank 3) of the binary MERA.
disentangler: The disentangler tensor (rank 4) of the binary MERA.
Returns:
The energy.
"""
backend = "jax"
out = []
for dirn in ('left', 'right'):
iso_l = tensornetwork.Node(isometry, backend=backend)
iso_c = tensornetwork.Node(isometry, backend=backend)
iso_r = tensornetwork.Node(isometry, backend=backend)
iso_l_con = tensornetwork.linalg.node_linalg.conj(iso_l)
iso_c_con = tensornetwork.linalg.node_linalg.conj(iso_c)
iso_r_con = tensornetwork.linalg.node_linalg.conj(iso_r)
op = tensornetwork.Node(hamiltonian, backend=backend)
rho = tensornetwork.Node(state, backend=backend)
un_l = tensornetwork.Node(disentangler, backend=backend)
un_l_con = tensornetwork.linalg.node_linalg.conj(un_l)
un_r = tensornetwork.Node(disentangler, backend=backend)
un_r_con = tensornetwork.linalg.node_linalg.conj(un_r)
tensornetwork.connect(iso_l[2], rho[0])
tensornetwork.connect(iso_c[2], rho[1])
tensornetwork.connect(iso_r[2], rho[2])
tensornetwork.connect(iso_l[0], iso_l_con[0])
tensornetwork.connect(iso_l[1], un_l[2])
tensornetwork.connect(iso_c[0], un_l[3])
tensornetwork.connect(iso_c[1], un_r[2])
tensornetwork.connect(iso_r[0], un_r[3])
tensornetwork.connect(iso_r[1], iso_r_con[1])
if dirn == 'right':
tensornetwork.connect(un_l[0], un_l_con[0])
tensornetwork.connect(un_l[1], op[3])
tensornetwork.connect(un_r[0], op[4])
tensornetwork.connect(un_r[1], op[5])
tensornetwork.connect(op[0], un_l_con[1])
tensornetwork.connect(op[1], un_r_con[0])
tensornetwork.connect(op[2], un_r_con[1])
elif dirn == 'left':
tensornetwork.connect(un_l[0], op[3])
tensornetwork.connect(un_l[1], op[4])
tensornetwork.connect(un_r[0], op[5])
tensornetwork.connect(un_r[1], un_r_con[1])
tensornetwork.connect(op[0], un_l_con[0])
tensornetwork.connect(op[1], un_l_con[1])
tensornetwork.connect(op[2], un_r_con[0])
tensornetwork.connect(un_l_con[2], iso_l_con[1])
tensornetwork.connect(un_l_con[3], iso_c_con[0])
tensornetwork.connect(un_r_con[2], iso_c_con[1])
tensornetwork.connect(un_r_con[3], iso_r_con[0])
tensornetwork.connect(iso_l_con[2], rho[3])
tensornetwork.connect(iso_c_con[2], rho[4])
tensornetwork.connect(iso_r_con[2], rho[5])
# FIXME: Check that this is giving us a good path!
out.append(
contractors.branch(tensornetwork.reachable(rho),
nbranch=2).get_tensor())
return 0.5 * sum(out) |
Updates the isometry with the aim of reducing the energy.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom of the
MERA layer.
state: The 3-site reduced state (rank-6 tensor) defined at the top of the
MERA layer.
isometry: The isometry tensor (rank 3) of the binary MERA.
disentangler: The disentangler tensor (rank 4) of the binary MERA.
Returns:
The updated isometry. | def update_iso(hamiltonian, state, isometry, disentangler):
"""Updates the isometry with the aim of reducing the energy.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom of the
MERA layer.
state: The 3-site reduced state (rank-6 tensor) defined at the top of the
MERA layer.
isometry: The isometry tensor (rank 3) of the binary MERA.
disentangler: The disentangler tensor (rank 4) of the binary MERA.
Returns:
The updated isometry.
"""
env = env_iso(hamiltonian, state, isometry, disentangler)
nenv = tensornetwork.Node(env, axis_names=["l", "r", "t"], backend="jax")
output_edges = [nenv["l"], nenv["r"], nenv["t"]]
nu, _, nv, _ = tensornetwork.split_node_full_svd(
nenv, [nenv["l"], nenv["r"]], [nenv["t"]],
left_edge_name="s1",
right_edge_name="s2")
nu["s1"].disconnect()
nv["s2"].disconnect()
tensornetwork.connect(nu["s1"], nv["s2"])
nres = tensornetwork.contract_between(nu, nv, output_edge_order=output_edges)
return np.conj(nres.get_tensor()) |
Updates the disentangler with the aim of reducing the energy.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom of the
MERA layer.
state: The 3-site reduced state (rank-6 tensor) defined at the top of the
MERA layer.
isometry: The isometry tensor (rank 3) of the binary MERA.
disentangler: The disentangler tensor (rank 4) of the binary MERA.
Returns:
The updated disentangler. | def update_dis(hamiltonian, state, isometry, disentangler):
"""Updates the disentangler with the aim of reducing the energy.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom of the
MERA layer.
state: The 3-site reduced state (rank-6 tensor) defined at the top of the
MERA layer.
isometry: The isometry tensor (rank 3) of the binary MERA.
disentangler: The disentangler tensor (rank 4) of the binary MERA.
Returns:
The updated disentangler.
"""
env = env_dis(hamiltonian, state, isometry, disentangler)
nenv = tensornetwork.Node(
env, axis_names=["bl", "br", "tl", "tr"], backend="jax")
output_edges = [nenv["bl"], nenv["br"], nenv["tl"], nenv["tr"]]
nu, _, nv, _ = tensornetwork.split_node_full_svd(
nenv, [nenv["bl"], nenv["br"]], [nenv["tl"], nenv["tr"]],
left_edge_name="s1",
right_edge_name="s2")
nu["s1"].disconnect()
nv["s2"].disconnect()
tensornetwork.connect(nu["s1"], nv["s2"])
nres = tensornetwork.contract_between(nu, nv, output_edge_order=output_edges)
return np.conj(nres.get_tensor()) |
Applies a shift to a hamiltonian.
Args:
hamiltonian: The hamiltonian tensor (rank 6).
shift: The amount by which to shift. If `None`, shifts so that the local
term is negative semi-definite.
Returns:
The shifted Hamiltonian. | def shift_ham(hamiltonian, shift=None):
"""Applies a shift to a hamiltonian.
Args:
hamiltonian: The hamiltonian tensor (rank 6).
shift: The amount by which to shift. If `None`, shifts so that the local
term is negative semi-definite.
Returns:
The shifted Hamiltonian.
"""
hmat = np.reshape(hamiltonian, (2**3, -1))
if shift is None:
shift = np.amax(np.linalg.eigh(hmat)[0])
hmat -= shift * np.eye(2**3)
return np.reshape(hmat, [2] * 6) |
Optimize a scale-invariant MERA using linearized updates.
The MERA is assumed to be completely uniform and scale-invariant, consisting
of a single isometry and disentangler.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom.
state: An initial 3-site reduced state (rank-6 tensor) to initialize the
descending fixed-point computation.
isometry: The isometry tensor (rank 3) of the binary MERA.
    disentangler: The disentangler tensor (rank 4) of the binary MERA.
    num_itr: The number of linearized update iterations to run.
Returns:
state: The approximate descending fixed-point reduced state (rank 6).
isometry: The optimized isometry.
disentangler: The optimized disentangler. | def optimize_linear(hamiltonian, state, isometry, disentangler, num_itr):
"""Optimize a scale-invariant MERA using linearized updates.
The MERA is assumed to be completely uniform and scale-invariant, consisting
of a single isometry and disentangler.
Args:
hamiltonian: The hamiltonian (rank-6 tensor) defined at the bottom.
state: An initial 3-site reduced state (rank-6 tensor) to initialize the
descending fixed-point computation.
isometry: The isometry tensor (rank 3) of the binary MERA.
    disentangler: The disentangler tensor (rank 4) of the binary MERA.
    num_itr: The number of linearized update iterations to run.
Returns:
state: The approximate descending fixed-point reduced state (rank 6).
isometry: The optimized isometry.
disentangler: The optimized disentangler.
"""
h_shifted = shift_ham(hamiltonian)
for i in range(num_itr):
isometry = update_iso(h_shifted, state, isometry, disentangler)
disentangler = update_dis(h_shifted, state, isometry, disentangler)
for _ in range(10):
state = descend(hamiltonian, state, isometry, disentangler)
en = binary_mera_energy(hamiltonian, state, isometry, disentangler)
print("{}:\t{}".format(i, en))
return state, isometry, disentangler |
Dimension 2 "Ising" Hamiltonian.
This version from Evenbly & White, Phys. Rev. Lett. 116, 140403
(2016). | def ham_ising():
"""Dimension 2 "Ising" Hamiltonian.
This version from Evenbly & White, Phys. Rev. Lett. 116, 140403
(2016).
"""
E = np.array([[1, 0], [0, 1]])
X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])
hmat = np.kron(X, np.kron(Z, X))
hmat -= 0.5 * (np.kron(np.kron(X, X), E) + np.kron(E, np.kron(X, X)))
return np.reshape(hmat, [2] * 6) |
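A hedged sanity check combining shift_ham and ham_ising (both assumed to live in the same module): after the shift, the 8x8 matrix form of the local term has no positive eigenvalues:

h_shifted = shift_ham(ham_ising())
eigs = np.linalg.eigvalsh(np.reshape(h_shifted, (8, 8)))
assert np.all(eigs <= 1e-12)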
Returns the Hamiltonian and MERA tensors for the D=2 wavelet MERA.
From Evenbly & White, Phys. Rev. Lett. 116, 140403 (2016). | def wavelet_tensors(request):
"""Returns the Hamiltonian and MERA tensors for the D=2 wavelet MERA.
From Evenbly & White, Phys. Rev. Lett. 116, 140403 (2016).
"""
D = 2
h = simple_mera.ham_ising()
E = np.array([[1, 0], [0, 1]])
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
wmat_un = np.real((np.sqrt(3) + np.sqrt(2)) / 4 * np.kron(E, E) +
(np.sqrt(3) - np.sqrt(2)) / 4 * np.kron(Z, Z) + 1.j *
(1 + np.sqrt(2)) / 4 * np.kron(X, Y) + 1.j *
(1 - np.sqrt(2)) / 4 * np.kron(Y, X))
umat = np.real((np.sqrt(3) + 2) / 4 * np.kron(E, E) +
(np.sqrt(3) - 2) / 4 * np.kron(Z, Z) +
1.j / 4 * np.kron(X, Y) + 1.j / 4 * np.kron(Y, X))
w = np.reshape(wmat_un, (D, D, D, D))[:, 0, :, :]
u = np.reshape(umat, (D, D, D, D))
w = np.transpose(w, [1, 2, 0])
u = np.transpose(u, [2, 3, 0, 1])
return tuple(x.astype(np.complex128) for x in (h, w, u)) |
Prepare gates using 1st-order trotter decomposition.
Currently only implemented for nearest-neighbor Hamiltonians.
Args:
H: List of Hamiltonian terms. Should be length num_sites-1.
step_size: The trotter step size (a scalar).
num_sites: The total number of sites in the system (an integer).
euclidean: Whether the evolution is euclidean, or not (boolean).
Returns:
layers: A list of layers, with each layer a list of gates, one for each
site, or `None` if no gate is applied to that site in the layer. | def trotter_prepare_gates(H, step_size, num_sites, euclidean):
"""Prepare gates using 1st-order trotter decomposition.
Currently only implemented for nearest-neighbor Hamiltonians.
Args:
H: List of Hamiltonian terms. Should be length num_sites-1.
step_size: The trotter step size (a scalar).
num_sites: The total number of sites in the system (an integer).
euclidean: Whether the evolution is euclidean, or not (boolean).
Returns:
layers: A list of layers, with each layer a list of gates, one for each
site, or `None` if no gate is applied to that site in the layer.
"""
if not len(H) == num_sites - 1:
raise ValueError("Number of H terms must match number of sites - 1.")
step_size = tf.cast(step_size, tf.float64) # must be real
step_size = tf.cast(step_size, H[0].dtype)
if euclidean:
step_size = -1.0 * step_size
else:
step_size = 1.j * step_size
eH = []
for h in H:
if len(h.shape) != 4:
raise ValueError("H must be nearest-neighbor.")
h_shp = tf.shape(h)
h_r = tf.reshape(h, (h_shp[0] * h_shp[1], h_shp[2] * h_shp[3]))
eh_r = tf.linalg.expm(step_size * h_r)
eH.append(tf.reshape(eh_r, h_shp))
eh_even = [None] * num_sites
eh_odd = [None] * num_sites
for (n, eh) in enumerate(eH):
if n % 2 == 0:
eh_even[n] = eh
else:
eh_odd[n] = eh
return [eh_even, eh_odd] |
Computes the inner product <psi1|psi2>.
Args:
psi1: A tensor representing the first wavefunction.
psi2: A tensor representing the second wavefunction.
Returns:
inner_product: The vector inner product. | def inner(psi1, psi2):
"""Computes the inner product <psi1|psi2>.
Args:
psi1: A tensor representing the first wavefunction.
psi2: A tensor representing the second wavefunction.
Returns:
inner_product: The vector inner product.
"""
return tf.reduce_sum(tf.math.conj(psi1) * psi2) |
Apply a local operator to a wavefunction.
The number of dimensions of the tensor representing the wavefunction `psi`
is taken to be the number of lattice sites `N`.
The operator acts nontrivially on sites `n1` to `n1 + k - 1` of psi, where
`0 <= n1 < N`, and is expected to have `2*k` dimensions.
The first `k` dimensions represent the output and the last `k` dimensions
represent the input, to be contracted with `psi`.
Args:
psi: An `N`-dimensional tensor representing the wavefunction.
op: Tensor with `2 * k` dimensions. The operator to apply.
n1: The number of the leftmost site at which to apply the operator.
pbc: If `True`, use periodic boundary conditions, so that site `N` is
identified with site `0`. Otherwise, site `N-1` has no neighbors to the
right.
Returns:
psi_final: The result of applying `op` to `psi`. | def apply_op(psi, op, n1, pbc=False):
"""Apply a local operator to a wavefunction.
The number of dimensions of the tensor representing the wavefunction `psi`
is taken to be the number of lattice sites `N`.
The operator acts nontrivially on sites `n1` to `n1 + k - 1` of psi, where
`0 <= n1 < N`, and is expected to have `2*k` dimensions.
The first `k` dimensions represent the output and the last `k` dimensions
represent the input, to be contracted with `psi`.
Args:
psi: An `N`-dimensional tensor representing the wavefunction.
op: Tensor with `2 * k` dimensions. The operator to apply.
n1: The number of the leftmost site at which to apply the operator.
pbc: If `True`, use periodic boundary conditions, so that site `N` is
identified with site `0`. Otherwise, site `N-1` has no neighbors to the
right.
Returns:
psi_final: The result of applying `op` to `psi`.
"""
n_psi = tensornetwork.Node(psi, backend="tensorflow")
site_edges = n_psi.get_all_edges()
site_edges, n_op = _apply_op_network(site_edges, op, n1, pbc)
n_res = tensornetwork.contract_between(
n_op, n_psi, output_edge_order=site_edges)
return n_res.tensor |
Expectation value of a k-local operator, acting on sites n1 to n1 + k-1.
In braket notation: <psi|op(n1)|psi>
The number of dimensions of the tensor representing the wavefunction `psi`
is taken to be the number of lattice sites `N`.
Args:
psi: An `N`-dimensional tensor representing the wavefunction.
op: Tensor with `2 * k` dimensions. The operator to apply.
n1: The number of the leftmost site at which to apply the operator.
pbc: If `True`, use periodic boundary conditions, so that site `N` is
identified with site `0`. Otherwise, site `N-1` has no neighbors to the
right.
Returns:
expval: The expectation value. | def expval(psi, op, n1, pbc=False):
"""Expectation value of a k-local operator, acting on sites n1 to n1 + k-1.
In braket notation: <psi|op(n1)|psi>
The number of dimensions of the tensor representing the wavefunction `psi`
is taken to be the number of lattice sites `N`.
Args:
psi: An `N`-dimensional tensor representing the wavefunction.
op: Tensor with `2 * k` dimensions. The operator to apply.
n1: The number of the leftmost site at which to apply the operator.
pbc: If `True`, use periodic boundary conditions, so that site `N` is
identified with site `0`. Otherwise, site `N-1` has no neighbors to the
right.
Returns:
expval: The expectation value.
"""
n_psi = tensornetwork.Node(psi, backend="tensorflow")
site_edges = n_psi.get_all_edges()
site_edges, n_op = _apply_op_network(site_edges, op, n1, pbc)
n_op_psi = n_op @ n_psi
n_psi_conj = tensornetwork.Node(tf.math.conj(psi), backend="tensorflow")
for i in range(len(site_edges)):
tensornetwork.connect(site_edges[i], n_psi_conj[i])
res = n_psi_conj @ n_op_psi
return res.tensor |
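A hedged usage sketch (the internal helper _apply_op_network is assumed to be defined in this module): the Pauli-Z magnetization of the first site of |0...0> is +1:

N = 4
psi0 = np.zeros([2] * N)
psi0[(0,) * N] = 1.0
psi = tf.constant(psi0.astype(np.complex128))
z_op = tf.constant(np.diag([1.0, -1.0]).astype(np.complex128))
print(expval(psi, z_op, 0))  # expected: (1+0j)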
Evolve an initial wavefunction psi using a trotter decomposition of H.
If the evolution is euclidean, the wavefunction will be normalized after
each step.
Args:
psi: An `N`-dimensional tensor representing the initial wavefunction.
H: A list of `N-1` tensors representing nearest-neighbor operators.
step_size: The trotter step size.
num_steps: The number of trotter steps to take.
euclidean: If `True`, evolve in Euclidean (imaginary) time.
callback: Optional callback function for monitoring the evolution.
Returns:
psi_t: The final wavefunction.
t: The final time. | def evolve_trotter(psi,
H,
step_size,
num_steps,
euclidean=False,
callback=None):
"""Evolve an initial wavefunction psi using a trotter decomposition of H.
If the evolution is euclidean, the wavefunction will be normalized after
each step.
Args:
psi: An `N`-dimensional tensor representing the initial wavefunction.
H: A list of `N-1` tensors representing nearest-neighbor operators.
step_size: The trotter step size.
num_steps: The number of trotter steps to take.
euclidean: If `True`, evolve in Euclidean (imaginary) time.
callback: Optional callback function for monitoring the evolution.
Returns:
psi_t: The final wavefunction.
t: The final time.
"""
num_sites = len(psi.shape)
layers = trotter_prepare_gates(H, step_size, num_sites, euclidean)
return _evolve_trotter_gates(
psi, layers, step_size, num_steps, euclidean=euclidean, callback=callback) |
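A hedged usage sketch: imaginary-time (Euclidean) evolution of a random 6-site state under a uniform nearest-neighbour Heisenberg interaction, with TensorFlow tensors throughout. The operator reshape follows the two-site (2, 2, 2, 2) layout expected by apply_op:

X = np.array([[0.0, 1.0], [1.0, 0.0]])
Y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
Z = np.array([[1.0, 0.0], [0.0, -1.0]])
h2 = 0.25 * (np.kron(X, X) + np.kron(Y, Y).real + np.kron(Z, Z))
h2 = tf.constant(h2.reshape(2, 2, 2, 2).astype(np.complex128))
N = 6
psi0 = np.random.randn(*([2] * N)) + 1.0j * np.random.randn(*([2] * N))
psi = tf.constant(psi0 / np.linalg.norm(psi0))
psi, t = evolve_trotter(
    psi, [h2] * (N - 1), step_size=0.1, num_steps=50, euclidean=True)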
Evolve an initial wavefunction psi via gates specified in `layers`.
If the evolution is euclidean, the wavefunction will be normalized
after each step. | def _evolve_trotter_gates(psi,
layers,
step_size,
num_steps,
euclidean=False,
callback=None):
"""Evolve an initial wavefunction psi via gates specified in `layers`.
If the evolution is euclidean, the wavefunction will be normalized
after each step.
"""
t = 0.0
for i in range(num_steps):
psi = apply_circuit(psi, layers)
if euclidean:
psi = tf.divide(psi, tf.norm(psi))
t += step_size
if callback is not None:
callback(psi, t, i)
return psi, t |
Evolve an initial wavefunction psi using a trotter decomposition of H.
If the evolution is euclidean, the wavefunction will be normalized after
each step.
In this version, `batch_size` steps are "compiled" to a computational graph
using `defun`, which greatly decreases overhead.
Args:
psi: An `N`-dimensional tensor representing the initial wavefunction.
H: A list of `N-1` tensors representing nearest-neighbor operators.
step_size: The trotter step size.
num_steps: The number of trotter steps to take.
euclidean: If `True`, evolve in Euclidean (imaginary) time.
callback: Optional callback function for monitoring the evolution.
batch_size: The number of steps to unroll in the computational graph.
Returns:
psi_t: The final wavefunction.
t: The final time. | def evolve_trotter_defun(psi,
H,
step_size,
num_steps,
euclidean=False,
callback=None,
batch_size=1):
"""Evolve an initial wavefunction psi using a trotter decomposition of H.
If the evolution is euclidean, the wavefunction will be normalized after
each step.
In this version, `batch_size` steps are "compiled" to a computational graph
using `defun`, which greatly decreases overhead.
Args:
psi: An `N`-dimensional tensor representing the initial wavefunction.
H: A list of `N-1` tensors representing nearest-neighbor operators.
step_size: The trotter step size.
num_steps: The number of trotter steps to take.
euclidean: If `True`, evolve in Euclidean (imaginary) time.
callback: Optional callback function for monitoring the evolution.
batch_size: The number of steps to unroll in the computational graph.
Returns:
psi_t: The final wavefunction.
t: The final time.
"""
n_batches, rem = divmod(num_steps, batch_size)
step_size = tf.cast(step_size, psi.dtype)
num_sites = len(psi.shape)
layers = trotter_prepare_gates(H, step_size, num_sites, euclidean)
t = 0.0
for i in range(n_batches):
psi, t_b = _evolve_trotter_gates_defun(
psi, layers, step_size, batch_size, euclidean=euclidean, callback=None)
t += t_b
if callback is not None:
callback(psi, t, (i + 1) * batch_size - 1)
if rem > 0:
psi, t_b = _evolve_trotter_gates_defun(
psi, layers, step_size, rem, euclidean=euclidean, callback=None)
t += t_b
return psi, t |
Applies a quantum circuit to a wavefunction.
The circuit consists of a sequence of layers, with each layer consisting
of non-overlapping gates.
Args:
psi: An `N`-dimensional tensor representing the initial wavefunction.
layers: A sequence of layers. Each layer is a sequence of gates, with
each index of a layer corresponding to a site in `psi`. The `i`th gate
of a layer acts on sites `i` to `i + k - 1`, where `k` is the range of
the gate. Gates may not overlap within a layer.
Returns:
psi_t: The final wavefunction. | def apply_circuit(psi, layers):
"""Applies a quantum circuit to a wavefunction.
The circuit consists of a sequence of layers, with each layer consisting
of non-overlapping gates.
Args:
psi: An `N`-dimensional tensor representing the initial wavefunction.
layers: A sequence of layers. Each layer is a sequence of gates, with
each index of a layer corresponding to a site in `psi`. The `i`th gate
of a layer acts on sites `i` to `i + k - 1`, where `k` is the range of
the gate. Gates may not overlap within a layer.
Returns:
psi_t: The final wavefunction.
"""
num_sites = len(psi.shape)
n_psi = tensornetwork.Node(psi, backend="tensorflow")
site_edges = n_psi.get_all_edges()
nodes = [n_psi]
for gates in layers:
skip = 0
for n in range(num_sites):
if n < len(gates):
gate = gates[n]
else:
gate = None
if skip > 0:
if gate is not None:
raise ValueError(
"Overlapping gates in same layer at site {}!".format(n))
skip -= 1
elif gate is not None:
site_edges, n_gate = _apply_op_network(site_edges, gate, n)
nodes.append(n_gate)
# keep track of how many sites this gate included
op_sites = len(gate.shape) // 2
skip = op_sites - 1
# NOTE: This may not be the optimal order if transpose costs are considered.
n_psi = reduce(tensornetwork.contract_between, nodes)
n_psi.reorder_edges(site_edges)
return n_psi.tensor |
Compute the contracted and free labels of `network_structure`,
using the following rules:
* Any negative number-type element and any hyphen-prepended str-type
element are considered output labels.
* Any positive number-type element and any non-hyphen-prepended
str-type element are considered contracted labels.
* Any negative number-type element appearing more than once, any
hyphen-prepended str-type element appearing more than once,
any positive element appearing exactly once and
any element appearing more than twice are considered batch labels.
Computed lists are ordered according to int and ASCII ordering
for integer and string values, with first entries in each list
being ordered integer labels followed by ASCII ordered string
labels.
Returns:
int_cont_labels: The number-type contracted labels
str_cont_labels: The str-type contracted labels
int_out_labels: The number-type output labels
str_out_labels: The str-type output labels | def _get_cont_out_labels(
network_structure: Sequence[Sequence[Union[int, str]]]) -> Any:
"""
  Compute the contracted and free labels of `network_structure`,
using the following rules:
* Any negative number-type element and any hyphen-prepended str-type
element are considered output labels.
* Any positive number-type element and any non-hyphen-prepended
str-type element are considered contracted labels.
* Any negative number-type element appearing more than once, any
hyphen-prepended str-type element appearing more than once,
any positive element appearing exactly once and
any element appearing more than twice are considered batch labels.
Computed lists are ordered according to int and ASCII ordering
for integer and string values, with first entries in each list
being ordered integer labels followed by ASCII ordered string
labels.
Returns:
int_cont_labels: The number-type contracted labels
str_cont_labels: The str-type contracted labels
int_out_labels: The number-type output labels
str_out_labels: The str-type output labels
"""
flat_labels = [l for sublist in network_structure for l in sublist]
int_labels = {o for o in flat_labels if not isinstance(o, str)}
str_labels = {o for o in flat_labels if isinstance(o, str)}
int_out_labels = sorted([l for l in int_labels if l < 0], reverse=True)
int_cont_labels = sorted([label for label in int_labels if label >= 0])
# pylint: disable=unnecessary-lambda
str_out_labels = sorted([label for label in str_labels if label[0] == '-'],
key=lambda x: str(x))
# pylint: disable=unnecessary-lambda
str_cont_labels = sorted([label for label in str_labels if label[0] != '-'],
key=lambda x: str(x))
# pylint: disable=line-too-long
return int_cont_labels, str_cont_labels, int_out_labels, str_out_labels |
Map `network_structure` to a canonical form.
The elements in `network_structure` are replaced
by integers according to the following rules:
  1. All negative numbers are sorted in decreasing order and mapped to
     decreasing integers, starting with -1.
E.g., the numbers [-4,-10,-1] are mapped to
-1 -> -1, -4 -> -2, -10 -> -3
2. All strings prepended with a hyphen '-' are ordered increasingly
using ASCII ordering, and mapped to decreasing negative integers
starting with the next value following the last integer under
point 1. above. E.g. [-4,-10,-1,'-303','-a','-33']
is mapped to
-1 -> -1, -4 -> -2, -10 -> -3, '-303' -> -4, '-33' -> -5, '-a' -> -6
3. All positive numbers are sorted increasingly and mapped to
increasing integers, starting at 1
4. All strings without a prepended hyphen are sorted increasingly
using ASCII order and mapped to positive integers, starting
with the next integer value following the last used value under
point 3. | def _canonicalize_network_structure(
network_structure: Sequence[Sequence[Union[int, str]]]
) -> Tuple[List[List], Dict]:
"""
Map `network_structure` to a canonical form.
The elements in `network_structure` are replaced
by integers according to the following rules:
  1. All negative numbers are sorted in decreasing order and mapped to
     decreasing integers, starting with -1.
E.g., the numbers [-4,-10,-1] are mapped to
-1 -> -1, -4 -> -2, -10 -> -3
2. All strings prepended with a hyphen '-' are ordered increasingly
using ASCII ordering, and mapped to decreasing negative integers
starting with the next value following the last integer under
point 1. above. E.g. [-4,-10,-1,'-303','-a','-33']
is mapped to
-1 -> -1, -4 -> -2, -10 -> -3, '-303' -> -4, '-33' -> -5, '-a' -> -6
3. All positive numbers are sorted increasingly and mapped to
increasing integers, starting at 1
4. All strings without a prepended hyphen are sorted increasingly
using ASCII order and mapped to positive integers, starting
with the next integer value following the last used value under
point 3.
"""
flat_labels = [l for sublist in network_structure for l in sublist]
neg_int_labels = sorted(
list({l for l in flat_labels if not isinstance(l, str) and l < 0}))
pos_int_labels = sorted(
list({l for l in flat_labels if not isinstance(l, str) and l > 0}))
neg_str_labels = sorted(
{l for l in flat_labels if isinstance(l, str) and l[0] == '-'},
reverse=True)
pos_str_labels = sorted(
list({l for l in flat_labels if isinstance(l, str) and l[0] != '-'}))
neg_mapping = dict(
zip(neg_str_labels + neg_int_labels,
np.arange(-len(neg_int_labels + neg_str_labels), 0)))
pos_mapping = dict(
zip(pos_int_labels + pos_str_labels,
np.arange(1, 1 + len(pos_int_labels + pos_str_labels))))
neg_mapping.update(pos_mapping)
mapped_network_structure = [
[neg_mapping[label] for label in labels] for labels in network_structure
]
return mapped_network_structure, neg_mapping |
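A hedged illustration of the relabeling, reusing the example values from the docstring above:

structure = [[-4, -10, -1], ['-303', '-a', '-33']]
_, mapping = _canonicalize_network_structure(structure)
# mapping == {-1: -1, -4: -2, -10: -3, '-303': -4, '-33': -5, '-a': -6}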
Perform checks on `network_structure`. | def _check_network(network_structure: Sequence[Sequence[Union[int, str]]],
tensor_dimensions: List[Tuple[int]],
con_order: Optional[List[Union[int, str]]] = None,
out_order: Optional[List[Union[int, str]]] = None) -> None:
"""
Perform checks on `network_structure`.
"""
# check if number of tensors matches the number of lists
# in network_structure
if len(network_structure) != len(tensor_dimensions):
raise ValueError("number of tensors does not match the"
" number of network connections.")
# check number of labels of each element in network_structure
# matches the tensor order
for n, dims in enumerate(tensor_dimensions):
if len(dims) != len(network_structure[n]):
raise ValueError(f"number of indices does not match"
f" number of labels on tensor {n}.")
flat_labels = [l for sublist in network_structure for l in sublist]
# pylint: disable=line-too-long
int_cont_labels, str_cont_labels, int_out_labels, str_out_labels = _get_cont_out_labels(
network_structure)
out_labels = int_out_labels + str_out_labels
cont_labels = int_cont_labels + str_cont_labels
str_labels = str_cont_labels + [l[1:] for l in str_out_labels]
mask = [l.isalnum() for l in str_labels]
if not np.all(mask):
raise ValueError(
f"only alphanumeric values allowed for string labels, "
f"found {[l for n, l in enumerate(str_labels) if not mask[n]]}")
# make sure no value 0 is used as a label (legacy behaviour)
if int_cont_labels.count(0) > 0:
raise ValueError("only nonzero values are allowed to "
"specify network structure.")
if con_order is not None:
#check that all integer elements in `con_order` are positive
int_cons = [o for o in con_order if not isinstance(o, str)]
labels = [o for o in int_cons if o < 0]
if len(labels) > 0:
raise ValueError(f"all number type labels in `con_order` have "
f"to be positive, found {labels}")
str_cons = [o for o in con_order if isinstance(o, str)]
labels = [o for o in str_cons if o[0] == '-']
#check that all string type elements in `con_order` have no hyphens
if len(labels) > 0:
raise ValueError(f"all string type labels in `con_order` "
f"must be unhyphenized, found {labels}")
# check that elements in `con_order` appear only once
labels = []
for l in con_order:
if (con_order.count(l) != 1) and (l not in labels):
labels.append(l)
if len(labels) > 0:
raise ValueError(f"labels {labels} appear more than once in `con_order`.")
# check if passed `con_order` makes sense
if len(con_order) != len(cont_labels):
raise ValueError(f"`con_order = {con_order} is not "
f"a valid contraction order for contracted "
f"labels {cont_labels}")
# check if all labels in `con_order` appear in `network_structure`
labels = [o for o in con_order if o not in flat_labels]
if len(labels) != 0:
raise ValueError(f"labels {labels} in `con_order` do not appear as "
f"contracted labels in `network_structure`.")
if out_order is not None:
#check that all integer elements in `out_order` are negative
int_outs = [o for o in out_order if not isinstance(o, str)]
labels = [o for o in int_outs if o > 0]
if len(labels) > 0:
raise ValueError(f"all number type labels in `out_order` have "
f"to be negative, found {labels}")
#check that all string type elements in `out_order` have hyphens
str_outs = [o for o in out_order if isinstance(o, str)]
labels = [o for o in str_outs if o[0] != '-']
if len(labels) > 0:
raise ValueError(f"all string type labels in `out_order` "
f"have to be hyphenized, found {labels}")
# check that all elements in `out_order` appear exactly once
labels = []
for l in out_order:
if (out_order.count(l) != 1) and (l not in labels):
labels.append(l)
if len(labels) > 0:
raise ValueError(f"labels {labels} appear more than once in `out_order`.")
# check if `out_order` has right length
if len(out_order) != len(out_labels):
raise ValueError(f"`out_order` = {out_order} is not "
f"a valid output order for open "
f"labels {out_labels}")
# check if all labels in `out_order` appear in `network_structure`
labels = [o for o in out_order if o not in flat_labels]
if len(labels) != 0:
raise ValueError(f"labels {labels} in `out_order` do not "
f"appear in `network_structure`.")
# check if contracted dimensions are matching
mismatched_labels = []
for l in cont_labels:
dims = {
tensor_dimensions[m][n]
for m, labels in enumerate(network_structure)
for n, l1 in enumerate(labels)
if l1 == l
}
if len(dims) > 1:
mismatched_labels.append(l)
if len(mismatched_labels) > 0:
raise ValueError(
f"tensor dimensions for labels {mismatched_labels} are mismatching") |
Perform the partial trace of `tensor`.
All labels appearing twice in `labels` are traced out.
  Args:
tensor: A tensor.
labels: The ncon-style labels of `tensor`.
Returns:
Tensor: The result of the tracing. | def _partial_trace(
tensor: Tensor, labels: List,
backend_obj: AbstractBackend) -> Tuple[Tensor, List, List]:
"""
Perform the partial trace of `tensor`.
All labels appearing twice in `labels` are traced out.
  Args:
tensor: A tensor.
labels: The ncon-style labels of `tensor`.
Returns:
Tensor: The result of the tracing.
"""
trace_labels = [l for l in labels if labels.count(l) == 2]
if len(trace_labels) > 0:
num_cont = len(trace_labels) // 2
unique_trace_labels = sorted(trace_labels)[0:-1:2]
trace_label_positions = [[
n for n, label in enumerate(labels) if label == trace_label
] for trace_label in unique_trace_labels]
contracted_indices = [l[0] for l in trace_label_positions
] + [l[1] for l in trace_label_positions]
free_indices = [
n for n in range(len(labels)) if n not in contracted_indices
]
shape = backend_obj.shape_tuple(tensor)
contracted_dimension = np.prod(
[shape[d] for d in contracted_indices[:num_cont]])
temp_shape = tuple([shape[pos] for pos in free_indices] +
[contracted_dimension, contracted_dimension])
result = backend_obj.trace(
backend_obj.reshape(
backend_obj.transpose(tensor,
tuple(free_indices + contracted_indices)),
temp_shape))
new_labels = [l for l in labels if l not in unique_trace_labels]
return result, new_labels, unique_trace_labels
return tensor, labels, [] |
Subroutine for performing a batched contraction of tensors `t1` and `t2`.
Args:
t1: A Tensor.
t2: A Tensor.
tensors: List of Tensor objects.
network_structure: The canonical labels of the networks.
con_order: Array of contracted labels.
common_batch_labels: The common batch labels of `t1` and `t2`.
labels_t1: The labels of `t1`
labels_t2: The labels of `t2`
backend_obj: A backend object.
Returns:
List[Tensor]: Updated list of tensors.
List[List]: Updated `network_structure`.
List: Updated `con_order` (contraction order). | def _batch_cont(
t1: Tensor, t2: Tensor, tensors: List[Tensor],
network_structure: List[List], con_order: List, common_batch_labels: Set,
labels_t1: List, labels_t2: List, backend_obj: AbstractBackend
) -> Tuple[Tensor, List[List], List]:
"""
Subroutine for performing a batched contraction of tensors `t1` and `t2`.
Args:
t1: A Tensor.
t2: A Tensor.
tensors: List of Tensor objects.
network_structure: The canonical labels of the networks.
con_order: Array of contracted labels.
common_batch_labels: The common batch labels of `t1` and `t2`.
labels_t1: The labels of `t1`
labels_t2: The labels of `t2`
backend_obj: A backend object.
Returns:
List[Tensor]: Updated list of tensors.
List[List]: Updated `network_structure`.
List: Updated `con_order` (contraction order).
"""
common_batch_labels = list(common_batch_labels)
#find positions of common batch labels
t1_batch_pos = [labels_t1.index(l) for l in common_batch_labels]
t2_batch_pos = [labels_t2.index(l) for l in common_batch_labels]
#find positions of contracted non-batch labels
non_batch_labels_t1 = {l for l in labels_t1 if l not in common_batch_labels}
non_batch_labels_t2 = {l for l in labels_t2 if l not in common_batch_labels}
common_contracted_labels = list(
non_batch_labels_t1.intersection(non_batch_labels_t2))
t1_cont = [labels_t1.index(l) for l in common_contracted_labels]
t2_cont = [labels_t2.index(l) for l in common_contracted_labels]
free_labels_t1 = set(labels_t1) - set(common_contracted_labels) - set(
common_batch_labels)
free_labels_t2 = set(labels_t2) - set(common_contracted_labels) - set(
common_batch_labels)
# find positions of uncontracted non-batch labels
free_pos_t1 = [n for n, l in enumerate(labels_t1) if l in free_labels_t1]
free_pos_t2 = [n for n, l in enumerate(labels_t2) if l in free_labels_t2]
t1_shape = np.array(backend_obj.shape_tuple(t1))
t2_shape = np.array(backend_obj.shape_tuple(t2))
newshape_t1 = (np.prod(t1_shape[t1_batch_pos]),
np.prod(t1_shape[free_pos_t1]), np.prod(t1_shape[t1_cont]))
newshape_t2 = (np.prod(t2_shape[t2_batch_pos]), np.prod(t2_shape[t2_cont]),
np.prod(t2_shape[free_pos_t2]))
#bring batch labels to the front
order_t1 = tuple(t1_batch_pos + free_pos_t1 + t1_cont)
order_t2 = tuple(t2_batch_pos + t2_cont + free_pos_t2)
mat1 = backend_obj.reshape(backend_obj.transpose(t1, order_t1), newshape_t1)
mat2 = backend_obj.reshape(backend_obj.transpose(t2, order_t2), newshape_t2)
result = backend_obj.matmul(mat1, mat2)
final_shape = tuple(
np.concatenate([
t1_shape[t1_batch_pos], t1_shape[free_pos_t1], t2_shape[free_pos_t2]
]))
result = backend_obj.reshape(result, final_shape)
# update labels, tensors, network_structure and con_order
new_labels = [labels_t1[i] for i in t1_batch_pos] + [
labels_t1[i] for i in free_pos_t1
] + [labels_t2[i] for i in free_pos_t2]
network_structure.append(new_labels)
tensors.append(result)
con_order = [c for c in con_order if c not in common_contracted_labels]
return tensors, network_structure, con_order |
Jittable Ncon function. Performs the contraction of `tensors`.
Args:
tensors: List of tensors.
flat_labels: A Tuple of integers.
sizes: Tuple of int used to reconstruct `network_structure` from
`flat_labels`.
con_order: Order of the contraction.
out_order: Order of the final axis order.
backend_obj: A backend object.
Returns:
The final tensor after contraction. | def _jittable_ncon(tensors: List[Tensor], flat_labels: Tuple[int],
sizes: Tuple[int], con_order: Tuple[int],
out_order: Tuple[int],
backend_obj: AbstractBackend) -> Tensor:
"""
Jittable Ncon function. Performs the contraction of `tensors`.
Args:
tensors: List of tensors.
flat_labels: A Tuple of integers.
sizes: Tuple of int used to reconstruct `network_structure` from
`flat_labels`.
con_order: Order of the contraction.
out_order: Order of the final axis order.
backend_obj: A backend object.
Returns:
The final tensor after contraction.
"""
# some jax-juggling to avoid retracing ...
flat_labels = list(flat_labels)
slices = np.append(0, np.cumsum(sizes))
network_structure = [
flat_labels[slices[n]:slices[n + 1]] for n in range(len(slices) - 1)
]
out_order = list(out_order)
con_order = list(con_order)
# pylint: disable=unnecessary-comprehension
init_con_order = [c for c in con_order]
init_network_structure = [c for c in network_structure]
# partial trace
for n, tensor in enumerate(tensors):
tensors[n], network_structure[n], contracted_labels = _partial_trace(
tensor, network_structure[n], backend_obj)
if len(contracted_labels) > 0:
con_order = [c for c in con_order if c not in contracted_labels]
flat_labels = [l for sublist in network_structure for l in sublist]
  # contract all positive labels appearing only once in `network_structure`
contractable_labels = [
l for l in flat_labels if (flat_labels.count(l) == 1) and (l > 0)
]
# update con_order
if len(contractable_labels) > 0:
con_order = [o for o in con_order if o not in contractable_labels]
# collapse axes of single-labelled tensors
locs = []
for n, labels in enumerate(network_structure):
if len(set(labels).intersection(contractable_labels)) > 0:
locs.append(n)
for loc in locs:
labels = network_structure[loc]
contractable_inds = [labels.index(l) for l in contractable_labels]
network_structure[loc] = [l for l in labels if l not in contractable_labels]
tensors[loc] = backend_obj.sum(tensors[loc], tuple(contractable_inds))
# perform binary and batch contractions
skip_counter = 0
batch_labels = []
batch_cnts = []
for l in set(flat_labels):
cnt = flat_labels.count(l)
if (cnt > 2) or (cnt == 2 and l < 0):
batch_labels.append(l)
batch_cnts.append(cnt)
while len(con_order) > 0:
# the next index to be contracted
cont_ind = con_order[0]
if cont_ind in batch_labels:
      # if it's still a batch index then do it later
con_order.append(con_order.pop(0))
skip_counter += 1
# avoid being stuck in an infinite loop
if skip_counter > len(con_order):
raise ValueError(f"ncon seems stuck in an infinite loop. \n"
f"Please check if `con_order` = {init_con_order} is "
f"a valid contraction order for \n"
f"`network_structure` = {init_network_structure}")
continue
# find locations of `cont_ind` in `network_structure`
locs = [
n for n, labels in enumerate(network_structure) if cont_ind in labels
]
t2 = tensors.pop(locs[1])
t1 = tensors.pop(locs[0])
labels_t2 = network_structure.pop(locs[1])
labels_t1 = network_structure.pop(locs[0])
common_labels, t1_cont, t2_cont = label_intersection(labels_t1, labels_t2)
# check if there are batch labels (i.e. labels appearing more than twice
# in `network_structure`).
common_batch_labels = set(batch_labels).intersection(common_labels)
if len(common_batch_labels) > 0:
# case1: both tensors have one or more common batch indices -> use matmul
ix = np.nonzero(
np.array(batch_labels)[:, None] == np.array(
list(common_batch_labels))[None, :])[0]
# reduce the counts of these labels in `batch_cnts` by 1
delete = []
for i in ix:
batch_cnts[i] -= 1
if (batch_labels[i] > 0) and (batch_cnts[i] <= 2):
delete.append(i)
elif (batch_labels[i] < 0) and (batch_cnts[i] < 2):
delete.append(i)
for i in sorted(delete, reverse=True):
del batch_cnts[i]
del batch_labels[i]
tensors, network_structure, con_order = _batch_cont(
t1, t2, tensors, network_structure, con_order, common_batch_labels,
labels_t1, labels_t2, backend_obj)
# in all other cases do a regular tensordot
else:
# for len(t1_cont)~<20 this is faster than np.argsort
ind_sort = [t1_cont.index(l) for l in sorted(t1_cont)]
tensors.append(
backend_obj.tensordot(
t1,
t2,
axes=(tuple(t1_cont[i] for i in ind_sort),
tuple(t2_cont[i] for i in ind_sort))))
new_labels = [l for l in labels_t1 if l not in common_labels
] + [l for l in labels_t2 if l not in common_labels]
network_structure.append(new_labels)
# remove contracted labels from con_order
con_order = [c for c in con_order if c not in common_labels]
# perform outer products and remaining batch contractions
while len(tensors) > 1:
t2 = tensors.pop()
t1 = tensors.pop()
labels_t2 = network_structure.pop()
labels_t1 = network_structure.pop()
# check if there are negative batch indices left
# (have to be collapsed to a single one)
common_labels, t1_cont, t2_cont = label_intersection(labels_t1, labels_t2)
common_batch_labels = set(batch_labels).intersection(common_labels)
if len(common_batch_labels) > 0:
# collapse all negative batch indices
tensors, network_structure, con_order = _batch_cont(
t1, t2, tensors, network_structure, con_order, common_batch_labels,
labels_t1, labels_t2, backend_obj)
else:
tensors.append(backend_obj.outer_product(t1, t2))
network_structure.append(labels_t1 + labels_t2)
# if necessary do a final permutation
if len(network_structure[0]) > 1:
labels = network_structure[0]
final_order = tuple(labels.index(l) for l in out_order)
return backend_obj.transpose(tensors[0], final_order)
return tensors[0] |
Contracts a list of backend-tensors or `Tensor`s
according to a tensor network
specification.
The network is provided as a list of lists, one for each
tensor, specifying the labels for the edges connected to that tensor.
Labels can be any numbers or strings. Negative number-type labels
and string-type labels with a prepended hyphen ('-') are open labels
and remain uncontracted.
Positive number-type labels and string-type labels with no prepended
hyphen ('-') are closed labels and are contracted.
Any open label appearing more than once is treated as an open
batch label. Any closed label appearing more than once is treated as
a closed batch label.
Upon finishing the contraction, all open batch labels will have been
collapsed into a single dimension, and all closed batch labels will
have been summed over.
If `out_order = None`, output labels are ordered according to descending
number ordering and ascending ASCII ordering, with number labels always
appearing before string labels. Example:
network_structure = [[-1, 1, '-rick', '2',-2], [-2, '2', 1, '-morty']]
results in an output order of [-1, -2, '-morty', '-rick'].
If `out_order` is given, the indices of the resulting tensor will be
transposed into this order.
If `con_order = None`, `ncon` will first contract all number labels
in ascending order followed by all string labels in ascending ASCII
order.
If `con_order` is given, `ncon` will contract according to this order.
For example, matrix multiplication:
.. code-block:: python
A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.array([[1.0, 1.0], [0.0, 1.0]])
ncon([A,B], [(-1, 1), (1, -2)])
Matrix trace:
.. code-block:: python
A = np.array([[1.0, 2.0], [3.0, 4.0]])
ncon([A], [(1, 1)]) # 5.0
Note:
Disallowing `0` as an edge label is legacy behaviour, see
`original NCON implementation`_.
.. _original NCON implementation:
https://arxiv.org/abs/1402.0939
Args:
tensors: List of backend-tensors or `Tensor`s.
network_structure: List of lists specifying the tensor network structure.
con_order: List of edge labels specifying the contraction order.
out_order: List of edge labels specifying the output order.
check_network: Boolean flag. If `True` check the network.
backend: String specifying the backend to use. Defaults to
`tensornetwork.backend_contextmanager.get_default_backend`.
Returns:
The result of the contraction:
* A backend-tensor: If all elements of `tensors` are backend-tensors.
* A `Tensor`: If all elements of `tensors` are `Tensor` objects. | def ncon(
tensors: Sequence[Union[tn_tensor.Tensor, Tensor]],
network_structure: Sequence[Sequence[Union[str, int]]],
con_order: Optional[Sequence] = None,
out_order: Optional[Sequence] = None,
check_network: bool = True,
backend: Optional[Union[Text, AbstractBackend]] = None
) -> Union[tn_tensor.Tensor, Tensor]:
r"""Contracts a list of backend-tensors or `Tensor`s
according to a tensor network
specification.
The network is provided as a list of lists, one for each
tensor, specifying the labels for the edges connected to that tensor.
Labels can be any numbers or strings. Negative number-type labels
and string-type labels with a prepended hyphen ('-') are open labels
and remain uncontracted.
Positive number-type labels and string-type labels with no prepended
hyphen ('-') are closed labels and are contracted.
Any open label appearing more than once is treated as an open
batch label. Any closed label appearing more than once is treated as
a closed batch label.
Upon finishing the contraction, all open batch labels will have been
collapsed into a single dimension, and all closed batch labels will
have been summed over.
If `out_order = None`, output labels are ordered according to descending
number ordering and ascending ASCII ordering, with number labels always
appearing before string labels. Example:
network_structure = [[-1, 1, '-rick', '2',-2], [-2, '2', 1, '-morty']]
results in an output order of [-1, -2, '-morty', '-rick'].
If `out_order` is given, the indices of the resulting tensor will be
transposed into this order.
If `con_order = None`, `ncon` will first contract all number labels
in ascending order followed by all string labels in ascending ASCII
order.
If `con_order` is given, `ncon` will contract according to this order.
For example, matrix multiplication:
.. code-block:: python
A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.array([[1.0, 1.0], [0.0, 1.0]])
ncon([A,B], [(-1, 1), (1, -2)])
Matrix trace:
.. code-block:: python
A = np.array([[1.0, 2.0], [3.0, 4.0]])
ncon([A], [(1, 1)]) # 5.0
Note:
Disallowing `0` as an edge label is legacy behaviour, see
`original NCON implementation`_.
.. _original NCON implementation:
https://arxiv.org/abs/1402.0939
Args:
tensors: List of backend-tensors or `Tensor`s.
network_structure: List of lists specifying the tensor network structure.
con_order: List of edge labels specifying the contraction order.
out_order: List of edge labels specifying the output order.
check_network: Boolean flag. If `True` check the network.
backend: String specifying the backend to use. Defaults to
`tensornetwork.backend_contextmanager.get_default_backend`.
Returns:
The result of the contraction:
* A backend-tensor: If all elements of `tensors` are backend-tensors.
* A `Tensor`: If all elements of `tensors` are `Tensor` objects.
"""
# TODO (mganahl): for certain cases np.einsum is still faster than ncon:
# - contractions containing batched outer products with small dimensions
# This should eventually be fixed, but it's not a priority.
if backend is None:
backend = get_default_backend()
if isinstance(backend, AbstractBackend):
backend_obj = backend
else:
backend_obj = backend_factory.get_backend(backend)
if out_order == []: #allow empty list as input
out_order = None
if con_order == []: #allow empty list as input
con_order = None
are_tensors = [isinstance(t, tn_tensor.Tensor) for t in tensors]
tensors_set = {t for t in tensors if isinstance(t, tn_tensor.Tensor)}
if not all(n.backend.name == backend_obj.name for n in tensors_set):
raise ValueError("Some tensors have backends different from '{}'".format(
backend_obj.name))
_tensors = []
for t in tensors:
if isinstance(t, tn_tensor.Tensor):
_tensors.append(t.array)
else:
_tensors.append(t)
_tensors = [backend_obj.convert_to_tensor(t) for t in _tensors]
if check_network:
_check_network(network_structure, [t.shape for t in _tensors], con_order,
out_order)
network_structure, mapping = _canonicalize_network_structure(
network_structure)
flat_labels = [l for sublist in network_structure for l in sublist]
unique_flat_labels = list(set(flat_labels))
if out_order is None:
# negative batch labels (negative labels appearing more than once)
# are subject to the same output ordering as regular output labels
out_order = sorted([l for l in unique_flat_labels if l < 0], reverse=True)
else:
out_order = [mapping[o] for o in out_order]
if con_order is None:
# canonicalization of network structure takes care of appropriate
# contraction ordering (i.e. use ASCII ordering for str and
# regular ordering for int)
# all positive labels appearing are considered proper contraction labels.
con_order = sorted([l for l in unique_flat_labels if l > 0])
else:
con_order = [mapping[o] for o in con_order]
if backend not in _CACHED_JITTED_NCONS:
_CACHED_JITTED_NCONS[backend] = backend_obj.jit(
_jittable_ncon, static_argnums=(1, 2, 3, 4, 5))
sizes = tuple(len(l) for l in network_structure)
res_tensor = _CACHED_JITTED_NCONS[backend](_tensors, tuple(flat_labels),
sizes, tuple(con_order),
tuple(out_order), backend_obj)
if all(are_tensors):
return tn_tensor.Tensor(res_tensor, backend=backend_obj)
return res_tensor |
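A minimal usage sketch of `ncon` with explicit `con_order` and `out_order` (not part of the original source; assumes `numpy` is imported as `np` and uses illustrative shapes and labels):
.. code-block:: python
A = np.random.rand(2, 3, 4)
B = np.random.rand(4, 3)
# Labels 1 and 2 are contracted; the open label -1 survives.
result = ncon([A, B], [(-1, 1, 2), (2, 1)], con_order=[1, 2], out_order=[-1])
# result has shape (2,)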
Get all edges shared between two nodes.
Args:
node1: The first node.
node2: The second node.
Returns:
A (possibly empty) `set` of `Edge`s shared by the nodes. | def get_shared_edges(node1: AbstractNode, node2: AbstractNode) -> Set[Edge]:
"""Get all edges shared between two nodes.
Args:
node1: The first node.
node2: The second node.
Returns:
A (possibly empty) `set` of `Edge`s shared by the nodes.
"""
nodes = {node1, node2}
shared_edges = set()
# Assuming the network is well formed, all of the edges shared by
# these two nodes will be stored in just one of the nodes, so we only
# have to do this loop once.
for edge in node1.edges:
if set(edge.get_nodes()) == nodes:
shared_edges.add(edge)
return shared_edges |
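A small usage sketch (an illustration only, assuming a NumPy backend and that `Node` and `connect` from this module are in scope):
.. code-block:: python
a = Node(np.ones((2, 3)), backend="numpy")
b = Node(np.ones((2, 3)), backend="numpy")
e1 = connect(a[0], b[0])
e2 = connect(a[1], b[1])
assert get_shared_edges(a, b) == {e1, e2}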
Get all of the edges parallel to the given `edge`.
Args:
edge: The given edge.
Returns:
A `set` of all of the edges parallel to the given edge
(including the given edge). | def get_parallel_edges(edge: Edge) -> Set[Edge]:
"""
Get all of the edges parallel to the given `edge`.
Args:
edge: The given edge.
Returns:
A `set` of all of the edges parallel to the given edge
(including the given edge).
"""
return get_shared_edges(edge.node1, edge.node2) |
Return the set of all non-dangling edges. | def get_all_nondangling(nodes: Iterable[AbstractNode]) -> Set[Edge]:
"""Return the set of all non-dangling edges."""
edges = set()
for node in nodes:
edges |= node.get_all_nondangling()
return edges |
Return the set of all dangling edges. | def get_all_dangling(nodes: Iterable[AbstractNode]) -> List[Edge]:
"""Return the set of all dangling edges."""
edges = []
for node in nodes:
edges += node.get_all_dangling()
return edges |
Flatten trace edges into single edge.
Args:
edges: List of trace edges to flatten
new_edge_name: Optional name of the new edge created.
Returns:
The new edge that represents the flattening of the given edges. | def _flatten_trace_edges(edges: List[Edge],
new_edge_name: Optional[Text] = None) -> Edge:
"""Flatten trace edges into single edge.
Args:
edges: List of trace edges to flatten
new_edge_name: Optional name of the new edge created.
Returns:
The new edge that represents the flattening of the given edges.
"""
node = edges[0].node1 # We are in the trace case, so this is the only node.
backend = node.backend
  # Flatten all of the edge's axes into a single list.
perm_back = [min(e.axis1, e.axis2) for e in edges]
perm_back += [max(e.axis1, e.axis2) for e in edges]
perm_front = set(range(len(node.edges))) - set(perm_back)
perm_front = sorted(perm_front)
perm = perm_front + perm_back
new_dim = backend.shape_prod(
[backend.shape_tensor(node.tensor)[e.axis1] for e in edges])
node.reorder_axes(perm)
unaffected_shape = backend.shape_tensor(node.tensor)[:len(perm_front)]
new_shape = backend.shape_concat([unaffected_shape, [new_dim, new_dim]],
axis=-1)
node.tensor = backend.reshape(node.tensor, new_shape)
edge1 = Edge(node1=node, axis1=len(perm_front), name="TraceFront")
edge2 = Edge(node1=node, axis1=len(perm_front) + 1, name="TraceBack")
node.edges = node.edges[:len(perm_front)] + [edge1, edge2]
new_edge = connect(edge1, edge2, new_edge_name)
# pylint: disable=expression-not-assigned
[edge.disable() for edge in edges] #disable edges!
return new_edge |
Flatten edges into single edge.
If two nodes have multiple edges connecting them, it may be
beneficial to flatten these edges into a single edge to avoid having several
unnecessary trace edges. This can speed up computation time and reduce
memory cost.
Warning: This will remove all axes names.
Args:
edges: A list of edges to flatten.
new_edge_name: Optional name to give to the newly created edge.
Returns:
The new flattened edge.
Raises:
ValueError: If edges is an empty list.
ValueError: If not all of the edges connect to the same node(s).
ValueError: If one of the nodes connecting to these edges does not have
edge definitions for all of its axes. | def flatten_edges(edges: List[Edge],
new_edge_name: Optional[Text] = None) -> Edge:
"""Flatten edges into single edge.
If two nodes have multiple edges connecting them, it may be
beneficial to flatten these edges into a single edge to avoid having several
unnecessary trace edges. This can speed up computation time and reduce
memory cost.
Warning: This will remove all axes names.
Args:
edges: A list of edges to flatten.
new_edge_name: Optional name to give to the newly created edge.
Returns:
The new flattened edge.
Raises:
ValueError: If edges is an empty list.
ValueError: If not all of the edges connect to the same node(s).
ValueError: If one of the nodes connecting to these edges does not have
edge definitions for all of its axes.
"""
if not edges:
raise ValueError("At least 1 edge must be given.")
backends = [edge.node1.backend for edge in edges] + [
edge.node2.backend for edge in edges if edge.node2 is not None
]
if not all(b.name == backends[0].name for b in backends):
raise ValueError("Not all backends are the same.")
backend = backends[0]
if len(edges) == 1:
return edges[0] # Don't bother with reshaping.
# Set equality is transitive (a=b, b=c, therefore a=c) so it is only
# necessary to compare the first edge against the rest.
expected_nodes = set(edges[0].get_nodes())
for edge in edges:
if expected_nodes != set(edge.get_nodes()):
raise ValueError(
"Two edges do not share the same nodes. "
"'{}'s nodes: '{}', '{}'. '{}'s nodes: '{}', '{}'".format(
edges[0], edges[0].node1, edges[0].node2, edge, edge.node1,
edge.node2))
if len(expected_nodes) == 1:
return _flatten_trace_edges(edges, new_edge_name) #disables edges
# Flatten standard or dangling edges.
new_dangling_edges = []
for node in expected_nodes:
# Required for dangling case.
if node is None:
continue
axis_names = node.axis_names
perm_back = []
for edge in edges:
# There will only be 1 edge since we are in the standard edge case.
perm_back.append(node.edges.index(edge))
perm_front = sorted(set(range(len(node.edges))) - set(perm_back))
node.reorder_axes(perm_front + perm_back)
old_tensor_shape = backend.shape_tensor(node.tensor)
# Calculate the new axis dimension as a product of the other
# axes dimensions.
flattened_axis_dim = backend.shape_prod(old_tensor_shape[len(perm_front):])
new_tensor_shape = backend.shape_concat(
[old_tensor_shape[:len(perm_front)], [flattened_axis_dim]], axis=-1)
new_tensor = backend.reshape(node.tensor, new_tensor_shape)
    # Modify the node in place. Currently, this is the only method that
# modifies a node's tensor.
node.tensor = new_tensor
# This Edge is required for the connect call later.
edge = Edge(node1=node, axis1=len(perm_front), name=new_edge_name)
node.edges = node.edges[:len(perm_front)] + [edge]
new_dangling_edges.append(edge)
# TODO: Allow renaming of the new axis.
if axis_names:
node.axis_names = [axis_names[n] for n in range(len(node.edges))]
else:
node.axis_names = [str(n) for n in range(len(node.edges))]
node1, node2 = tuple(expected_nodes)
# Sets are returned in a random order, so this is how we deal with
# dangling edges.
# pylint: disable=expression-not-assigned
[edge.disable() for edge in edges] #disable edges!
if node1 is None or node2 is None:
return new_dangling_edges[0]
return connect(new_dangling_edges[0], new_dangling_edges[1], new_edge_name) |
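A usage sketch of flattening two shared edges into one (illustrative shapes; same assumptions about `Node`, `connect` and the NumPy backend):
.. code-block:: python
a = Node(np.ones((2, 3, 4)), backend="numpy")
b = Node(np.ones((2, 3, 5)), backend="numpy")
e1 = connect(a[0], b[0])
e2 = connect(a[1], b[1])
flat = flatten_edges([e1, e2])
# a.tensor now has shape (4, 6), b.tensor has shape (5, 6), and
# flat is the single edge of dimension 6 connecting them.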
Flatten all of the edges between the given two nodes.
Args:
node1: The first node.
node2: The second node.
Returns:
The flattened `Edge` object. If there was only one edge between the two
nodes, then the original edge is returned. If there were no edges
between the nodes, a None is returned. | def flatten_edges_between(
node1: AbstractNode,
node2: AbstractNode,
) -> Optional[Edge]:
"""Flatten all of the edges between the given two nodes.
Args:
node1: The first node.
node2: The second node.
Returns:
The flattened `Edge` object. If there was only one edge between the two
nodes, then the original edge is returned. If there were no edges
between the nodes, a None is returned.
"""
shared_edges = get_shared_edges(node1, node2)
if shared_edges:
return flatten_edges(list(shared_edges))
return None |
Flatten all edges that belong to the nodes.
Returns:
A list of all the flattened edges. If there was only one edge between
two given nodes, that original edge is included in this list. | def flatten_all_edges(nodes: Iterable[AbstractNode]) -> List[Edge]:
"""Flatten all edges that belong to the nodes.
Returns:
A list of all the flattened edges. If there was only one edge between
two given nodes, that original edge is included in this list.
"""
flattened_edges = []
for edge in get_all_nondangling(nodes):
if not edge.is_disabled:
flat_edge = flatten_edges_between(edge.node1, edge.node2)
flattened_edges.append(flat_edge)
return flattened_edges |
Split a trace edge into multiple edges according to `shape`.
Args:
edge: Trace edge to split.
shape: Tuple of integers used to split trace edge into multiple edges.
new_edge_names: Optional names of the new edges created.
Returns:
A list of new edges where the product of the dimensions of the new
edges corresponds to the dimension of the edge before splitting. | def _split_trace_edge(
edge: Edge,
shape: Tuple[int, ...],
new_edge_names: Optional[List[Text]] = None,
) -> List[Edge]:
"""Split trace edges into single edge.
Args:
edge: Trace edge to split.
shape: Tuple of integers used to split trace edge into multiple edges.
new_edge_names: Optional names of the new edges created.
Returns:
A list of new edges where the product of the dimensions of the new
edges corresponds to the dimension of the edge before splitting.
"""
node = edge.node1 # We are in the trace case, so this is the only node.
backend = node.backend
# Permute until edge axes to be split are at the back and reshape.
perm_back = [min(edge.axis1, edge.axis2)]
perm_back += [max(edge.axis1, edge.axis2)]
perm_front = set(range(len(node.edges))) - set(perm_back)
perm_front = sorted(perm_front)
node.reorder_axes(perm_front + perm_back)
unaffected_shape = backend.shape_tensor(node.tensor)[:len(perm_front)]
new_shape = backend.shape_concat([unaffected_shape, shape, shape], axis=-1)
node.tensor = backend.reshape(node.tensor, new_shape)
# Trim edges and add placeholder edges for new axes.
node.edges = node.edges[:len(perm_front)] + 2 * len(shape) * [None]
# Create new dangling edges and connect them to each other.
new_edges = []
for idx in range(len(shape)):
edge1 = Edge(node1=node, axis1=len(perm_front) + idx)
edge2 = Edge(node1=node, axis1=len(perm_front) + len(shape) + idx)
node.edges[len(perm_front) + idx] = edge1
node.edges[len(perm_front) + len(shape) + idx] = edge2
new_edges.append(
connect(edge1, edge2,
new_edge_names[idx] if new_edge_names is not None else None))
# pylint: disable=expression-not-assigned
edge.disable() # disable old edge!
return new_edges |
Split an `Edge` into multiple edges according to `shape`. Reshapes
the underlying tensors connected to the edge accordingly.
This method acts as the inverse operation of flattening edges and
distinguishes between the following edge cases when adding new edges:
1) standard edge connecting two different nodes: reshape node dimensions
2) dangling edge (node2 is None): reshape node1 dimension
3) trace edge (node1 is node2): reshape node1 dimension
Args:
edge: Edge to split.
shape: Tuple of integers used to split edge into multiple edges.
new_edge_names: Optional names of the new edges created.
Returns:
A list of new edges where the product of the dimensions of the new
edges corresponds to the dimension of the edge before splitting.
Raises:
ValueError: If the edge dimension mismatches with the split shape.
ValueError: If the edge is connecting nodes with different backends. | def split_edge(edge: Edge,
shape: Tuple[int, ...],
new_edge_names: Optional[List[Text]] = None) -> List[Edge]:
"""Split an `Edge` into multiple edges according to `shape`. Reshapes
the underlying tensors connected to the edge accordingly.
This method acts as the inverse operation of flattening edges and
distinguishes between the following edge cases when adding new edges:
1) standard edge connecting two different nodes: reshape node dimensions
2) dangling edge (node2 is None): reshape node1 dimension
3) trace edge (node1 is node2): reshape node1 dimension
Args:
edge: Edge to split.
    shape: Tuple of integers used to split edge into multiple edges.
    new_edge_names: Optional names of the new edges created.
Returns:
A list of new edges where the product of the dimensions of the new
edges corresponds to the dimension of the edge before splitting.
Raises:
ValueError: If the edge dimension mismatches with the split shape.
ValueError: If the edge is connecting nodes with different backends.
"""
# Check if reshape operation is possible.
if not np.prod(shape) == edge.dimension:
raise ValueError("Edge {} with dimension {} cannot be split according to "
"shape {}.".format(edge, edge.dimension, shape))
# Check if possible reshape operation is trivial.
if len(shape) == 1:
return [edge]
# Handle trace edge case separately.
if edge.is_trace():
return _split_trace_edge(edge, shape, new_edge_names)
backends = [node.backend for node in edge.get_nodes() if node is not None]
if not all(b.name == backends[0].name for b in backends):
raise ValueError("Not all backends are the same.")
backend = backends[0]
# Split standard or dangling edge.
new_dangling_edges = []
expected_nodes = set(edge.get_nodes())
for node in expected_nodes:
# Required for dangling case.
if node is None:
continue
axis_names = node.axis_names
# Permute until edge axes to be split are at the back and reshape.
perm_back = [node.edges.index(edge)]
perm_front = set(range(len(node.edges))) - set(perm_back)
perm_front = sorted(perm_front)
node.reorder_axes(perm_front + perm_back)
unaffected_shape = backend.shape_tensor(node.tensor)[:len(perm_front)]
new_shape = backend.shape_concat([unaffected_shape, shape], axis=-1)
node.tensor = backend.reshape(node.tensor, new_shape) # in-place update
# Trim edges.
node.edges = node.edges[:len(perm_front)]
# Create new dangling edges.
for idx in range(len(shape)):
new_dangling_edge = Edge(
node1=node,
axis1=len(perm_front) + idx,
name=new_edge_names[idx] if new_edge_names is not None else None)
node.edges += [new_dangling_edge]
new_dangling_edges.append(new_dangling_edge)
# TODO: Allow renaming of new axes (possibly distinct from new_edge_names).
if axis_names:
new_axis_names = [axis_names[n] for n in range(len(unaffected_shape))]
if new_edge_names:
new_axis_names.extend(new_edge_names)
else:
new_axis_names.extend(
[str(n) for n in range(len(unaffected_shape), len(node.edges))])
node.axis_names = new_axis_names
else:
node.axis_names = [str(n) for n in range(len(node.edges))]
node1, node2 = tuple(expected_nodes)
# pylint: disable=expression-not-assigned
edge.disable() # disable old edge
# Return new dangling edges for dangling case.
if node1 is None or node2 is None:
return new_dangling_edges
# Create connected edges between nodes for standard case.
new_edges = []
for idx in range(len(shape)):
new_edges.append(
connect(new_dangling_edges[idx], new_dangling_edges[len(shape) + idx],
new_edge_names[idx] if new_edge_names is not None else None))
return new_edges |
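A usage sketch of splitting a dangling edge (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.ones((6, 4)), backend="numpy")
new_edges = split_edge(a[0], shape=(2, 3))
# a.tensor now has shape (4, 2, 3); new_edges holds the two dangling
# edges of dimension 2 and 3.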
Slices an edge and the connected tensors beginning at `start_index` for
length `length`, along the axis determined by `edge`.
This method modifies the tensors stored in the two nodes connected by `edge`
to corresponding tensor slices (along the axis determined by `edge`) and
returns an updated edge connecting the two nodes along the same axis as
the original `edge`.
Args:
edge: The edge to slice.
start_index: Integer specifying the beginning of the slice.
length: Integer specifying the length of the slice.
Returns:
The updated edge after slicing.
Raises:
ValueError: If the length of the slice is negative.
ValueError: If the slice is incompatible with the edge dimension.
ValueError: If the edge is connecting nodes with different backends. | def slice_edge(edge: Edge, start_index: int, length: int) -> Edge:
"""Slices an edge and the connected tensors beginning at `start_index` for
length `length`, along the axis determined by `edge`.
This method modifies the tensors stored in the two nodes connected by `edge`
to corresponding tensor slices (along the axis determined by `edge`) and
returns an updated edge connecting the two nodes along the same axis as
the original `edge`.
Args:
edge: The edge to slice.
start_index: Integer specifying the beginning of the slice.
length: Integer specifying the length of the slice.
Returns:
The updated edge after slicing.
Raises:
ValueError: If the length of the slice is negative.
ValueError: If the slice is incompatible with the edge dimension.
ValueError: If the edge is connecting nodes with different backends.
"""
if length <= 0:
raise ValueError("Length of slice must be positive.")
if ((start_index + length > edge.dimension) or (-length < start_index < 0)):
raise ValueError("Length {} slice beginning at {} is invalid for edge of "
"dimension {}".format(length, start_index, edge.dimension))
backends = [node.backend for node in edge.get_nodes() if node is not None]
if not all(b.name == backends[0].name for b in backends):
raise ValueError("Not all backends are the same.")
backend = backends[0]
# Handles all three types of edges
for node, axis in zip(edge.get_nodes(), [edge.axis1, edge.axis2]):
if node is not None:
tensor = node.get_tensor()
start_indices = [0] * node.get_rank()
start_indices[axis] = start_index
start_indices = tuple(start_indices)
slice_sizes = list(node.shape)
slice_sizes[axis] = length
slice_sizes = tuple(slice_sizes)
new_tensor = backend.slice(tensor, start_indices, slice_sizes)
node.set_tensor(new_tensor)
return edge |
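A usage sketch of slicing along a dangling edge (illustrative values, same assumptions):
.. code-block:: python
a = Node(np.arange(12).reshape(3, 4), backend="numpy")
slice_edge(a[1], start_index=1, length=2)
# a.tensor now has shape (3, 2) and holds columns 1 and 2 of the
# original tensor.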
Collapse a trace edge. `edge` is disabled before returning.
Take a trace edge (i.e. with edge.node1 = edge.node2),
remove it, update the axis numbers of all remaining edges
and move them to `new_node`.
Args:
edge: The edge to contract.
new_node: The new node created after contraction.
Returns:
None
Raises:
ValueError: If edge is not a trace edge. | def _remove_trace_edge(edge: Edge, new_node: AbstractNode) -> None:
"""Collapse a trace edge. `edge` is disabled before returning.
Take a trace edge (i.e. with edge.node1 = edge.node2),
remove it, update the axis numbers of all remaining edges
and move them to `new_node`.
Args:
edge: The edge to contract.
new_node: The new node created after contraction.
Returns:
None
Raises:
ValueError: If edge is not a trace edge.
"""
if edge.is_dangling():
raise ValueError("Attempted to remove dangling edge '{}'.".format(edge))
if edge.node1 is not edge.node2:
raise ValueError("Edge '{}' is not a trace edge.".format(edge))
axes = sorted([edge.axis1, edge.axis2])
node_edges = edge.node1.edges[:]
node_edges.pop(axes[0])
node_edges.pop(axes[1] - 1)
seen_edges = set()
for tmp_edge in node_edges:
if tmp_edge in seen_edges:
continue
seen_edges.add(tmp_edge)
if tmp_edge.node1 is edge.node1:
to_reduce = 0
to_reduce += 1 if tmp_edge.axis1 > axes[0] else 0
to_reduce += 1 if tmp_edge.axis1 > axes[1] else 0
tmp_edge.axis1 -= to_reduce
tmp_edge.node1 = new_node
if tmp_edge.node2 is edge.node1:
to_reduce = 0
to_reduce += 1 if tmp_edge.axis2 > axes[0] else 0
to_reduce += 1 if tmp_edge.axis2 > axes[1] else 0
tmp_edge.axis2 -= to_reduce
tmp_edge.node2 = new_node
# Update edges for the new node.
for i, e in enumerate(node_edges):
new_node.add_edge(e, i)
edge.node1.fresh_edges(edge.node1.axis_names)
edge.disable() |
Takes a set of `edges` shared between `node1` and `node2` to be contracted
over, and moves all other uncontracted edges from `node1` and `node2` to
`new_node`.
The nodes that currently share the edges in `edges` must be supplied as
`node1` and `node2`. The ordering of `node1` and `node2` must match the
axis ordering of `new_node` (as determined by the contraction procedure).
`node1` and `node2` both get a fresh set of edges.
`edges` are disabled before returning.
Args:
edges: The edges to contract.
node1: The old node that supplies the first edges of `new_node`.
node2: The old node that supplies the last edges of `new_node`.
new_node: The new node that represents the contraction of the two old
nodes.
Returns:
None
Raises:
ValueError: If edge isn't in the network.
new_node: AbstractNode) -> None:
"""Takes a set of `edges` shared between `node1` and `node2` to be contracted
over, and moves all other uncontracted edges from `node1` and `node2` to
`new_node`.
The nodes that currently share the edges in `edges` must be supplied as
`node1` and `node2`. The ordering of `node1` and `node2` must match the
axis ordering of `new_node` (as determined by the contraction procedure).
  `node1` and `node2` both get a fresh set of edges.
`edges` are disabled before returning.
Args:
edges: The edges to contract.
node1: The old node that supplies the first edges of `new_node`.
node2: The old node that supplies the last edges of `new_node`.
new_node: The new node that represents the contraction of the two old
nodes.
Returns:
    None
Raises:
    ValueError: If edge isn't in the network.
"""
if node1 is node2:
raise ValueError(
"node1 and node2 are the same ('{}' == '{}'), but trace edges cannot "
"be removed by _remove_edges.".format(node1, node2))
node1_edges = node1.edges[:]
node2_edges = node2.edges[:]
nodes_set = set([node1, node2])
for edge in edges:
if edge.is_dangling():
raise ValueError("Attempted to remove dangling edge '{}'.".format(edge))
if set([edge.node1, edge.node2]) != nodes_set:
raise ValueError(
"Attempted to remove edges belonging to different node pairs: "
"'{}' != '{}'.".format(nodes_set, set([edge.node1, edge.node2])))
node1_axis_names = node1.axis_names
node2_axis_names = node2.axis_names
remaining_edges = []
for (i, edge) in enumerate(node1_edges):
if edge not in edges: # NOTE: Makes the cost quadratic in # edges
edge.update_axis(old_node=node1,
old_axis=i,
new_axis=len(remaining_edges),
new_node=new_node)
remaining_edges.append(edge)
for (i, edge) in enumerate(node2_edges):
if edge not in edges:
edge.update_axis(old_node=node2,
old_axis=i,
new_axis=len(remaining_edges),
new_node=new_node)
remaining_edges.append(edge)
for (i, edge) in enumerate(remaining_edges):
new_node.add_edge(edge, i)
node1.fresh_edges(node1_axis_names)
node2.fresh_edges(node2_axis_names)
# pylint: disable=expression-not-assigned
[edge.disable() for edge in edges] |
Contract a trace edge.
`edge` is disabled before returning.
Args:
edge: The edge name or object to contract next.
name: Name to give to the new node. If None, a name will automatically be
generated.
Returns:
The new node created after the contraction.
Raises:
ValueError: When edge is a dangling edge. | def _contract_trace(edge: Edge, name: Optional[Text] = None) -> AbstractNode:
"""Contract a trace edge.
`edge` is disabled before returning.
Args:
edge: The edge name or object to contract next.
name: Name to give to the new node. If None, a name will automatically be
generated.
Returns:
The new node created after the contraction.
  Raises:
ValueError: When edge is a dangling edge.
"""
if edge.is_dangling():
raise ValueError("Attempted to contract dangling edge '{}'".format(edge))
if edge.node1 is not edge.node2:
raise ValueError("Can not take trace of edge '{}'. This edge connects to "
"two different nodes: '{}' and '{}".format(
edge, edge.node1, edge.node2))
backend = edge.node1.backend
axes = sorted([edge.axis1, edge.axis2])
dims = len(edge.node1.tensor.shape)
permutation = sorted(set(range(dims)) - set(axes)) + axes
new_tensor = backend.trace(
backend.transpose(edge.node1.tensor, perm=permutation))
name = name if name else edge.node1.name
new_node = Node(new_tensor, name=name, backend=backend)
_remove_trace_edge(edge, new_node) #disables edge
return new_node |
Contract an edge connecting two nodes.
All edges of `node1` and `node2` are passed on to the new node,
and `node1` and `node2` get a new set of dangling edges.
`edge` is disabled before returning.
Args:
edge: The edge to contract.
name: Name of the new node created.
axis_names: An optional list of names for the axes of the new node.
Returns:
The new node created after the contraction.
Raises:
ValueError: When edge is a dangling edge or if it already has been
contracted. | def contract(edge: Edge,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> AbstractNode:
"""Contract an edge connecting two nodes.
All edges of `node1` and `node2` are passed on to the new node,
and `node1` and `node2` get a new set of dangling edges.
`edge` is disabled before returning.
Args:
edge: The edge to contract.
    name: Name of the new node created.
    axis_names: An optional list of names for the axes of the new node.
Returns:
The new node created after the contraction.
Raises:
ValueError: When edge is a dangling edge or if it already has been
contracted.
"""
if edge.is_dangling():
raise ValueError("Attempting to contract dangling edge")
for node in [edge.node1, edge.node2]:
if (node is not None) and (not hasattr(node, 'backend')):
raise TypeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
if edge.node1.backend.name != edge.node2.backend.name:
raise ValueError("edge.node1 {} and edge.node2 {} have different backends "
"{} and {}".format(edge.node1.name, edge.node2.name,
edge.node1.backend.name,
edge.node2.backend.name))
  if edge.node1:
    backend = edge.node1.backend
  else:
    raise ValueError("edge {} has no nodes. "
                     "Cannot perform a contraction".format(edge.name))
if edge.node1 is edge.node2:
return _contract_trace(edge, name)
new_tensor = backend.tensordot(edge.node1.tensor, edge.node2.tensor,
[[edge.axis1], [edge.axis2]])
new_node = Node(tensor=new_tensor,
name=name,
axis_names=axis_names,
backend=backend.name)
# edge.node1 and edge.node2 get new edges in _remove_edges
_remove_edges(set([edge]), edge.node1, edge.node2, new_node)
return new_node |
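A usage sketch of contracting a single shared edge (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.random.rand(2, 3), backend="numpy")
b = Node(np.random.rand(3, 4), backend="numpy")
edge = connect(a[1], b[0])
c = contract(edge)
# c.tensor has shape (2, 4); a and b are left with fresh dangling edges.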
Contract all edges incident on given copy node.
Args:
copy_node: Copy tensor node to be contracted.
name: Name of the new node created.
Returns:
New node representing contracted tensor.
Raises:
ValueError: If copy_node has dangling edge(s). | def contract_copy_node(copy_node: CopyNode,
name: Optional[Text] = None) -> AbstractNode:
"""Contract all edges incident on given copy node.
Args:
copy_node: Copy tensor node to be contracted.
name: Name of the new node created.
Returns:
New node representing contracted tensor.
Raises:
ValueError: If copy_node has dangling edge(s).
"""
new_tensor = copy_node.compute_contracted_tensor()
new_node = Node(new_tensor, name, backend=copy_node.backend.name)
partners = copy_node.get_partners()
new_axis = 0
for partner in partners:
for edge in partner.edges:
if edge.node1 is copy_node or edge.node2 is copy_node:
continue
old_axis = edge.axis1 if edge.node1 is partner else edge.axis2
edge.update_axis(old_node=partner,
old_axis=old_axis,
new_node=new_node,
new_axis=new_axis)
new_node.add_edge(edge, new_axis)
new_axis += 1
assert len(new_tensor.shape) == new_axis
copy_node.fresh_edges(copy_node.axis_names)
return new_node |
Contract all edges parallel to this edge.
This method calls `contract_between` with the nodes connected by the edge.
Args:
edge: The edge to contract.
Returns:
The new node created after contraction. | def contract_parallel(edge: Edge) -> AbstractNode:
"""Contract all edges parallel to this edge.
This method calls `contract_between` with the nodes connected by the edge.
Args:
edge: The edge to contract.
Returns:
The new node created after contraction.
"""
if edge.is_dangling():
raise ValueError("Attempted to contract dangling edge: '{}'".format(edge))
return contract_between(edge.node1, edge.node2) |
Break an existing non-dangling edge.
This updates both Edge.node1 and Edge.node2 by removing the connecting
edge from `Edge.node1.edges` and `Edge.node2.edges` and adding new
dangling edges instead | def disconnect(edge,
edge1_name: Optional[Text] = None,
edge2_name: Optional[Text] = None) -> Tuple[Edge, Edge]:
"""Break an existing non-dangling edge.
This updates both Edge.node1 and Edge.node2 by removing the connecting
edge from `Edge.node1.edges` and `Edge.node2.edges` and adding new
dangling edges instead
"""
return edge.disconnect(edge1_name, edge2_name) |
Contract all of the edges between the two given nodes.
If `output_edge_order` is not set, the output axes will be ordered as:
`[...free axes of node1..., ...free axes of node2...]`. Within the axes
of each `node`, the input order is preserved.
Args:
node1: The first node.
node2: The second node.
name: Name to give to the new node created.
allow_outer_product: Optional boolean. If two nodes do not share any edges
and `allow_outer_product` is set to `True`, then we return the outer
product of the two nodes. Else, we raise a `ValueError`.
output_edge_order: Optional sequence of Edges. When not `None`, must
contain all edges belonging to, but not shared by `node1` and `node2`.
The axes of the new node will be permuted (if necessary) to match this
ordering of Edges.
axis_names: An optional list of names for the axes of the new node in order
of the output axes.
Returns:
The new node created.
Raises:
ValueError: If no edges are found between node1 and node2 and
`allow_outer_product` is set to `False`. | def contract_between(
node1: AbstractNode,
node2: AbstractNode,
name: Optional[Text] = None,
allow_outer_product: bool = False,
output_edge_order: Optional[Sequence[Edge]] = None,
axis_names: Optional[List[Text]] = None,
) -> AbstractNode:
"""Contract all of the edges between the two given nodes.
If `output_edge_order` is not set, the output axes will be ordered as:
`[...free axes of node1..., ...free axes of node2...]`. Within the axes
of each `node`, the input order is preserved.
Args:
node1: The first node.
node2: The second node.
name: Name to give to the new node created.
allow_outer_product: Optional boolean. If two nodes do not share any edges
and `allow_outer_product` is set to `True`, then we return the outer
product of the two nodes. Else, we raise a `ValueError`.
output_edge_order: Optional sequence of Edges. When not `None`, must
contain all edges belonging to, but not shared by `node1` and `node2`.
The axes of the new node will be permuted (if necessary) to match this
ordering of Edges.
    axis_names: An optional list of names for the axes of the new node in order
of the output axes.
Returns:
The new node created.
Raises:
ValueError: If no edges are found between node1 and node2 and
`allow_outer_product` is set to `False`.
"""
for node in [node1, node2]:
if not hasattr(node, 'backend'):
raise TypeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
if node1.backend.name != node2.backend.name:
raise ValueError("node {} and node {} have different backends "
"{} and {}.".format(node1.name, node2.name,
node1.backend.name,
node2.backend.name))
backend = node1.backend
shared_edges = get_shared_edges(node1, node2)
# Trace edges cannot be contracted using tensordot.
if node1 is node2:
flat_edge = flatten_edges_between(node1, node2)
if not flat_edge:
raise ValueError("No trace edges found on contraction of edges between "
"node '{}' and itself.".format(node1))
new_node = contract(flat_edge, name)
elif not shared_edges:
if not allow_outer_product:
raise ValueError("No edges found between nodes '{}' and '{}' "
"and allow_outer_product=False.".format(node1, node2))
new_node = outer_product(node1, node2, name=name)
else:
# Collect the axis of each node corresponding to each edge, in order.
# This specifies the contraction for tensordot.
# NOTE: The ordering of node references in each contraction edge is ignored.
axes1 = []
axes2 = []
for edge in shared_edges:
if edge.node1 is node1:
axes1.append(edge.axis1)
axes2.append(edge.axis2)
else:
axes1.append(edge.axis2)
axes2.append(edge.axis1)
if output_edge_order:
# Determine heuristically if output transposition can be minimized by
# flipping the arguments to tensordot.
node1_output_axes = []
node2_output_axes = []
for (i, edge) in enumerate(output_edge_order):
if edge in shared_edges:
raise ValueError(
"Edge '{}' in output_edge_order is shared by the nodes to be "
"contracted: '{}' and '{}'.".format(edge, node1, node2))
edge_nodes = set(edge.get_nodes())
if node1 in edge_nodes:
node1_output_axes.append(i)
elif node2 in edge_nodes:
node2_output_axes.append(i)
else:
raise ValueError(
"Edge '{}' in output_edge_order is not connected to node '{}' or "
"node '{}'".format(edge, node1, node2))
if node1_output_axes and node2_output_axes and (
np.mean(node1_output_axes) > np.mean(node2_output_axes)):
node1, node2 = node2, node1
axes1, axes2 = axes2, axes1
    # Sorting the indices improves performance.
ind_sort = [axes1.index(l) for l in sorted(axes1)]
axes1 = [axes1[i] for i in ind_sort]
axes2 = [axes2[i] for i in ind_sort]
new_tensor = backend.tensordot(node1.tensor, node2.tensor, [axes1, axes2])
new_node = Node(tensor=new_tensor, name=name, backend=backend)
# node1 and node2 get new edges in _remove_edges
_remove_edges(shared_edges, node1, node2, new_node)
if output_edge_order:
new_node = new_node.reorder_edges(list(output_edge_order))
if axis_names:
new_node.add_axis_names(axis_names)
return new_node |
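A usage sketch that contracts all shared edges at once and fixes the output axis order (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.random.rand(2, 3, 4), backend="numpy")
b = Node(np.random.rand(4, 3, 5), backend="numpy")
connect(a[1], b[1])
connect(a[2], b[0])
c = contract_between(a, b, output_edge_order=[b[2], a[0]])
# c.tensor has shape (5, 2)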
Get the outer product of `nodes`
For example, if there are 3 nodes remaining in `nodes` with
shapes :math:`(2, 3)`, :math:`(4, 5, 6)`, and :math:`(7)`
respectively, the newly returned node will have shape
:math:`(2, 3, 4, 5, 6, 7)`.
Args:
nodes: A collection of nodes.
edge_order: Edge order for the final node.
Returns:
The outer product of the remaining nodes.
Raises:
ValueError: If any of the remaining nodes are not fully contracted. | def outer_product_final_nodes(nodes: Iterable[AbstractNode],
edge_order: List[Edge]) -> AbstractNode:
"""Get the outer product of `nodes`
For example, if there are 3 nodes remaining in `nodes` with
shapes :math:`(2, 3)`, :math:`(4, 5, 6)`, and :math:`(7)`
respectively, the newly returned node will have shape
:math:`(2, 3, 4, 5, 6, 7)`.
Args:
nodes: A collection of nodes.
edge_order: Edge order for the final node.
Returns:
The outer product of the remaining nodes.
Raises:
ValueError: If any of the remaining nodes are not fully contracted.
"""
nodes = list(nodes)
for node in nodes:
if node.has_nondangling_edge():
raise ValueError("Node '{}' has a non-dangling edge remaining.")
final_node = nodes[0]
for node in nodes[1:]:
final_node = outer_product(final_node, node)
return final_node.reorder_edges(edge_order) |
Calculates an outer product of the two nodes.
This causes the nodes to combine their edges and axes, so the shapes are
combined. For example, if `a` had a shape (2, 3) and `b` had a shape
:math:`(4, 5, 6)`, then the node `net.outer_product(a, b)` will have shape
:math:`(2, 3, 4, 5, 6)`. All edges of `node1` and `node2` are passed on to
the new node, and `node1` and `node2` get a new set of dangling edges.
Args:
node1: The first node. The axes on this node will be on the left side of
the new node.
node2: The second node. The axes on this node will be on the right side of
the new node.
name: Optional name to give the new node created.
axis_names: An optional list of names for the axes of the new node.
Returns:
A new node. Its shape will be `node1.shape + node2.shape`.
Raises:
TypeError: If `node1` and `node2` have wrong types. | def outer_product(node1: AbstractNode,
node2: AbstractNode,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> AbstractNode:
"""Calculates an outer product of the two nodes.
This causes the nodes to combine their edges and axes, so the shapes are
combined. For example, if `a` had a shape (2, 3) and `b` had a shape
  :math:`(4, 5, 6)`, then the node `net.outer_product(a, b)` will have shape
:math:`(2, 3, 4, 5, 6)`. All edges of `node1` and `node2` are passed on to
the new node, and `node1` and `node2` get a new set of dangling edges.
Args:
node1: The first node. The axes on this node will be on the left side of
the new node.
node2: The second node. The axes on this node will be on the right side of
the new node.
name: Optional name to give the new node created.
    axis_names: An optional list of names for the axes of the new node.
Returns:
A new node. Its shape will be `node1.shape + node2.shape`.
Raises:
TypeError: If `node1` and `node2` have wrong types.
"""
for node in [node1, node2]:
if not hasattr(node, 'backend'):
raise TypeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
if node1.backend.name != node2.backend.name:
raise ValueError("node {} and node {} have different backends. "
"Cannot perform outer product".format(node1, node2))
backend = node1.backend
if node1.get_rank() == 0 or node2.get_rank() == 0:
new_tensor = backend.multiply(node1.tensor, node2.tensor)
else:
new_tensor = backend.outer_product(node1.tensor, node2.tensor)
node1_axis_names = node1.axis_names
node2_axis_names = node2.axis_names
new_node = Node(tensor=new_tensor,
name=name,
axis_names=axis_names,
backend=backend)
additional_axes = len(node1.tensor.shape)
for i, edge in enumerate(node1.edges):
edge.update_axis(i, node1, i, new_node)
for i, edge in enumerate(node2.edges):
edge.update_axis(i, node2, i + additional_axes, new_node)
for i, edge in enumerate(node1.edges + node2.edges):
new_node.add_edge(edge, i, True)
node1.fresh_edges(node1_axis_names)
node2.fresh_edges(node2_axis_names)
return new_node |
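A usage sketch of the outer product (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.ones((2, 3)), backend="numpy")
b = Node(np.ones((4,)), backend="numpy")
ab = outer_product(a, b)
# ab.tensor has shape (2, 3, 4)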
Copy the given nodes and their edges.
This will return a tuple linking original nodes/edges to their copies.
If nodes A and B are connected but only A is passed in to be
copied, the edge between them will become a dangling edge.
Args:
nodes: An Iterable (Usually a `list` or `set`) of `nodes`.
conjugate: Boolean. Whether to conjugate all of the nodes
(useful for calculating norms and reduced density matrices).
Returns:
A tuple containing:
node_dict:
A dictionary mapping the nodes to their copies.
edge_dict:
A dictionary mapping the edges to their copies. | def copy(nodes: Iterable[AbstractNode],
conjugate: bool = False) -> Tuple[dict, dict]:
"""Copy the given nodes and their edges.
This will return a tuple linking original nodes/edges to their copies.
If nodes A and B are connected but only A is passed in to be
copied, the edge between them will become a dangling edge.
Args:
nodes: An Iterable (Usually a `list` or `set`) of `nodes`.
conjugate: Boolean. Whether to conjugate all of the nodes
(useful for calculating norms and reduced density matrices).
Returns:
A tuple containing:
node_dict:
A dictionary mapping the nodes to their copies.
edge_dict:
A dictionary mapping the edges to their copies.
"""
node_dict = {}
for node in nodes:
node_dict[node] = node.copy(conjugate)
edge_dict = {}
for edge in get_all_edges(nodes):
node1 = edge.node1
axis1 = edge.node1.get_axis_number(edge.axis1)
# edge dangling or node2 does not need to be copied
if edge.is_dangling() or edge.node2 not in node_dict:
new_edge = Edge(node_dict[node1], axis1, edge.name)
node_dict[node1].add_edge(new_edge, axis1)
edge_dict[edge] = new_edge
continue
node2 = edge.node2
axis2 = edge.node2.get_axis_number(edge.axis2)
# copy node2 but not node1
if node1 not in node_dict:
new_edge = Edge(node_dict[node2], axis2, edge.name)
node_dict[node2].add_edge(new_edge, axis2)
edge_dict[edge] = new_edge
continue
# both nodes should be copied
new_edge = Edge(node_dict[node1], axis1, edge.name, node_dict[node2], axis2)
if not edge.is_trace():
node_dict[node2].add_edge(new_edge, axis2)
node_dict[node1].add_edge(new_edge, axis1)
edge_dict[edge] = new_edge
return node_dict, edge_dict |
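A usage sketch of copying a small two-node network (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.ones((2, 2)), backend="numpy")
b = Node(np.ones((2, 2)), backend="numpy")
e = connect(a[0], b[0])
node_dict, edge_dict = copy([a, b])
# node_dict[a] and node_dict[b] are the copies; edge_dict[e] is the
# copied edge connecting them. The original nodes are left untouched.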
Copy the given nodes and their edges.
If nodes A and B are connected but only A is passed in to be
copied, the edge between them will become a dangling edge.
Args:
nodes: An `Iterable` (Usually a `List` or `Set`) of `Nodes`.
conjugate: Boolean. Whether to conjugate all of the nodes
(useful for calculating norms and reduced density
matrices).
Returns:
A list containing the copies of the nodes. | def replicate_nodes(nodes: Iterable[AbstractNode],
conjugate: bool = False) -> List[AbstractNode]:
"""Copy the given nodes and their edges.
If nodes A and B are connected but only A is passed in to be
copied, the edge between them will become a dangling edge.
Args:
nodes: An `Iterable` (Usually a `List` or `Set`) of `Nodes`.
conjugate: Boolean. Whether to conjugate all of the nodes
(useful for calculating norms and reduced density
matrices).
Returns:
A list containing the copies of the nodes.
"""
new_nodes, _ = copy(nodes, conjugate=conjugate)
return [new_nodes[node] for node in nodes] |
Remove a node from the network.
Args:
node: The node to be removed.
Returns:
A tuple of:
disconnected_edges_by_name:
A Dictionary mapping `node`'s axis names to the newly broken edges.
disconnected_edges_by_axis:
A Dictionary mapping `node`'s axis numbers to the newly broken edges. | def remove_node(node: AbstractNode) -> Tuple[Dict[Text, Edge], Dict[int, Edge]]:
"""Remove a node from the network.
Args:
node: The node to be removed.
Returns:
A tuple of:
disconnected_edges_by_name:
A Dictionary mapping `node`'s axis names to the newly broken edges.
disconnected_edges_by_axis:
A Dictionary mapping `node`'s axis numbers to the newly broken edges.
"""
disconnected_edges_by_name = {}
disconnected_edges_by_axis = {}
for i, name in enumerate(node.axis_names):
if not node[i].is_dangling() and not node[i].is_trace():
edge1, edge2 = disconnect(node[i])
new_disconnected_edge = edge1 if edge1.node1 is not node else edge2
disconnected_edges_by_axis[i] = new_disconnected_edge
disconnected_edges_by_name[name] = new_disconnected_edge
return disconnected_edges_by_name, disconnected_edges_by_axis |
Split a `node` using Singular Value Decomposition.
Let :math:`M` be the matrix created by flattening `left_edges` and
`right_edges` into 2 axes.
Let :math:`U S V^* = M` be the SVD of :math:`M`.
This will split the network into 2 nodes.
The left node's tensor will be :math:`U \sqrt{S}`
and the right node's tensor will be
:math:`\sqrt{S} V^*` where :math:`V^*` is the adjoint of :math:`V`.
The singular value decomposition is truncated if `max_singular_values` or
`max_truncation_err` is not `None`.
The truncation error is the 2-norm of the vector of truncated singular
values. If only `max_truncation_err` is set, as many singular values will
be truncated as possible while maintaining:
`norm(truncated_singular_values) <= max_truncation_err`.
If `relative` is set `True` then `max_truncation_err` is understood
relative to the largest singular value.
If only `max_singular_values` is set, the number of singular values kept
will be `min(max_singular_values, number_of_singular_values)`, so that
`max(0, number_of_singular_values - max_singular_values)` are truncated.
If both `max_truncation_err` and `max_singular_values` are set,
`max_singular_values` takes priority: The truncation error may be larger
than `max_truncation_err` if required to satisfy `max_singular_values`.
Args:
node: The node you want to split.
left_edges: The edges you want connected to the new left node.
right_edges: The edges you want connected to the new right node.
max_singular_values: The maximum number of singular values to keep.
max_truncation_err: The maximum allowed truncation error.
relative: Multiply `max_truncation_err` with the largest singular value.
left_name: The name of the new left node. If `None`, a name will be
generated automatically.
right_name: The name of the new right node. If `None`, a name will be
generated automatically.
edge_name: The name of the new `Edge` connecting the new left and
right node. If `None`, a name will be generated automatically.
The new axis will get the same name as the edge.
Returns:
A tuple containing:
left_node:
A new node created that connects to all of the `left_edges`.
Its underlying tensor is :math:`U \sqrt{S}`
right_node:
A new node created that connects to all of the `right_edges`.
Its underlying tensor is :math:`\sqrt{S} V^*`
truncated_singular_values:
The vector of truncated singular values.
Raises:
AttributeError: If `node` has no backend attribute | def split_node(
node: AbstractNode,
left_edges: List[Edge],
right_edges: List[Edge],
max_singular_values: Optional[int] = None,
max_truncation_err: Optional[float] = None,
relative: Optional[bool] = False,
left_name: Optional[Text] = None,
right_name: Optional[Text] = None,
edge_name: Optional[Text] = None,
) -> Tuple[AbstractNode, AbstractNode, Tensor]:
"""Split a `node` using Singular Value Decomposition.
Let :math:`M` be the matrix created by flattening `left_edges` and
`right_edges` into 2 axes.
Let :math:`U S V^* = M` be the SVD of :math:`M`.
This will split the network into 2 nodes.
The left node's tensor will be :math:`U \\sqrt{S}`
and the right node's tensor will be
:math:`\\sqrt{S} V^*` where :math:`V^*` is the adjoint of :math:`V`.
The singular value decomposition is truncated if `max_singular_values` or
`max_truncation_err` is not `None`.
The truncation error is the 2-norm of the vector of truncated singular
values. If only `max_truncation_err` is set, as many singular values will
be truncated as possible while maintaining:
`norm(truncated_singular_values) <= max_truncation_err`.
If `relative` is set `True` then `max_truncation_err` is understood
relative to the largest singular value.
If only `max_singular_values` is set, the number of singular values kept
will be `min(max_singular_values, number_of_singular_values)`, so that
`max(0, number_of_singular_values - max_singular_values)` are truncated.
If both `max_truncation_err` and `max_singular_values` are set,
`max_singular_values` takes priority: The truncation error may be larger
than `max_truncation_err` if required to satisfy `max_singular_values`.
Args:
node: The node you want to split.
left_edges: The edges you want connected to the new left node.
right_edges: The edges you want connected to the new right node.
max_singular_values: The maximum number of singular values to keep.
max_truncation_err: The maximum allowed truncation error.
relative: Multiply `max_truncation_err` with the largest singular value.
left_name: The name of the new left node. If `None`, a name will be
generated automatically.
right_name: The name of the new right node. If `None`, a name will be
generated automatically.
edge_name: The name of the new `Edge` connecting the new left and
right node. If `None`, a name will be generated automatically.
The new axis will get the same name as the edge.
Returns:
A tuple containing:
left_node:
A new node created that connects to all of the `left_edges`.
Its underlying tensor is :math:`U \\sqrt{S}`
right_node:
A new node created that connects to all of the `right_edges`.
Its underlying tensor is :math:`\\sqrt{S} V^*`
truncated_singular_values:
The vector of truncated singular values.
Raises:
AttributeError: If `node` has no backend attribute
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
if node.axis_names and edge_name:
left_axis_names = []
right_axis_names = [edge_name]
for edge in left_edges:
left_axis_names.append(node.axis_names[edge.axis1] if edge.node1 is node
else node.axis_names[edge.axis2])
for edge in right_edges:
right_axis_names.append(node.axis_names[edge.axis1] if edge.node1 is node
else node.axis_names[edge.axis2])
left_axis_names.append(edge_name)
else:
left_axis_names = None
right_axis_names = None
backend = node.backend
transp_tensor = node.tensor_from_edge_order(left_edges + right_edges)
u, s, vh, trun_vals = backend.svd(transp_tensor,
len(left_edges),
max_singular_values,
max_truncation_err,
relative=relative)
sqrt_s = backend.sqrt(s)
u_s = backend.broadcast_right_multiplication(u, sqrt_s)
vh_s = backend.broadcast_left_multiplication(sqrt_s, vh)
left_node = Node(u_s,
name=left_name,
axis_names=left_axis_names,
backend=backend)
left_axes_order = [
edge.axis1 if edge.node1 is node else edge.axis2 for edge in left_edges
]
for i, edge in enumerate(left_edges):
left_node.add_edge(edge, i)
edge.update_axis(left_axes_order[i], node, i, left_node)
right_node = Node(vh_s,
name=right_name,
axis_names=right_axis_names,
backend=backend)
right_axes_order = [
edge.axis1 if edge.node1 is node else edge.axis2 for edge in right_edges
]
for i, edge in enumerate(right_edges):
# i + 1 to account for the new edge.
right_node.add_edge(edge, i + 1)
edge.update_axis(right_axes_order[i], node, i + 1, right_node)
connect(left_node.edges[-1], right_node.edges[0], name=edge_name)
node.fresh_edges(node.axis_names)
return left_node, right_node, trun_vals |
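A usage sketch of an untruncated SVD split (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.random.rand(2, 3, 4, 5), backend="numpy")
left, right, trunc = split_node(a, left_edges=[a[0], a[1]], right_edges=[a[2], a[3]])
# left.tensor has shape (2, 3, 6), right.tensor has shape (6, 4, 5) and
# trunc is empty, since all min(2 * 3, 4 * 5) = 6 singular values are kept.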
Split a `node` using QR decomposition.
Let :math:`M` be the matrix created by
flattening `left_edges` and `right_edges` into 2 axes.
Let :math:`QR = M` be the QR Decomposition of :math:`M`.
This will split the network into 2 nodes.
The `left node`'s tensor will be :math:`Q` (an orthonormal matrix)
and the `right node`'s tensor will be :math:`R` (an upper triangular matrix)
Args:
node: The node you want to split.
left_edges: The edges you want connected to the new left node.
right_edges: The edges you want connected to the new right node.
left_name: The name of the new left node. If `None`, a name will be
generated automatically.
right_name: The name of the new right node. If `None`, a name will be
generated automatically.
edge_name: The name of the new `Edge` connecting the new left and right
node. If `None`, a name will be generated automatically.
Returns:
A tuple containing:
left_node:
A new node created that connects to all of the `left_edges`.
Its underlying tensor is :math:`Q`
right_node:
A new node created that connects to all of the `right_edges`.
Its underlying tensor is :math:`R`
Raises:
AttributeError: If `node` has no backend attribute | def split_node_qr(
node: AbstractNode,
left_edges: List[Edge],
right_edges: List[Edge],
left_name: Optional[Text] = None,
right_name: Optional[Text] = None,
edge_name: Optional[Text] = None,
) -> Tuple[AbstractNode, AbstractNode]:
"""Split a `node` using QR decomposition.
Let :math:`M` be the matrix created by
flattening `left_edges` and `right_edges` into 2 axes.
Let :math:`QR = M` be the QR Decomposition of :math:`M`.
This will split the network into 2 nodes.
The `left node`'s tensor will be :math:`Q` (an orthonormal matrix)
and the `right node`'s tensor will be :math:`R` (an upper triangular matrix)
Args:
node: The node you want to split.
left_edges: The edges you want connected to the new left node.
right_edges: The edges you want connected to the new right node.
left_name: The name of the new left node. If `None`, a name will be
generated automatically.
right_name: The name of the new right node. If `None`, a name will be
generated automatically.
edge_name: The name of the new `Edge` connecting the new left and right
node. If `None`, a name will be generated automatically.
Returns:
A tuple containing:
left_node:
A new node created that connects to all of the `left_edges`.
Its underlying tensor is :math:`Q`
right_node:
A new node created that connects to all of the `right_edges`.
Its underlying tensor is :math:`R`
Raises:
AttributeError: If `node` has no backend attribute
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
if node.axis_names and edge_name:
left_axis_names = []
right_axis_names = [edge_name]
for edge in left_edges:
left_axis_names.append(node.axis_names[edge.axis1] if edge.node1 is node
else node.axis_names[edge.axis2])
for edge in right_edges:
right_axis_names.append(node.axis_names[edge.axis1] if edge.node1 is node
else node.axis_names[edge.axis2])
left_axis_names.append(edge_name)
else:
left_axis_names = None
right_axis_names = None
backend = node.backend
transp_tensor = node.tensor_from_edge_order(left_edges + right_edges)
q, r = backend.qr(transp_tensor, len(left_edges))
left_node = Node(q,
name=left_name,
axis_names=left_axis_names,
backend=backend)
left_axes_order = [
edge.axis1 if edge.node1 is node else edge.axis2 for edge in left_edges
]
for i, edge in enumerate(left_edges):
left_node.add_edge(edge, i)
edge.update_axis(left_axes_order[i], node, i, left_node)
right_node = Node(r,
name=right_name,
axis_names=right_axis_names,
backend=backend)
right_axes_order = [
edge.axis1 if edge.node1 is node else edge.axis2 for edge in right_edges
]
for i, edge in enumerate(right_edges):
# i + 1 to account for the new edge.
right_node.add_edge(edge, i + 1)
edge.update_axis(right_axes_order[i], node, i + 1, right_node)
connect(left_node.edges[-1], right_node.edges[0], name=edge_name)
node.fresh_edges(node.axis_names)
return left_node, right_node |
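A usage sketch of a QR split (illustrative shapes, same assumptions):
.. code-block:: python
a = Node(np.random.rand(2, 3, 4), backend="numpy")
q_node, r_node = split_node_qr(a, left_edges=[a[0], a[1]], right_edges=[a[2]])
# q_node.tensor has shape (2, 3, 4) and r_node.tensor has shape (4, 4).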