index | package | name | docstring | code | signature |
---|---|---|---|---|---|
31,490 | laspy.copc | Bounds | Bounds(mins: numpy.ndarray, maxs: numpy.ndarray) | class Bounds:
mins: np.ndarray
maxs: np.ndarray
def overlaps(self, other: "Bounds") -> bool:
return bool(np.all((self.mins <= other.maxs) & (self.maxs >= other.mins)))
def ensure_3d(self, mins: np.ndarray, maxs: np.ndarray) -> "Bounds":
new_mins = np.zeros(3, dtype=np.float64)
new_maxs = np.zeros(3, dtype=np.float64)
new_mins[: len(self.mins)] = self.mins[:]
new_mins[len(self.mins) :] = mins[len(self.mins) :]
new_maxs[: len(self.maxs)] = self.maxs[:]
new_maxs[len(self.maxs) :] = maxs[len(self.maxs) :]
return Bounds(new_mins, new_maxs)
| (mins: numpy.ndarray, maxs: numpy.ndarray) -> None |
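A minimal usage sketch (not part of the source): constructing ``Bounds`` values, testing overlap, and promoting a 2D bounds to 3D. The import path follows the ``package`` column above.
.. code-block:: python

    import numpy as np
    from laspy.copc import Bounds

    # Two axis-aligned boxes sharing a corner region
    a = Bounds(mins=np.array([0.0, 0.0, 0.0]), maxs=np.array([10.0, 10.0, 10.0]))
    b = Bounds(mins=np.array([5.0, 5.0, 5.0]), maxs=np.array([15.0, 15.0, 15.0]))
    assert a.overlaps(b)

    # A 2D bounds can be promoted to 3D by borrowing the missing Z range
    flat = Bounds(mins=np.array([0.0, 0.0]), maxs=np.array([10.0, 10.0]))
    full = flat.ensure_3d(mins=np.array([0.0, 0.0, -5.0]), maxs=np.array([10.0, 10.0, 5.0]))
    # full.mins == [0.0, 0.0, -5.0], full.maxs == [10.0, 10.0, 5.0]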
31,491 | laspy.copc | __eq__ | null | import io
import multiprocessing
import os
import struct
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from math import ceil, log2
from operator import attrgetter
from queue import Queue, SimpleQueue
from threading import Thread
from typing import Dict, Iterator, List, Optional, Tuple, Union
try:
import requests
except ModuleNotFoundError:
requests = None
else:
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
try:
import lazrs
except ModuleNotFoundError:
lazrs = None
import numpy as np
from .compression import DecompressionSelection
from .errors import LaspyException, LazError
from .header import LasHeader
from .point.record import PackedPointRecord, ScaleAwarePointRecord
from .vlrs.known import BaseKnownVLR
DEFAULT_HTTP_WORKERS_NUM = multiprocessing.cpu_count() * 5
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
| (self, other) |
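The ``requests_retry_session`` helper above wraps a ``requests.Session`` with an ``HTTPAdapter`` configured for retries. A hedged sketch (the URL is hypothetical and ``requests`` must be installed) of how such a session could issue an HTTP range request for the fixed-size LAS 1.4 header:
.. code-block:: python

    session = requests_retry_session(retries=5, backoff_factor=0.5)
    response = session.get(
        "https://example.com/some_file.copc.laz",   # hypothetical URL
        headers={"Range": "bytes=0-374"},           # a LAS 1.4 header is 375 bytes
        timeout=30,
    )
    response.raise_for_status()
    header_bytes = response.content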
31,493 | laspy.copc | __repr__ | null | def __hash__(self):
return hash((self.level, self.x, self.y, self.z))
| (self) |
31,494 | laspy.copc | ensure_3d | null | def ensure_3d(self, mins: np.ndarray, maxs: np.ndarray) -> "Bounds":
new_mins = np.zeros(3, dtype=np.float64)
new_maxs = np.zeros(3, dtype=np.float64)
new_mins[: len(self.mins)] = self.mins[:]
new_mins[len(self.mins) :] = mins[len(self.mins) :]
new_maxs[: len(self.maxs)] = self.maxs[:]
new_maxs[len(self.maxs) :] = maxs[len(self.maxs) :]
return Bounds(new_mins, new_maxs)
| (self, mins: numpy.ndarray, maxs: numpy.ndarray) -> laspy.copc.Bounds |
31,495 | laspy.copc | overlaps | null | def overlaps(self, other: "Bounds") -> bool:
return bool(np.all((self.mins <= other.maxs) & (self.maxs >= other.mins)))
| (self, other: laspy.copc.Bounds) -> bool |
31,496 | laspy.copc | CopcReader |
Class allowing to do queries over a `COPC`_ LAZ file.
In short, COPC files are LAZ 1.4 files organized in a particular way
(an octree), making it possible to do spatial queries
as well as queries with a level of detail.
CopcReader **requires** the ``lazrs`` backend to work.
Optionally, if ``requests`` is installed, CopcReader can handle
COPC files that are on a remote HTTP server.
This class *only* reads COPC files, it does not support normal
LAS/LAZ files.
To create an instance of it you'll likely
want to use the :meth:`.CopcReader.open` constructor.
.. versionadded:: 2.2
.. _COPC: https://github.com/copcio/copcio.github.io
| class CopcReader:
"""
Class allowing to do queries over a `COPC`_ LAZ file.
In short, COPC files are LAZ 1.4 files organized in a particular way
(an octree), making it possible to do spatial queries
as well as queries with a level of detail.
CopcReader **requires** the ``lazrs`` backend to work.
Optionally, if ``requests`` is installed, CopcReader can handle
COPC files that are on a remote HTTP server.
This class *only* reads COPC files, it does not support normal
LAS/LAZ files.
To create an instance of it you'll likely
want to use the :meth:`.CopcReader.open` constructor.
.. versionadded:: 2.2
.. _COPC: https://github.com/copcio/copcio.github.io
"""
def __init__(
self,
stream,
close_fd: bool = True,
http_num_threads: int = DEFAULT_HTTP_WORKERS_NUM,
_http_strategy: str = "queue",
decompression_selection: DecompressionSelection = DecompressionSelection.all(),
):
"""
Creates a CopcReader.
Parameters
---------
stream: the stream from where data can be read.
It must have the following file object methods:
read, seek, tell
http_num_threads: int, optional, default num cpu * 5
Number of worker threads to do concurrent HTTP requests,
ignored when reading a non-HTTP file
close_fd: bool, optional, default True
Whether the stream/file object shall be closed. This only works
when using the CopcReader in a with statement.
decompression_selection: DecompressionSelection,
see :func:`laspy.open`
.. versionadded:: 2.4
The ``decompression_selection`` parameter.
"""
if lazrs is None:
raise LazError("COPC support requires the 'lazrs' backend")
self.source = stream
self.close_fd = close_fd
self.http_num_threads = http_num_threads
self.http_strategy = _http_strategy
self.decompression_selection: lazrs.DecompressionSelection = (
decompression_selection.to_lazrs()
)
self.header = LasHeader.read_from(self.source)
self.copc_info: CopcInfoVlr = self.header.vlrs[0]
if not isinstance(self.copc_info, CopcInfoVlr):
copc_info_exists = any(
isinstance(vlr, CopcInfoVlr) for vlr in self.header.vlrs
)
if copc_info_exists:
raise LaspyException(
"This file is not a valid COPC, "
"it does have a COPC VLR, however it is not the first VLR "
"as it should"
)
else:
raise LaspyException(
"This file is not a valid COPC, " "it does not have a COPC VLR"
)
if self.copc_info.hierarchy_root_offset < self.header.offset_to_point_data:
self.hierarchy = self.header.vlrs.extract("CopcHierarchyVlr")[0]
else:
# TODO maybe we could read the whole EVLR's byte
# so we could load the octree without having any more requests to do
# since everything would be in memory
self.source.seek(self.copc_info.hierarchy_root_offset)
# This only contains the record_data_bytes
root_hierarchy_vlr_bytes = self.source.read(
self.copc_info.hierarchy_root_size
)
hierarchy = CopcHierarchyVlr()
hierarchy.parse_record_data(root_hierarchy_vlr_bytes)
self.laszip_vlr = self.header.vlrs.pop(self.header.vlrs.index("LasZipVlr"))
self.source.seek(self.copc_info.hierarchy_root_offset)
root_page_bytes = self.source.read(self.copc_info.hierarchy_root_size)
# At first the hierarchy only contains the root page entries
# but it will get updated as the queries may need more pages
self.root_page = HierarchyPage.from_bytes(root_page_bytes)
@classmethod
def open(
cls,
source: Union[str, os.PathLike, io.IOBase],
http_num_threads: int = DEFAULT_HTTP_WORKERS_NUM,
_http_strategy: str = "queue",
decompression_selection: DecompressionSelection = DecompressionSelection.all(),
) -> "CopcReader":
"""
Opens the COPC file.
Parameters
----------
source: str, io.IOBase, uri or file-like object of the COPC file.
Supported sources are:
- 'local' files accessible with a path.
- HTTP / HTTPS endpoints. The python package ``requests`` is
required in order to be able to work with HTTP endpoints.
- file-like objects, e.g. fsspec io.IOBase objects.
http_num_threads: int, optional, default num cpu * 5
Number of worker threads to do concurrent HTTP requests,
ignored when reading a non-HTTP file
decompression_selection: DecompressionSelection,
see :func:`laspy.open`
Opening a local file
.. code-block:: Python
from laspy import CopcReader
with CopcReader.open("some_file.laz") as reader:
...
Opening a file on a remote HTTP server
(``requests`` package required)
.. code-block:: Python
from laspy import CopcReader
url = "https://s3.amazonaws.com/hobu-lidar/autzen-classified.copc.laz"
with CopcReader.open(url) as reader:
...
.. versionadded:: 2.4
The ``decompression_selection`` parameter.
"""
if isinstance(source, (str, os.PathLike)):
source = str(source)
if source.startswith("http"):
source = HttpRangeStream(source)
else:
source = open(source, mode="rb")
return cls(
source,
http_num_threads=http_num_threads,
decompression_selection=decompression_selection,
)
def query(
self,
bounds: Optional[Bounds] = None,
resolution: Optional[Union[float, int]] = None,
level: Optional[Union[int, range]] = None,
) -> ScaleAwarePointRecord:
""" "
Query the COPC file to retrieve the points matching the
requested bounds and level.
Parameters
----------
bounds: Bounds, optional, default None
The bounds for which you wish to acquire points.
If None, the whole file's bounds will be considered
2D bounds are supported (no point will be filtered on its Z coordinate)
resolution: float or int, optional, default None
Limits the octree levels to be queried in order to have
a point cloud with the requested resolution.
- The unit is the same as that of the data.
- If None, the resulting cloud will be at the
full resolution offered by the COPC source
- Mutually exclusive with level parameter
level: int or range, optional, default None
The level of detail (LOD).
- If None, all LOD are going to be considered
- If it is an int, only points that are of the requested LOD
will be returned.
- If it is a range, points for which the LOD is within the range
will be returned
"""
if resolution is not None and level is not None:
raise ValueError("Cannot specify both level and resolution")
elif resolution is not None and level is None:
level_max = max(1, ceil(log2(self.copc_info.spacing / resolution)) + 1)
level = range(0, level_max)
if isinstance(level, int):
level = range(level, level + 1)
if bounds is not None:
bounds = bounds.ensure_3d(self.header.mins, self.header.maxs)
nodes = load_octree_for_query(
self.source,
self.copc_info,
self.root_page,
query_bounds=bounds,
level_range=level,
)
# print("num nodes to query:", len(nodes));
points = self._fetch_and_decrompress_points_of_nodes(nodes)
if bounds is not None:
MINS = np.round(
(bounds.mins - self.header.offsets) / self.header.scales
).astype(np.int32)
MAXS = np.round(
(bounds.maxs - self.header.offsets) / self.header.scales
).astype(np.int32)
x_keep = (MINS[0] <= points.X) & (points.X <= MAXS[0])
y_keep = (MINS[1] <= points.Y) & (points.Y <= MAXS[1])
z_keep = (MINS[2] <= points.Z) & (points.Z <= MAXS[2])
# using scaled coordinates
# x, y, z = np.array(points.x), np.array(points.y), np.array(points.z)
# x_keep = (bounds.mins[0] <= x) & (x <= bounds.maxs[0])
# y_keep = (bounds.mins[1] <= y) & (y <= bounds.maxs[1])
# z_keep = (bounds.mins[2] <= z) & (z <= bounds.maxs[2])
keep_mask = x_keep & y_keep & z_keep
points.array = points.array[keep_mask].copy()
return points
def spatial_query(self, bounds: Bounds) -> ScaleAwarePointRecord:
return self.query(bounds=bounds, level=None)
def level_query(self, level: Union[int, range]) -> ScaleAwarePointRecord:
return self.query(bounds=None, level=level)
def _fetch_and_decrompress_points_of_nodes(
self, nodes_to_read: List[OctreeNode]
) -> ScaleAwarePointRecord:
if not nodes_to_read:
return ScaleAwarePointRecord.empty(header=self.header)
# Group together contiguous nodes
# so that we minimize the number of
# read requests (seek + read) / http requests
nodes_to_read = sorted(nodes_to_read, key=attrgetter("offset"))
grouped_nodes: List[List[OctreeNode]] = []
current_group: List[OctreeNode] = []
last_node_end = nodes_to_read[0].offset
for node in nodes_to_read:
if node.offset == last_node_end:
current_group.append(node)
last_node_end += node.byte_size
else:
grouped_nodes.append(current_group)
current_group = [node]
last_node_end = node.offset + node.byte_size
if current_group:
grouped_nodes.append(current_group)
compressed_bytes, num_points, chunk_table = self._fetch_all_chunks(
grouped_nodes
)
points_array = np.zeros(
num_points * self.header.point_format.size, dtype=np.uint8
)
lazrs.decompress_points_with_chunk_table(
compressed_bytes,
self.laszip_vlr.record_data,
points_array,
chunk_table,
self.decompression_selection,
)
r = PackedPointRecord.from_buffer(points_array, self.header.point_format)
points = ScaleAwarePointRecord(
r.array, r.point_format, self.header.scales, self.header.offsets
)
return points
def _fetch_all_chunks(
self, grouped_nodes: List[List[OctreeNode]]
) -> Tuple[bytearray, int, List[Tuple[int, int]]]:
num_points = 0
num_compressed_bytes = 0
chunk_table: List[Tuple[int, int]] = []
byte_queries: List[Tuple[int, int]] = []
for group in grouped_nodes:
num_compressed_group_bytes = 0
for node in group:
chunk_table.append((node.point_count, node.byte_size))
num_compressed_group_bytes += node.byte_size
num_points += node.point_count
num_compressed_bytes += num_compressed_group_bytes
byte_queries.append((group[0].offset, num_compressed_group_bytes))
compressed_bytes = bytearray(num_compressed_bytes)
if isinstance(self.source, HttpRangeStream):
if self.http_strategy == "queue":
http_queue_strategy(
self.source, byte_queries, compressed_bytes, self.http_num_threads
)
else:
http_thread_executor_strategy(
self.source, byte_queries, compressed_bytes, self.http_num_threads
)
elif hasattr(self.source, "readinto"):
citer = ChunkIter(compressed_bytes)
for offset, size in byte_queries:
self.source.seek(offset)
cc = citer.next(size)
self.source.readinto(cc)
else:
citer = ChunkIter(compressed_bytes)
for offset, size in byte_queries:
self.source.seek(offset)
cc = citer.next(size)
cc[:] = self.source.read(size)
return compressed_bytes, num_points, chunk_table
def __enter__(self) -> "CopcReader":
return self
def __exit__(self, _exc_type, _exc_val, _exc_tb) -> None:
if self.close_fd:
self.source.close()
| (stream, close_fd: bool = True, http_num_threads: int = 300, _http_strategy: str = 'queue', decompression_selection: laspy._compression.selection.DecompressionSelection = <DecompressionSelection.ALL_EXTRA_BYTES|WAVEPACKET|NIR|RGB|GPS_TIME|POINT_SOURCE_ID|USER_DATA|SCAN_ANGLE|INTENSITY|FLAGS|CLASSIFICATION|Z|XY_RETURNS_CHANNEL: 8191>) |
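A hedged usage sketch (the file name and coordinates are placeholders): open a local COPC file and query a rectangular area at a coarse level of detail. This assumes the ``lazrs`` backend is installed; ``Bounds`` is imported from ``laspy.copc`` as in the rows above.
.. code-block:: python

    import numpy as np
    from laspy import CopcReader
    from laspy.copc import Bounds

    with CopcReader.open("some_file.copc.laz") as reader:
        query_bounds = Bounds(
            mins=np.array([635_000.0, 850_000.0]),  # placeholder 2D bounds
            maxs=np.array([636_000.0, 851_000.0]),
        )
        # Only octree levels 0..2 are considered
        points = reader.query(bounds=query_bounds, level=range(0, 3))
        print(len(points), "points matched")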
31,497 | laspy.copc | __enter__ | null | def __enter__(self) -> "CopcReader":
return self
| (self) -> laspy.copc.CopcReader |
31,498 | laspy.copc | __exit__ | null | def __exit__(self, _exc_type, _exc_val, _exc_tb) -> None:
if self.close_fd:
self.source.close()
| (self, _exc_type, _exc_val, _exc_tb) -> NoneType |
31,499 | laspy.copc | __init__ |
Creates a CopcReader.
Parameters
---------
stream: the stream from where data can be read.
It must have the following file object methods:
read, seek, tell
http_num_threads: int, optional, default num cpu * 5
Number of worker threads to do concurrent HTTP requests,
ignored when reading a non-HTTP file
close_fd: bool, optional, default True
Whether the stream/file object shall be closed. This only works
when using the CopcReader in a with statement.
decompression_selection: DecompressionSelection,
see :func:`laspy.open`
.. versionadded:: 2.4
The ``decompression_selection`` parameter.
| def __init__(
self,
stream,
close_fd: bool = True,
http_num_threads: int = DEFAULT_HTTP_WORKERS_NUM,
_http_strategy: str = "queue",
decompression_selection: DecompressionSelection = DecompressionSelection.all(),
):
"""
Creates a CopcReader.
Parameters
---------
stream: the stream from where data can be read.
It must have the following file object methods:
read, seek, tell
http_num_threads: int, optional, default num cpu * 5
Number of worker threads to do concurrent HTTP requests,
ignored when reading a non-HTTP file
close_fd: bool, optional, default True
Whether the stream/file object shall be closed. This only works
when using the CopcReader in a with statement.
decompression_selection: DecompressionSelection,
see :func:`laspy.open`
.. versionadded:: 2.4
The ``decompression_selection`` parameter.
"""
if lazrs is None:
raise LazError("COPC support requires the 'lazrs' backend")
self.source = stream
self.close_fd = close_fd
self.http_num_threads = http_num_threads
self.http_strategy = _http_strategy
self.decompression_selection: lazrs.DecompressionSelection = (
decompression_selection.to_lazrs()
)
self.header = LasHeader.read_from(self.source)
self.copc_info: CopcInfoVlr = self.header.vlrs[0]
if not isinstance(self.copc_info, CopcInfoVlr):
copc_info_exists = any(
isinstance(vlr, CopcInfoVlr) for vlr in self.header.vlrs
)
if copc_info_exists:
raise LaspyException(
"This file is not a valid COPC, "
"it does have a COPC VLR, however it is not the first VLR "
"as it should"
)
else:
raise LaspyException(
"This file is not a valid COPC, " "it does not have a COPC VLR"
)
if self.copc_info.hierarchy_root_offset < self.header.offset_to_point_data:
self.hierarchy = self.header.vlrs.extract("CopcHierarchyVlr")[0]
else:
# TODO maybe we could read the whole EVLR's byte
# so we could load the octree without having any more requests to do
# since everything would be in memory
self.source.seek(self.copc_info.hierarchy_root_offset)
# This only contains the record_data_bytes
root_hierarchy_vlr_bytes = self.source.read(
self.copc_info.hierarchy_root_size
)
hierarchy = CopcHierarchyVlr()
hierarchy.parse_record_data(root_hierarchy_vlr_bytes)
self.laszip_vlr = self.header.vlrs.pop(self.header.vlrs.index("LasZipVlr"))
self.source.seek(self.copc_info.hierarchy_root_offset)
root_page_bytes = self.source.read(self.copc_info.hierarchy_root_size)
# At first the hierarchy only contains the root page entries
# but it will get updated as the queries may need more pages
self.root_page = HierarchyPage.from_bytes(root_page_bytes)
| (self, stream, close_fd: bool = True, http_num_threads: int = 300, _http_strategy: str = 'queue', decompression_selection: laspy._compression.selection.DecompressionSelection = <DecompressionSelection.ALL_EXTRA_BYTES|WAVEPACKET|NIR|RGB|GPS_TIME|POINT_SOURCE_ID|USER_DATA|SCAN_ANGLE|INTENSITY|FLAGS|CLASSIFICATION|Z|XY_RETURNS_CHANNEL: 8191>) |
31,500 | laspy.copc | _fetch_all_chunks | null | def _fetch_all_chunks(
self, grouped_nodes: List[List[OctreeNode]]
) -> Tuple[bytearray, int, List[Tuple[int, int]]]:
num_points = 0
num_compressed_bytes = 0
chunk_table: List[Tuple[int, int]] = []
byte_queries: List[Tuple[int, int]] = []
for group in grouped_nodes:
num_compressed_group_bytes = 0
for node in group:
chunk_table.append((node.point_count, node.byte_size))
num_compressed_group_bytes += node.byte_size
num_points += node.point_count
num_compressed_bytes += num_compressed_group_bytes
byte_queries.append((group[0].offset, num_compressed_group_bytes))
compressed_bytes = bytearray(num_compressed_bytes)
if isinstance(self.source, HttpRangeStream):
if self.http_strategy == "queue":
http_queue_strategy(
self.source, byte_queries, compressed_bytes, self.http_num_threads
)
else:
http_thread_executor_strategy(
self.source, byte_queries, compressed_bytes, self.http_num_threads
)
elif hasattr(self.source, "readinto"):
citer = ChunkIter(compressed_bytes)
for offset, size in byte_queries:
self.source.seek(offset)
cc = citer.next(size)
self.source.readinto(cc)
else:
citer = ChunkIter(compressed_bytes)
for offset, size in byte_queries:
self.source.seek(offset)
cc = citer.next(size)
cc[:] = self.source.read(size)
return compressed_bytes, num_points, chunk_table
| (self, grouped_nodes: List[List[laspy.copc.OctreeNode]]) -> Tuple[bytearray, int, List[Tuple[int, int]]] |
31,501 | laspy.copc | _fetch_and_decrompress_points_of_nodes | null | def _fetch_and_decrompress_points_of_nodes(
self, nodes_to_read: List[OctreeNode]
) -> ScaleAwarePointRecord:
if not nodes_to_read:
return ScaleAwarePointRecord.empty(header=self.header)
# Group together contiguous nodes
# so that we minimize the number of
# read requests (seek + read) / http requests
nodes_to_read = sorted(nodes_to_read, key=attrgetter("offset"))
grouped_nodes: List[List[OctreeNode]] = []
current_group: List[OctreeNode] = []
last_node_end = nodes_to_read[0].offset
for node in nodes_to_read:
if node.offset == last_node_end:
current_group.append(node)
last_node_end += node.byte_size
else:
grouped_nodes.append(current_group)
current_group = [node]
last_node_end = node.offset + node.byte_size
if current_group:
grouped_nodes.append(current_group)
compressed_bytes, num_points, chunk_table = self._fetch_all_chunks(
grouped_nodes
)
points_array = np.zeros(
num_points * self.header.point_format.size, dtype=np.uint8
)
lazrs.decompress_points_with_chunk_table(
compressed_bytes,
self.laszip_vlr.record_data,
points_array,
chunk_table,
self.decompression_selection,
)
r = PackedPointRecord.from_buffer(points_array, self.header.point_format)
points = ScaleAwarePointRecord(
r.array, r.point_format, self.header.scales, self.header.offsets
)
return points
| (self, nodes_to_read: List[laspy.copc.OctreeNode]) -> laspy.point.record.ScaleAwarePointRecord |
31,502 | laspy.copc | level_query | null | def level_query(self, level: Union[int, range]) -> ScaleAwarePointRecord:
return self.query(bounds=None, level=level)
| (self, level: Union[int, range]) -> laspy.point.record.ScaleAwarePointRecord |
31,503 | laspy.copc | query |
Query the COPC file to retrieve the points matching the
requested bounds and level.
Parameters
----------
bounds: Bounds, optional, default None
The bounds for which you wish to acquire points.
If None, the whole file's bounds will be considered
2D bounds are supported (no point will be filtered on its Z coordinate)
resolution: float or int, optional, default None
Limits the octree levels to be queried in order to have
a point cloud with the requested resolution.
- The unit is the same as that of the data.
- If None, the resulting cloud will be at the
full resolution offered by the COPC source
- Mutually exclusive with level parameter
level: int or range, optional, default None
The level of detail (LOD).
- If None, all LOD are going to be considered
- If it is an int, only points that are of the requested LOD
will be returned.
- If it is a range, points for which the LOD is within the range
will be returned
| def query(
self,
bounds: Optional[Bounds] = None,
resolution: Optional[Union[float, int]] = None,
level: Optional[Union[int, range]] = None,
) -> ScaleAwarePointRecord:
""" "
Query the COPC file to retrieve the points matching the
requested bounds and level.
Parameters
----------
bounds: Bounds, optional, default None
The bounds for which you wish to acquire points.
If None, the whole file's bounds will be considered
2D bounds are supported (no point will be filtered on its Z coordinate)
resolution: float or int, optional, default None
Limits the octree levels to be queried in order to have
a point cloud with the requested resolution.
- The unit is the same as that of the data.
- If None, the resulting cloud will be at the
full resolution offered by the COPC source
- Mutually exclusive with level parameter
level: int or range, optional, default None
The level of detail (LOD).
- If None, all LOD are going to be considered
- If it is an int, only points that are of the requested LOD
will be returned.
- If it is a range, points for which the LOD is within the range
will be returned
"""
if resolution is not None and level is not None:
raise ValueError("Cannot specify both level and resolution")
elif resolution is not None and level is None:
level_max = max(1, ceil(log2(self.copc_info.spacing / resolution)) + 1)
level = range(0, level_max)
if isinstance(level, int):
level = range(level, level + 1)
if bounds is not None:
bounds = bounds.ensure_3d(self.header.mins, self.header.maxs)
nodes = load_octree_for_query(
self.source,
self.copc_info,
self.root_page,
query_bounds=bounds,
level_range=level,
)
# print("num nodes to query:", len(nodes));
points = self._fetch_and_decrompress_points_of_nodes(nodes)
if bounds is not None:
MINS = np.round(
(bounds.mins - self.header.offsets) / self.header.scales
).astype(np.int32)
MAXS = np.round(
(bounds.maxs - self.header.offsets) / self.header.scales
).astype(np.int32)
x_keep = (MINS[0] <= points.X) & (points.X <= MAXS[0])
y_keep = (MINS[1] <= points.Y) & (points.Y <= MAXS[1])
z_keep = (MINS[2] <= points.Z) & (points.Z <= MAXS[2])
# using scaled coordinates
# x, y, z = np.array(points.x), np.array(points.y), np.array(points.z)
# x_keep = (bounds.mins[0] <= x) & (x <= bounds.maxs[0])
# y_keep = (bounds.mins[1] <= y) & (y <= bounds.maxs[1])
# z_keep = (bounds.mins[2] <= z) & (z <= bounds.maxs[2])
keep_mask = x_keep & y_keep & z_keep
points.array = points.array[keep_mask].copy()
return points
| (self, bounds: Optional[laspy.copc.Bounds] = None, resolution: Union[float, int, NoneType] = None, level: Union[int, range, NoneType] = None) -> laspy.point.record.ScaleAwarePointRecord |
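To illustrate the level selection done by ``query`` when a ``resolution`` is given, here is the same computation in isolation (the spacing and resolution values below are made up):
.. code-block:: python

    from math import ceil, log2

    spacing = 10.0     # root node spacing, taken from the COPC info VLR
    resolution = 2.5   # requested point spacing, in the units of the data
    level_max = max(1, ceil(log2(spacing / resolution)) + 1)
    assert level_max == 3          # octree levels 0, 1 and 2 will be queried
    level = range(0, level_max)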
31,504 | laspy.copc | spatial_query | null | def spatial_query(self, bounds: Bounds) -> ScaleAwarePointRecord:
return self.query(bounds=bounds, level=None)
| (self, bounds: laspy.copc.Bounds) -> laspy.point.record.ScaleAwarePointRecord |
31,505 | laspy._compression.selection | DecompressionSelection |
Holds which fields to decompress or not.
Only used for files with version >= 1.4 and point format id >= 6.
Ignored in other cases.
Each flag in the enum has a corresponding ``decompress_$name`` and
``skip_$name`` methods to easily create a selection.
>>> import laspy
>>> # Creating a selection that decompresses the base + z field
>>> selection = laspy.DecompressionSelection.base().decompress_z()
>>> selection.is_set(laspy.DecompressionSelection.Z)
True
>>> selection.is_set(laspy.DecompressionSelection.INTENSITY)
False
>>> # Creating a selection that decompresses all fields but the intensity
>>> selection = laspy.DecompressionSelection.all().skip_intensity()
>>> selection.is_set(laspy.DecompressionSelection.INTENSITY)
False
>>> selection.is_set(laspy.DecompressionSelection.Z)
True
.. versionadded:: 2.4
| class DecompressionSelection(enum.IntFlag):
"""
Holds which fields to decompress or not.
Only used for files with version >= 1.4 and point format id >= 6.
Ignored in other cases.
Each flag in the enum has a corresponding ``decompress_$name`` and
``skip_$name`` methods to easily create a selection.
>>> import laspy
>>> # Creating a selection that decompresses the base + z field
>>> selection = laspy.DecompressionSelection.base().decompress_z()
>>> selection.is_set(laspy.DecompressionSelection.Z)
True
>>> selection.is_set(laspy.DecompressionSelection.INTENSITY)
False
>>> # Creating a selection that decompresses all fields but the intensity
>>> selection = laspy.DecompressionSelection.all().skip_intensity()
>>> selection.is_set(laspy.DecompressionSelection.INTENSITY)
False
>>> selection.is_set(laspy.DecompressionSelection.Z)
True
.. versionadded:: 2.4
"""
#: Flag to decompress x, y, return number, number of returns and scanner channel
XY_RETURNS_CHANNEL = enum.auto()
#: Flag to decompress z
Z = enum.auto()
#: Flag to decompress the classification
CLASSIFICATION = enum.auto()
#: Flag to decompress the classification flags (withheld, key point, overlap, etc)
FLAGS = enum.auto()
#: Flag to decompress the intensity
INTENSITY = enum.auto()
#: Flag to decompress the scan angle
SCAN_ANGLE = enum.auto()
#: Flag to decompress the user data
USER_DATA = enum.auto()
#: Flag to decompress the point source id
POINT_SOURCE_ID = enum.auto()
#: Flag to decompress the gps time
GPS_TIME = enum.auto()
#: Flag to decompress the red, green, blue
RGB = enum.auto()
#: Flag to decompress the nir
NIR = enum.auto()
#: Flag to decompress the wavepacket
WAVEPACKET = enum.auto()
#: Flag to decompress all the extra bytes
ALL_EXTRA_BYTES = enum.auto()
@classmethod
def all(cls) -> "DecompressionSelection":
"""Returns a selection where all fields will be decompressed"""
selection = cls.base()
for flag in cls:
selection = selection._set(flag)
return selection
@classmethod
def base(cls) -> "DecompressionSelection":
"""
Returns a decompression selection where only the base
x, y, return number, number of returns and scanner channel will be decompressed
"""
return cls.xy_returns_channel()
@classmethod
def xy_returns_channel(cls) -> "DecompressionSelection":
"""
Returns a decompression selection where only the base
x, y, return number, number of returns and scanner channel will be decompressed
"""
return cls.XY_RETURNS_CHANNEL
def to_lazrs(self) -> "lazrs.DecompressionSelection":
import lazrs
variant_mapping = {
DecompressionSelection.XY_RETURNS_CHANNEL: lazrs.SELECTIVE_DECOMPRESS_XY_RETURNS_CHANNEL,
DecompressionSelection.Z: lazrs.SELECTIVE_DECOMPRESS_Z,
DecompressionSelection.CLASSIFICATION: lazrs.SELECTIVE_DECOMPRESS_CLASSIFICATION,
DecompressionSelection.FLAGS: lazrs.SELECTIVE_DECOMPRESS_FLAGS,
DecompressionSelection.INTENSITY: lazrs.SELECTIVE_DECOMPRESS_INTENSITY,
DecompressionSelection.SCAN_ANGLE: lazrs.SELECTIVE_DECOMPRESS_SCAN_ANGLE,
DecompressionSelection.USER_DATA: lazrs.SELECTIVE_DECOMPRESS_USER_DATA,
DecompressionSelection.POINT_SOURCE_ID: lazrs.SELECTIVE_DECOMPRESS_POINT_SOURCE_ID,
DecompressionSelection.GPS_TIME: lazrs.SELECTIVE_DECOMPRESS_GPS_TIME,
DecompressionSelection.RGB: lazrs.SELECTIVE_DECOMPRESS_RGB,
DecompressionSelection.NIR: lazrs.SELECTIVE_DECOMPRESS_NIR,
DecompressionSelection.WAVEPACKET: lazrs.SELECTIVE_DECOMPRESS_WAVEPACKET,
DecompressionSelection.ALL_EXTRA_BYTES: lazrs.SELECTIVE_DECOMPRESS_ALL_EXTRA_BYTES,
}
lazrs_selection = lazrs.SELECTIVE_DECOMPRESS_XY_RETURNS_CHANNEL
for variant in DecompressionSelection:
lazrs_selection |= variant_mapping[variant] if self.is_set(variant) else 0
return lazrs.DecompressionSelection(lazrs_selection)
def to_laszip(self) -> int:
import laszip
variant_mapping = {
DecompressionSelection.XY_RETURNS_CHANNEL: laszip.DECOMPRESS_SELECTIVE_CHANNEL_RETURNS_XY,
DecompressionSelection.Z: laszip.DECOMPRESS_SELECTIVE_Z,
DecompressionSelection.CLASSIFICATION: laszip.DECOMPRESS_SELECTIVE_CLASSIFICATION,
DecompressionSelection.FLAGS: laszip.DECOMPRESS_SELECTIVE_FLAGS,
DecompressionSelection.INTENSITY: laszip.DECOMPRESS_SELECTIVE_INTENSITY,
DecompressionSelection.SCAN_ANGLE: laszip.DECOMPRESS_SELECTIVE_SCAN_ANGLE,
DecompressionSelection.USER_DATA: laszip.DECOMPRESS_SELECTIVE_USER_DATA,
DecompressionSelection.POINT_SOURCE_ID: laszip.DECOMPRESS_SELECTIVE_POINT_SOURCE,
DecompressionSelection.GPS_TIME: laszip.DECOMPRESS_SELECTIVE_GPS_TIME,
DecompressionSelection.RGB: laszip.DECOMPRESS_SELECTIVE_RGB,
DecompressionSelection.NIR: laszip.DECOMPRESS_SELECTIVE_NIR,
DecompressionSelection.WAVEPACKET: laszip.DECOMPRESS_SELECTIVE_WAVEPACKET,
DecompressionSelection.ALL_EXTRA_BYTES: laszip.DECOMPRESS_SELECTIVE_EXTRA_BYTES,
}
laszip_selection = laszip.DECOMPRESS_SELECTIVE_CHANNEL_RETURNS_XY
for variant in DecompressionSelection:
laszip_selection |= variant_mapping[variant] if self.is_set(variant) else 0
return laszip_selection
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
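A hedged sketch (the file name is hypothetical) of building a selection that only decompresses the needed fields and passing it to ``laspy.open``, as referenced by the docstrings above:
.. code-block:: python

    import laspy

    # Decompress only the base (x/y/returns/channel), z and the classification
    selection = (
        laspy.DecompressionSelection.base()
        .decompress_z()
        .decompress_classification()
    )
    with laspy.open("lidar.laz", decompression_selection=selection) as reader:
        las = reader.read()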
31,506 | laspy.point.dims | DimensionInfo | Tuple that contains information of a dimension | class DimensionInfo(NamedTuple):
"""Tuple that contains information of a dimension"""
name: str
kind: DimensionKind
num_bits: int
num_elements: int = 1
is_standard: bool = True
description: str = ""
offsets: Optional[np.ndarray] = None
scales: Optional[np.ndarray] = None
@classmethod
def from_extra_bytes_param(cls, params):
me = cls(
params.name,
DimensionKind.from_letter(params.type.base.kind),
params.type.itemsize * 8,
params.type.shape[0] if params.type.ndim == 1 else 1,
False,
params.description,
params.offsets,
params.scales,
)
me._validate()
return me
@classmethod
def from_dtype(
cls,
name: str,
dtype: np.dtype,
is_standard: bool = True,
description: str = "",
offsets: Optional[np.ndarray] = None,
scales: Optional[np.ndarray] = None,
) -> "DimensionInfo":
if dtype.ndim != 0:
num_elements = dtype.shape[0]
else:
num_elements = 1
kind = DimensionKind.from_letter(dtype.base.kind)
num_bits = dtype.itemsize * 8
self = cls(
name,
kind,
num_bits,
num_elements,
is_standard,
description=description,
offsets=offsets,
scales=scales,
)
self._validate()
return self
@classmethod
def from_bitmask(
cls, name: str, bit_mask: int, is_standard: bool = False
) -> "DimensionInfo":
kind = DimensionKind.BitField
bit_size = num_bit_set(bit_mask)
return cls(name, kind, bit_size, is_standard=is_standard)
@property
def num_bytes(self) -> int:
return int(self.num_bits // 8)
@property
def num_bytes_singular_element(self) -> int:
return int(self.num_bits // (8 * self.num_elements))
@property
def is_scaled(self) -> bool:
return self.scales is not None or self.offsets is not None
@property
def max(self):
if self.kind == DimensionKind.BitField:
return (2**self.num_bits) - 1
elif self.kind == DimensionKind.FloatingPoint:
return np.finfo(self.type_str()).max
else:
return np.iinfo(self.type_str()).max
@property
def min(self):
if (
self.kind == DimensionKind.BitField
or self.kind == DimensionKind.UnsignedInteger
):
return 0
elif self.kind == DimensionKind.FloatingPoint:
return np.finfo(self.type_str()).min
else:
return np.iinfo(self.type_str()).min
def type_str(self) -> Optional[str]:
if self.kind == DimensionKind.BitField:
return None
if self.num_elements == 1:
return f"{self.kind.letter()}{self.num_bytes_singular_element}"
return (
f"{self.num_elements}{self.kind.letter()}{self.num_bytes_singular_element}"
)
@property
def dtype(self) -> Optional[np.dtype]:
type_str = self.type_str()
if type_str is not None:
return np.dtype(type_str)
return None
def __eq__(self, other: "DimensionInfo") -> bool:
# NamedTuple implements this for us, but
# when scales and offsets are not None (thus are arrays)
# the default '==' won't work
# (ValueError: the truth value of an array with more than one element is ambiguous)
return (
self.name == other.name
and self.kind == other.kind
and self.num_bits == other.num_bits
and self.is_standard == other.is_standard
and self.description == other.description
and np.all(self.offsets == other.offsets)
and np.all(self.scales == other.scales)
)
def __ne__(self, other: "DimensionInfo") -> bool:
return not self == other
def _validate(self):
if (self.offsets is not None and self.scales is None) or (
self.offsets is None and self.scales is not None
):
raise ValueError("Cannot provide scales without offsets and vice-versa")
if self.offsets is not None and len(self.offsets) != self.num_elements:
raise ValueError(
f"len(offsets) ({len(self.offsets)}) is not the same as the number of elements ({self.num_elements})"
)
if self.scales is not None and len(self.scales) != self.num_elements:
raise ValueError(
f"len(scales) ({len(self.scales)}) is not the same as the number of elements ({self.num_elements})"
)
| (name: str, kind: laspy.point.dims.DimensionKind, num_bits: int, num_elements: int = 1, is_standard: bool = True, description: str = '', offsets: Optional[numpy.ndarray] = None, scales: Optional[numpy.ndarray] = None) |
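A small sketch (not from the source) of building a ``DimensionInfo`` from a numpy dtype; the import path follows the ``package`` column, and ``laspy.point.dims`` is an internal module:
.. code-block:: python

    import numpy as np
    from laspy.point.dims import DimensionInfo, DimensionKind

    info = DimensionInfo.from_dtype("intensity", np.dtype("u2"))
    assert info.kind is DimensionKind.UnsignedInteger
    assert info.num_bits == 16 and info.num_bytes == 2
    assert info.type_str() == "u2"
    assert info.min == 0 and info.max == 65535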
31,507 | laspy.point.dims | __eq__ | null | def __eq__(self, other: "DimensionInfo") -> bool:
# NamedTuple implements this for us, but
# when scales and offsets are not None (thus are arrays)
# the default '==' won't work
# (ValueError: the truth value of an array with more than one element is ambiguous)
return (
self.name == other.name
and self.kind == other.kind
and self.num_bits == other.num_bits
and self.is_standard == other.is_standard
and self.description == other.description
and np.all(self.offsets == other.offsets)
and np.all(self.scales == other.scales)
)
| (self, other: laspy.point.dims.DimensionInfo) -> bool |
31,509 | laspy.point.dims | __ne__ | null | def __ne__(self, other: "DimensionInfo") -> bool:
return not self == other
| (self, other: laspy.point.dims.DimensionInfo) -> bool |
31,510 | namedtuple_DimensionInfo | __new__ | Create new instance of DimensionInfo(name, kind, num_bits, num_elements, is_standard, description, offsets, scales) | from builtins import function
| (_cls, name: str, kind: laspy.point.dims.DimensionKind, num_bits: int, num_elements: int = 1, is_standard: bool = True, description: str = '', offsets: Optional[numpy.ndarray] = None, scales: Optional[numpy.ndarray] = None) |
31,513 | collections | _replace | Return a new DimensionInfo object replacing specified fields with new values | def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (self, /, **kwds) |
31,514 | laspy.point.dims | _validate | null | def _validate(self):
if (self.offsets is not None and self.scales is None) or (
self.offsets is None and self.scales is not None
):
raise ValueError("Cannot provide scales without offsets and vice-versa")
if self.offsets is not None and len(self.offsets) != self.num_elements:
raise ValueError(
f"len(offsets) ({len(self.offsets)}) is not the same as the number of elements ({self.num_elements})"
)
if self.scales is not None and len(self.scales) != self.num_elements:
raise ValueError(
f"len(scales) ({len(self.scales)}) is not the same as the number of elements ({self.num_elements})"
)
| (self) |
31,515 | laspy.point.dims | type_str | null | def type_str(self) -> Optional[str]:
if self.kind == DimensionKind.BitField:
return None
if self.num_elements == 1:
return f"{self.kind.letter()}{self.num_bytes_singular_element}"
return (
f"{self.num_elements}{self.kind.letter()}{self.num_bytes_singular_element}"
)
| (self) -> Optional[str] |
31,516 | laspy.point.dims | DimensionKind | An enumeration. | class DimensionKind(Enum):
SignedInteger = 0
UnsignedInteger = 1
FloatingPoint = 2
BitField = 3
@classmethod
def from_letter(cls, letter: str) -> "DimensionKind":
if letter == "u":
return cls.UnsignedInteger
elif letter == "i":
return cls.SignedInteger
elif letter == "f":
return cls.FloatingPoint
else:
raise ValueError(f"Unknown type letter '{letter}'")
def letter(self) -> Optional[str]:
if self == DimensionKind.UnsignedInteger:
return "u"
elif self == DimensionKind.SignedInteger:
return "i"
elif self == DimensionKind.FloatingPoint:
return "f"
else:
return None
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
31,517 | laspy.point.format | ExtraBytesParams | All parameters needed to create extra bytes | class ExtraBytesParams:
"""All parameters needed to create extra bytes"""
def __init__(
self,
name: str,
type: Union[str, np.dtype, Type[np.uint8]],
description: str = "",
offsets: Optional[Iterable[Number]] = None,
scales: Optional[Iterable[Number]] = None,
) -> None:
self.name = name
""" The name of the extra dimension """
if isinstance(type, str):
# Work around numpy deprecating support
# for '1type' strings
n = "".join(takewhile(lambda c: c.isdigit(), type))
if n == "1":
type = type[1:]
self.type = np.dtype(type)
""" The type of the extra dimension """
self.description = description
""" A description of the extra dimension """
self.offsets = np.array(offsets) if offsets is not None else offsets
""" The offsets to use if its a 'scaled dimension', can be none """
self.scales = np.array(scales) if scales is not None else scales
""" The scales to use if its a 'scaled dimension', can be none """
| (name: str, type: Union[str, numpy.dtype, Type[numpy.uint8]], description: str = '', offsets: Optional[Iterable[Union[numpy.number, float, int]]] = None, scales: Optional[Iterable[Union[numpy.number, float, int]]] = None) -> None |
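A hedged sketch (file names and the dimension name are hypothetical) of declaring an extra dimension with ``ExtraBytesParams`` and adding it to a file via ``LasData.add_extra_dim``:
.. code-block:: python

    import numpy as np
    import laspy

    las = laspy.read("input.las")                      # hypothetical input file
    las.add_extra_dim(
        laspy.ExtraBytesParams(
            name="confidence",
            type=np.float64,
            description="Per-point confidence score",
        )
    )
    las.confidence = np.zeros(len(las.points), dtype=np.float64)
    las.write("output_with_extra_dim.las")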
31,518 | laspy.point.format | __init__ | null | def __init__(
self,
name: str,
type: Union[str, np.dtype, Type[np.uint8]],
description: str = "",
offsets: Optional[Iterable[Number]] = None,
scales: Optional[Iterable[Number]] = None,
) -> None:
self.name = name
""" The name of the extra dimension """
if isinstance(type, str):
# Work around numpy deprecating support
# for '1type' strings
n = "".join(takewhile(lambda c: c.isdigit(), type))
if n == "1":
type = type[1:]
self.type = np.dtype(type)
""" The type of the extra dimension """
self.description = description
""" A description of the extra dimension """
self.offsets = np.array(offsets) if offsets is not None else offsets
""" The offsets to use if its a 'scaled dimension', can be none """
self.scales = np.array(scales) if scales is not None else scales
""" The scales to use if its a 'scaled dimension', can be none """
| (self, name: str, type: Union[str, numpy.dtype, Type[numpy.uint8]], description: str = '', offsets: Optional[Iterable[Union[numpy.number, float, int]]] = None, scales: Optional[Iterable[Union[numpy.number, float, int]]] = None) -> NoneType |
31,519 | laspy.lasdata | LasData | Class synchronizing all the moving parts of LAS files.
It connects the point record, header, vlrs together.
To access the point dimensions using this class you have two possibilities
.. code:: python
las = laspy.read('some_file.las')
las.classification
# or
las['classification']
| class LasData:
"""Class synchronizing all the moving parts of LAS files.
It connects the point record, header, vlrs together.
To access the point dimensions using this class you have two possibilities
.. code:: python
las = laspy.read('some_file.las')
las.classification
# or
las['classification']
"""
def __init__(
self,
header: LasHeader,
points: Optional[
Union[record.PackedPointRecord, record.ScaleAwarePointRecord]
] = None,
) -> None:
if points is None:
points = record.ScaleAwarePointRecord.zeros(
header.point_count, header=header
)
if points.point_format != header.point_format:
raise errors.LaspyException("Incompatible Point Formats")
if isinstance(points, record.PackedPointRecord):
points = record.ScaleAwarePointRecord(
points.array,
header.point_format,
scales=header.scales,
offsets=header.offsets,
)
else:
assert np.all(header.scales == points.scales)
assert np.all(header.offsets == points.offsets)
self.__dict__["_points"] = points
self.points: record.ScaleAwarePointRecord
self.header: LasHeader = header
@property
def point_format(self) -> PointFormat:
"""Shortcut to get the point format"""
return self.points.point_format
@property
def xyz(self) -> np.ndarray:
"""Returns a **new** 2D numpy array with the x,y,z coordinates
>>> import laspy
>>> las = laspy.read("tests/data/simple.las")
>>> xyz = las.xyz
>>> xyz.ndim
2
>>> xyz.shape
(1065, 3)
>>> np.all(xyz[..., 0] == las.x)
True
"""
return np.vstack((self.x, self.y, self.z)).transpose()
@xyz.setter
def xyz(self, value) -> None:
self.points[("x", "y", "z")] = value
@property
def points(self) -> record.PackedPointRecord:
"""Returns the point record"""
return self._points
@points.setter
def points(self, new_points: record.PackedPointRecord) -> None:
if new_points.point_format != self.point_format:
raise errors.IncompatibleDataFormat(
"Cannot set points with a different point format, convert first"
)
self._points = new_points
self.update_header()
# make sure both point format point to the same object
self._points.point_format = self.header.point_format
@property
def vlrs(self) -> VLRList:
return self.header.vlrs
@vlrs.setter
def vlrs(self, vlrs) -> None:
self.header.vlrs = vlrs
@property
def evlrs(self) -> Optional[VLRList]:
return self.header.evlrs
@evlrs.setter
def evlrs(self, evlrs: VLRList) -> None:
self.header.evlrs = evlrs
def add_extra_dim(self, params: ExtraBytesParams) -> None:
"""Adds a new extra dimension to the point record
.. note::
If you plan on adding multiple extra dimensions,
prefer :meth:`.add_extra_dims` as it will
save re-allocations and data copy
Parameters
----------
params : ExtraBytesParams
parameters of the new extra dimension to add
"""
self.add_extra_dims([params])
def add_extra_dims(self, params: List[ExtraBytesParams]) -> None:
"""Add multiple extra dimensions at once
Parameters
----------
params: list of parameters of the new extra dimensions to add
"""
self.header.add_extra_dims(params)
new_point_record = record.ScaleAwarePointRecord.zeros(
len(self.points), header=self.header
)
new_point_record.copy_fields_from(self.points)
self.points = new_point_record
def remove_extra_dims(self, names: Iterable[str]) -> None:
"""Remove multiple extra dimensions from this object
Parameters
----------
names: iterable,
names of the extra dimensions to be removed
Raises
------
LaspyException: if you try to remove an extra dimension that does not exist.
"""
extra_dimension_names = list(self.point_format.extra_dimension_names)
not_extra_dimension = [
name for name in names if name not in extra_dimension_names
]
if not_extra_dimension:
raise errors.LaspyException(
f"'{not_extra_dimension}' are not extra dimensions and cannot be removed"
)
self.header.remove_extra_dims(names)
new_point_record = record.ScaleAwarePointRecord.zeros(
len(self.points), header=self.header
)
new_point_record.copy_fields_from(self.points)
self.points = new_point_record
def remove_extra_dim(self, name: str) -> None:
"""Remove an extra dimensions from this object
.. note::
If you plan on removing multiple extra dimensions,
prefer :meth:`.remove_extra_dims` as it will
save re-allocations and data copy
Parameters
----------
name: str,
name of the extra dimension to be removed
Raises
------
LaspyException: if you try to remove an extra dimension that does not exist.
"""
self.remove_extra_dims([name])
def update_header(self) -> None:
"""Update the information stored in the header
to be in sync with the actual data.
This method is called automatically when you save a file using
:meth:`laspy.lasdatas.base.LasBase.write`
"""
self.header.update(self.points)
self.header.point_format_id = self.points.point_format.id
self.header.point_data_record_length = self.points.point_size
if self.header.version.minor >= 4:
if self.evlrs is not None:
self.header.number_of_evlrs = len(self.evlrs)
self.header.start_of_waveform_data_packet_record = 0
# TODO
# if len(self.vlrs.get("WktCoordinateSystemVlr")) == 1:
# self.header.global_encoding.wkt = 1
else:
self.header.number_of_evlrs = 0
@overload
def write(
self,
destination: str,
laz_backend: Optional[Union[LazBackend, Sequence[LazBackend]]] = ...,
) -> None:
...
@overload
def write(
self,
destination: BinaryIO,
do_compress: Optional[bool] = ...,
laz_backend: Optional[Union[LazBackend, Sequence[LazBackend]]] = ...,
) -> None:
...
def write(self, destination, do_compress=None, laz_backend=None):
"""Writes to a stream or file
.. note::
When destination is a string, it will be interpreted as the path where the file should be written to,
and whether the file will be compressed depends on the extension used (case insensitive):
- .laz -> compressed
- .las -> uncompressed
And the do_compress option will be ignored
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flags to indicate if you want to compress the data
laz_backend: optional, the laz backend to use
By default, laspy detect available backends
"""
if isinstance(destination, (str, pathlib.Path)):
do_compress = pathlib.Path(destination).suffix.lower() == ".laz"
with open(destination, mode="wb+") as out:
self._write_to(out, do_compress=do_compress, laz_backend=laz_backend)
else:
self._write_to(
destination, do_compress=do_compress, laz_backend=laz_backend
)
def _write_to(
self,
out_stream: BinaryIO,
do_compress: Optional[bool] = None,
laz_backend: Optional[Union[LazBackend, Sequence[LazBackend]]] = None,
) -> None:
with LasWriter(
out_stream,
self.header,
do_compress=do_compress,
closefd=False,
laz_backend=laz_backend,
) as writer:
writer.write_points(self.points)
if self.header.version.minor >= 4 and self.evlrs is not None:
writer.write_evlrs(self.evlrs)
def change_scaling(self, scales=None, offsets=None) -> None:
"""This changes the scales and/or offset used for the x,y,z
dimensions.
It recomputes the internal, non-scaled X,Y,Z dimensions
to match the new scales and offsets.
It also updates the header with the new values of scales and offsets.
Parameters
----------
scales: optional
New scales to be used. If not provided, the scales won't change.
offsets: optional
New offsets to be used. If not provided, the offsets won't change.
Example
-------
>>> import laspy
>>> header = laspy.LasHeader()
>>> header.scales = np.array([0.1, 0.1, 0.1])
>>> header.offsets = np.array([0, 0 ,0])
>>> las = laspy.LasData(header=header)
>>> las.x = [10.0]
>>> las.y = [20.0]
>>> las.z = [30.0]
>>> # X = (x - x_offset) / x_scale
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [100])
>>> assert np.all(las.Y == [200])
>>> assert np.all(las.Z == [300])
We change the scales (only changing x_scale here)
but not the offsets.
The xyz coordinates (double) are the same (minus possible rounding with actual coordinates)
However the integer coordinates changed
>>> las.change_scaling(scales=[0.01, 0.1, 0.1])
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [1000])
>>> assert np.all(las.Y == [200])
>>> assert np.all(las.Z == [300])
Same idea if we change the offsets, the xyz do not change
but XYZ does
>>> las.change_scaling(offsets=[0, 10, 15])
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [1000])
>>> assert np.all(las.Y == [100])
>>> assert np.all(las.Z == [150])
"""
self.points.change_scaling(scales, offsets)
if scales is not None:
self.header.scales = scales
if offsets is not None:
self.header.offsets = offsets
def __getattr__(self, item):
"""Automatically called by Python when the attribute
named 'item' is not found. We use this function to forward the call to the
point record. This is the mechanism used to allow users to access
the point dimensions directly through a LasData.
Parameters
----------
item: str
name of the attribute, should be a dimension name
Returns
-------
The requested dimension if it exists
"""
try:
return self.points[item]
except ValueError:
raise AttributeError(
f"{self.__class__.__name__} object has no attribute '{item}'"
) from None
def __setattr__(self, key, value):
"""This is called on every access to an attribute of the instance.
Again we use this to forward the call to the points record,
but this time checking if the key is actually a dimension name,
so that an error is raised if the user tries to set a valid
LAS dimension that is not present in the point format.
e.g.: the user tries to set the red field of a file with point format 0:
an error is raised
"""
if key in ("x", "y", "z"):
# It is possible that the user created a `LasData` object
# via `laspy.create`, and changed the header's offsets and scales
# values afterwards. So we need to sync the point record.
self.points.offsets = self.header.offsets
self.points.scales = self.header.scales
self.points[key] = value
return
name_validity = self.points.validate_dimension_name(key)
if name_validity == DimensionNameValidity.Valid:
self[key] = value
elif name_validity == DimensionNameValidity.Unsupported:
raise ValueError(
f"Point format {self.point_format} does not support {key} dimension"
)
else:
super().__setattr__(key, value)
@typing.overload
def __getitem__(
self, item: Union[str, List[str]]
) -> Union[np.ndarray, ScaledArrayView, SubFieldView]:
...
@typing.overload
def __getitem__(self, item: Union[int, typing.Iterable[int], slice]) -> "LasData":
...
def __getitem__(self, item):
try:
item_is_list_of_str = all(isinstance(el, str) for el in iter(item))
except TypeError:
item_is_list_of_str = False
if isinstance(item, str) or item_is_list_of_str:
return self.points[item]
else:
las = LasData(deepcopy(self.header), points=self.points[item])
las.update_header()
return las
def __setitem__(self, key, value):
self.points[key] = value
def __len__(self):
return len(self.points)
def __repr__(self) -> str:
return "<LasData({}.{}, point fmt: {}, {} points, {} vlrs)>".format(
self.header.version.major,
self.header.version.minor,
self.points.point_format,
len(self.points),
len(self.vlrs),
)
| (header: laspy.header.LasHeader, points: Union[laspy.point.record.PackedPointRecord, laspy.point.record.ScaleAwarePointRecord, NoneType] = None) -> None |
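A short hedged sketch (paths are hypothetical) of how ``LasData.write`` picks compression: the extension decides when writing to a path, while a stream needs an explicit ``do_compress``:
.. code-block:: python

    import io
    import laspy

    las = laspy.read("input.las")

    las.write("copy.laz")                 # compressed, inferred from the .laz extension
    las.write("copy.las")                 # uncompressed

    buffer = io.BytesIO()
    las.write(buffer, do_compress=True)   # compressed LAZ written into an in-memory stream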
31,520 | laspy.lasdata | __getattr__ | Automatically called by Python when the attribute
named 'item' is not found. We use this function to forward the call to the
point record. This is the mechanism used to allow users to access
the point dimensions directly through a LasData.
Parameters
----------
item: str
name of the attribute, should be a dimension name
Returns
-------
The requested dimension if it exists
| def __getattr__(self, item):
"""Automatically called by Python when the attribute
named 'item' is no found. We use this function to forward the call the
point record. This is the mechanism used to allow the users to access
the points dimensions directly through a LasData.
Parameters
----------
item: str
name of the attribute, should be a dimension name
Returns
-------
The requested dimension if it exists
"""
try:
return self.points[item]
except ValueError:
raise AttributeError(
f"{self.__class__.__name__} object has no attribute '{item}'"
) from None
| (self, item) |
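A minimal usage sketch of this attribute forwarding (the file name is hypothetical; laspy.read and the standard 'intensity' dimension are used):
import laspy
las = laspy.read("example.las")  # hypothetical input file
# 'intensity' is not an attribute of LasData itself; __getattr__ forwards
# the lookup to the point record and returns the dimension's array.
intensities = las.intensity
# A name that is neither an attribute nor a dimension raises AttributeError.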
31,521 | laspy.lasdata | __getitem__ | null | def __getitem__(self, item):
try:
item_is_list_of_str = all(isinstance(el, str) for el in iter(item))
except TypeError:
item_is_list_of_str = False
if isinstance(item, str) or item_is_list_of_str:
return self.points[item]
else:
las = LasData(deepcopy(self.header), points=self.points[item])
las.update_header()
return las
| (self, item) |
31,522 | laspy.lasdata | __init__ | null | def __init__(
self,
header: LasHeader,
points: Optional[
Union[record.PackedPointRecord, record.ScaleAwarePointRecord]
] = None,
) -> None:
if points is None:
points = record.ScaleAwarePointRecord.zeros(
header.point_count, header=header
)
if points.point_format != header.point_format:
raise errors.LaspyException("Incompatible Point Formats")
if isinstance(points, record.PackedPointRecord):
points = record.ScaleAwarePointRecord(
points.array,
header.point_format,
scales=header.scales,
offsets=header.offsets,
)
else:
assert np.all(header.scales == points.scales)
assert np.all(header.offsets == points.offsets)
self.__dict__["_points"] = points
self.points: record.ScaleAwarePointRecord
self.header: LasHeader = header
| (self, header: laspy.header.LasHeader, points: Union[laspy.point.record.PackedPointRecord, laspy.point.record.ScaleAwarePointRecord, NoneType] = None) -> NoneType |
31,523 | laspy.lasdata | __len__ | null | def __len__(self):
return len(self.points)
| (self) |
31,524 | laspy.lasdata | __repr__ | null | def __repr__(self) -> str:
return "<LasData({}.{}, point fmt: {}, {} points, {} vlrs)>".format(
self.header.version.major,
self.header.version.minor,
self.points.point_format,
len(self.points),
len(self.vlrs),
)
| (self) -> str |
31,525 | laspy.lasdata | __setattr__ | This is called on every access to an attribute of the instance.
Again we use this to forward the call to the points record,
but this time checking if the key is actually a dimension name
so that an error is raised if the user tries to set a valid
LAS dimension that is not present in the point format.
eg: if the user tries to set the red field of a file with point format 0,
an error is raised
| def __setattr__(self, key, value):
"""This is called on every access to an attribute of the instance.
Again we use this to forward the call the the points record
But this time checking if the key is actually a dimension name
so that an error is raised if the user tries to set a valid
LAS dimension even if it is not present in the field.
eg: user tries to set the red field of a file with point format 0:
an error is raised
"""
if key in ("x", "y", "z"):
# It is possible that the user created a `LasData` object
# via `laspy.create`, and changed the header's offsets and scales
# values afterwards. So we need to sync the point record.
self.points.offsets = self.header.offsets
self.points.scales = self.header.scales
self.points[key] = value
return
name_validity = self.points.validate_dimension_name(key)
if name_validity == DimensionNameValidity.Valid:
self[key] = value
elif name_validity == DimensionNameValidity.Unsupported:
raise ValueError(
f"Point format {self.point_format} does not support {key} dimension"
)
else:
super().__setattr__(key, value)
| (self, key, value) |
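A short sketch of the setter side, assuming a hypothetical file whose point format does not include color (e.g. point format 0):
import numpy as np
import laspy
las = laspy.read("example_fmt0.las")  # hypothetical input file
# 'intensity' exists in the point format, so the assignment is forwarded to the point record.
las.intensity = np.zeros(len(las), dtype=np.uint16)
# 'red' is a valid LAS dimension but not part of point format 0, so the
# following would raise ValueError("Point format ... does not support red dimension"):
# las.red = 0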
31,526 | laspy.lasdata | __setitem__ | null | def __setitem__(self, key, value):
self.points[key] = value
| (self, key, value) |
31,527 | laspy.lasdata | _write_to | null | def _write_to(
self,
out_stream: BinaryIO,
do_compress: Optional[bool] = None,
laz_backend: Optional[Union[LazBackend, Sequence[LazBackend]]] = None,
) -> None:
with LasWriter(
out_stream,
self.header,
do_compress=do_compress,
closefd=False,
laz_backend=laz_backend,
) as writer:
writer.write_points(self.points)
if self.header.version.minor >= 4 and self.evlrs is not None:
writer.write_evlrs(self.evlrs)
| (self, out_stream: <class 'BinaryIO'>, do_compress: Optional[bool] = None, laz_backend: Union[laspy._compression.backend.LazBackend, Sequence[laspy._compression.backend.LazBackend], NoneType] = None) -> NoneType |
31,528 | laspy.lasdata | add_extra_dim | Adds a new extra dimension to the point record
.. note::
If you plan on adding multiple extra dimensions,
prefer :meth:`.add_extra_dims` as it will
save re-allocations and data copy
Parameters
----------
params : ExtraBytesParams
parameters of the new extra dimension to add
| def add_extra_dim(self, params: ExtraBytesParams) -> None:
"""Adds a new extra dimension to the point record
.. note::
If you plan on adding multiple extra dimensions,
prefer :meth:`.add_extra_dims` as it will
save re-allocations and data copy
Parameters
----------
params : ExtraBytesParams
parameters of the new extra dimension to add
"""
self.add_extra_dims([params])
| (self, params: laspy.point.format.ExtraBytesParams) -> NoneType |
31,529 | laspy.lasdata | add_extra_dims | Add multiple extra dimensions at once
Parameters
----------
params: list of parameters of the new extra dimensions to add
| def add_extra_dims(self, params: List[ExtraBytesParams]) -> None:
"""Add multiple extra dimensions at once
Parameters
----------
params: list of parameters of the new extra dimensions to add
"""
self.header.add_extra_dims(params)
new_point_record = record.ScaleAwarePointRecord.zeros(
len(self.points), header=self.header
)
new_point_record.copy_fields_from(self.points)
self.points = new_point_record
| (self, params: List[laspy.point.format.ExtraBytesParams]) -> NoneType |
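A hedged usage sketch; the file name, dimension names and dtypes are illustrative:
import numpy as np
import laspy
las = laspy.read("example.las")  # hypothetical input file
las.add_extra_dims(
    [
        laspy.ExtraBytesParams(name="confidence", type=np.uint8),
        laspy.ExtraBytesParams(name="height_above_ground", type=np.float32),
    ]
)
# The new dimensions start zero-filled and can be assigned like any other.
las.confidence = np.ones(len(las), dtype=np.uint8)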
31,530 | laspy.lasdata | change_scaling | This changes the scales and/or offsets used for the x,y,z
dimensions.
It recomputes the internal, non-scaled X,Y,Z dimensions
to match the new scales and offsets.
It also updates the header with the new values of scales and offsets.
Parameters
----------
scales: optional
New scales to be used. If not provided, the scales won't change.
offsets: optional
New offsets to be used. If not provided, the offsets won't change.
Example
-------
>>> import laspy
>>> header = laspy.LasHeader()
>>> header.scales = np.array([0.1, 0.1, 0.1])
>>> header.offsets = np.array([0, 0 ,0])
>>> las = laspy.LasData(header=header)
>>> las.x = [10.0]
>>> las.y = [20.0]
>>> las.z = [30.0]
>>> # X = (x - x_offset) / x_scale
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [100])
>>> assert np.all(las.Y == [200])
>>> assert np.all(las.Z == [300])
We change the scales (only changing x_scale here)
but not the offsets.
The xyz coordinates (double) are the same (minus possible rounding with actual coordinates)
However the integer coordinates changed
>>> las.change_scaling(scales=[0.01, 0.1, 0.1])
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [1000])
>>> assert np.all(las.Y == [200])
>>> assert np.all(las.Z == [300])
Same idea if we change the offsets, the xyz do not change
but XYZ does
>>> las.change_scaling(offsets=[0, 10, 15])
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [1000])
>>> assert np.all(las.Y == [100])
>>> assert np.all(las.Z == [150])
| def change_scaling(self, scales=None, offsets=None) -> None:
"""This changes the scales and/or offset used for the x,y,z
dimensions.
It recomputes the internal, non-scaled X,Y,Z dimensions
to match the new scales and offsets.
It also updates the header with the new values of scales and offsets.
Parameters
----------
scales: optional
New scales to be used. If not provided, the scales won't change.
offsets: optional
New offsets to be used. If not provided, the offsets won't change.
Example
-------
>>> import laspy
>>> header = laspy.LasHeader()
>>> header.scales = np.array([0.1, 0.1, 0.1])
>>> header.offsets = np.array([0, 0 ,0])
>>> las = laspy.LasData(header=header)
>>> las.x = [10.0]
>>> las.y = [20.0]
>>> las.z = [30.0]
>>> # X = (x - x_offset) / x_scale
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [100])
>>> assert np.all(las.Y == [200])
>>> assert np.all(las.Z == [300])
We change the scales (only changing x_scale here)
but not the offsets.
The xyz coordinates (double) are the same (minus possible rounding with actual coordinates)
However the integer coordinates changed
>>> las.change_scaling(scales=[0.01, 0.1, 0.1])
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [1000])
>>> assert np.all(las.Y == [200])
>>> assert np.all(las.Z == [300])
Same idea if we change the offsets, the xyz do not change
but XYZ does
>>> las.change_scaling(offsets=[0, 10, 15])
>>> assert np.all(las.xyz == [[10.0, 20., 30]])
>>> assert np.all(las.X == [1000])
>>> assert np.all(las.Y == [100])
>>> assert np.all(las.Z == [150])
"""
self.points.change_scaling(scales, offsets)
if scales is not None:
self.header.scales = scales
if offsets is not None:
self.header.offsets = offsets
| (self, scales=None, offsets=None) -> NoneType |
31,531 | laspy.lasdata | remove_extra_dim | Remove an extra dimension from this object
.. note::
If you plan on removing multiple extra dimensions,
prefer :meth:`.remove_extra_dims` as it will
save re-allocations and data copy
Parameters
----------
name: str,
name of the extra dimension to be removed
Raises
------
LaspyException: if you try to remove an extra dimension that does not exist.
| def remove_extra_dim(self, name: str) -> None:
"""Remove an extra dimensions from this object
.. note::
If you plan on removing multiple extra dimensions,
prefer :meth:`.remove_extra_dims` as it will
save re-allocations and data copy
Parameters
----------
name: str,
name of the extra dimension to be removed
Raises
------
LaspyException: if you try to remove an extra dimension that does not exist.
"""
self.remove_extra_dims([name])
| (self, name: str) -> NoneType |
31,532 | laspy.lasdata | remove_extra_dims | Remove multiple extra dimensions from this object
Parameters
----------
names: iterable,
names of the extra dimensions to be removed
Raises
------
LaspyException: if you try to remove an extra dimension that does not exist.
| def remove_extra_dims(self, names: Iterable[str]) -> None:
"""Remove multiple extra dimensions from this object
Parameters
----------
names: iterable,
names of the extra dimensions to be removed
Raises
------
LaspyException: if you try to remove an extra dimension that does not exist.
"""
extra_dimension_names = list(self.point_format.extra_dimension_names)
not_extra_dimension = [
name for name in names if name not in extra_dimension_names
]
if not_extra_dimension:
raise errors.LaspyException(
f"'{not_extra_dimension}' are not extra dimensions and cannot be removed"
)
self.header.remove_extra_dims(names)
new_point_record = record.ScaleAwarePointRecord.zeros(
len(self.points), header=self.header
)
new_point_record.copy_fields_from(self.points)
self.points = new_point_record
| (self, names: Iterable[str]) -> NoneType |
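Continuing the hypothetical sketch above, the same extra dimensions can be dropped in one call; asking to remove a dimension that is not an extra dimension raises LaspyException:
las.remove_extra_dims(["confidence", "height_above_ground"])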
31,533 | laspy.lasdata | update_header | Update the information stored in the header
to be in sync with the actual data.
This method is called automatically when you save a file using
:meth:`laspy.lasdatas.base.LasBase.write`
| def update_header(self) -> None:
"""Update the information stored in the header
to be in sync with the actual data.
This method is called automatically when you save a file using
:meth:`laspy.lasdatas.base.LasBase.write`
"""
self.header.update(self.points)
self.header.point_format_id = self.points.point_format.id
self.header.point_data_record_length = self.points.point_size
if self.header.version.minor >= 4:
if self.evlrs is not None:
self.header.number_of_evlrs = len(self.evlrs)
self.header.start_of_waveform_data_packet_record = 0
# TODO
# if len(self.vlrs.get("WktCoordinateSystemVlr")) == 1:
# self.header.global_encoding.wkt = 1
else:
self.header.number_of_evlrs = 0
| (self) -> NoneType |
31,534 | laspy.lasdata | write | Writes to a stream or file
.. note::
When destination is a string, it will be interpreted as the path where the file should be written to,
and whether the file will be compressed depends on the extension used (case insensitive):
- .laz -> compressed
- .las -> uncompressed
And the do_compress option will be ignored
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flag to indicate if you want to compress the data
laz_backend: optional, the laz backend to use
By default, laspy detects available backends
| def write(self, destination, do_compress=None, laz_backend=None):
"""Writes to a stream or file
.. note::
When destination is a string, it will be interpreted as the path where the file should be written to,
and whether the file will be compressed depends on the extension used (case insensitive):
- .laz -> compressed
- .las -> uncompressed
And the do_compress option will be ignored
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flag to indicate if you want to compress the data
laz_backend: optional, the laz backend to use
By default, laspy detects available backends
"""
if isinstance(destination, (str, pathlib.Path)):
do_compress = pathlib.Path(destination).suffix.lower() == ".laz"
with open(destination, mode="wb+") as out:
self._write_to(out, do_compress=do_compress, laz_backend=laz_backend)
else:
self._write_to(
destination, do_compress=do_compress, laz_backend=laz_backend
)
| (self, destination, do_compress=None, laz_backend=None) |
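A small sketch of the extension-based behaviour described above (file names are hypothetical; writing '.laz' requires an available LAZ backend):
import io
import laspy
las = laspy.read("example.las")  # hypothetical input file
las.write("copy.las")  # '.las' -> uncompressed, do_compress is ignored
las.write("copy.laz")  # '.laz' -> compressed, do_compress is ignored
# With a stream there is no extension, so do_compress decides.
buffer = io.BytesIO()
las.write(buffer, do_compress=True)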
31,535 | laspy.header | LasHeader | Contains the information from the header of a LAS file
with 'implementation' field left out and 'users' field
stored in more ergonomic classes.
This header also contains the VLRs
Examples
--------
Creating a default header:
>>> header = LasHeader()
>>> header
<LasHeader(1.2, <PointFormat(3, 0 bytes of extra dims)>)>
Creating a header with the wanted version and point format:
>>> header = LasHeader(version=Version(1, 4), point_format=PointFormat(6))
>>> header
<LasHeader(1.4, <PointFormat(6, 0 bytes of extra dims)>)>
>>> header = LasHeader(version="1.4", point_format=6)
>>> header
<LasHeader(1.4, <PointFormat(6, 0 bytes of extra dims)>)>
| class LasHeader:
"""Contains the information from the header of as LAS file
with 'implementation' field left out and 'users' field
stored in more ergonomic classes.
This header also contains the VLRs
Examples
--------
Creating a default header:
>>> header = LasHeader()
>>> header
<LasHeader(1.2, <PointFormat(3, 0 bytes of extra dims)>)>
Creating a header with the wanted version and point format:
>>> header = LasHeader(version=Version(1, 4), point_format=PointFormat(6))
>>> header
<LasHeader(1.4, <PointFormat(6, 0 bytes of extra dims)>)>
>>> header = LasHeader(version="1.4", point_format=6)
>>> header
<LasHeader(1.4, <PointFormat(6, 0 bytes of extra dims)>)>
"""
#: The default version used when None is given to init
DEFAULT_VERSION = Version(1, 2)
#: The default point format Used when None is given to init
DEFAULT_POINT_FORMAT = PointFormat(3)
_OLD_LASPY_NAMES = {
"max": "maxs",
"min": "mins",
"scale": "scales",
"offset": "offsets",
"filesource_id": "file_source_id",
"system_id": "system_identifier",
"date": "creation_date",
"point_return_count": "number_of_points_by_return",
"software_id": "generating_software",
"point_records_count": "point_count",
}
def __init__(
self,
*,
version: Optional[Union[Version, str]] = None,
point_format: Optional[Union[PointFormat, int]] = None,
) -> None:
if isinstance(point_format, int):
point_format = PointFormat(point_format)
if isinstance(version, str):
version = Version.from_str(version)
if version is None and point_format is None:
version = LasHeader.DEFAULT_VERSION
point_format = LasHeader.DEFAULT_POINT_FORMAT
elif version is not None and point_format is None:
point_format = PointFormat(dims.min_point_format_for_version(str(version)))
elif version is None and point_format is not None:
version = Version.from_str(
dims.preferred_file_version_for_point_format(point_format.id)
)
dims.raise_if_version_not_compatible_with_fmt(point_format.id, str(version))
#: File source id
self.file_source_id: int = 0
self.global_encoding: GlobalEncoding = GlobalEncoding()
#: Project ID
#: Initialized to null UUID
self.uuid: UUID = UUID(bytes_le=b"\0" * 16)
self._version: Version = version
#: System identifier
#: Initialized to 'OTHER'
self.system_identifier: Union[str, bytes] = "OTHER"
#: The software which generated the file
#: Initialized to 'laspy'
self.generating_software: Union[str, bytes] = DEFAULT_GENERATING_SOFTWARE
self._point_format: PointFormat = point_format
#: Day the file was created, initialized to date.today
self.creation_date: Optional[date] = date.today()
#: The number of points in the file
self.point_count: int = 0
#: The numbers used to scale the x,y,z coordinates
self.scales: np.ndarray = np.array([0.01, 0.01, 0.01], dtype=np.float64)
#: The numbers used to offset the x,y,z coordinates
self.offsets: np.ndarray = np.zeros(3, dtype=np.float64)
# The max values for x,y,z
self.maxs: np.ndarray = np.zeros(3, dtype=np.float64)
# The min values for x,y,z
self.mins: np.ndarray = np.zeros(3, dtype=np.float64)
#: Number of points by return
#: for las <= 1.2 only the first 5 elements matter
self.number_of_points_by_return: np.ndarray = np.zeros(15, dtype=np.uint32)
#: The VLRS
self._vlrs: VLRList = VLRList()
#: Extra bytes between end of header and first vlrs
self.extra_header_bytes: bytes = b""
#: Extra bytes between end of vlrs and first point
self.extra_vlr_bytes: bytes = b""
#: Las >= 1.3
self.start_of_waveform_data_packet_record: int = 0
#: Las >= 1.4
#: Offset to the first evlr in the file
self.start_of_first_evlr: int = 0
#: The number of evlrs in the file
self.number_of_evlrs: int = 0
#: EVLRs, even though they are not stored in the 'header'
#: part of the file, we keep them in this class
#: as they contain the same information as vlrs.
#: None when the file does not support EVLR
self.evlrs: Optional[VLRList] = None
# Info we keep because it's useful for us but not the user
self.offset_to_point_data: int = 0
self.are_points_compressed: bool = False
self._sync_extra_bytes_vlr()
@property
def point_format(self) -> PointFormat:
"""The point format"""
return self._point_format
@point_format.setter
def point_format(self, new_point_format: PointFormat) -> None:
dims.raise_if_version_not_compatible_with_fmt(
new_point_format.id, str(self.version)
)
self._point_format = new_point_format
self._sync_extra_bytes_vlr()
@property
def version(self) -> Version:
"""The version"""
return self._version
@version.setter
def version(self, version: Version) -> None:
dims.raise_if_version_not_compatible_with_fmt(
self.point_format.id, str(version)
)
self._version = version
# scale properties
@property
def x_scale(self) -> float:
return self.scales[0]
@x_scale.setter
def x_scale(self, value: float) -> None:
self.scales[0] = value
@property
def y_scale(self) -> float:
return self.scales[1]
@y_scale.setter
def y_scale(self, value: float) -> None:
self.scales[1] = value
@property
def z_scale(self) -> float:
return self.scales[2]
@z_scale.setter
def z_scale(self, value: float) -> None:
self.scales[2] = value
# offset properties
@property
def x_offset(self) -> float:
return self.offsets[0]
@x_offset.setter
def x_offset(self, value: float) -> None:
self.offsets[0] = value
@property
def y_offset(self) -> float:
return self.offsets[1]
@y_offset.setter
def y_offset(self, value: float) -> None:
self.offsets[1] = value
@property
def z_offset(self) -> float:
return self.offsets[2]
@z_offset.setter
def z_offset(self, value: float) -> None:
self.offsets[2] = value
# max properties
@property
def x_max(self) -> float:
return self.maxs[0]
@x_max.setter
def x_max(self, value: float) -> None:
self.maxs[0] = value
@property
def y_max(self) -> float:
return self.maxs[1]
@y_max.setter
def y_max(self, value: float) -> None:
self.maxs[1] = value
@property
def z_max(self) -> float:
return self.maxs[2]
@z_max.setter
def z_max(self, value: float) -> None:
self.maxs[2] = value
# min properties
@property
def x_min(self) -> float:
return self.mins[0]
@x_min.setter
def x_min(self, value: float) -> None:
self.mins[0] = value
@property
def y_min(self) -> float:
return self.mins[1]
@y_min.setter
def y_min(self, value: float) -> None:
self.mins[1] = value
@property
def z_min(self) -> float:
return self.mins[2]
@z_min.setter
def z_min(self, value: float) -> None:
self.mins[2] = value
@property
def vlrs(self) -> VLRList:
return self._vlrs
@vlrs.setter
def vlrs(self, vlrs: typing.Iterable[VLR]) -> None:
self._vlrs = VLRList(vlrs)
try:
self.vlrs.extract("LaszipVlr")
except ValueError:
pass
self._sync_extra_bytes_vlr()
def add_extra_dims(self, params: List[ExtraBytesParams]) -> None:
for param in params:
self.point_format.add_extra_dimension(param)
self._sync_extra_bytes_vlr()
def add_extra_dim(self, params: ExtraBytesParams):
self.add_extra_dims([params])
def add_crs(self, crs: "pyproj.CRS", keep_compatibility: bool = True) -> None:
"""Add a Coordinate Reference System VLR from a pyproj CRS object.
The type of VLR created depends on the las version and point format
version. Las version >= 1.4 use WKT string, las version < 1.4 and point
format < 6 use GeoTiff tags.
.. warning::
This requires `pyproj`.
.. warning::
Not all CRS are supported when adding GeoTiff tags.
For example, custom CRS.
Typically, if the CRS has an EPSG code it will be supported.
"""
import pyproj
# check and remove any existing crs vlrs
for crs_vlr_name in (
"WktCoordinateSystemVlr",
"GeoKeyDirectoryVlr",
"GeoAsciiParamsVlr",
"GeoDoubleParamsVlr",
):
try:
self._vlrs.extract(crs_vlr_name)
except IndexError:
pass
new_ver = self._version >= Version(1, 4)
new_pt = self.point_format.id >= 6
if new_pt or (new_ver and not keep_compatibility):
self._vlrs.append(WktCoordinateSystemVlr(crs.to_wkt()))
self.global_encoding.wkt = True
else:
self._vlrs.extend(create_geotiff_projection_vlrs(crs))
def remove_extra_dim(self, name: str) -> None:
self.remove_extra_dims([name])
def remove_extra_dims(self, names: Iterable[str]) -> None:
for name in names:
self.point_format.remove_extra_dimension(name)
self._sync_extra_bytes_vlr()
def set_version_and_point_format(
self, version: Version, point_format: PointFormat
) -> None:
dims.raise_if_version_not_compatible_with_fmt(point_format.id, str(version))
self._version = version
self.point_format = point_format
def partial_reset(self) -> None:
f64info = np.finfo(np.float64)
self.maxs = np.ones(3, dtype=np.float64) * f64info.min
self.mins = np.ones(3, dtype=np.float64) * f64info.max
self.start_of_first_evlr = 0
self.number_of_evlrs = 0
self.point_count = 0
self.number_of_points_by_return = np.zeros(15, dtype=np.uint32)
def update(self, points: PackedPointRecord) -> None:
self.partial_reset()
if not points:
self.maxs = [0.0, 0.0, 0.0]
self.mins = [0.0, 0.0, 0.0]
else:
self.grow(points)
def grow(self, points: PackedPointRecord) -> None:
self.x_max = max(
self.x_max,
(points["X"].max() * self.x_scale) + self.x_offset,
)
self.y_max = max(
self.y_max,
(points["Y"].max() * self.y_scale) + self.y_offset,
)
self.z_max = max(
self.z_max,
(points["Z"].max() * self.z_scale) + self.z_offset,
)
self.x_min = min(
self.x_min,
(points["X"].min() * self.x_scale) + self.x_offset,
)
self.y_min = min(
self.y_min,
(points["Y"].min() * self.y_scale) + self.y_offset,
)
self.z_min = min(
self.z_min,
(points["Z"].min() * self.z_scale) + self.z_offset,
)
for return_number, count in zip(
*np.unique(points.return_number, return_counts=True)
):
if return_number == 0:
continue
if return_number > len(self.number_of_points_by_return):
break # np.unique sorts unique values
self.number_of_points_by_return[return_number - 1] += count
self.point_count += len(points)
def set_compressed(self, state: bool) -> None:
self.are_points_compressed = state
def max_point_count(self) -> int:
if self.version <= Version(1, 3):
return np.iinfo(np.uint32).max
else:
return np.iinfo(np.uint64).max
@classmethod
def read_from(
cls, original_stream: BinaryIO, read_evlrs: bool = False
) -> "LasHeader":
"""
Reads the header from the stream
read_evlrs: If true, evlrs will be read
Leaves the stream pos right before the points start
(regardless of whether read_evlrs was true)
"""
little_endian = "little"
header = cls()
stream = io.BytesIO(cls._prefetch_header_data(original_stream))
file_sig = stream.read(4)
# This should not be possible as _prefetch already checks this
assert file_sig == LAS_FILE_SIGNATURE
header.file_source_id = int.from_bytes(
stream.read(2), little_endian, signed=False
)
header.global_encoding = GlobalEncoding.read_from(stream)
header.uuid = UUID(bytes_le=stream.read(16))
header._version = Version(
int.from_bytes(stream.read(1), little_endian, signed=False),
int.from_bytes(stream.read(1), little_endian, signed=False),
)
header.system_identifier = read_string(stream, SYSTEM_IDENTIFIER_LEN)
header.generating_software = read_string(stream, GENERATING_SOFTWARE_LEN)
creation_day_of_year = int.from_bytes(
stream.read(2), little_endian, signed=False
)
creation_year = int.from_bytes(stream.read(2), little_endian, signed=False)
try:
header.creation_date = date(creation_year, 1, 1) + timedelta(
creation_day_of_year - 1
)
except ValueError:
header.creation_date = None
header_size = int.from_bytes(stream.read(2), little_endian, signed=False)
header.offset_to_point_data = int.from_bytes(
stream.read(4), little_endian, signed=False
)
number_of_vlrs = int.from_bytes(stream.read(4), little_endian, signed=False)
point_format_id = int.from_bytes(stream.read(1), little_endian, signed=False)
point_size = int.from_bytes(stream.read(2), little_endian, signed=False)
header.point_count = int.from_bytes(stream.read(4), little_endian, signed=False)
for i in range(5):
header.number_of_points_by_return[i] = int.from_bytes(
stream.read(4), little_endian, signed=False
)
for i in range(3):
header.scales[i] = struct.unpack("<d", stream.read(8))[0]
for i in range(3):
header.offsets[i] = struct.unpack("<d", stream.read(8))[0]
for i in range(3):
header.maxs[i] = struct.unpack("<d", stream.read(8))[0]
header.mins[i] = struct.unpack("<d", stream.read(8))[0]
if header.version.minor >= 3:
header.start_of_waveform_data_packet_record = int.from_bytes(
stream.read(8), little_endian, signed=False
)
if header.version.minor >= 4:
header.start_of_first_evlr = int.from_bytes(
stream.read(8), little_endian, signed=False
)
header.number_of_evlrs = int.from_bytes(
stream.read(4), little_endian, signed=False
)
header.point_count = int.from_bytes(
stream.read(8), little_endian, signed=False
)
for i in range(15):
header.number_of_points_by_return[i] = int.from_bytes(
stream.read(8), little_endian, signed=False
)
current_pos = stream.tell()
if current_pos < header_size:
header.extra_header_bytes = stream.read(header_size - current_pos)
elif current_pos > header_size:
raise LaspyException("Incoherent header size")
header._vlrs = VLRList.read_from(stream, num_to_read=number_of_vlrs)
current_pos = stream.tell()
if current_pos < header.offset_to_point_data:
header.extra_vlr_bytes = stream.read(
header.offset_to_point_data - current_pos
)
elif current_pos > header.offset_to_point_data:
raise LaspyException("Incoherent offset to point data")
header.are_points_compressed = is_point_format_compressed(point_format_id)
point_format_id = compressed_id_to_uncompressed(point_format_id)
point_format = PointFormat(point_format_id)
try:
extra_bytes_vlr = typing.cast(
ExtraBytesVlr, header._vlrs.get("ExtraBytesVlr")[0]
)
except IndexError:
pass
else:
if point_size == point_format.size:
logger.warning(
"There is an ExtraByteVlr but the header.point_size matches the "
"point size without extra bytes. The extra bytes vlr info will be ignored"
)
header._vlrs.extract("ExtraBytesVlr")
else:
for extra_dim_info in extra_bytes_vlr.type_of_extra_dims():
point_format.add_extra_dimension(extra_dim_info)
header._point_format = point_format
if point_size > point_format.size:
# We have unregistered extra bytes
num_extra_bytes = point_size - point_format.size
point_format.dimensions.append(
dims.DimensionInfo(
name="ExtraBytes",
kind=dims.DimensionKind.UnsignedInteger,
num_bits=8 * num_extra_bytes,
num_elements=num_extra_bytes,
is_standard=False,
description="Un-registered ExtraBytes",
)
)
elif point_size < point_format.size:
raise LaspyException(
f"Incoherent point size, "
f"header says {point_size} point_format created says {point_format.size}"
)
if read_evlrs:
header.read_evlrs(original_stream)
stream.seek(header.offset_to_point_data)
return header
def write_to(
self,
stream: BinaryIO,
ensure_same_size: bool = False,
encoding_errors: str = "strict",
) -> None:
"""
ensure_same_size: if true this function will raise an internal error
if the written header would change the offset to point data
it originally had (meaning the file could become broken).
Used when rewriting a header to update the file (new point count, mins, maxs, etc.)
"""
if self.point_count > self.max_point_count():
raise LaspyException(
f"Version {self.version} cannot save clouds with more than"
f" {self.max_point_count()} points (current: {self.point_count})"
)
little_endian = "little"
with io.BytesIO() as tmp:
self._vlrs.write_to(tmp, encoding_errors=encoding_errors)
vlr_bytes = tmp.getvalue()
header_size = LAS_HEADERS_SIZE[str(self.version)]
header_size += len(self.extra_header_bytes)
new_offset_to_data = header_size + len(vlr_bytes) + len(self.extra_vlr_bytes)
if ensure_same_size and new_offset_to_data != self.offset_to_point_data:
raise LaspyException(
"Internal error, writing header would change original offset to data"
"and break the file"
)
self.offset_to_point_data = new_offset_to_data
stream.write(LAS_FILE_SIGNATURE)
stream.write(self.file_source_id.to_bytes(2, little_endian, signed=False))
self.global_encoding.write_to(stream)
stream.write(self.uuid.bytes_le)
stream.write(self.version.major.to_bytes(1, little_endian, signed=False))
stream.write(self.version.minor.to_bytes(1, little_endian, signed=False))
was_truncated = write_string(
stream,
self.system_identifier,
SYSTEM_IDENTIFIER_LEN,
encoding_errors=encoding_errors,
)
if was_truncated:
logger.warning(
f"system identifier does not fit into the {SYSTEM_IDENTIFIER_LEN} maximum bytes,"
f" it will be truncated"
)
was_truncated = write_string(
stream,
self.generating_software,
GENERATING_SOFTWARE_LEN,
encoding_errors=encoding_errors,
)
if was_truncated:
logger.warning(
f"generating software does not fit into the {GENERATING_SOFTWARE_LEN} maximum bytes,"
f" it will be truncated"
)
if self.creation_date is None:
self.creation_date = date.today()
stream.write(
self.creation_date.timetuple().tm_yday.to_bytes(
2, little_endian, signed=False
)
)
stream.write(self.creation_date.year.to_bytes(2, little_endian, signed=False))
stream.write(header_size.to_bytes(2, little_endian, signed=False))
stream.write(self.offset_to_point_data.to_bytes(4, little_endian, signed=False))
stream.write(len(self._vlrs).to_bytes(4, little_endian, signed=False))
point_format_id = self.point_format.id
if self.are_points_compressed:
point_format_id = uncompressed_id_to_compressed(point_format_id)
stream.write(point_format_id.to_bytes(1, little_endian, signed=False))
stream.write(self.point_format.size.to_bytes(2, little_endian, signed=False))
# Point Count
if self.version.minor >= 4:
stream.write(int(0).to_bytes(4, little_endian, signed=False))
for i in range(5):
stream.write(int(0).to_bytes(4, little_endian, signed=False))
else:
stream.write(self.point_count.to_bytes(4, little_endian, signed=False))
for i in range(5):
stream.write(
int(self.number_of_points_by_return[i]).to_bytes(
4, little_endian, signed=False
)
)
for i in range(3):
stream.write(struct.pack("<d", self.scales[i]))
for i in range(3):
stream.write(struct.pack("<d", self.offsets[i]))
for i in range(3):
stream.write(struct.pack("<d", self.maxs[i]))
stream.write(struct.pack("<d", self.mins[i]))
if self.version.minor >= 3:
stream.write(
self.start_of_waveform_data_packet_record.to_bytes(
8, little_endian, signed=False
)
)
if self.version.minor >= 4:
stream.write(
self.start_of_first_evlr.to_bytes(8, little_endian, signed=False)
)
stream.write(self.number_of_evlrs.to_bytes(4, little_endian, signed=False))
stream.write(self.point_count.to_bytes(8, little_endian, signed=False))
for i in range(15):
stream.write(
int(self.number_of_points_by_return[i]).to_bytes(
8, little_endian, signed=False
)
)
stream.write(self.extra_header_bytes)
stream.write(vlr_bytes)
stream.write(self.extra_vlr_bytes)
def parse_crs(self, prefer_wkt=True) -> Optional["pyproj.CRS"]:
"""
Method to parse OGC WKT or GeoTiff VLR keys into a pyproj CRS object
Returns None if no CRS VLR is present, or if the CRS specified
in the VLRS is not understood.
Parameters
----------
prefer_wkt: Optional, default True,
If True the WKT VLR will be preferred in case
both the WKT and Geotiff VLR are present
.. warning::
This requires `pyproj`.
.. versionadded:: 2.5
The ``prefer_wkt`` parameters.
"""
geo_vlr = self._vlrs.get_by_id("LASF_Projection")
if self.evlrs is not None:
geo_vlr.extend(self.evlrs.get_by_id("LASF_Projection"))
parsed_crs = {}
for rec in geo_vlr:
if isinstance(rec, (WktCoordinateSystemVlr, GeoKeyDirectoryVlr)):
crs = rec.parse_crs()
if crs is not None:
parsed_crs[type(rec)] = crs
# Could not parse anything / there was nothing to parse
if not parsed_crs:
return None
if prefer_wkt:
preferred, other = WktCoordinateSystemVlr, GeoKeyDirectoryVlr
else:
preferred, other = GeoKeyDirectoryVlr, WktCoordinateSystemVlr
try:
return parsed_crs[preferred]
except KeyError:
return parsed_crs[other]
def read_evlrs(self, stream):
"""
Reads EVLRs from the stream and sets them in the
data property.
The evlrs are accessed from the `evlrs` property
Does nothing if any of these is true:
- The file does not support EVLRS (version < 1.4)
- The file has no EVLRS
- The stream does not support seeking
Leaves/restores the stream position to where it was before the call
"""
if self.version.minor >= 4:
if self.number_of_evlrs > 0 and stream.seekable():
saved_pos = stream.tell()
stream.seek(self.start_of_first_evlr, io.SEEK_SET)
self.evlrs = VLRList.read_from(
stream, self.number_of_evlrs, extended=True
)
stream.seek(saved_pos)
elif self.number_of_evlrs > 0 and not stream.seekable():
self.evlrs = None
else:
self.evlrs = VLRList()
else:
self.evlrs = None
@staticmethod
def _prefetch_header_data(source) -> bytes:
"""
reads (and returns) from the source all the bytes that
are between the beginning of the file and the start of point data
(which corresponds to Header + VLRS).
It is done in two calls to the source's `read` method
This is done because `LasHeader.read_from`
does a bunch of reads on the source, so we prefer to
prefetch data in memory in case the original source
is not buffered (like an http source could be)
"""
header_bytes = source.read(LAS_HEADERS_SIZE["1.1"])
file_sig = header_bytes[: len(LAS_FILE_SIGNATURE)]
if not file_sig:
raise LaspyException(f"Source is empty")
if file_sig != LAS_FILE_SIGNATURE:
raise LaspyException(f'Invalid file signature "{file_sig}"')
if len(header_bytes) < LAS_HEADERS_SIZE["1.1"]:
raise LaspyException("File is to small to be a valid LAS")
offset_to_data = int.from_bytes(
header_bytes[96 : 96 + 4], byteorder="little", signed=False
)
rest = source.read(offset_to_data - len(header_bytes))
return header_bytes + rest
def _sync_extra_bytes_vlr(self) -> None:
try:
self._vlrs.extract("ExtraBytesVlr")
except IndexError:
pass
extra_dimensions = list(self.point_format.extra_dimensions)
if not extra_dimensions:
return
eb_vlr = ExtraBytesVlr()
for extra_dimension in extra_dimensions:
dtype = extra_dimension.dtype
assert dtype is not None
eb_struct = ExtraBytesStruct(
name=extra_dimension.name.encode(),
description=extra_dimension.description.encode(),
)
if extra_dimension.num_elements > 3 and dtype.base == np.uint8:
type_id = 0
eb_struct.options = extra_dimension.num_elements
else:
type_id = extradims.get_id_for_extra_dim_type(dtype)
eb_struct.data_type = type_id
eb_struct.scale = extra_dimension.scales
eb_struct.offset = extra_dimension.offsets
eb_vlr.extra_bytes_structs.append(eb_struct)
self._vlrs.append(eb_vlr)
# To keep some kind of backward compatibility
@property
def major_version(self) -> int:
return self.version.major
@property
def minor_version(self) -> int:
return self.version.minor
def __getattr__(self, item):
try:
return getattr(self, self._OLD_LASPY_NAMES[item])
except KeyError:
raise AttributeError(f"No attribute {item} in LasHeader") from None
def __setattr__(self, key, value):
try:
return setattr(self, self._OLD_LASPY_NAMES[key], value)
except KeyError:
super().__setattr__(key, value)
def __repr__(self) -> str:
return f"<LasHeader({self.version.major}.{self.version.minor}, {self.point_format})>"
| (*, version: Union[laspy.header.Version, str, NoneType] = None, point_format: Union[laspy.point.format.PointFormat, int, NoneType] = None) -> None |
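A hedged sketch tying the header to LasData (coordinates and paths are made up; writing '.laz' assumes a LAZ backend is installed):
import numpy as np
import laspy
header = laspy.LasHeader(version="1.4", point_format=6)
header.scales = np.array([0.001, 0.001, 0.001])
header.offsets = np.array([500_000.0, 4_000_000.0, 0.0])
las = laspy.LasData(header)
las.x = [500_123.456]
las.y = [4_000_987.654]
las.z = [12.345]
las.write("single_point.laz")  # hypothetical output path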
31,536 | laspy.header | __getattr__ | null | def __getattr__(self, item):
try:
return getattr(self, self._OLD_LASPY_NAMES[item])
except KeyError:
raise AttributeError(f"No attribute {item} in LasHeader") from None
| (self, item) |
31,537 | laspy.header | __init__ | null | def __init__(
self,
*,
version: Optional[Union[Version, str]] = None,
point_format: Optional[Union[PointFormat, int]] = None,
) -> None:
if isinstance(point_format, int):
point_format = PointFormat(point_format)
if isinstance(version, str):
version = Version.from_str(version)
if version is None and point_format is None:
version = LasHeader.DEFAULT_VERSION
point_format = LasHeader.DEFAULT_POINT_FORMAT
elif version is not None and point_format is None:
point_format = PointFormat(dims.min_point_format_for_version(str(version)))
elif version is None and point_format is not None:
version = Version.from_str(
dims.preferred_file_version_for_point_format(point_format.id)
)
dims.raise_if_version_not_compatible_with_fmt(point_format.id, str(version))
#: File source id
self.file_source_id: int = 0
self.global_encoding: GlobalEncoding = GlobalEncoding()
#: Project ID
#: Initialized to null UUID
self.uuid: UUID = UUID(bytes_le=b"\0" * 16)
self._version: Version = version
#: System identifier
#: Initialized to 'OTHER'
self.system_identifier: Union[str, bytes] = "OTHER"
#: The software which generated the file
#: Initialized to 'laspy'
self.generating_software: Union[str, bytes] = DEFAULT_GENERATING_SOFTWARE
self._point_format: PointFormat = point_format
#: Day the file was created, initialized to date.today
self.creation_date: Optional[date] = date.today()
#: The number of points in the file
self.point_count: int = 0
#: The numbers used to scale the x,y,z coordinates
self.scales: np.ndarray = np.array([0.01, 0.01, 0.01], dtype=np.float64)
#: The numbers used to offset the x,y,z coordinates
self.offsets: np.ndarray = np.zeros(3, dtype=np.float64)
# The max values for x,y,z
self.maxs: np.ndarray = np.zeros(3, dtype=np.float64)
# The min values for x,y,z
self.mins: np.ndarray = np.zeros(3, dtype=np.float64)
#: Number of points by return
#: for las <= 1.2 only the first 5 elements matter
self.number_of_points_by_return: np.ndarray = np.zeros(15, dtype=np.uint32)
#: The VLRS
self._vlrs: VLRList = VLRList()
#: Extra bytes between end of header and first vlrs
self.extra_header_bytes: bytes = b""
#: Extra bytes between end of vlrs and first point
self.extra_vlr_bytes: bytes = b""
#: Las >= 1.3
self.start_of_waveform_data_packet_record: int = 0
#: Las >= 1.4
#: Offset to the first evlr in the file
self.start_of_first_evlr: int = 0
#: The number of evlrs in the file
self.number_of_evlrs: int = 0
#: EVLRs, even though they are not stored in the 'header'
#: part of the file, we keep them in this class
#: as they contain the same information as vlrs.
#: None when the file does not support EVLR
self.evlrs: Optional[VLRList] = None
# Info we keep because it's useful for us but not the user
self.offset_to_point_data: int = 0
self.are_points_compressed: bool = False
self._sync_extra_bytes_vlr()
| (self, *, version: Union[laspy.header.Version, str, NoneType] = None, point_format: Union[laspy.point.format.PointFormat, int, NoneType] = None) -> NoneType |
31,538 | laspy.header | __repr__ | null | def __repr__(self) -> str:
return f"<LasHeader({self.version.major}.{self.version.minor}, {self.point_format})>"
| (self) -> str |
31,539 | laspy.header | __setattr__ | null | def __setattr__(self, key, value):
try:
return setattr(self, self._OLD_LASPY_NAMES[key], value)
except KeyError:
super().__setattr__(key, value)
| (self, key, value) |
31,540 | laspy.header | _prefetch_header_data |
reads (and returns) from the source all the bytes that
are between the beginning of the file and the start of point data
(which corresponds to Header + VLRS).
It is done in two calls to the source's `read` method
This is done because `LasHeader.read_from`
does a bunch of reads on the source, so we prefer to
prefetch data in memory in case the original source
is not buffered (like an http source could be)
| @staticmethod
def _prefetch_header_data(source) -> bytes:
"""
reads (and returns) from the source all the bytes that
are between the beginning of the file and the start of point data
(which corresponds to Header + VLRS).
It is done in two calls to the source's `read` method
This is done because `LasHeader.read_from`
does a bunch of reads on the source, so we prefer to
prefetch data in memory in case the original source
is not buffered (like an http source could be)
"""
header_bytes = source.read(LAS_HEADERS_SIZE["1.1"])
file_sig = header_bytes[: len(LAS_FILE_SIGNATURE)]
if not file_sig:
raise LaspyException(f"Source is empty")
if file_sig != LAS_FILE_SIGNATURE:
raise LaspyException(f'Invalid file signature "{file_sig}"')
if len(header_bytes) < LAS_HEADERS_SIZE["1.1"]:
raise LaspyException("File is to small to be a valid LAS")
offset_to_data = int.from_bytes(
header_bytes[96 : 96 + 4], byteorder="little", signed=False
)
rest = source.read(offset_to_data - len(header_bytes))
return header_bytes + rest
| (source) -> bytes |
31,541 | laspy.header | _sync_extra_bytes_vlr | null | def _sync_extra_bytes_vlr(self) -> None:
try:
self._vlrs.extract("ExtraBytesVlr")
except IndexError:
pass
extra_dimensions = list(self.point_format.extra_dimensions)
if not extra_dimensions:
return
eb_vlr = ExtraBytesVlr()
for extra_dimension in extra_dimensions:
dtype = extra_dimension.dtype
assert dtype is not None
eb_struct = ExtraBytesStruct(
name=extra_dimension.name.encode(),
description=extra_dimension.description.encode(),
)
if extra_dimension.num_elements > 3 and dtype.base == np.uint8:
type_id = 0
eb_struct.options = extra_dimension.num_elements
else:
type_id = extradims.get_id_for_extra_dim_type(dtype)
eb_struct.data_type = type_id
eb_struct.scale = extra_dimension.scales
eb_struct.offset = extra_dimension.offsets
eb_vlr.extra_bytes_structs.append(eb_struct)
self._vlrs.append(eb_vlr)
| (self) -> NoneType |
31,542 | laspy.header | add_crs | Add a Coordinate Reference System VLR from a pyproj CRS object.
The type of VLR created depends on the las version and point format
version. Las version >= 1.4 use WKT string, las version < 1.4 and point
format < 6 use GeoTiff tags.
.. warning::
This requires `pyproj`.
.. warning::
Not all CRS are supported when adding GeoTiff tags.
For example, custom CRS.
Typically, if the CRS has an EPSG code it will be supported.
| def add_crs(self, crs: "pyproj.CRS", keep_compatibility: bool = True) -> None:
"""Add a Coordinate Reference System VLR from a pyproj CRS object.
The type of VLR created depends on the las version and point format
version. Las version >= 1.4 use WKT string, las version < 1.4 and point
format < 6 use GeoTiff tags.
.. warning::
This requires `pyproj`.
.. warning::
Not all CRS are supported when adding GeoTiff tags.
For example, custom CRS.
Typically, if the CRS has an EPSG code it will be supported.
"""
import pyproj
# check and remove any existing crs vlrs
for crs_vlr_name in (
"WktCoordinateSystemVlr",
"GeoKeyDirectoryVlr",
"GeoAsciiParamsVlr",
"GeoDoubleParamsVlr",
):
try:
self._vlrs.extract(crs_vlr_name)
except IndexError:
pass
new_ver = self._version >= Version(1, 4)
new_pt = self.point_format.id >= 6
if new_pt or (new_ver and not keep_compatibility):
self._vlrs.append(WktCoordinateSystemVlr(crs.to_wkt()))
self.global_encoding.wkt = True
else:
self._vlrs.extend(create_geotiff_projection_vlrs(crs))
| (self, crs: 'pyproj.CRS', keep_compatibility: bool = True) -> None |
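A small sketch, assuming pyproj is installed and using an arbitrary EPSG code:
import pyproj
import laspy
header = laspy.LasHeader(version="1.4", point_format=6)
header.add_crs(pyproj.CRS.from_epsg(32633))  # stored as a WKT VLR for las 1.4 / point format >= 6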
31,543 | laspy.header | add_extra_dim | null | def add_extra_dim(self, params: ExtraBytesParams):
self.add_extra_dims([params])
| (self, params: laspy.point.format.ExtraBytesParams) |
31,544 | laspy.header | add_extra_dims | null | def add_extra_dims(self, params: List[ExtraBytesParams]) -> None:
for param in params:
self.point_format.add_extra_dimension(param)
self._sync_extra_bytes_vlr()
| (self, params: List[laspy.point.format.ExtraBytesParams]) -> NoneType |
31,545 | laspy.header | grow | null | def grow(self, points: PackedPointRecord) -> None:
self.x_max = max(
self.x_max,
(points["X"].max() * self.x_scale) + self.x_offset,
)
self.y_max = max(
self.y_max,
(points["Y"].max() * self.y_scale) + self.y_offset,
)
self.z_max = max(
self.z_max,
(points["Z"].max() * self.z_scale) + self.z_offset,
)
self.x_min = min(
self.x_min,
(points["X"].min() * self.x_scale) + self.x_offset,
)
self.y_min = min(
self.y_min,
(points["Y"].min() * self.y_scale) + self.y_offset,
)
self.z_min = min(
self.z_min,
(points["Z"].min() * self.z_scale) + self.z_offset,
)
for return_number, count in zip(
*np.unique(points.return_number, return_counts=True)
):
if return_number == 0:
continue
if return_number > len(self.number_of_points_by_return):
break # np.unique sorts unique values
self.number_of_points_by_return[return_number - 1] += count
self.point_count += len(points)
| (self, points: laspy.point.record.PackedPointRecord) -> NoneType |
31,546 | laspy.header | max_point_count | null | def max_point_count(self) -> int:
if self.version <= Version(1, 3):
return np.iinfo(np.uint32).max
else:
return np.iinfo(np.uint64).max
| (self) -> int |
31,547 | laspy.header | parse_crs |
Method to parse OGC WKT or GeoTiff VLR keys into a pyproj CRS object
Returns None if no CRS VLR is present, or if the CRS specified
in the VLRS is not understood.
Parameters
----------
prefer_wkt: Optional, default True,
If True the WKT VLR will be preferred in case
both the WKT and Geotiff VLR are present
.. warning::
This requires `pyproj`.
.. versionadded:: 2.5
The ``prefer_wkt`` parameters.
| def parse_crs(self, prefer_wkt=True) -> Optional["pyproj.CRS"]:
"""
Method to parse OGC WKT or GeoTiff VLR keys into a pyproj CRS object
Returns None if no CRS VLR is present, or if the CRS specified
in the VLRS is not understood.
Parameters
----------
prefer_wkt: Optional, default True,
If True the WKT VLR will be preferred in case
both the WKT and Geotiff VLR are present
.. warning::
This requires `pyproj`.
.. versionadded:: 2.5
The ``prefer_wkt`` parameters.
"""
geo_vlr = self._vlrs.get_by_id("LASF_Projection")
if self.evlrs is not None:
geo_vlr.extend(self.evlrs.get_by_id("LASF_Projection"))
parsed_crs = {}
for rec in geo_vlr:
if isinstance(rec, (WktCoordinateSystemVlr, GeoKeyDirectoryVlr)):
crs = rec.parse_crs()
if crs is not None:
parsed_crs[type(rec)] = crs
# Could not parse anything / there was nothing to parse
if not parsed_crs:
return None
if prefer_wkt:
preferred, other = WktCoordinateSystemVlr, GeoKeyDirectoryVlr
else:
preferred, other = GeoKeyDirectoryVlr, WktCoordinateSystemVlr
try:
return parsed_crs[preferred]
except KeyError:
return parsed_crs[other]
| (self, prefer_wkt=True) -> Optional[ForwardRef('pyproj.CRS')] |
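The reverse direction, again hedged (the file name is hypothetical and pyproj is required):
import laspy
las = laspy.read("georeferenced.laz")  # hypothetical input file
crs = las.header.parse_crs()  # pyproj.CRS, or None if no CRS VLR was understood
if crs is not None:
    print(crs.to_epsg())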
31,548 | laspy.header | partial_reset | null | def partial_reset(self) -> None:
f64info = np.finfo(np.float64)
self.maxs = np.ones(3, dtype=np.float64) * f64info.min
self.mins = np.ones(3, dtype=np.float64) * f64info.max
self.start_of_first_evlr = 0
self.number_of_evlrs = 0
self.point_count = 0
self.number_of_points_by_return = np.zeros(15, dtype=np.uint32)
| (self) -> NoneType |
31,549 | laspy.header | read_evlrs |
Reads EVLRs from the stream and sets them in the
data property.
The evlrs are accessed from the `evlrs` property
Does nothing if any of these is true:
- The file does not support EVLRS (version < 1.4)
- The file has no EVLRS
- The stream does not support seeking
Leaves/restores the stream position to where it was before the call
| def read_evlrs(self, stream):
"""
Reads EVLRs from the stream and sets them in the
data property.
The evlrs are accessed from the `evlrs` property
Does nothing if any of these is true:
- The file does not support EVLRS (version < 1.4)
- The file has no EVLRS
- The stream does not support seeking
Leaves/restores the stream position to where it was before the call
"""
if self.version.minor >= 4:
if self.number_of_evlrs > 0 and stream.seekable():
saved_pos = stream.tell()
stream.seek(self.start_of_first_evlr, io.SEEK_SET)
self.evlrs = VLRList.read_from(
stream, self.number_of_evlrs, extended=True
)
stream.seek(saved_pos)
elif self.number_of_evlrs > 0 and not stream.seekable():
self.evlrs = None
else:
self.evlrs = VLRList()
else:
self.evlrs = None
| (self, stream) |
31,550 | laspy.header | remove_extra_dim | null | def remove_extra_dim(self, name: str) -> None:
self.remove_extra_dims([name])
| (self, name: str) -> NoneType |
31,551 | laspy.header | remove_extra_dims | null | def remove_extra_dims(self, names: Iterable[str]) -> None:
for name in names:
self.point_format.remove_extra_dimension(name)
self._sync_extra_bytes_vlr()
| (self, names: Iterable[str]) -> NoneType |
31,552 | laspy.header | set_compressed | null | def set_compressed(self, state: bool) -> None:
self.are_points_compressed = state
| (self, state: bool) -> NoneType |
31,553 | laspy.header | set_version_and_point_format | null | def set_version_and_point_format(
self, version: Version, point_format: PointFormat
) -> None:
dims.raise_if_version_not_compatible_with_fmt(point_format.id, str(version))
self._version = version
self.point_format = point_format
| (self, version: laspy.header.Version, point_format: laspy.point.format.PointFormat) -> NoneType |
31,554 | laspy.header | update | null | def update(self, points: PackedPointRecord) -> None:
self.partial_reset()
if not points:
self.maxs = [0.0, 0.0, 0.0]
self.mins = [0.0, 0.0, 0.0]
else:
self.grow(points)
| (self, points: laspy.point.record.PackedPointRecord) -> NoneType |
31,555 | laspy.header | write_to |
ensure_same_size: if true this function will raise an internal error
if the written header would change the offset to point data
it originally had (meaning the file could become broken).
Used when rewriting a header to update the file (new point count, mins, maxs, etc.)
| def write_to(
self,
stream: BinaryIO,
ensure_same_size: bool = False,
encoding_errors: str = "strict",
) -> None:
"""
ensure_same_size: if true this function will raise an internal error
if the written header would change the offset to point data
it originally had (meaning the file could become broken).
Used when rewriting a header to update the file (new point count, mins, maxs, etc.)
"""
if self.point_count > self.max_point_count():
raise LaspyException(
f"Version {self.version} cannot save clouds with more than"
f" {self.max_point_count()} points (current: {self.point_count})"
)
little_endian = "little"
with io.BytesIO() as tmp:
self._vlrs.write_to(tmp, encoding_errors=encoding_errors)
vlr_bytes = tmp.getvalue()
header_size = LAS_HEADERS_SIZE[str(self.version)]
header_size += len(self.extra_header_bytes)
new_offset_to_data = header_size + len(vlr_bytes) + len(self.extra_vlr_bytes)
if ensure_same_size and new_offset_to_data != self.offset_to_point_data:
raise LaspyException(
"Internal error, writing header would change original offset to data"
"and break the file"
)
self.offset_to_point_data = new_offset_to_data
stream.write(LAS_FILE_SIGNATURE)
stream.write(self.file_source_id.to_bytes(2, little_endian, signed=False))
self.global_encoding.write_to(stream)
stream.write(self.uuid.bytes_le)
stream.write(self.version.major.to_bytes(1, little_endian, signed=False))
stream.write(self.version.minor.to_bytes(1, little_endian, signed=False))
was_truncated = write_string(
stream,
self.system_identifier,
SYSTEM_IDENTIFIER_LEN,
encoding_errors=encoding_errors,
)
if was_truncated:
logger.warning(
f"system identifier does not fit into the {SYSTEM_IDENTIFIER_LEN} maximum bytes,"
f" it will be truncated"
)
was_truncated = write_string(
stream,
self.generating_software,
GENERATING_SOFTWARE_LEN,
encoding_errors=encoding_errors,
)
if was_truncated:
logger.warning(
f"generating software does not fit into the {GENERATING_SOFTWARE_LEN} maximum bytes,"
f" it will be truncated"
)
if self.creation_date is None:
self.creation_date = date.today()
stream.write(
self.creation_date.timetuple().tm_yday.to_bytes(
2, little_endian, signed=False
)
)
stream.write(self.creation_date.year.to_bytes(2, little_endian, signed=False))
stream.write(header_size.to_bytes(2, little_endian, signed=False))
stream.write(self.offset_to_point_data.to_bytes(4, little_endian, signed=False))
stream.write(len(self._vlrs).to_bytes(4, little_endian, signed=False))
point_format_id = self.point_format.id
if self.are_points_compressed:
point_format_id = uncompressed_id_to_compressed(point_format_id)
stream.write(point_format_id.to_bytes(1, little_endian, signed=False))
stream.write(self.point_format.size.to_bytes(2, little_endian, signed=False))
# Point Count
if self.version.minor >= 4:
stream.write(int(0).to_bytes(4, little_endian, signed=False))
for i in range(5):
stream.write(int(0).to_bytes(4, little_endian, signed=False))
else:
stream.write(self.point_count.to_bytes(4, little_endian, signed=False))
for i in range(5):
stream.write(
int(self.number_of_points_by_return[i]).to_bytes(
4, little_endian, signed=False
)
)
for i in range(3):
stream.write(struct.pack("<d", self.scales[i]))
for i in range(3):
stream.write(struct.pack("<d", self.offsets[i]))
for i in range(3):
stream.write(struct.pack("<d", self.maxs[i]))
stream.write(struct.pack("<d", self.mins[i]))
if self.version.minor >= 3:
stream.write(
self.start_of_waveform_data_packet_record.to_bytes(
8, little_endian, signed=False
)
)
if self.version.minor >= 4:
stream.write(
self.start_of_first_evlr.to_bytes(8, little_endian, signed=False)
)
stream.write(self.number_of_evlrs.to_bytes(4, little_endian, signed=False))
stream.write(self.point_count.to_bytes(8, little_endian, signed=False))
for i in range(15):
stream.write(
int(self.number_of_points_by_return[i]).to_bytes(
8, little_endian, signed=False
)
)
stream.write(self.extra_header_bytes)
stream.write(vlr_bytes)
stream.write(self.extra_vlr_bytes)
| (self, stream: <class 'BinaryIO'>, ensure_same_size: bool = False, encoding_errors: str = 'strict') -> NoneType |
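A hedged sketch of the in-place rewrite use case mentioned in the docstring; the path is hypothetical and the VLRs are assumed unchanged so the offset to point data stays the same:
import laspy
with open("example.las", "r+b") as f:  # hypothetical existing file
    header = laspy.LasHeader.read_from(f)
    # ... adjust fields such as point_count, mins or maxs here ...
    f.seek(0)
    header.write_to(f, ensure_same_size=True)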
31,556 | laspy.lasreader | LasReader | The reader class handles LAS and LAZ via one of the supported backends | class LasReader:
"""The reader class handles LAS and LAZ via one of the supported backends"""
def __init__(
self,
source: BinaryIO,
closefd: bool = True,
laz_backend: Optional[Union[LazBackend, Iterable[LazBackend]]] = None,
read_evlrs: bool = True,
decompression_selection: DecompressionSelection = DecompressionSelection.all(),
):
"""
Initialize the LasReader
Parameters
----------
source: file_object
closefd: bool, default True
laz_backend: LazBackend or list of LazBackend, optional
read_evlrs: bool, default True
only applies to __init__ phase, and for files
that support evlrs
decompression_selection: optional, DecompressionSelection
Selection of fields to decompress, only works for point formats >= 6 and <= 10
Ignored on other point formats
.. versionadded:: 2.4
The ``read_evlrs`` and ``decompression_selection`` parameters.
"""
self.closefd = closefd
if laz_backend is None:
laz_backend = LazBackend.detect_available()
self.laz_backend = laz_backend
self.header = LasHeader.read_from(source, read_evlrs=read_evlrs)
self.decompression_selection = decompression_selection
# The point source is lazily instantiated.
# Because some reader implementations may
# read information that requires seeking towards the end of
# the file (eg: chunk table), and we prefer to limit opening
# to reading the header
self._point_source: Optional["IPointReader"] = None
self._source = source
self.points_read = 0
@property
def evlrs(self) -> Optional[VLRList]:
return self.header.evlrs
@evlrs.setter
def evlrs(self, evlrs: VLRList) -> None:
self.header.evlrs = evlrs
@property
def point_source(self) -> "IPointReader":
if self._point_source is None:
self._point_source = self._create_point_source(self._source)
return self._point_source
def read_points(self, n: int) -> record.ScaleAwarePointRecord:
"""Read n points from the file
Will only read as many points as the header advertises.
That is, if you ask to read 50 points and there are only 45 points left
this function will only read 45 points.
If there are no points left to read, returns an empty point record.
Parameters
----------
n: The number of points to read
if n is less than 0, this function will read the remaining points
"""
points_left = self.header.point_count - self.points_read
if points_left <= 0:
return record.ScaleAwarePointRecord.empty(
self.header.point_format,
self.header.scales,
self.header.offsets,
)
if n < 0:
n = points_left
else:
n = min(n, points_left)
r = record.PackedPointRecord.from_buffer(
self.point_source.read_n_points(n), self.header.point_format
)
if len(r) < n:
logger.error(f"Could only read {len(r)} of the requested {n} points")
points = record.ScaleAwarePointRecord(
r.array, r.point_format, self.header.scales, self.header.offsets
)
self.points_read += n
return points
def read(self) -> LasData:
"""
Reads all the points that have not been read yet and returns a LasData object
This will also read EVLRs
"""
points = self.read_points(-1)
las_data = LasData(header=self.header, points=points)
shall_read_evlr = (
self.header.version.minor >= 4
and self.header.number_of_evlrs > 0
and self.evlrs is None
)
if shall_read_evlr:
# If we have to read evlrs by now, it either means:
# - the user asked for them not to be read during the opening phase.
# - and/or the stream is not seekable, thus they could not be read during opening phase
#
if self.point_source.source.seekable():
self.read_evlrs()
else:
# In that case we are still going to
# try to read the evlrs by relying on the fact that they should generally be
# right after the last point, which is where we are now.
if self.header.are_points_compressed:
if not isinstance(self.point_source, LazrsPointReader):
raise errors.LaspyException(
"Reading EVLRs from a LAZ in a non-seekable stream "
"can only be done with lazrs backend"
)
# Few things: If the stream is non seekable, only a LazrsPointReader
# could have been created (parallel requires ability to seek)
#
# Also, to work, the next lines of code assumes that:
# 1) We actually are just after the last point
# 2) The chunk table _starts_ just after the last point
# 3) The first EVLR starts just after the chunk table
# These assumptions should be fine for most of the cases
# and non seekable sources are probably not that common
_ = self.point_source.read_chunk_table_only()
# Since the LazrsDecompressor uses a buffered reader
# the python file object's position is not at the position we
# think it is.
# So we have to read data from the decompressor's
# buffered stream.
class LocalReader:
def __init__(self, source: LazrsPointReader) -> None:
self.source = source
def read(self, n: int) -> bytes:
return self.source.read_raw_bytes(n)
self.evlrs = VLRList.read_from(
LocalReader(self.point_source),
self.header.number_of_evlrs,
extended=True,
)
else:
# For this to work, we assume that the first evlr
# start just after the last point
self.header.evlrs = VLRList.read_from(
self.point_source.source,
self.header.number_of_evlrs,
extended=True,
)
return las_data
def seek(self, pos: int, whence: int = io.SEEK_SET) -> int:
"""Seeks to the start of the point at the given pos
Parameters
----------
pos: index of the point to seek to
whence: optional, controls how the pos parameter is interpreted:
io.SEEK_SET: (default) pos is the index of the point from the beginning
io.SEEK_CUR: pos is the point_index relative to the point_index of the last point read
io.SEEK_END: pos is the point_index relative to last point
Returns
-------
The index of the point the reader seeked to, relative to the first point
"""
if whence == io.SEEK_SET:
allowed_range = range(0, self.header.point_count)
point_index = pos
elif whence == io.SEEK_CUR:
allowed_range = range(
-self.points_read, self.header.point_count - self.points_read
)
point_index = self.points_read + pos
elif whence == io.SEEK_END:
allowed_range = range(-self.header.point_count, 0)
point_index = self.header.point_count + pos
else:
raise ValueError(f"Invalid value for whence: {whence}")
if pos not in allowed_range:
whence_str = ["start", "current point", "end"]
raise IndexError(
f"When seeking from the {whence_str[whence]}, pos must be in {allowed_range}"
)
self.point_source.seek(point_index)
self.points_read = point_index
return point_index
def chunk_iterator(self, points_per_iteration: int) -> "PointChunkIterator":
"""Returns an iterator, that will read points by chunks
of the requested size
:param points_per_iteration: number of points to be read with each iteration
:return:
"""
return PointChunkIterator(self, points_per_iteration)
def read_evlrs(self):
self.header.read_evlrs(self._source)
def close(self) -> None:
"""closes the file object used by the reader"""
if self.closefd:
# We check the actual source,
# to avoid creating it, just to close it
if self._point_source is not None:
self._point_source.close()
else:
self._source.close()
def _create_laz_backend(self, source) -> IPointReader:
"""Creates the laz backend to use according to `self.laz_backend`.
If `self.laz_backend` contains multiple backends, this function will
try to create them in order until one of them is successfully constructed.
If none could be constructed, the error of the last backend tried will be raised
"""
if not self.laz_backend:
raise errors.LaspyException(
"No LazBackend selected, cannot decompress data"
)
try:
backends = iter(self.laz_backend)
except TypeError:
backends = (self.laz_backend,)
last_error: Optional[Exception] = None
for backend in backends:
try:
if not backend.is_available():
raise errors.LaspyException(f"The '{backend}' is not available")
reader: IPointReader = backend.create_reader(
source,
self.header,
decompression_selection=self.decompression_selection,
)
except Exception as e:
last_error = e
logger.error(e)
else:
self.header.vlrs.pop(self.header.vlrs.index("LasZipVlr"))
return reader
raise last_error
def _create_point_source(self, source) -> IPointReader:
if self.header.point_count > 0:
if self.header.are_points_compressed:
point_source = self._create_laz_backend(source)
if point_source is None:
raise errors.LaspyException(
"Data is compressed, but no LazBacked could be initialized"
)
return point_source
else:
return UncompressedPointReader(source, self.header)
else:
return EmptyPointReader()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| (source: <class 'BinaryIO'>, closefd: bool = True, laz_backend: Union[laspy._compression.backend.LazBackend, Iterable[laspy._compression.backend.LazBackend], NoneType] = None, read_evlrs: bool = True, decompression_selection: laspy._compression.selection.DecompressionSelection = <DecompressionSelection.ALL_EXTRA_BYTES|WAVEPACKET|NIR|RGB|GPS_TIME|POINT_SOURCE_ID|USER_DATA|SCAN_ANGLE|INTENSITY|FLAGS|CLASSIFICATION|Z|XY_RETURNS_CHANNEL: 8191>) |
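A minimal usage sketch, assuming a hypothetical input file "points.laz": laspy.open returns a LasReader in read mode, and the reader can be used as a context manager as defined above.

import laspy

with laspy.open("points.laz") as reader:
    # Only the header (and, by default, the EVLRs) has been read at this point.
    print(reader.header.point_format, reader.header.point_count)
    las = reader.read()  # read the remaining points into a LasData object
print(las.header.point_count)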
31,558 | laspy.lasreader | __exit__ | null | def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| (self, exc_type, exc_val, exc_tb) |
31,559 | laspy.lasreader | __init__ |
Initialize the LasReader
Parameters
----------
source: file_object
closefd: bool, default True
laz_backend: LazBackend or list of LazBackend, optional
read_evlrs: bool, default True
only applies to __init__ phase, and for files
that support evlrs
decompression_selection: optional, DecompressionSelection
Selection of fields to decompress, only works for point formats >= 6 and <= 10
Ignored on other point formats
.. versionadded:: 2.4
The ``read_evlrs`` and ``decompression_selection`` parameters.
| def __init__(
self,
source: BinaryIO,
closefd: bool = True,
laz_backend: Optional[Union[LazBackend, Iterable[LazBackend]]] = None,
read_evlrs: bool = True,
decompression_selection: DecompressionSelection = DecompressionSelection.all(),
):
"""
Initialize the LasReader
Parameters
----------
source: file_object
closefd: bool, default True
laz_backend: LazBackend or list of LazBackend, optional
read_evlrs: bool, default True
only applies to __init__ phase, and for files
that support evlrs
decompression_selection: optional, DecompressionSelection
Selection of fields to decompress, only works for point formats >= 6 and <= 10
Ignored on other point formats
.. versionadded:: 2.4
The ``read_evlrs`` and ``decompression_selection`` parameters.
"""
self.closefd = closefd
if laz_backend is None:
laz_backend = LazBackend.detect_available()
self.laz_backend = laz_backend
self.header = LasHeader.read_from(source, read_evlrs=read_evlrs)
self.decompression_selection = decompression_selection
# The point source is lazily instantiated.
# Because some reader implementations may
# read information that requires seeking towards the end of
# the file (eg: chunk table), and we prefer to limit opening
# to reading the header
self._point_source: Optional["IPointReader"] = None
self._source = source
self.points_read = 0
| (self, source: <class 'BinaryIO'>, closefd: bool = True, laz_backend: Union[laspy._compression.backend.LazBackend, Iterable[laspy._compression.backend.LazBackend], NoneType] = None, read_evlrs: bool = True, decompression_selection: laspy._compression.selection.DecompressionSelection = <DecompressionSelection.ALL_EXTRA_BYTES|WAVEPACKET|NIR|RGB|GPS_TIME|POINT_SOURCE_ID|USER_DATA|SCAN_ANGLE|INTENSITY|FLAGS|CLASSIFICATION|Z|XY_RETURNS_CHANNEL: 8191>) |
31,560 | laspy.lasreader | _create_laz_backend | Creates the laz backend to use according to `self.laz_backend`.
If `self.laz_backend` contains multiple backends, this function will
try to create them in order until one of them is successfully constructed.
If none could be constructed, the error of the last backend tried will be raised
| def _create_laz_backend(self, source) -> IPointReader:
"""Creates the laz backend to use according to `self.laz_backend`.
If `self.laz_backend` contains multiple backends, this function will
try to create them in order until one of them is successfully constructed.
If none could be constructed, the error of the last backend tried will be raised
"""
if not self.laz_backend:
raise errors.LaspyException(
"No LazBackend selected, cannot decompress data"
)
try:
backends = iter(self.laz_backend)
except TypeError:
backends = (self.laz_backend,)
last_error: Optional[Exception] = None
for backend in backends:
try:
if not backend.is_available():
raise errors.LaspyException(f"The '{backend}' is not available")
reader: IPointReader = backend.create_reader(
source,
self.header,
decompression_selection=self.decompression_selection,
)
except Exception as e:
last_error = e
logger.error(e)
else:
self.header.vlrs.pop(self.header.vlrs.index("LasZipVlr"))
return reader
raise last_error
| (self, source) -> laspy._pointreader.IPointReader |
31,561 | laspy.lasreader | _create_point_source | null | def _create_point_source(self, source) -> IPointReader:
if self.header.point_count > 0:
if self.header.are_points_compressed:
point_source = self._create_laz_backend(source)
if point_source is None:
raise errors.LaspyException(
"Data is compressed, but no LazBacked could be initialized"
)
return point_source
else:
return UncompressedPointReader(source, self.header)
else:
return EmptyPointReader()
| (self, source) -> laspy._pointreader.IPointReader |
31,562 | laspy.lasreader | chunk_iterator | Returns an iterator, that will read points by chunks
of the requested size
:param points_per_iteration: number of points to be read with each iteration
:return:
| def chunk_iterator(self, points_per_iteration: int) -> "PointChunkIterator":
"""Returns an iterator, that will read points by chunks
of the requested size
:param points_per_iteration: number of points to be read with each iteration
:return:
"""
return PointChunkIterator(self, points_per_iteration)
| (self, points_per_iteration: int) -> laspy.lasreader.PointChunkIterator |
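A sketch of chunked reading with chunk_iterator, useful when the file does not fit in memory; "big.laz" and the chunk size are placeholders.

import laspy

total = 0
max_intensity = 0
with laspy.open("big.laz") as reader:
    for points in reader.chunk_iterator(1_000_000):
        # Each chunk is a point record; dimensions are accessed by name.
        total += len(points)
        max_intensity = max(max_intensity, int(points.intensity.max()))
print(total, max_intensity)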
31,563 | laspy.lasreader | close | closes the file object used by the reader | def close(self) -> None:
"""closes the file object used by the reader"""
if self.closefd:
# We check the actual source,
# to avoid creating it, just to close it
if self._point_source is not None:
self._point_source.close()
else:
self._source.close()
| (self) -> NoneType |
31,564 | laspy.lasreader | read |
Reads all the points that have not been read yet and returns a LasData object
This will also read EVLRs
| def read(self) -> LasData:
"""
Reads all the points that have not been read yet and returns a LasData object
This will also read EVLRs
"""
points = self.read_points(-1)
las_data = LasData(header=self.header, points=points)
shall_read_evlr = (
self.header.version.minor >= 4
and self.header.number_of_evlrs > 0
and self.evlrs is None
)
if shall_read_evlr:
# If we have to read evlrs by now, it either means:
# - the user asked for them not to be read during the opening phase.
# - and/or the stream is not seekable, thus they could not be read during opening phase
#
if self.point_source.source.seekable():
self.read_evlrs()
else:
# In that case we are still going to
# try to read the evlrs by relying on the fact that they should generally be
# right after the last point, which is where we are now.
if self.header.are_points_compressed:
if not isinstance(self.point_source, LazrsPointReader):
raise errors.LaspyException(
"Reading EVLRs from a LAZ in a non-seekable stream "
"can only be done with lazrs backend"
)
# Few things: If the stream is non seekable, only a LazrsPointReader
# could have been created (parallel requires ability to seek)
#
# Also, to work, the next lines of code assumes that:
# 1) We actually are just after the last point
# 2) The chunk table _starts_ just after the last point
# 3) The first EVLR starts just after the chunk table
# These assumptions should be fine for most of the cases
# and non seekable sources are probably not that common
_ = self.point_source.read_chunk_table_only()
# Since the LazrsDecompressor uses a buffered reader
# the python file object's position is not at the position we
# think it is.
# So we have to read data from the decompressor's
# buffered stream.
class LocalReader:
def __init__(self, source: LazrsPointReader) -> None:
self.source = source
def read(self, n: int) -> bytes:
return self.source.read_raw_bytes(n)
self.evlrs = VLRList.read_from(
LocalReader(self.point_source),
self.header.number_of_evlrs,
extended=True,
)
else:
# For this to work, we assume that the first evlr
# start just after the last point
self.header.evlrs = VLRList.read_from(
self.point_source.source,
self.header.number_of_evlrs,
extended=True,
)
return las_data
| (self) -> laspy.lasdata.LasData |
31,565 | laspy.lasreader | read_evlrs | null | def read_evlrs(self):
self.header.read_evlrs(self._source)
| (self) |
31,566 | laspy.lasreader | read_points | Read n points from the file
Will only read as many points as the header advertise.
That is, if you ask to read 50 points and there are only 45 points left
this function will only read 45 points.
If there are no points left to read, returns an empty point record.
Parameters
----------
n: The number of points to read
if n is less than 0, this function will read the remaining points
| def read_points(self, n: int) -> record.ScaleAwarePointRecord:
"""Read n points from the file
Will only read as many points as the header advertises.
That is, if you ask to read 50 points and there are only 45 points left
this function will only read 45 points.
If there are no points left to read, returns an empty point record.
Parameters
----------
n: The number of points to read
if n is less than 0, this function will read the remaining points
"""
points_left = self.header.point_count - self.points_read
if points_left <= 0:
return record.ScaleAwarePointRecord.empty(
self.header.point_format,
self.header.scales,
self.header.offsets,
)
if n < 0:
n = points_left
else:
n = min(n, points_left)
r = record.PackedPointRecord.from_buffer(
self.point_source.read_n_points(n), self.header.point_format
)
if len(r) < n:
logger.error(f"Could only read {len(r)} of the requested {n} points")
points = record.ScaleAwarePointRecord(
r.array, r.point_format, self.header.scales, self.header.offsets
)
self.points_read += n
return points
| (self, n: int) -> laspy.point.record.ScaleAwarePointRecord |
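A sketch of reading in fixed-size batches with read_points until an empty record comes back; "survey.las" and the batch size are placeholders.

import laspy

with laspy.open("survey.las") as reader:
    while True:
        points = reader.read_points(50_000)
        if len(points) == 0:
            break
        # points is a ScaleAwarePointRecord, so x/y/z are already scaled.
        print(len(points), float(points.x[0]))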
31,567 | laspy.lasreader | seek | Seeks to the start of the point at the given pos
Parameters
----------
pos: index of the point to seek to
whence: optional, controls how the pos parameter is interpreted:
io.SEEK_SET: (default) pos is the index of the point from the beginning
io.SEEK_CUR: pos is the point_index relative to the point_index of the last point read
io.SEEK_END: pos is the point_index relative to last point
Returns
-------
The index of the point the reader seeked to, relative to the first point
| def seek(self, pos: int, whence: int = io.SEEK_SET) -> int:
"""Seeks to the start of the point at the given pos
Parameters
----------
pos: index of the point to seek to
whence: optional, controls how the pos parameter is interpreted:
io.SEEK_SET: (default) pos is the index of the point from the beginning
io.SEEK_CUR: pos is the point_index relative to the point_index of the last point read
io.SEEK_END: pos is the point_index relative to last point
Returns
-------
The index of the point the reader seeked to, relative to the first point
"""
if whence == io.SEEK_SET:
allowed_range = range(0, self.header.point_count)
point_index = pos
elif whence == io.SEEK_CUR:
allowed_range = range(
-self.points_read, self.header.point_count - self.points_read
)
point_index = self.points_read + pos
elif whence == io.SEEK_END:
allowed_range = range(-self.header.point_count, 0)
point_index = self.header.point_count + pos
else:
raise ValueError(f"Invalid value for whence: {whence}")
if pos not in allowed_range:
whence_str = ["start", "current point", "end"]
raise IndexError(
f"When seeking from the {whence_str[whence]}, pos must be in {allowed_range}"
)
self.point_source.seek(point_index)
self.points_read = point_index
return point_index
| (self, pos: int, whence: int = 0) -> int |
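A sketch of random access with seek; the positions and the "points.las" path are illustrative (the file is assumed to have more than 1010 points).

import io

import laspy

with laspy.open("points.las") as reader:
    reader.seek(1_000)                    # absolute index from the start
    middle = reader.read_points(10)       # points 1000..1009
    reader.seek(-10, whence=io.SEEK_END)  # relative to the last point
    tail = reader.read_points(10)
    print(len(middle), len(tail))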
31,568 | laspy.laswriter | LasWriter |
Allows to write a complete LAS/LAZ file to the destination.
| class LasWriter:
"""
Allows to write a complete LAS/LAZ file to the destination.
"""
def __init__(
self,
dest: BinaryIO,
header: LasHeader,
do_compress: Optional[bool] = None,
laz_backend: Optional[Union[LazBackend, Iterable[LazBackend]]] = None,
closefd: bool = True,
encoding_errors: str = "strict",
) -> None:
"""
Parameters
----------
dest: file_object
file object where the LAS/LAZ will be written
header: LasHeader
The header of the file to be written
do_compress: bool, optional
Whether the file data should be written as LAS (uncompressed)
or LAZ (compressed).
If None, the file won't be compressed, unless a laz_backend is provided
laz_backend: LazBackend or list of LazBackend, optional
The LazBackend to use (or if it is a sequence the LazBackend to try)
for the compression
closefd: bool, default True
should the `dest` be closed when the writer is closed
encoding_errors: str, default 'strict'
How encoding errors should be treated.
Possible values and their explanation can be seen here:
https://docs.python.org/3/library/codecs.html#error-handlers.
"""
self.closefd = closefd
self.encoding_errors = encoding_errors
self.header = deepcopy(header)
# The point writer will take care of creating and writing
# the correct laszip vlr, however we have to make sure
# no prior laszip vlr exists
try:
self.header.vlrs.pop(header.vlrs.index("LasZipVlr"))
except ValueError:
pass
self.header.partial_reset()
self.dest = dest
self.done = False
dims.raise_if_version_not_compatible_with_fmt(
header.point_format.id, str(self.header.version)
)
if laz_backend is not None:
if do_compress is None:
do_compress = True
self.laz_backend = laz_backend
else:
if do_compress is None:
do_compress = False
self.laz_backend = LazBackend.detect_available()
self.header.are_points_compressed = do_compress
if do_compress:
self.point_writer: IPointWriter = self._create_laz_backend(self.laz_backend)
else:
self.point_writer: IPointWriter = UncompressedPointWriter(self.dest)
self.point_writer.write_initial_header_and_vlrs(
self.header, self.encoding_errors
)
def write_points(self, points: PackedPointRecord) -> None:
"""
.. note ::
If you are writing points coming from multiple different input files
into one output file, you have to make sure the point records
you write all use the same scales and offsets as the writer.
You can use :meth:`.LasData.change_scaling` or :meth:`.ScaleAwarePointRecord.change_scaling`
to do that.
Parameters
----------
points: PackedPointRecord or ScaleAwarePointRecord
The points to be written
Raises
------
LaspyException
If the point format of the points does not match
the point format of the writer.
"""
if not points:
return
if self.done:
raise LaspyException("Cannot write points anymore")
if points.point_format != self.header.point_format:
raise LaspyException("Incompatible point formats")
if self.header.max_point_count() - self.header.point_count < len(points):
raise LaspyException(
"Cannot write {} points as it would exceed the maximum number of points the file"
"can store. Current point count: {}, max point count: {}".format(
len(points), self.header.point_count, self.header.max_point_count()
)
)
self.header.grow(points)
self.point_writer.write_points(points)
def write_evlrs(self, evlrs: VLRList) -> None:
"""Writes the EVLRs to the file
Parameters
----------
evlrs: VLRList
The EVLRs to be written
Raises
------
LaspyException
If the file's version is not >= 1.4
"""
if self.header.version.minor < 4:
raise LaspyException(
"EVLRs are not supported on files with version less than 1.4"
)
if len(evlrs) > 0:
self.point_writer.done()
self.done = True
self.header.number_of_evlrs = len(evlrs)
self.header.start_of_first_evlr = self.dest.tell()
evlrs.write_to(self.dest, as_extended=True)
def close(self) -> None:
"""Closes the writer.
flushes the points, updates the header, making it impossible
to write points afterwards.
"""
if self.point_writer is not None:
if not self.done:
self.point_writer.done()
if self.header.point_count == 0:
self.header.maxs = [0.0, 0.0, 0.0]
self.header.mins = [0.0, 0.0, 0.0]
self.point_writer.write_updated_header(self.header, self.encoding_errors)
if self.closefd:
self.dest.close()
self.done = True
def _create_laz_backend(
self, laz_backends: Union[LazBackend, Iterable[LazBackend]]
) -> "IPointWriter":
try:
laz_backends = iter(laz_backends)
except TypeError:
laz_backends = (laz_backends,)
last_error: Optional[Exception] = None
for backend in laz_backends:
try:
if not backend.is_available():
raise LaspyException(f"The '{backend}' is not available")
return backend.create_writer(self.dest, self.header)
except Exception as e:
logger.error(e)
last_error = e
if last_error is not None:
raise LaspyException(f"No LazBackend could be initialized: {last_error}")
else:
raise LaspyException("No LazBackend selected, cannot compress")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| (dest: <class 'BinaryIO'>, header: laspy.header.LasHeader, do_compress: Optional[bool] = None, laz_backend: Union[laspy._compression.backend.LazBackend, Iterable[laspy._compression.backend.LazBackend], NoneType] = None, closefd: bool = True, encoding_errors: str = 'strict') -> None |
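A sketch of the typical write path: laspy.open in "w" mode builds a LasWriter around the given header, and points read from another file can be streamed straight into it. The file names are placeholders; the writer deep-copies the header, so reusing the reader's header is safe.

import laspy

with laspy.open("input.laz") as reader:
    # Reuse the input header so point format, scales and offsets match.
    with laspy.open("copy.laz", mode="w", header=reader.header) as writer:
        for points in reader.chunk_iterator(1_000_000):
            writer.write_points(points)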
31,571 | laspy.laswriter | __init__ |
Parameters
----------
dest: file_object
file object where the LAS/LAZ will be written
header: LasHeader
The header of the file to be written
do_compress: bool, optional
Whether the file data should be written as LAS (uncompressed)
or LAZ (compressed).
If None, the file won't be compressed, unless a laz_backend is provided
laz_backend: LazBackend or list of LazBackend, optional
The LazBackend to use (or if it is a sequence the LazBackend to try)
for the compression
closefd: bool, default True
should the `dest` be closed when the writer is closed
encoding_errors: str, default 'strict'
How encoding errors should be treated.
Possible values and their explanation can be seen here:
https://docs.python.org/3/library/codecs.html#error-handlers.
| def __init__(
self,
dest: BinaryIO,
header: LasHeader,
do_compress: Optional[bool] = None,
laz_backend: Optional[Union[LazBackend, Iterable[LazBackend]]] = None,
closefd: bool = True,
encoding_errors: str = "strict",
) -> None:
"""
Parameters
----------
dest: file_object
file object where the LAS/LAZ will be written
header: LasHeader
The header of the file to be written
do_compress: bool, optional
Whether the file data should be written as LAS (uncompressed)
or LAZ (compressed).
If None, the file won't be compressed, unless a laz_backend is provided
laz_backend: LazBackend or list of LazBackend, optional
The LazBackend to use (or if it is a sequence the LazBackend to try)
for the compression
closefd: bool, default True
should the `dest` be closed when the writer is closed
encoding_errors: str, default 'strict'
How encoding errors should be treated.
Possible values and their explanation can be seen here:
https://docs.python.org/3/library/codecs.html#error-handlers.
"""
self.closefd = closefd
self.encoding_errors = encoding_errors
self.header = deepcopy(header)
# The point writer will take care of creating and writing
# the correct laszip vlr, however we have to make sure
# no prior laszip vlr exists
try:
self.header.vlrs.pop(header.vlrs.index("LasZipVlr"))
except ValueError:
pass
self.header.partial_reset()
self.dest = dest
self.done = False
dims.raise_if_version_not_compatible_with_fmt(
header.point_format.id, str(self.header.version)
)
if laz_backend is not None:
if do_compress is None:
do_compress = True
self.laz_backend = laz_backend
else:
if do_compress is None:
do_compress = False
self.laz_backend = LazBackend.detect_available()
self.header.are_points_compressed = do_compress
if do_compress:
self.point_writer: IPointWriter = self._create_laz_backend(self.laz_backend)
else:
self.point_writer: IPointWriter = UncompressedPointWriter(self.dest)
self.point_writer.write_initial_header_and_vlrs(
self.header, self.encoding_errors
)
| (self, dest: <class 'BinaryIO'>, header: laspy.header.LasHeader, do_compress: Optional[bool] = None, laz_backend: Union[laspy._compression.backend.LazBackend, Iterable[laspy._compression.backend.LazBackend], NoneType] = None, closefd: bool = True, encoding_errors: str = 'strict') -> NoneType |
31,572 | laspy.laswriter | _create_laz_backend | null | def _create_laz_backend(
self, laz_backends: Union[LazBackend, Iterable[LazBackend]]
) -> "IPointWriter":
try:
laz_backends = iter(laz_backends)
except TypeError:
laz_backends = (laz_backends,)
last_error: Optional[Exception] = None
for backend in laz_backends:
try:
if not backend.is_available():
raise LaspyException(f"The '{backend}' is not available")
return backend.create_writer(self.dest, self.header)
except Exception as e:
logger.error(e)
last_error = e
if last_error is not None:
raise LaspyException(f"No LazBackend could be initialized: {last_error}")
else:
raise LaspyException("No LazBackend selected, cannot compress")
| (self, laz_backends: Union[laspy._compression.backend.LazBackend, Iterable[laspy._compression.backend.LazBackend]]) -> laspy._pointwriter.IPointWriter |
31,573 | laspy.laswriter | close | Closes the writer.
flushes the points, updates the header, making it impossible
to write points afterwards.
| def close(self) -> None:
"""Closes the writer.
flushes the points, updates the header, making it impossible
to write points afterwards.
"""
if self.point_writer is not None:
if not self.done:
self.point_writer.done()
if self.header.point_count == 0:
self.header.maxs = [0.0, 0.0, 0.0]
self.header.mins = [0.0, 0.0, 0.0]
self.point_writer.write_updated_header(self.header, self.encoding_errors)
if self.closefd:
self.dest.close()
self.done = True
| (self) -> NoneType |
31,574 | laspy.laswriter | write_evlrs | Writes the EVLRs to the file
Parameters
----------
evlrs: VLRList
The EVLRs to be written
Raises
------
LaspyException
If the file's version is not >= 1.4
| def write_evlrs(self, evlrs: VLRList) -> None:
"""Writes the EVLRs to the file
Parameters
----------
evlrs: VLRList
The EVLRs to be written
Raises
------
LaspyException
If the file's version is not >= 1.4
"""
if self.header.version.minor < 4:
raise LaspyException(
"EVLRs are not supported on files with version less than 1.4"
)
if len(evlrs) > 0:
self.point_writer.done()
self.done = True
self.header.number_of_evlrs = len(evlrs)
self.header.start_of_first_evlr = self.dest.tell()
evlrs.write_to(self.dest, as_extended=True)
| (self, evlrs: laspy.vlrs.vlrlist.VLRList) -> NoneType |
31,575 | laspy.laswriter | write_points |
.. note ::
If you are writing points coming from multiple different input files
into one output file, you have to make sure the point records
you write all use the same scales and offsets as the writer.
You can use :meth:`.LasData.change_scaling` or :meth:`.ScaleAwarePointRecord.change_scaling`
to do that.
Parameters
----------
points: PackedPointRecord or ScaleAwarePointRecord
The points to be written
Raises
------
LaspyException
If the point format of the points does not match
the point format of the writer.
| def write_points(self, points: PackedPointRecord) -> None:
"""
.. note ::
If you are writing points coming from multiple different input files
into one output file, you have to make sure the point records
you write all use the same scales and offsets as the writer.
You can use :meth:`.LasData.change_scaling` or :meth:`.ScaleAwarePointRecord.change_scaling`
to do that.
Parameters
----------
points: PackedPointRecord or ScaleAwarePointRecord
The points to be written
Raises
------
LaspyException
If the point format of the points does not match
the point format of the writer.
"""
if not points:
return
if self.done:
raise LaspyException("Cannot write points anymore")
if points.point_format != self.header.point_format:
raise LaspyException("Incompatible point formats")
if self.header.max_point_count() - self.header.point_count < len(points):
raise LaspyException(
"Cannot write {} points as it would exceed the maximum number of points the file"
"can store. Current point count: {}, max point count: {}".format(
len(points), self.header.point_count, self.header.max_point_count()
)
)
self.header.grow(points)
self.point_writer.write_points(points)
| (self, points: laspy.point.record.PackedPointRecord) -> NoneType |
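A sketch of the note above: when merging several inputs, each chunk is rescaled to the writer's scales and offsets before being written. It assumes both hypothetical inputs share the same point format, and that change_scaling (the ScaleAwarePointRecord method referenced in the note) accepts scales/offsets keyword arguments.

import laspy

with laspy.open("a.laz") as a, laspy.open("b.laz") as b:
    header = laspy.LasHeader(
        version=str(a.header.version), point_format=a.header.point_format
    )
    header.scales = a.header.scales
    header.offsets = a.header.offsets
    with laspy.open("merged.laz", mode="w", header=header) as writer:
        for reader in (a, b):
            for points in reader.chunk_iterator(500_000):
                # Rescale the chunk to the writer's scaling before writing it.
                points.change_scaling(scales=header.scales, offsets=header.offsets)
                writer.write_points(points)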
31,576 | laspy.errors | LaspyException | null | class LaspyException(Exception):
pass
| null |
31,577 | laspy._compression.backend | LazBackend | Supported backends for reading and writing LAS/LAZ | class LazBackend(ILazBackend, enum.Enum, metaclass=ABCEnumMeta):
"""Supported backends for reading and writing LAS/LAZ"""
# type_hint = Union[LazBackend, Iterable[LazBackend]]
LazrsParallel = 0
"""lazrs in multi-thread mode"""
Lazrs = 1
"""lazrs in single-thread mode"""
Laszip = 2
"""laszip backend"""
def _get(self) -> ILazBackend:
return _DEFAULT_BACKENDS[self.value]
def is_available(self) -> bool:
"""Returns true if the backend is available"""
for laz_backend in self.__class__:
laz_backend: LazBackend
if laz_backend == self:
return self._get().is_available()
return False
@property
def supports_append(self) -> bool:
return self._get().supports_append
def create_appender(self, dest: BinaryIO, header: "LasHeader") -> IPointAppender:
return self._get().create_appender(dest, header)
def create_reader(
self,
source: Any,
header: "LasHeader",
decompression_selection: Optional[DecompressionSelection] = None,
) -> IPointReader:
return self._get().create_reader(
source, header, decompression_selection=decompression_selection
)
def create_writer(
self,
dest: Any,
header: "LasHeader",
) -> IPointWriter:
return self._get().create_writer(dest, header)
@classmethod
def detect_available(cls) -> Tuple["LazBackend", ...]:
"""Returns a tuple containing the available backends in the current
python environment
"""
return tuple(
laz_backend
for backend, laz_backend in zip(_DEFAULT_BACKENDS, cls)
if backend.is_available()
)
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
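A sketch of backend selection; "data.laz" is a placeholder. detect_available lists the backends importable in the current environment, and laspy.open accepts a laz_backend argument to force one of them.

import laspy

available = laspy.LazBackend.detect_available()
print(available)

if laspy.LazBackend.LazrsParallel in available:
    # Force the multi-threaded lazrs backend for decompression.
    with laspy.open("data.laz", laz_backend=laspy.LazBackend.LazrsParallel) as reader:
        las = reader.read()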
31,578 | laspy.point.record | PackedPointRecord |
In the PackedPointRecord, fields that are a combinations of many sub-fields (fields stored on less than a byte)
are still packed together and are only de-packed and re-packed when accessed.
This uses of less memory than if the sub-fields were unpacked
>>> #return number is a sub-field
>>> from laspy import PointFormat, PackedPointRecord
>>> packed_point_record = PackedPointRecord.zeros(10,PointFormat(0))
>>> return_number = packed_point_record['return_number']
>>> return_number
<SubFieldView([0 0 0 0 0 0 0 0 0 0])>
>>> return_number[:] = 1
>>> np.alltrue(packed_point_record['return_number'] == 1)
True
| class PackedPointRecord:
"""
In the PackedPointRecord, fields that are combinations of many sub-fields (fields stored on less than a byte)
are still packed together and are only de-packed and re-packed when accessed.
This uses less memory than if the sub-fields were unpacked
>>> #return number is a sub-field
>>> from laspy import PointFormat, PackedPointRecord
>>> packed_point_record = PackedPointRecord.zeros(10,PointFormat(0))
>>> return_number = packed_point_record['return_number']
>>> return_number
<SubFieldView([0 0 0 0 0 0 0 0 0 0])>
>>> return_number[:] = 1
>>> np.alltrue(packed_point_record['return_number'] == 1)
True
"""
def __init__(self, data: np.ndarray, point_format: PointFormat):
self.__dict__["array"] = data
self.__dict__["point_format"] = point_format
self.__dict__["sub_fields_dict"] = dims.get_sub_fields_dict(point_format.id)
@property
def point_size(self):
"""Returns the point size in bytes taken by each points of the record
Returns
-------
int
The point size in bytes
"""
return self.array.dtype.itemsize
@staticmethod
def zeros(point_count, point_format):
"""Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format: PointFormat
The point format id the point record should have
point_count : int
The number of points the point record should have
Returns
-------
PackedPointRecord
"""
data = np.zeros(point_count, point_format.dtype())
return PackedPointRecord(data, point_format)
@staticmethod
def empty(point_format):
"""Creates an empty point record.
Parameters
----------
point_format: laspy.PointFormat
The point format id the point record should have
Returns
-------
PackedPointRecord
"""
return PackedPointRecord.zeros(point_count=0, point_format=point_format)
@classmethod
def from_point_record(
cls, other_point_record: "PackedPointRecord", new_point_format: PointFormat
) -> "PackedPointRecord":
"""Construct a new PackedPointRecord from an existing one with the ability to change
to point format while doing so
"""
array = np.zeros_like(other_point_record.array, dtype=new_point_format.dtype())
new_record = cls(array, new_point_format)
new_record.copy_fields_from(other_point_record)
return new_record
@classmethod
def from_buffer(cls, buffer, point_format, count=-1, offset=0):
points_dtype = point_format.dtype()
data = np.frombuffer(buffer, dtype=points_dtype, offset=offset, count=count)
return cls(data, point_format)
def copy_fields_from(self, other_record: "PackedPointRecord") -> None:
"""Tries to copy the values of the current dimensions from other_record"""
for dim_name in self.point_format.dimension_names:
try:
self[dim_name] = np.array(other_record[dim_name])
except ValueError:
pass
def copy(self) -> "PackedPointRecord":
return PackedPointRecord(self.array.copy(), deepcopy(self.point_format))
def memoryview(self) -> memoryview:
return memoryview(self.array)
def resize(self, new_size: int) -> None:
size_diff = new_size - len(self.array)
if size_diff > 0:
self.array = np.append(
self.array, np.zeros(size_diff, dtype=self.array.dtype)
)
elif size_diff < 0:
self.array = self.array[:new_size].copy()
def _append_zeros_if_too_small(self, value):
"""Appends zeros to the points stored if the value we are trying to
fit is bigger
"""
if len(value) > len(self.array):
self.resize(len(value))
def __eq__(self, other):
return self.point_format == other.point_format and np.all(
self.array == other.array
)
def __len__(self):
if self.array.ndim == 0:
return 1
return self.array.shape[0]
def __getitem__(self, item):
"""Gives access to the underlying numpy array
Unpack the dimension if item is the name of a sub-field
"""
if isinstance(item, (int, slice, np.ndarray, list, tuple)):
return PackedPointRecord(self.array[item], self.point_format)
try:
item = OLD_LASPY_NAMES[item]
except KeyError:
pass
# 1) Is it a sub field ?
try:
composed_dim, sub_field = self.sub_fields_dict[item]
return dims.SubFieldView(self.array[composed_dim], sub_field.mask)
except KeyError:
pass
# 2) Is it a Scaled Extra Byte Dimension ?
try:
dim_info = self.point_format.dimension_by_name(item)
if dim_info.is_standard is False and dim_info.is_scaled:
assert dim_info.scales is not None and dim_info.offsets is not None
return ScaledArrayView(
self.array[item], dim_info.scales, dim_info.offsets
)
except ValueError:
pass
return self.array[item]
def __setitem__(self, key, value):
"""Sets elements in the array"""
if isinstance(key, (tuple, list)):
if not isinstance(value, np.ndarray):
value = np.asarray(value)
if value.dtype.isbuiltin == 0:
# value is most likely a structured array (dtype = [('name1', 'type1'), ...])
# https://numpy.org/devdocs/reference/generated/numpy.dtype.isbuiltin.html
for name, v_name in zip(key, value.dtype.names):
self[name] = value[v_name]
else:
if len(key) == 1 and value.ndim == 1:
value = value[..., np.newaxis]
for i, name in enumerate(key):
self[name] = value[..., i]
return
self._append_zeros_if_too_small(value)
if isinstance(key, str):
self[key][:] = value
else:
self.array[key] = value
def __getattr__(self, item):
try:
return self[item]
except ValueError:
raise AttributeError("{} is not a valid dimension".format(item)) from None
def validate_dimension_name(self, key: str) -> DimensionNameValidity:
"""Given a name of a dimension this validates it."""
try:
key = OLD_LASPY_NAMES[key]
except KeyError:
pass
if key in self.point_format.dimension_names or key in self.array.dtype.names:
return DimensionNameValidity.Valid
elif key in dims.DIMENSIONS_TO_TYPE:
return DimensionNameValidity.Unsupported
else:
return DimensionNameValidity.Invalid
def __setattr__(self, key, value):
name_validity = self.validate_dimension_name(key)
if name_validity == DimensionNameValidity.Valid:
self[key] = value
elif name_validity == DimensionNameValidity.Unsupported:
raise ValueError(
f"Point format {self.point_format} does not support {key} dimension"
)
else:
super().__setattr__(key, value)
def __repr__(self):
return "<{}(fmt: {}, len: {}, point size: {})>".format(
self.__class__.__name__,
self.point_format,
len(self),
self.point_format.size,
)
| (data: numpy.ndarray, point_format: laspy.point.format.PointFormat) |
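A small sketch complementing the doctest above: whole dimensions come back as plain numpy arrays, while sub-fields come back as views into their packed byte. The values are illustrative only.

import numpy as np

from laspy import PackedPointRecord, PointFormat

rec = PackedPointRecord.zeros(5, PointFormat(0))
rec["intensity"] = np.array([10, 20, 30, 40, 50])    # whole dimension
rec["return_number"] = np.array([1, 2, 3, 1, 2])     # sub-field assignment
print(rec.point_size, len(rec))
print(rec["intensity"], rec["return_number"])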
31,579 | laspy.point.record | __eq__ | null | def __eq__(self, other):
return self.point_format == other.point_format and np.all(
self.array == other.array
)
| (self, other) |
31,580 | laspy.point.record | __getattr__ | null | def __getattr__(self, item):
try:
return self[item]
except ValueError:
raise AttributeError("{} is not a valid dimension".format(item)) from None
| (self, item) |
31,581 | laspy.point.record | __getitem__ | Gives access to the underlying numpy array
Unpack the dimension if item is the name of a sub-field
| def __getitem__(self, item):
"""Gives access to the underlying numpy array
Unpack the dimension if item is the name of a sub-field
"""
if isinstance(item, (int, slice, np.ndarray, list, tuple)):
return PackedPointRecord(self.array[item], self.point_format)
try:
item = OLD_LASPY_NAMES[item]
except KeyError:
pass
# 1) Is it a sub field ?
try:
composed_dim, sub_field = self.sub_fields_dict[item]
return dims.SubFieldView(self.array[composed_dim], sub_field.mask)
except KeyError:
pass
# 2) Is it a Scaled Extra Byte Dimension ?
try:
dim_info = self.point_format.dimension_by_name(item)
if dim_info.is_standard is False and dim_info.is_scaled:
assert dim_info.scales is not None and dim_info.offsets is not None
return ScaledArrayView(
self.array[item], dim_info.scales, dim_info.offsets
)
except ValueError:
pass
return self.array[item]
| (self, item) |
31,582 | laspy.point.record | __init__ | null | def __init__(self, data: np.ndarray, point_format: PointFormat):
self.__dict__["array"] = data
self.__dict__["point_format"] = point_format
self.__dict__["sub_fields_dict"] = dims.get_sub_fields_dict(point_format.id)
| (self, data: numpy.ndarray, point_format: laspy.point.format.PointFormat) |
31,583 | laspy.point.record | __len__ | null | def __len__(self):
if self.array.ndim == 0:
return 1
return self.array.shape[0]
| (self) |
31,584 | laspy.point.record | __repr__ | null | def __repr__(self):
return "<{}(fmt: {}, len: {}, point size: {})>".format(
self.__class__.__name__,
self.point_format,
len(self),
self.point_format.size,
)
| (self) |
31,585 | laspy.point.record | __setattr__ | null | def __setattr__(self, key, value):
name_validity = self.validate_dimension_name(key)
if name_validity == DimensionNameValidity.Valid:
self[key] = value
elif name_validity == DimensionNameValidity.Unsupported:
raise ValueError(
f"Point format {self.point_format} does not support {key} dimension"
)
else:
super().__setattr__(key, value)
| (self, key, value) |
31,586 | laspy.point.record | __setitem__ | Sets elements in the array | def __setitem__(self, key, value):
"""Sets elements in the array"""
if isinstance(key, (tuple, list)):
if not isinstance(value, np.ndarray):
value = np.asarray(value)
if value.dtype.isbuiltin == 0:
# value is most likely a structured array (dtype = [('name1', 'type1'), ...])
# https://numpy.org/devdocs/reference/generated/numpy.dtype.isbuiltin.html
for name, v_name in zip(key, value.dtype.names):
self[name] = value[v_name]
else:
if len(key) == 1 and value.ndim == 1:
value = value[..., np.newaxis]
for i, name in enumerate(key):
self[name] = value[..., i]
return
self._append_zeros_if_too_small(value)
if isinstance(key, str):
self[key][:] = value
else:
self.array[key] = value
| (self, key, value) |
31,587 | laspy.point.record | _append_zeros_if_too_small | Appends zeros to the points stored if the value we are trying to
fit is bigger
| def _append_zeros_if_too_small(self, value):
"""Appends zeros to the points stored if the value we are trying to
fit is bigger
"""
if len(value) > len(self.array):
self.resize(len(value))
| (self, value) |
31,588 | laspy.point.record | copy | null | def copy(self) -> "PackedPointRecord":
return PackedPointRecord(self.array.copy(), deepcopy(self.point_format))
| (self) -> laspy.point.record.PackedPointRecord |
31,589 | laspy.point.record | copy_fields_from | Tries to copy the values of the current dimensions from other_record | def copy_fields_from(self, other_record: "PackedPointRecord") -> None:
"""Tries to copy the values of the current dimensions from other_record"""
for dim_name in self.point_format.dimension_names:
try:
self[dim_name] = np.array(other_record[dim_name])
except ValueError:
pass
| (self, other_record: laspy.point.record.PackedPointRecord) -> NoneType |
31,590 | laspy.point.record | empty | Creates an empty point record.
Parameters
----------
point_format: laspy.PointFormat
The point format id the point record should have
Returns
-------
PackedPointRecord
| @staticmethod
def empty(point_format):
"""Creates an empty point record.
Parameters
----------
point_format: laspy.PointFormat
The point format id the point record should have
Returns
-------
PackedPointRecord
"""
return PackedPointRecord.zeros(point_count=0, point_format=point_format)
| (point_format) |
31,591 | laspy.point.record | memoryview | null | def memoryview(self) -> memoryview:
return memoryview(self.array)
| (self) -> memoryview |
31,592 | laspy.point.record | resize | null | def resize(self, new_size: int) -> None:
size_diff = new_size - len(self.array)
if size_diff > 0:
self.array = np.append(
self.array, np.zeros(size_diff, dtype=self.array.dtype)
)
elif size_diff < 0:
self.array = self.array[:new_size].copy()
| (self, new_size: int) -> NoneType |
31,593 | laspy.point.record | validate_dimension_name | Given a name of a dimension this validates it. | def validate_dimension_name(self, key: str) -> DimensionNameValidity:
"""Given a name of a dimension this validates it."""
try:
key = OLD_LASPY_NAMES[key]
except KeyError:
pass
if key in self.point_format.dimension_names or key in self.array.dtype.names:
return DimensionNameValidity.Valid
elif key in dims.DIMENSIONS_TO_TYPE:
return DimensionNameValidity.Unsupported
else:
return DimensionNameValidity.Invalid
| (self, key: str) -> laspy.point.record.DimensionNameValidity |
31,594 | laspy.point.record | zeros | Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format: PointFormat
The point format id the point record should have
point_count : int
The number of points the point record should have
Returns
-------
PackedPointRecord
| @staticmethod
def zeros(point_count, point_format):
"""Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format: PointFormat
The point format id the point record should have
point_count : int
The number of points the point record should have
Returns
-------
PackedPointRecord
"""
data = np.zeros(point_count, point_format.dtype())
return PackedPointRecord(data, point_format)
| (point_count, point_format) |
31,595 | laspy.point.format | PointFormat | Class that contains the information about the dimensions that form a PointFormat.
A PointFormat has 'standard' dimensions (dimensions defined in the LAS standard, each
point format has its set of dimensions), but it can also have extra (non-standard) dimensions
defined by the user.
>>> fmt = PointFormat(3)
>>> all(dim.is_standard for dim in fmt.dimensions)
True
>>> dim = fmt.dimension_by_name("classification") # or fmt["classification"]
>>> dim.max
31
>>> dim.min
0
>>> dim.num_bits
5
| class PointFormat:
"""Class that contains the informations about the dimensions that forms a PointFormat.
A PointFormat has 'standard' dimensions (dimensions defined in the LAS standard, each
point format has its set of dimensions), but it can also have extra (non-standard) dimensions
defined by the user.
>>> fmt = PointFormat(3)
>>> all(dim.is_standard for dim in fmt.dimensions)
True
>>> dim = fmt.dimension_by_name("classification") # or fmt["classification"]
>>> dim.max
31
>>> dim.min
0
>>> dim.num_bits
5
"""
def __init__(
self,
point_format_id: int,
):
"""
Parameters
----------
point_format_id: int
point format id
"""
self.id: int = point_format_id
self.dimensions: List[dims.DimensionInfo] = []
composed_dims = dims.COMPOSED_FIELDS[self.id]
for dim_name in dims.ALL_POINT_FORMATS_DIMENSIONS[self.id]:
try:
sub_fields = composed_dims[dim_name]
except KeyError:
dimension = dims.DimensionInfo.from_dtype(
dim_name, dims.DIMENSIONS_TO_TYPE[dim_name], is_standard=True
)
self.dimensions.append(dimension)
else:
for sub_field in sub_fields:
dimension = dims.DimensionInfo.from_bitmask(
sub_field.name, sub_field.mask, is_standard=True
)
self.dimensions.append(dimension)
@property
def standard_dimensions(self) -> Iterable[dims.DimensionInfo]:
"""Returns an iterable of the standard dimensions
>>> fmt = PointFormat(0)
>>> standard_dims = list(fmt.standard_dimensions)
>>> len(standard_dims)
15
>>> standard_dims[4].name
'return_number'
"""
return (dim for dim in self.dimensions if dim.is_standard)
@property
def extra_dimensions(self) -> Iterable[dims.DimensionInfo]:
return (dim for dim in self.dimensions if dim.is_standard is False)
@property
def dimension_names(self) -> Iterable[str]:
"""Returns the names of the dimensions contained in the point format"""
return (dim.name for dim in self.dimensions)
@property
def standard_dimension_names(self) -> Iterable[str]:
"""Returns the names of the extra dimensions in this point format"""
return (dim.name for dim in self.standard_dimensions)
@property
def extra_dimension_names(self) -> Iterable[str]:
"""Returns the names of the extra dimensions in this point format"""
return (dim.name for dim in self.extra_dimensions)
@property
def size(self) -> int:
"""Returns the number of bytes (standard + extra) a point takes
>>> PointFormat(3).size
34
>>> fmt = PointFormat(3)
>>> fmt.add_extra_dimension(ExtraBytesParams("codification", "uint64"))
>>> fmt.size
42
"""
return int(sum(dim.num_bits for dim in self.dimensions) // 8)
@property
def num_standard_bytes(self) -> int:
"""Returns the number of bytes used by standard dims
>>> fmt = PointFormat(3)
>>> fmt.add_extra_dimension(ExtraBytesParams("codification", "uint64"))
>>> fmt.num_standard_bytes
34
"""
return int(sum(dim.num_bits for dim in self.standard_dimensions) // 8)
@property
def num_extra_bytes(self) -> int:
"""Returns the number of extra bytes
>>> fmt = PointFormat(3)
>>> fmt.add_extra_dimension(ExtraBytesParams("codification", "uint64"))
>>> fmt.num_extra_bytes
8
"""
return int(sum(dim.num_bits for dim in self.extra_dimensions) // 8)
@property
def has_waveform_packet(self):
"""Returns True if the point format has waveform packet dimensions"""
dimensions = set(self.dimension_names)
return all(name in dimensions for name in dims.WAVEFORM_FIELDS_NAMES)
def dimension_by_name(self, name: str) -> dims.DimensionInfo:
"""Returns the dimension info for the dimension by name
ValueError is raised if the dimension does not exist in the point format
>>> info = PointFormat(2).dimension_by_name('number_of_returns')
>>> info.name == 'number_of_returns'
True
>>> info.num_bits == 3
True
>>> info = PointFormat(2).dimension_by_name('gps_time')
Traceback (most recent call last):
...
ValueError: Dimension 'gps_time' does not exist
"""
for dim in self.dimensions:
if dim.name == name:
return dim
raise ValueError(f"Dimension '{name}' does not exist")
def add_extra_dimension(self, param: ExtraBytesParams) -> None:
"""Add an extra, user-defined dimension"""
dim_info = dims.DimensionInfo.from_extra_bytes_param(param)
# todo: this should be checked in extra bytes param ctor
if (
dim_info.num_elements > 3
and dim_info.kind != dims.DimensionKind.UnsignedInteger
):
raise LaspyException("Extra Dimensions do not support more than 3 elements")
self.dimensions.append(dim_info)
def remove_extra_dimension(self, name: str) -> None:
dimensions = [
dim for dim in self.dimensions if dim.name == name and not dim.is_standard
]
try:
dimension = dimensions[0]
except IndexError:
if name in self.standard_dimension_names:
raise LaspyException(
f"The dimension named '{name}' is not an extra dimension, "
"so it cannot be removed"
)
else:
raise LaspyException(
f"'No extra dimension named '{name}' exist"
) from None
self.dimensions = [dim for dim in self.dimensions if dim is not dimension]
def dtype(self):
"""Returns the numpy.dtype used to store the point records in a numpy array
.. note::
The dtype corresponds to the dtype with sub_fields *packed* into their
composed fields
"""
dtype = dims.ALL_POINT_FORMATS_DTYPE[self.id]
descr = dtype.descr
for extra_dim in self.extra_dimensions:
descr.append((extra_dim.name, extra_dim.type_str()))
return np.dtype(descr)
def __getitem__(self, item):
if isinstance(item, str):
return self.dimension_by_name(item)
return self.dimensions[item]
def __eq__(self, other):
if self.id != other.id:
return False
for my_eb, ot_eb in zip_longest(self.extra_dimensions, other.extra_dimensions):
if my_eb is None or ot_eb is None:
return False
if my_eb != ot_eb:
return False
return True
def __repr__(self):
return "<PointFormat({}, {} bytes of extra dims)>".format(
self.id, self.num_extra_bytes
)
| (point_format_id: int) |
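A sketch of adding a user-defined dimension at the format level; the dimension name "confidence" is illustrative.

from laspy import ExtraBytesParams, PointFormat

fmt = PointFormat(6)
fmt.add_extra_dimension(ExtraBytesParams("confidence", "float64"))
print(fmt.size, fmt.num_standard_bytes, fmt.num_extra_bytes)
print(list(fmt.extra_dimension_names))
print(fmt.dtype())  # numpy dtype including the extra dimension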
31,596 | laspy.point.format | __eq__ | null | def __eq__(self, other):
if self.id != other.id:
return False
for my_eb, ot_eb in zip_longest(self.extra_dimensions, other.extra_dimensions):
if my_eb is None or ot_eb is None:
return False
if my_eb != ot_eb:
return False
return True
| (self, other) |